From dff2c686c277f4f8de5f1cab5a486dbad70d7864 Mon Sep 17 00:00:00 2001
From: renzhc
Date: Tue, 3 Sep 2024 09:52:03 +0800
Subject: [PATCH] first commit

---
 CITATION.cff | 9 +
 CONTRIBUTING.md | 73 +
 LICENSE | 203 +
 MANIFEST.in | 5 +
 configs/_base_/datasets/cifar100_bs16.py | 45 +
 configs/_base_/datasets/cifar10_bs16.py | 45 +
 configs/_base_/datasets/coco_caption.py | 70 +
 configs/_base_/datasets/coco_okvqa.py | 75 +
 configs/_base_/datasets/coco_retrieval.py | 99 +
 configs/_base_/datasets/coco_vg_vqa.py | 96 +
 configs/_base_/datasets/coco_vqa.py | 84 +
 configs/_base_/datasets/cub_bs8_384.py | 51 +
 configs/_base_/datasets/cub_bs8_448.py | 50 +
 configs/_base_/datasets/flickr30k_caption.py | 92 +
 .../_base_/datasets/flickr30k_retrieval.py | 112 +
 configs/_base_/datasets/gqa.py | 81 +
 configs/_base_/datasets/imagenet21k_bs128.py | 28 +
 .../_base_/datasets/imagenet_bs128_mbv3.py | 66 +
 .../imagenet_bs128_poolformer_medium_224.py | 80 +
 .../imagenet_bs128_poolformer_small_224.py | 80 +
 .../datasets/imagenet_bs128_revvit_224.py | 83 +
 .../imagenet_bs128_riformer_medium_384.py | 80 +
 .../imagenet_bs128_riformer_small_384.py | 80 +
 .../_base_/datasets/imagenet_bs128_vig_224.py | 80 +
 .../_base_/datasets/imagenet_bs16_eva_196.py | 60 +
 .../_base_/datasets/imagenet_bs16_eva_336.py | 60 +
 .../_base_/datasets/imagenet_bs16_eva_448.py | 62 +
 .../_base_/datasets/imagenet_bs16_eva_560.py | 60 +
 .../datasets/imagenet_bs16_pil_bicubic_384.py | 53 +
 .../_base_/datasets/imagenet_bs256_beitv2.py | 47 +
 .../datasets/imagenet_bs256_davit_224.py | 80 +
 .../_base_/datasets/imagenet_bs256_itpn.py | 49 +
 .../datasets/imagenet_bs256_levit_224.py | 80 +
 .../_base_/datasets/imagenet_bs256_rsb_a12.py | 72 +
 .../_base_/datasets/imagenet_bs256_rsb_a3.py | 72 +
 .../datasets/imagenet_bs256_simmim_192.py | 33 +
 .../datasets/imagenet_bs256_swin_192.py | 81 +
 configs/_base_/datasets/imagenet_bs32.py | 51 +
 configs/_base_/datasets/imagenet_bs32_byol.py | 89 +
 .../_base_/datasets/imagenet_bs32_mocov2.py | 58 +
 .../datasets/imagenet_bs32_pil_bicubic.py | 60 +
 .../datasets/imagenet_bs32_pil_resize.py | 51 +
 .../_base_/datasets/imagenet_bs32_simclr.py | 52 +
 configs/_base_/datasets/imagenet_bs512_mae.py | 32 +
 .../_base_/datasets/imagenet_bs512_mocov3.py | 90 +
 configs/_base_/datasets/imagenet_bs64.py | 51 +
 .../_base_/datasets/imagenet_bs64_autoaug.py | 59 +
 .../_base_/datasets/imagenet_bs64_clip_224.py | 73 +
 .../_base_/datasets/imagenet_bs64_clip_384.py | 73 +
 .../_base_/datasets/imagenet_bs64_clip_448.py | 74 +
 .../datasets/imagenet_bs64_convmixer_224.py | 80 +
 .../datasets/imagenet_bs64_deit3_224.py | 80 +
 .../datasets/imagenet_bs64_deit3_384.py | 60 +
 .../datasets/imagenet_bs64_edgenext_256.py | 80 +
 .../datasets/imagenet_bs64_hivit_224.py | 83 +
 .../datasets/imagenet_bs64_mixer_224.py | 52 +
 .../datasets/imagenet_bs64_pil_resize.py | 51 +
 .../imagenet_bs64_pil_resize_autoaug.py | 68 +
 .../_base_/datasets/imagenet_bs64_swin_224.py | 80 +
 .../_base_/datasets/imagenet_bs64_swin_256.py | 80 +
 .../_base_/datasets/imagenet_bs64_swin_384.py | 54 +
 .../_base_/datasets/imagenet_bs64_t2t_224.py | 80 +
 .../datasets/imagenet_bs8_pil_bicubic_320.py | 59 +
 configs/_base_/datasets/inshop_bs32_448.py | 64 +
 configs/_base_/datasets/nlvr2.py | 86 +
 configs/_base_/datasets/nocaps.py | 41 +
 configs/_base_/datasets/ocrvqa.py | 81 +
 configs/_base_/datasets/pipelines/auto_aug.py | 96 +
 configs/_base_/datasets/pipelines/rand_aug.py | 43 +
 configs/_base_/datasets/refcoco.py | 105 +
 configs/_base_/datasets/tiny_imagenet_bs32.py | 51 +
 .../datasets/tiny_imagenet_bs32_pil_resize.py | 51 +
 .../tiny_imagenet_bs64_pil_resize_autoaug.py | 68 +
 .../datasets/tiny_imagenet_bs64_swin_224.py | 80 +
 configs/_base_/datasets/vizwiz.py | 80 +
 configs/_base_/datasets/voc_bs16.py | 65 +
 configs/_base_/datasets/vsr.py | 81 +
 configs/_base_/default_runtime.py | 51 +
 configs/_base_/models/conformer/base-p16.py | 23 +
 configs/_base_/models/conformer/small-p16.py | 23 +
 configs/_base_/models/conformer/small-p32.py | 27 +
 configs/_base_/models/conformer/tiny-p16.py | 23 +
 .../models/convmixer/convmixer-1024-20.py | 11 +
 .../models/convmixer/convmixer-1536-20.py | 11 +
 .../models/convmixer/convmixer-768-32.py | 11 +
 .../_base_/models/convnext/convnext-base.py | 19 +
 .../_base_/models/convnext/convnext-large.py | 19 +
 .../_base_/models/convnext/convnext-small.py | 19 +
 .../_base_/models/convnext/convnext-tiny.py | 19 +
 .../_base_/models/convnext/convnext-xlarge.py | 19 +
 configs/_base_/models/convnext_v2/atto.py | 20 +
 configs/_base_/models/convnext_v2/base.py | 24 +
 configs/_base_/models/convnext_v2/femto.py | 20 +
 configs/_base_/models/convnext_v2/huge.py | 24 +
 configs/_base_/models/convnext_v2/large.py | 24 +
 configs/_base_/models/convnext_v2/nano.py | 20 +
 configs/_base_/models/convnext_v2/pico.py | 20 +
 configs/_base_/models/convnext_v2/tiny.py | 24 +
 configs/_base_/models/davit/davit-base.py | 16 +
 configs/_base_/models/davit/davit-small.py | 16 +
 configs/_base_/models/davit/davit-tiny.py | 16 +
 .../_base_/models/deit3/deit3-base-p16-224.py | 24 +
 .../_base_/models/deit3/deit3-base-p16-384.py | 24 +
 .../_base_/models/deit3/deit3-huge-p14-224.py | 24 +
 .../models/deit3/deit3-large-p16-224.py | 24 +
 .../models/deit3/deit3-large-p16-384.py | 24 +
 .../models/deit3/deit3-medium-p16-224.py | 24 +
 .../models/deit3/deit3-small-p16-224.py | 24 +
 .../models/deit3/deit3-small-p16-384.py | 24 +
 configs/_base_/models/densenet/densenet121.py | 11 +
 configs/_base_/models/densenet/densenet161.py | 11 +
 configs/_base_/models/densenet/densenet169.py | 11 +
 configs/_base_/models/densenet/densenet201.py | 11 +
 .../_base_/models/edgenext/edgenext-base.py | 23 +
 .../_base_/models/edgenext/edgenext-small.py | 23 +
 .../_base_/models/edgenext/edgenext-xsmall.py | 23 +
 .../models/edgenext/edgenext-xxsmall.py | 23 +
 configs/_base_/models/efficientformer-l1.py | 18 +
 configs/_base_/models/efficientnet_b0.py | 12 +
 configs/_base_/models/efficientnet_b1.py | 12 +
 configs/_base_/models/efficientnet_b2.py | 12 +
 configs/_base_/models/efficientnet_b3.py | 12 +
 configs/_base_/models/efficientnet_b4.py | 12 +
 configs/_base_/models/efficientnet_b5.py | 12 +
 configs/_base_/models/efficientnet_b6.py | 12 +
 configs/_base_/models/efficientnet_b7.py | 12 +
 configs/_base_/models/efficientnet_b8.py | 12 +
 configs/_base_/models/efficientnet_em.py | 13 +
 configs/_base_/models/efficientnet_es.py | 13 +
 configs/_base_/models/efficientnet_l2.py | 12 +
 .../efficientnet_v2/efficientnetv2_b0.py | 12 +
 .../efficientnet_v2/efficientnetv2_b1.py | 12 +
 .../efficientnet_v2/efficientnetv2_b2.py | 12 +
 .../efficientnet_v2/efficientnetv2_b3.py | 12 +
 .../efficientnet_v2/efficientnetv2_l.py | 12 +
 .../efficientnet_v2/efficientnetv2_m.py | 12 +
 .../efficientnet_v2/efficientnetv2_s.py | 12 +
 .../efficientnet_v2/efficientnetv2_xl.py | 12 +
 configs/_base_/models/eva/eva-g.py | 29 +
 configs/_base_/models/eva/eva-l.py | 30 +
 configs/_base_/models/hivit/base_224.py | 28 +
 configs/_base_/models/hivit/small_224.py | 28 +
 configs/_base_/models/hivit/tiny_224.py | 28 +
 .../_base_/models/hornet/hornet-base-gf.py | 20 +
 configs/_base_/models/hornet/hornet-base.py | 21 +
 .../_base_/models/hornet/hornet-large-gf.py | 21 +
 .../models/hornet/hornet-large-gf384.py | 17 +
 configs/_base_/models/hornet/hornet-large.py | 21 +
 .../_base_/models/hornet/hornet-small-gf.py | 21 +
 configs/_base_/models/hornet/hornet-small.py | 21 +
 .../_base_/models/hornet/hornet-tiny-gf.py | 21 +
 configs/_base_/models/hornet/hornet-tiny.py | 21 +
 configs/_base_/models/hrnet/hrnet-w18.py | 15 +
 configs/_base_/models/hrnet/hrnet-w30.py | 15 +
 configs/_base_/models/hrnet/hrnet-w32.py | 15 +
 configs/_base_/models/hrnet/hrnet-w40.py | 15 +
 configs/_base_/models/hrnet/hrnet-w44.py | 15 +
 configs/_base_/models/hrnet/hrnet-w48.py | 15 +
 configs/_base_/models/hrnet/hrnet-w64.py | 15 +
 configs/_base_/models/inception_v3.py | 10 +
 configs/_base_/models/itpn_hivit-base-p16.py | 33 +
 configs/_base_/models/levit-256-p16.py | 26 +
 configs/_base_/models/mae_hivit-base-p16.py | 24 +
 configs/_base_/models/mae_vit-base-p16.py | 23 +
 configs/_base_/models/mixmim/mixmim_base.py | 20 +
 .../_base_/models/mlp_mixer_base_patch16.py | 25 +
 .../_base_/models/mlp_mixer_large_patch16.py | 25 +
 configs/_base_/models/mobilenet_v2_1x.py | 12 +
 .../mobilenet_v3_large_imagenet.py | 16 +
 .../mobilenet_v3_small_050_imagenet.py | 16 +
 .../mobilenet_v3_small_075_imagenet.py | 16 +
 .../mobilenet_v3/mobilenet_v3_small_cifar.py | 13 +
 .../mobilenet_v3_small_imagenet.py | 16 +
 .../_base_/models/mobileone/mobileone_s0.py | 19 +
 .../_base_/models/mobileone/mobileone_s1.py | 19 +
 .../_base_/models/mobileone/mobileone_s2.py | 19 +
 .../_base_/models/mobileone/mobileone_s3.py | 19 +
 .../_base_/models/mobileone/mobileone_s4.py | 19 +
 .../_base_/models/mobilevit/mobilevit_s.py | 12 +
 .../_base_/models/mobilevit/mobilevit_xs.py | 12 +
 .../_base_/models/mobilevit/mobilevit_xxs.py | 12 +
 configs/_base_/models/mvit/mvitv2-base.py | 19 +
 configs/_base_/models/mvit/mvitv2-large.py | 23 +
 configs/_base_/models/mvit/mvitv2-small.py | 19 +
 configs/_base_/models/mvit/mvitv2-tiny.py | 19 +
 .../models/poolformer/poolformer_m36.py | 22 +
 .../models/poolformer/poolformer_m48.py | 22 +
 .../models/poolformer/poolformer_s12.py | 22 +
 .../models/poolformer/poolformer_s24.py | 22 +
 .../models/poolformer/poolformer_s36.py | 22 +
 configs/_base_/models/regnet/regnetx_1.6gf.py | 12 +
 configs/_base_/models/regnet/regnetx_12gf.py | 12 +
 configs/_base_/models/regnet/regnetx_3.2gf.py | 12 +
 configs/_base_/models/regnet/regnetx_4.0gf.py | 12 +
 configs/_base_/models/regnet/regnetx_400mf.py | 12 +
 configs/_base_/models/regnet/regnetx_6.4gf.py | 12 +
 configs/_base_/models/regnet/regnetx_8.0gf.py | 12 +
 configs/_base_/models/regnet/regnetx_800mf.py | 12 +
 configs/_base_/models/replknet-31B_in1k.py | 25 +
 configs/_base_/models/replknet-31L_in1k.py | 15 +
 configs/_base_/models/replknet-XL_in1k.py | 15 +
 configs/_base_/models/repmlp-base_224.py | 18 +
 configs/_base_/models/repvgg-A0_in1k.py | 15 +
 .../_base_/models/repvgg-B3_lbs-mixup_in1k.py | 22 +
 configs/_base_/models/res2net101-w26-s4.py | 18 +
 configs/_base_/models/res2net50-w14-s8.py | 18 +
 configs/_base_/models/res2net50-w26-s4.py | 18 +
 configs/_base_/models/res2net50-w26-s6.py | 18 +
 configs/_base_/models/res2net50-w26-s8.py | 18 +
 configs/_base_/models/res2net50-w48-s2.py | 18 +
 configs/_base_/models/resnest101.py | 25 +
 configs/_base_/models/resnest200.py | 25 +
 configs/_base_/models/resnest269.py | 25 +
 configs/_base_/models/resnest50.py | 24 +
 configs/_base_/models/resnet101.py | 17 +
 configs/_base_/models/resnet101_cifar.py | 16 +
 configs/_base_/models/resnet152.py | 17 +
 configs/_base_/models/resnet152_cifar.py | 16 +
 configs/_base_/models/resnet18.py | 17 +
 configs/_base_/models/resnet18_cifar.py | 16 +
 configs/_base_/models/resnet34.py | 17 +
 configs/_base_/models/resnet34_cifar.py | 16 +
 configs/_base_/models/resnet34_gem.py | 17 +
 configs/_base_/models/resnet50.py | 17 +
 configs/_base_/models/resnet50_cifar.py | 16 +
 .../_base_/models/resnet50_cifar_cutmix.py | 18 +
 configs/_base_/models/resnet50_cifar_mixup.py | 17 +
 configs/_base_/models/resnet50_cutmix.py | 18 +
 .../_base_/models/resnet50_label_smooth.py | 18 +
 configs/_base_/models/resnet50_mixup.py | 17 +
 configs/_base_/models/resnetv1c50.py | 17 +
 configs/_base_/models/resnetv1d101.py | 17 +
 configs/_base_/models/resnetv1d152.py | 17 +
 configs/_base_/models/resnetv1d50.py | 17 +
 configs/_base_/models/resnext101_32x4d.py | 19 +
 configs/_base_/models/resnext101_32x8d.py | 19 +
 configs/_base_/models/resnext152_32x4d.py | 19 +
 configs/_base_/models/resnext50_32x4d.py | 19 +
 configs/_base_/models/revvit/revvit-base.py | 27 +
 configs/_base_/models/revvit/revvit-small.py | 27 +
 configs/_base_/models/seresnet101.py | 17 +
 configs/_base_/models/seresnet50.py | 17 +
 configs/_base_/models/seresnext101_32x4d.py | 20 +
 configs/_base_/models/seresnext50_32x4d.py | 20 +
 configs/_base_/models/shufflenet_v1_1x.py | 12 +
 configs/_base_/models/shufflenet_v2_1x.py | 12 +
 .../models/swin_transformer/base_224.py | 23 +
 .../models/swin_transformer/base_384.py | 16 +
 .../models/swin_transformer/large_224.py | 12 +
 .../models/swin_transformer/large_384.py | 16 +
 .../models/swin_transformer/small_224.py | 24 +
 .../models/swin_transformer/tiny_224.py | 23 +
 .../models/swin_transformer/tiny_base_224.py | 23 +
 .../models/swin_transformer/tiny_large_224.py | 12 +
 .../models/swin_transformer_v2/base_256.py | 26 +
 .../models/swin_transformer_v2/base_384.py | 17 +
 .../models/swin_transformer_v2/large_256.py | 16 +
 .../models/swin_transformer_v2/large_384.py | 16 +
 .../models/swin_transformer_v2/small_256.py | 26 +
 .../models/swin_transformer_v2/tiny_256.py | 26 +
 configs/_base_/models/t2t-vit-t-14.py | 42 +
 configs/_base_/models/t2t-vit-t-19.py | 42 +
 configs/_base_/models/t2t-vit-t-24.py | 42 +
 configs/_base_/models/tiny-vit-large-p16.py | 24 +
 configs/_base_/models/tinyvit/tinyvit-11m.py | 25 +
 configs/_base_/models/tinyvit/tinyvit-21m.py | 25 +
 configs/_base_/models/tinyvit/tinyvit-5m.py | 25 +
 configs/_base_/models/tnt_s_patch16_224.py | 29 +
 configs/_base_/models/twins_pcpvt_base.py | 31 +
 configs/_base_/models/twins_svt_base.py | 31 +
 configs/_base_/models/van/van_base.py | 13 +
 configs/_base_/models/van/van_large.py | 13 +
 configs/_base_/models/van/van_small.py | 22 +
 configs/_base_/models/van/van_tiny.py | 22 +
 configs/_base_/models/vgg11.py | 10 +
 configs/_base_/models/vgg11bn.py | 11 +
 configs/_base_/models/vgg13.py | 10 +
 configs/_base_/models/vgg13bn.py | 11 +
 configs/_base_/models/vgg16.py | 10 +
 configs/_base_/models/vgg16bn.py | 11 +
 configs/_base_/models/vgg19.py | 10 +
 configs/_base_/models/vgg19bn.py | 11 +
 configs/_base_/models/vig/pyramid_vig_base.py | 32 +
 .../_base_/models/vig/pyramid_vig_medium.py | 32 +
 .../_base_/models/vig/pyramid_vig_small.py | 32 +
 configs/_base_/models/vig/pyramid_vig_tiny.py | 32 +
 configs/_base_/models/vig/vig_base.py | 33 +
 configs/_base_/models/vig/vig_small.py | 33 +
 configs/_base_/models/vig/vig_tiny.py | 33 +
 configs/_base_/models/vit-base-p16.py | 25 +
 configs/_base_/models/vit-base-p32.py | 24 +
 configs/_base_/models/vit-large-p16.py | 24 +
 configs/_base_/models/vit-large-p32.py | 24 +
 configs/_base_/models/wide-resnet50.py | 20 +
 configs/_base_/schedules/cifar10_bs128.py | 15 +
 configs/_base_/schedules/cub_bs64.py | 34 +
 .../imagenet_bs1024_adamw_conformer.py | 43 +
 .../schedules/imagenet_bs1024_adamw_hivit.py | 41 +
 .../schedules/imagenet_bs1024_adamw_revvit.py | 41 +
 .../schedules/imagenet_bs1024_adamw_swin.py | 41 +
 .../_base_/schedules/imagenet_bs1024_coslr.py | 18 +
 .../imagenet_bs1024_linearlr_bn_nowd.py | 20 +
 configs/_base_/schedules/imagenet_bs2048.py | 21 +
 .../_base_/schedules/imagenet_bs2048_AdamW.py | 39 +
 .../schedules/imagenet_bs2048_adamw_levit.py | 40 +
 .../_base_/schedules/imagenet_bs2048_coslr.py | 35 +
 .../_base_/schedules/imagenet_bs2048_rsb.py | 32 +
 configs/_base_/schedules/imagenet_bs256.py | 16 +
 .../_base_/schedules/imagenet_bs256_140e.py | 16 +
 .../imagenet_bs256_200e_coslr_warmup.py | 34 +
 .../_base_/schedules/imagenet_bs256_coslr.py | 16 +
 .../imagenet_bs256_coslr_coswd_300e.py | 40 +
 .../schedules/imagenet_bs256_epochstep.py | 15 +
 .../_base_/schedules/imagenet_bs4096_AdamW.py | 39 +
 .../schedules/imagenet_lars_coslr_200e.py | 20 +
 .../schedules/imagenet_lars_coslr_90e.py | 14 +
 .../schedules/imagenet_sgd_coslr_100e.py | 14 +
 .../schedules/imagenet_sgd_coslr_200e.py | 12 +
 .../schedules/imagenet_sgd_steplr_100e.py | 14 +
 configs/arcface/README.md | 80 +
 configs/arcface/metafile.yml | 28 +
 .../arcface/resnet50-arcface_8xb32_inshop.py | 71 +
 configs/barlowtwins/README.md | 85 +
 ...wtwins_resnet50_8xb256-coslr-1000e_in1k.py | 70 +
 ...owtwins_resnet50_8xb256-coslr-300e_in1k.py | 70 +
 .../resnet50_8xb32-linear-coslr-100e_in1k.py | 15 +
 configs/barlowtwins/metafile.yml | 44 +
 configs/beit/README.md | 88 +
 ...eit-base-p16_8xb256-amp-coslr-300e_in1k.py | 130 +
 .../beit-base-p16_8xb128-coslr-100e_in1k.py | 127 +
 .../benchmarks/beit-base-p16_8xb64_in1k.py | 43 +
 configs/beit/metafile.yml | 69 +
 configs/beitv2/README.md | 90 +
 ...it-base-p16_8xb256-amp-coslr-1600e_in1k.py | 119 +
 ...eit-base-p16_8xb256-amp-coslr-300e_in1k.py | 119 +
 .../beit-base-p16_8xb128-coslr-100e_in1k.py | 122 +
 .../benchmarks/beit-base-p16_8xb64_in1k.py | 34 +
 configs/beitv2/metafile.yml | 69 +
 configs/blip/README.md | 128 +
 configs/blip/blip-base_8xb16_refcoco.py | 62 +
 configs/blip/blip-base_8xb32_caption.py | 59 +
 .../blip/blip-base_8xb32_caption_flickr30k.py | 59 +
 configs/blip/blip-base_8xb32_nlvr.py | 59 +
 configs/blip/blip-base_8xb32_nocaps.py | 46 +
 configs/blip/blip-base_8xb32_ocrvqa.py | 75 +
 configs/blip/blip-base_8xb32_okvqa.py | 75 +
 configs/blip/blip-base_8xb32_retrieval.py | 83 +
 .../blip-base_8xb32_retrieval_flickr30k.py | 83 +
 configs/blip/blip-base_8xb32_vqa.py | 76 +
 configs/blip/metafile.yml | 99 +
 configs/blip2/README.md | 74 +
 configs/blip2/blip2-opt2.7b_8xb16_gqa.py | 87 +
 configs/blip2/blip2-opt2.7b_8xb16_vqa.py | 95 +
 configs/blip2/blip2-opt2.7b_8xb32_caption.py | 76 +
 configs/blip2/blip2_8xb32_retrieval.py | 82 +
 configs/blip2/metafile.yml | 71 +
 configs/byol/README.md | 85 +
 .../benchmarks/mask-rcnn_r50-c4_ms-1x_coco.py | 46 +
 .../mask-rcnn_r50_fpn_ms-1x_coco.py | 24 +
 .../resnet50_8xb512-linear-coslr-90e_in1k.py | 18 +
 .../byol_resnet50_16xb256-coslr-200e_in1k.py | 60 +
 configs/byol/metafile.yml | 44 +
 configs/cae/README.md | 86 +
 .../beit-base-p16_8xb128-coslr-100e_in1k.py | 130 +
 ...eit-base-p16_8xb256-amp-coslr-300e_in1k.py | 115 +
 configs/cae/metafile.yml | 43 +
 configs/chinese_clip/README.md | 69 +
 .../cn-clip_resnet50_zeroshot-cls_cifar100.py | 72 +
 ...clip_vit-base-p16_zeroshot-cls_cifar100.py | 76 +
 ...clip_vit-huge-p14_zeroshot-cls_cifar100.py | 75 +
 ...lip_vit-large-p14_zeroshot-cls_cifar100.py | 75 +
 configs/chinese_clip/metafile.yml | 79 +
 configs/clip/README.md | 90 +
 ...clip_vit-base-p16_zeroshot-cls_cifar100.py | 68 +
 .../clip_vit-base-p16_zeroshot-cls_in1k.py | 69 +
 ...lip_vit-large-p14_zeroshot-cls_cifar100.py | 68 +
 .../clip_vit-large-p14_zeroshot-cls_in1k.py | 69 +
 configs/clip/metafile.yml | 308 +
 .../clip/vit-base-p16_pt-64xb64_in1k-384px.py | 40 +
 .../clip/vit-base-p16_pt-64xb64_in1k-448px.py | 40 +
 configs/clip/vit-base-p16_pt-64xb64_in1k.py | 40 +
 .../clip/vit-base-p32_pt-64xb64_in1k-384px.py | 40 +
 .../clip/vit-base-p32_pt-64xb64_in1k-448px.py | 40 +
 configs/clip/vit-base-p32_pt-64xb64_in1k.py | 40 +
 configs/clip/vit-large-p14_headless.py | 34 +
 configs/conformer/README.md | 84 +
 .../conformer-base-p16_8xb128_in1k.py | 8 +
 .../conformer-small-p16_8xb128_in1k.py | 8 +
 .../conformer-small-p32_8xb128_in1k.py | 8 +
 .../conformer-tiny-p16_8xb128_in1k.py | 8 +
 configs/conformer/metafile.yml | 78 +
 configs/convmixer/README.md | 79 +
 .../convmixer-1024-20_10xb64_in1k.py | 39 +
 .../convmixer-1536-20_10xb64_in1k.py | 39 +
 .../convmixer/convmixer-768-32_10xb64_in1k.py | 19 +
 configs/convmixer/metafile.yml | 61 +
 configs/convnext/README.md | 123 +
 .../convnext-base_32xb128_in1k-384px.py | 23 +
 .../convnext/convnext-base_32xb128_in1k.py | 23 +
 .../convnext/convnext-base_32xb128_in21k.py | 24 +
 .../convnext-large_64xb64_in1k-384px.py | 23 +
 .../convnext/convnext-large_64xb64_in1k.py | 23 +
 .../convnext/convnext-large_64xb64_in21k.py | 24 +
 .../convnext-small_32xb128_in1k-384px.py | 23 +
 .../convnext/convnext-small_32xb128_in1k.py | 23 +
 .../convnext-tiny_32xb128_in1k-384px.py | 23 +
 .../convnext/convnext-tiny_32xb128_in1k.py | 23 +
 .../convnext-xlarge_64xb64_in1k-384px.py | 23 +
 .../convnext/convnext-xlarge_64xb64_in1k.py | 23 +
 .../convnext/convnext-xlarge_64xb64_in21k.py | 24 +
 configs/convnext/metafile.yml | 410 +
 configs/convnext_v2/README.md | 107 +
 .../convnext-v2-atto_32xb32_in1k.py | 24 +
 .../convnext-v2-base_32xb32_in1k-384px.py | 35 +
 .../convnext-v2-base_32xb32_in1k.py | 35 +
 .../convnext-v2-femto_32xb32_in1k.py | 24 +
 .../convnext-v2-huge_32xb32_in1k-384px.py | 35 +
 .../convnext-v2-huge_32xb32_in1k-512px.py | 54 +
 .../convnext-v2-huge_32xb32_in1k.py | 35 +
 .../convnext-v2-large_32xb32_in1k-384px.py | 35 +
 .../convnext-v2-large_32xb32_in1k.py | 35 +
 .../convnext-v2-nano_32xb32_in1k-384px.py | 24 +
 .../convnext-v2-nano_32xb32_in1k.py | 24 +
 .../convnext-v2-pico_32xb32_in1k.py | 24 +
 .../convnext-v2-tiny_32xb32_in1k-384px.py | 35 +
 .../convnext-v2-tiny_32xb32_in1k.py | 35 +
 configs/convnext_v2/metafile.yml | 433 +
 configs/cspnet/README.md | 78 +
 configs/cspnet/cspdarknet50_8xb32_in1k.py | 45 +
 configs/cspnet/cspresnet50_8xb32_in1k.py | 45 +
 configs/cspnet/cspresnext50_8xb32_in1k.py | 45 +
 configs/cspnet/metafile.yml | 64 +
 configs/csra/README.md | 73 +
 configs/csra/metafile.yml | 29 +
 .../csra/resnet101-csra_1xb16_voc07-448px.py | 78 +
 configs/davit/README.md | 77 +
 configs/davit/davit-base_4xb256_in1k.py | 9 +
 configs/davit/davit-small_4xb256_in1k.py | 9 +
 configs/davit/davit-tiny_4xb256_in1k.py | 9 +
 configs/davit/metafile.yml | 71 +
 configs/deit/README.md | 97 +
 .../deit-base-distilled_16xb32_in1k-384px.py | 37 +
 .../deit/deit-base-distilled_16xb64_in1k.py | 46 +
 configs/deit/deit-base_16xb32_in1k-384px.py | 37 +
 configs/deit/deit-base_16xb64_in1k.py | 50 +
 .../deit/deit-small-distilled_4xb256_in1k.py | 46 +
 configs/deit/deit-small_4xb256_in1k.py | 48 +
 .../deit/deit-tiny-distilled_4xb256_in1k.py | 47 +
 configs/deit/deit-tiny_4xb256_in1k.py | 48 +
 configs/deit/metafile.yml | 153 +
 configs/deit3/README.md | 90 +
 .../deit3/deit3-base-p16_64xb32_in1k-384px.py | 17 +
 configs/deit3/deit3-base-p16_64xb64_in1k.py | 17 +
 configs/deit3/deit3-huge-p14_64xb32_in1k.py | 17 +
 .../deit3-large-p16_64xb16_in1k-384px.py | 17 +
 configs/deit3/deit3-large-p16_64xb64_in1k.py | 17 +
 configs/deit3/deit3-medium-p16_64xb64_in1k.py | 17 +
 .../deit3-small-p16_64xb64_in1k-384px.py | 17 +
 configs/deit3/deit3-small-p16_64xb64_in1k.py | 17 +
 configs/deit3/metafile.yml | 310 +
 configs/densecl/README.md | 85 +
 .../resnet50_8xb32-linear-steplr-100e_in1k.py | 20 +
 .../densecl_resnet50_8xb32-coslr-200e_in1k.py | 39 +
 configs/densecl/metafile.yml | 44 +
 configs/densenet/README.md | 82 +
 configs/densenet/densenet121_4xb256_in1k.py | 17 +
 configs/densenet/densenet161_4xb256_in1k.py | 17 +
 configs/densenet/densenet169_4xb256_in1k.py | 17 +
 configs/densenet/densenet201_4xb256_in1k.py | 17 +
 configs/densenet/metafile.yml | 76 +
 configs/dinov2/README.md | 58 +
 configs/dinov2/metafile.yml | 73 +
 .../vit-base-p14_dinov2-pre_headless.py | 20 +
 .../vit-giant-p14_dinov2-pre_headless.py | 21 +
 .../vit-large-p14_dinov2-pre_headless.py | 20 +
 .../vit-small-p14_dinov2-pre_headless.py | 20 +
 configs/edgenext/README.md | 80 +
 .../edgenext/edgenext-base_8xb256-usi_in1k.py | 19 +
 configs/edgenext/edgenext-base_8xb256_in1k.py | 20 +
 .../edgenext-small_8xb256-usi_in1k.py | 19 +
 .../edgenext/edgenext-small_8xb256_in1k.py | 20 +
 .../edgenext/edgenext-xsmall_8xb256_in1k.py | 20 +
 .../edgenext/edgenext-xxsmall_8xb256_in1k.py | 20 +
 configs/edgenext/metafile.yml | 118 +
 configs/efficientformer/README.md | 88 +
 .../efficientformer-l1_8xb128_in1k.py | 6 +
 .../efficientformer-l3_8xb128_in1k.py | 3 +
 .../efficientformer-l7_8xb128_in1k.py | 3 +
 configs/efficientformer/metafile.yml | 67 +
 configs/efficientnet/README.md | 122 +
 .../efficientnet-b0_8xb32-01norm_in1k.py | 31 +
 .../efficientnet-b0_8xb32_in1k.py | 24 +
 .../efficientnet-b1_8xb32-01norm_in1k.py | 31 +
 .../efficientnet-b1_8xb32_in1k.py | 24 +
 .../efficientnet-b2_8xb32-01norm_in1k.py | 31 +
 .../efficientnet-b2_8xb32_in1k.py | 24 +
 .../efficientnet-b3_8xb32-01norm_in1k.py | 31 +
 .../efficientnet-b3_8xb32_in1k.py | 24 +
 .../efficientnet-b4_8xb32-01norm_in1k.py | 31 +
 .../efficientnet-b4_8xb32_in1k.py | 24 +
 .../efficientnet-b5_8xb32-01norm_in1k.py | 31 +
 .../efficientnet-b5_8xb32_in1k.py | 24 +
 .../efficientnet-b6_8xb32-01norm_in1k.py | 31 +
 .../efficientnet-b6_8xb32_in1k.py | 24 +
 .../efficientnet-b7_8xb32-01norm_in1k.py | 31 +
 .../efficientnet-b7_8xb32_in1k.py | 24 +
 .../efficientnet-b8_8xb32-01norm_in1k.py | 31 +
 .../efficientnet-b8_8xb32_in1k.py | 24 +
 .../efficientnet-em_8xb32-01norm_in1k.py | 31 +
 .../efficientnet-es_8xb32-01norm_in1k.py | 24 +
 .../efficientnet-l2_8xb32_in1k-475px.py | 24 +
 .../efficientnet-l2_8xb8_in1k-800px.py | 24 +
 configs/efficientnet/metafile.yml | 551 +
 configs/efficientnet_v2/README.md | 98 +
 .../efficientnetv2-b0_8xb32_in1k.py | 58 +
 .../efficientnetv2-b1_8xb32_in1k.py | 21 +
 .../efficientnetv2-b2_8xb32_in1k.py | 21 +
 .../efficientnetv2-b3_8xb32_in1k.py | 21 +
 .../efficientnetv2-l_8xb32_in1k-480px.py | 23 +
 .../efficientnetv2-l_8xb32_in21k.py | 4 +
 .../efficientnetv2-m_8xb32_in1k-480px.py | 23 +
 .../efficientnetv2-m_8xb32_in21k.py | 4 +
 .../efficientnetv2-s_8xb32_in1k-384px.py | 34 +
 .../efficientnetv2-s_8xb32_in21k.py | 43 +
 .../efficientnetv2-xl_8xb32_in1k-512px.py | 23 +
 .../efficientnetv2-xl_8xb32_in21k.py | 4 +
 configs/efficientnet_v2/metafile.yml | 255 +
 configs/eva/README.md | 101 +
 .../vit-base-p16_8xb128-coslr-100e_in1k.py | 114 +
 ...base-p16_8xb2048-linear-coslr-100e_in1k.py | 70 +
 configs/eva/eva-g-p14_8xb16_in1k-336px.py | 9 +
 configs/eva/eva-g-p14_8xb16_in1k-560px.py | 9 +
 configs/eva/eva-g-p14_headless.py | 24 +
 configs/eva/eva-g-p16_headless.py | 24 +
 configs/eva/eva-l-p14_8xb16_in1k-196px.py | 9 +
 configs/eva/eva-l-p14_8xb16_in1k-336px.py | 9 +
 configs/eva/eva-l-p14_headless.py | 25 +
 ...le_vit-base-p16_16xb256-coslr-400e_in1k.py | 86 +
 configs/eva/metafile.yml | 261 +
 configs/eva02/README.md | 109 +
 configs/eva02/eva02-base-p14_headless.py | 21 +
 configs/eva02/eva02-base-p14_in1k.py | 32 +
 configs/eva02/eva02-large-p14_headless.py | 21 +
 configs/eva02/eva02-large-p14_in1k.py | 32 +
 configs/eva02/eva02-small-p14_headless.py | 20 +
 configs/eva02/eva02-small-p14_in1k.py | 31 +
 configs/eva02/eva02-tiny-p14_headless.py | 20 +
 configs/eva02/eva02-tiny-p14_in1k.py | 31 +
 configs/eva02/metafile.yml | 199 +
 configs/flamingo/README.md | 82 +
 configs/flamingo/flamingo_fewshot_caption.py | 95 +
 configs/flamingo/flamingo_fewshot_vqa.py | 109 +
 configs/flamingo/flamingo_zeroshot_caption.py | 95 +
 configs/flamingo/flamingo_zeroshot_vqa.py | 107 +
 configs/flamingo/metafile.yml | 42 +
 configs/glip/README.md | 57 +
 configs/glip/glip-l_headless.py | 18 +
 configs/glip/glip-t_headless.py | 18 +
 configs/glip/metafile.yml | 49 +
 configs/hivit/README.md | 81 +
 configs/hivit/hivit-base-p16_16xb64_in1k.py | 9 +
 configs/hivit/hivit-small-p16_16xb64_in1k.py | 9 +
 configs/hivit/hivit-tiny-p16_16xb64_in1k.py | 9 +
 configs/hivit/metafile.yml | 63 +
 configs/hornet/README.md | 80 +
 configs/hornet/hornet-base-gf_8xb64_in1k.py | 12 +
 configs/hornet/hornet-base_8xb64_in1k.py | 12 +
 configs/hornet/hornet-small-gf_8xb64_in1k.py | 12 +
 configs/hornet/hornet-small_8xb64_in1k.py | 12 +
 configs/hornet/hornet-tiny-gf_8xb128_in1k.py | 12 +
 configs/hornet/hornet-tiny_8xb128_in1k.py | 12 +
 configs/hornet/metafile.yml | 115 +
 configs/hrnet/README.md | 85 +
 configs/hrnet/hrnet-w18_4xb32_in1k.py | 11 +
 configs/hrnet/hrnet-w30_4xb32_in1k.py | 11 +
 configs/hrnet/hrnet-w32_4xb32_in1k.py | 11 +
 configs/hrnet/hrnet-w40_4xb32_in1k.py | 11 +
 configs/hrnet/hrnet-w44_4xb32_in1k.py | 11 +
 configs/hrnet/hrnet-w48_4xb32_in1k.py | 11 +
 configs/hrnet/hrnet-w64_4xb32_in1k.py | 11 +
 configs/hrnet/metafile.yml | 162 +
 configs/inception_v3/README.md | 76 +
 .../inception_v3/inception-v3_8xb32_in1k.py | 24 +
 configs/inception_v3/metafile.yml | 37 +
 configs/itpn/README.md | 65 +
 ...vit-base-p16_8xb256-amp-coslr-300e_in1k.py | 84 +
 ...vit-base-p16_8xb256-amp-coslr-800e_in1k.py | 84 +
 ...it-base-p16_8xb512-amp-coslr-1600e_in1k.py | 56 +
 ...vit-base-p16_8xb512-amp-coslr-400e_in1k.py | 56 +
 ...vit-base-p16_8xb512-amp-coslr-800e_in1k.py | 56 +
 ...t-large-p16_8xb512-amp-coslr-1600e_in1k.py | 61 +
 ...it-large-p16_8xb512-amp-coslr-400e_in1k.py | 61 +
 ...it-large-p16_8xb512-amp-coslr-800e_in1k.py | 61 +
 configs/itpn/metafile.yml | 50 +
 configs/lenet/README.md | 28 +
 configs/lenet/lenet5_mnist.py | 89 +
 configs/levit/README.md | 81 +
 configs/levit/deploy/levit-128_8xb256_in1k.py | 3 +
 .../levit/deploy/levit-128s_8xb256_in1k.py | 3 +
 configs/levit/deploy/levit-192_8xb256_in1k.py | 3 +
 configs/levit/deploy/levit-256_8xb256_in1k.py | 3 +
 configs/levit/deploy/levit-384_8xb256_in1k.py | 3 +
 configs/levit/levit-128_8xb256_in1k.py | 12 +
 configs/levit/levit-128s_8xb256_in1k.py | 12 +
 configs/levit/levit-192_8xb256_in1k.py | 12 +
 configs/levit/levit-256_8xb256_in1k.py | 9 +
 configs/levit/levit-384_8xb256_in1k.py | 15 +
 configs/levit/metafile.yml | 101 +
 configs/llava/README.md | 51 +
 configs/llava/llava-7b-v1.5_caption.py | 76 +
 configs/llava/llava-7b-v1.5_vqa.py | 76 +
 configs/llava/llava-7b-v1_caption.py | 78 +
 configs/llava/metafile.yml | 51 +
 configs/mae/README.md | 123 +
 .../vit-base-p16_8xb128-coslr-100e_in1k.py | 114 +
 ...-base-p16_8xb2048-linear-coslr-90e_in1k.py | 64 +
 ...vit-huge-p14_32xb8-coslr-50e_in1k-448px.py | 116 +
 .../vit-huge-p14_8xb128-coslr-50e_in1k.py | 115 +
 .../vit-huge-p14_8xb128-ds-coslr-50e_in1k.py | 31 +
 ...vit-huge-p14_8xb128-fsdp-coslr-50e_in1k.py | 13 +
 .../vit-large-p16_8xb128-coslr-50e_in1k.py | 115 +
 .../vit-large-p16_8xb128-ds-coslr-50e_in1k.py | 31 +
 ...it-large-p16_8xb128-fsdp-coslr-50e_in1k.py | 13 +
 ...large-p16_8xb2048-linear-coslr-90e_in1k.py | 64 +
 ...it-base-p16_8xb512-amp-coslr-1600e_in1k.py | 56 +
 ...vit-base-p16_8xb512-amp-coslr-400e_in1k.py | 56 +
 ...vit-base-p16_8xb512-amp-coslr-800e_in1k.py | 56 +
 ...t-large-p16_8xb512-amp-coslr-1600e_in1k.py | 61 +
 ...it-large-p16_8xb512-amp-coslr-400e_in1k.py | 61 +
 ...it-large-p16_8xb512-amp-coslr-800e_in1k.py | 61 +
 ...it-base-p16_8xb512-amp-coslr-1600e_in1k.py | 56 +
 ...vit-base-p16_8xb512-amp-coslr-300e_in1k.py | 56 +
 ...vit-base-p16_8xb512-amp-coslr-400e_in1k.py | 56 +
 ...vit-base-p16_8xb512-amp-coslr-800e_in1k.py | 56 +
 ...it-huge-p14_8xb512-amp-coslr-1600e_in1k.py | 66 +
 ...t-large-p16_8xb512-amp-coslr-1600e_in1k.py | 61 +
 ...it-large-p16_8xb512-amp-coslr-300e_in1k.py | 61 +
 ...it-large-p16_8xb512-amp-coslr-400e_in1k.py | 61 +
 ...it-large-p16_8xb512-amp-coslr-800e_in1k.py | 61 +
 configs/mae/metafile.yml | 367 +
 configs/maskfeat/README.md | 85 +
 .../vit-base-p16_8xb256-coslr-100e_in1k.py | 114 +
 ...vit-base-p16_8xb256-amp-coslr-300e_in1k.py | 103 +
 configs/maskfeat/metafile.yml | 43 +
 configs/mff/README.md | 60 +
 .../vit-base-p16_8xb128-coslr-100e_in1k.py | 114 +
 ...-base-p16_8xb2048-linear-coslr-90e_in1k.py | 74 +
 configs/mff/metafile.yml | 103 +
 ...vit-base-p16_8xb512-amp-coslr-300e_in1k.py | 24 +
 ...vit-base-p16_8xb512-amp-coslr-800e_in1k.py | 24 +
 configs/milan/README.md | 104 +
 .../vit-base-p16_8xb128-coslr-100e_in1k.py | 114 +
 ...base-p16_8xb2048-linear-coslr-100e_in1k.py | 70 +
 configs/milan/metafile.yml | 59 +
 ...it-base-p16_16xb256-amp-coslr-400e_in1k.py | 88 +
 configs/minigpt4/README.md | 53 +
 configs/minigpt4/metafile.yml | 37 +
 .../minigpt4/minigpt-4_baichuan-7b_caption.py | 190 +
 .../minigpt4/minigpt-4_vicuna-7b_caption.py | 94 +
 configs/mixmim/README.md | 102 +
 .../mixmim-base_8xb128-coslr-100e_in1k.py | 133 +
 .../benchmarks/mixmim-base_8xb64_in1k.py | 6 +
 configs/mixmim/metafile.yml | 51 +
 ...mim_mixmim-base_16xb128-coslr-300e_in1k.py | 98 +
 configs/mlp_mixer/README.md | 78 +
 configs/mlp_mixer/metafile.yml | 50 +
 .../mlp-mixer-base-p16_64xb64_in1k.py | 8 +
 .../mlp-mixer-large-p16_64xb64_in1k.py | 8 +
 configs/mobilenet_v2/README.md | 97 +
 configs/mobilenet_v2/metafile.yml | 34 +
 .../mobilenet_v2/mobilenet-v2_8xb32_in1k.py | 6 +
 configs/mobilenet_v3/README.md | 99 +
 configs/mobilenet_v3/metafile.yml | 111 +
 .../mobilenet-v3-large_8xb128_in1k.py | 28 +
 .../mobilenet-v3-small-050_8xb128_in1k.py | 70 +
 .../mobilenet-v3-small-075_8xb128_in1k.py | 68 +
 .../mobilenet-v3-small_8xb128_in1k.py | 28 +
 .../mobilenet-v3-small_8xb16_cifar10.py | 15 +
 configs/mobileone/README.md | 98 +
 .../deploy/mobileone-s0_deploy_8xb32_in1k.py | 3 +
 .../deploy/mobileone-s1_deploy_8xb32_in1k.py | 3 +
 .../deploy/mobileone-s2_deploy_8xb32_in1k.py | 3 +
 .../deploy/mobileone-s3_deploy_8xb32_in1k.py | 3 +
 .../deploy/mobileone-s4_deploy_8xb32_in1k.py | 3 +
 configs/mobileone/metafile.yml | 83 +
 configs/mobileone/mobileone-s0_8xb32_in1k.py | 20 +
 configs/mobileone/mobileone-s1_8xb32_in1k.py | 60 +
 configs/mobileone/mobileone-s2_8xb32_in1k.py | 65 +
 configs/mobileone/mobileone-s3_8xb32_in1k.py | 65 +
 configs/mobileone/mobileone-s4_8xb32_in1k.py | 63 +
 configs/mobilevit/README.md | 96 +
 configs/mobilevit/metafile.yml | 60 +
 .../mobilevit/mobilevit-small_8xb128_in1k.py | 30 +
 .../mobilevit/mobilevit-xsmall_8xb128_in1k.py | 30 +
 .../mobilevit-xxsmall_8xb128_in1k.py | 30 +
 configs/mocov2/README.md | 85 +
 .../resnet50_8xb32-linear-steplr-100e_in1k.py | 20 +
 configs/mocov2/metafile.yml | 45 +
 .../mocov2_resnet50_8xb32-coslr-200e_in1k.py | 34 +
 configs/mocov3/README.md | 96 +
 .../resnet50_8xb128-linear-coslr-90e_in1k.py | 31 +
 ...t-base-p16_8xb128-linear-coslr-90e_in1k.py | 45 +
 .../vit-base-p16_8xb64-coslr-150e_in1k.py | 74 +
 .../vit-large-p16_8xb64-coslr-100e_in1k.py | 74 +
 ...-small-p16_8xb128-linear-coslr-90e_in1k.py | 45 +
 configs/mocov3/metafile.yml | 201 +
 ...ov3_resnet50_8xb512-amp-coslr-100e_in1k.py | 82 +
 ...ov3_resnet50_8xb512-amp-coslr-300e_in1k.py | 82 +
 ...ov3_resnet50_8xb512-amp-coslr-800e_in1k.py | 82 +
 ...it-base-p16_16xb256-amp-coslr-300e_in1k.py | 151 +
 ...it-large-p16_64xb64-amp-coslr-300e_in1k.py | 154 +
 ...t-small-p16_16xb256-amp-coslr-300e_in1k.py | 151 +
 configs/mvit/README.md | 85 +
 configs/mvit/metafile.yml | 95 +
 configs/mvit/mvitv2-base_8xb256_in1k.py | 43 +
 configs/mvit/mvitv2-large_8xb256_in1k.py | 43 +
 configs/mvit/mvitv2-small_8xb256_in1k.py | 43 +
 configs/mvit/mvitv2-tiny_8xb256_in1k.py | 43 +
 configs/ofa/README.md | 88 +
 configs/ofa/metafile.yml | 89 +
 configs/ofa/ofa-base_finetuned_caption.py | 41 +
 configs/ofa/ofa-base_finetuned_refcoco.py | 45 +
 configs/ofa/ofa-base_finetuned_vqa.py | 64 +
 configs/ofa/ofa-base_zeroshot_vqa.py | 42 +
 configs/ofa/ofa-large_zeroshot_vqa.py | 43 +
 configs/otter/README.md | 79 +
 configs/otter/metafile.yml | 43 +
 configs/otter/otter-9b_caption.py | 87 +
 configs/otter/otter-9b_vqa.py | 104 +
 configs/poolformer/README.md | 80 +
 configs/poolformer/metafile.yml | 99 +
 .../poolformer/poolformer-m36_32xb128_in1k.py | 17 +
 .../poolformer/poolformer-m48_32xb128_in1k.py | 17 +
 .../poolformer/poolformer-s12_32xb128_in1k.py | 17 +
 .../poolformer/poolformer-s24_32xb128_in1k.py | 17 +
 .../poolformer/poolformer-s36_32xb128_in1k.py | 17 +
 configs/regnet/README.md | 88 +
 configs/regnet/metafile.yml | 122 +
 configs/regnet/regnetx-1.6gf_8xb128_in1k.py | 6 +
 configs/regnet/regnetx-12gf_8xb64_in1k.py | 18 +
 configs/regnet/regnetx-3.2gf_8xb64_in1k.py | 18 +
 configs/regnet/regnetx-4.0gf_8xb64_in1k.py | 18 +
 configs/regnet/regnetx-400mf_8xb128_in1k.py | 58 +
 configs/regnet/regnetx-6.4gf_8xb64_in1k.py | 18 +
 configs/regnet/regnetx-8.0gf_8xb64_in1k.py | 18 +
 configs/regnet/regnetx-800mf_8xb128_in1k.py | 6 +
 configs/replknet/README.md | 108 +
 .../replknet-31B-deploy_32xb64_in1k-384px.py | 3 +
 .../deploy/replknet-31B-deploy_32xb64_in1k.py | 3 +
 .../replknet-31L-deploy_32xb64_in1k-384px.py | 3 +
 .../replknet-XL-deploy_32xb64_in1k-320px.py | 3 +
 configs/replknet/metafile.yml | 129 +
 .../replknet-31B_32xb64_in1k-384px.py | 12 +
 configs/replknet/replknet-31B_32xb64_in1k.py | 12 +
 .../replknet-31L_32xb64_in1k-384px.py | 12 +
 .../replknet/replknet-XL_32xb64_in1k-320px.py | 12 +
 configs/repmlp/README.md | 103 +
 configs/repmlp/metafile.yml | 48 +
 .../repmlp/repmlp-base_8xb64_in1k-256px.py | 36 +
 configs/repmlp/repmlp-base_8xb64_in1k.py | 26 +
 .../repmlp/repmlp-base_delopy_8xb64_in1k.py | 3 +
 .../repmlp-base_deploy_8xb64_in1k-256px.py | 3 +
 configs/repvgg/README.md | 142 +
 configs/repvgg/metafile.yml | 175 +
 configs/repvgg/repvgg-A0_8xb32_in1k.py | 33 +
 configs/repvgg/repvgg-A0_deploy_in1k.py | 3 +
 configs/repvgg/repvgg-A1_8xb32_in1k.py | 3 +
 configs/repvgg/repvgg-A2_8xb32_in1k.py | 3 +
 configs/repvgg/repvgg-B0_8xb32_in1k.py | 3 +
 configs/repvgg/repvgg-B1_8xb32_in1k.py | 3 +
 configs/repvgg/repvgg-B1g2_8xb32_in1k.py | 3 +
 configs/repvgg/repvgg-B1g4_8xb32_in1k.py | 3 +
 configs/repvgg/repvgg-B2_8xb32_in1k.py | 3 +
 configs/repvgg/repvgg-B2g4_8xb32_in1k.py | 3 +
 configs/repvgg/repvgg-B3_8xb32_in1k.py | 67 +
 configs/repvgg/repvgg-B3g4_8xb32_in1k.py | 3 +
 configs/repvgg/repvgg-D2se_8xb32_in1k.py | 28 +
 configs/res2net/README.md | 78 +
 configs/res2net/metafile.yml | 70 +
 .../res2net/res2net101-w26-s4_8xb32_in1k.py | 5 +
 .../res2net/res2net50-w14-s8_8xb32_in1k.py | 5 +
 .../res2net/res2net50-w26-s8_8xb32_in1k.py | 5 +
 configs/resnest/README.md | 26 +
 configs/resnest/_randaug_policies.py | 92 +
 configs/resnest/resnest101_32xb64_in1k.py | 78 +
 configs/resnest/resnest200_64xb32_in1k.py | 74 +
 configs/resnest/resnest269_64xb32_in1k.py | 78 +
 configs/resnest/resnest50_32xb64_in1k.py | 78 +
 configs/resnet/README.md | 140 +
 configs/resnet/metafile.yml | 352 +
 configs/resnet/resnet101_8xb16_cifar10.py | 5 +
 configs/resnet/resnet101_8xb32_in1k.py | 4 +
 configs/resnet/resnet152_8xb16_cifar10.py | 5 +
 configs/resnet/resnet152_8xb32_in1k.py | 4 +
 configs/resnet/resnet18_8xb16_cifar10.py | 4 +
 configs/resnet/resnet18_8xb32_in1k.py | 4 +
 configs/resnet/resnet34_8xb16_cifar10.py | 4 +
 configs/resnet/resnet34_8xb32_in1k.py | 4 +
 .../resnet50_32xb64-warmup-coslr_in1k.py | 5 +
 .../resnet/resnet50_32xb64-warmup-lbs_in1k.py | 12 +
 configs/resnet/resnet50_32xb64-warmup_in1k.py | 4 +
 .../resnet/resnet50_8xb128_coslr-90e_in21k.py | 11 +
 .../resnet/resnet50_8xb16-mixup_cifar10.py | 5 +
 configs/resnet/resnet50_8xb16_cifar10.py | 4 +
 configs/resnet/resnet50_8xb16_cifar100.py | 19 +
 .../resnet50_8xb256-rsb-a1-600e_in1k.py | 56 +
 .../resnet50_8xb256-rsb-a2-300e_in1k.py | 46 +
 .../resnet50_8xb256-rsb-a3-100e_in1k.py | 22 +
 .../resnet50_8xb32-coslr-preciseBN_in1k.py | 13 +
 configs/resnet/resnet50_8xb32-coslr_in1k.py | 5 +
 configs/resnet/resnet50_8xb32-cutmix_in1k.py | 5 +
 .../resnet50_8xb32-fp16-dynamic_in1k.py | 4 +
 configs/resnet/resnet50_8xb32-fp16_in1k.py | 4 +
 configs/resnet/resnet50_8xb32-lbs_in1k.py | 5 +
 configs/resnet/resnet50_8xb32-mixup_in1k.py | 5 +
 configs/resnet/resnet50_8xb32_in1k.py | 4 +
 configs/resnet/resnet50_8xb8_cub.py | 20 +
 configs/resnet/resnetv1c101_8xb32_in1k.py | 7 +
 configs/resnet/resnetv1c152_8xb32_in1k.py | 7 +
 configs/resnet/resnetv1c50_8xb32_in1k.py | 5 +
 configs/resnet/resnetv1d101_8xb32_in1k.py | 5 +
 configs/resnet/resnetv1d152_8xb32_in1k.py | 5 +
 configs/resnet/resnetv1d50_8xb32_in1k.py | 5 +
 configs/resnext/README.md | 83 +
 configs/resnext/metafile.yml | 73 +
 .../resnext/resnext101-32x4d_8xb32_in1k.py | 5 +
 .../resnext/resnext101-32x8d_8xb32_in1k.py | 5 +
 .../resnext/resnext152-32x4d_8xb32_in1k.py | 5 +
 configs/resnext/resnext50-32x4d_8xb32_in1k.py | 5 +
 configs/revvit/README.md | 91 +
 configs/revvit/metafile.yml | 48 +
 configs/revvit/revvit-base_8xb256_in1k.py | 6 +
 configs/revvit/revvit-small_8xb256_in1k.py | 6 +
 configs/riformer/README.md | 181 +
 .../deploy/riformer-m36-deploy_8xb128_in1k.py | 3 +
 .../riformer-m36-deploy_8xb64_in1k-384px.py | 3 +
 .../riformer-m48-deploy_8xb64_in1k-384px.py | 3 +
 .../deploy/riformer-m48-deploy_8xb64_in1k.py | 3 +
 .../riformer-s12-deploy_8xb128_in1k-384px.py | 3 +
 .../deploy/riformer-s12-deploy_8xb128_in1k.py | 3 +
 .../riformer-s24-deploy_8xb128_in1k-384px.py | 3 +
 .../deploy/riformer-s24-deploy_8xb128_in1k.py | 3 +
 .../deploy/riformer-s36-deploy_8xb128_in1k.py | 3 +
 .../riformer-s36-deploy_8xb64_in1k-384px.py | 3 +
 configs/riformer/metafile.yml | 152 +
 configs/riformer/riformer-m36_8xb128_in1k.py | 39 +
 .../riformer/riformer-m36_8xb64_in1k-384px.py | 39 +
 .../riformer/riformer-m48_8xb64_in1k-384px.py | 39 +
 configs/riformer/riformer-m48_8xb64_in1k.py | 39 +
 .../riformer-s12_8xb128_in1k-384px.py | 39 +
 configs/riformer/riformer-s12_8xb128_in1k.py | 39 +
 .../riformer-s24_8xb128_in1k-384px.py | 39 +
 configs/riformer/riformer-s24_8xb128_in1k.py | 39 +
 configs/riformer/riformer-s36_8xb128_in1k.py | 39 +
 .../riformer/riformer-s36_8xb64_in1k-384px.py | 39 +
 configs/sam/README.md | 57 +
 configs/sam/metafile.yml | 61 +
 configs/sam/vit-base-p16_sam_headless.py | 24 +
 configs/sam/vit-huge-p16_sam_headless.py | 24 +
 configs/sam/vit-large-p16_sam_headless.py | 24 +
 configs/seresnet/README.md | 81 +
 configs/seresnet/metafile.yml | 47 +
 configs/seresnet/seresnet101_8xb32_in1k.py | 5 +
 configs/seresnet/seresnet50_8xb32_in1k.py | 6 +
 .../seresnet/seresnext101-32x4d_8xb32_in1k.py | 5 +
 .../seresnet/seresnext50-32x4d_8xb32_in1k.py | 5 +
 configs/shufflenet_v1/README.md | 80 +
 configs/shufflenet_v1/metafile.yml | 35 +
 .../shufflenet-v1-1x_16xb64_in1k.py | 6 +
 configs/shufflenet_v2/README.md | 80 +
 configs/shufflenet_v2/metafile.yml | 35 +
 .../shufflenet-v2-1x_16xb64_in1k.py | 6 +
 configs/simclr/README.md | 87 +
 .../resnet50_8xb512-linear-coslr-90e_in1k.py | 18 +
 configs/simclr/metafile.yml | 72 +
 ...simclr_resnet50_16xb256-coslr-200e_in1k.py | 46 +
 ...simclr_resnet50_16xb256-coslr-800e_in1k.py | 57 +
 .../simclr_resnet50_8xb32-coslr-200e_in1k.py | 47 +
 configs/simmim/README.md | 90 +
 ...in-base-w6_8xb256-coslr-100e_in1k-192px.py | 59 +
 .../swin-base-w7_8xb256-coslr-100e_in1k.py | 102 +
 .../swin-large-w14_8xb256-coslr-100e_in1k.py | 105 +
 configs/simmim/metafile.yml | 115 +
 ...se-w6_16xb128-amp-coslr-100e_in1k-192px.py | 4 +
 ...se-w6_16xb128-amp-coslr-800e_in1k-192px.py | 64 +
 ...ase-w6_8xb256-amp-coslr-100e_in1k-192px.py | 65 +
 ...e-w12_16xb128-amp-coslr-800e_in1k-192px.py | 65 +
 configs/simsiam/README.md | 87 +
 .../resnet50_8xb512-linear-coslr-90e_in1k.py | 18 +
 configs/simsiam/metafile.yml | 72 +
 .../simsiam_resnet50_8xb32-coslr-100e_in1k.py | 58 +
 .../simsiam_resnet50_8xb32-coslr-200e_in1k.py | 52 +
 configs/spark/README.md | 87 +
 .../convnextv2-tiny_8xb256-coslr-300e_in1k.py | 122 +
 .../resnet50_8xb256-coslr-300e_in1k.py | 107 +
 configs/spark/metafile.yml | 73 +
 ...vnext-small_16xb256-amp-coslr-800e_in1k.py | 81 +
 ...nextv2-tiny_16xb256-amp-coslr-800e_in1k.py | 84 +
 ...se-resnet50_8xb512-amp-coslr-1600e_in1k.py | 30 +
 ...rse-resnet50_8xb512-amp-coslr-800e_in1k.py | 80 +
 configs/swav/README.md | 85 +
 .../resnet50_8xb512-linear-coslr-90e_in1k.py | 18 +
 configs/swav/metafile.yml | 44 +
 ..._8xb32-mcrop-coslr-200e_in1k-224px-96px.py | 159 +
 configs/swin_transformer/README.md | 111 +
 configs/swin_transformer/metafile.yml | 201 +
 .../swin-base_16xb64_in1k-384px.py | 9 +
 .../swin_transformer/swin-base_16xb64_in1k.py | 9 +
 .../swin-large_16xb64_in1k-384px.py | 9 +
 .../swin-large_16xb64_in1k.py | 9 +
 .../swin-large_8xb8_cub-384px.py | 40 +
 .../swin-small_16xb64_in1k.py | 9 +
 .../swin_transformer/swin-tiny_16xb64_in1k.py | 9 +
 configs/swin_transformer_v2/README.md | 121 +
 configs/swin_transformer_v2/metafile.yml | 206 +
 .../swinv2-base-w12_8xb128_in21k-192px.py | 19 +
 .../swinv2-base-w16_16xb64_in1k-256px.py | 8 +
 ...v2-base-w16_in21k-pre_16xb64_in1k-256px.py | 13 +
 ...v2-base-w24_in21k-pre_16xb64_in1k-384px.py | 14 +
 .../swinv2-base-w8_16xb64_in1k-256px.py | 6 +
 .../swinv2-large-w12_8xb128_in21k-192px.py | 19 +
 ...2-large-w16_in21k-pre_16xb64_in1k-256px.py | 13 +
 ...2-large-w24_in21k-pre_16xb64_in1k-384px.py | 15 +
 .../swinv2-small-w16_16xb64_in1k-256px.py | 8 +
 .../swinv2-small-w8_16xb64_in1k-256px.py | 6 +
 .../swinv2-tiny-w16_16xb64_in1k-256px.py | 8 +
 .../swinv2-tiny-w8_16xb64_in1k-256px.py | 6 +
 configs/t2t_vit/README.md | 81 +
 configs/t2t_vit/metafile.yml | 58 +
 configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py | 49 +
 configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py | 49 +
 configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py | 49 +
 configs/tinyvit/README.md | 82 +
 configs/tinyvit/metafile.yml | 162 +
 .../tinyvit-11m-distill_8xb256_in1k.py | 3 +
 configs/tinyvit/tinyvit-11m_8xb256_in1k.py | 6 +
 .../tinyvit-21m-distill_8xb256_in1k-384px.py | 29 +
 .../tinyvit-21m-distill_8xb256_in1k-512px.py | 28 +
 .../tinyvit-21m-distill_8xb256_in1k.py | 3 +
 configs/tinyvit/tinyvit-21m_8xb256_in1k.py | 6 +
 .../tinyvit/tinyvit-5m-distill_8xb256_in1k.py | 3 +
 configs/tinyvit/tinyvit-5m_8xb256_in1k.py | 6 +
 configs/tnt/README.md | 77 +
 configs/tnt/metafile.yml | 29 +
 configs/tnt/tnt-s-p16_16xb64_in1k.py | 56 +
 configs/twins/README.md | 80 +
 configs/twins/metafile.yml | 114 +
 configs/twins/twins-pcpvt-base_8xb128_in1k.py | 41 +
 .../twins/twins-pcpvt-large_16xb64_in1k.py | 7 +
 .../twins/twins-pcpvt-small_8xb128_in1k.py | 4 +
 configs/twins/twins-svt-base_8xb128_in1k.py | 41 +
 configs/twins/twins-svt-large_16xb64_in1k.py | 7 +
 configs/twins/twins-svt-small_8xb128_in1k.py | 4 +
 configs/van/README.md | 78 +
 configs/van/metafile.yml | 82 +
 configs/van/van-base_8xb128_in1k.py | 65 +
 configs/van/van-large_8xb128_in1k.py | 65 +
 configs/van/van-small_8xb128_in1k.py | 65 +
 configs/van/van-tiny_8xb128_in1k.py | 65 +
 configs/vgg/README.md | 86 +
 configs/vgg/metafile.yml | 125 +
 configs/vgg/vgg11_8xb32_in1k.py | 9 +
 configs/vgg/vgg11bn_8xb32_in1k.py | 6 +
 configs/vgg/vgg13_8xb32_in1k.py | 9 +
 configs/vgg/vgg13bn_8xb32_in1k.py | 6 +
 configs/vgg/vgg16_8xb16_voc.py | 43 +
 configs/vgg/vgg16_8xb32_in1k.py | 9 +
 configs/vgg/vgg16bn_8xb32_in1k.py | 6 +
 configs/vgg/vgg19_8xb32_in1k.py | 9 +
 configs/vgg/vgg19bn_8xb32_in1k.py | 6 +
 configs/vig/README.md | 81 +
 configs/vig/metafile.yml | 134 +
 configs/vig/pvig-base_8xb128_in1k.py | 22 +
 configs/vig/pvig-medium_8xb128_in1k.py | 6 +
 configs/vig/pvig-small_8xb128_in1k.py | 6 +
 configs/vig/pvig-tiny_8xb128_in1k.py | 6 +
 configs/vig/vig-base_8xb128_in1k.py | 6 +
 configs/vig/vig-small_8xb128_in1k.py | 6 +
 configs/vig/vig-tiny_8xb128_in1k.py | 6 +
 configs/vision_transformer/README.md | 101 +
 configs/vision_transformer/metafile.yml | 95 +
 .../vit-base-p16_32xb128-mae_in1k.py | 58 +
 .../vit-base-p16_4xb544-ipu_in1k.py | 114 +
 .../vit-base-p16_64xb64_in1k-384px.py | 38 +
 .../vit-base-p16_64xb64_in1k.py | 15 +
 .../vit-base-p16_8xb64-lora_in1k-384px.py | 84 +
 .../vit-base-p32_64xb64_in1k-384px.py | 38 +
 .../vit-base-p32_64xb64_in1k.py | 15 +
 .../vit-large-p16_64xb64_in1k-384px.py | 38 +
 .../vit-large-p16_64xb64_in1k.py | 15 +
 .../vit-large-p32_64xb64_in1k-384px.py | 38 +
 .../vit-large-p32_64xb64_in1k.py | 15 +
 configs/wrn/README.md | 76 +
 configs/wrn/metafile.yml | 77 +
 configs/wrn/wide-resnet101_8xb32_in1k.py | 7 +
 configs/wrn/wide-resnet50_8xb32_in1k.py | 5 +
 configs/wrn/wide-resnet50_timm_8xb32_in1k.py | 5 +
 configs/xcit/README.md | 106 +
 configs/xcit/metafile.yml | 727 ++
 .../xcit-large-24-p16_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-large-24-p16_8xb128_in1k.py | 34 +
 .../xcit-large-24-p8_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-large-24-p8_8xb128_in1k.py | 34 +
 .../xcit-medium-24-p16_8xb128_in1k-384px.py | 34 +
 .../xcit/xcit-medium-24-p16_8xb128_in1k.py | 34 +
 .../xcit-medium-24-p8_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-medium-24-p8_8xb128_in1k.py | 34 +
 .../xcit-nano-12-p16_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-nano-12-p16_8xb128_in1k.py | 34 +
 .../xcit/xcit-nano-12-p8_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-nano-12-p8_8xb128_in1k.py | 34 +
 .../xcit-small-12-p16_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-small-12-p16_8xb128_in1k.py | 34 +
 .../xcit-small-12-p8_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-small-12-p8_8xb128_in1k.py | 34 +
 .../xcit-small-24-p16_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-small-24-p16_8xb128_in1k.py | 34 +
 .../xcit-small-24-p8_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-small-24-p8_8xb128_in1k.py | 34 +
 .../xcit-tiny-12-p16_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-tiny-12-p16_8xb128_in1k.py | 34 +
 .../xcit/xcit-tiny-12-p8_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-tiny-12-p8_8xb128_in1k.py | 34 +
 .../xcit-tiny-24-p16_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-tiny-24-p16_8xb128_in1k.py | 34 +
 .../xcit/xcit-tiny-24-p8_8xb128_in1k-384px.py | 34 +
 configs/xcit/xcit-tiny-24-p8_8xb128_in1k.py | 34 +
 dataset-index.yml | 11 +
 demo/bird.JPEG | Bin 0 -> 74237 bytes
 demo/cat-dog.png | Bin 0 -> 744894 bytes
 demo/demo.JPEG | Bin 0 -> 109527 bytes
 demo/dog.jpg | Bin 0 -> 26160 bytes
 demo/image_demo.py | 44 +
 demo/ipu_train_example.sh | 9 +
 docker/Dockerfile | 26 +
 docker/serve/Dockerfile | 37 +
 docker/serve/config.properties | 5 +
 docker/serve/entrypoint.sh | 12 +
 docs/en/Makefile | 20 +
 docs/en/_static/css/readthedocs.css | 62 +
 docs/en/_static/image/confusion-matrix.png | Bin 0 -> 51804 bytes
 docs/en/_static/image/mmpt-logo.png | Bin 0 -> 28982 bytes
 .../image/tools/analysis/analyze_log.jpg | Bin 0 -> 68146 bytes
 docs/en/_static/js/custom.js | 10 +
 docs/en/_templates/404.html | 18 +
 docs/en/_templates/autosummary/class.rst | 13 +
 docs/en/_templates/callable.rst | 14 +
 docs/en/_templates/data_transform.rst | 13 +
 docs/en/advanced_guides/convention.md | 120 +
 docs/en/advanced_guides/datasets.md | 72 +
 docs/en/advanced_guides/evaluation.md | 103 +
 docs/en/advanced_guides/modules.md | 511 +
 docs/en/advanced_guides/pipeline.md | 170 +
 docs/en/advanced_guides/runtime.md | 221 +
 docs/en/advanced_guides/schedule.md | 361 +
 docs/en/api/apis.rst | 48 +
 docs/en/api/data_process.rst | 329 +
 docs/en/api/datasets.rst | 129 +
 docs/en/api/engine.rst | 51 +
 docs/en/api/evaluation.rst | 47 +
 docs/en/api/models.rst | 364 +
 docs/en/api/structures.rst | 13 +
 docs/en/api/utils.rst | 19 +
 docs/en/api/visualization.rst | 14 +
 docs/en/conf.py | 248 +
 docs/en/device/npu.md | 47 +
 docs/en/docutils.conf | 2 +
 docs/en/get_started.md | 164 +
 docs/en/index.rst | 157 +
 docs/en/migration.md | 772 ++
 docs/en/notes/changelog.md | 1055 ++
 docs/en/notes/contribution_guide.md | 1 +
 docs/en/notes/faq.md | 116 +
 docs/en/notes/finetune_custom_dataset.md | 340 +
 docs/en/notes/pretrain_custom_dataset.md | 255 +
 docs/en/notes/projects.md | 21 +
 docs/en/stat.py | 249 +
 docs/en/useful_tools/cam_visualization.md | 164 +
 docs/en/useful_tools/complexity_analysis.md | 77 +
 docs/en/useful_tools/confusion_matrix.md | 84 +
 docs/en/useful_tools/dataset_visualization.md | 90 +
 docs/en/useful_tools/log_result_analysis.md | 226 +
 docs/en/useful_tools/model_serving.md | 88 +
 docs/en/useful_tools/print_config.md | 27 +
 .../useful_tools/scheduler_visualization.md | 44 +
 docs/en/useful_tools/shape_bias.md | 100 +
 docs/en/useful_tools/t-sne_visualization.md | 85 +
 docs/en/useful_tools/verify_dataset.md | 28 +
 docs/en/user_guides/config.md | 421 +
 docs/en/user_guides/dataset_prepare.md | 364 +
 docs/en/user_guides/downstream.md | 128 +
 docs/en/user_guides/inference.md | 179 +
 docs/en/user_guides/test.md | 123 +
 docs/en/user_guides/train.md | 121 +
 docs/zh_CN/Makefile | 20 +
 docs/zh_CN/_static/css/readthedocs.css | 61 +
 docs/zh_CN/_static/image/confusion-matrix.png | 1 +
 docs/zh_CN/_static/image/mmpt-logo.png | Bin 0 -> 28982 bytes
 .../image/tools/analysis/analyze_log.jpg | Bin 0 -> 68146 bytes
 docs/zh_CN/_static/js/custom.js | 20 +
 docs/zh_CN/_templates/404.html | 16 +
 docs/zh_CN/_templates/autosummary/class.rst | 13 +
 docs/zh_CN/_templates/callable.rst | 14 +
 docs/zh_CN/_templates/data_transform.rst | 13 +
 docs/zh_CN/advanced_guides/convention.md | 114 +
 docs/zh_CN/advanced_guides/datasets.md | 73 +
 docs/zh_CN/advanced_guides/evaluation.md | 97 +
 docs/zh_CN/advanced_guides/modules.md | 512 +
 docs/zh_CN/advanced_guides/pipeline.md | 148 +
 docs/zh_CN/advanced_guides/runtime.md | 213 +
 docs/zh_CN/advanced_guides/schedule.md | 359 +
 docs/zh_CN/api | 1 +
 docs/zh_CN/conf.py | 253 +
 docs/zh_CN/device/npu.md | 41 +
 docs/zh_CN/docutils.conf | 2 +
 docs/zh_CN/get_started.md | 163 +
 docs/zh_CN/index.rst | 150 +
 docs/zh_CN/locales/zh_CN/LC_MESSAGES/api.po | 9090 +++++++++++++++++
 .../zh_CN/locales/zh_CN/LC_MESSAGES/papers.po | 8971 ++++++++++++++++
 docs/zh_CN/migration.md | 732 ++
 docs/zh_CN/notes/changelog.md | 1 +
 docs/zh_CN/notes/contribution_guide.md | 62 +
 docs/zh_CN/notes/faq.md | 101 +
 docs/zh_CN/notes/finetune_custom_dataset.md | 328 +
 docs/zh_CN/notes/pretrain_custom_dataset.md | 247 +
 docs/zh_CN/notes/projects.md | 1 +
 docs/zh_CN/stat.py | 249 +
 docs/zh_CN/useful_tools/cam_visualization.md | 164 +
 .../zh_CN/useful_tools/complexity_analysis.md | 80 +
 docs/zh_CN/useful_tools/confusion_matrix.md | 83 +
 .../useful_tools/dataset_visualization.md | 90 +
 .../zh_CN/useful_tools/log_result_analysis.md | 223 +
 docs/zh_CN/useful_tools/model_serving.md | 88 +
 docs/zh_CN/useful_tools/print_config.md | 28 +
 .../useful_tools/scheduler_visualization.md | 44 +
 docs/zh_CN/useful_tools/shape_bias.md | 96 +
 .../zh_CN/useful_tools/t-sne_visualization.md | 85 +
 docs/zh_CN/useful_tools/verify_dataset.md | 28 +
 docs/zh_CN/user_guides/config.md | 412 +
 docs/zh_CN/user_guides/dataset_prepare.md | 351 +
 docs/zh_CN/user_guides/downstream.md | 125 +
 docs/zh_CN/user_guides/inference.md | 176 +
 docs/zh_CN/user_guides/test.md | 117 +
 docs/zh_CN/user_guides/train.md | 118 +
 inception-v3_8xb32_in1k.py | 46 +
 mmpretrain.egg-info/PKG-INFO | 399 +
 mmpretrain.egg-info/SOURCES.txt | 371 +
 .../dependency_links.txt | 0
 mmpretrain.egg-info/not-zip-safe | 1 +
 mmpretrain.egg-info/requires.txt | 42 +
 mmpretrain.egg-info/top_level.txt | 1 +
 mmpretrain/__init__.py | 28 +
 .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 787 bytes
 .../__pycache__/registry.cpython-310.pyc | Bin 0 -> 2738 bytes
 .../__pycache__/version.cpython-310.pyc | Bin 0 -> 807 bytes
 mmpretrain/apis/__init__.py | 22 +
 .../apis/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 967 bytes
 .../apis/__pycache__/base.cpython-310.pyc | Bin 0 -> 13841 bytes
 .../feature_extractor.cpython-310.pyc | Bin 0 -> 5475 bytes
 .../__pycache__/image_caption.cpython-310.pyc | Bin 0 -> 6223 bytes
 .../image_classification.cpython-310.pyc | Bin 0 -> 8164 bytes
 .../image_retrieval.cpython-310.pyc | Bin 0 -> 9964 bytes
 .../apis/__pycache__/model.cpython-310.pyc | Bin 0 -> 13966 bytes
 .../multimodal_retrieval.cpython-310.pyc | Bin 0 -> 19337 bytes
 .../apis/__pycache__/nlvr.cpython-310.pyc | Bin 0 -> 5788 bytes
 .../visual_grounding.cpython-310.pyc | Bin 0 -> 6827 bytes
 .../visual_question_answering.cpython-310.pyc | Bin 0 -> 6927 bytes
 mmpretrain/apis/base.py | 390 +
 mmpretrain/apis/feature_extractor.py | 130 +
 mmpretrain/apis/image_caption.py | 166 +
 mmpretrain/apis/image_classification.py | 223 +
 mmpretrain/apis/image_retrieval.py | 288 +
 mmpretrain/apis/model.py | 408 +
 mmpretrain/apis/multimodal_retrieval.py | 603 ++
 mmpretrain/apis/nlvr.py | 150 +
 mmpretrain/apis/utils.py | 270 +
 mmpretrain/apis/visual_grounding.py | 182 +
 mmpretrain/apis/visual_question_answering.py | 183 +
 .../configs/_base_/datasets/cifar10_bs16.py | 52 +
 .../configs/_base_/datasets/cub_bs8_384.py | 59 +
 .../_base_/datasets/imagenet21k_bs128.py | 35 +
 .../_base_/datasets/imagenet_bs128_mbv3.py | 75 +
 .../_base_/datasets/imagenet_bs256_beitv2.py | 53 +
 .../configs/_base_/datasets/imagenet_bs32.py | 62 +
 .../datasets/imagenet_bs32_pil_resize.py | 60 +
 .../_base_/datasets/imagenet_bs32_simclr.py | 63 +
 .../_base_/datasets/imagenet_bs512_mae.py | 40 +
 .../datasets/imagenet_bs64_pil_resize.py | 60 +
 .../imagenet_bs64_pil_resize_autoaug.py | 78 +
 .../_base_/datasets/imagenet_bs64_swin_224.py | 89 +
 .../_base_/datasets/imagenet_bs64_swin_256.py | 89 +
 .../_base_/datasets/imagenet_bs64_swin_384.py | 64 +
 mmpretrain/configs/_base_/default_runtime.py | 61 +
 .../configs/_base_/models/convnext_base.py | 25 +
 .../_base_/models/mae_hivit_base_p16.py | 28 +
 .../configs/_base_/models/mae_vit_base_p16.py | 28 +
 .../configs/_base_/models/mobilenet_v2_1x.py | 17 +
 .../_base_/models/mobilenet_v3_small.py | 25 +
 mmpretrain/configs/_base_/models/resnet18.py | 22 +
 .../_base_/models/swin_transformer_base.py | 20 +
 .../_base_/models/swin_transformer_v2_base.py | 19 +
 .../configs/_base_/models/vit_base_p16.py | 31 +
 .../configs/_base_/schedules/cifar10_bs128.py | 20 +
 .../configs/_base_/schedules/cub_bs64.py | 39 +
 .../schedules/imagenet_bs1024_adamw_swin.py | 46 +
 .../_base_/schedules/imagenet_bs256.py | 21 +
 .../schedules/imagenet_bs256_epochstep.py | 20 +
 .../_base_/schedules/imagenet_bs4096_adamw.py | 44 +
 .../schedules/imagenet_lars_coslr_200e.py | 27 +
 ...eit_base_p16_8xb256_amp_coslr_300e_in1k.py | 146 +
 .../beit-base-p16_8xb128-coslr-100e_in1k.py | 139 +
 .../benchmarks/beit-base-p16_8xb64_in1k.py | 50 +
 ...it-base-p16_8xb256-amp-coslr-1600e_in1k.py | 130 +
 ...eit-base-p16_8xb256-amp-coslr-300e_in1k.py | 130 +
 .../beit-base-p16_8xb128-coslr-100e_in1k.py | 132 +
 .../benchmarks/beit-base-p16_8xb64_in1k.py | 42 +
 .../convnext/convnext-base_32xb128_in1k.py | 28 +
 .../convnext/convnext-base_32xb128_in21k.py | 27 +
 .../convnext-large_64xb64_in1k-384px.py | 27 +
 .../convnext/convnext-large_64xb64_in1k.py | 27 +
 .../convnext/convnext-large_64xb64_in21k.py | 26 +
 .../convnext-small_32xb128_in1k-384px.py | 27 +
 .../convnext/convnext-small_32xb128_in1k.py | 27 +
 .../convnext-tiny_32xb128_in1k-384px.py | 27 +
 .../convnext/convnext-tiny_32xb128_in1k.py | 27 +
 .../convnext-xlarge_64xb64_in1k-384px.py | 27 +
 .../convnext/convnext-xlarge_64xb64_in1k.py | 27 +
 .../convnext/convnext-xlarge_64xb64_in21k.py | 28 +
 .../convnext_base_32xb128_in1k_384px.py | 28 +
 ...le_vit_base_p16_16xb256_coslr_400e_in1k.py | 92 +
 ...it_base_p16_8xb512_amp_coslr_1600e_in1k.py | 65 +
 ...vit_base_p16_8xb512_amp_coslr_400e_in1k.py | 65 +
 ...vit_base_p16_8xb512_amp_coslr_800e_in1k.py | 65 +
 ...t_large_p16_8xb512_amp_coslr_1600e_in1k.py | 70 +
 ...it_large_p16_8xb512_amp_coslr_400e_in1k.py | 70 +
 ...it_large_p16_8xb512_amp_coslr_800e_in1k.py | 70 +
 ...it_base_p16_8xb512_amp_coslr_1600e_in1k.py | 65 +
 ...vit_base_p16_8xb512_amp_coslr_300e_in1k.py | 65 +
 ...vit_base_p16_8xb512_amp_coslr_400e_in1k.py | 65 +
 ...vit_base_p16_8xb512_amp_coslr_800e_in1k.py | 65 +
 ...it_huge_p14_8xb512_amp_coslr_1600e_in1k.py | 75 +
 ...t_large_p16_8xb512_amp_coslr_1600e_in1k.py | 70 +
 ...it_large_p16_8xb512_amp_coslr_300e_in1k.py | 70 +
 ...it_large_p16_8xb512_amp_coslr_400e_in1k.py | 70 +
 ...it_large_p16_8xb512_amp_coslr_800e_in1k.py | 70 +
 .../mobilenet_v2/mobilenet_v2_8xb32_in1k.py | 9 +
 .../mobilenet_v3_large_8xb128_in1k.py | 40 +
 .../mobilenet_v3_small_050_8xb128_in1k.py | 85 +
 .../mobilenet_v3_small_075_8xb128_in1k.py | 83 +
 .../mobilenet_v3_small_8xb128_in1k.py | 34 +
 .../mobilenet_v3_small_8xb16_cifar10.py | 34 +
 .../configs/resnet/resnet18_8xb32_in1k.py | 9 +
 ...simclr_resnet50_16xb256_coslr_200e_in1k.py | 58 +
 .../swin_transformer/swin_base_16xb64_in1k.py | 35 +
 .../swin_base_16xb64_in1k_384px.py | 12 +
 .../swin_large_16xb64_in1k.py | 18 +
 .../swin_large_16xb64_in1k_384px.py | 18 +
 .../swin_large_8xb8_cub_384px.py | 49 +
 .../swin_small_16xb64_in1k.py | 37 +
 .../swin_transformer/swin_tiny_16xb64_in1k.py | 37 +
 .../swinv2_base_w12_8xb128_in21k_192px.py | 32 +
 .../swinv2_base_w16_16xb64_in1k_256px.py | 24 +
 ...v2_base_w16_in21k_pre_16xb64_in1k_256px.py | 26 +
 ...v2_base_w24_in21k_pre_16xb64_in1k_384px.py | 14 +
 .../swinv2_base_w8_16xb64_in1k_256px.py | 23 +
 .../swinv2_large_w12_8xb128_in21k_192px.py | 32 +
 ...2_large_w16_in21k_pre_16xb64_in1k_256px.py | 24 +
 ...2_large_w24_in21k_pre_16xb64_in1k_384px.py | 24 +
 .../swinv2_small_w16_16xb64_in1k_256px.py | 28 +
 .../swinv2_small_w8_16xb64_in1k_256px.py | 24 +
 .../swinv2_tiny_w16_16xb64_in1k_256px.py | 28 +
 .../swinv2_tiny_w8_16xb64_in1k_256px.py | 24 +
 .../vit_base_p16_32xb128_mae_in1k.py | 52 +
 .../vit_base_p16_64xb64_in1k.py | 20 +
 .../vit_base_p16_64xb64_in1k_384px.py | 44 +
 .../vit_base_p32_64xb64_in1k.py | 26 +
 .../vit_base_p32_64xb64_in1k_384px.py | 48 +
 .../vit_large_p16_64xb64_in1k.py | 27 +
 .../vit_large_p16_64xb64_in1k_384px.py | 49 +
 .../vit_large_p32_64xb64_in1k.py | 27 +
 .../vit_large_p32_64xb64_in1k_384px.py | 49 +
 mmpretrain/datasets/__init__.py | 62 +
 .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 2383 bytes
 .../__pycache__/base_dataset.cpython-310.pyc | Bin 0 -> 8339 bytes
 .../__pycache__/builder.cpython-310.pyc | Bin 0 -> 1008 bytes
 .../__pycache__/caltech101.cpython-310.pyc | Bin 0 -> 3901 bytes
 .../__pycache__/categories.cpython-310.pyc | Bin 0 -> 56316 bytes
 .../__pycache__/cifar.cpython-310.pyc | Bin 0 -> 6766 bytes
 .../datasets/__pycache__/cub.cpython-310.pyc | Bin 0 -> 4482 bytes
 .../__pycache__/custom.cpython-310.pyc | Bin 0 -> 9018 bytes
 .../dataset_wrappers.cpython-310.pyc | Bin 0 -> 5755 bytes
 .../datasets/__pycache__/dtd.cpython-310.pyc | Bin 0 -> 3656 bytes
 .../__pycache__/fgvcaircraft.cpython-310.pyc | Bin 0 -> 3586 bytes
 .../__pycache__/flowers102.cpython-310.pyc | Bin 0 -> 3346 bytes
 .../__pycache__/food101.cpython-310.pyc | Bin 0 -> 3430 bytes
 .../__pycache__/imagenet.cpython-310.pyc | Bin 0 -> 7370 bytes
.../__pycache__/inshop.cpython-310.pyc | Bin 0 -> 4921 bytes .../__pycache__/mnist.cpython-310.pyc | Bin 0 -> 7956 bytes .../__pycache__/multi_label.cpython-310.pyc | Bin 0 -> 3586 bytes .../__pycache__/multi_task.cpython-310.pyc | Bin 0 -> 10726 bytes .../__pycache__/nlvr2.cpython-310.pyc | Bin 0 -> 1360 bytes .../__pycache__/oxfordiiitpet.cpython-310.pyc | Bin 0 -> 3426 bytes .../__pycache__/places205.cpython-310.pyc | Bin 0 -> 1672 bytes .../__pycache__/stanfordcars.cpython-310.pyc | Bin 0 -> 4612 bytes .../__pycache__/sun397.cpython-310.pyc | Bin 0 -> 4412 bytes .../__pycache__/utils.cpython-310.pyc | Bin 0 -> 6945 bytes .../datasets/__pycache__/voc.cpython-310.pyc | Bin 0 -> 6438 bytes mmpretrain/datasets/base_dataset.py | 219 + mmpretrain/datasets/builder.py | 25 + mmpretrain/datasets/caltech101.py | 113 + mmpretrain/datasets/categories.py | 1661 +++ mmpretrain/datasets/cifar.py | 210 + mmpretrain/datasets/coco_caption.py | 42 + mmpretrain/datasets/coco_retrieval.py | 148 + mmpretrain/datasets/coco_vqa.py | 114 + mmpretrain/datasets/cub.py | 142 + mmpretrain/datasets/custom.py | 287 + mmpretrain/datasets/dataset_wrappers.py | 176 + mmpretrain/datasets/dtd.py | 116 + mmpretrain/datasets/fgvcaircraft.py | 98 + mmpretrain/datasets/flamingo.py | 295 + mmpretrain/datasets/flickr30k_caption.py | 77 + mmpretrain/datasets/flickr30k_retrieval.py | 110 + mmpretrain/datasets/flowers102.py | 104 + mmpretrain/datasets/food101.py | 102 + mmpretrain/datasets/gqa_dataset.py | 70 + mmpretrain/datasets/iconqa.py | 63 + mmpretrain/datasets/imagenet.py | 235 + mmpretrain/datasets/infographic_vqa.py | 61 + mmpretrain/datasets/inshop.py | 157 + mmpretrain/datasets/minigpt4_dataset.py | 79 + mmpretrain/datasets/mnist.py | 234 + mmpretrain/datasets/multi_label.py | 85 + mmpretrain/datasets/multi_task.py | 337 + mmpretrain/datasets/nlvr2.py | 36 + mmpretrain/datasets/nocaps.py | 46 + mmpretrain/datasets/ocr_vqa.py | 91 + mmpretrain/datasets/oxfordiiitpet.py | 97 + mmpretrain/datasets/places205.py | 40 + mmpretrain/datasets/refcoco.py | 112 + mmpretrain/datasets/samplers/__init__.py | 5 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 282 bytes .../__pycache__/repeat_aug.cpython-310.pyc | Bin 0 -> 3660 bytes .../__pycache__/sequential.cpython-310.pyc | Bin 0 -> 2210 bytes mmpretrain/datasets/samplers/repeat_aug.py | 101 + mmpretrain/datasets/samplers/sequential.py | 56 + mmpretrain/datasets/scienceqa.py | 109 + mmpretrain/datasets/stanfordcars.py | 148 + mmpretrain/datasets/sun397.py | 125 + mmpretrain/datasets/textvqa.py | 105 + mmpretrain/datasets/transforms/__init__.py | 41 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 1874 bytes .../__pycache__/auto_augment.cpython-310.pyc | Bin 0 -> 39651 bytes .../__pycache__/formatting.cpython-310.pyc | Bin 0 -> 11204 bytes .../__pycache__/processing.cpython-310.pyc | Bin 0 -> 55705 bytes .../__pycache__/utils.cpython-310.pyc | Bin 0 -> 1515 bytes .../__pycache__/wrappers.cpython-310.pyc | Bin 0 -> 5398 bytes .../datasets/transforms/auto_augment.py | 1244 +++ mmpretrain/datasets/transforms/formatting.py | 353 + mmpretrain/datasets/transforms/processing.py | 1795 ++++ mmpretrain/datasets/transforms/utils.py | 53 + mmpretrain/datasets/transforms/wrappers.py | 144 + mmpretrain/datasets/utils.py | 243 + mmpretrain/datasets/vg_vqa.py | 77 + mmpretrain/datasets/visual_genome.py | 95 + mmpretrain/datasets/vizwiz.py | 112 + mmpretrain/datasets/voc.py | 195 + mmpretrain/datasets/vsr.py | 55 + mmpretrain/engine/__init__.py | 5 + 
.../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 224 bytes mmpretrain/engine/hooks/__init__.py | 19 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 823 bytes .../class_num_check_hook.cpython-310.pyc | Bin 0 -> 2466 bytes .../__pycache__/densecl_hook.cpython-310.pyc | Bin 0 -> 1778 bytes .../__pycache__/ema_hook.cpython-310.pyc | Bin 0 -> 7332 bytes .../margin_head_hooks.cpython-310.pyc | Bin 0 -> 2370 bytes .../precise_bn_hook.cpython-310.pyc | Bin 0 -> 7698 bytes .../retriever_hooks.cpython-310.pyc | Bin 0 -> 1303 bytes .../__pycache__/simsiam_hook.cpython-310.pyc | Bin 0 -> 1823 bytes .../__pycache__/swav_hook.cpython-310.pyc | Bin 0 -> 4240 bytes .../switch_recipe_hook.cpython-310.pyc | Bin 0 -> 5956 bytes .../visualization_hook.cpython-310.pyc | Bin 0 -> 4653 bytes .../warmup_param_hook.cpython-310.pyc | Bin 0 -> 2651 bytes .../engine/hooks/class_num_check_hook.py | 63 + mmpretrain/engine/hooks/densecl_hook.py | 42 + mmpretrain/engine/hooks/ema_hook.py | 216 + mmpretrain/engine/hooks/margin_head_hooks.py | 61 + mmpretrain/engine/hooks/precise_bn_hook.py | 223 + mmpretrain/engine/hooks/retriever_hooks.py | 32 + mmpretrain/engine/hooks/simsiam_hook.py | 48 + mmpretrain/engine/hooks/swav_hook.py | 119 + mmpretrain/engine/hooks/switch_recipe_hook.py | 169 + mmpretrain/engine/hooks/visualization_hook.py | 126 + mmpretrain/engine/hooks/warmup_param_hook.py | 66 + mmpretrain/engine/optimizers/__init__.py | 8 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 394 bytes .../__pycache__/adan_t.cpython-310.pyc | Bin 0 -> 6798 bytes .../__pycache__/lamb.cpython-310.pyc | Bin 0 -> 5134 bytes .../__pycache__/lars.cpython-310.pyc | Bin 0 -> 3830 bytes ..._optim_wrapper_constructor.cpython-310.pyc | Bin 0 -> 5756 bytes mmpretrain/engine/optimizers/adan_t.py | 312 + mmpretrain/engine/optimizers/lamb.py | 228 + mmpretrain/engine/optimizers/lars.py | 130 + .../layer_decay_optim_wrapper_constructor.py | 166 + mmpretrain/engine/runners/__init__.py | 4 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 259 bytes .../retrieval_loop.cpython-310.pyc | Bin 0 -> 4102 bytes mmpretrain/engine/runners/retrieval_loop.py | 168 + mmpretrain/engine/schedulers/__init__.py | 4 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 245 bytes .../weight_decay_scheduler.cpython-310.pyc | Bin 0 -> 2360 bytes .../schedulers/weight_decay_scheduler.py | 64 + mmpretrain/evaluation/__init__.py | 3 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 189 bytes mmpretrain/evaluation/functional/__init__.py | 1 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 154 bytes mmpretrain/evaluation/metrics/ANLS.py | 103 + mmpretrain/evaluation/metrics/__init__.py | 22 + .../metrics/__pycache__/ANLS.cpython-310.pyc | Bin 0 -> 3696 bytes .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 1041 bytes .../__pycache__/caption.cpython-310.pyc | Bin 0 -> 4212 bytes .../metrics/__pycache__/gqa.cpython-310.pyc | Bin 0 -> 3015 bytes .../__pycache__/multi_label.cpython-310.pyc | Bin 0 -> 21484 bytes .../__pycache__/multi_task.cpython-310.pyc | Bin 0 -> 4486 bytes .../__pycache__/nocaps.cpython-310.pyc | Bin 0 -> 2302 bytes .../__pycache__/retrieval.cpython-310.pyc | Bin 0 -> 16671 bytes .../__pycache__/scienceqa.cpython-310.pyc | Bin 0 -> 5438 bytes .../shape_bias_label.cpython-310.pyc | Bin 0 -> 6344 bytes .../__pycache__/single_label.cpython-310.pyc | Bin 0 -> 26291 bytes .../visual_grounding_eval.cpython-310.pyc | Bin 0 -> 3426 bytes .../voc_multi_label.cpython-310.pyc | Bin 0 -> 3870 bytes 
.../metrics/__pycache__/vqa.cpython-310.pyc | Bin 0 -> 9501 bytes mmpretrain/evaluation/metrics/caption.py | 136 + mmpretrain/evaluation/metrics/gqa.py | 78 + mmpretrain/evaluation/metrics/multi_label.py | 599 ++ mmpretrain/evaluation/metrics/multi_task.py | 120 + mmpretrain/evaluation/metrics/nocaps.py | 59 + mmpretrain/evaluation/metrics/retrieval.py | 445 + mmpretrain/evaluation/metrics/scienceqa.py | 170 + .../evaluation/metrics/shape_bias_label.py | 172 + mmpretrain/evaluation/metrics/single_label.py | 776 ++ .../metrics/visual_grounding_eval.py | 85 + .../evaluation/metrics/voc_multi_label.py | 98 + mmpretrain/evaluation/metrics/vqa.py | 315 + mmpretrain/models/__init__.py | 20 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 649 bytes .../__pycache__/builder.cpython-310.pyc | Bin 0 -> 1035 bytes mmpretrain/models/backbones/__init__.py | 129 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 3132 bytes .../__pycache__/alexnet.cpython-310.pyc | Bin 0 -> 1758 bytes .../__pycache__/base_backbone.cpython-310.pyc | Bin 0 -> 1464 bytes .../__pycache__/beit.cpython-310.pyc | Bin 0 -> 19569 bytes .../__pycache__/conformer.cpython-310.pyc | Bin 0 -> 14895 bytes .../__pycache__/convmixer.cpython-310.pyc | Bin 0 -> 5316 bytes .../__pycache__/convnext.cpython-310.pyc | Bin 0 -> 11379 bytes .../__pycache__/cspnet.cpython-310.pyc | Bin 0 -> 21039 bytes .../__pycache__/davit.cpython-310.pyc | Bin 0 -> 24799 bytes .../__pycache__/deit.cpython-310.pyc | Bin 0 -> 5007 bytes .../__pycache__/deit3.cpython-310.pyc | Bin 0 -> 12958 bytes .../__pycache__/densenet.cpython-310.pyc | Bin 0 -> 9205 bytes .../__pycache__/edgenext.cpython-310.pyc | Bin 0 -> 11275 bytes .../efficientformer.cpython-310.pyc | Bin 0 -> 17020 bytes .../__pycache__/efficientnet.cpython-310.pyc | Bin 0 -> 10810 bytes .../efficientnet_v2.cpython-310.pyc | Bin 0 -> 12500 bytes .../__pycache__/hivit.cpython-310.pyc | Bin 0 -> 18218 bytes .../__pycache__/hornet.cpython-310.pyc | Bin 0 -> 14571 bytes .../__pycache__/hrnet.cpython-310.pyc | Bin 0 -> 14713 bytes .../__pycache__/inception_v3.cpython-310.pyc | Bin 0 -> 14122 bytes .../__pycache__/lenet.cpython-310.pyc | Bin 0 -> 1517 bytes .../__pycache__/levit.cpython-310.pyc | Bin 0 -> 14302 bytes .../__pycache__/mixmim.cpython-310.pyc | Bin 0 -> 15199 bytes .../__pycache__/mlp_mixer.cpython-310.pyc | Bin 0 -> 7700 bytes .../__pycache__/mobilenet_v2.cpython-310.pyc | Bin 0 -> 7458 bytes .../__pycache__/mobilenet_v3.cpython-310.pyc | Bin 0 -> 6037 bytes .../__pycache__/mobileone.cpython-310.pyc | Bin 0 -> 14017 bytes .../__pycache__/mobilevit.cpython-310.pyc | Bin 0 -> 12458 bytes .../__pycache__/mvit.cpython-310.pyc | Bin 0 -> 19843 bytes .../__pycache__/poolformer.cpython-310.pyc | Bin 0 -> 11778 bytes .../__pycache__/regnet.cpython-310.pyc | Bin 0 -> 10258 bytes .../__pycache__/replknet.cpython-310.pyc | Bin 0 -> 18484 bytes .../__pycache__/repmlp.cpython-310.pyc | Bin 0 -> 17771 bytes .../__pycache__/repvgg.cpython-310.pyc | Bin 0 -> 15934 bytes .../__pycache__/res2net.cpython-310.pyc | Bin 0 -> 8605 bytes .../__pycache__/resnest.cpython-310.pyc | Bin 0 -> 10701 bytes .../__pycache__/resnet.cpython-310.pyc | Bin 0 -> 19859 bytes .../__pycache__/resnet_cifar.cpython-310.pyc | Bin 0 -> 4029 bytes .../__pycache__/resnext.cpython-310.pyc | Bin 0 -> 5665 bytes .../__pycache__/revvit.cpython-310.pyc | Bin 0 -> 18062 bytes .../__pycache__/riformer.cpython-310.pyc | Bin 0 -> 10708 bytes .../__pycache__/seresnet.cpython-310.pyc | Bin 0 -> 5070 bytes .../__pycache__/seresnext.cpython-310.pyc 
| Bin 0 -> 5942 bytes .../__pycache__/shufflenet_v1.cpython-310.pyc | Bin 0 -> 9378 bytes .../__pycache__/shufflenet_v2.cpython-310.pyc | Bin 0 -> 7999 bytes .../sparse_convnext.cpython-310.pyc | Bin 0 -> 8619 bytes .../__pycache__/sparse_resnet.cpython-310.pyc | Bin 0 -> 5739 bytes .../swin_transformer.cpython-310.pyc | Bin 0 -> 17982 bytes .../swin_transformer_v2.cpython-310.pyc | Bin 0 -> 17428 bytes .../__pycache__/t2t_vit.cpython-310.pyc | Bin 0 -> 13730 bytes .../__pycache__/timm_backbone.cpython-310.pyc | Bin 0 -> 3925 bytes .../__pycache__/tinyvit.cpython-310.pyc | Bin 0 -> 20600 bytes .../backbones/__pycache__/tnt.cpython-310.pyc | Bin 0 -> 11066 bytes .../__pycache__/twins.cpython-310.pyc | Bin 0 -> 22166 bytes .../backbones/__pycache__/van.cpython-310.pyc | Bin 0 -> 13384 bytes .../backbones/__pycache__/vgg.cpython-310.pyc | Bin 0 -> 5016 bytes .../backbones/__pycache__/vig.cpython-310.pyc | Bin 0 -> 24289 bytes .../vision_transformer.cpython-310.pyc | Bin 0 -> 14318 bytes .../__pycache__/vit_eva02.cpython-310.pyc | Bin 0 -> 9992 bytes .../__pycache__/vit_sam.cpython-310.pyc | Bin 0 -> 20558 bytes .../__pycache__/xcit.cpython-310.pyc | Bin 0 -> 23522 bytes mmpretrain/models/backbones/alexnet.py | 56 + mmpretrain/models/backbones/base_backbone.py | 33 + mmpretrain/models/backbones/beit.py | 697 ++ mmpretrain/models/backbones/conformer.py | 621 ++ mmpretrain/models/backbones/convmixer.py | 176 + mmpretrain/models/backbones/convnext.py | 412 + mmpretrain/models/backbones/cspnet.py | 679 ++ mmpretrain/models/backbones/davit.py | 834 ++ mmpretrain/models/backbones/deit.py | 116 + mmpretrain/models/backbones/deit3.py | 454 + mmpretrain/models/backbones/densenet.py | 332 + mmpretrain/models/backbones/edgenext.py | 398 + .../models/backbones/efficientformer.py | 606 ++ mmpretrain/models/backbones/efficientnet.py | 410 + .../models/backbones/efficientnet_v2.py | 343 + mmpretrain/models/backbones/hivit.py | 656 ++ mmpretrain/models/backbones/hornet.py | 500 + mmpretrain/models/backbones/hrnet.py | 563 + mmpretrain/models/backbones/inception_v3.py | 501 + mmpretrain/models/backbones/lenet.py | 42 + mmpretrain/models/backbones/levit.py | 522 + mmpretrain/models/backbones/mixmim.py | 533 + mmpretrain/models/backbones/mlp_mixer.py | 263 + mmpretrain/models/backbones/mobilenet_v2.py | 264 + mmpretrain/models/backbones/mobilenet_v3.py | 217 + mmpretrain/models/backbones/mobileone.py | 515 + mmpretrain/models/backbones/mobilevit.py | 431 + mmpretrain/models/backbones/mvit.py | 700 ++ mmpretrain/models/backbones/poolformer.py | 416 + mmpretrain/models/backbones/regnet.py | 312 + mmpretrain/models/backbones/replknet.py | 668 ++ mmpretrain/models/backbones/repmlp.py | 578 ++ mmpretrain/models/backbones/repvgg.py | 622 ++ mmpretrain/models/backbones/res2net.py | 317 + mmpretrain/models/backbones/resnest.py | 339 + mmpretrain/models/backbones/resnet.py | 768 ++ mmpretrain/models/backbones/resnet_cifar.py | 81 + mmpretrain/models/backbones/resnext.py | 148 + mmpretrain/models/backbones/revvit.py | 671 ++ mmpretrain/models/backbones/riformer.py | 390 + mmpretrain/models/backbones/seresnet.py | 125 + mmpretrain/models/backbones/seresnext.py | 155 + mmpretrain/models/backbones/shufflenet_v1.py | 321 + mmpretrain/models/backbones/shufflenet_v2.py | 305 + .../models/backbones/sparse_convnext.py | 298 + mmpretrain/models/backbones/sparse_resnet.py | 179 + .../models/backbones/swin_transformer.py | 585 ++ .../models/backbones/swin_transformer_v2.py | 567 + mmpretrain/models/backbones/t2t_vit.py | 447 + 
mmpretrain/models/backbones/timm_backbone.py | 111 + mmpretrain/models/backbones/tinyvit.py | 769 ++ mmpretrain/models/backbones/tnt.py | 368 + mmpretrain/models/backbones/twins.py | 721 ++ mmpretrain/models/backbones/van.py | 434 + mmpretrain/models/backbones/vgg.py | 183 + mmpretrain/models/backbones/vig.py | 852 ++ .../models/backbones/vision_transformer.py | 537 + mmpretrain/models/backbones/vit_eva02.py | 350 + mmpretrain/models/backbones/vit_sam.py | 697 ++ mmpretrain/models/backbones/xcit.py | 770 ++ mmpretrain/models/builder.py | 39 + mmpretrain/models/classifiers/__init__.py | 10 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 381 bytes .../__pycache__/base.cpython-310.pyc | Bin 0 -> 5047 bytes .../__pycache__/hugging_face.cpython-310.pyc | Bin 0 -> 8178 bytes .../__pycache__/image.cpython-310.pyc | Bin 0 -> 10161 bytes .../__pycache__/timm.cpython-310.pyc | Bin 0 -> 7462 bytes mmpretrain/models/classifiers/base.py | 108 + mmpretrain/models/classifiers/hugging_face.py | 222 + mmpretrain/models/classifiers/image.py | 265 + mmpretrain/models/classifiers/timm.py | 209 + mmpretrain/models/heads/__init__.py | 69 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 1990 bytes .../__pycache__/beitv1_head.cpython-310.pyc | Bin 0 -> 2018 bytes .../__pycache__/beitv2_head.cpython-310.pyc | Bin 0 -> 2157 bytes .../__pycache__/cae_head.cpython-310.pyc | Bin 0 -> 2661 bytes .../__pycache__/cls_head.cpython-310.pyc | Bin 0 -> 5960 bytes .../conformer_head.cpython-310.pyc | Bin 0 -> 4612 bytes .../contrastive_head.cpython-310.pyc | Bin 0 -> 2029 bytes .../__pycache__/deit_head.cpython-310.pyc | Bin 0 -> 2927 bytes .../efficientformer_head.cpython-310.pyc | Bin 0 -> 3322 bytes .../grounding_head.cpython-310.pyc | Bin 0 -> 5436 bytes .../__pycache__/itc_head.cpython-310.pyc | Bin 0 -> 5108 bytes .../__pycache__/itm_head.cpython-310.pyc | Bin 0 -> 4202 bytes .../itpn_clip_head.cpython-310.pyc | Bin 0 -> 2065 bytes .../__pycache__/latent_heads.cpython-310.pyc | Bin 0 -> 3543 bytes .../__pycache__/levit_head.cpython-310.pyc | Bin 0 -> 2764 bytes .../__pycache__/linear_head.cpython-310.pyc | Bin 0 -> 2526 bytes .../__pycache__/mae_head.cpython-310.pyc | Bin 0 -> 3718 bytes .../__pycache__/margin_head.cpython-310.pyc | Bin 0 -> 10178 bytes .../__pycache__/mim_head.cpython-310.pyc | Bin 0 -> 1398 bytes .../__pycache__/mixmim_head.cpython-310.pyc | Bin 0 -> 1737 bytes .../__pycache__/mocov3_head.cpython-310.pyc | Bin 0 -> 2324 bytes .../multi_label_cls_head.cpython-310.pyc | Bin 0 -> 5739 bytes .../multi_label_csra_head.cpython-310.pyc | Bin 0 -> 4607 bytes .../multi_label_linear_head.cpython-310.pyc | Bin 0 -> 2718 bytes .../multi_task_head.cpython-310.pyc | Bin 0 -> 5433 bytes .../__pycache__/seq_gen_head.cpython-310.pyc | Bin 0 -> 5319 bytes .../__pycache__/simmim_head.cpython-310.pyc | Bin 0 -> 1593 bytes .../__pycache__/spark_head.cpython-310.pyc | Bin 0 -> 3077 bytes .../__pycache__/stacked_head.cpython-310.pyc | Bin 0 -> 4292 bytes .../__pycache__/swav_head.cpython-310.pyc | Bin 0 -> 1131 bytes .../__pycache__/vig_head.cpython-310.pyc | Bin 0 -> 2499 bytes .../vision_transformer_head.cpython-310.pyc | Bin 0 -> 3705 bytes .../__pycache__/vqa_head.cpython-310.pyc | Bin 0 -> 7874 bytes mmpretrain/models/heads/beitv1_head.py | 55 + mmpretrain/models/heads/beitv2_head.py | 57 + mmpretrain/models/heads/cae_head.py | 69 + mmpretrain/models/heads/cls_head.py | 156 + mmpretrain/models/heads/conformer_head.py | 122 + mmpretrain/models/heads/contrastive_head.py | 50 + 
mmpretrain/models/heads/deit_head.py | 72 + .../models/heads/efficientformer_head.py | 89 + mmpretrain/models/heads/grounding_head.py | 217 + mmpretrain/models/heads/itc_head.py | 157 + mmpretrain/models/heads/itm_head.py | 117 + mmpretrain/models/heads/itpn_clip_head.py | 56 + mmpretrain/models/heads/latent_heads.py | 94 + mmpretrain/models/heads/levit_head.py | 81 + mmpretrain/models/heads/linear_head.py | 63 + mmpretrain/models/heads/mae_head.py | 106 + mmpretrain/models/heads/margin_head.py | 300 + mmpretrain/models/heads/mim_head.py | 37 + mmpretrain/models/heads/mixmim_head.py | 49 + mmpretrain/models/heads/mocov3_head.py | 66 + .../models/heads/multi_label_cls_head.py | 155 + .../models/heads/multi_label_csra_head.py | 112 + .../models/heads/multi_label_linear_head.py | 66 + mmpretrain/models/heads/multi_task_head.py | 153 + mmpretrain/models/heads/seq_gen_head.py | 188 + mmpretrain/models/heads/simmim_head.py | 40 + mmpretrain/models/heads/spark_head.py | 92 + mmpretrain/models/heads/stacked_head.py | 135 + mmpretrain/models/heads/swav_head.py | 31 + mmpretrain/models/heads/vig_head.py | 65 + .../models/heads/vision_transformer_head.py | 97 + mmpretrain/models/heads/vqa_head.py | 246 + mmpretrain/models/losses/__init__.py | 35 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 1016 bytes .../asymmetric_loss.cpython-310.pyc | Bin 0 -> 4940 bytes .../__pycache__/cae_loss.cpython-310.pyc | Bin 0 -> 1804 bytes .../cosine_similarity_loss.cpython-310.pyc | Bin 0 -> 1911 bytes .../cross_correlation_loss.cpython-310.pyc | Bin 0 -> 1817 bytes .../cross_entropy_loss.cpython-310.pyc | Bin 0 -> 5934 bytes .../__pycache__/focal_loss.cpython-310.pyc | Bin 0 -> 3999 bytes .../label_smooth_loss.cpython-310.pyc | Bin 0 -> 6126 bytes .../reconstruction_loss.cpython-310.pyc | Bin 0 -> 2548 bytes .../__pycache__/seesaw_loss.cpython-310.pyc | Bin 0 -> 5444 bytes .../__pycache__/swav_loss.cpython-310.pyc | Bin 0 -> 6224 bytes .../losses/__pycache__/utils.cpython-310.pyc | Bin 0 -> 3411 bytes mmpretrain/models/losses/asymmetric_loss.py | 149 + mmpretrain/models/losses/cae_loss.py | 48 + .../models/losses/cosine_similarity_loss.py | 55 + .../models/losses/cross_correlation_loss.py | 44 + .../models/losses/cross_entropy_loss.py | 209 + mmpretrain/models/losses/focal_loss.py | 116 + mmpretrain/models/losses/label_smooth_loss.py | 177 + .../models/losses/reconstruction_loss.py | 67 + mmpretrain/models/losses/seesaw_loss.py | 173 + mmpretrain/models/losses/swav_loss.py | 190 + mmpretrain/models/losses/utils.py | 119 + mmpretrain/models/multimodal/__init__.py | 24 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 742 bytes mmpretrain/models/multimodal/blip/__init__.py | 12 + .../models/multimodal/blip/blip_caption.py | 184 + .../models/multimodal/blip/blip_grounding.py | 248 + .../models/multimodal/blip/blip_nlvr.py | 205 + .../models/multimodal/blip/blip_retrieval.py | 716 ++ mmpretrain/models/multimodal/blip/blip_vqa.py | 265 + .../models/multimodal/blip/language_model.py | 1320 +++ mmpretrain/models/multimodal/blip2/Qformer.py | 773 ++ .../models/multimodal/blip2/__init__.py | 10 + .../models/multimodal/blip2/blip2_caption.py | 315 + .../models/multimodal/blip2/blip2_opt_vqa.py | 92 + .../multimodal/blip2/blip2_retriever.py | 505 + .../models/multimodal/blip2/modeling_opt.py | 1083 ++ .../multimodal/chinese_clip/__init__.py | 5 + .../models/multimodal/chinese_clip/bert.py | 263 + .../multimodal/chinese_clip/chinese_clip.py | 446 + .../models/multimodal/chinese_clip/utils.py | 186 + 
mmpretrain/models/multimodal/clip/__init__.py | 5 + mmpretrain/models/multimodal/clip/clip.py | 364 + .../multimodal/clip/clip_transformer.py | 99 + mmpretrain/models/multimodal/clip/utils.py | 115 + .../models/multimodal/flamingo/__init__.py | 5 + .../models/multimodal/flamingo/adapter.py | 96 + .../models/multimodal/flamingo/flamingo.py | 323 + .../models/multimodal/flamingo/modules.py | 398 + .../models/multimodal/flamingo/utils.py | 64 + .../models/multimodal/llava/__init__.py | 5 + mmpretrain/models/multimodal/llava/llava.py | 267 + mmpretrain/models/multimodal/llava/modules.py | 234 + .../models/multimodal/minigpt4/__init__.py | 4 + .../models/multimodal/minigpt4/minigpt4.py | 410 + mmpretrain/models/multimodal/ofa/__init__.py | 5 + mmpretrain/models/multimodal/ofa/ofa.py | 320 + .../models/multimodal/ofa/ofa_modules.py | 1613 +++ .../models/multimodal/otter/__init__.py | 4 + mmpretrain/models/multimodal/otter/otter.py | 143 + mmpretrain/models/multimodal/ram/__init__.py | 4 + mmpretrain/models/multimodal/ram/bert.py | 1197 +++ .../models/multimodal/ram/config/__init__.py | 1 + .../ram/config/ram_swin_large_14m.py | 93 + .../multimodal/ram/data/ram_tag_list.pickle | Bin 0 -> 51099 bytes .../ram/data/ram_tag_list_chinese.pickle | Bin 0 -> 50796 bytes .../ram/data/ram_tag_list_threshold.pickle | Bin 0 -> 41289 bytes .../models/multimodal/ram/gradio_demo.py | 109 + .../models/multimodal/ram/openset_utils.py | 212 + mmpretrain/models/multimodal/ram/ram.py | 332 + .../models/multimodal/ram/run/__init__.py | 1 + .../models/multimodal/ram/run/inference.py | 29 + mmpretrain/models/multimodal/ram/utils.py | 87 + mmpretrain/models/necks/__init__.py | 37 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 1063 bytes .../__pycache__/beitv2_neck.cpython-310.pyc | Bin 0 -> 5172 bytes .../__pycache__/cae_neck.cpython-310.pyc | Bin 0 -> 9315 bytes .../__pycache__/densecl_neck.cpython-310.pyc | Bin 0 -> 2594 bytes .../necks/__pycache__/gap.cpython-310.pyc | Bin 0 -> 2161 bytes .../necks/__pycache__/gem.cpython-310.pyc | Bin 0 -> 2602 bytes .../necks/__pycache__/hr_fuse.cpython-310.pyc | Bin 0 -> 2345 bytes .../__pycache__/itpn_neck.cpython-310.pyc | Bin 0 -> 10390 bytes .../__pycache__/linear_neck.cpython-310.pyc | Bin 0 -> 3071 bytes .../__pycache__/mae_neck.cpython-310.pyc | Bin 0 -> 6778 bytes .../__pycache__/milan_neck.cpython-310.pyc | Bin 0 -> 7447 bytes .../__pycache__/mixmim_neck.cpython-310.pyc | Bin 0 -> 3914 bytes .../__pycache__/mocov2_neck.cpython-310.pyc | Bin 0 -> 2049 bytes .../nonlinear_neck.cpython-310.pyc | Bin 0 -> 3837 bytes .../__pycache__/simmim_neck.cpython-310.pyc | Bin 0 -> 1360 bytes .../__pycache__/spark_neck.cpython-310.pyc | Bin 0 -> 6029 bytes .../__pycache__/swav_neck.cpython-310.pyc | Bin 0 -> 3137 bytes mmpretrain/models/necks/beitv2_neck.py | 153 + mmpretrain/models/necks/cae_neck.py | 273 + mmpretrain/models/necks/densecl_neck.py | 71 + mmpretrain/models/necks/gap.py | 45 + mmpretrain/models/necks/gem.py | 53 + mmpretrain/models/necks/hr_fuse.py | 83 + mmpretrain/models/necks/itpn_neck.py | 388 + mmpretrain/models/necks/linear_neck.py | 88 + mmpretrain/models/necks/mae_neck.py | 188 + mmpretrain/models/necks/milan_neck.py | 222 + mmpretrain/models/necks/mixmim_neck.py | 111 + mmpretrain/models/necks/mocov2_neck.py | 52 + mmpretrain/models/necks/nonlinear_neck.py | 115 + mmpretrain/models/necks/simmim_neck.py | 33 + mmpretrain/models/necks/spark_neck.py | 169 + mmpretrain/models/necks/swav_neck.py | 93 + mmpretrain/models/peft/__init__.py | 6 + 
.../peft/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 206 bytes .../peft/__pycache__/lora.cpython-310.pyc | Bin 0 -> 7470 bytes mmpretrain/models/peft/lora.py | 205 + mmpretrain/models/retrievers/__init__.py | 5 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 278 bytes .../__pycache__/base.cpython-310.pyc | Bin 0 -> 6413 bytes .../__pycache__/image2image.cpython-310.pyc | Bin 0 -> 11415 bytes mmpretrain/models/retrievers/base.py | 151 + mmpretrain/models/retrievers/image2image.py | 314 + mmpretrain/models/selfsup/__init__.py | 59 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 1401 bytes .../__pycache__/barlowtwins.cpython-310.pyc | Bin 0 -> 1608 bytes .../selfsup/__pycache__/base.cpython-310.pyc | Bin 0 -> 6883 bytes .../selfsup/__pycache__/beit.cpython-310.pyc | Bin 0 -> 13103 bytes .../selfsup/__pycache__/byol.cpython-310.pyc | Bin 0 -> 3293 bytes .../selfsup/__pycache__/cae.cpython-310.pyc | Bin 0 -> 16367 bytes .../__pycache__/densecl.cpython-310.pyc | Bin 0 -> 5788 bytes .../selfsup/__pycache__/eva.cpython-310.pyc | Bin 0 -> 1634 bytes .../selfsup/__pycache__/itpn.cpython-310.pyc | Bin 0 -> 9752 bytes .../selfsup/__pycache__/mae.cpython-310.pyc | Bin 0 -> 13004 bytes .../__pycache__/maskfeat.cpython-310.pyc | Bin 0 -> 12105 bytes .../selfsup/__pycache__/mff.cpython-310.pyc | Bin 0 -> 7306 bytes .../selfsup/__pycache__/milan.cpython-310.pyc | Bin 0 -> 7367 bytes .../__pycache__/mixmim.cpython-310.pyc | Bin 0 -> 9343 bytes .../selfsup/__pycache__/moco.cpython-310.pyc | Bin 0 -> 4511 bytes .../__pycache__/mocov3.cpython-310.pyc | Bin 0 -> 7069 bytes .../__pycache__/simclr.cpython-310.pyc | Bin 0 -> 3683 bytes .../__pycache__/simmim.cpython-310.pyc | Bin 0 -> 7262 bytes .../__pycache__/simsiam.cpython-310.pyc | Bin 0 -> 1596 bytes .../selfsup/__pycache__/spark.cpython-310.pyc | Bin 0 -> 4536 bytes .../selfsup/__pycache__/swav.cpython-310.pyc | Bin 0 -> 1863 bytes mmpretrain/models/selfsup/barlowtwins.py | 42 + mmpretrain/models/selfsup/base.py | 179 + mmpretrain/models/selfsup/beit.py | 357 + mmpretrain/models/selfsup/byol.py | 89 + mmpretrain/models/selfsup/cae.py | 472 + mmpretrain/models/selfsup/densecl.py | 203 + mmpretrain/models/selfsup/eva.py | 43 + mmpretrain/models/selfsup/itpn.py | 359 + mmpretrain/models/selfsup/mae.py | 416 + mmpretrain/models/selfsup/maskfeat.py | 336 + mmpretrain/models/selfsup/mff.py | 194 + mmpretrain/models/selfsup/milan.py | 202 + mmpretrain/models/selfsup/mixmim.py | 263 + mmpretrain/models/selfsup/moco.py | 137 + mmpretrain/models/selfsup/mocov3.py | 215 + mmpretrain/models/selfsup/simclr.py | 98 + mmpretrain/models/selfsup/simmim.py | 194 + mmpretrain/models/selfsup/simsiam.py | 43 + mmpretrain/models/selfsup/spark.py | 163 + mmpretrain/models/selfsup/swav.py | 49 + mmpretrain/models/tta/__init__.py | 4 + .../tta/__pycache__/__init__.cpython-310.pyc | Bin 0 -> 217 bytes .../tta/__pycache__/score_tta.cpython-310.pyc | Bin 0 -> 1559 bytes mmpretrain/models/tta/score_tta.py | 36 + mmpretrain/models/utils/__init__.py | 102 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 2783 bytes .../__pycache__/attention.cpython-310.pyc | Bin 0 -> 32899 bytes .../__pycache__/batch_shuffle.cpython-310.pyc | Bin 0 -> 1588 bytes .../__pycache__/box_utils.cpython-310.pyc | Bin 0 -> 1987 bytes .../channel_shuffle.cpython-310.pyc | Bin 0 -> 930 bytes .../clip_generator_helper.cpython-310.pyc | Bin 0 -> 11748 bytes .../data_preprocessor.cpython-310.pyc | Bin 0 -> 19665 bytes .../utils/__pycache__/ema.cpython-310.pyc | Bin 0 -> 3577 bytes 
.../utils/__pycache__/embed.cpython-310.pyc | Bin 0 -> 12290 bytes .../utils/__pycache__/helpers.cpython-310.pyc | Bin 0 -> 1568 bytes .../inverted_residual.cpython-310.pyc | Bin 0 -> 3447 bytes .../__pycache__/layer_scale.cpython-310.pyc | Bin 0 -> 1757 bytes .../make_divisible.cpython-310.pyc | Bin 0 -> 1031 bytes .../utils/__pycache__/norm.cpython-310.pyc | Bin 0 -> 4563 bytes .../position_encoding.cpython-310.pyc | Bin 0 -> 7873 bytes .../res_layer_extra_norm.cpython-310.pyc | Bin 0 -> 1310 bytes .../__pycache__/se_layer.cpython-310.pyc | Bin 0 -> 2908 bytes .../sparse_modules.cpython-310.pyc | Bin 0 -> 4303 bytes .../__pycache__/swiglu_ffn.cpython-310.pyc | Bin 0 -> 2754 bytes .../vector_quantizer.cpython-310.pyc | Bin 0 -> 7113 bytes mmpretrain/models/utils/attention.py | 1129 ++ .../models/utils/batch_augments/__init__.py | 7 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 362 bytes .../__pycache__/cutmix.cpython-310.pyc | Bin 0 -> 6124 bytes .../__pycache__/mixup.cpython-310.pyc | Bin 0 -> 2600 bytes .../__pycache__/resizemix.cpython-310.pyc | Bin 0 -> 4121 bytes .../__pycache__/wrapper.cpython-310.pyc | Bin 0 -> 2636 bytes .../models/utils/batch_augments/cutmix.py | 157 + .../models/utils/batch_augments/mixup.py | 65 + .../models/utils/batch_augments/resizemix.py | 95 + .../models/utils/batch_augments/wrapper.py | 74 + mmpretrain/models/utils/batch_shuffle.py | 66 + mmpretrain/models/utils/box_utils.py | 56 + mmpretrain/models/utils/channel_shuffle.py | 29 + .../models/utils/clip_generator_helper.py | 394 + mmpretrain/models/utils/data_preprocessor.py | 620 ++ mmpretrain/models/utils/ema.py | 87 + mmpretrain/models/utils/embed.py | 423 + mmpretrain/models/utils/helpers.py | 53 + mmpretrain/models/utils/huggingface.py | 100 + mmpretrain/models/utils/inverted_residual.py | 125 + mmpretrain/models/utils/layer_scale.py | 40 + mmpretrain/models/utils/make_divisible.py | 25 + mmpretrain/models/utils/norm.py | 133 + mmpretrain/models/utils/position_encoding.py | 247 + .../models/utils/res_layer_extra_norm.py | 31 + mmpretrain/models/utils/se_layer.py | 80 + mmpretrain/models/utils/sparse_modules.py | 149 + mmpretrain/models/utils/swiglu_ffn.py | 98 + mmpretrain/models/utils/tokenizer.py | 188 + mmpretrain/models/utils/vector_quantizer.py | 232 + mmpretrain/registry.py | 195 + mmpretrain/structures/__init__.py | 10 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 489 bytes .../__pycache__/data_sample.cpython-310.pyc | Bin 0 -> 5708 bytes .../multi_task_data_sample.cpython-310.pyc | Bin 0 -> 538 bytes .../__pycache__/utils.cpython-310.pyc | Bin 0 -> 4869 bytes mmpretrain/structures/data_sample.py | 167 + .../structures/multi_task_data_sample.py | 10 + mmpretrain/structures/utils.py | 153 + mmpretrain/utils/__init__.py | 12 + .../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 481 bytes .../utils/__pycache__/analyze.cpython-310.pyc | Bin 0 -> 1370 bytes .../__pycache__/collect_env.cpython-310.pyc | Bin 0 -> 649 bytes .../__pycache__/dependency.cpython-310.pyc | Bin 0 -> 2974 bytes .../utils/__pycache__/misc.cpython-310.pyc | Bin 0 -> 591 bytes .../__pycache__/progress.cpython-310.pyc | Bin 0 -> 1491 bytes .../__pycache__/setup_env.cpython-310.pyc | Bin 0 -> 1572 bytes mmpretrain/utils/analyze.py | 43 + mmpretrain/utils/collect_env.py | 16 + mmpretrain/utils/dependency.py | 82 + mmpretrain/utils/misc.py | 18 + mmpretrain/utils/progress.py | 40 + mmpretrain/utils/setup_env.py | 41 + mmpretrain/version.py | 28 + mmpretrain/visualization/__init__.py | 5 + 
.../__pycache__/__init__.cpython-310.pyc | Bin 0 -> 308 bytes .../__pycache__/utils.cpython-310.pyc | Bin 0 -> 2196 bytes .../__pycache__/visualizer.cpython-310.pyc | Bin 0 -> 26347 bytes mmpretrain/visualization/utils.py | 60 + mmpretrain/visualization/visualizer.py | 777 ++ model-index.yml | 85 + projects/README.md | 21 + projects/dino/README.md | 26 + ..._vit-base-p16_8xb64-amp-coslr-100e_in1k.py | 104 + projects/dino/dataset/__init__.py | 1 + projects/dino/dataset/transform/__init__.py | 3 + projects/dino/dataset/transform/processing.py | 91 + projects/dino/engine/__init__.py | 1 + projects/dino/engine/hooks/__init__.py | 3 + .../hooks/dino_teacher_temp_warmup_hook.py | 33 + projects/dino/models/__init__.py | 3 + projects/dino/models/algorithm/__init__.py | 3 + projects/dino/models/algorithm/dino.py | 82 + projects/dino/models/head/__init__.py | 3 + projects/dino/models/head/dino_head.py | 69 + projects/dino/models/neck/__init__.py | 3 + projects/dino/models/neck/dino_neck.py | 41 + projects/dino/tools/dist_train.sh | 19 + projects/dino/tools/slurm_train.sh | 23 + projects/dino/tools/train.py | 104 + projects/example_project/README.md | 128 + .../configs/examplenet_8xb32_in1k.py | 10 + projects/example_project/models/__init__.py | 3 + .../example_project/models/example_net.py | 31 + projects/fgia_accv2022_1st/README.md | 143 + ...t-large-p16_8xb512-amp-coslr-1600e_in1k.py | 107 + projects/gradio_demo/README.md | 44 + projects/gradio_demo/conversation.py | 137 + projects/gradio_demo/launch.py | 467 + projects/gradio_demo/minigpt4_demo.py | 144 + projects/internimage_classification/README.md | 121 + .../configs/_base_.py | 113 + .../internimage-base_8xb128_in1k-224.py | 13 + .../internimage-giant_8xb128_in1k-512.py | 55 + .../internimage-huge_8xb128_in1k-640.py | 55 + .../internimage-large_8xb128_in1k-384.py | 51 + .../internimage-small_8xb128_in1k-224.py | 11 + .../internimage-tiny_8xb128_in1k-224.py | 8 + .../internimage-xlagre_8xb128_in1k-384.py | 50 + .../models/__init__.py | 4 + .../models/intern_image.py | 636 ++ .../ops_dcnv3/functions/__init__.py | 10 + .../ops_dcnv3/functions/dcnv3_func.py | 248 + .../ops_dcnv3/make.sh | 11 + .../ops_dcnv3/modules/__init__.py | 10 + .../ops_dcnv3/modules/dcnv3.py | 360 + .../ops_dcnv3/setup.py | 72 + .../ops_dcnv3/src/cpu/dcnv3_cpu.cpp | 37 + .../ops_dcnv3/src/cpu/dcnv3_cpu.h | 31 + .../ops_dcnv3/src/cuda/dcnv3_cuda.cu | 174 + .../ops_dcnv3/src/cuda/dcnv3_cuda.h | 31 + .../ops_dcnv3/src/cuda/dcnv3_im2col_cuda.cuh | 1094 ++ .../ops_dcnv3/src/dcnv3.h | 59 + .../ops_dcnv3/src/vision.cpp | 17 + .../ops_dcnv3/test.py | 255 + projects/maskfeat_video/README.md | 275 + ...t_mvit-small_16xb32-amp-coslr-300e_k400.py | 101 + ...at_mvit-small_8xb32-amp-coslr-300e_k400.py | 5 + .../mvit-small_ft-8xb16-coslr-100e_k400.py | 157 + projects/maskfeat_video/models/__init__.py | 9 + .../maskfeat_video/models/hog_generator_3d.py | 39 + projects/maskfeat_video/models/maskfeat.py | 59 + .../maskfeat_video/models/maskfeat_mvit.py | 146 + projects/maskfeat_video/models/transforms.py | 130 + projects/maskfeat_video/tools/dist_train.sh | 19 + projects/maskfeat_video/tools/slurm_train.sh | 23 + projects/maskfeat_video/tools/train.py | 93 + requirements.txt | 3 + requirements/docs.txt | 10 + requirements/mminstall.txt | 2 + requirements/multimodal.txt | 2 + requirements/optional.txt | 4 + requirements/readthedocs.txt | 7 + requirements/runtime.txt | 7 + requirements/tests.txt | 3 + resnet50-test.py | 25 + resnet50_imagenet200_8b32.py | 22 + resources/miaomiao_qrcode.jpg | 
Bin 0 -> 225737 bytes resources/mmpt-logo.png | Bin 0 -> 28982 bytes resources/xiaozhushou_weixin_qrcode.jpeg | Bin 0 -> 42538 bytes resources/zhihu_qrcode.jpg | Bin 0 -> 397245 bytes setup.cfg | 33 + setup.py | 198 + swin-b-test.py | 29 + swin-l-test.py | 27 + tests/__init__.py | 1 + tests/data/color.jpg | Bin 0 -> 39779 bytes tests/data/dataset/3.jpeg | 0 tests/data/dataset/a/1.JPG | 1 + tests/data/dataset/ann.json | 28 + tests/data/dataset/ann.txt | 3 + tests/data/dataset/ann_without_labels.txt | 3 + tests/data/dataset/b/2.jpeg | 1 + tests/data/dataset/b/subb/3.jpg | 1 + tests/data/dataset/classes.txt | 2 + tests/data/dataset/multi-task.json | 40 + tests/data/dataset/multi_label_ann.json | 28 + tests/data/gray.jpg | Bin 0 -> 39088 bytes tests/data/meta.yml | 13 + tests/data/retinanet.py | 83 + tests/data/vis_data.json | 21 + tests/test_apis/test_inference.py | 116 + tests/test_apis/test_model.py | 88 + tests/test_datasets/test_dataset_utils.py | 39 + tests/test_datasets/test_datasets.py | 2201 ++++ .../test_samplers/test_repeat_aug.py | 98 + .../test_transforms/test_auto_augment.py | 1330 +++ .../test_transforms/test_formatting.py | 219 + .../test_transforms/test_processing.py | 959 ++ .../test_transforms/test_wrappers.py | 43 + .../test_hooks/test_arcface_hooks.py | 102 + .../test_hooks/test_class_num_check_hook.py | 52 + .../test_hooks/test_densecl_hook.py | 113 + tests/test_engine/test_hooks/test_ema_hook.py | 224 + .../test_hooks/test_precise_bn_hook.py | 232 + .../test_hooks/test_retrievers_hooks.py | 34 + .../test_hooks/test_simsiam_hook.py | 117 + .../test_engine/test_hooks/test_swav_hook.py | 127 + .../test_hooks/test_switch_recipe_hook.py | 371 + .../test_hooks/test_visualization_hook.py | 148 + ...t_layer_decay_optim_wrapper_constructor.py | 107 + .../test_evaluation/test_metrics/test_gqa.py | 30 + .../test_metrics/test_metric_utils.py | 33 + .../test_metrics/test_multi_label.py | 388 + .../test_metrics/test_multi_task_metrics.py | 134 + .../test_metrics/test_retrieval.py | 227 + .../test_metrics/test_scienceqa.py | 44 + .../test_metrics/test_shape_bias_metric.py | 15 + .../test_metrics/test_single_label.py | 409 + .../test_metrics/test_voc_metrics.py | 228 + tests/test_models/test_backbones/__init__.py | 1 + tests/test_models/test_backbones/test_beit.py | 122 + .../test_backbones/test_conformer.py | 112 + .../test_backbones/test_convmixer.py | 85 + .../test_backbones/test_convnext.py | 106 + .../test_models/test_backbones/test_cspnet.py | 147 + .../test_models/test_backbones/test_davit.py | 110 + tests/test_models/test_backbones/test_deit.py | 111 + .../test_models/test_backbones/test_deit3.py | 167 + .../test_backbones/test_densenet.py | 95 + .../test_backbones/test_edgenext.py | 84 + .../test_backbones/test_efficientformer.py | 199 + .../test_backbones/test_efficientnet.py | 144 + .../test_backbones/test_efficientnet_v2.py | 150 + .../test_models/test_backbones/test_eva02.py | 143 + .../test_models/test_backbones/test_hornet.py | 174 + .../test_models/test_backbones/test_hrnet.py | 93 + .../test_backbones/test_inception_v3.py | 56 + .../test_models/test_backbones/test_levit.py | 169 + .../test_models/test_backbones/test_mixmim.py | 40 + .../test_backbones/test_mlp_mixer.py | 119 + .../test_backbones/test_mobilenet_v2.py | 259 + .../test_backbones/test_mobilenet_v3.py | 175 + .../test_backbones/test_mobileone.py | 337 + .../test_backbones/test_mobilevit.py | 86 + tests/test_models/test_backbones/test_mvit.py | 130 + .../test_backbones/test_poolformer.py | 143 + 
.../test_models/test_backbones/test_regnet.py | 94 + .../test_backbones/test_replknet.py | 304 + .../test_models/test_backbones/test_repmlp.py | 173 + .../test_models/test_backbones/test_repvgg.py | 351 + .../test_backbones/test_res2net.py | 71 + .../test_backbones/test_resnest.py | 44 + .../test_models/test_backbones/test_resnet.py | 618 ++ .../test_backbones/test_resnet_cifar.py | 67 + .../test_backbones/test_resnext.py | 61 + .../test_models/test_backbones/test_revvit.py | 131 + .../test_backbones/test_riformer.py | 168 + .../test_backbones/test_seresnet.py | 247 + .../test_backbones/test_seresnext.py | 74 + .../test_backbones/test_shufflenet_v1.py | 246 + .../test_backbones/test_shufflenet_v2.py | 205 + .../test_backbones/test_swin_transformer.py | 255 + .../test_swin_transformer_v2.py | 243 + .../test_backbones/test_t2t_vit.py | 144 + .../test_backbones/test_timm_backbone.py | 216 + .../test_backbones/test_tinyvit.py | 80 + tests/test_models/test_backbones/test_tnt.py | 50 + .../test_models/test_backbones/test_twins.py | 243 + tests/test_models/test_backbones/test_van.py | 188 + tests/test_models/test_backbones/test_vgg.py | 139 + .../test_backbones/test_vision_transformer.py | 176 + tests/test_models/test_backbones/test_xcit.py | 41 + tests/test_models/test_backbones/utils.py | 31 + tests/test_models/test_classifiers.py | 471 + tests/test_models/test_heads.py | 736 ++ tests/test_models/test_losses.py | 403 + tests/test_models/test_models.py | 95 + tests/test_models/test_necks.py | 179 + tests/test_models/test_peft/test_lora.py | 122 + tests/test_models/test_retrievers.py | 273 + .../test_selfsup/test_barlowtwins.py | 49 + tests/test_models/test_selfsup/test_beit.py | 169 + tests/test_models/test_selfsup/test_byol.py | 59 + tests/test_models/test_selfsup/test_cae.py | 78 + .../test_models/test_selfsup/test_densecl.py | 62 + tests/test_models/test_selfsup/test_eva.py | 51 + tests/test_models/test_selfsup/test_itpn.py | 57 + tests/test_models/test_selfsup/test_mae.py | 61 + .../test_models/test_selfsup/test_maskfeat.py | 66 + tests/test_models/test_selfsup/test_mff.py | 63 + tests/test_models/test_selfsup/test_milan.py | 69 + tests/test_models/test_selfsup/test_mixmim.py | 71 + tests/test_models/test_selfsup/test_moco.py | 58 + tests/test_models/test_selfsup/test_mocov3.py | 91 + tests/test_models/test_selfsup/test_simclr.py | 52 + tests/test_models/test_selfsup/test_simmim.py | 70 + .../test_models/test_selfsup/test_simsiam.py | 64 + tests/test_models/test_selfsup/test_spark.py | 51 + tests/test_models/test_selfsup/test_swav.py | 61 + .../test_selfsup/test_target_generators.py | 72 + tests/test_models/test_tta.py | 67 + .../test_models/test_utils/test_attention.py | 189 + .../test_utils/test_batch_augments.py | 166 + .../test_utils/test_data_preprocessor.py | 248 + tests/test_models/test_utils/test_ema.py | 48 + tests/test_models/test_utils/test_embed.py | 88 + .../test_utils/test_inverted_residual.py | 82 + .../test_utils/test_layer_scale.py | 47 + tests/test_models/test_utils/test_misc.py | 59 + tests/test_models/test_utils/test_norm.py | 60 + .../test_utils/test_position_encoding.py | 21 + tests/test_models/test_utils/test_se.py | 95 + .../test_models/test_utils/test_swiglu_ffn.py | 53 + tests/test_structures/test_datasample.py | 108 + tests/test_structures/test_utils.py | 63 + tests/test_tools.py | 418 + tests/test_utils/test_analyze.py | 43 + tests/test_utils/test_setup_env.py | 40 + tests/test_utils/test_version_utils.py | 21 + tests/test_visualization/test_visualizer.py | 200 + 
tools/analysis_tools/analyze_logs.py | 218 + tools/analysis_tools/analyze_results.py | 121 + tools/analysis_tools/confusion_matrix.py | 108 + tools/analysis_tools/eval_metric.py | 62 + tools/analysis_tools/get_flops.py | 61 + tools/analysis_tools/shape_bias.py | 284 + tools/analysis_tools/utils.py | 277 + tools/benchmarks/mmdetection/mim_dist_test.sh | 16 + .../mmdetection/mim_dist_train_c4.sh | 19 + .../mmdetection/mim_dist_train_fpn.sh | 16 + .../benchmarks/mmdetection/mim_slurm_test.sh | 23 + .../mmdetection/mim_slurm_train_c4.sh | 27 + .../mmdetection/mim_slurm_train_fpn.sh | 24 + .../mmsegmentation/mim_dist_test.sh | 16 + .../mmsegmentation/mim_dist_train.sh | 17 + .../mmsegmentation/mim_slurm_test.sh | 23 + .../mmsegmentation/mim_slurm_train.sh | 25 + .../convert_flickr30k_ann.py | 56 + .../convert_imagenet_subsets.py | 48 + .../dataset_converters/convert_inaturalist.py | 32 + .../dataset_converters/odl_cub_preprocess.sh | 15 + .../odl_imagenet1k_preprocess.sh | 22 + tools/dist_test.sh | 22 + tools/dist_train.sh | 19 + tools/kfold-cross-valid.py | 254 + tools/misc/print_config.py | 38 + tools/misc/verify_dataset.py | 145 + tools/model_converters/clip_to_mmpretrain.py | 72 + .../convnext_to_mmpretrain.py | 63 + tools/model_converters/davit_to_mmpretrain.py | 87 + tools/model_converters/deit3_to_mmpretrain.py | 75 + .../edgenext_to_mmpretrain.py | 74 + .../efficientnet_to_mmpretrain.py | 222 + .../efficientnetv2_to_mmpretrain.py | 100 + tools/model_converters/eva02_to_mmpretrain.py | 153 + tools/model_converters/eva_to_mmpretrain.py | 76 + tools/model_converters/glip_to_mmpretrain.py | 76 + tools/model_converters/hornet2mmpretrain.py | 62 + tools/model_converters/levit2mmpretrain.py | 80 + tools/model_converters/llava-delta2mmpre.py | 79 + tools/model_converters/merge_lora_weight.py | 90 + .../model_converters/mixmim_to_mmpretrain.py | 99 + .../mlpmixer_to_mmpretrain.py | 58 + .../mobilenetv2_to_mmpretrain.py | 135 + tools/model_converters/ofa.py | 111 + .../openai-clip_to_mmpretrain-clip.py | 77 + tools/model_converters/otter2mmpre.py | 66 + tools/model_converters/publish_model.py | 108 + tools/model_converters/ram2mmpretrain.py | 117 + .../model_converters/reparameterize_model.py | 57 + .../replknet_to_mmpretrain.py | 58 + .../model_converters/repvgg_to_mmpretrain.py | 60 + .../model_converters/revvit_to_mmpretrain.py | 99 + .../shufflenetv2_to_mmpretrain.py | 113 + .../model_converters/tinyvit_to_mmpretrain.py | 61 + .../torchvision_to_mmpretrain.py | 63 + tools/model_converters/twins2mmpretrain.py | 73 + tools/model_converters/van2mmpretrain.py | 66 + tools/model_converters/vgg_to_mmpretrain.py | 118 + tools/model_converters/vig_to_mmpretrain.py | 98 + tools/slurm_test.sh | 24 + tools/slurm_train.sh | 24 + tools/test.py | 193 + tools/torchserve/mmpretrain2torchserve.py | 112 + tools/torchserve/mmpretrain_handler.py | 68 + tools/torchserve/test_torchserver.py | 43 + tools/train.py | 162 + tools/visualization/browse_dataset.py | 253 + tools/visualization/vis_cam.py | 274 + tools/visualization/vis_scheduler.py | 280 + tools/visualization/vis_tsne.py | 267 + vgg16_8xb32_in1k.py | 18 + vit-base-p16_32xb128-mae_in200.py | 74 + vit-large-p16-64xb64-test.py | 18 + vit-large-p16_32xb128-mae_in200.py | 74 + 2199 files changed, 199398 insertions(+) create mode 100644 CITATION.cff create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 configs/_base_/datasets/cifar100_bs16.py create mode 100644 configs/_base_/datasets/cifar10_bs16.py 
create mode 100644 configs/_base_/datasets/coco_caption.py create mode 100644 configs/_base_/datasets/coco_okvqa.py create mode 100644 configs/_base_/datasets/coco_retrieval.py create mode 100644 configs/_base_/datasets/coco_vg_vqa.py create mode 100644 configs/_base_/datasets/coco_vqa.py create mode 100644 configs/_base_/datasets/cub_bs8_384.py create mode 100644 configs/_base_/datasets/cub_bs8_448.py create mode 100644 configs/_base_/datasets/flickr30k_caption.py create mode 100644 configs/_base_/datasets/flickr30k_retrieval.py create mode 100644 configs/_base_/datasets/gqa.py create mode 100644 configs/_base_/datasets/imagenet21k_bs128.py create mode 100644 configs/_base_/datasets/imagenet_bs128_mbv3.py create mode 100644 configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py create mode 100644 configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py create mode 100644 configs/_base_/datasets/imagenet_bs128_revvit_224.py create mode 100644 configs/_base_/datasets/imagenet_bs128_riformer_medium_384.py create mode 100644 configs/_base_/datasets/imagenet_bs128_riformer_small_384.py create mode 100644 configs/_base_/datasets/imagenet_bs128_vig_224.py create mode 100644 configs/_base_/datasets/imagenet_bs16_eva_196.py create mode 100644 configs/_base_/datasets/imagenet_bs16_eva_336.py create mode 100644 configs/_base_/datasets/imagenet_bs16_eva_448.py create mode 100644 configs/_base_/datasets/imagenet_bs16_eva_560.py create mode 100644 configs/_base_/datasets/imagenet_bs16_pil_bicubic_384.py create mode 100644 configs/_base_/datasets/imagenet_bs256_beitv2.py create mode 100644 configs/_base_/datasets/imagenet_bs256_davit_224.py create mode 100644 configs/_base_/datasets/imagenet_bs256_itpn.py create mode 100644 configs/_base_/datasets/imagenet_bs256_levit_224.py create mode 100644 configs/_base_/datasets/imagenet_bs256_rsb_a12.py create mode 100644 configs/_base_/datasets/imagenet_bs256_rsb_a3.py create mode 100644 configs/_base_/datasets/imagenet_bs256_simmim_192.py create mode 100644 configs/_base_/datasets/imagenet_bs256_swin_192.py create mode 100644 configs/_base_/datasets/imagenet_bs32.py create mode 100644 configs/_base_/datasets/imagenet_bs32_byol.py create mode 100644 configs/_base_/datasets/imagenet_bs32_mocov2.py create mode 100644 configs/_base_/datasets/imagenet_bs32_pil_bicubic.py create mode 100644 configs/_base_/datasets/imagenet_bs32_pil_resize.py create mode 100644 configs/_base_/datasets/imagenet_bs32_simclr.py create mode 100644 configs/_base_/datasets/imagenet_bs512_mae.py create mode 100644 configs/_base_/datasets/imagenet_bs512_mocov3.py create mode 100644 configs/_base_/datasets/imagenet_bs64.py create mode 100644 configs/_base_/datasets/imagenet_bs64_autoaug.py create mode 100644 configs/_base_/datasets/imagenet_bs64_clip_224.py create mode 100644 configs/_base_/datasets/imagenet_bs64_clip_384.py create mode 100644 configs/_base_/datasets/imagenet_bs64_clip_448.py create mode 100644 configs/_base_/datasets/imagenet_bs64_convmixer_224.py create mode 100644 configs/_base_/datasets/imagenet_bs64_deit3_224.py create mode 100644 configs/_base_/datasets/imagenet_bs64_deit3_384.py create mode 100644 configs/_base_/datasets/imagenet_bs64_edgenext_256.py create mode 100644 configs/_base_/datasets/imagenet_bs64_hivit_224.py create mode 100644 configs/_base_/datasets/imagenet_bs64_mixer_224.py create mode 100644 configs/_base_/datasets/imagenet_bs64_pil_resize.py create mode 100644 configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py create mode 100644 
configs/_base_/datasets/imagenet_bs64_swin_224.py create mode 100644 configs/_base_/datasets/imagenet_bs64_swin_256.py create mode 100644 configs/_base_/datasets/imagenet_bs64_swin_384.py create mode 100644 configs/_base_/datasets/imagenet_bs64_t2t_224.py create mode 100644 configs/_base_/datasets/imagenet_bs8_pil_bicubic_320.py create mode 100644 configs/_base_/datasets/inshop_bs32_448.py create mode 100644 configs/_base_/datasets/nlvr2.py create mode 100644 configs/_base_/datasets/nocaps.py create mode 100644 configs/_base_/datasets/ocrvqa.py create mode 100644 configs/_base_/datasets/pipelines/auto_aug.py create mode 100644 configs/_base_/datasets/pipelines/rand_aug.py create mode 100644 configs/_base_/datasets/refcoco.py create mode 100644 configs/_base_/datasets/tiny_imagenet_bs32.py create mode 100644 configs/_base_/datasets/tiny_imagenet_bs32_pil_resize.py create mode 100644 configs/_base_/datasets/tiny_imagenet_bs64_pil_resize_autoaug.py create mode 100644 configs/_base_/datasets/tiny_imagenet_bs64_swin_224.py create mode 100644 configs/_base_/datasets/vizwiz.py create mode 100644 configs/_base_/datasets/voc_bs16.py create mode 100644 configs/_base_/datasets/vsr.py create mode 100644 configs/_base_/default_runtime.py create mode 100644 configs/_base_/models/conformer/base-p16.py create mode 100644 configs/_base_/models/conformer/small-p16.py create mode 100644 configs/_base_/models/conformer/small-p32.py create mode 100644 configs/_base_/models/conformer/tiny-p16.py create mode 100644 configs/_base_/models/convmixer/convmixer-1024-20.py create mode 100644 configs/_base_/models/convmixer/convmixer-1536-20.py create mode 100644 configs/_base_/models/convmixer/convmixer-768-32.py create mode 100644 configs/_base_/models/convnext/convnext-base.py create mode 100644 configs/_base_/models/convnext/convnext-large.py create mode 100644 configs/_base_/models/convnext/convnext-small.py create mode 100644 configs/_base_/models/convnext/convnext-tiny.py create mode 100644 configs/_base_/models/convnext/convnext-xlarge.py create mode 100644 configs/_base_/models/convnext_v2/atto.py create mode 100644 configs/_base_/models/convnext_v2/base.py create mode 100644 configs/_base_/models/convnext_v2/femto.py create mode 100644 configs/_base_/models/convnext_v2/huge.py create mode 100644 configs/_base_/models/convnext_v2/large.py create mode 100644 configs/_base_/models/convnext_v2/nano.py create mode 100644 configs/_base_/models/convnext_v2/pico.py create mode 100644 configs/_base_/models/convnext_v2/tiny.py create mode 100644 configs/_base_/models/davit/davit-base.py create mode 100644 configs/_base_/models/davit/davit-small.py create mode 100644 configs/_base_/models/davit/davit-tiny.py create mode 100644 configs/_base_/models/deit3/deit3-base-p16-224.py create mode 100644 configs/_base_/models/deit3/deit3-base-p16-384.py create mode 100644 configs/_base_/models/deit3/deit3-huge-p14-224.py create mode 100644 configs/_base_/models/deit3/deit3-large-p16-224.py create mode 100644 configs/_base_/models/deit3/deit3-large-p16-384.py create mode 100644 configs/_base_/models/deit3/deit3-medium-p16-224.py create mode 100644 configs/_base_/models/deit3/deit3-small-p16-224.py create mode 100644 configs/_base_/models/deit3/deit3-small-p16-384.py create mode 100644 configs/_base_/models/densenet/densenet121.py create mode 100644 configs/_base_/models/densenet/densenet161.py create mode 100644 configs/_base_/models/densenet/densenet169.py create mode 100644 configs/_base_/models/densenet/densenet201.py create 
mode 100644 configs/_base_/models/edgenext/edgenext-base.py create mode 100644 configs/_base_/models/edgenext/edgenext-small.py create mode 100644 configs/_base_/models/edgenext/edgenext-xsmall.py create mode 100644 configs/_base_/models/edgenext/edgenext-xxsmall.py create mode 100644 configs/_base_/models/efficientformer-l1.py create mode 100644 configs/_base_/models/efficientnet_b0.py create mode 100644 configs/_base_/models/efficientnet_b1.py create mode 100644 configs/_base_/models/efficientnet_b2.py create mode 100644 configs/_base_/models/efficientnet_b3.py create mode 100644 configs/_base_/models/efficientnet_b4.py create mode 100644 configs/_base_/models/efficientnet_b5.py create mode 100644 configs/_base_/models/efficientnet_b6.py create mode 100644 configs/_base_/models/efficientnet_b7.py create mode 100644 configs/_base_/models/efficientnet_b8.py create mode 100644 configs/_base_/models/efficientnet_em.py create mode 100644 configs/_base_/models/efficientnet_es.py create mode 100644 configs/_base_/models/efficientnet_l2.py create mode 100644 configs/_base_/models/efficientnet_v2/efficientnetv2_b0.py create mode 100644 configs/_base_/models/efficientnet_v2/efficientnetv2_b1.py create mode 100644 configs/_base_/models/efficientnet_v2/efficientnetv2_b2.py create mode 100644 configs/_base_/models/efficientnet_v2/efficientnetv2_b3.py create mode 100644 configs/_base_/models/efficientnet_v2/efficientnetv2_l.py create mode 100644 configs/_base_/models/efficientnet_v2/efficientnetv2_m.py create mode 100644 configs/_base_/models/efficientnet_v2/efficientnetv2_s.py create mode 100644 configs/_base_/models/efficientnet_v2/efficientnetv2_xl.py create mode 100644 configs/_base_/models/eva/eva-g.py create mode 100644 configs/_base_/models/eva/eva-l.py create mode 100644 configs/_base_/models/hivit/base_224.py create mode 100644 configs/_base_/models/hivit/small_224.py create mode 100644 configs/_base_/models/hivit/tiny_224.py create mode 100644 configs/_base_/models/hornet/hornet-base-gf.py create mode 100644 configs/_base_/models/hornet/hornet-base.py create mode 100644 configs/_base_/models/hornet/hornet-large-gf.py create mode 100644 configs/_base_/models/hornet/hornet-large-gf384.py create mode 100644 configs/_base_/models/hornet/hornet-large.py create mode 100644 configs/_base_/models/hornet/hornet-small-gf.py create mode 100644 configs/_base_/models/hornet/hornet-small.py create mode 100644 configs/_base_/models/hornet/hornet-tiny-gf.py create mode 100644 configs/_base_/models/hornet/hornet-tiny.py create mode 100644 configs/_base_/models/hrnet/hrnet-w18.py create mode 100644 configs/_base_/models/hrnet/hrnet-w30.py create mode 100644 configs/_base_/models/hrnet/hrnet-w32.py create mode 100644 configs/_base_/models/hrnet/hrnet-w40.py create mode 100644 configs/_base_/models/hrnet/hrnet-w44.py create mode 100644 configs/_base_/models/hrnet/hrnet-w48.py create mode 100644 configs/_base_/models/hrnet/hrnet-w64.py create mode 100644 configs/_base_/models/inception_v3.py create mode 100644 configs/_base_/models/itpn_hivit-base-p16.py create mode 100644 configs/_base_/models/levit-256-p16.py create mode 100644 configs/_base_/models/mae_hivit-base-p16.py create mode 100644 configs/_base_/models/mae_vit-base-p16.py create mode 100644 configs/_base_/models/mixmim/mixmim_base.py create mode 100644 configs/_base_/models/mlp_mixer_base_patch16.py create mode 100644 configs/_base_/models/mlp_mixer_large_patch16.py create mode 100644 configs/_base_/models/mobilenet_v2_1x.py create mode 100644 
configs/_base_/models/mobilenet_v3/mobilenet_v3_large_imagenet.py create mode 100644 configs/_base_/models/mobilenet_v3/mobilenet_v3_small_050_imagenet.py create mode 100644 configs/_base_/models/mobilenet_v3/mobilenet_v3_small_075_imagenet.py create mode 100644 configs/_base_/models/mobilenet_v3/mobilenet_v3_small_cifar.py create mode 100644 configs/_base_/models/mobilenet_v3/mobilenet_v3_small_imagenet.py create mode 100644 configs/_base_/models/mobileone/mobileone_s0.py create mode 100644 configs/_base_/models/mobileone/mobileone_s1.py create mode 100644 configs/_base_/models/mobileone/mobileone_s2.py create mode 100644 configs/_base_/models/mobileone/mobileone_s3.py create mode 100644 configs/_base_/models/mobileone/mobileone_s4.py create mode 100644 configs/_base_/models/mobilevit/mobilevit_s.py create mode 100644 configs/_base_/models/mobilevit/mobilevit_xs.py create mode 100644 configs/_base_/models/mobilevit/mobilevit_xxs.py create mode 100644 configs/_base_/models/mvit/mvitv2-base.py create mode 100644 configs/_base_/models/mvit/mvitv2-large.py create mode 100644 configs/_base_/models/mvit/mvitv2-small.py create mode 100644 configs/_base_/models/mvit/mvitv2-tiny.py create mode 100644 configs/_base_/models/poolformer/poolformer_m36.py create mode 100644 configs/_base_/models/poolformer/poolformer_m48.py create mode 100644 configs/_base_/models/poolformer/poolformer_s12.py create mode 100644 configs/_base_/models/poolformer/poolformer_s24.py create mode 100644 configs/_base_/models/poolformer/poolformer_s36.py create mode 100644 configs/_base_/models/regnet/regnetx_1.6gf.py create mode 100644 configs/_base_/models/regnet/regnetx_12gf.py create mode 100644 configs/_base_/models/regnet/regnetx_3.2gf.py create mode 100644 configs/_base_/models/regnet/regnetx_4.0gf.py create mode 100644 configs/_base_/models/regnet/regnetx_400mf.py create mode 100644 configs/_base_/models/regnet/regnetx_6.4gf.py create mode 100644 configs/_base_/models/regnet/regnetx_8.0gf.py create mode 100644 configs/_base_/models/regnet/regnetx_800mf.py create mode 100644 configs/_base_/models/replknet-31B_in1k.py create mode 100644 configs/_base_/models/replknet-31L_in1k.py create mode 100644 configs/_base_/models/replknet-XL_in1k.py create mode 100644 configs/_base_/models/repmlp-base_224.py create mode 100644 configs/_base_/models/repvgg-A0_in1k.py create mode 100644 configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py create mode 100644 configs/_base_/models/res2net101-w26-s4.py create mode 100644 configs/_base_/models/res2net50-w14-s8.py create mode 100644 configs/_base_/models/res2net50-w26-s4.py create mode 100644 configs/_base_/models/res2net50-w26-s6.py create mode 100644 configs/_base_/models/res2net50-w26-s8.py create mode 100644 configs/_base_/models/res2net50-w48-s2.py create mode 100644 configs/_base_/models/resnest101.py create mode 100644 configs/_base_/models/resnest200.py create mode 100644 configs/_base_/models/resnest269.py create mode 100644 configs/_base_/models/resnest50.py create mode 100644 configs/_base_/models/resnet101.py create mode 100644 configs/_base_/models/resnet101_cifar.py create mode 100644 configs/_base_/models/resnet152.py create mode 100644 configs/_base_/models/resnet152_cifar.py create mode 100644 configs/_base_/models/resnet18.py create mode 100644 configs/_base_/models/resnet18_cifar.py create mode 100644 configs/_base_/models/resnet34.py create mode 100644 configs/_base_/models/resnet34_cifar.py create mode 100644 configs/_base_/models/resnet34_gem.py create mode 100644 
configs/_base_/models/resnet50.py create mode 100644 configs/_base_/models/resnet50_cifar.py create mode 100644 configs/_base_/models/resnet50_cifar_cutmix.py create mode 100644 configs/_base_/models/resnet50_cifar_mixup.py create mode 100644 configs/_base_/models/resnet50_cutmix.py create mode 100644 configs/_base_/models/resnet50_label_smooth.py create mode 100644 configs/_base_/models/resnet50_mixup.py create mode 100644 configs/_base_/models/resnetv1c50.py create mode 100644 configs/_base_/models/resnetv1d101.py create mode 100644 configs/_base_/models/resnetv1d152.py create mode 100644 configs/_base_/models/resnetv1d50.py create mode 100644 configs/_base_/models/resnext101_32x4d.py create mode 100644 configs/_base_/models/resnext101_32x8d.py create mode 100644 configs/_base_/models/resnext152_32x4d.py create mode 100644 configs/_base_/models/resnext50_32x4d.py create mode 100644 configs/_base_/models/revvit/revvit-base.py create mode 100644 configs/_base_/models/revvit/revvit-small.py create mode 100644 configs/_base_/models/seresnet101.py create mode 100644 configs/_base_/models/seresnet50.py create mode 100644 configs/_base_/models/seresnext101_32x4d.py create mode 100644 configs/_base_/models/seresnext50_32x4d.py create mode 100644 configs/_base_/models/shufflenet_v1_1x.py create mode 100644 configs/_base_/models/shufflenet_v2_1x.py create mode 100644 configs/_base_/models/swin_transformer/base_224.py create mode 100644 configs/_base_/models/swin_transformer/base_384.py create mode 100644 configs/_base_/models/swin_transformer/large_224.py create mode 100644 configs/_base_/models/swin_transformer/large_384.py create mode 100644 configs/_base_/models/swin_transformer/small_224.py create mode 100644 configs/_base_/models/swin_transformer/tiny_224.py create mode 100644 configs/_base_/models/swin_transformer/tiny_base_224.py create mode 100644 configs/_base_/models/swin_transformer/tiny_large_224.py create mode 100644 configs/_base_/models/swin_transformer_v2/base_256.py create mode 100644 configs/_base_/models/swin_transformer_v2/base_384.py create mode 100644 configs/_base_/models/swin_transformer_v2/large_256.py create mode 100644 configs/_base_/models/swin_transformer_v2/large_384.py create mode 100644 configs/_base_/models/swin_transformer_v2/small_256.py create mode 100644 configs/_base_/models/swin_transformer_v2/tiny_256.py create mode 100644 configs/_base_/models/t2t-vit-t-14.py create mode 100644 configs/_base_/models/t2t-vit-t-19.py create mode 100644 configs/_base_/models/t2t-vit-t-24.py create mode 100644 configs/_base_/models/tiny-vit-large-p16.py create mode 100644 configs/_base_/models/tinyvit/tinyvit-11m.py create mode 100644 configs/_base_/models/tinyvit/tinyvit-21m.py create mode 100644 configs/_base_/models/tinyvit/tinyvit-5m.py create mode 100644 configs/_base_/models/tnt_s_patch16_224.py create mode 100644 configs/_base_/models/twins_pcpvt_base.py create mode 100644 configs/_base_/models/twins_svt_base.py create mode 100644 configs/_base_/models/van/van_base.py create mode 100644 configs/_base_/models/van/van_large.py create mode 100644 configs/_base_/models/van/van_small.py create mode 100644 configs/_base_/models/van/van_tiny.py create mode 100644 configs/_base_/models/vgg11.py create mode 100644 configs/_base_/models/vgg11bn.py create mode 100644 configs/_base_/models/vgg13.py create mode 100644 configs/_base_/models/vgg13bn.py create mode 100644 configs/_base_/models/vgg16.py create mode 100644 configs/_base_/models/vgg16bn.py create mode 100644 
configs/_base_/models/vgg19.py create mode 100644 configs/_base_/models/vgg19bn.py create mode 100644 configs/_base_/models/vig/pyramid_vig_base.py create mode 100644 configs/_base_/models/vig/pyramid_vig_medium.py create mode 100644 configs/_base_/models/vig/pyramid_vig_small.py create mode 100644 configs/_base_/models/vig/pyramid_vig_tiny.py create mode 100644 configs/_base_/models/vig/vig_base.py create mode 100644 configs/_base_/models/vig/vig_small.py create mode 100644 configs/_base_/models/vig/vig_tiny.py create mode 100644 configs/_base_/models/vit-base-p16.py create mode 100644 configs/_base_/models/vit-base-p32.py create mode 100644 configs/_base_/models/vit-large-p16.py create mode 100644 configs/_base_/models/vit-large-p32.py create mode 100644 configs/_base_/models/wide-resnet50.py create mode 100644 configs/_base_/schedules/cifar10_bs128.py create mode 100644 configs/_base_/schedules/cub_bs64.py create mode 100644 configs/_base_/schedules/imagenet_bs1024_adamw_conformer.py create mode 100644 configs/_base_/schedules/imagenet_bs1024_adamw_hivit.py create mode 100644 configs/_base_/schedules/imagenet_bs1024_adamw_revvit.py create mode 100644 configs/_base_/schedules/imagenet_bs1024_adamw_swin.py create mode 100644 configs/_base_/schedules/imagenet_bs1024_coslr.py create mode 100644 configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py create mode 100644 configs/_base_/schedules/imagenet_bs2048.py create mode 100644 configs/_base_/schedules/imagenet_bs2048_AdamW.py create mode 100644 configs/_base_/schedules/imagenet_bs2048_adamw_levit.py create mode 100644 configs/_base_/schedules/imagenet_bs2048_coslr.py create mode 100644 configs/_base_/schedules/imagenet_bs2048_rsb.py create mode 100644 configs/_base_/schedules/imagenet_bs256.py create mode 100644 configs/_base_/schedules/imagenet_bs256_140e.py create mode 100644 configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py create mode 100644 configs/_base_/schedules/imagenet_bs256_coslr.py create mode 100644 configs/_base_/schedules/imagenet_bs256_coslr_coswd_300e.py create mode 100644 configs/_base_/schedules/imagenet_bs256_epochstep.py create mode 100644 configs/_base_/schedules/imagenet_bs4096_AdamW.py create mode 100644 configs/_base_/schedules/imagenet_lars_coslr_200e.py create mode 100644 configs/_base_/schedules/imagenet_lars_coslr_90e.py create mode 100644 configs/_base_/schedules/imagenet_sgd_coslr_100e.py create mode 100644 configs/_base_/schedules/imagenet_sgd_coslr_200e.py create mode 100644 configs/_base_/schedules/imagenet_sgd_steplr_100e.py create mode 100644 configs/arcface/README.md create mode 100644 configs/arcface/metafile.yml create mode 100644 configs/arcface/resnet50-arcface_8xb32_inshop.py create mode 100644 configs/barlowtwins/README.md create mode 100644 configs/barlowtwins/barlowtwins_resnet50_8xb256-coslr-1000e_in1k.py create mode 100644 configs/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k.py create mode 100644 configs/barlowtwins/benchmarks/resnet50_8xb32-linear-coslr-100e_in1k.py create mode 100644 configs/barlowtwins/metafile.yml create mode 100644 configs/beit/README.md create mode 100644 configs/beit/beit_beit-base-p16_8xb256-amp-coslr-300e_in1k.py create mode 100644 configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py create mode 100644 configs/beit/benchmarks/beit-base-p16_8xb64_in1k.py create mode 100644 configs/beit/metafile.yml create mode 100644 configs/beitv2/README.md create mode 100644 configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-1600e_in1k.py 
create mode 100644 configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k.py create mode 100644 configs/beitv2/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py create mode 100644 configs/beitv2/benchmarks/beit-base-p16_8xb64_in1k.py create mode 100644 configs/beitv2/metafile.yml create mode 100644 configs/blip/README.md create mode 100644 configs/blip/blip-base_8xb16_refcoco.py create mode 100644 configs/blip/blip-base_8xb32_caption.py create mode 100644 configs/blip/blip-base_8xb32_caption_flickr30k.py create mode 100644 configs/blip/blip-base_8xb32_nlvr.py create mode 100644 configs/blip/blip-base_8xb32_nocaps.py create mode 100644 configs/blip/blip-base_8xb32_ocrvqa.py create mode 100644 configs/blip/blip-base_8xb32_okvqa.py create mode 100644 configs/blip/blip-base_8xb32_retrieval.py create mode 100644 configs/blip/blip-base_8xb32_retrieval_flickr30k.py create mode 100644 configs/blip/blip-base_8xb32_vqa.py create mode 100644 configs/blip/metafile.yml create mode 100644 configs/blip2/README.md create mode 100644 configs/blip2/blip2-opt2.7b_8xb16_gqa.py create mode 100644 configs/blip2/blip2-opt2.7b_8xb16_vqa.py create mode 100644 configs/blip2/blip2-opt2.7b_8xb32_caption.py create mode 100644 configs/blip2/blip2_8xb32_retrieval.py create mode 100644 configs/blip2/metafile.yml create mode 100644 configs/byol/README.md create mode 100644 configs/byol/benchmarks/mask-rcnn_r50-c4_ms-1x_coco.py create mode 100644 configs/byol/benchmarks/mask-rcnn_r50_fpn_ms-1x_coco.py create mode 100644 configs/byol/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py create mode 100644 configs/byol/byol_resnet50_16xb256-coslr-200e_in1k.py create mode 100644 configs/byol/metafile.yml create mode 100644 configs/cae/README.md create mode 100644 configs/cae/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py create mode 100644 configs/cae/cae_beit-base-p16_8xb256-amp-coslr-300e_in1k.py create mode 100644 configs/cae/metafile.yml create mode 100644 configs/chinese_clip/README.md create mode 100644 configs/chinese_clip/cn-clip_resnet50_zeroshot-cls_cifar100.py create mode 100644 configs/chinese_clip/cn-clip_vit-base-p16_zeroshot-cls_cifar100.py create mode 100644 configs/chinese_clip/cn-clip_vit-huge-p14_zeroshot-cls_cifar100.py create mode 100644 configs/chinese_clip/cn-clip_vit-large-p14_zeroshot-cls_cifar100.py create mode 100644 configs/chinese_clip/metafile.yml create mode 100644 configs/clip/README.md create mode 100644 configs/clip/clip_vit-base-p16_zeroshot-cls_cifar100.py create mode 100644 configs/clip/clip_vit-base-p16_zeroshot-cls_in1k.py create mode 100644 configs/clip/clip_vit-large-p14_zeroshot-cls_cifar100.py create mode 100644 configs/clip/clip_vit-large-p14_zeroshot-cls_in1k.py create mode 100644 configs/clip/metafile.yml create mode 100644 configs/clip/vit-base-p16_pt-64xb64_in1k-384px.py create mode 100644 configs/clip/vit-base-p16_pt-64xb64_in1k-448px.py create mode 100644 configs/clip/vit-base-p16_pt-64xb64_in1k.py create mode 100644 configs/clip/vit-base-p32_pt-64xb64_in1k-384px.py create mode 100644 configs/clip/vit-base-p32_pt-64xb64_in1k-448px.py create mode 100644 configs/clip/vit-base-p32_pt-64xb64_in1k.py create mode 100644 configs/clip/vit-large-p14_headless.py create mode 100644 configs/conformer/README.md create mode 100644 configs/conformer/conformer-base-p16_8xb128_in1k.py create mode 100644 configs/conformer/conformer-small-p16_8xb128_in1k.py create mode 100644 configs/conformer/conformer-small-p32_8xb128_in1k.py create mode 100644 
configs/conformer/conformer-tiny-p16_8xb128_in1k.py create mode 100644 configs/conformer/metafile.yml create mode 100644 configs/convmixer/README.md create mode 100644 configs/convmixer/convmixer-1024-20_10xb64_in1k.py create mode 100644 configs/convmixer/convmixer-1536-20_10xb64_in1k.py create mode 100644 configs/convmixer/convmixer-768-32_10xb64_in1k.py create mode 100644 configs/convmixer/metafile.yml create mode 100644 configs/convnext/README.md create mode 100644 configs/convnext/convnext-base_32xb128_in1k-384px.py create mode 100644 configs/convnext/convnext-base_32xb128_in1k.py create mode 100644 configs/convnext/convnext-base_32xb128_in21k.py create mode 100644 configs/convnext/convnext-large_64xb64_in1k-384px.py create mode 100644 configs/convnext/convnext-large_64xb64_in1k.py create mode 100644 configs/convnext/convnext-large_64xb64_in21k.py create mode 100644 configs/convnext/convnext-small_32xb128_in1k-384px.py create mode 100644 configs/convnext/convnext-small_32xb128_in1k.py create mode 100644 configs/convnext/convnext-tiny_32xb128_in1k-384px.py create mode 100644 configs/convnext/convnext-tiny_32xb128_in1k.py create mode 100644 configs/convnext/convnext-xlarge_64xb64_in1k-384px.py create mode 100644 configs/convnext/convnext-xlarge_64xb64_in1k.py create mode 100644 configs/convnext/convnext-xlarge_64xb64_in21k.py create mode 100644 configs/convnext/metafile.yml create mode 100644 configs/convnext_v2/README.md create mode 100644 configs/convnext_v2/convnext-v2-atto_32xb32_in1k.py create mode 100644 configs/convnext_v2/convnext-v2-base_32xb32_in1k-384px.py create mode 100644 configs/convnext_v2/convnext-v2-base_32xb32_in1k.py create mode 100644 configs/convnext_v2/convnext-v2-femto_32xb32_in1k.py create mode 100644 configs/convnext_v2/convnext-v2-huge_32xb32_in1k-384px.py create mode 100644 configs/convnext_v2/convnext-v2-huge_32xb32_in1k-512px.py create mode 100644 configs/convnext_v2/convnext-v2-huge_32xb32_in1k.py create mode 100644 configs/convnext_v2/convnext-v2-large_32xb32_in1k-384px.py create mode 100644 configs/convnext_v2/convnext-v2-large_32xb32_in1k.py create mode 100644 configs/convnext_v2/convnext-v2-nano_32xb32_in1k-384px.py create mode 100644 configs/convnext_v2/convnext-v2-nano_32xb32_in1k.py create mode 100644 configs/convnext_v2/convnext-v2-pico_32xb32_in1k.py create mode 100644 configs/convnext_v2/convnext-v2-tiny_32xb32_in1k-384px.py create mode 100644 configs/convnext_v2/convnext-v2-tiny_32xb32_in1k.py create mode 100644 configs/convnext_v2/metafile.yml create mode 100644 configs/cspnet/README.md create mode 100644 configs/cspnet/cspdarknet50_8xb32_in1k.py create mode 100644 configs/cspnet/cspresnet50_8xb32_in1k.py create mode 100644 configs/cspnet/cspresnext50_8xb32_in1k.py create mode 100644 configs/cspnet/metafile.yml create mode 100644 configs/csra/README.md create mode 100644 configs/csra/metafile.yml create mode 100644 configs/csra/resnet101-csra_1xb16_voc07-448px.py create mode 100644 configs/davit/README.md create mode 100644 configs/davit/davit-base_4xb256_in1k.py create mode 100644 configs/davit/davit-small_4xb256_in1k.py create mode 100644 configs/davit/davit-tiny_4xb256_in1k.py create mode 100644 configs/davit/metafile.yml create mode 100644 configs/deit/README.md create mode 100644 configs/deit/deit-base-distilled_16xb32_in1k-384px.py create mode 100644 configs/deit/deit-base-distilled_16xb64_in1k.py create mode 100644 configs/deit/deit-base_16xb32_in1k-384px.py create mode 100644 configs/deit/deit-base_16xb64_in1k.py create mode 100644 
configs/deit/deit-small-distilled_4xb256_in1k.py create mode 100644 configs/deit/deit-small_4xb256_in1k.py create mode 100644 configs/deit/deit-tiny-distilled_4xb256_in1k.py create mode 100644 configs/deit/deit-tiny_4xb256_in1k.py create mode 100644 configs/deit/metafile.yml create mode 100644 configs/deit3/README.md create mode 100644 configs/deit3/deit3-base-p16_64xb32_in1k-384px.py create mode 100644 configs/deit3/deit3-base-p16_64xb64_in1k.py create mode 100644 configs/deit3/deit3-huge-p14_64xb32_in1k.py create mode 100644 configs/deit3/deit3-large-p16_64xb16_in1k-384px.py create mode 100644 configs/deit3/deit3-large-p16_64xb64_in1k.py create mode 100644 configs/deit3/deit3-medium-p16_64xb64_in1k.py create mode 100644 configs/deit3/deit3-small-p16_64xb64_in1k-384px.py create mode 100644 configs/deit3/deit3-small-p16_64xb64_in1k.py create mode 100644 configs/deit3/metafile.yml create mode 100644 configs/densecl/README.md create mode 100644 configs/densecl/benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py create mode 100644 configs/densecl/densecl_resnet50_8xb32-coslr-200e_in1k.py create mode 100644 configs/densecl/metafile.yml create mode 100644 configs/densenet/README.md create mode 100644 configs/densenet/densenet121_4xb256_in1k.py create mode 100644 configs/densenet/densenet161_4xb256_in1k.py create mode 100644 configs/densenet/densenet169_4xb256_in1k.py create mode 100644 configs/densenet/densenet201_4xb256_in1k.py create mode 100644 configs/densenet/metafile.yml create mode 100644 configs/dinov2/README.md create mode 100644 configs/dinov2/metafile.yml create mode 100644 configs/dinov2/vit-base-p14_dinov2-pre_headless.py create mode 100644 configs/dinov2/vit-giant-p14_dinov2-pre_headless.py create mode 100644 configs/dinov2/vit-large-p14_dinov2-pre_headless.py create mode 100644 configs/dinov2/vit-small-p14_dinov2-pre_headless.py create mode 100644 configs/edgenext/README.md create mode 100644 configs/edgenext/edgenext-base_8xb256-usi_in1k.py create mode 100644 configs/edgenext/edgenext-base_8xb256_in1k.py create mode 100644 configs/edgenext/edgenext-small_8xb256-usi_in1k.py create mode 100644 configs/edgenext/edgenext-small_8xb256_in1k.py create mode 100644 configs/edgenext/edgenext-xsmall_8xb256_in1k.py create mode 100644 configs/edgenext/edgenext-xxsmall_8xb256_in1k.py create mode 100644 configs/edgenext/metafile.yml create mode 100644 configs/efficientformer/README.md create mode 100644 configs/efficientformer/efficientformer-l1_8xb128_in1k.py create mode 100644 configs/efficientformer/efficientformer-l3_8xb128_in1k.py create mode 100644 configs/efficientformer/efficientformer-l7_8xb128_in1k.py create mode 100644 configs/efficientformer/metafile.yml create mode 100644 configs/efficientnet/README.md create mode 100644 configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py create mode 100644 configs/efficientnet/efficientnet-b0_8xb32_in1k.py create mode 100644 configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py create mode 100644 configs/efficientnet/efficientnet-b1_8xb32_in1k.py create mode 100644 configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py create mode 100644 configs/efficientnet/efficientnet-b2_8xb32_in1k.py create mode 100644 configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py create mode 100644 configs/efficientnet/efficientnet-b3_8xb32_in1k.py create mode 100644 configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py create mode 100644 configs/efficientnet/efficientnet-b4_8xb32_in1k.py create mode 100644 
configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py create mode 100644 configs/efficientnet/efficientnet-b5_8xb32_in1k.py create mode 100644 configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py create mode 100644 configs/efficientnet/efficientnet-b6_8xb32_in1k.py create mode 100644 configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py create mode 100644 configs/efficientnet/efficientnet-b7_8xb32_in1k.py create mode 100644 configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py create mode 100644 configs/efficientnet/efficientnet-b8_8xb32_in1k.py create mode 100644 configs/efficientnet/efficientnet-em_8xb32-01norm_in1k.py create mode 100644 configs/efficientnet/efficientnet-es_8xb32-01norm_in1k.py create mode 100644 configs/efficientnet/efficientnet-l2_8xb32_in1k-475px.py create mode 100644 configs/efficientnet/efficientnet-l2_8xb8_in1k-800px.py create mode 100644 configs/efficientnet/metafile.yml create mode 100644 configs/efficientnet_v2/README.md create mode 100644 configs/efficientnet_v2/efficientnetv2-b0_8xb32_in1k.py create mode 100644 configs/efficientnet_v2/efficientnetv2-b1_8xb32_in1k.py create mode 100644 configs/efficientnet_v2/efficientnetv2-b2_8xb32_in1k.py create mode 100644 configs/efficientnet_v2/efficientnetv2-b3_8xb32_in1k.py create mode 100644 configs/efficientnet_v2/efficientnetv2-l_8xb32_in1k-480px.py create mode 100644 configs/efficientnet_v2/efficientnetv2-l_8xb32_in21k.py create mode 100644 configs/efficientnet_v2/efficientnetv2-m_8xb32_in1k-480px.py create mode 100644 configs/efficientnet_v2/efficientnetv2-m_8xb32_in21k.py create mode 100644 configs/efficientnet_v2/efficientnetv2-s_8xb32_in1k-384px.py create mode 100644 configs/efficientnet_v2/efficientnetv2-s_8xb32_in21k.py create mode 100644 configs/efficientnet_v2/efficientnetv2-xl_8xb32_in1k-512px.py create mode 100644 configs/efficientnet_v2/efficientnetv2-xl_8xb32_in21k.py create mode 100644 configs/efficientnet_v2/metafile.yml create mode 100644 configs/eva/README.md create mode 100644 configs/eva/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py create mode 100644 configs/eva/benchmarks/vit-base-p16_8xb2048-linear-coslr-100e_in1k.py create mode 100644 configs/eva/eva-g-p14_8xb16_in1k-336px.py create mode 100644 configs/eva/eva-g-p14_8xb16_in1k-560px.py create mode 100644 configs/eva/eva-g-p14_headless.py create mode 100644 configs/eva/eva-g-p16_headless.py create mode 100644 configs/eva/eva-l-p14_8xb16_in1k-196px.py create mode 100644 configs/eva/eva-l-p14_8xb16_in1k-336px.py create mode 100644 configs/eva/eva-l-p14_headless.py create mode 100644 configs/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k.py create mode 100644 configs/eva/metafile.yml create mode 100644 configs/eva02/README.md create mode 100644 configs/eva02/eva02-base-p14_headless.py create mode 100644 configs/eva02/eva02-base-p14_in1k.py create mode 100644 configs/eva02/eva02-large-p14_headless.py create mode 100644 configs/eva02/eva02-large-p14_in1k.py create mode 100644 configs/eva02/eva02-small-p14_headless.py create mode 100644 configs/eva02/eva02-small-p14_in1k.py create mode 100644 configs/eva02/eva02-tiny-p14_headless.py create mode 100644 configs/eva02/eva02-tiny-p14_in1k.py create mode 100644 configs/eva02/metafile.yml create mode 100644 configs/flamingo/README.md create mode 100644 configs/flamingo/flamingo_fewshot_caption.py create mode 100644 configs/flamingo/flamingo_fewshot_vqa.py create mode 100644 configs/flamingo/flamingo_zeroshot_caption.py create mode 100644 configs/flamingo/flamingo_zeroshot_vqa.py 
create mode 100644 configs/flamingo/metafile.yml create mode 100644 configs/glip/README.md create mode 100644 configs/glip/glip-l_headless.py create mode 100644 configs/glip/glip-t_headless.py create mode 100644 configs/glip/metafile.yml create mode 100644 configs/hivit/README.md create mode 100644 configs/hivit/hivit-base-p16_16xb64_in1k.py create mode 100644 configs/hivit/hivit-small-p16_16xb64_in1k.py create mode 100644 configs/hivit/hivit-tiny-p16_16xb64_in1k.py create mode 100644 configs/hivit/metafile.yml create mode 100644 configs/hornet/README.md create mode 100644 configs/hornet/hornet-base-gf_8xb64_in1k.py create mode 100644 configs/hornet/hornet-base_8xb64_in1k.py create mode 100644 configs/hornet/hornet-small-gf_8xb64_in1k.py create mode 100644 configs/hornet/hornet-small_8xb64_in1k.py create mode 100644 configs/hornet/hornet-tiny-gf_8xb128_in1k.py create mode 100644 configs/hornet/hornet-tiny_8xb128_in1k.py create mode 100644 configs/hornet/metafile.yml create mode 100644 configs/hrnet/README.md create mode 100644 configs/hrnet/hrnet-w18_4xb32_in1k.py create mode 100644 configs/hrnet/hrnet-w30_4xb32_in1k.py create mode 100644 configs/hrnet/hrnet-w32_4xb32_in1k.py create mode 100644 configs/hrnet/hrnet-w40_4xb32_in1k.py create mode 100644 configs/hrnet/hrnet-w44_4xb32_in1k.py create mode 100644 configs/hrnet/hrnet-w48_4xb32_in1k.py create mode 100644 configs/hrnet/hrnet-w64_4xb32_in1k.py create mode 100644 configs/hrnet/metafile.yml create mode 100644 configs/inception_v3/README.md create mode 100644 configs/inception_v3/inception-v3_8xb32_in1k.py create mode 100644 configs/inception_v3/metafile.yml create mode 100644 configs/itpn/README.md create mode 100644 configs/itpn/itpn-clip-b_hivit-base-p16_8xb256-amp-coslr-300e_in1k.py create mode 100644 configs/itpn/itpn-clip-b_hivit-base-p16_8xb256-amp-coslr-800e_in1k.py create mode 100644 configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-1600e_in1k.py create mode 100644 configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-400e_in1k.py create mode 100644 configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-800e_in1k.py create mode 100644 configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-1600e_in1k.py create mode 100644 configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-400e_in1k.py create mode 100644 configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-800e_in1k.py create mode 100644 configs/itpn/metafile.yml create mode 100644 configs/lenet/README.md create mode 100644 configs/lenet/lenet5_mnist.py create mode 100644 configs/levit/README.md create mode 100644 configs/levit/deploy/levit-128_8xb256_in1k.py create mode 100644 configs/levit/deploy/levit-128s_8xb256_in1k.py create mode 100644 configs/levit/deploy/levit-192_8xb256_in1k.py create mode 100644 configs/levit/deploy/levit-256_8xb256_in1k.py create mode 100644 configs/levit/deploy/levit-384_8xb256_in1k.py create mode 100644 configs/levit/levit-128_8xb256_in1k.py create mode 100644 configs/levit/levit-128s_8xb256_in1k.py create mode 100644 configs/levit/levit-192_8xb256_in1k.py create mode 100644 configs/levit/levit-256_8xb256_in1k.py create mode 100644 configs/levit/levit-384_8xb256_in1k.py create mode 100644 configs/levit/metafile.yml create mode 100644 configs/llava/README.md create mode 100644 configs/llava/llava-7b-v1.5_caption.py create mode 100644 configs/llava/llava-7b-v1.5_vqa.py create mode 100644 configs/llava/llava-7b-v1_caption.py create mode 100644 configs/llava/metafile.yml create mode 100644 configs/mae/README.md create mode 100644 
configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py create mode 100644 configs/mae/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py create mode 100644 configs/mae/benchmarks/vit-huge-p14_32xb8-coslr-50e_in1k-448px.py create mode 100644 configs/mae/benchmarks/vit-huge-p14_8xb128-coslr-50e_in1k.py create mode 100644 configs/mae/benchmarks/vit-huge-p14_8xb128-ds-coslr-50e_in1k.py create mode 100644 configs/mae/benchmarks/vit-huge-p14_8xb128-fsdp-coslr-50e_in1k.py create mode 100644 configs/mae/benchmarks/vit-large-p16_8xb128-coslr-50e_in1k.py create mode 100644 configs/mae/benchmarks/vit-large-p16_8xb128-ds-coslr-50e_in1k.py create mode 100644 configs/mae/benchmarks/vit-large-p16_8xb128-fsdp-coslr-50e_in1k.py create mode 100644 configs/mae/benchmarks/vit-large-p16_8xb2048-linear-coslr-90e_in1k.py create mode 100644 configs/mae/mae_hivit-base-p16_8xb512-amp-coslr-1600e_in1k.py create mode 100644 configs/mae/mae_hivit-base-p16_8xb512-amp-coslr-400e_in1k.py create mode 100644 configs/mae/mae_hivit-base-p16_8xb512-amp-coslr-800e_in1k.py create mode 100644 configs/mae/mae_hivit-large-p16_8xb512-amp-coslr-1600e_in1k.py create mode 100644 configs/mae/mae_hivit-large-p16_8xb512-amp-coslr-400e_in1k.py create mode 100644 configs/mae/mae_hivit-large-p16_8xb512-amp-coslr-800e_in1k.py create mode 100644 configs/mae/mae_vit-base-p16_8xb512-amp-coslr-1600e_in1k.py create mode 100644 configs/mae/mae_vit-base-p16_8xb512-amp-coslr-300e_in1k.py create mode 100644 configs/mae/mae_vit-base-p16_8xb512-amp-coslr-400e_in1k.py create mode 100644 configs/mae/mae_vit-base-p16_8xb512-amp-coslr-800e_in1k.py create mode 100644 configs/mae/mae_vit-huge-p14_8xb512-amp-coslr-1600e_in1k.py create mode 100644 configs/mae/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py create mode 100644 configs/mae/mae_vit-large-p16_8xb512-amp-coslr-300e_in1k.py create mode 100644 configs/mae/mae_vit-large-p16_8xb512-amp-coslr-400e_in1k.py create mode 100644 configs/mae/mae_vit-large-p16_8xb512-amp-coslr-800e_in1k.py create mode 100644 configs/mae/metafile.yml create mode 100644 configs/maskfeat/README.md create mode 100644 configs/maskfeat/benchmarks/vit-base-p16_8xb256-coslr-100e_in1k.py create mode 100644 configs/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k.py create mode 100644 configs/maskfeat/metafile.yml create mode 100644 configs/mff/README.md create mode 100644 configs/mff/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py create mode 100644 configs/mff/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py create mode 100644 configs/mff/metafile.yml create mode 100644 configs/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k.py create mode 100644 configs/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k.py create mode 100644 configs/milan/README.md create mode 100644 configs/milan/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py create mode 100644 configs/milan/benchmarks/vit-base-p16_8xb2048-linear-coslr-100e_in1k.py create mode 100644 configs/milan/metafile.yml create mode 100644 configs/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k.py create mode 100644 configs/minigpt4/README.md create mode 100644 configs/minigpt4/metafile.yml create mode 100644 configs/minigpt4/minigpt-4_baichuan-7b_caption.py create mode 100644 configs/minigpt4/minigpt-4_vicuna-7b_caption.py create mode 100644 configs/mixmim/README.md create mode 100644 configs/mixmim/benchmarks/mixmim-base_8xb128-coslr-100e_in1k.py create mode 100644 configs/mixmim/benchmarks/mixmim-base_8xb64_in1k.py create mode 100644 
configs/mixmim/metafile.yml create mode 100644 configs/mixmim/mixmim_mixmim-base_16xb128-coslr-300e_in1k.py create mode 100644 configs/mlp_mixer/README.md create mode 100644 configs/mlp_mixer/metafile.yml create mode 100644 configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py create mode 100644 configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py create mode 100644 configs/mobilenet_v2/README.md create mode 100644 configs/mobilenet_v2/metafile.yml create mode 100644 configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py create mode 100644 configs/mobilenet_v3/README.md create mode 100644 configs/mobilenet_v3/metafile.yml create mode 100644 configs/mobilenet_v3/mobilenet-v3-large_8xb128_in1k.py create mode 100644 configs/mobilenet_v3/mobilenet-v3-small-050_8xb128_in1k.py create mode 100644 configs/mobilenet_v3/mobilenet-v3-small-075_8xb128_in1k.py create mode 100644 configs/mobilenet_v3/mobilenet-v3-small_8xb128_in1k.py create mode 100644 configs/mobilenet_v3/mobilenet-v3-small_8xb16_cifar10.py create mode 100644 configs/mobileone/README.md create mode 100644 configs/mobileone/deploy/mobileone-s0_deploy_8xb32_in1k.py create mode 100644 configs/mobileone/deploy/mobileone-s1_deploy_8xb32_in1k.py create mode 100644 configs/mobileone/deploy/mobileone-s2_deploy_8xb32_in1k.py create mode 100644 configs/mobileone/deploy/mobileone-s3_deploy_8xb32_in1k.py create mode 100644 configs/mobileone/deploy/mobileone-s4_deploy_8xb32_in1k.py create mode 100644 configs/mobileone/metafile.yml create mode 100644 configs/mobileone/mobileone-s0_8xb32_in1k.py create mode 100644 configs/mobileone/mobileone-s1_8xb32_in1k.py create mode 100644 configs/mobileone/mobileone-s2_8xb32_in1k.py create mode 100644 configs/mobileone/mobileone-s3_8xb32_in1k.py create mode 100644 configs/mobileone/mobileone-s4_8xb32_in1k.py create mode 100644 configs/mobilevit/README.md create mode 100644 configs/mobilevit/metafile.yml create mode 100644 configs/mobilevit/mobilevit-small_8xb128_in1k.py create mode 100644 configs/mobilevit/mobilevit-xsmall_8xb128_in1k.py create mode 100644 configs/mobilevit/mobilevit-xxsmall_8xb128_in1k.py create mode 100644 configs/mocov2/README.md create mode 100644 configs/mocov2/benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py create mode 100644 configs/mocov2/metafile.yml create mode 100644 configs/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k.py create mode 100644 configs/mocov3/README.md create mode 100644 configs/mocov3/benchmarks/resnet50_8xb128-linear-coslr-90e_in1k.py create mode 100644 configs/mocov3/benchmarks/vit-base-p16_8xb128-linear-coslr-90e_in1k.py create mode 100644 configs/mocov3/benchmarks/vit-base-p16_8xb64-coslr-150e_in1k.py create mode 100644 configs/mocov3/benchmarks/vit-large-p16_8xb64-coslr-100e_in1k.py create mode 100644 configs/mocov3/benchmarks/vit-small-p16_8xb128-linear-coslr-90e_in1k.py create mode 100644 configs/mocov3/metafile.yml create mode 100644 configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k.py create mode 100644 configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-300e_in1k.py create mode 100644 configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-800e_in1k.py create mode 100644 configs/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k.py create mode 100644 configs/mocov3/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k.py create mode 100644 configs/mocov3/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k.py create mode 100644 configs/mvit/README.md create mode 100644 configs/mvit/metafile.yml create mode 100644 configs/mvit/mvitv2-base_8xb256_in1k.py create mode 100644 
configs/mvit/mvitv2-large_8xb256_in1k.py create mode 100644 configs/mvit/mvitv2-small_8xb256_in1k.py create mode 100644 configs/mvit/mvitv2-tiny_8xb256_in1k.py create mode 100644 configs/ofa/README.md create mode 100644 configs/ofa/metafile.yml create mode 100644 configs/ofa/ofa-base_finetuned_caption.py create mode 100644 configs/ofa/ofa-base_finetuned_refcoco.py create mode 100644 configs/ofa/ofa-base_finetuned_vqa.py create mode 100644 configs/ofa/ofa-base_zeroshot_vqa.py create mode 100644 configs/ofa/ofa-large_zeroshot_vqa.py create mode 100644 configs/otter/README.md create mode 100644 configs/otter/metafile.yml create mode 100644 configs/otter/otter-9b_caption.py create mode 100644 configs/otter/otter-9b_vqa.py create mode 100644 configs/poolformer/README.md create mode 100644 configs/poolformer/metafile.yml create mode 100644 configs/poolformer/poolformer-m36_32xb128_in1k.py create mode 100644 configs/poolformer/poolformer-m48_32xb128_in1k.py create mode 100644 configs/poolformer/poolformer-s12_32xb128_in1k.py create mode 100644 configs/poolformer/poolformer-s24_32xb128_in1k.py create mode 100644 configs/poolformer/poolformer-s36_32xb128_in1k.py create mode 100644 configs/regnet/README.md create mode 100644 configs/regnet/metafile.yml create mode 100644 configs/regnet/regnetx-1.6gf_8xb128_in1k.py create mode 100644 configs/regnet/regnetx-12gf_8xb64_in1k.py create mode 100644 configs/regnet/regnetx-3.2gf_8xb64_in1k.py create mode 100644 configs/regnet/regnetx-4.0gf_8xb64_in1k.py create mode 100644 configs/regnet/regnetx-400mf_8xb128_in1k.py create mode 100644 configs/regnet/regnetx-6.4gf_8xb64_in1k.py create mode 100644 configs/regnet/regnetx-8.0gf_8xb64_in1k.py create mode 100644 configs/regnet/regnetx-800mf_8xb128_in1k.py create mode 100644 configs/replknet/README.md create mode 100644 configs/replknet/deploy/replknet-31B-deploy_32xb64_in1k-384px.py create mode 100644 configs/replknet/deploy/replknet-31B-deploy_32xb64_in1k.py create mode 100644 configs/replknet/deploy/replknet-31L-deploy_32xb64_in1k-384px.py create mode 100644 configs/replknet/deploy/replknet-XL-deploy_32xb64_in1k-320px.py create mode 100644 configs/replknet/metafile.yml create mode 100644 configs/replknet/replknet-31B_32xb64_in1k-384px.py create mode 100644 configs/replknet/replknet-31B_32xb64_in1k.py create mode 100644 configs/replknet/replknet-31L_32xb64_in1k-384px.py create mode 100644 configs/replknet/replknet-XL_32xb64_in1k-320px.py create mode 100644 configs/repmlp/README.md create mode 100644 configs/repmlp/metafile.yml create mode 100644 configs/repmlp/repmlp-base_8xb64_in1k-256px.py create mode 100644 configs/repmlp/repmlp-base_8xb64_in1k.py create mode 100644 configs/repmlp/repmlp-base_delopy_8xb64_in1k.py create mode 100644 configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py create mode 100644 configs/repvgg/README.md create mode 100644 configs/repvgg/metafile.yml create mode 100644 configs/repvgg/repvgg-A0_8xb32_in1k.py create mode 100644 configs/repvgg/repvgg-A0_deploy_in1k.py create mode 100644 configs/repvgg/repvgg-A1_8xb32_in1k.py create mode 100644 configs/repvgg/repvgg-A2_8xb32_in1k.py create mode 100644 configs/repvgg/repvgg-B0_8xb32_in1k.py create mode 100644 configs/repvgg/repvgg-B1_8xb32_in1k.py create mode 100644 configs/repvgg/repvgg-B1g2_8xb32_in1k.py create mode 100644 configs/repvgg/repvgg-B1g4_8xb32_in1k.py create mode 100644 configs/repvgg/repvgg-B2_8xb32_in1k.py create mode 100644 configs/repvgg/repvgg-B2g4_8xb32_in1k.py create mode 100644 configs/repvgg/repvgg-B3_8xb32_in1k.py 
create mode 100644 configs/repvgg/repvgg-B3g4_8xb32_in1k.py create mode 100644 configs/repvgg/repvgg-D2se_8xb32_in1k.py create mode 100644 configs/res2net/README.md create mode 100644 configs/res2net/metafile.yml create mode 100644 configs/res2net/res2net101-w26-s4_8xb32_in1k.py create mode 100644 configs/res2net/res2net50-w14-s8_8xb32_in1k.py create mode 100644 configs/res2net/res2net50-w26-s8_8xb32_in1k.py create mode 100644 configs/resnest/README.md create mode 100644 configs/resnest/_randaug_policies.py create mode 100644 configs/resnest/resnest101_32xb64_in1k.py create mode 100644 configs/resnest/resnest200_64xb32_in1k.py create mode 100644 configs/resnest/resnest269_64xb32_in1k.py create mode 100644 configs/resnest/resnest50_32xb64_in1k.py create mode 100644 configs/resnet/README.md create mode 100644 configs/resnet/metafile.yml create mode 100644 configs/resnet/resnet101_8xb16_cifar10.py create mode 100644 configs/resnet/resnet101_8xb32_in1k.py create mode 100644 configs/resnet/resnet152_8xb16_cifar10.py create mode 100644 configs/resnet/resnet152_8xb32_in1k.py create mode 100644 configs/resnet/resnet18_8xb16_cifar10.py create mode 100644 configs/resnet/resnet18_8xb32_in1k.py create mode 100644 configs/resnet/resnet34_8xb16_cifar10.py create mode 100644 configs/resnet/resnet34_8xb32_in1k.py create mode 100644 configs/resnet/resnet50_32xb64-warmup-coslr_in1k.py create mode 100644 configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py create mode 100644 configs/resnet/resnet50_32xb64-warmup_in1k.py create mode 100644 configs/resnet/resnet50_8xb128_coslr-90e_in21k.py create mode 100644 configs/resnet/resnet50_8xb16-mixup_cifar10.py create mode 100644 configs/resnet/resnet50_8xb16_cifar10.py create mode 100644 configs/resnet/resnet50_8xb16_cifar100.py create mode 100644 configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py create mode 100644 configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py create mode 100644 configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py create mode 100644 configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py create mode 100644 configs/resnet/resnet50_8xb32-coslr_in1k.py create mode 100644 configs/resnet/resnet50_8xb32-cutmix_in1k.py create mode 100644 configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py create mode 100644 configs/resnet/resnet50_8xb32-fp16_in1k.py create mode 100644 configs/resnet/resnet50_8xb32-lbs_in1k.py create mode 100644 configs/resnet/resnet50_8xb32-mixup_in1k.py create mode 100644 configs/resnet/resnet50_8xb32_in1k.py create mode 100644 configs/resnet/resnet50_8xb8_cub.py create mode 100644 configs/resnet/resnetv1c101_8xb32_in1k.py create mode 100644 configs/resnet/resnetv1c152_8xb32_in1k.py create mode 100644 configs/resnet/resnetv1c50_8xb32_in1k.py create mode 100644 configs/resnet/resnetv1d101_8xb32_in1k.py create mode 100644 configs/resnet/resnetv1d152_8xb32_in1k.py create mode 100644 configs/resnet/resnetv1d50_8xb32_in1k.py create mode 100644 configs/resnext/README.md create mode 100644 configs/resnext/metafile.yml create mode 100644 configs/resnext/resnext101-32x4d_8xb32_in1k.py create mode 100644 configs/resnext/resnext101-32x8d_8xb32_in1k.py create mode 100644 configs/resnext/resnext152-32x4d_8xb32_in1k.py create mode 100644 configs/resnext/resnext50-32x4d_8xb32_in1k.py create mode 100644 configs/revvit/README.md create mode 100644 configs/revvit/metafile.yml create mode 100644 configs/revvit/revvit-base_8xb256_in1k.py create mode 100644 configs/revvit/revvit-small_8xb256_in1k.py create mode 100644 configs/riformer/README.md create mode 
100644 configs/riformer/deploy/riformer-m36-deploy_8xb128_in1k.py create mode 100644 configs/riformer/deploy/riformer-m36-deploy_8xb64_in1k-384px.py create mode 100644 configs/riformer/deploy/riformer-m48-deploy_8xb64_in1k-384px.py create mode 100644 configs/riformer/deploy/riformer-m48-deploy_8xb64_in1k.py create mode 100644 configs/riformer/deploy/riformer-s12-deploy_8xb128_in1k-384px.py create mode 100644 configs/riformer/deploy/riformer-s12-deploy_8xb128_in1k.py create mode 100644 configs/riformer/deploy/riformer-s24-deploy_8xb128_in1k-384px.py create mode 100644 configs/riformer/deploy/riformer-s24-deploy_8xb128_in1k.py create mode 100644 configs/riformer/deploy/riformer-s36-deploy_8xb128_in1k.py create mode 100644 configs/riformer/deploy/riformer-s36-deploy_8xb64_in1k-384px.py create mode 100644 configs/riformer/metafile.yml create mode 100644 configs/riformer/riformer-m36_8xb128_in1k.py create mode 100644 configs/riformer/riformer-m36_8xb64_in1k-384px.py create mode 100644 configs/riformer/riformer-m48_8xb64_in1k-384px.py create mode 100644 configs/riformer/riformer-m48_8xb64_in1k.py create mode 100644 configs/riformer/riformer-s12_8xb128_in1k-384px.py create mode 100644 configs/riformer/riformer-s12_8xb128_in1k.py create mode 100644 configs/riformer/riformer-s24_8xb128_in1k-384px.py create mode 100644 configs/riformer/riformer-s24_8xb128_in1k.py create mode 100644 configs/riformer/riformer-s36_8xb128_in1k.py create mode 100644 configs/riformer/riformer-s36_8xb64_in1k-384px.py create mode 100644 configs/sam/README.md create mode 100644 configs/sam/metafile.yml create mode 100644 configs/sam/vit-base-p16_sam_headless.py create mode 100644 configs/sam/vit-huge-p16_sam_headless.py create mode 100644 configs/sam/vit-large-p16_sam_headless.py create mode 100644 configs/seresnet/README.md create mode 100644 configs/seresnet/metafile.yml create mode 100644 configs/seresnet/seresnet101_8xb32_in1k.py create mode 100644 configs/seresnet/seresnet50_8xb32_in1k.py create mode 100644 configs/seresnet/seresnext101-32x4d_8xb32_in1k.py create mode 100644 configs/seresnet/seresnext50-32x4d_8xb32_in1k.py create mode 100644 configs/shufflenet_v1/README.md create mode 100644 configs/shufflenet_v1/metafile.yml create mode 100644 configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py create mode 100644 configs/shufflenet_v2/README.md create mode 100644 configs/shufflenet_v2/metafile.yml create mode 100644 configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py create mode 100644 configs/simclr/README.md create mode 100644 configs/simclr/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py create mode 100644 configs/simclr/metafile.yml create mode 100644 configs/simclr/simclr_resnet50_16xb256-coslr-200e_in1k.py create mode 100644 configs/simclr/simclr_resnet50_16xb256-coslr-800e_in1k.py create mode 100644 configs/simclr/simclr_resnet50_8xb32-coslr-200e_in1k.py create mode 100644 configs/simmim/README.md create mode 100644 configs/simmim/benchmarks/swin-base-w6_8xb256-coslr-100e_in1k-192px.py create mode 100644 configs/simmim/benchmarks/swin-base-w7_8xb256-coslr-100e_in1k.py create mode 100644 configs/simmim/benchmarks/swin-large-w14_8xb256-coslr-100e_in1k.py create mode 100644 configs/simmim/metafile.yml create mode 100644 configs/simmim/simmim_swin-base-w6_16xb128-amp-coslr-100e_in1k-192px.py create mode 100644 configs/simmim/simmim_swin-base-w6_16xb128-amp-coslr-800e_in1k-192px.py create mode 100644 configs/simmim/simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px.py create mode 100644 
configs/simmim/simmim_swin-large-w12_16xb128-amp-coslr-800e_in1k-192px.py create mode 100644 configs/simsiam/README.md create mode 100644 configs/simsiam/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py create mode 100644 configs/simsiam/metafile.yml create mode 100644 configs/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k.py create mode 100644 configs/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k.py create mode 100644 configs/spark/README.md create mode 100644 configs/spark/benchmarks/convnextv2-tiny_8xb256-coslr-300e_in1k.py create mode 100644 configs/spark/benchmarks/resnet50_8xb256-coslr-300e_in1k.py create mode 100644 configs/spark/metafile.yml create mode 100644 configs/spark/spark_sparse-convnext-small_16xb256-amp-coslr-800e_in1k.py create mode 100644 configs/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k.py create mode 100644 configs/spark/spark_sparse-resnet50_8xb512-amp-coslr-1600e_in1k.py create mode 100644 configs/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k.py create mode 100644 configs/swav/README.md create mode 100644 configs/swav/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py create mode 100644 configs/swav/metafile.yml create mode 100644 configs/swav/swav_resnet50_8xb32-mcrop-coslr-200e_in1k-224px-96px.py create mode 100644 configs/swin_transformer/README.md create mode 100644 configs/swin_transformer/metafile.yml create mode 100644 configs/swin_transformer/swin-base_16xb64_in1k-384px.py create mode 100644 configs/swin_transformer/swin-base_16xb64_in1k.py create mode 100644 configs/swin_transformer/swin-large_16xb64_in1k-384px.py create mode 100644 configs/swin_transformer/swin-large_16xb64_in1k.py create mode 100644 configs/swin_transformer/swin-large_8xb8_cub-384px.py create mode 100644 configs/swin_transformer/swin-small_16xb64_in1k.py create mode 100644 configs/swin_transformer/swin-tiny_16xb64_in1k.py create mode 100644 configs/swin_transformer_v2/README.md create mode 100644 configs/swin_transformer_v2/metafile.yml create mode 100644 configs/swin_transformer_v2/swinv2-base-w12_8xb128_in21k-192px.py create mode 100644 configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py create mode 100644 configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py create mode 100644 configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py create mode 100644 configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py create mode 100644 configs/swin_transformer_v2/swinv2-large-w12_8xb128_in21k-192px.py create mode 100644 configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py create mode 100644 configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py create mode 100644 configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py create mode 100644 configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py create mode 100644 configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py create mode 100644 configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py create mode 100644 configs/t2t_vit/README.md create mode 100644 configs/t2t_vit/metafile.yml create mode 100644 configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py create mode 100644 configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py create mode 100644 configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py create mode 100644 configs/tinyvit/README.md create mode 100644 configs/tinyvit/metafile.yml create mode 100644 configs/tinyvit/tinyvit-11m-distill_8xb256_in1k.py create mode 100644 
configs/tinyvit/tinyvit-11m_8xb256_in1k.py create mode 100644 configs/tinyvit/tinyvit-21m-distill_8xb256_in1k-384px.py create mode 100644 configs/tinyvit/tinyvit-21m-distill_8xb256_in1k-512px.py create mode 100644 configs/tinyvit/tinyvit-21m-distill_8xb256_in1k.py create mode 100644 configs/tinyvit/tinyvit-21m_8xb256_in1k.py create mode 100644 configs/tinyvit/tinyvit-5m-distill_8xb256_in1k.py create mode 100644 configs/tinyvit/tinyvit-5m_8xb256_in1k.py create mode 100644 configs/tnt/README.md create mode 100644 configs/tnt/metafile.yml create mode 100644 configs/tnt/tnt-s-p16_16xb64_in1k.py create mode 100644 configs/twins/README.md create mode 100644 configs/twins/metafile.yml create mode 100644 configs/twins/twins-pcpvt-base_8xb128_in1k.py create mode 100644 configs/twins/twins-pcpvt-large_16xb64_in1k.py create mode 100644 configs/twins/twins-pcpvt-small_8xb128_in1k.py create mode 100644 configs/twins/twins-svt-base_8xb128_in1k.py create mode 100644 configs/twins/twins-svt-large_16xb64_in1k.py create mode 100644 configs/twins/twins-svt-small_8xb128_in1k.py create mode 100644 configs/van/README.md create mode 100644 configs/van/metafile.yml create mode 100644 configs/van/van-base_8xb128_in1k.py create mode 100644 configs/van/van-large_8xb128_in1k.py create mode 100644 configs/van/van-small_8xb128_in1k.py create mode 100644 configs/van/van-tiny_8xb128_in1k.py create mode 100644 configs/vgg/README.md create mode 100644 configs/vgg/metafile.yml create mode 100644 configs/vgg/vgg11_8xb32_in1k.py create mode 100644 configs/vgg/vgg11bn_8xb32_in1k.py create mode 100644 configs/vgg/vgg13_8xb32_in1k.py create mode 100644 configs/vgg/vgg13bn_8xb32_in1k.py create mode 100644 configs/vgg/vgg16_8xb16_voc.py create mode 100644 configs/vgg/vgg16_8xb32_in1k.py create mode 100644 configs/vgg/vgg16bn_8xb32_in1k.py create mode 100644 configs/vgg/vgg19_8xb32_in1k.py create mode 100644 configs/vgg/vgg19bn_8xb32_in1k.py create mode 100644 configs/vig/README.md create mode 100644 configs/vig/metafile.yml create mode 100644 configs/vig/pvig-base_8xb128_in1k.py create mode 100644 configs/vig/pvig-medium_8xb128_in1k.py create mode 100644 configs/vig/pvig-small_8xb128_in1k.py create mode 100644 configs/vig/pvig-tiny_8xb128_in1k.py create mode 100644 configs/vig/vig-base_8xb128_in1k.py create mode 100644 configs/vig/vig-small_8xb128_in1k.py create mode 100644 configs/vig/vig-tiny_8xb128_in1k.py create mode 100644 configs/vision_transformer/README.md create mode 100644 configs/vision_transformer/metafile.yml create mode 100644 configs/vision_transformer/vit-base-p16_32xb128-mae_in1k.py create mode 100644 configs/vision_transformer/vit-base-p16_4xb544-ipu_in1k.py create mode 100644 configs/vision_transformer/vit-base-p16_64xb64_in1k-384px.py create mode 100644 configs/vision_transformer/vit-base-p16_64xb64_in1k.py create mode 100644 configs/vision_transformer/vit-base-p16_8xb64-lora_in1k-384px.py create mode 100644 configs/vision_transformer/vit-base-p32_64xb64_in1k-384px.py create mode 100644 configs/vision_transformer/vit-base-p32_64xb64_in1k.py create mode 100644 configs/vision_transformer/vit-large-p16_64xb64_in1k-384px.py create mode 100644 configs/vision_transformer/vit-large-p16_64xb64_in1k.py create mode 100644 configs/vision_transformer/vit-large-p32_64xb64_in1k-384px.py create mode 100644 configs/vision_transformer/vit-large-p32_64xb64_in1k.py create mode 100644 configs/wrn/README.md create mode 100644 configs/wrn/metafile.yml create mode 100644 configs/wrn/wide-resnet101_8xb32_in1k.py create mode 100644 
configs/wrn/wide-resnet50_8xb32_in1k.py create mode 100644 configs/wrn/wide-resnet50_timm_8xb32_in1k.py create mode 100644 configs/xcit/README.md create mode 100644 configs/xcit/metafile.yml create mode 100644 configs/xcit/xcit-large-24-p16_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-large-24-p16_8xb128_in1k.py create mode 100644 configs/xcit/xcit-large-24-p8_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-large-24-p8_8xb128_in1k.py create mode 100644 configs/xcit/xcit-medium-24-p16_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-medium-24-p16_8xb128_in1k.py create mode 100644 configs/xcit/xcit-medium-24-p8_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-medium-24-p8_8xb128_in1k.py create mode 100644 configs/xcit/xcit-nano-12-p16_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-nano-12-p16_8xb128_in1k.py create mode 100644 configs/xcit/xcit-nano-12-p8_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-nano-12-p8_8xb128_in1k.py create mode 100644 configs/xcit/xcit-small-12-p16_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-small-12-p16_8xb128_in1k.py create mode 100644 configs/xcit/xcit-small-12-p8_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-small-12-p8_8xb128_in1k.py create mode 100644 configs/xcit/xcit-small-24-p16_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-small-24-p16_8xb128_in1k.py create mode 100644 configs/xcit/xcit-small-24-p8_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-small-24-p8_8xb128_in1k.py create mode 100644 configs/xcit/xcit-tiny-12-p16_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-tiny-12-p16_8xb128_in1k.py create mode 100644 configs/xcit/xcit-tiny-12-p8_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-tiny-12-p8_8xb128_in1k.py create mode 100644 configs/xcit/xcit-tiny-24-p16_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-tiny-24-p16_8xb128_in1k.py create mode 100644 configs/xcit/xcit-tiny-24-p8_8xb128_in1k-384px.py create mode 100644 configs/xcit/xcit-tiny-24-p8_8xb128_in1k.py create mode 100644 dataset-index.yml create mode 100644 demo/bird.JPEG create mode 100644 demo/cat-dog.png create mode 100644 demo/demo.JPEG create mode 100644 demo/dog.jpg create mode 100644 demo/image_demo.py create mode 100644 demo/ipu_train_example.sh create mode 100644 docker/Dockerfile create mode 100644 docker/serve/Dockerfile create mode 100644 docker/serve/config.properties create mode 100644 docker/serve/entrypoint.sh create mode 100644 docs/en/Makefile create mode 100644 docs/en/_static/css/readthedocs.css create mode 100644 docs/en/_static/image/confusion-matrix.png create mode 100644 docs/en/_static/image/mmpt-logo.png create mode 100644 docs/en/_static/image/tools/analysis/analyze_log.jpg create mode 100644 docs/en/_static/js/custom.js create mode 100644 docs/en/_templates/404.html create mode 100644 docs/en/_templates/autosummary/class.rst create mode 100644 docs/en/_templates/callable.rst create mode 100644 docs/en/_templates/data_transform.rst create mode 100644 docs/en/advanced_guides/convention.md create mode 100644 docs/en/advanced_guides/datasets.md create mode 100644 docs/en/advanced_guides/evaluation.md create mode 100644 docs/en/advanced_guides/modules.md create mode 100644 docs/en/advanced_guides/pipeline.md create mode 100644 docs/en/advanced_guides/runtime.md create mode 100644 docs/en/advanced_guides/schedule.md create mode 100644 docs/en/api/apis.rst create mode 100644 docs/en/api/data_process.rst create mode 100644 
docs/en/api/datasets.rst create mode 100644 docs/en/api/engine.rst create mode 100644 docs/en/api/evaluation.rst create mode 100644 docs/en/api/models.rst create mode 100644 docs/en/api/structures.rst create mode 100644 docs/en/api/utils.rst create mode 100644 docs/en/api/visualization.rst create mode 100644 docs/en/conf.py create mode 100644 docs/en/device/npu.md create mode 100644 docs/en/docutils.conf create mode 100644 docs/en/get_started.md create mode 100644 docs/en/index.rst create mode 100644 docs/en/migration.md create mode 100644 docs/en/notes/changelog.md create mode 100644 docs/en/notes/contribution_guide.md create mode 100644 docs/en/notes/faq.md create mode 100644 docs/en/notes/finetune_custom_dataset.md create mode 100644 docs/en/notes/pretrain_custom_dataset.md create mode 100644 docs/en/notes/projects.md create mode 100644 docs/en/stat.py create mode 100644 docs/en/useful_tools/cam_visualization.md create mode 100644 docs/en/useful_tools/complexity_analysis.md create mode 100644 docs/en/useful_tools/confusion_matrix.md create mode 100644 docs/en/useful_tools/dataset_visualization.md create mode 100644 docs/en/useful_tools/log_result_analysis.md create mode 100644 docs/en/useful_tools/model_serving.md create mode 100644 docs/en/useful_tools/print_config.md create mode 100644 docs/en/useful_tools/scheduler_visualization.md create mode 100644 docs/en/useful_tools/shape_bias.md create mode 100644 docs/en/useful_tools/t-sne_visualization.md create mode 100644 docs/en/useful_tools/verify_dataset.md create mode 100644 docs/en/user_guides/config.md create mode 100644 docs/en/user_guides/dataset_prepare.md create mode 100644 docs/en/user_guides/downstream.md create mode 100644 docs/en/user_guides/inference.md create mode 100644 docs/en/user_guides/test.md create mode 100644 docs/en/user_guides/train.md create mode 100644 docs/zh_CN/Makefile create mode 100644 docs/zh_CN/_static/css/readthedocs.css create mode 100644 docs/zh_CN/_static/image/confusion-matrix.png create mode 100644 docs/zh_CN/_static/image/mmpt-logo.png create mode 100644 docs/zh_CN/_static/image/tools/analysis/analyze_log.jpg create mode 100644 docs/zh_CN/_static/js/custom.js create mode 100644 docs/zh_CN/_templates/404.html create mode 100644 docs/zh_CN/_templates/autosummary/class.rst create mode 100644 docs/zh_CN/_templates/callable.rst create mode 100644 docs/zh_CN/_templates/data_transform.rst create mode 100644 docs/zh_CN/advanced_guides/convention.md create mode 100644 docs/zh_CN/advanced_guides/datasets.md create mode 100644 docs/zh_CN/advanced_guides/evaluation.md create mode 100644 docs/zh_CN/advanced_guides/modules.md create mode 100644 docs/zh_CN/advanced_guides/pipeline.md create mode 100644 docs/zh_CN/advanced_guides/runtime.md create mode 100644 docs/zh_CN/advanced_guides/schedule.md create mode 100644 docs/zh_CN/api create mode 100644 docs/zh_CN/conf.py create mode 100644 docs/zh_CN/device/npu.md create mode 100644 docs/zh_CN/docutils.conf create mode 100644 docs/zh_CN/get_started.md create mode 100644 docs/zh_CN/index.rst create mode 100644 docs/zh_CN/locales/zh_CN/LC_MESSAGES/api.po create mode 100644 docs/zh_CN/locales/zh_CN/LC_MESSAGES/papers.po create mode 100644 docs/zh_CN/migration.md create mode 100644 docs/zh_CN/notes/changelog.md create mode 100644 docs/zh_CN/notes/contribution_guide.md create mode 100644 docs/zh_CN/notes/faq.md create mode 100644 docs/zh_CN/notes/finetune_custom_dataset.md create mode 100644 docs/zh_CN/notes/pretrain_custom_dataset.md create mode 100644 
docs/zh_CN/notes/projects.md create mode 100644 docs/zh_CN/stat.py create mode 100644 docs/zh_CN/useful_tools/cam_visualization.md create mode 100644 docs/zh_CN/useful_tools/complexity_analysis.md create mode 100644 docs/zh_CN/useful_tools/confusion_matrix.md create mode 100644 docs/zh_CN/useful_tools/dataset_visualization.md create mode 100644 docs/zh_CN/useful_tools/log_result_analysis.md create mode 100644 docs/zh_CN/useful_tools/model_serving.md create mode 100644 docs/zh_CN/useful_tools/print_config.md create mode 100644 docs/zh_CN/useful_tools/scheduler_visualization.md create mode 100644 docs/zh_CN/useful_tools/shape_bias.md create mode 100644 docs/zh_CN/useful_tools/t-sne_visualization.md create mode 100644 docs/zh_CN/useful_tools/verify_dataset.md create mode 100644 docs/zh_CN/user_guides/config.md create mode 100644 docs/zh_CN/user_guides/dataset_prepare.md create mode 100644 docs/zh_CN/user_guides/downstream.md create mode 100644 docs/zh_CN/user_guides/inference.md create mode 100644 docs/zh_CN/user_guides/test.md create mode 100644 docs/zh_CN/user_guides/train.md create mode 100644 inception-v3_8xb32_in1k.py create mode 100644 mmpretrain.egg-info/PKG-INFO create mode 100644 mmpretrain.egg-info/SOURCES.txt rename README.md => mmpretrain.egg-info/dependency_links.txt (100%) create mode 100644 mmpretrain.egg-info/not-zip-safe create mode 100644 mmpretrain.egg-info/requires.txt create mode 100644 mmpretrain.egg-info/top_level.txt create mode 100644 mmpretrain/__init__.py create mode 100644 mmpretrain/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/__pycache__/registry.cpython-310.pyc create mode 100644 mmpretrain/__pycache__/version.cpython-310.pyc create mode 100644 mmpretrain/apis/__init__.py create mode 100644 mmpretrain/apis/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/apis/__pycache__/base.cpython-310.pyc create mode 100644 mmpretrain/apis/__pycache__/feature_extractor.cpython-310.pyc create mode 100644 mmpretrain/apis/__pycache__/image_caption.cpython-310.pyc create mode 100644 mmpretrain/apis/__pycache__/image_classification.cpython-310.pyc create mode 100644 mmpretrain/apis/__pycache__/image_retrieval.cpython-310.pyc create mode 100644 mmpretrain/apis/__pycache__/model.cpython-310.pyc create mode 100644 mmpretrain/apis/__pycache__/multimodal_retrieval.cpython-310.pyc create mode 100644 mmpretrain/apis/__pycache__/nlvr.cpython-310.pyc create mode 100644 mmpretrain/apis/__pycache__/visual_grounding.cpython-310.pyc create mode 100644 mmpretrain/apis/__pycache__/visual_question_answering.cpython-310.pyc create mode 100644 mmpretrain/apis/base.py create mode 100644 mmpretrain/apis/feature_extractor.py create mode 100644 mmpretrain/apis/image_caption.py create mode 100644 mmpretrain/apis/image_classification.py create mode 100644 mmpretrain/apis/image_retrieval.py create mode 100644 mmpretrain/apis/model.py create mode 100644 mmpretrain/apis/multimodal_retrieval.py create mode 100644 mmpretrain/apis/nlvr.py create mode 100644 mmpretrain/apis/utils.py create mode 100644 mmpretrain/apis/visual_grounding.py create mode 100644 mmpretrain/apis/visual_question_answering.py create mode 100644 mmpretrain/configs/_base_/datasets/cifar10_bs16.py create mode 100644 mmpretrain/configs/_base_/datasets/cub_bs8_384.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet21k_bs128.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs128_mbv3.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs256_beitv2.py create 
mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs32.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs32_pil_resize.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs32_simclr.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs512_mae.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs64_pil_resize.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_224.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_256.py create mode 100644 mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_384.py create mode 100644 mmpretrain/configs/_base_/default_runtime.py create mode 100644 mmpretrain/configs/_base_/models/convnext_base.py create mode 100644 mmpretrain/configs/_base_/models/mae_hivit_base_p16.py create mode 100644 mmpretrain/configs/_base_/models/mae_vit_base_p16.py create mode 100644 mmpretrain/configs/_base_/models/mobilenet_v2_1x.py create mode 100644 mmpretrain/configs/_base_/models/mobilenet_v3_small.py create mode 100644 mmpretrain/configs/_base_/models/resnet18.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer_base.py create mode 100644 mmpretrain/configs/_base_/models/swin_transformer_v2_base.py create mode 100644 mmpretrain/configs/_base_/models/vit_base_p16.py create mode 100644 mmpretrain/configs/_base_/schedules/cifar10_bs128.py create mode 100644 mmpretrain/configs/_base_/schedules/cub_bs64.py create mode 100644 mmpretrain/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py create mode 100644 mmpretrain/configs/_base_/schedules/imagenet_bs256.py create mode 100644 mmpretrain/configs/_base_/schedules/imagenet_bs256_epochstep.py create mode 100644 mmpretrain/configs/_base_/schedules/imagenet_bs4096_adamw.py create mode 100644 mmpretrain/configs/_base_/schedules/imagenet_lars_coslr_200e.py create mode 100644 mmpretrain/configs/beit/beit_beit_base_p16_8xb256_amp_coslr_300e_in1k.py create mode 100644 mmpretrain/configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py create mode 100644 mmpretrain/configs/beit/benchmarks/beit-base-p16_8xb64_in1k.py create mode 100644 mmpretrain/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-1600e_in1k.py create mode 100644 mmpretrain/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k.py create mode 100644 mmpretrain/configs/beitv2/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py create mode 100644 mmpretrain/configs/beitv2/benchmarks/beit-base-p16_8xb64_in1k.py create mode 100644 mmpretrain/configs/convnext/convnext-base_32xb128_in1k.py create mode 100644 mmpretrain/configs/convnext/convnext-base_32xb128_in21k.py create mode 100644 mmpretrain/configs/convnext/convnext-large_64xb64_in1k-384px.py create mode 100644 mmpretrain/configs/convnext/convnext-large_64xb64_in1k.py create mode 100644 mmpretrain/configs/convnext/convnext-large_64xb64_in21k.py create mode 100644 mmpretrain/configs/convnext/convnext-small_32xb128_in1k-384px.py create mode 100644 mmpretrain/configs/convnext/convnext-small_32xb128_in1k.py create mode 100644 mmpretrain/configs/convnext/convnext-tiny_32xb128_in1k-384px.py create mode 100644 mmpretrain/configs/convnext/convnext-tiny_32xb128_in1k.py create mode 100644 mmpretrain/configs/convnext/convnext-xlarge_64xb64_in1k-384px.py create mode 100644 mmpretrain/configs/convnext/convnext-xlarge_64xb64_in1k.py create mode 100644 
mmpretrain/configs/convnext/convnext-xlarge_64xb64_in21k.py create mode 100644 mmpretrain/configs/convnext/convnext_base_32xb128_in1k_384px.py create mode 100644 mmpretrain/configs/eva/eva_mae_style_vit_base_p16_16xb256_coslr_400e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_1600e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_400e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_800e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_hivit_large_p16_8xb512_amp_coslr_1600e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_hivit_large_p16_8xb512_amp_coslr_400e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_hivit_large_p16_8xb512_amp_coslr_800e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_1600e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_300e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_400e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_800e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_vit_huge_p14_8xb512_amp_coslr_1600e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_1600e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_300e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_400e_in1k.py create mode 100644 mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_800e_in1k.py create mode 100644 mmpretrain/configs/mobilenet_v2/mobilenet_v2_8xb32_in1k.py create mode 100644 mmpretrain/configs/mobilenet_v3/mobilenet_v3_large_8xb128_in1k.py create mode 100644 mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_050_8xb128_in1k.py create mode 100644 mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_075_8xb128_in1k.py create mode 100644 mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb128_in1k.py create mode 100644 mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb16_cifar10.py create mode 100644 mmpretrain/configs/resnet/resnet18_8xb32_in1k.py create mode 100644 mmpretrain/configs/simclr/simclr_resnet50_16xb256_coslr_200e_in1k.py create mode 100644 mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py create mode 100644 mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py create mode 100644 mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py create mode 100644 mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py create mode 100644 mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py create mode 100644 mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py create mode 100644 mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py create 
mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2_small_w8_16xb64_in1k_256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py create mode 100644 mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w8_16xb64_in1k_256px.py create mode 100644 mmpretrain/configs/vision_transformer/vit_base_p16_32xb128_mae_in1k.py create mode 100644 mmpretrain/configs/vision_transformer/vit_base_p16_64xb64_in1k.py create mode 100644 mmpretrain/configs/vision_transformer/vit_base_p16_64xb64_in1k_384px.py create mode 100644 mmpretrain/configs/vision_transformer/vit_base_p32_64xb64_in1k.py create mode 100644 mmpretrain/configs/vision_transformer/vit_base_p32_64xb64_in1k_384px.py create mode 100644 mmpretrain/configs/vision_transformer/vit_large_p16_64xb64_in1k.py create mode 100644 mmpretrain/configs/vision_transformer/vit_large_p16_64xb64_in1k_384px.py create mode 100644 mmpretrain/configs/vision_transformer/vit_large_p32_64xb64_in1k.py create mode 100644 mmpretrain/configs/vision_transformer/vit_large_p32_64xb64_in1k_384px.py create mode 100644 mmpretrain/datasets/__init__.py create mode 100644 mmpretrain/datasets/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/base_dataset.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/builder.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/caltech101.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/categories.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/cifar.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/cub.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/custom.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/dataset_wrappers.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/dtd.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/fgvcaircraft.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/flowers102.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/food101.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/imagenet.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/inshop.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/mnist.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/multi_label.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/multi_task.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/nlvr2.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/oxfordiiitpet.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/places205.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/stanfordcars.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/sun397.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/utils.cpython-310.pyc create mode 100644 mmpretrain/datasets/__pycache__/voc.cpython-310.pyc create mode 100644 mmpretrain/datasets/base_dataset.py create mode 100644 mmpretrain/datasets/builder.py create mode 100644 mmpretrain/datasets/caltech101.py create mode 100644 mmpretrain/datasets/categories.py create mode 100644 mmpretrain/datasets/cifar.py create mode 100644 mmpretrain/datasets/coco_caption.py create 
mode 100644 mmpretrain/datasets/coco_retrieval.py create mode 100644 mmpretrain/datasets/coco_vqa.py create mode 100644 mmpretrain/datasets/cub.py create mode 100644 mmpretrain/datasets/custom.py create mode 100644 mmpretrain/datasets/dataset_wrappers.py create mode 100644 mmpretrain/datasets/dtd.py create mode 100644 mmpretrain/datasets/fgvcaircraft.py create mode 100644 mmpretrain/datasets/flamingo.py create mode 100644 mmpretrain/datasets/flickr30k_caption.py create mode 100644 mmpretrain/datasets/flickr30k_retrieval.py create mode 100644 mmpretrain/datasets/flowers102.py create mode 100644 mmpretrain/datasets/food101.py create mode 100644 mmpretrain/datasets/gqa_dataset.py create mode 100644 mmpretrain/datasets/iconqa.py create mode 100644 mmpretrain/datasets/imagenet.py create mode 100644 mmpretrain/datasets/infographic_vqa.py create mode 100644 mmpretrain/datasets/inshop.py create mode 100644 mmpretrain/datasets/minigpt4_dataset.py create mode 100644 mmpretrain/datasets/mnist.py create mode 100644 mmpretrain/datasets/multi_label.py create mode 100644 mmpretrain/datasets/multi_task.py create mode 100644 mmpretrain/datasets/nlvr2.py create mode 100644 mmpretrain/datasets/nocaps.py create mode 100644 mmpretrain/datasets/ocr_vqa.py create mode 100644 mmpretrain/datasets/oxfordiiitpet.py create mode 100644 mmpretrain/datasets/places205.py create mode 100644 mmpretrain/datasets/refcoco.py create mode 100644 mmpretrain/datasets/samplers/__init__.py create mode 100644 mmpretrain/datasets/samplers/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/datasets/samplers/__pycache__/repeat_aug.cpython-310.pyc create mode 100644 mmpretrain/datasets/samplers/__pycache__/sequential.cpython-310.pyc create mode 100644 mmpretrain/datasets/samplers/repeat_aug.py create mode 100644 mmpretrain/datasets/samplers/sequential.py create mode 100644 mmpretrain/datasets/scienceqa.py create mode 100644 mmpretrain/datasets/stanfordcars.py create mode 100644 mmpretrain/datasets/sun397.py create mode 100644 mmpretrain/datasets/textvqa.py create mode 100644 mmpretrain/datasets/transforms/__init__.py create mode 100644 mmpretrain/datasets/transforms/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/datasets/transforms/__pycache__/auto_augment.cpython-310.pyc create mode 100644 mmpretrain/datasets/transforms/__pycache__/formatting.cpython-310.pyc create mode 100644 mmpretrain/datasets/transforms/__pycache__/processing.cpython-310.pyc create mode 100644 mmpretrain/datasets/transforms/__pycache__/utils.cpython-310.pyc create mode 100644 mmpretrain/datasets/transforms/__pycache__/wrappers.cpython-310.pyc create mode 100644 mmpretrain/datasets/transforms/auto_augment.py create mode 100644 mmpretrain/datasets/transforms/formatting.py create mode 100644 mmpretrain/datasets/transforms/processing.py create mode 100644 mmpretrain/datasets/transforms/utils.py create mode 100644 mmpretrain/datasets/transforms/wrappers.py create mode 100644 mmpretrain/datasets/utils.py create mode 100644 mmpretrain/datasets/vg_vqa.py create mode 100644 mmpretrain/datasets/visual_genome.py create mode 100644 mmpretrain/datasets/vizwiz.py create mode 100644 mmpretrain/datasets/voc.py create mode 100644 mmpretrain/datasets/vsr.py create mode 100644 mmpretrain/engine/__init__.py create mode 100644 mmpretrain/engine/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/engine/hooks/__init__.py create mode 100644 mmpretrain/engine/hooks/__pycache__/__init__.cpython-310.pyc create mode 100644 
mmpretrain/engine/hooks/__pycache__/class_num_check_hook.cpython-310.pyc create mode 100644 mmpretrain/engine/hooks/__pycache__/densecl_hook.cpython-310.pyc create mode 100644 mmpretrain/engine/hooks/__pycache__/ema_hook.cpython-310.pyc create mode 100644 mmpretrain/engine/hooks/__pycache__/margin_head_hooks.cpython-310.pyc create mode 100644 mmpretrain/engine/hooks/__pycache__/precise_bn_hook.cpython-310.pyc create mode 100644 mmpretrain/engine/hooks/__pycache__/retriever_hooks.cpython-310.pyc create mode 100644 mmpretrain/engine/hooks/__pycache__/simsiam_hook.cpython-310.pyc create mode 100644 mmpretrain/engine/hooks/__pycache__/swav_hook.cpython-310.pyc create mode 100644 mmpretrain/engine/hooks/__pycache__/switch_recipe_hook.cpython-310.pyc create mode 100644 mmpretrain/engine/hooks/__pycache__/visualization_hook.cpython-310.pyc create mode 100644 mmpretrain/engine/hooks/__pycache__/warmup_param_hook.cpython-310.pyc create mode 100644 mmpretrain/engine/hooks/class_num_check_hook.py create mode 100644 mmpretrain/engine/hooks/densecl_hook.py create mode 100644 mmpretrain/engine/hooks/ema_hook.py create mode 100644 mmpretrain/engine/hooks/margin_head_hooks.py create mode 100644 mmpretrain/engine/hooks/precise_bn_hook.py create mode 100644 mmpretrain/engine/hooks/retriever_hooks.py create mode 100644 mmpretrain/engine/hooks/simsiam_hook.py create mode 100644 mmpretrain/engine/hooks/swav_hook.py create mode 100644 mmpretrain/engine/hooks/switch_recipe_hook.py create mode 100644 mmpretrain/engine/hooks/visualization_hook.py create mode 100644 mmpretrain/engine/hooks/warmup_param_hook.py create mode 100644 mmpretrain/engine/optimizers/__init__.py create mode 100644 mmpretrain/engine/optimizers/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/engine/optimizers/__pycache__/adan_t.cpython-310.pyc create mode 100644 mmpretrain/engine/optimizers/__pycache__/lamb.cpython-310.pyc create mode 100644 mmpretrain/engine/optimizers/__pycache__/lars.cpython-310.pyc create mode 100644 mmpretrain/engine/optimizers/__pycache__/layer_decay_optim_wrapper_constructor.cpython-310.pyc create mode 100644 mmpretrain/engine/optimizers/adan_t.py create mode 100644 mmpretrain/engine/optimizers/lamb.py create mode 100644 mmpretrain/engine/optimizers/lars.py create mode 100644 mmpretrain/engine/optimizers/layer_decay_optim_wrapper_constructor.py create mode 100644 mmpretrain/engine/runners/__init__.py create mode 100644 mmpretrain/engine/runners/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/engine/runners/__pycache__/retrieval_loop.cpython-310.pyc create mode 100644 mmpretrain/engine/runners/retrieval_loop.py create mode 100644 mmpretrain/engine/schedulers/__init__.py create mode 100644 mmpretrain/engine/schedulers/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/engine/schedulers/__pycache__/weight_decay_scheduler.cpython-310.pyc create mode 100644 mmpretrain/engine/schedulers/weight_decay_scheduler.py create mode 100644 mmpretrain/evaluation/__init__.py create mode 100644 mmpretrain/evaluation/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/evaluation/functional/__init__.py create mode 100644 mmpretrain/evaluation/functional/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/ANLS.py create mode 100644 mmpretrain/evaluation/metrics/__init__.py create mode 100644 mmpretrain/evaluation/metrics/__pycache__/ANLS.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/__pycache__/__init__.cpython-310.pyc 
create mode 100644 mmpretrain/evaluation/metrics/__pycache__/caption.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/__pycache__/gqa.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/__pycache__/multi_label.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/__pycache__/multi_task.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/__pycache__/nocaps.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/__pycache__/retrieval.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/__pycache__/scienceqa.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/__pycache__/shape_bias_label.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/__pycache__/single_label.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/__pycache__/visual_grounding_eval.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/__pycache__/voc_multi_label.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/__pycache__/vqa.cpython-310.pyc create mode 100644 mmpretrain/evaluation/metrics/caption.py create mode 100644 mmpretrain/evaluation/metrics/gqa.py create mode 100644 mmpretrain/evaluation/metrics/multi_label.py create mode 100644 mmpretrain/evaluation/metrics/multi_task.py create mode 100644 mmpretrain/evaluation/metrics/nocaps.py create mode 100644 mmpretrain/evaluation/metrics/retrieval.py create mode 100644 mmpretrain/evaluation/metrics/scienceqa.py create mode 100644 mmpretrain/evaluation/metrics/shape_bias_label.py create mode 100644 mmpretrain/evaluation/metrics/single_label.py create mode 100644 mmpretrain/evaluation/metrics/visual_grounding_eval.py create mode 100644 mmpretrain/evaluation/metrics/voc_multi_label.py create mode 100644 mmpretrain/evaluation/metrics/vqa.py create mode 100644 mmpretrain/models/__init__.py create mode 100644 mmpretrain/models/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/models/__pycache__/builder.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__init__.py create mode 100644 mmpretrain/models/backbones/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/alexnet.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/base_backbone.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/beit.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/conformer.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/convmixer.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/convnext.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/cspnet.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/davit.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/deit.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/deit3.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/densenet.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/edgenext.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/efficientformer.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/efficientnet.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/efficientnet_v2.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/hivit.cpython-310.pyc create mode 100644 
mmpretrain/models/backbones/__pycache__/hornet.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/hrnet.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/inception_v3.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/lenet.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/levit.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/mixmim.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/mlp_mixer.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/mobilenet_v2.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/mobilenet_v3.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/mobileone.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/mobilevit.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/mvit.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/poolformer.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/regnet.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/replknet.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/repmlp.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/repvgg.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/res2net.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/resnest.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/resnet.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/resnet_cifar.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/resnext.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/revvit.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/riformer.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/seresnet.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/seresnext.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/shufflenet_v1.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/shufflenet_v2.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/sparse_convnext.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/sparse_resnet.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/swin_transformer.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/swin_transformer_v2.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/t2t_vit.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/timm_backbone.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/tinyvit.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/tnt.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/twins.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/van.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/vgg.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/vig.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/vision_transformer.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/vit_eva02.cpython-310.pyc create mode 100644 
mmpretrain/models/backbones/__pycache__/vit_sam.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/__pycache__/xcit.cpython-310.pyc create mode 100644 mmpretrain/models/backbones/alexnet.py create mode 100644 mmpretrain/models/backbones/base_backbone.py create mode 100644 mmpretrain/models/backbones/beit.py create mode 100644 mmpretrain/models/backbones/conformer.py create mode 100644 mmpretrain/models/backbones/convmixer.py create mode 100644 mmpretrain/models/backbones/convnext.py create mode 100644 mmpretrain/models/backbones/cspnet.py create mode 100644 mmpretrain/models/backbones/davit.py create mode 100644 mmpretrain/models/backbones/deit.py create mode 100644 mmpretrain/models/backbones/deit3.py create mode 100644 mmpretrain/models/backbones/densenet.py create mode 100644 mmpretrain/models/backbones/edgenext.py create mode 100644 mmpretrain/models/backbones/efficientformer.py create mode 100644 mmpretrain/models/backbones/efficientnet.py create mode 100644 mmpretrain/models/backbones/efficientnet_v2.py create mode 100644 mmpretrain/models/backbones/hivit.py create mode 100644 mmpretrain/models/backbones/hornet.py create mode 100644 mmpretrain/models/backbones/hrnet.py create mode 100644 mmpretrain/models/backbones/inception_v3.py create mode 100644 mmpretrain/models/backbones/lenet.py create mode 100644 mmpretrain/models/backbones/levit.py create mode 100644 mmpretrain/models/backbones/mixmim.py create mode 100644 mmpretrain/models/backbones/mlp_mixer.py create mode 100644 mmpretrain/models/backbones/mobilenet_v2.py create mode 100644 mmpretrain/models/backbones/mobilenet_v3.py create mode 100644 mmpretrain/models/backbones/mobileone.py create mode 100644 mmpretrain/models/backbones/mobilevit.py create mode 100644 mmpretrain/models/backbones/mvit.py create mode 100644 mmpretrain/models/backbones/poolformer.py create mode 100644 mmpretrain/models/backbones/regnet.py create mode 100644 mmpretrain/models/backbones/replknet.py create mode 100644 mmpretrain/models/backbones/repmlp.py create mode 100644 mmpretrain/models/backbones/repvgg.py create mode 100644 mmpretrain/models/backbones/res2net.py create mode 100644 mmpretrain/models/backbones/resnest.py create mode 100644 mmpretrain/models/backbones/resnet.py create mode 100644 mmpretrain/models/backbones/resnet_cifar.py create mode 100644 mmpretrain/models/backbones/resnext.py create mode 100644 mmpretrain/models/backbones/revvit.py create mode 100644 mmpretrain/models/backbones/riformer.py create mode 100644 mmpretrain/models/backbones/seresnet.py create mode 100644 mmpretrain/models/backbones/seresnext.py create mode 100644 mmpretrain/models/backbones/shufflenet_v1.py create mode 100644 mmpretrain/models/backbones/shufflenet_v2.py create mode 100644 mmpretrain/models/backbones/sparse_convnext.py create mode 100644 mmpretrain/models/backbones/sparse_resnet.py create mode 100644 mmpretrain/models/backbones/swin_transformer.py create mode 100644 mmpretrain/models/backbones/swin_transformer_v2.py create mode 100644 mmpretrain/models/backbones/t2t_vit.py create mode 100644 mmpretrain/models/backbones/timm_backbone.py create mode 100644 mmpretrain/models/backbones/tinyvit.py create mode 100644 mmpretrain/models/backbones/tnt.py create mode 100644 mmpretrain/models/backbones/twins.py create mode 100644 mmpretrain/models/backbones/van.py create mode 100644 mmpretrain/models/backbones/vgg.py create mode 100644 mmpretrain/models/backbones/vig.py create mode 100644 mmpretrain/models/backbones/vision_transformer.py create mode 100644 
mmpretrain/models/backbones/vit_eva02.py create mode 100644 mmpretrain/models/backbones/vit_sam.py create mode 100644 mmpretrain/models/backbones/xcit.py create mode 100644 mmpretrain/models/builder.py create mode 100644 mmpretrain/models/classifiers/__init__.py create mode 100644 mmpretrain/models/classifiers/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/models/classifiers/__pycache__/base.cpython-310.pyc create mode 100644 mmpretrain/models/classifiers/__pycache__/hugging_face.cpython-310.pyc create mode 100644 mmpretrain/models/classifiers/__pycache__/image.cpython-310.pyc create mode 100644 mmpretrain/models/classifiers/__pycache__/timm.cpython-310.pyc create mode 100644 mmpretrain/models/classifiers/base.py create mode 100644 mmpretrain/models/classifiers/hugging_face.py create mode 100644 mmpretrain/models/classifiers/image.py create mode 100644 mmpretrain/models/classifiers/timm.py create mode 100644 mmpretrain/models/heads/__init__.py create mode 100644 mmpretrain/models/heads/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/beitv1_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/beitv2_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/cae_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/cls_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/conformer_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/contrastive_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/deit_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/efficientformer_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/grounding_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/itc_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/itm_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/itpn_clip_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/latent_heads.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/levit_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/linear_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/mae_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/margin_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/mim_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/mixmim_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/mocov3_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/multi_label_cls_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/multi_label_csra_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/multi_label_linear_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/multi_task_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/seq_gen_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/simmim_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/spark_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/stacked_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/swav_head.cpython-310.pyc create mode 100644 
mmpretrain/models/heads/__pycache__/vig_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/vision_transformer_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/__pycache__/vqa_head.cpython-310.pyc create mode 100644 mmpretrain/models/heads/beitv1_head.py create mode 100644 mmpretrain/models/heads/beitv2_head.py create mode 100644 mmpretrain/models/heads/cae_head.py create mode 100644 mmpretrain/models/heads/cls_head.py create mode 100644 mmpretrain/models/heads/conformer_head.py create mode 100644 mmpretrain/models/heads/contrastive_head.py create mode 100644 mmpretrain/models/heads/deit_head.py create mode 100644 mmpretrain/models/heads/efficientformer_head.py create mode 100644 mmpretrain/models/heads/grounding_head.py create mode 100644 mmpretrain/models/heads/itc_head.py create mode 100644 mmpretrain/models/heads/itm_head.py create mode 100644 mmpretrain/models/heads/itpn_clip_head.py create mode 100644 mmpretrain/models/heads/latent_heads.py create mode 100644 mmpretrain/models/heads/levit_head.py create mode 100644 mmpretrain/models/heads/linear_head.py create mode 100644 mmpretrain/models/heads/mae_head.py create mode 100644 mmpretrain/models/heads/margin_head.py create mode 100644 mmpretrain/models/heads/mim_head.py create mode 100644 mmpretrain/models/heads/mixmim_head.py create mode 100644 mmpretrain/models/heads/mocov3_head.py create mode 100644 mmpretrain/models/heads/multi_label_cls_head.py create mode 100644 mmpretrain/models/heads/multi_label_csra_head.py create mode 100644 mmpretrain/models/heads/multi_label_linear_head.py create mode 100644 mmpretrain/models/heads/multi_task_head.py create mode 100644 mmpretrain/models/heads/seq_gen_head.py create mode 100644 mmpretrain/models/heads/simmim_head.py create mode 100644 mmpretrain/models/heads/spark_head.py create mode 100644 mmpretrain/models/heads/stacked_head.py create mode 100644 mmpretrain/models/heads/swav_head.py create mode 100644 mmpretrain/models/heads/vig_head.py create mode 100644 mmpretrain/models/heads/vision_transformer_head.py create mode 100644 mmpretrain/models/heads/vqa_head.py create mode 100644 mmpretrain/models/losses/__init__.py create mode 100644 mmpretrain/models/losses/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/models/losses/__pycache__/asymmetric_loss.cpython-310.pyc create mode 100644 mmpretrain/models/losses/__pycache__/cae_loss.cpython-310.pyc create mode 100644 mmpretrain/models/losses/__pycache__/cosine_similarity_loss.cpython-310.pyc create mode 100644 mmpretrain/models/losses/__pycache__/cross_correlation_loss.cpython-310.pyc create mode 100644 mmpretrain/models/losses/__pycache__/cross_entropy_loss.cpython-310.pyc create mode 100644 mmpretrain/models/losses/__pycache__/focal_loss.cpython-310.pyc create mode 100644 mmpretrain/models/losses/__pycache__/label_smooth_loss.cpython-310.pyc create mode 100644 mmpretrain/models/losses/__pycache__/reconstruction_loss.cpython-310.pyc create mode 100644 mmpretrain/models/losses/__pycache__/seesaw_loss.cpython-310.pyc create mode 100644 mmpretrain/models/losses/__pycache__/swav_loss.cpython-310.pyc create mode 100644 mmpretrain/models/losses/__pycache__/utils.cpython-310.pyc create mode 100644 mmpretrain/models/losses/asymmetric_loss.py create mode 100644 mmpretrain/models/losses/cae_loss.py create mode 100644 mmpretrain/models/losses/cosine_similarity_loss.py create mode 100644 mmpretrain/models/losses/cross_correlation_loss.py create mode 100644 
mmpretrain/models/losses/cross_entropy_loss.py create mode 100644 mmpretrain/models/losses/focal_loss.py create mode 100644 mmpretrain/models/losses/label_smooth_loss.py create mode 100644 mmpretrain/models/losses/reconstruction_loss.py create mode 100644 mmpretrain/models/losses/seesaw_loss.py create mode 100644 mmpretrain/models/losses/swav_loss.py create mode 100644 mmpretrain/models/losses/utils.py create mode 100644 mmpretrain/models/multimodal/__init__.py create mode 100644 mmpretrain/models/multimodal/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/models/multimodal/blip/__init__.py create mode 100644 mmpretrain/models/multimodal/blip/blip_caption.py create mode 100644 mmpretrain/models/multimodal/blip/blip_grounding.py create mode 100644 mmpretrain/models/multimodal/blip/blip_nlvr.py create mode 100644 mmpretrain/models/multimodal/blip/blip_retrieval.py create mode 100644 mmpretrain/models/multimodal/blip/blip_vqa.py create mode 100644 mmpretrain/models/multimodal/blip/language_model.py create mode 100644 mmpretrain/models/multimodal/blip2/Qformer.py create mode 100644 mmpretrain/models/multimodal/blip2/__init__.py create mode 100644 mmpretrain/models/multimodal/blip2/blip2_caption.py create mode 100644 mmpretrain/models/multimodal/blip2/blip2_opt_vqa.py create mode 100644 mmpretrain/models/multimodal/blip2/blip2_retriever.py create mode 100644 mmpretrain/models/multimodal/blip2/modeling_opt.py create mode 100644 mmpretrain/models/multimodal/chinese_clip/__init__.py create mode 100644 mmpretrain/models/multimodal/chinese_clip/bert.py create mode 100644 mmpretrain/models/multimodal/chinese_clip/chinese_clip.py create mode 100644 mmpretrain/models/multimodal/chinese_clip/utils.py create mode 100644 mmpretrain/models/multimodal/clip/__init__.py create mode 100644 mmpretrain/models/multimodal/clip/clip.py create mode 100644 mmpretrain/models/multimodal/clip/clip_transformer.py create mode 100644 mmpretrain/models/multimodal/clip/utils.py create mode 100644 mmpretrain/models/multimodal/flamingo/__init__.py create mode 100644 mmpretrain/models/multimodal/flamingo/adapter.py create mode 100644 mmpretrain/models/multimodal/flamingo/flamingo.py create mode 100644 mmpretrain/models/multimodal/flamingo/modules.py create mode 100644 mmpretrain/models/multimodal/flamingo/utils.py create mode 100644 mmpretrain/models/multimodal/llava/__init__.py create mode 100644 mmpretrain/models/multimodal/llava/llava.py create mode 100644 mmpretrain/models/multimodal/llava/modules.py create mode 100644 mmpretrain/models/multimodal/minigpt4/__init__.py create mode 100644 mmpretrain/models/multimodal/minigpt4/minigpt4.py create mode 100644 mmpretrain/models/multimodal/ofa/__init__.py create mode 100644 mmpretrain/models/multimodal/ofa/ofa.py create mode 100644 mmpretrain/models/multimodal/ofa/ofa_modules.py create mode 100644 mmpretrain/models/multimodal/otter/__init__.py create mode 100644 mmpretrain/models/multimodal/otter/otter.py create mode 100644 mmpretrain/models/multimodal/ram/__init__.py create mode 100644 mmpretrain/models/multimodal/ram/bert.py create mode 100644 mmpretrain/models/multimodal/ram/config/__init__.py create mode 100644 mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py create mode 100644 mmpretrain/models/multimodal/ram/data/ram_tag_list.pickle create mode 100644 mmpretrain/models/multimodal/ram/data/ram_tag_list_chinese.pickle create mode 100644 mmpretrain/models/multimodal/ram/data/ram_tag_list_threshold.pickle create mode 100644 
mmpretrain/models/multimodal/ram/gradio_demo.py create mode 100644 mmpretrain/models/multimodal/ram/openset_utils.py create mode 100644 mmpretrain/models/multimodal/ram/ram.py create mode 100644 mmpretrain/models/multimodal/ram/run/__init__.py create mode 100644 mmpretrain/models/multimodal/ram/run/inference.py create mode 100644 mmpretrain/models/multimodal/ram/utils.py create mode 100644 mmpretrain/models/necks/__init__.py create mode 100644 mmpretrain/models/necks/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/beitv2_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/cae_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/densecl_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/gap.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/gem.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/hr_fuse.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/itpn_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/linear_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/mae_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/milan_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/mixmim_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/mocov2_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/nonlinear_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/simmim_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/spark_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/__pycache__/swav_neck.cpython-310.pyc create mode 100644 mmpretrain/models/necks/beitv2_neck.py create mode 100644 mmpretrain/models/necks/cae_neck.py create mode 100644 mmpretrain/models/necks/densecl_neck.py create mode 100644 mmpretrain/models/necks/gap.py create mode 100644 mmpretrain/models/necks/gem.py create mode 100644 mmpretrain/models/necks/hr_fuse.py create mode 100644 mmpretrain/models/necks/itpn_neck.py create mode 100644 mmpretrain/models/necks/linear_neck.py create mode 100644 mmpretrain/models/necks/mae_neck.py create mode 100644 mmpretrain/models/necks/milan_neck.py create mode 100644 mmpretrain/models/necks/mixmim_neck.py create mode 100644 mmpretrain/models/necks/mocov2_neck.py create mode 100644 mmpretrain/models/necks/nonlinear_neck.py create mode 100644 mmpretrain/models/necks/simmim_neck.py create mode 100644 mmpretrain/models/necks/spark_neck.py create mode 100644 mmpretrain/models/necks/swav_neck.py create mode 100644 mmpretrain/models/peft/__init__.py create mode 100644 mmpretrain/models/peft/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/models/peft/__pycache__/lora.cpython-310.pyc create mode 100644 mmpretrain/models/peft/lora.py create mode 100644 mmpretrain/models/retrievers/__init__.py create mode 100644 mmpretrain/models/retrievers/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/models/retrievers/__pycache__/base.cpython-310.pyc create mode 100644 mmpretrain/models/retrievers/__pycache__/image2image.cpython-310.pyc create mode 100644 mmpretrain/models/retrievers/base.py create mode 100644 mmpretrain/models/retrievers/image2image.py create mode 100644 mmpretrain/models/selfsup/__init__.py create mode 100644 mmpretrain/models/selfsup/__pycache__/__init__.cpython-310.pyc create mode 
100644 mmpretrain/models/selfsup/__pycache__/barlowtwins.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/base.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/beit.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/byol.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/cae.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/densecl.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/eva.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/itpn.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/mae.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/maskfeat.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/mff.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/milan.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/mixmim.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/moco.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/mocov3.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/simclr.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/simmim.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/simsiam.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/spark.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/__pycache__/swav.cpython-310.pyc create mode 100644 mmpretrain/models/selfsup/barlowtwins.py create mode 100644 mmpretrain/models/selfsup/base.py create mode 100644 mmpretrain/models/selfsup/beit.py create mode 100644 mmpretrain/models/selfsup/byol.py create mode 100644 mmpretrain/models/selfsup/cae.py create mode 100644 mmpretrain/models/selfsup/densecl.py create mode 100644 mmpretrain/models/selfsup/eva.py create mode 100644 mmpretrain/models/selfsup/itpn.py create mode 100644 mmpretrain/models/selfsup/mae.py create mode 100644 mmpretrain/models/selfsup/maskfeat.py create mode 100644 mmpretrain/models/selfsup/mff.py create mode 100644 mmpretrain/models/selfsup/milan.py create mode 100644 mmpretrain/models/selfsup/mixmim.py create mode 100644 mmpretrain/models/selfsup/moco.py create mode 100644 mmpretrain/models/selfsup/mocov3.py create mode 100644 mmpretrain/models/selfsup/simclr.py create mode 100644 mmpretrain/models/selfsup/simmim.py create mode 100644 mmpretrain/models/selfsup/simsiam.py create mode 100644 mmpretrain/models/selfsup/spark.py create mode 100644 mmpretrain/models/selfsup/swav.py create mode 100644 mmpretrain/models/tta/__init__.py create mode 100644 mmpretrain/models/tta/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/models/tta/__pycache__/score_tta.cpython-310.pyc create mode 100644 mmpretrain/models/tta/score_tta.py create mode 100644 mmpretrain/models/utils/__init__.py create mode 100644 mmpretrain/models/utils/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/attention.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/batch_shuffle.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/box_utils.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/channel_shuffle.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/clip_generator_helper.cpython-310.pyc create mode 100644 
mmpretrain/models/utils/__pycache__/data_preprocessor.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/ema.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/embed.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/helpers.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/inverted_residual.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/layer_scale.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/make_divisible.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/norm.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/position_encoding.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/res_layer_extra_norm.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/se_layer.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/sparse_modules.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/swiglu_ffn.cpython-310.pyc create mode 100644 mmpretrain/models/utils/__pycache__/vector_quantizer.cpython-310.pyc create mode 100644 mmpretrain/models/utils/attention.py create mode 100644 mmpretrain/models/utils/batch_augments/__init__.py create mode 100644 mmpretrain/models/utils/batch_augments/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/models/utils/batch_augments/__pycache__/cutmix.cpython-310.pyc create mode 100644 mmpretrain/models/utils/batch_augments/__pycache__/mixup.cpython-310.pyc create mode 100644 mmpretrain/models/utils/batch_augments/__pycache__/resizemix.cpython-310.pyc create mode 100644 mmpretrain/models/utils/batch_augments/__pycache__/wrapper.cpython-310.pyc create mode 100644 mmpretrain/models/utils/batch_augments/cutmix.py create mode 100644 mmpretrain/models/utils/batch_augments/mixup.py create mode 100644 mmpretrain/models/utils/batch_augments/resizemix.py create mode 100644 mmpretrain/models/utils/batch_augments/wrapper.py create mode 100644 mmpretrain/models/utils/batch_shuffle.py create mode 100644 mmpretrain/models/utils/box_utils.py create mode 100644 mmpretrain/models/utils/channel_shuffle.py create mode 100644 mmpretrain/models/utils/clip_generator_helper.py create mode 100644 mmpretrain/models/utils/data_preprocessor.py create mode 100644 mmpretrain/models/utils/ema.py create mode 100644 mmpretrain/models/utils/embed.py create mode 100644 mmpretrain/models/utils/helpers.py create mode 100644 mmpretrain/models/utils/huggingface.py create mode 100644 mmpretrain/models/utils/inverted_residual.py create mode 100644 mmpretrain/models/utils/layer_scale.py create mode 100644 mmpretrain/models/utils/make_divisible.py create mode 100644 mmpretrain/models/utils/norm.py create mode 100644 mmpretrain/models/utils/position_encoding.py create mode 100644 mmpretrain/models/utils/res_layer_extra_norm.py create mode 100644 mmpretrain/models/utils/se_layer.py create mode 100644 mmpretrain/models/utils/sparse_modules.py create mode 100644 mmpretrain/models/utils/swiglu_ffn.py create mode 100644 mmpretrain/models/utils/tokenizer.py create mode 100644 mmpretrain/models/utils/vector_quantizer.py create mode 100644 mmpretrain/registry.py create mode 100644 mmpretrain/structures/__init__.py create mode 100644 mmpretrain/structures/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/structures/__pycache__/data_sample.cpython-310.pyc create mode 100644 mmpretrain/structures/__pycache__/multi_task_data_sample.cpython-310.pyc 
create mode 100644 mmpretrain/structures/__pycache__/utils.cpython-310.pyc create mode 100644 mmpretrain/structures/data_sample.py create mode 100644 mmpretrain/structures/multi_task_data_sample.py create mode 100644 mmpretrain/structures/utils.py create mode 100644 mmpretrain/utils/__init__.py create mode 100644 mmpretrain/utils/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/utils/__pycache__/analyze.cpython-310.pyc create mode 100644 mmpretrain/utils/__pycache__/collect_env.cpython-310.pyc create mode 100644 mmpretrain/utils/__pycache__/dependency.cpython-310.pyc create mode 100644 mmpretrain/utils/__pycache__/misc.cpython-310.pyc create mode 100644 mmpretrain/utils/__pycache__/progress.cpython-310.pyc create mode 100644 mmpretrain/utils/__pycache__/setup_env.cpython-310.pyc create mode 100644 mmpretrain/utils/analyze.py create mode 100644 mmpretrain/utils/collect_env.py create mode 100644 mmpretrain/utils/dependency.py create mode 100644 mmpretrain/utils/misc.py create mode 100644 mmpretrain/utils/progress.py create mode 100644 mmpretrain/utils/setup_env.py create mode 100644 mmpretrain/version.py create mode 100644 mmpretrain/visualization/__init__.py create mode 100644 mmpretrain/visualization/__pycache__/__init__.cpython-310.pyc create mode 100644 mmpretrain/visualization/__pycache__/utils.cpython-310.pyc create mode 100644 mmpretrain/visualization/__pycache__/visualizer.cpython-310.pyc create mode 100644 mmpretrain/visualization/utils.py create mode 100644 mmpretrain/visualization/visualizer.py create mode 100644 model-index.yml create mode 100644 projects/README.md create mode 100644 projects/dino/README.md create mode 100644 projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py create mode 100644 projects/dino/dataset/__init__.py create mode 100644 projects/dino/dataset/transform/__init__.py create mode 100644 projects/dino/dataset/transform/processing.py create mode 100644 projects/dino/engine/__init__.py create mode 100644 projects/dino/engine/hooks/__init__.py create mode 100644 projects/dino/engine/hooks/dino_teacher_temp_warmup_hook.py create mode 100644 projects/dino/models/__init__.py create mode 100644 projects/dino/models/algorithm/__init__.py create mode 100644 projects/dino/models/algorithm/dino.py create mode 100644 projects/dino/models/head/__init__.py create mode 100644 projects/dino/models/head/dino_head.py create mode 100644 projects/dino/models/neck/__init__.py create mode 100644 projects/dino/models/neck/dino_neck.py create mode 100644 projects/dino/tools/dist_train.sh create mode 100644 projects/dino/tools/slurm_train.sh create mode 100644 projects/dino/tools/train.py create mode 100644 projects/example_project/README.md create mode 100644 projects/example_project/configs/examplenet_8xb32_in1k.py create mode 100644 projects/example_project/models/__init__.py create mode 100644 projects/example_project/models/example_net.py create mode 100644 projects/fgia_accv2022_1st/README.md create mode 100644 projects/fgia_accv2022_1st/config/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py create mode 100644 projects/gradio_demo/README.md create mode 100644 projects/gradio_demo/conversation.py create mode 100644 projects/gradio_demo/launch.py create mode 100644 projects/gradio_demo/minigpt4_demo.py create mode 100644 projects/internimage_classification/README.md create mode 100644 projects/internimage_classification/configs/_base_.py create mode 100644 projects/internimage_classification/configs/internimage-base_8xb128_in1k-224.py create 
mode 100644 projects/internimage_classification/configs/internimage-giant_8xb128_in1k-512.py create mode 100644 projects/internimage_classification/configs/internimage-huge_8xb128_in1k-640.py create mode 100644 projects/internimage_classification/configs/internimage-large_8xb128_in1k-384.py create mode 100644 projects/internimage_classification/configs/internimage-small_8xb128_in1k-224.py create mode 100644 projects/internimage_classification/configs/internimage-tiny_8xb128_in1k-224.py create mode 100644 projects/internimage_classification/configs/internimage-xlagre_8xb128_in1k-384.py create mode 100644 projects/internimage_classification/models/__init__.py create mode 100644 projects/internimage_classification/models/intern_image.py create mode 100644 projects/internimage_classification/ops_dcnv3/functions/__init__.py create mode 100644 projects/internimage_classification/ops_dcnv3/functions/dcnv3_func.py create mode 100644 projects/internimage_classification/ops_dcnv3/make.sh create mode 100644 projects/internimage_classification/ops_dcnv3/modules/__init__.py create mode 100644 projects/internimage_classification/ops_dcnv3/modules/dcnv3.py create mode 100644 projects/internimage_classification/ops_dcnv3/setup.py create mode 100644 projects/internimage_classification/ops_dcnv3/src/cpu/dcnv3_cpu.cpp create mode 100644 projects/internimage_classification/ops_dcnv3/src/cpu/dcnv3_cpu.h create mode 100644 projects/internimage_classification/ops_dcnv3/src/cuda/dcnv3_cuda.cu create mode 100644 projects/internimage_classification/ops_dcnv3/src/cuda/dcnv3_cuda.h create mode 100644 projects/internimage_classification/ops_dcnv3/src/cuda/dcnv3_im2col_cuda.cuh create mode 100644 projects/internimage_classification/ops_dcnv3/src/dcnv3.h create mode 100644 projects/internimage_classification/ops_dcnv3/src/vision.cpp create mode 100644 projects/internimage_classification/ops_dcnv3/test.py create mode 100644 projects/maskfeat_video/README.md create mode 100644 projects/maskfeat_video/configs/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400.py create mode 100644 projects/maskfeat_video/configs/maskfeat_mvit-small_8xb32-amp-coslr-300e_k400.py create mode 100644 projects/maskfeat_video/configs/mvit-small_ft-8xb16-coslr-100e_k400.py create mode 100644 projects/maskfeat_video/models/__init__.py create mode 100644 projects/maskfeat_video/models/hog_generator_3d.py create mode 100644 projects/maskfeat_video/models/maskfeat.py create mode 100644 projects/maskfeat_video/models/maskfeat_mvit.py create mode 100644 projects/maskfeat_video/models/transforms.py create mode 100644 projects/maskfeat_video/tools/dist_train.sh create mode 100644 projects/maskfeat_video/tools/slurm_train.sh create mode 100644 projects/maskfeat_video/tools/train.py create mode 100644 requirements.txt create mode 100644 requirements/docs.txt create mode 100644 requirements/mminstall.txt create mode 100644 requirements/multimodal.txt create mode 100644 requirements/optional.txt create mode 100644 requirements/readthedocs.txt create mode 100644 requirements/runtime.txt create mode 100644 requirements/tests.txt create mode 100644 resnet50-test.py create mode 100644 resnet50_imagenet200_8b32.py create mode 100644 resources/miaomiao_qrcode.jpg create mode 100644 resources/mmpt-logo.png create mode 100644 resources/xiaozhushou_weixin_qrcode.jpeg create mode 100644 resources/zhihu_qrcode.jpg create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 swin-b-test.py create mode 100644 swin-l-test.py create mode 100644 tests/__init__.py 
create mode 100644 tests/data/color.jpg create mode 100644 tests/data/dataset/3.jpeg create mode 100644 tests/data/dataset/a/1.JPG create mode 100644 tests/data/dataset/ann.json create mode 100644 tests/data/dataset/ann.txt create mode 100644 tests/data/dataset/ann_without_labels.txt create mode 100644 tests/data/dataset/b/2.jpeg create mode 100644 tests/data/dataset/b/subb/3.jpg create mode 100644 tests/data/dataset/classes.txt create mode 100644 tests/data/dataset/multi-task.json create mode 100644 tests/data/dataset/multi_label_ann.json create mode 100644 tests/data/gray.jpg create mode 100644 tests/data/meta.yml create mode 100644 tests/data/retinanet.py create mode 100644 tests/data/vis_data.json create mode 100644 tests/test_apis/test_inference.py create mode 100644 tests/test_apis/test_model.py create mode 100644 tests/test_datasets/test_dataset_utils.py create mode 100644 tests/test_datasets/test_datasets.py create mode 100644 tests/test_datasets/test_samplers/test_repeat_aug.py create mode 100644 tests/test_datasets/test_transforms/test_auto_augment.py create mode 100644 tests/test_datasets/test_transforms/test_formatting.py create mode 100644 tests/test_datasets/test_transforms/test_processing.py create mode 100644 tests/test_datasets/test_transforms/test_wrappers.py create mode 100644 tests/test_engine/test_hooks/test_arcface_hooks.py create mode 100644 tests/test_engine/test_hooks/test_class_num_check_hook.py create mode 100644 tests/test_engine/test_hooks/test_densecl_hook.py create mode 100644 tests/test_engine/test_hooks/test_ema_hook.py create mode 100644 tests/test_engine/test_hooks/test_precise_bn_hook.py create mode 100644 tests/test_engine/test_hooks/test_retrievers_hooks.py create mode 100644 tests/test_engine/test_hooks/test_simsiam_hook.py create mode 100644 tests/test_engine/test_hooks/test_swav_hook.py create mode 100644 tests/test_engine/test_hooks/test_switch_recipe_hook.py create mode 100644 tests/test_engine/test_hooks/test_visualization_hook.py create mode 100644 tests/test_engine/test_optimizers/test_layer_decay_optim_wrapper_constructor.py create mode 100644 tests/test_evaluation/test_metrics/test_gqa.py create mode 100644 tests/test_evaluation/test_metrics/test_metric_utils.py create mode 100644 tests/test_evaluation/test_metrics/test_multi_label.py create mode 100644 tests/test_evaluation/test_metrics/test_multi_task_metrics.py create mode 100644 tests/test_evaluation/test_metrics/test_retrieval.py create mode 100644 tests/test_evaluation/test_metrics/test_scienceqa.py create mode 100644 tests/test_evaluation/test_metrics/test_shape_bias_metric.py create mode 100644 tests/test_evaluation/test_metrics/test_single_label.py create mode 100644 tests/test_evaluation/test_metrics/test_voc_metrics.py create mode 100644 tests/test_models/test_backbones/__init__.py create mode 100644 tests/test_models/test_backbones/test_beit.py create mode 100644 tests/test_models/test_backbones/test_conformer.py create mode 100644 tests/test_models/test_backbones/test_convmixer.py create mode 100644 tests/test_models/test_backbones/test_convnext.py create mode 100644 tests/test_models/test_backbones/test_cspnet.py create mode 100644 tests/test_models/test_backbones/test_davit.py create mode 100644 tests/test_models/test_backbones/test_deit.py create mode 100644 tests/test_models/test_backbones/test_deit3.py create mode 100644 tests/test_models/test_backbones/test_densenet.py create mode 100644 tests/test_models/test_backbones/test_edgenext.py create mode 100644 
tests/test_models/test_backbones/test_efficientformer.py create mode 100644 tests/test_models/test_backbones/test_efficientnet.py create mode 100644 tests/test_models/test_backbones/test_efficientnet_v2.py create mode 100644 tests/test_models/test_backbones/test_eva02.py create mode 100644 tests/test_models/test_backbones/test_hornet.py create mode 100644 tests/test_models/test_backbones/test_hrnet.py create mode 100644 tests/test_models/test_backbones/test_inception_v3.py create mode 100644 tests/test_models/test_backbones/test_levit.py create mode 100644 tests/test_models/test_backbones/test_mixmim.py create mode 100644 tests/test_models/test_backbones/test_mlp_mixer.py create mode 100644 tests/test_models/test_backbones/test_mobilenet_v2.py create mode 100644 tests/test_models/test_backbones/test_mobilenet_v3.py create mode 100644 tests/test_models/test_backbones/test_mobileone.py create mode 100644 tests/test_models/test_backbones/test_mobilevit.py create mode 100644 tests/test_models/test_backbones/test_mvit.py create mode 100644 tests/test_models/test_backbones/test_poolformer.py create mode 100644 tests/test_models/test_backbones/test_regnet.py create mode 100644 tests/test_models/test_backbones/test_replknet.py create mode 100644 tests/test_models/test_backbones/test_repmlp.py create mode 100644 tests/test_models/test_backbones/test_repvgg.py create mode 100644 tests/test_models/test_backbones/test_res2net.py create mode 100644 tests/test_models/test_backbones/test_resnest.py create mode 100644 tests/test_models/test_backbones/test_resnet.py create mode 100644 tests/test_models/test_backbones/test_resnet_cifar.py create mode 100644 tests/test_models/test_backbones/test_resnext.py create mode 100644 tests/test_models/test_backbones/test_revvit.py create mode 100644 tests/test_models/test_backbones/test_riformer.py create mode 100644 tests/test_models/test_backbones/test_seresnet.py create mode 100644 tests/test_models/test_backbones/test_seresnext.py create mode 100644 tests/test_models/test_backbones/test_shufflenet_v1.py create mode 100644 tests/test_models/test_backbones/test_shufflenet_v2.py create mode 100644 tests/test_models/test_backbones/test_swin_transformer.py create mode 100644 tests/test_models/test_backbones/test_swin_transformer_v2.py create mode 100644 tests/test_models/test_backbones/test_t2t_vit.py create mode 100644 tests/test_models/test_backbones/test_timm_backbone.py create mode 100644 tests/test_models/test_backbones/test_tinyvit.py create mode 100644 tests/test_models/test_backbones/test_tnt.py create mode 100644 tests/test_models/test_backbones/test_twins.py create mode 100644 tests/test_models/test_backbones/test_van.py create mode 100644 tests/test_models/test_backbones/test_vgg.py create mode 100644 tests/test_models/test_backbones/test_vision_transformer.py create mode 100644 tests/test_models/test_backbones/test_xcit.py create mode 100644 tests/test_models/test_backbones/utils.py create mode 100644 tests/test_models/test_classifiers.py create mode 100644 tests/test_models/test_heads.py create mode 100644 tests/test_models/test_losses.py create mode 100644 tests/test_models/test_models.py create mode 100644 tests/test_models/test_necks.py create mode 100644 tests/test_models/test_peft/test_lora.py create mode 100644 tests/test_models/test_retrievers.py create mode 100644 tests/test_models/test_selfsup/test_barlowtwins.py create mode 100644 tests/test_models/test_selfsup/test_beit.py create mode 100644 tests/test_models/test_selfsup/test_byol.py create 
mode 100644 tests/test_models/test_selfsup/test_cae.py create mode 100644 tests/test_models/test_selfsup/test_densecl.py create mode 100644 tests/test_models/test_selfsup/test_eva.py create mode 100644 tests/test_models/test_selfsup/test_itpn.py create mode 100644 tests/test_models/test_selfsup/test_mae.py create mode 100644 tests/test_models/test_selfsup/test_maskfeat.py create mode 100644 tests/test_models/test_selfsup/test_mff.py create mode 100644 tests/test_models/test_selfsup/test_milan.py create mode 100644 tests/test_models/test_selfsup/test_mixmim.py create mode 100644 tests/test_models/test_selfsup/test_moco.py create mode 100644 tests/test_models/test_selfsup/test_mocov3.py create mode 100644 tests/test_models/test_selfsup/test_simclr.py create mode 100644 tests/test_models/test_selfsup/test_simmim.py create mode 100644 tests/test_models/test_selfsup/test_simsiam.py create mode 100644 tests/test_models/test_selfsup/test_spark.py create mode 100644 tests/test_models/test_selfsup/test_swav.py create mode 100644 tests/test_models/test_selfsup/test_target_generators.py create mode 100644 tests/test_models/test_tta.py create mode 100644 tests/test_models/test_utils/test_attention.py create mode 100644 tests/test_models/test_utils/test_batch_augments.py create mode 100644 tests/test_models/test_utils/test_data_preprocessor.py create mode 100644 tests/test_models/test_utils/test_ema.py create mode 100644 tests/test_models/test_utils/test_embed.py create mode 100644 tests/test_models/test_utils/test_inverted_residual.py create mode 100644 tests/test_models/test_utils/test_layer_scale.py create mode 100644 tests/test_models/test_utils/test_misc.py create mode 100644 tests/test_models/test_utils/test_norm.py create mode 100644 tests/test_models/test_utils/test_position_encoding.py create mode 100644 tests/test_models/test_utils/test_se.py create mode 100644 tests/test_models/test_utils/test_swiglu_ffn.py create mode 100644 tests/test_structures/test_datasample.py create mode 100644 tests/test_structures/test_utils.py create mode 100644 tests/test_tools.py create mode 100644 tests/test_utils/test_analyze.py create mode 100644 tests/test_utils/test_setup_env.py create mode 100644 tests/test_utils/test_version_utils.py create mode 100644 tests/test_visualization/test_visualizer.py create mode 100644 tools/analysis_tools/analyze_logs.py create mode 100644 tools/analysis_tools/analyze_results.py create mode 100644 tools/analysis_tools/confusion_matrix.py create mode 100644 tools/analysis_tools/eval_metric.py create mode 100644 tools/analysis_tools/get_flops.py create mode 100644 tools/analysis_tools/shape_bias.py create mode 100644 tools/analysis_tools/utils.py create mode 100644 tools/benchmarks/mmdetection/mim_dist_test.sh create mode 100644 tools/benchmarks/mmdetection/mim_dist_train_c4.sh create mode 100644 tools/benchmarks/mmdetection/mim_dist_train_fpn.sh create mode 100644 tools/benchmarks/mmdetection/mim_slurm_test.sh create mode 100644 tools/benchmarks/mmdetection/mim_slurm_train_c4.sh create mode 100644 tools/benchmarks/mmdetection/mim_slurm_train_fpn.sh create mode 100644 tools/benchmarks/mmsegmentation/mim_dist_test.sh create mode 100644 tools/benchmarks/mmsegmentation/mim_dist_train.sh create mode 100644 tools/benchmarks/mmsegmentation/mim_slurm_test.sh create mode 100644 tools/benchmarks/mmsegmentation/mim_slurm_train.sh create mode 100644 tools/dataset_converters/convert_flickr30k_ann.py create mode 100644 tools/dataset_converters/convert_imagenet_subsets.py create mode 100644 
tools/dataset_converters/convert_inaturalist.py create mode 100644 tools/dataset_converters/odl_cub_preprocess.sh create mode 100644 tools/dataset_converters/odl_imagenet1k_preprocess.sh create mode 100644 tools/dist_test.sh create mode 100644 tools/dist_train.sh create mode 100644 tools/kfold-cross-valid.py create mode 100644 tools/misc/print_config.py create mode 100644 tools/misc/verify_dataset.py create mode 100644 tools/model_converters/clip_to_mmpretrain.py create mode 100644 tools/model_converters/convnext_to_mmpretrain.py create mode 100644 tools/model_converters/davit_to_mmpretrain.py create mode 100644 tools/model_converters/deit3_to_mmpretrain.py create mode 100644 tools/model_converters/edgenext_to_mmpretrain.py create mode 100644 tools/model_converters/efficientnet_to_mmpretrain.py create mode 100644 tools/model_converters/efficientnetv2_to_mmpretrain.py create mode 100644 tools/model_converters/eva02_to_mmpretrain.py create mode 100644 tools/model_converters/eva_to_mmpretrain.py create mode 100644 tools/model_converters/glip_to_mmpretrain.py create mode 100644 tools/model_converters/hornet2mmpretrain.py create mode 100644 tools/model_converters/levit2mmpretrain.py create mode 100644 tools/model_converters/llava-delta2mmpre.py create mode 100644 tools/model_converters/merge_lora_weight.py create mode 100644 tools/model_converters/mixmim_to_mmpretrain.py create mode 100644 tools/model_converters/mlpmixer_to_mmpretrain.py create mode 100644 tools/model_converters/mobilenetv2_to_mmpretrain.py create mode 100644 tools/model_converters/ofa.py create mode 100644 tools/model_converters/openai-clip_to_mmpretrain-clip.py create mode 100644 tools/model_converters/otter2mmpre.py create mode 100644 tools/model_converters/publish_model.py create mode 100644 tools/model_converters/ram2mmpretrain.py create mode 100644 tools/model_converters/reparameterize_model.py create mode 100644 tools/model_converters/replknet_to_mmpretrain.py create mode 100644 tools/model_converters/repvgg_to_mmpretrain.py create mode 100644 tools/model_converters/revvit_to_mmpretrain.py create mode 100644 tools/model_converters/shufflenetv2_to_mmpretrain.py create mode 100644 tools/model_converters/tinyvit_to_mmpretrain.py create mode 100644 tools/model_converters/torchvision_to_mmpretrain.py create mode 100644 tools/model_converters/twins2mmpretrain.py create mode 100644 tools/model_converters/van2mmpretrain.py create mode 100644 tools/model_converters/vgg_to_mmpretrain.py create mode 100644 tools/model_converters/vig_to_mmpretrain.py create mode 100644 tools/slurm_test.sh create mode 100644 tools/slurm_train.sh create mode 100644 tools/test.py create mode 100644 tools/torchserve/mmpretrain2torchserve.py create mode 100644 tools/torchserve/mmpretrain_handler.py create mode 100644 tools/torchserve/test_torchserver.py create mode 100644 tools/train.py create mode 100644 tools/visualization/browse_dataset.py create mode 100644 tools/visualization/vis_cam.py create mode 100644 tools/visualization/vis_scheduler.py create mode 100644 tools/visualization/vis_tsne.py create mode 100644 vgg16_8xb32_in1k.py create mode 100644 vit-base-p16_32xb128-mae_in200.py create mode 100644 vit-large-p16-64xb64-test.py create mode 100644 vit-large-p16_32xb128-mae_in200.py diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000..81ea8f7 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,9 @@ +cff-version: 1.2.0 +message: "If you use this software, please cite it as below." 
+title: "OpenMMLab's Pre-training Toolbox and Benchmark" +authors: + - name: "MMPreTrain Contributors" +version: 0.15.0 +date-released: 2023-04-06 +repository-code: "https://github.com/open-mmlab/mmpretrain" +license: Apache-2.0 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..ce84c2a --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,73 @@ +# Contributing to MMPreTrain + +- [Contributing to MMPreTrain](#contributing-to-mmpretrain) + - [Workflow](#workflow) + - [Code style](#code-style) + - [Python](#python) + - [C++ and CUDA](#c-and-cuda) + - [Pre-commit Hook](#pre-commit-hook) + +Thanks for your interest in contributing to MMPreTrain! All kinds of contributions are welcome, including but not limited to the following. + +- Fix typo or bugs +- Add documentation or translate the documentation into other languages +- Add new features and components + +## Workflow + +We recommend the potential contributors follow this workflow for contribution. + +1. Fork and pull the latest MMPreTrain repository, follow [get started](https://mmpretrain.readthedocs.io/en/latest/get_started.html) to setup the environment. +2. Checkout a new branch (**do not use the master or dev branch** for PRs) + +```bash +git checkout -b xxxx # xxxx is the name of new branch +``` + +3. Edit the related files follow the code style mentioned below +4. Use [pre-commit hook](https://pre-commit.com/) to check and format your changes. +5. Commit your changes +6. Create a PR with related information + +## Code style + +### Python + +We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style. + +We use the following tools for linting and formatting: + +- [flake8](https://github.com/PyCQA/flake8): A wrapper around some linter tools. +- [isort](https://github.com/timothycrosley/isort): A Python utility to sort imports. +- [yapf](https://github.com/google/yapf): A formatter for Python files. +- [codespell](https://github.com/codespell-project/codespell): A Python utility to fix common misspellings in text files. +- [mdformat](https://github.com/executablebooks/mdformat): Mdformat is an opinionated Markdown formatter that can be used to enforce a consistent style in Markdown files. +- [docformatter](https://github.com/myint/docformatter): A formatter to format docstring. + +Style configurations of yapf and isort can be found in [setup.cfg](https://github.com/open-mmlab/mmpretrain/blob/main/setup.cfg). + +### C++ and CUDA + +We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). + +## Pre-commit Hook + +We use [pre-commit hook](https://pre-commit.com/) that checks and formats for `flake8`, `yapf`, `isort`, `trailing whitespaces`, `markdown files`, +fixes `end-of-files`, `double-quoted-strings`, `python-encoding-pragma`, `mixed-line-ending`, sorts `requirments.txt` automatically on every commit. +The config for a pre-commit hook is stored in [.pre-commit-config](https://github.com/open-mmlab/mmpretrain/blob/main/.pre-commit-config.yaml). + +After you clone the repository, you will need to install initialize pre-commit hook. + +```shell +pip install -U pre-commit +``` + +From the repository folder + +```shell +pre-commit install +``` + +After this on every commit check code linters and formatter will be enforced. + +> Before you create a PR, make sure that your code lints and is formatted by yapf. diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..ae87343 --- /dev/null +++ b/LICENSE @@ -0,0 +1,203 @@ +Copyright (c) OpenMMLab. 
All rights reserved + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 MMPreTrain Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..ad4d8da --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,5 @@ +include requirements/*.txt +include mmpretrain/.mim/model-index.yml +include mmpretrain/.mim/dataset-index.yml +recursive-include mmpretrain/.mim/configs *.py *.yml +recursive-include mmpretrain/.mim/tools *.py *.sh diff --git a/configs/_base_/datasets/cifar100_bs16.py b/configs/_base_/datasets/cifar100_bs16.py new file mode 100644 index 0000000..67477db --- /dev/null +++ b/configs/_base_/datasets/cifar100_bs16.py @@ -0,0 +1,45 @@ +# dataset settings +dataset_type = 'CIFAR100' +data_preprocessor = dict( + num_classes=100, + # RGB format normalization parameters + mean=[129.304, 124.070, 112.434], + std=[68.170, 65.392, 70.418], + # loaded images are already RGB format + to_rgb=False) + +train_pipeline = [ + dict(type='RandomCrop', crop_size=32, padding=4), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/cifar100', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=16, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/cifar100/', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, )) + +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/cifar10_bs16.py b/configs/_base_/datasets/cifar10_bs16.py new file mode 100644 index 0000000..408be35 --- /dev/null +++ b/configs/_base_/datasets/cifar10_bs16.py @@ -0,0 +1,45 @@ +# dataset settings +dataset_type = 'CIFAR10' +data_preprocessor = dict( + num_classes=10, + # RGB format normalization parameters + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + # loaded images are already RGB format + to_rgb=False) + +train_pipeline = [ + dict(type='RandomCrop', crop_size=32, padding=4), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/cifar10', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=16, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/cifar10/', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, )) + +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/coco_caption.py b/configs/_base_/datasets/coco_caption.py new file mode 100644 index 0000000..5346111 --- /dev/null +++ b/configs/_base_/datasets/coco_caption.py @@ -0,0 +1,70 @@ +# data settings +# coco caption annotations can be 
grabbed from LAVIS repo +# https://github.com/salesforce/LAVIS/blob/main/lavis/configs/datasets/coco/defaults_cap.yaml +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='CleanCaption', keys='gt_caption'), + dict( + type='PackInputs', + algorithm_keys=['gt_caption'], + meta_keys=['image_id'], + ), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(384, 384), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id']), +] + +train_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type='COCOCaption', + data_root='data/coco', + ann_file='annotations/coco_karpathy_train.json', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, + drop_last=True, +) + +val_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type='COCOCaption', + data_root='data/coco', + ann_file='annotations/coco_karpathy_val.json', + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +val_evaluator = dict( + type='COCOCaption', + ann_file='data/coco/annotations/coco_karpathy_val_gt.json', +) + +# # If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/coco_okvqa.py b/configs/_base_/datasets/coco_okvqa.py new file mode 100644 index 0000000..16f1577 --- /dev/null +++ b/configs/_base_/datasets/coco_okvqa.py @@ -0,0 +1,75 @@ +# data settings + +data_preprocessor = dict( + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + interpolation='bicubic', + backend='pillow'), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=['question_id', 'image_id'], + ), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(480, 480), + interpolation='bicubic', + backend='pillow'), + dict( + type='CleanCaption', + keys=['question'], + ), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=['question_id', 'image_id'], + ), +] + +train_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='COCOVQA', + data_root='data/coco', + data_prefix='train2014', + question_file= + 'annotations/okvqa_OpenEnded_mscoco_train2014_questions.json', + ann_file='annotations/okvqa_mscoco_train2014_annotations.json', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, + drop_last=True, +) + +val_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='COCOVQA', + data_root='data/coco', + data_prefix='val2014', + question_file= + 'annotations/okvqa_OpenEnded_mscoco_val2014_questions.json', + ann_file='annotations/okvqa_mscoco_val2014_annotations.json', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +val_evaluator = 
dict(type='VQAAcc') + +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/coco_retrieval.py b/configs/_base_/datasets/coco_retrieval.py new file mode 100644 index 0000000..6f6b802 --- /dev/null +++ b/configs/_base_/datasets/coco_retrieval.py @@ -0,0 +1,99 @@ +# data settings +# Here are the links to download the annotations for coco retrieval for convenience # noqa +# https://download.openmmlab.com/mmclassification/datasets/coco_retrieval/caption_karpathy_train2014.json +# https://download.openmmlab.com/mmclassification/datasets/coco_retrieval/caption_karpathy_val2014.json +# https://download.openmmlab.com/mmclassification/datasets/coco_retrieval/caption_karpathy_test2014.json +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +rand_increasing_policies = [ + dict(type='AutoContrast'), + dict(type='Equalize'), + dict(type='Rotate', magnitude_key='angle', magnitude_range=(0, 30)), + dict( + type='Brightness', magnitude_key='magnitude', + magnitude_range=(0, 0.0)), + dict(type='Sharpness', magnitude_key='magnitude', magnitude_range=(0, 0)), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + direction='horizontal'), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + direction='vertical'), +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + crop_ratio_range=(0.5, 1.0), + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies=rand_increasing_policies, + num_policies=2, + magnitude_level=5), + dict(type='CleanCaption', keys='text'), + dict( + type='PackInputs', + algorithm_keys=['text', 'is_matched'], + meta_keys=['image_id']), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(384, 384), + interpolation='bicubic', + backend='pillow'), + dict(type='CleanCaption', keys='text'), + dict( + type='PackInputs', + algorithm_keys=['text', 'gt_text_id', 'gt_image_id'], + meta_keys=['image_id']), +] + +train_dataloader = dict( + batch_size=32, + num_workers=16, + dataset=dict( + type='COCORetrieval', + data_root='data/coco', + ann_file='annotations/caption_karpathy_train2014.json', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, + drop_last=True, +) + +val_dataloader = dict( + batch_size=64, + num_workers=16, + dataset=dict( + type='COCORetrieval', + data_root='data/coco', + ann_file='annotations/caption_karpathy_val2014.json', + pipeline=test_pipeline, + # This is required for evaluation + test_mode=True, + ), + sampler=dict(type='SequentialSampler', subsample_type='sequential'), + persistent_workers=True, +) + +val_evaluator = dict(type='RetrievalRecall', topk=(1, 5, 10)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/coco_vg_vqa.py b/configs/_base_/datasets/coco_vg_vqa.py new file mode 100644 index 0000000..7ba0eac --- /dev/null +++ b/configs/_base_/datasets/coco_vg_vqa.py @@ -0,0 +1,96 @@ +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +train_pipeline = [ +
dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=(480, 480), + crop_ratio_range=(0.5, 1.0), + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='simple_increasing', # slightly different from LAVIS + num_policies=2, + magnitude_level=5), + dict(type='CleanCaption', keys=['question', 'gt_answer']), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight']), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(480, 480), + interpolation='bicubic', + backend='pillow'), + dict(type='CleanCaption', keys=['question']), + dict( + type='PackInputs', + algorithm_keys=['question'], + meta_keys=['question_id']), +] + +train_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='ConcatDataset', + datasets=[ + # VQAv2 train + dict( + type='COCOVQA', + data_root='data/coco', + data_prefix='train2014', + question_file= + 'annotations/v2_OpenEnded_mscoco_train2014_questions.json', + ann_file='annotations/v2_mscoco_train2014_annotations.json', + pipeline=train_pipeline, + ), + # VQAv2 val + dict( + type='COCOVQA', + data_root='data/coco', + data_prefix='val2014', + question_file= + 'annotations/v2_OpenEnded_mscoco_val2014_questions.json', + ann_file='annotations/v2_mscoco_val2014_annotations.json', + pipeline=train_pipeline, + ), + # Visual Genome + dict( + type='VisualGenomeQA', + data_root='visual_genome', + data_prefix='image', + ann_file='question_answers.json', + pipeline=train_pipeline, + ) + ]), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, + drop_last=True, +) + +test_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='COCOVQA', + data_root='data/coco', + data_prefix='test2015', + question_file= + 'annotations/v2_OpenEnded_mscoco_test2015_questions.json', # noqa: E501 + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='ReportVQA', file_path='vqa_test.json') diff --git a/configs/_base_/datasets/coco_vqa.py b/configs/_base_/datasets/coco_vqa.py new file mode 100644 index 0000000..7fb16bd --- /dev/null +++ b/configs/_base_/datasets/coco_vqa.py @@ -0,0 +1,84 @@ +# data settings + +data_preprocessor = dict( + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + interpolation='bicubic', + backend='pillow'), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=['question_id', 'image_id'], + ), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(480, 480), + interpolation='bicubic', + backend='pillow'), + dict( + type='CleanCaption', + keys=['question'], + ), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=['question_id', 'image_id'], + ), +] + +train_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='COCOVQA', + data_root='data/coco', + data_prefix='train2014', + question_file= + 'annotations/v2_OpenEnded_mscoco_train2014_questions.json', + ann_file='annotations/v2_mscoco_train2014_annotations.json', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, + drop_last=True, +) + 
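These `_base_` dataset files are not run on their own; a top-level config pulls them in through mmengine's `_base_` inheritance and overrides individual keys, which are merged dict-by-dict. A minimal sketch, assuming a hypothetical downstream file (none of these names are part of this commit):

```python
# hypothetical_vqa_config.py -- illustrative sketch only, not a file in this commit
_base_ = ['../_base_/datasets/coco_vqa.py']

# Overridden keys are merged into the inherited dictionaries, so only the
# fields that actually change need to be restated here.
train_dataloader = dict(batch_size=32, num_workers=4)
```

Loading such a file with `mmengine.config.Config.fromfile` resolves the inheritance, so `cfg.train_dataloader.batch_size` would read back 32 while every other key keeps its value from the base file.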
+val_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='COCOVQA', + data_root='data/coco', + data_prefix='val2014', + question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json', + ann_file='annotations/v2_mscoco_val2014_annotations.json', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +val_evaluator = dict(type='VQAAcc') + +test_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='COCOVQA', + data_root='data/coco', + data_prefix='test2015', + question_file= # noqa: E251 + 'annotations/v2_OpenEnded_mscoco_test2015_questions.json', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='ReportVQA', file_path='vqa_test.json') diff --git a/configs/_base_/datasets/cub_bs8_384.py b/configs/_base_/datasets/cub_bs8_384.py new file mode 100644 index 0000000..24b3a9f --- /dev/null +++ b/configs/_base_/datasets/cub_bs8_384.py @@ -0,0 +1,51 @@ +# dataset settings +dataset_type = 'CUB' +data_preprocessor = dict( + num_classes=200, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=510), + dict(type='RandomCrop', crop_size=384), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=510), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=8, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/CUB_200_2011', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=8, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/CUB_200_2011', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, )) + +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/cub_bs8_448.py b/configs/_base_/datasets/cub_bs8_448.py new file mode 100644 index 0000000..c0bc7b7 --- /dev/null +++ b/configs/_base_/datasets/cub_bs8_448.py @@ -0,0 +1,50 @@ +# dataset settings +dataset_type = 'CUB' +data_preprocessor = dict( + num_classes=200, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=600), + dict(type='RandomCrop', crop_size=448), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=600), + dict(type='CenterCrop', crop_size=448), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=8, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/CUB_200_2011', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=8, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/CUB_200_2011', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, )) + 
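In the CUB configs above, `topk=(1, )` asks the `Accuracy` metric for top-1 accuracy only, i.e. the fraction of samples whose highest-scoring class matches the label. A library-agnostic sketch of that computation in PyTorch (illustrative only, not the evaluator's actual implementation):

```python
import torch

def top1_accuracy(scores: torch.Tensor, labels: torch.Tensor) -> float:
    """scores: (N, num_classes) logits; labels: (N,) integer class ids."""
    preds = scores.argmax(dim=1)                    # predicted class per sample
    return (preds == labels).float().mean().item()  # fraction of exact matches

scores = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = torch.tensor([1, 0, 0])
print(top1_accuracy(scores, labels))  # 2 of 3 correct -> ~0.667
```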
+test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/flickr30k_caption.py b/configs/_base_/datasets/flickr30k_caption.py new file mode 100644 index 0000000..a902b52 --- /dev/null +++ b/configs/_base_/datasets/flickr30k_caption.py @@ -0,0 +1,92 @@ +# data settings + +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='CleanCaption', keys='gt_caption'), + dict( + type='PackInputs', + algorithm_keys=['gt_caption'], + meta_keys=['image_id'], + ), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(384, 384), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id']), +] + +train_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type='Flickr30kCaption', + data_root='data/flickr30k', + ann_file='annotations/dataset_flickr30k.json', + data_prefix='images', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, + drop_last=True, +) + +val_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type='Flickr30kCaption', + data_root='data/flickr30k', + ann_file='annotations/dataset_flickr30k.json', + data_prefix='images', + split='val', + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +# refer tools/dataset_converters/convert_flickr30k_ann.py +val_evaluator = dict( + type='COCOCaption', + ann_file='data/flickr30k_val_gt.json', +) + +# # If you want standard test, please manually configure the test dataset +test_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type='Flickr30kCaption', + data_root='data/flickr30k', + ann_file='annotations/dataset_flickr30k.json', + data_prefix='images', + split='test', + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +# refer tools/dataset_converters/convert_flickr30k_ann.py +test_evaluator = dict( + type='COCOCaption', + ann_file='data/flickr30k_test_gt.json', +) diff --git a/configs/_base_/datasets/flickr30k_retrieval.py b/configs/_base_/datasets/flickr30k_retrieval.py new file mode 100644 index 0000000..acbc645 --- /dev/null +++ b/configs/_base_/datasets/flickr30k_retrieval.py @@ -0,0 +1,112 @@ +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +rand_increasing_policies = [ + dict(type='AutoContrast'), + dict(type='Equalize'), + dict(type='Rotate', magnitude_key='angle', magnitude_range=(0, 30)), + dict( + type='Brightness', magnitude_key='magnitude', + magnitude_range=(0, 0.0)), + dict(type='Sharpness', magnitude_key='magnitude', magnitude_range=(0, 0)), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + direction='horizontal'), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + direction='vertical'), +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + crop_ratio_range=(0.5, 
1.0), + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies=rand_increasing_policies, + num_policies=2, + magnitude_level=5), + dict(type='CleanCaption', keys='text'), + dict( + type='PackInputs', + algorithm_keys=['text', 'is_matched'], + meta_keys=['image_id']), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(384, 384), + interpolation='bicubic', + backend='pillow'), + dict(type='CleanCaption', keys='text'), + dict( + type='PackInputs', + algorithm_keys=['text', 'gt_text_id', 'gt_image_id'], + meta_keys=['image_id']), +] + +train_dataloader = dict( + batch_size=32, + num_workers=16, + dataset=dict( + type='Flickr30kRetrieval', + data_root='data/flickr30k', + ann_file='annotations/dataset_flickr30k.json', + data_prefix='images', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, + drop_last=True, +) + +val_dataloader = dict( + batch_size=64, + num_workers=16, + dataset=dict( + type='Flickr30kRetrieval', + data_root='data/flickr30k', + ann_file='annotations/dataset_flickr30k.json', + data_prefix='images', + split='val', + pipeline=test_pipeline, + test_mode=True, # This is required for evaluation + ), + sampler=dict(type='SequentialSampler', subsample_type='sequential'), + persistent_workers=True, +) + +val_evaluator = dict(type='RetrievalRecall', topk=(1, 5, 10)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = dict( + batch_size=64, + num_workers=16, + dataset=dict( + type='Flickr30kRetrieval', + data_root='data/flickr30k', + ann_file='annotations/dataset_flickr30k.json', + data_prefix='images', + split='test', + pipeline=test_pipeline, + test_mode=True, # This is required for evaluation + ), + sampler=dict(type='SequentialSampler', subsample_type='sequential'), + persistent_workers=True, +) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/gqa.py b/configs/_base_/datasets/gqa.py new file mode 100644 index 0000000..872ab45 --- /dev/null +++ b/configs/_base_/datasets/gqa.py @@ -0,0 +1,81 @@ +# data settings + +data_preprocessor = dict( + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + interpolation='bicubic', + backend='pillow'), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=['question_id', 'image_id'], + ), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(480, 480), + interpolation='bicubic', + backend='pillow'), + dict( + type='CleanCaption', + keys=['question'], + ), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=['question_id', 'image_id'], + ), +] + +train_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='GQA', + data_root='data/gqa', + data_prefix='images', + ann_file='annotations/train_balanced_questions.json', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, + drop_last=True, +) + +val_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='GQA', + data_root='data/gqa', + data_prefix='images', + ann_file='annotations/testdev_balanced_questions.json', + pipeline=test_pipeline), + 
sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +val_evaluator = dict(type='GQAAcc') + +test_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='GQA', + data_root='data/gqa', + data_prefix='images', + ann_file='annotations/testdev_balanced_questions.json', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet21k_bs128.py b/configs/_base_/datasets/imagenet21k_bs128.py new file mode 100644 index 0000000..38bfd35 --- /dev/null +++ b/configs/_base_/datasets/imagenet21k_bs128.py @@ -0,0 +1,28 @@ +# dataset settings +dataset_type = 'ImageNet21k' +data_preprocessor = dict( + num_classes=21842, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet21k', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) diff --git a/configs/_base_/datasets/imagenet_bs128_mbv3.py b/configs/_base_/datasets/imagenet_bs128_mbv3.py new file mode 100644 index 0000000..d355f50 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs128_mbv3.py @@ -0,0 +1,66 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='AutoAugment', + policies='imagenet', + hparams=dict(pad_val=[round(x) for x in bgr_mean])), + dict( + type='RandomErasing', + erase_prob=0.2, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py b/configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py new file mode 100644 index 0000000..be90a65 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs128_poolformer_medium_224.py @@ -0,0 +1,80 @@ 
+# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=236, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py b/configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py new file mode 100644 index 0000000..c9e0f07 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs128_poolformer_small_224.py @@ -0,0 +1,80 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=248, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + 
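Several of these ImageNet configs derive `bgr_mean` and `bgr_std` by reversing the RGB statistics, presumably because the pipeline transforms (`RandAugment` padding, `RandomErasing` fill) operate on the BGR images as loaded, before the data preprocessor's `to_rgb=True` conversion; `pad_val=[round(x) for x in bgr_mean]` then rounds the fill value to integers. A quick plain-Python check of what those expressions evaluate to (illustrative only):

```python
mean = [123.675, 116.28, 103.53]        # RGB-order statistics from data_preprocessor
std = [58.395, 57.12, 57.375]

bgr_mean = mean[::-1]                   # [103.53, 116.28, 123.675]
bgr_std = std[::-1]                     # [57.375, 57.12, 58.395]
pad_val = [round(x) for x in bgr_mean]  # [104, 116, 124], the RandAugment pad value

print(bgr_mean, bgr_std, pad_val)
```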
+val_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs128_revvit_224.py b/configs/_base_/datasets/imagenet_bs128_revvit_224.py new file mode 100644 index 0000000..fd87aaf --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs128_revvit_224.py @@ -0,0 +1,83 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=7, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', # should be 'pixel', but currently not supported + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=256, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs128_riformer_medium_384.py b/configs/_base_/datasets/imagenet_bs128_riformer_medium_384.py new file mode 100644 index 0000000..151ded7 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs128_riformer_medium_384.py @@ -0,0 +1,80 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + 
type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=404, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs128_riformer_small_384.py b/configs/_base_/datasets/imagenet_bs128_riformer_small_384.py new file mode 100644 index 0000000..ea9799b --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs128_riformer_small_384.py @@ -0,0 +1,80 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=426, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs128_vig_224.py b/configs/_base_/datasets/imagenet_bs128_vig_224.py new file mode 100644 index 
0000000..abb0182 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs128_vig_224.py @@ -0,0 +1,80 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=248, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs16_eva_196.py b/configs/_base_/datasets/imagenet_bs16_eva_196.py new file mode 100644 index 0000000..f668e1d --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs16_eva_196.py @@ -0,0 +1,60 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=196, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=196, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=196), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset 
+test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs16_eva_336.py b/configs/_base_/datasets/imagenet_bs16_eva_336.py new file mode 100644 index 0000000..e2c770a --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs16_eva_336.py @@ -0,0 +1,60 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=336, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=336, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=336), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs16_eva_448.py b/configs/_base_/datasets/imagenet_bs16_eva_448.py new file mode 100644 index 0000000..b90bba1 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs16_eva_448.py @@ -0,0 +1,62 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=448, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=448, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=448), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + ann_file='meta/train.txt', + data_prefix='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=8, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + ann_file='meta/val.txt', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs16_eva_560.py 
b/configs/_base_/datasets/imagenet_bs16_eva_560.py new file mode 100644 index 0000000..9e548cc --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs16_eva_560.py @@ -0,0 +1,60 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=560, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=560, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=560), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs16_pil_bicubic_384.py b/configs/_base_/datasets/imagenet_bs16_pil_bicubic_384.py new file mode 100644 index 0000000..8507af4 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs16_pil_bicubic_384.py @@ -0,0 +1,53 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=384, backend='pillow', interpolation='bicubic'), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs256_beitv2.py b/configs/_base_/datasets/imagenet_bs256_beitv2.py new file mode 100644 index 0000000..9d42032 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs256_beitv2.py @@ -0,0 +1,47 @@ +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='TwoNormDataPreprocessor', + 
mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + second_mean=[127.5, 127.5, 127.5], + second_std=[127.5, 127.5, 127.5], + to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandomResizedCropAndInterpolationWithTwoPic', + size=224, + second_size=224, + interpolation='bicubic', + second_interpolation='bicubic', + scale=(0.2, 1.0)), + dict( + type='BEiTMaskGenerator', + input_size=(14, 14), + num_masking_patches=75, + max_num_patches=75, + min_num_patches=16), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=256, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + split='train', + pipeline=train_pipeline)) diff --git a/configs/_base_/datasets/imagenet_bs256_davit_224.py b/configs/_base_/datasets/imagenet_bs256_davit_224.py new file mode 100644 index 0000000..3ea0a83 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs256_davit_224.py @@ -0,0 +1,80 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=236, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs256_itpn.py b/configs/_base_/datasets/imagenet_bs256_itpn.py new file mode 100644 index 0000000..0b51c47 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs256_itpn.py @@ -0,0 +1,49 @@ +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='TwoNormDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # clip mean & std + 
second_mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + second_std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandomResizedCropAndInterpolationWithTwoPic', + size=224, + second_size=224, + interpolation='bicubic', + second_interpolation='bicubic', + scale=(0.2, 1.0)), + dict( + type='BEiTMaskGenerator', + input_size=(14, 14), + num_masking_patches=75, + max_num_patches=75, + min_num_patches=16), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=256, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='meta/train.txt', + data_prefix=dict(img_path='train/'), + pipeline=train_pipeline)) diff --git a/configs/_base_/datasets/imagenet_bs256_levit_224.py b/configs/_base_/datasets/imagenet_bs256_levit_224.py new file mode 100644 index 0000000..612db7d --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs256_levit_224.py @@ -0,0 +1,80 @@ +dataset_type = 'ImageNet' + +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=256, + num_workers=4, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=256, + num_workers=4, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs256_rsb_a12.py b/configs/_base_/datasets/imagenet_bs256_rsb_a12.py new file mode 100644 index 0000000..ab59d9e --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs256_rsb_a12.py @@ -0,0 +1,72 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 
57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=7, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=236, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=256, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=256, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs256_rsb_a3.py b/configs/_base_/datasets/imagenet_bs256_rsb_a3.py new file mode 100644 index 0000000..02e3449 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs256_rsb_a3.py @@ -0,0 +1,72 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=6, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=236, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=256, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=256, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs256_simmim_192.py b/configs/_base_/datasets/imagenet_bs256_simmim_192.py new file 
mode 100644 index 0000000..45062e9 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs256_simmim_192.py @@ -0,0 +1,33 @@ +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='SelfSupDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=192, crop_ratio_range=(0.67, 1.0)), + dict(type='RandomFlip', prob=0.5), + dict( + type='SimMIMMaskGenerator', + input_size=192, + mask_patch_size=32, + model_patch_size=4, + mask_ratio=0.6), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=256, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + split='train', + pipeline=train_pipeline)) diff --git a/configs/_base_/datasets/imagenet_bs256_swin_192.py b/configs/_base_/datasets/imagenet_bs256_swin_192.py new file mode 100644 index 0000000..11c2cb2 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs256_swin_192.py @@ -0,0 +1,81 @@ +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=192, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=219, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=192), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=256, + num_workers=8, + collate_fn=dict(type='default_collate'), + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + data_root=data_root, + split='train', + pipeline=train_pipeline), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + collate_fn=dict(type='default_collate'), + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + split='val', + pipeline=test_pipeline), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs32.py b/configs/_base_/datasets/imagenet_bs32.py new file mode 100644 index 0000000..a069bb9 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs32.py @@ -0,0 +1,51 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # 
convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs32_byol.py b/configs/_base_/datasets/imagenet_bs32_byol.py new file mode 100644 index 0000000..a7235b3 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs32_byol.py @@ -0,0 +1,89 @@ +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='SelfSupDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + +view_pipeline1 = [ + dict( + type='RandomResizedCrop', + scale=224, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.2, + hue=0.1) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=1.), + dict(type='Solarize', thr=128, prob=0.), +] +view_pipeline2 = [ + dict( + type='RandomResizedCrop', + scale=224, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.2, + hue=0.1) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=0.1), + dict(type='Solarize', thr=128, prob=0.2) +] +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiView', + num_views=[1, 1], + transforms=[view_pipeline1, view_pipeline2]), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + split='train', + pipeline=train_pipeline)) diff --git a/configs/_base_/datasets/imagenet_bs32_mocov2.py b/configs/_base_/datasets/imagenet_bs32_mocov2.py new file mode 100644 index 0000000..dc60050 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs32_mocov2.py @@ -0,0 +1,58 @@ +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='SelfSupDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 
57.375], + to_rgb=True) + +# The difference between mocov2 and mocov1 is the transforms in the pipeline +view_pipeline = [ + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.2, 1.), + backend='pillow'), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.1) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=0.5), + dict(type='RandomFlip', prob=0.5), +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='MultiView', num_views=2, transforms=[view_pipeline]), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=32, + num_workers=8, + drop_last=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + split='train', + pipeline=train_pipeline)) diff --git a/configs/_base_/datasets/imagenet_bs32_pil_bicubic.py b/configs/_base_/datasets/imagenet_bs32_pil_bicubic.py new file mode 100644 index 0000000..36880ff --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs32_pil_bicubic.py @@ -0,0 +1,60 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs32_pil_resize.py b/configs/_base_/datasets/imagenet_bs32_pil_resize.py new file mode 100644 index 0000000..f9afc5c --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs32_pil_resize.py @@ -0,0 +1,51 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'), 
+ dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs32_simclr.py b/configs/_base_/datasets/imagenet_bs32_simclr.py new file mode 100644 index 0000000..8e487b0 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs32_simclr.py @@ -0,0 +1,52 @@ +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='SelfSupDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + +view_pipeline = [ + dict(type='RandomResizedCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.8, + contrast=0.8, + saturation=0.8, + hue=0.2) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=0.5), +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='MultiView', num_views=2, transforms=[view_pipeline]), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + split='train', + pipeline=train_pipeline)) diff --git a/configs/_base_/datasets/imagenet_bs512_mae.py b/configs/_base_/datasets/imagenet_bs512_mae.py new file mode 100644 index 0000000..03d350e --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs512_mae.py @@ -0,0 +1,32 @@ +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='SelfSupDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.2, 1.0), + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=512, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + split='train', + pipeline=train_pipeline)) diff --git a/configs/_base_/datasets/imagenet_bs512_mocov3.py b/configs/_base_/datasets/imagenet_bs512_mocov3.py new file mode 100644 index 0000000..1679f63 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs512_mocov3.py @@ -0,0 +1,90 @@ +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='SelfSupDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + 
to_rgb=True) + +view_pipeline1 = [ + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.2, 1.), + backend='pillow'), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.2, + hue=0.1) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=1.), + dict(type='Solarize', thr=128, prob=0.), + dict(type='RandomFlip', prob=0.5), +] +view_pipeline2 = [ + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.2, 1.), + backend='pillow'), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.2, + hue=0.1) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=0.1), + dict(type='Solarize', thr=128, prob=0.2), + dict(type='RandomFlip', prob=0.5), +] +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiView', + num_views=[1, 1], + transforms=[view_pipeline1, view_pipeline2]), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=512, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + split='train', + pipeline=train_pipeline)) diff --git a/configs/_base_/datasets/imagenet_bs64.py b/configs/_base_/datasets/imagenet_bs64.py new file mode 100644 index 0000000..73e6d54 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64.py @@ -0,0 +1,51 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_autoaug.py b/configs/_base_/datasets/imagenet_bs64_autoaug.py new file mode 100644 index 0000000..3160b8c --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_autoaug.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 
57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='AutoAugment', + policies='imagenet', + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_clip_224.py b/configs/_base_/datasets/imagenet_bs64_clip_224.py new file mode 100644 index 0000000..c200601 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_clip_224.py @@ -0,0 +1,73 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=True) +image_size = 224 +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=image_size, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + # dict( + # type='RandAugment', + # policies={{_base_.rand_increasing_policies}}, + # num_policies=2, + # total_level=10, + # magnitude_level=9, + # magnitude_std=0.5, + # hparams=dict( + # pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + # interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(image_size, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=image_size), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + samples_per_gpu=64, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline)) + +evaluation = dict(interval=10, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_clip_384.py 
b/configs/_base_/datasets/imagenet_bs64_clip_384.py new file mode 100644 index 0000000..a7caee6 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_clip_384.py @@ -0,0 +1,73 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=True) +image_size = 384 +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=image_size, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + # dict( + # type='RandAugment', + # policies={{_base_.rand_increasing_policies}}, + # num_policies=2, + # total_level=10, + # magnitude_level=9, + # magnitude_std=0.5, + # hparams=dict( + # pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + # interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(image_size, -1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=image_size), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + samples_per_gpu=64, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline)) + +evaluation = dict(interval=10, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_clip_448.py b/configs/_base_/datasets/imagenet_bs64_clip_448.py new file mode 100644 index 0000000..32a92ef --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_clip_448.py @@ -0,0 +1,74 @@ +# dataset settings +dataset_type = 'ImageNet' +img_norm_cfg = dict( + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=True) +image_size = 448 + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=image_size, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + # dict( + # type='RandAugment', + # policies={{_base_.rand_increasing_policies}}, + # num_policies=2, + # total_level=10, + # magnitude_level=9, + # magnitude_std=0.5, + # hparams=dict( + # pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], + # interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=img_norm_cfg['mean'][::-1], + fill_std=img_norm_cfg['std'][::-1]), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + size=(image_size, 
-1), + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=image_size), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) +] + +data = dict( + samples_per_gpu=64, + workers_per_gpu=8, + train=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + test=dict( + # replace `data/val` with `data/test` for standard test + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline)) + +evaluation = dict(interval=10, metric='accuracy') diff --git a/configs/_base_/datasets/imagenet_bs64_convmixer_224.py b/configs/_base_/datasets/imagenet_bs64_convmixer_224.py new file mode 100644 index 0000000..7e9c0aa --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_convmixer_224.py @@ -0,0 +1,80 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=233, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_deit3_224.py b/configs/_base_/datasets/imagenet_bs64_deit3_224.py new file mode 100644 index 0000000..5e460a4 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_deit3_224.py @@ -0,0 +1,80 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + 
interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=224, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_deit3_384.py b/configs/_base_/datasets/imagenet_bs64_deit3_384.py new file mode 100644 index 0000000..bc554dd --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_deit3_384.py @@ -0,0 +1,60 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=384, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_edgenext_256.py b/configs/_base_/datasets/imagenet_bs64_edgenext_256.py new file mode 100644 index 0000000..7db9e4e --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_edgenext_256.py @@ -0,0 +1,80 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] 
+bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=256, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=292, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=256), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_hivit_224.py b/configs/_base_/datasets/imagenet_bs64_hivit_224.py new file mode 100644 index 0000000..4c258d7 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_hivit_224.py @@ -0,0 +1,83 @@ +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='meta/train.txt', + data_prefix='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='meta/val.txt', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = 
dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_mixer_224.py b/configs/_base_/datasets/imagenet_bs64_mixer_224.py new file mode 100644 index 0000000..b92a514 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_mixer_224.py @@ -0,0 +1,52 @@ +# dataset settings +dataset_type = 'ImageNet' + +# Google Research usually uses the normalization setting below. +data_preprocessor = dict( + num_classes=1000, + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short', interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_pil_resize.py b/configs/_base_/datasets/imagenet_bs64_pil_resize.py new file mode 100644 index 0000000..79f9325 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_pil_resize.py @@ -0,0 +1,51 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py b/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py new file mode 100644 index 0000000..c259067 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py
@@ -0,0 +1,68 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='AutoAugment', + policies='imagenet', + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_swin_224.py b/configs/_base_/datasets/imagenet_bs64_swin_224.py new file mode 100644 index 0000000..6e8786e --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_swin_224.py @@ -0,0 +1,80 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', 
topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_swin_256.py b/configs/_base_/datasets/imagenet_bs64_swin_256.py new file mode 100644 index 0000000..9ecb41b --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_swin_256.py @@ -0,0 +1,80 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=256, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=292, # ( 256 / 224 * 256 ) + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=256), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_swin_384.py b/configs/_base_/datasets/imagenet_bs64_swin_384.py new file mode 100644 index 0000000..11264f8 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_swin_384.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=384, backend='pillow', interpolation='bicubic'), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + 
pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs64_t2t_224.py b/configs/_base_/datasets/imagenet_bs64_t2t_224.py new file mode 100644 index 0000000..8a2dc10 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs64_t2t_224.py @@ -0,0 +1,80 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=248, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/imagenet_bs8_pil_bicubic_320.py b/configs/_base_/datasets/imagenet_bs8_pil_bicubic_320.py new file mode 100644 index 0000000..7160084 --- /dev/null +++ b/configs/_base_/datasets/imagenet_bs8_pil_bicubic_320.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + # RGB format normalization parameters + mean=[122.5, 122.5, 122.5], + std=[122.5, 122.5, 122.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=320, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=int(320 / 224 * 256), + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=320), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=8, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', 
shuffle=True), +) + +val_dataloader = dict( + batch_size=8, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/inshop_bs32_448.py b/configs/_base_/datasets/inshop_bs32_448.py new file mode 100644 index 0000000..f9772fa --- /dev/null +++ b/configs/_base_/datasets/inshop_bs32_448.py @@ -0,0 +1,64 @@ +# dataset settings +dataset_type = 'InShop' +data_preprocessor = dict( + num_classes=3997, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=512), + dict(type='RandomCrop', crop_size=448), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=512), + dict(type='CenterCrop', crop_size=448), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=32, + num_workers=4, + dataset=dict( + type=dataset_type, + data_root='data/inshop', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +query_dataloader = dict( + batch_size=32, + num_workers=4, + dataset=dict( + type=dataset_type, + data_root='data/inshop', + split='query', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) + +gallery_dataloader = dict( + batch_size=32, + num_workers=4, + dataset=dict( + type=dataset_type, + data_root='data/inshop', + split='gallery', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_dataloader = query_dataloader +val_evaluator = [ + dict(type='RetrievalRecall', topk=1), + dict(type='RetrievalAveragePrecision', topk=10), +] + +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/nlvr2.py b/configs/_base_/datasets/nlvr2.py new file mode 100644 index 0000000..2f5314b --- /dev/null +++ b/configs/_base_/datasets/nlvr2.py @@ -0,0 +1,86 @@ +# dataset settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +train_pipeline = [ + dict( + type='ApplyToList', + # NLVR requires to load two images in task. + scatter_key='img_path', + transforms=[ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + ], + collate_keys=['img', 'scale_factor', 'ori_shape'], + ), + dict(type='CleanCaption', keys='text'), + dict( + type='PackInputs', + algorithm_keys=['text'], + meta_keys=['image_id'], + ), +] + +test_pipeline = [ + dict( + type='ApplyToList', + # NLVR requires to load two images in task. 
+ scatter_key='img_path', + transforms=[ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(384, 384), + interpolation='bicubic', + backend='pillow'), + ], + collate_keys=['img', 'scale_factor', 'ori_shape'], + ), + dict( + type='PackInputs', + algorithm_keys=['text'], + meta_keys=['image_id'], + ), +] + +train_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='NLVR2', + data_root='data/nlvr2', + ann_file='dev.json', + data_prefix='dev', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, + drop_last=True, +) + +val_dataloader = dict( + batch_size=64, + num_workers=8, + dataset=dict( + type='NLVR2', + data_root='data/nlvr2', + ann_file='dev.json', + data_prefix='dev', + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +val_evaluator = dict(type='Accuracy') + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/nocaps.py b/configs/_base_/datasets/nocaps.py new file mode 100644 index 0000000..5176671 --- /dev/null +++ b/configs/_base_/datasets/nocaps.py @@ -0,0 +1,41 @@ +# data settings + +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(384, 384), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id']), +] + +val_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type='NoCaps', + data_root='data/nocaps/', + data_prefix=dict(img_path='images/'), + ann_file='annotations/nocaps_val_4500_captions.json', + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +val_evaluator = dict( + type='NocapsSave', + save_dir='./', +) + +# # If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/ocrvqa.py b/configs/_base_/datasets/ocrvqa.py new file mode 100644 index 0000000..09e6e35 --- /dev/null +++ b/configs/_base_/datasets/ocrvqa.py @@ -0,0 +1,81 @@ +# data settings + +data_preprocessor = dict( + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + interpolation='bicubic', + backend='pillow'), + dict(type='CleanCaption', keys=['question', 'gt_answer']), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=[], + ), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(480, 480), + interpolation='bicubic', + backend='pillow'), + dict(type='CleanCaption', keys=['question', 'gt_answer']), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=[], + ), +] + +train_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='OCRVQA', + data_root='data/ocrvqa', + data_prefix='images', + ann_file='annotations/dataset.json', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, + drop_last=True, +) + 
+val_dataloader = dict( + batch_size=64, + num_workers=8, + dataset=dict( + type='OCRVQA', + data_root='data/ocrvqa', + data_prefix='images', + ann_file='annotations/dataset.json', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +val_evaluator = dict(type='VQAAcc') + +test_dataloader = dict( + batch_size=64, + num_workers=8, + dataset=dict( + type='OCRVQA', + data_root='data/ocrvqa', + data_prefix='images', + ann_file='annotations/dataset.json', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='VQAAcc') diff --git a/configs/_base_/datasets/pipelines/auto_aug.py b/configs/_base_/datasets/pipelines/auto_aug.py new file mode 100644 index 0000000..5a10f7e --- /dev/null +++ b/configs/_base_/datasets/pipelines/auto_aug.py @@ -0,0 +1,96 @@ +# Policy for ImageNet, refers to +# https://github.com/DeepVoltaire/AutoAugment/blame/master/autoaugment.py +policy_imagenet = [ + [ + dict(type='Posterize', bits=4, prob=0.4), + dict(type='Rotate', angle=30., prob=0.6) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], + [ + dict(type='Posterize', bits=5, prob=0.6), + dict(type='Posterize', bits=5, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8) + ], + [ + dict(type='Solarize', thr=256 / 9 * 6, prob=0.6), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Posterize', bits=6, prob=0.8), + dict(type='Equalize', prob=1.)], + [ + dict(type='Rotate', angle=10., prob=0.2), + dict(type='Solarize', thr=256 / 9, prob=0.6) + ], + [ + dict(type='Equalize', prob=0.6), + dict(type='Posterize', bits=5, prob=0.4) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0., prob=0.4) + ], + [ + dict(type='Rotate', angle=30., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [dict(type='Equalize', prob=0.0), + dict(type='Equalize', prob=0.8)], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) + ], + [ + dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), + dict(type='ColorTransform', magnitude=0.2, prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0.8, prob=0.8), + dict(type='Solarize', thr=256 / 9 * 2, prob=0.8) + ], + [ + dict(type='Sharpness', magnitude=0.7, prob=0.4), + dict(type='Invert', prob=0.6) + ], + [ + dict( + type='Shear', + magnitude=0.3 / 9 * 5, + prob=0.6, + direction='horizontal'), + dict(type='Equalize', prob=1.) + ], + [ + dict(type='ColorTransform', magnitude=0., prob=0.4), + dict(type='Equalize', prob=0.6) + ], + [ + dict(type='Equalize', prob=0.4), + dict(type='Solarize', thr=256 / 9 * 5, prob=0.2) + ], + [ + dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), + dict(type='AutoContrast', prob=0.6) + ], + [dict(type='Invert', prob=0.6), + dict(type='Equalize', prob=1.)], + [ + dict(type='ColorTransform', magnitude=0.4, prob=0.6), + dict(type='Contrast', magnitude=0.8, prob=1.) 
+ ], + [dict(type='Equalize', prob=0.8), + dict(type='Equalize', prob=0.6)], +] diff --git a/configs/_base_/datasets/pipelines/rand_aug.py b/configs/_base_/datasets/pipelines/rand_aug.py new file mode 100644 index 0000000..f2bab3c --- /dev/null +++ b/configs/_base_/datasets/pipelines/rand_aug.py @@ -0,0 +1,43 @@ +# Refers to `_RAND_INCREASING_TRANSFORMS` in pytorch-image-models +rand_increasing_policies = [ + dict(type='AutoContrast'), + dict(type='Equalize'), + dict(type='Invert'), + dict(type='Rotate', magnitude_key='angle', magnitude_range=(0, 30)), + dict(type='Posterize', magnitude_key='bits', magnitude_range=(4, 0)), + dict(type='Solarize', magnitude_key='thr', magnitude_range=(256, 0)), + dict( + type='SolarizeAdd', + magnitude_key='magnitude', + magnitude_range=(0, 110)), + dict( + type='ColorTransform', + magnitude_key='magnitude', + magnitude_range=(0, 0.9)), + dict(type='Contrast', magnitude_key='magnitude', magnitude_range=(0, 0.9)), + dict( + type='Brightness', magnitude_key='magnitude', + magnitude_range=(0, 0.9)), + dict( + type='Sharpness', magnitude_key='magnitude', magnitude_range=(0, 0.9)), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + direction='horizontal'), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + direction='vertical'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.45), + direction='horizontal'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.45), + direction='vertical') +] diff --git a/configs/_base_/datasets/refcoco.py b/configs/_base_/datasets/refcoco.py new file mode 100644 index 0000000..f698e76 --- /dev/null +++ b/configs/_base_/datasets/refcoco.py @@ -0,0 +1,105 @@ +# data settings + +data_preprocessor = dict( + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.1, + backend='cv2') + ], + prob=0.5), + dict( + type='mmdet.RandomCrop', + crop_type='relative_range', + crop_size=(0.8, 0.8), + allow_negative_crop=False), + dict( + type='RandomChoiceResize', + scales=[(384, 384), (360, 360), (344, 344), (312, 312), (300, 300), + (286, 286), (270, 270)], + keep_ratio=False), + dict( + type='RandomTranslatePad', + size=384, + aug_translate=True, + ), + dict(type='CleanCaption', keys='text'), + dict( + type='PackInputs', + algorithm_keys=['text', 'gt_bboxes', 'scale_factor'], + meta_keys=['image_id'], + ), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(384, 384), + interpolation='bicubic', + backend='pillow'), + dict(type='CleanCaption', keys='text'), + dict( + type='PackInputs', + algorithm_keys=['text', 'gt_bboxes', 'scale_factor'], + meta_keys=['image_id'], + ), +] + +train_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='RefCOCO', + data_root='data/coco', + data_prefix='train2014', + ann_file='refcoco/instances.json', + split_file='refcoco/refs(unc).p', + split='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + drop_last=True, +) + +val_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='RefCOCO', + data_root='data/coco', + data_prefix='train2014', + ann_file='refcoco/instances.json', + split_file='refcoco/refs(unc).p', + split='val', + 
pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) + +val_evaluator = dict(type='VisualGroundingMetric') + +test_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='RefCOCO', + data_root='data/coco', + data_prefix='train2014', + ann_file='refcoco/instances.json', + split_file='refcoco/refs(unc).p', + split='testA', # or 'testB' + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/tiny_imagenet_bs32.py b/configs/_base_/datasets/tiny_imagenet_bs32.py new file mode 100644 index 0000000..6701413 --- /dev/null +++ b/configs/_base_/datasets/tiny_imagenet_bs32.py @@ -0,0 +1,51 @@ +# dataset settings +dataset_type = 'CustomDataset' +data_preprocessor = dict( + num_classes=200, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + data_prefix='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/tiny_imagenet_bs32_pil_resize.py b/configs/_base_/datasets/tiny_imagenet_bs32_pil_resize.py new file mode 100644 index 0000000..66250a4 --- /dev/null +++ b/configs/_base_/datasets/tiny_imagenet_bs32_pil_resize.py @@ -0,0 +1,51 @@ +# dataset settings +dataset_type = 'CustomDataset' +data_preprocessor = dict( + num_classes=200, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + data_prefix='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset 
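+# For example, a hypothetical sketch of a dedicated test split (the 'test' data_prefix below is an assumption, not something provided by this patch): +# test_dataloader = dict( +#     batch_size=32, +#     num_workers=5, +#     dataset=dict( +#         type=dataset_type, +#         data_root='data/imagenet', +#         data_prefix='test', +#         pipeline=test_pipeline), +#     sampler=dict(type='DefaultSampler', shuffle=False), +# ) +# test_evaluator = dict(type='Accuracy', topk=(1, 5))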
+test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/tiny_imagenet_bs64_pil_resize_autoaug.py b/configs/_base_/datasets/tiny_imagenet_bs64_pil_resize_autoaug.py new file mode 100644 index 0000000..0c41d7f --- /dev/null +++ b/configs/_base_/datasets/tiny_imagenet_bs64_pil_resize_autoaug.py @@ -0,0 +1,68 @@ +# dataset settings +dataset_type = 'CustomDataset' +data_preprocessor = dict( + num_classes=200, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='AutoAugment', + policies='imagenet', + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + data_prefix='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/tiny_imagenet_bs64_swin_224.py b/configs/_base_/datasets/tiny_imagenet_bs64_swin_224.py new file mode 100644 index 0000000..bddb78b --- /dev/null +++ b/configs/_base_/datasets/tiny_imagenet_bs64_swin_224.py @@ -0,0 +1,80 @@ +# dataset settings +dataset_type = 'CustomDataset' +data_preprocessor = dict( + num_classes=200, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + 
data_root='data/imagenet', + data_prefix='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/vizwiz.py b/configs/_base_/datasets/vizwiz.py new file mode 100644 index 0000000..bb7156c --- /dev/null +++ b/configs/_base_/datasets/vizwiz.py @@ -0,0 +1,80 @@ +# data settings + +data_preprocessor = dict( + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + interpolation='bicubic', + backend='pillow'), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=['question_id', 'image_id'], + ), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(480, 480), + interpolation='bicubic', + backend='pillow'), + dict( + type='CleanCaption', + keys=['question'], + ), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=['question_id', 'image_id'], + ), +] + +train_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='VizWiz', + data_root='data/vizwiz/Images', + data_prefix='', + ann_file='Annotations/train.json', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, + drop_last=True, +) + +val_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='VizWiz', + data_root='data/vizwiz/Images', + data_prefix='', + ann_file='Annotations/val.json', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +val_evaluator = dict(type='VizWizAcc') + +test_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='VizWiz', + data_root='data/vizwiz/Images', + data_prefix='', + ann_file='Annotations/test.json', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='ReportVQA', file_path='vqa_test.json') diff --git a/configs/_base_/datasets/voc_bs16.py b/configs/_base_/datasets/voc_bs16.py new file mode 100644 index 0000000..cac2248 --- /dev/null +++ b/configs/_base_/datasets/voc_bs16.py @@ -0,0 +1,65 @@ +# dataset settings +dataset_type = 'VOC' +data_preprocessor = dict( + num_classes=20, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, + # generate onehot-format labels for multi-label classification. 
+ to_onehot=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short'), + dict(type='CenterCrop', crop_size=224), + dict( + type='PackInputs', + # `gt_label_difficult` is needed for VOC evaluation + meta_keys=('sample_idx', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', + 'gt_label_difficult')), +] + +train_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/VOC2007', + split='trainval', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=16, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/VOC2007', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) + +# calculate precision_recall_f1 and mAP +val_evaluator = [ + dict(type='VOCMultiLabelMetric'), + dict(type='VOCMultiLabelMetric', average='micro'), + dict(type='VOCAveragePrecision') +] + +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/configs/_base_/datasets/vsr.py b/configs/_base_/datasets/vsr.py new file mode 100644 index 0000000..0fa9b89 --- /dev/null +++ b/configs/_base_/datasets/vsr.py @@ -0,0 +1,81 @@ +# data settings + +data_preprocessor = dict( + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + interpolation='bicubic', + backend='pillow'), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=['question_id', 'image_id'], + ), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(480, 480), + interpolation='bicubic', + backend='pillow'), + dict( + type='CleanCaption', + keys=['question'], + ), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=['question_id', 'image_id'], + ), +] + +train_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='VSR', + data_root='data/coco', + data_prefix='', + ann_file='annotations/train.json', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, + drop_last=True, +) + +val_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='VSR', + data_root='data/coco', + data_prefix='', + ann_file='annotations/val.json', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +val_evaluator = dict(type='VSRAcc') + +test_dataloader = dict( + batch_size=16, + num_workers=8, + dataset=dict( + type='VSR', + data_root='data/coco', + data_prefix='', + ann_file='annotations/test.json', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +test_evaluator = val_evaluator diff --git a/configs/_base_/default_runtime.py b/configs/_base_/default_runtime.py new file mode 100644 index 0000000..3816d42 --- /dev/null +++ b/configs/_base_/default_runtime.py @@ -0,0 +1,51 @@ +# defaults to use registries in mmpretrain +default_scope = 'mmpretrain' + +# configure default hooks
+default_hooks = dict( + # record the time of every iteration. + timer=dict(type='IterTimerHook'), + + # print log every 100 iterations. + logger=dict(type='LoggerHook', interval=100), + + # enable the parameter scheduler. + param_scheduler=dict(type='ParamSchedulerHook'), + + # save checkpoint per epoch. + checkpoint=dict(type='CheckpointHook', interval=1), + + # set sampler seed in distributed environment. + sampler_seed=dict(type='DistSamplerSeedHook'), + + # validation results visualization, set True to enable it. + visualization=dict(type='VisualizationHook', enable=False), +) + +# configure environment +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +# set visualizer +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict(type='UniversalVisualizer', vis_backends=vis_backends) + +# set log level +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume training from the loaded checkpoint +resume = False + +# Defaults to use random seed and disable `deterministic` +randomness = dict(seed=None, deterministic=False) diff --git a/configs/_base_/models/conformer/base-p16.py b/configs/_base_/models/conformer/base-p16.py new file mode 100644 index 0000000..959da50 --- /dev/null +++ b/configs/_base_/models/conformer/base-p16.py @@ -0,0 +1,23 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='Conformer', arch='base', drop_path_rate=0.1, init_cfg=None), + neck=None, + head=dict( + type='ConformerHead', + num_classes=1000, + in_channels=[1536, 576], + init_cfg=None, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/conformer/small-p16.py b/configs/_base_/models/conformer/small-p16.py new file mode 100644 index 0000000..2e4f9f8 --- /dev/null +++ b/configs/_base_/models/conformer/small-p16.py @@ -0,0 +1,23 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='Conformer', arch='small', drop_path_rate=0.1, init_cfg=None), + neck=None, + head=dict( + type='ConformerHead', + num_classes=1000, + in_channels=[1024, 384], + init_cfg=None, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.)
+ ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/conformer/small-p32.py b/configs/_base_/models/conformer/small-p32.py new file mode 100644 index 0000000..f73811f --- /dev/null +++ b/configs/_base_/models/conformer/small-p32.py @@ -0,0 +1,27 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='Conformer', + arch='small', + patch_size=32, + drop_path_rate=0.1, + init_cfg=None), + neck=None, + head=dict( + type='ConformerHead', + num_classes=1000, + in_channels=[1024, 384], + init_cfg=None, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/conformer/tiny-p16.py b/configs/_base_/models/conformer/tiny-p16.py new file mode 100644 index 0000000..fa9753b --- /dev/null +++ b/configs/_base_/models/conformer/tiny-p16.py @@ -0,0 +1,23 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='Conformer', arch='tiny', drop_path_rate=0.1, init_cfg=None), + neck=None, + head=dict( + type='ConformerHead', + num_classes=1000, + in_channels=[256, 384], + init_cfg=None, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/convmixer/convmixer-1024-20.py b/configs/_base_/models/convmixer/convmixer-1024-20.py new file mode 100644 index 0000000..a8f4d51 --- /dev/null +++ b/configs/_base_/models/convmixer/convmixer-1024-20.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ConvMixer', arch='1024/20'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/convmixer/convmixer-1536-20.py b/configs/_base_/models/convmixer/convmixer-1536-20.py new file mode 100644 index 0000000..9ad8209 --- /dev/null +++ b/configs/_base_/models/convmixer/convmixer-1536-20.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ConvMixer', arch='1536/20'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/convmixer/convmixer-768-32.py b/configs/_base_/models/convmixer/convmixer-768-32.py new file mode 100644 index 0000000..1cba528 --- /dev/null +++ b/configs/_base_/models/convmixer/convmixer-768-32.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ConvMixer', arch='768/32', act_cfg=dict(type='ReLU')), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/convnext/convnext-base.py b/configs/_base_/models/convnext/convnext-base.py 
new file mode 100644 index 0000000..aba6c19 --- /dev/null +++ b/configs/_base_/models/convnext/convnext-base.py @@ -0,0 +1,19 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ConvNeXt', arch='base', drop_path_rate=0.5), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) diff --git a/configs/_base_/models/convnext/convnext-large.py b/configs/_base_/models/convnext/convnext-large.py new file mode 100644 index 0000000..9bd4d9f --- /dev/null +++ b/configs/_base_/models/convnext/convnext-large.py @@ -0,0 +1,19 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ConvNeXt', arch='large', drop_path_rate=0.5), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) diff --git a/configs/_base_/models/convnext/convnext-small.py b/configs/_base_/models/convnext/convnext-small.py new file mode 100644 index 0000000..aeedb6d --- /dev/null +++ b/configs/_base_/models/convnext/convnext-small.py @@ -0,0 +1,19 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ConvNeXt', arch='small', drop_path_rate=0.4), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) diff --git a/configs/_base_/models/convnext/convnext-tiny.py b/configs/_base_/models/convnext/convnext-tiny.py new file mode 100644 index 0000000..05baba0 --- /dev/null +++ b/configs/_base_/models/convnext/convnext-tiny.py @@ -0,0 +1,19 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ConvNeXt', arch='tiny', drop_path_rate=0.1), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) diff --git a/configs/_base_/models/convnext/convnext-xlarge.py b/configs/_base_/models/convnext/convnext-xlarge.py new file mode 100644 index 0000000..7211b94 --- /dev/null +++ b/configs/_base_/models/convnext/convnext-xlarge.py @@ -0,0 +1,19 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ConvNeXt', arch='xlarge', drop_path_rate=0.5), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', 
alpha=1.0), + ]), +) diff --git a/configs/_base_/models/convnext_v2/atto.py b/configs/_base_/models/convnext_v2/atto.py new file mode 100644 index 0000000..557ce93 --- /dev/null +++ b/configs/_base_/models/convnext_v2/atto.py @@ -0,0 +1,20 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='atto', + drop_path_rate=0.1, + layer_scale_init_value=0., + use_grn=True, + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=320, + loss=dict(type='LabelSmoothLoss', label_smooth_val=0.2), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), +) diff --git a/configs/_base_/models/convnext_v2/base.py b/configs/_base_/models/convnext_v2/base.py new file mode 100644 index 0000000..1401ef7 --- /dev/null +++ b/configs/_base_/models/convnext_v2/base.py @@ -0,0 +1,24 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='base', + drop_path_rate=0.1, + layer_scale_init_value=0., + use_grn=True, + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) diff --git a/configs/_base_/models/convnext_v2/femto.py b/configs/_base_/models/convnext_v2/femto.py new file mode 100644 index 0000000..d56a241 --- /dev/null +++ b/configs/_base_/models/convnext_v2/femto.py @@ -0,0 +1,20 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='femto', + drop_path_rate=0.1, + layer_scale_init_value=0., + use_grn=True, + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), +) diff --git a/configs/_base_/models/convnext_v2/huge.py b/configs/_base_/models/convnext_v2/huge.py new file mode 100644 index 0000000..54141dd --- /dev/null +++ b/configs/_base_/models/convnext_v2/huge.py @@ -0,0 +1,24 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='huge', + drop_path_rate=0.1, + layer_scale_init_value=0., + use_grn=True, + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2816, + loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) diff --git a/configs/_base_/models/convnext_v2/large.py b/configs/_base_/models/convnext_v2/large.py new file mode 100644 index 0000000..20237de --- /dev/null +++ b/configs/_base_/models/convnext_v2/large.py @@ -0,0 +1,24 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='large', + drop_path_rate=0.1, + layer_scale_init_value=0., + use_grn=True, + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + 
dict(type='CutMix', alpha=1.0), + ]), +) diff --git a/configs/_base_/models/convnext_v2/nano.py b/configs/_base_/models/convnext_v2/nano.py new file mode 100644 index 0000000..05575d0 --- /dev/null +++ b/configs/_base_/models/convnext_v2/nano.py @@ -0,0 +1,20 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='nano', + drop_path_rate=0.1, + layer_scale_init_value=0., + use_grn=True, + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=640, + loss=dict(type='LabelSmoothLoss', label_smooth_val=0.2), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), +) diff --git a/configs/_base_/models/convnext_v2/pico.py b/configs/_base_/models/convnext_v2/pico.py new file mode 100644 index 0000000..6d50ba8 --- /dev/null +++ b/configs/_base_/models/convnext_v2/pico.py @@ -0,0 +1,20 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='pico', + drop_path_rate=0.1, + layer_scale_init_value=0., + use_grn=True, + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), +) diff --git a/configs/_base_/models/convnext_v2/tiny.py b/configs/_base_/models/convnext_v2/tiny.py new file mode 100644 index 0000000..c9835cc --- /dev/null +++ b/configs/_base_/models/convnext_v2/tiny.py @@ -0,0 +1,24 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='tiny', + drop_path_rate=0.2, + layer_scale_init_value=0., + use_grn=True, + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='LabelSmoothLoss', label_smooth_val=0.2), + init_cfg=None, + ), + init_cfg=dict( + type='TruncNormal', layer=['Conv2d', 'Linear'], std=.02, bias=0.), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) diff --git a/configs/_base_/models/davit/davit-base.py b/configs/_base_/models/davit/davit-base.py new file mode 100644 index 0000000..0dbf077 --- /dev/null +++ b/configs/_base_/models/davit/davit-base.py @@ -0,0 +1,16 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='DaViT', arch='base', out_indices=(3, ), drop_path_rate=0.4), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/davit/davit-small.py b/configs/_base_/models/davit/davit-small.py new file mode 100644 index 0000000..2fa0325 --- /dev/null +++ b/configs/_base_/models/davit/davit-small.py @@ -0,0 +1,16 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='DaViT', arch='small', out_indices=(3, ), drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/davit/davit-tiny.py b/configs/_base_/models/davit/davit-tiny.py new file mode 100644 index 0000000..29432d2 --- 
/dev/null +++ b/configs/_base_/models/davit/davit-tiny.py @@ -0,0 +1,16 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='DaViT', arch='t', out_indices=(3, ), drop_path_rate=0.1), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/deit3/deit3-base-p16-224.py b/configs/_base_/models/deit3/deit3-base-p16-224.py new file mode 100644 index 0000000..84cba1a --- /dev/null +++ b/configs/_base_/models/deit3/deit3-base-p16-224.py @@ -0,0 +1,24 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='DeiT3', + arch='b', + img_size=224, + patch_size=16, + drop_path_rate=0.2), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/deit3/deit3-base-p16-384.py b/configs/_base_/models/deit3/deit3-base-p16-384.py new file mode 100644 index 0000000..1c9f42b --- /dev/null +++ b/configs/_base_/models/deit3/deit3-base-p16-384.py @@ -0,0 +1,24 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='DeiT3', + arch='b', + img_size=384, + patch_size=16, + drop_path_rate=0.15), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/deit3/deit3-huge-p14-224.py b/configs/_base_/models/deit3/deit3-huge-p14-224.py new file mode 100644 index 0000000..b7a69ce --- /dev/null +++ b/configs/_base_/models/deit3/deit3-huge-p14-224.py @@ -0,0 +1,24 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='DeiT3', + arch='h', + img_size=224, + patch_size=14, + drop_path_rate=0.55), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=1280, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/deit3/deit3-large-p16-224.py b/configs/_base_/models/deit3/deit3-large-p16-224.py new file mode 100644 index 0000000..96135c5 --- /dev/null +++ b/configs/_base_/models/deit3/deit3-large-p16-224.py @@ -0,0 +1,24 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='DeiT3', + arch='l', + img_size=224, + patch_size=16, + drop_path_rate=0.45), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=1024, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', 
std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/deit3/deit3-large-p16-384.py b/configs/_base_/models/deit3/deit3-large-p16-384.py new file mode 100644 index 0000000..aa9326c --- /dev/null +++ b/configs/_base_/models/deit3/deit3-large-p16-384.py @@ -0,0 +1,24 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='DeiT3', + arch='l', + img_size=384, + patch_size=16, + drop_path_rate=0.4), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=1024, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/deit3/deit3-medium-p16-224.py b/configs/_base_/models/deit3/deit3-medium-p16-224.py new file mode 100644 index 0000000..84233e5 --- /dev/null +++ b/configs/_base_/models/deit3/deit3-medium-p16-224.py @@ -0,0 +1,24 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='DeiT3', + arch='m', + img_size=224, + patch_size=16, + drop_path_rate=0.2), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=512, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/deit3/deit3-small-p16-224.py b/configs/_base_/models/deit3/deit3-small-p16-224.py new file mode 100644 index 0000000..af29d32 --- /dev/null +++ b/configs/_base_/models/deit3/deit3-small-p16-224.py @@ -0,0 +1,24 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='DeiT3', + arch='s', + img_size=224, + patch_size=16, + drop_path_rate=0.05), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=384, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/deit3/deit3-small-p16-384.py b/configs/_base_/models/deit3/deit3-small-p16-384.py new file mode 100644 index 0000000..bebb484 --- /dev/null +++ b/configs/_base_/models/deit3/deit3-small-p16-384.py @@ -0,0 +1,24 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='DeiT3', + arch='s', + img_size=384, + patch_size=16, + drop_path_rate=0.0), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=384, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/densenet/densenet121.py b/configs/_base_/models/densenet/densenet121.py new file mode 100644 index 
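Almost every classification base in this hunk enables batch augmentation through train_cfg with Mixup (alpha=0.8) and CutMix (alpha=1.0), which is why the heads use LabelSmoothLoss rather than hard-target cross entropy: the loss has to accept soft, mixed labels. As a conceptual illustration of what the Mixup entry does (a sketch of the standard recipe, not the repository's augmentation class):

import torch

def mixup_batch(images, one_hot_targets, alpha=0.8):
    # Draw a mixing ratio from Beta(alpha, alpha), as in the original Mixup paper.
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    # Pair each sample with a randomly permuted partner and blend both the
    # pixels and the (soft) labels with the same ratio.
    index = torch.randperm(images.size(0))
    mixed_images = lam * images + (1.0 - lam) * images[index]
    mixed_targets = lam * one_hot_targets + (1.0 - lam) * one_hot_targets[index]
    return mixed_images, mixed_targets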
0000000..0a14d30 --- /dev/null +++ b/configs/_base_/models/densenet/densenet121.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='DenseNet', arch='121'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/densenet/densenet161.py b/configs/_base_/models/densenet/densenet161.py new file mode 100644 index 0000000..61a0d83 --- /dev/null +++ b/configs/_base_/models/densenet/densenet161.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='DenseNet', arch='161'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2208, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/densenet/densenet169.py b/configs/_base_/models/densenet/densenet169.py new file mode 100644 index 0000000..779ea17 --- /dev/null +++ b/configs/_base_/models/densenet/densenet169.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='DenseNet', arch='169'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1664, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/densenet/densenet201.py b/configs/_base_/models/densenet/densenet201.py new file mode 100644 index 0000000..2909af0 --- /dev/null +++ b/configs/_base_/models/densenet/densenet201.py @@ -0,0 +1,11 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='DenseNet', arch='201'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1920, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/edgenext/edgenext-base.py b/configs/_base_/models/edgenext/edgenext-base.py new file mode 100644 index 0000000..3783972 --- /dev/null +++ b/configs/_base_/models/edgenext/edgenext-base.py @@ -0,0 +1,23 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='EdgeNeXt', + arch='base', + out_indices=(3, ), + drop_path_rate=0.1, + gap_before_final_norm=True, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.), + ]), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=584, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/edgenext/edgenext-small.py b/configs/_base_/models/edgenext/edgenext-small.py new file mode 100644 index 0000000..e1f7e17 --- /dev/null +++ b/configs/_base_/models/edgenext/edgenext-small.py @@ -0,0 +1,23 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='EdgeNeXt', + arch='small', + out_indices=(3, ), + drop_path_rate=0.1, + gap_before_final_norm=True, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.), + ]), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=304, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/edgenext/edgenext-xsmall.py b/configs/_base_/models/edgenext/edgenext-xsmall.py new file mode 100644 index 0000000..69c7d0d --- 
/dev/null +++ b/configs/_base_/models/edgenext/edgenext-xsmall.py @@ -0,0 +1,23 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='EdgeNeXt', + arch='xsmall', + out_indices=(3, ), + drop_path_rate=0.1, + gap_before_final_norm=True, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.), + ]), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=192, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/edgenext/edgenext-xxsmall.py b/configs/_base_/models/edgenext/edgenext-xxsmall.py new file mode 100644 index 0000000..fb68819 --- /dev/null +++ b/configs/_base_/models/edgenext/edgenext-xxsmall.py @@ -0,0 +1,23 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='EdgeNeXt', + arch='xxsmall', + out_indices=(3, ), + drop_path_rate=0.1, + gap_before_final_norm=True, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.), + ]), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=168, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/efficientformer-l1.py b/configs/_base_/models/efficientformer-l1.py new file mode 100644 index 0000000..37dc62c --- /dev/null +++ b/configs/_base_/models/efficientformer-l1.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='EfficientFormer', + arch='l1', + drop_path_rate=0, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-5) + ]), + neck=dict(type='GlobalAveragePooling', dim=1), + head=dict( + type='EfficientFormerClsHead', in_channels=448, num_classes=1000)) diff --git a/configs/_base_/models/efficientnet_b0.py b/configs/_base_/models/efficientnet_b0.py new file mode 100644 index 0000000..d9ba685 --- /dev/null +++ b/configs/_base_/models/efficientnet_b0.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b0'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b1.py b/configs/_base_/models/efficientnet_b1.py new file mode 100644 index 0000000..63e15c8 --- /dev/null +++ b/configs/_base_/models/efficientnet_b1.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b1'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b2.py b/configs/_base_/models/efficientnet_b2.py new file mode 100644 index 0000000..5edcfa5 --- /dev/null +++ b/configs/_base_/models/efficientnet_b2.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b2'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1408, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + 
topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b3.py b/configs/_base_/models/efficientnet_b3.py new file mode 100644 index 0000000..c7c6d6d --- /dev/null +++ b/configs/_base_/models/efficientnet_b3.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b3'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b4.py b/configs/_base_/models/efficientnet_b4.py new file mode 100644 index 0000000..06840ed --- /dev/null +++ b/configs/_base_/models/efficientnet_b4.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b4'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1792, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b5.py b/configs/_base_/models/efficientnet_b5.py new file mode 100644 index 0000000..a86eebd --- /dev/null +++ b/configs/_base_/models/efficientnet_b5.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b5'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b6.py b/configs/_base_/models/efficientnet_b6.py new file mode 100644 index 0000000..4eada1d --- /dev/null +++ b/configs/_base_/models/efficientnet_b6.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b6'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2304, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b7.py b/configs/_base_/models/efficientnet_b7.py new file mode 100644 index 0000000..1d84ba4 --- /dev/null +++ b/configs/_base_/models/efficientnet_b7.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b7'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2560, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_b8.py b/configs/_base_/models/efficientnet_b8.py new file mode 100644 index 0000000..c950064 --- /dev/null +++ b/configs/_base_/models/efficientnet_b8.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='b8'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2816, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_em.py b/configs/_base_/models/efficientnet_em.py new file mode 100644 index 0000000..abecdbe --- /dev/null +++ b/configs/_base_/models/efficientnet_em.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + # `em` means EfficientNet-EdgeTPU-M arch + backbone=dict(type='EfficientNet', arch='em', act_cfg=dict(type='ReLU')), + 
neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_es.py b/configs/_base_/models/efficientnet_es.py new file mode 100644 index 0000000..911ba4a --- /dev/null +++ b/configs/_base_/models/efficientnet_es.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + # `es` means EfficientNet-EdgeTPU-S arch + backbone=dict(type='EfficientNet', arch='es', act_cfg=dict(type='ReLU')), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_l2.py b/configs/_base_/models/efficientnet_l2.py new file mode 100644 index 0000000..4219c87 --- /dev/null +++ b/configs/_base_/models/efficientnet_l2.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNet', arch='l2'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=5504, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_v2/efficientnetv2_b0.py b/configs/_base_/models/efficientnet_v2/efficientnetv2_b0.py new file mode 100644 index 0000000..d42e329 --- /dev/null +++ b/configs/_base_/models/efficientnet_v2/efficientnetv2_b0.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNetV2', arch='b0'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_v2/efficientnetv2_b1.py b/configs/_base_/models/efficientnet_v2/efficientnetv2_b1.py new file mode 100644 index 0000000..10736fc --- /dev/null +++ b/configs/_base_/models/efficientnet_v2/efficientnetv2_b1.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNetV2', arch='b1'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_v2/efficientnetv2_b2.py b/configs/_base_/models/efficientnet_v2/efficientnetv2_b2.py new file mode 100644 index 0000000..61f4771 --- /dev/null +++ b/configs/_base_/models/efficientnet_v2/efficientnetv2_b2.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNetV2', arch='b2'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1408, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_v2/efficientnetv2_b3.py b/configs/_base_/models/efficientnet_v2/efficientnetv2_b3.py new file mode 100644 index 0000000..14e523f --- /dev/null +++ b/configs/_base_/models/efficientnet_v2/efficientnetv2_b3.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNetV2', arch='b3'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + 
loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_v2/efficientnetv2_l.py b/configs/_base_/models/efficientnet_v2/efficientnetv2_l.py new file mode 100644 index 0000000..456467d --- /dev/null +++ b/configs/_base_/models/efficientnet_v2/efficientnetv2_l.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNetV2', arch='l'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_v2/efficientnetv2_m.py b/configs/_base_/models/efficientnet_v2/efficientnetv2_m.py new file mode 100644 index 0000000..8e4d303 --- /dev/null +++ b/configs/_base_/models/efficientnet_v2/efficientnetv2_m.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNetV2', arch='m'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_v2/efficientnetv2_s.py b/configs/_base_/models/efficientnet_v2/efficientnetv2_s.py new file mode 100644 index 0000000..8666482 --- /dev/null +++ b/configs/_base_/models/efficientnet_v2/efficientnetv2_s.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNetV2', arch='s'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/efficientnet_v2/efficientnetv2_xl.py b/configs/_base_/models/efficientnet_v2/efficientnetv2_xl.py new file mode 100644 index 0000000..2216c9d --- /dev/null +++ b/configs/_base_/models/efficientnet_v2/efficientnetv2_xl.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='EfficientNetV2', arch='xl'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/eva/eva-g.py b/configs/_base_/models/eva/eva-g.py new file mode 100644 index 0000000..17bc84a --- /dev/null +++ b/configs/_base_/models/eva/eva-g.py @@ -0,0 +1,29 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='BEiTViT', + arch='eva-g', + img_size=224, + patch_size=14, + layer_scale_init_value=0.0, + out_type='avg_featmap', + use_abs_pos_emb=True, + use_rel_pos_bias=False, + use_shared_rel_pos_bias=False, + ), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1408, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/eva/eva-l.py b/configs/_base_/models/eva/eva-l.py new file mode 100644 index 0000000..9b08e4b --- /dev/null +++ b/configs/_base_/models/eva/eva-l.py @@ -0,0 +1,30 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='BEiTViT', + arch='l', + img_size=224, + 
patch_size=14, + layer_scale_init_value=0.0, + out_type='avg_featmap', + use_abs_pos_emb=True, + use_rel_pos_bias=False, + use_shared_rel_pos_bias=False, + layer_cfgs=dict(bias=True), + ), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/hivit/base_224.py b/configs/_base_/models/hivit/base_224.py new file mode 100644 index 0000000..a87a68c --- /dev/null +++ b/configs/_base_/models/hivit/base_224.py @@ -0,0 +1,28 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='HiViT', + arch='base', + img_size=224, + ape=True, + rpe=True, + drop_path_rate=0.5), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/hivit/small_224.py b/configs/_base_/models/hivit/small_224.py new file mode 100644 index 0000000..333b246 --- /dev/null +++ b/configs/_base_/models/hivit/small_224.py @@ -0,0 +1,28 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='HiViT', + arch='small', + img_size=224, + ape=True, + rpe=True, + drop_path_rate=0.3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/hivit/tiny_224.py b/configs/_base_/models/hivit/tiny_224.py new file mode 100644 index 0000000..b3e2fdb --- /dev/null +++ b/configs/_base_/models/hivit/tiny_224.py @@ -0,0 +1,28 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='HiViT', + arch='tiny', + img_size=224, + ape=True, + rpe=True, + drop_path_rate=0.05), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/hornet/hornet-base-gf.py b/configs/_base_/models/hornet/hornet-base-gf.py new file mode 100644 index 0000000..b6924f9 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-base-gf.py @@ -0,0 +1,20 @@ +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='base-gf', drop_path_rate=0.5), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/hornet/hornet-base.py b/configs/_base_/models/hornet/hornet-base.py new file mode 100644 index 0000000..904379a --- /dev/null +++ b/configs/_base_/models/hornet/hornet-base.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='base', drop_path_rate=0.5), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/hornet/hornet-large-gf.py b/configs/_base_/models/hornet/hornet-large-gf.py new file mode 100644 index 0000000..1607ba2 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-large-gf.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='large-gf', drop_path_rate=0.2), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/hornet/hornet-large-gf384.py b/configs/_base_/models/hornet/hornet-large-gf384.py new file mode 100644 index 0000000..fbb5478 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-large-gf384.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='large-gf384', drop_path_rate=0.4), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ]) diff --git a/configs/_base_/models/hornet/hornet-large.py b/configs/_base_/models/hornet/hornet-large.py new file mode 100644 index 0000000..b5494fd --- /dev/null +++ b/configs/_base_/models/hornet/hornet-large.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='large', drop_path_rate=0.2), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/hornet/hornet-small-gf.py b/configs/_base_/models/hornet/hornet-small-gf.py new file mode 100644 index 0000000..42e26d3 --- /dev/null +++ b/configs/_base_/models/hornet/hornet-small-gf.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='small-gf', drop_path_rate=0.4), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/hornet/hornet-small.py b/configs/_base_/models/hornet/hornet-small.py new file mode 100644 index 0000000..d59184d --- /dev/null +++ b/configs/_base_/models/hornet/hornet-small.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='small', drop_path_rate=0.4), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/hornet/hornet-tiny-gf.py b/configs/_base_/models/hornet/hornet-tiny-gf.py new file mode 100644 index 0000000..6b06f5b --- /dev/null +++ b/configs/_base_/models/hornet/hornet-tiny-gf.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='tiny-gf', drop_path_rate=0.2), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
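The init_cfg lists repeated across the DeiT3, EVA, HiViT and HorNet bases (TruncNormal with std 0.02 on Linear layers, Constant 1/0 on LayerNorm, and in some files a small Constant value on LayerScale) describe a ViT-style weight initialization. In plain PyTorch terms that roughly corresponds to the following sketch (illustrative only; the configs themselves rely on the registry-driven initializers):

import torch.nn as nn

def vit_style_init(module):
    # TruncNormal on Linear weights (std=0.02), zero bias.
    if isinstance(module, nn.Linear):
        nn.init.trunc_normal_(module.weight, std=0.02)
        if module.bias is not None:
            nn.init.zeros_(module.bias)
    # Constant init for LayerNorm: scale 1, bias 0.
    elif isinstance(module, nn.LayerNorm):
        nn.init.ones_(module.weight)
        nn.init.zeros_(module.bias)

# Typically applied recursively, e.g. model.apply(vit_style_init).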
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/hornet/hornet-tiny.py b/configs/_base_/models/hornet/hornet-tiny.py new file mode 100644 index 0000000..aed710e --- /dev/null +++ b/configs/_base_/models/hornet/hornet-tiny.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HorNet', arch='tiny', drop_path_rate=0.2), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + dict(type='Constant', layer=['LayerScale'], val=1e-6) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/hrnet/hrnet-w18.py b/configs/_base_/models/hrnet/hrnet-w18.py new file mode 100644 index 0000000..f7fbf29 --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w18.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w18'), + neck=[ + dict(type='HRFuseScales', in_channels=(18, 36, 72, 144)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hrnet/hrnet-w30.py b/configs/_base_/models/hrnet/hrnet-w30.py new file mode 100644 index 0000000..babcaca --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w30.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w30'), + neck=[ + dict(type='HRFuseScales', in_channels=(30, 60, 120, 240)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hrnet/hrnet-w32.py b/configs/_base_/models/hrnet/hrnet-w32.py new file mode 100644 index 0000000..2c1e980 --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w32.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w32'), + neck=[ + dict(type='HRFuseScales', in_channels=(32, 64, 128, 256)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hrnet/hrnet-w40.py b/configs/_base_/models/hrnet/hrnet-w40.py new file mode 100644 index 0000000..83f65d8 --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w40.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w40'), + neck=[ + dict(type='HRFuseScales', in_channels=(40, 80, 160, 320)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + 
loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hrnet/hrnet-w44.py b/configs/_base_/models/hrnet/hrnet-w44.py new file mode 100644 index 0000000..e75dc0f --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w44.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w44'), + neck=[ + dict(type='HRFuseScales', in_channels=(44, 88, 176, 352)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hrnet/hrnet-w48.py b/configs/_base_/models/hrnet/hrnet-w48.py new file mode 100644 index 0000000..f060495 --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w48.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w48'), + neck=[ + dict(type='HRFuseScales', in_channels=(48, 96, 192, 384)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/hrnet/hrnet-w64.py b/configs/_base_/models/hrnet/hrnet-w64.py new file mode 100644 index 0000000..844c3fe --- /dev/null +++ b/configs/_base_/models/hrnet/hrnet-w64.py @@ -0,0 +1,15 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='HRNet', arch='w64'), + neck=[ + dict(type='HRFuseScales', in_channels=(64, 128, 256, 512)), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + in_channels=2048, + num_classes=1000, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/inception_v3.py b/configs/_base_/models/inception_v3.py new file mode 100644 index 0000000..3f6a830 --- /dev/null +++ b/configs/_base_/models/inception_v3.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='InceptionV3', num_classes=1000, aux_logits=False), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5)), +) diff --git a/configs/_base_/models/itpn_hivit-base-p16.py b/configs/_base_/models/itpn_hivit-base-p16.py new file mode 100644 index 0000000..834d6fe --- /dev/null +++ b/configs/_base_/models/itpn_hivit-base-p16.py @@ -0,0 +1,33 @@ +# model settings +model = dict( + type='iTPN', + backbone=dict( + type='iTPNHiViT', + arch='base', + reconstruction_type='pixel', + mask_ratio=0.75), + neck=dict( + type='iTPNPretrainDecoder', + num_patches=196, + patch_size=16, + in_chans=3, + embed_dim=512, + decoder_embed_dim=512, + decoder_depth=6, + decoder_num_heads=16, + mlp_ratio=4., + reconstruction_type='pixel', + # transformer pyramid + fpn_dim=256, + fpn_depth=2, + num_outs=3, + ), + head=dict( + type='MAEPretrainHead', + norm_pix=True, + patch_size=16, + loss=dict(type='PixelReconstructionLoss', criterion='L2')), + init_cfg=[ + dict(type='Xavier', layer='Linear', distribution='uniform'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]) diff --git a/configs/_base_/models/levit-256-p16.py b/configs/_base_/models/levit-256-p16.py new file mode 100644 index 0000000..936305b --- /dev/null +++ b/configs/_base_/models/levit-256-p16.py @@ -0,0 +1,26 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='LeViT', + 
arch='256', + img_size=224, + patch_size=16, + drop_path_rate=0, + attn_ratio=2, + mlp_ratio=2, + out_indices=(2, )), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LeViTClsHead', + num_classes=1000, + in_channels=512, + distillation=True, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, loss_weight=1.0), + topk=(1, 5), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ])) diff --git a/configs/_base_/models/mae_hivit-base-p16.py b/configs/_base_/models/mae_hivit-base-p16.py new file mode 100644 index 0000000..bac073c --- /dev/null +++ b/configs/_base_/models/mae_hivit-base-p16.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='MAE', + backbone=dict( + type='MAEHiViT', patch_size=16, arch='base', mask_ratio=0.75), + neck=dict( + type='MAEPretrainDecoder', + patch_size=16, + in_chans=3, + embed_dim=512, + decoder_embed_dim=512, + decoder_depth=6, + decoder_num_heads=16, + mlp_ratio=4., + ), + head=dict( + type='MAEPretrainHead', + norm_pix=True, + patch_size=16, + loss=dict(type='PixelReconstructionLoss', criterion='L2')), + init_cfg=[ + dict(type='Xavier', layer='Linear', distribution='uniform'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]) diff --git a/configs/_base_/models/mae_vit-base-p16.py b/configs/_base_/models/mae_vit-base-p16.py new file mode 100644 index 0000000..8cde8cb --- /dev/null +++ b/configs/_base_/models/mae_vit-base-p16.py @@ -0,0 +1,23 @@ +# model settings +model = dict( + type='MAE', + backbone=dict(type='MAEViT', arch='b', patch_size=16, mask_ratio=0.75), + neck=dict( + type='MAEPretrainDecoder', + patch_size=16, + in_chans=3, + embed_dim=768, + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=16, + mlp_ratio=4., + ), + head=dict( + type='MAEPretrainHead', + norm_pix=True, + patch_size=16, + loss=dict(type='PixelReconstructionLoss', criterion='L2')), + init_cfg=[ + dict(type='Xavier', layer='Linear', distribution='uniform'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]) diff --git a/configs/_base_/models/mixmim/mixmim_base.py b/configs/_base_/models/mixmim/mixmim_base.py new file mode 100644 index 0000000..ccde357 --- /dev/null +++ b/configs/_base_/models/mixmim/mixmim_base.py @@ -0,0 +1,20 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='MixMIMTransformer', arch='B', drop_rate=0.0, drop_path_rate=0.1), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + init_cfg=None, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
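The MAE-style pretraining bases above (mae_hivit-base-p16 and mae_vit-base-p16) pair a masked encoder with a light decoder and an L2 PixelReconstructionLoss; norm_pix=True means each target patch is standardized by its own mean and variance before the error is averaged over the masked patches only. A conceptual sketch of that loss (not the exact head implementation):

import torch

def normalized_pixel_loss(pred, target, mask, eps=1e-6):
    # pred, target: (N, L, patch_dim) flattened patches; mask: (N, L),
    # 1 where a patch was masked and must be reconstructed.
    mean = target.mean(dim=-1, keepdim=True)
    var = target.var(dim=-1, keepdim=True)
    target = (target - mean) / (var + eps).sqrt()
    loss = (pred - target).pow(2).mean(dim=-1)   # per-patch MSE
    return (loss * mask).sum() / mask.sum()      # average over masked patches only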
+ ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/mlp_mixer_base_patch16.py b/configs/_base_/models/mlp_mixer_base_patch16.py new file mode 100644 index 0000000..5ebd17f --- /dev/null +++ b/configs/_base_/models/mlp_mixer_base_patch16.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='MlpMixer', + arch='b', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=dict(type='GlobalAveragePooling', dim=1), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + ), +) diff --git a/configs/_base_/models/mlp_mixer_large_patch16.py b/configs/_base_/models/mlp_mixer_large_patch16.py new file mode 100644 index 0000000..ff10713 --- /dev/null +++ b/configs/_base_/models/mlp_mixer_large_patch16.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='MlpMixer', + arch='l', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=dict(type='GlobalAveragePooling', dim=1), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + ), +) diff --git a/configs/_base_/models/mobilenet_v2_1x.py b/configs/_base_/models/mobilenet_v2_1x.py new file mode 100644 index 0000000..6ebff1e --- /dev/null +++ b/configs/_base_/models/mobilenet_v2_1x.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV2', widen_factor=1.0), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/mobilenet_v3/mobilenet_v3_large_imagenet.py b/configs/_base_/models/mobilenet_v3/mobilenet_v3_large_imagenet.py new file mode 100644 index 0000000..5318f50 --- /dev/null +++ b/configs/_base_/models/mobilenet_v3/mobilenet_v3_large_imagenet.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV3', arch='large'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='StackedLinearClsHead', + num_classes=1000, + in_channels=960, + mid_channels=[1280], + dropout_rate=0.2, + act_cfg=dict(type='HSwish'), + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + init_cfg=dict( + type='Normal', layer='Linear', mean=0., std=0.01, bias=0.), + topk=(1, 5))) diff --git a/configs/_base_/models/mobilenet_v3/mobilenet_v3_small_050_imagenet.py b/configs/_base_/models/mobilenet_v3/mobilenet_v3_small_050_imagenet.py new file mode 100644 index 0000000..6356efc --- /dev/null +++ b/configs/_base_/models/mobilenet_v3/mobilenet_v3_small_050_imagenet.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV3', arch='small_050'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='StackedLinearClsHead', + num_classes=1000, + in_channels=288, + mid_channels=[1024], + dropout_rate=0.2, + act_cfg=dict(type='HSwish'), + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + init_cfg=dict( + type='Normal', layer='Linear', mean=0., std=0.01, bias=0.), + 
topk=(1, 5))) diff --git a/configs/_base_/models/mobilenet_v3/mobilenet_v3_small_075_imagenet.py b/configs/_base_/models/mobilenet_v3/mobilenet_v3_small_075_imagenet.py new file mode 100644 index 0000000..19391ec --- /dev/null +++ b/configs/_base_/models/mobilenet_v3/mobilenet_v3_small_075_imagenet.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV3', arch='small_075'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='StackedLinearClsHead', + num_classes=1000, + in_channels=432, + mid_channels=[1024], + dropout_rate=0.2, + act_cfg=dict(type='HSwish'), + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + init_cfg=dict( + type='Normal', layer='Linear', mean=0., std=0.01, bias=0.), + topk=(1, 5))) diff --git a/configs/_base_/models/mobilenet_v3/mobilenet_v3_small_cifar.py b/configs/_base_/models/mobilenet_v3/mobilenet_v3_small_cifar.py new file mode 100644 index 0000000..5dbe980 --- /dev/null +++ b/configs/_base_/models/mobilenet_v3/mobilenet_v3_small_cifar.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV3', arch='small'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='StackedLinearClsHead', + num_classes=10, + in_channels=576, + mid_channels=[1280], + act_cfg=dict(type='HSwish'), + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/mobilenet_v3/mobilenet_v3_small_imagenet.py b/configs/_base_/models/mobilenet_v3/mobilenet_v3_small_imagenet.py new file mode 100644 index 0000000..af6cc1b --- /dev/null +++ b/configs/_base_/models/mobilenet_v3/mobilenet_v3_small_imagenet.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileNetV3', arch='small'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='StackedLinearClsHead', + num_classes=1000, + in_channels=576, + mid_channels=[1024], + dropout_rate=0.2, + act_cfg=dict(type='HSwish'), + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + init_cfg=dict( + type='Normal', layer='Linear', mean=0., std=0.01, bias=0.), + topk=(1, 5))) diff --git a/configs/_base_/models/mobileone/mobileone_s0.py b/configs/_base_/models/mobileone/mobileone_s0.py new file mode 100644 index 0000000..39624e5 --- /dev/null +++ b/configs/_base_/models/mobileone/mobileone_s0.py @@ -0,0 +1,19 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='MobileOne', + arch='s0', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + )) diff --git a/configs/_base_/models/mobileone/mobileone_s1.py b/configs/_base_/models/mobileone/mobileone_s1.py new file mode 100644 index 0000000..cea7762 --- /dev/null +++ b/configs/_base_/models/mobileone/mobileone_s1.py @@ -0,0 +1,19 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='MobileOne', + arch='s1', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + )) diff --git a/configs/_base_/models/mobileone/mobileone_s2.py b/configs/_base_/models/mobileone/mobileone_s2.py new file mode 100644 index 0000000..dfae0e1 --- /dev/null +++ 
b/configs/_base_/models/mobileone/mobileone_s2.py @@ -0,0 +1,19 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='MobileOne', + arch='s2', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + )) diff --git a/configs/_base_/models/mobileone/mobileone_s3.py b/configs/_base_/models/mobileone/mobileone_s3.py new file mode 100644 index 0000000..8135675 --- /dev/null +++ b/configs/_base_/models/mobileone/mobileone_s3.py @@ -0,0 +1,19 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='MobileOne', + arch='s3', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + )) diff --git a/configs/_base_/models/mobileone/mobileone_s4.py b/configs/_base_/models/mobileone/mobileone_s4.py new file mode 100644 index 0000000..282eec8 --- /dev/null +++ b/configs/_base_/models/mobileone/mobileone_s4.py @@ -0,0 +1,19 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='MobileOne', + arch='s4', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + )) diff --git a/configs/_base_/models/mobilevit/mobilevit_s.py b/configs/_base_/models/mobilevit/mobilevit_s.py new file mode 100644 index 0000000..f6a4e05 --- /dev/null +++ b/configs/_base_/models/mobilevit/mobilevit_s.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileViT', arch='small'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=640, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/mobilevit/mobilevit_xs.py b/configs/_base_/models/mobilevit/mobilevit_xs.py new file mode 100644 index 0000000..f8c6ef0 --- /dev/null +++ b/configs/_base_/models/mobilevit/mobilevit_xs.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileViT', arch='x_small'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/mobilevit/mobilevit_xxs.py b/configs/_base_/models/mobilevit/mobilevit_xxs.py new file mode 100644 index 0000000..e1c26e6 --- /dev/null +++ b/configs/_base_/models/mobilevit/mobilevit_xxs.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='MobileViT', arch='xx_small'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=320, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/mvit/mvitv2-base.py b/configs/_base_/models/mvit/mvitv2-base.py new file mode 100644 index 0000000..0cb6064 --- /dev/null +++ b/configs/_base_/models/mvit/mvitv2-base.py @@ -0,0 +1,19 @@ +model = dict( + type='ImageClassifier', + backbone=dict(type='MViT', arch='base', drop_path_rate=0.3), + 
neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + in_channels=768, + num_classes=1000, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/mvit/mvitv2-large.py b/configs/_base_/models/mvit/mvitv2-large.py new file mode 100644 index 0000000..2c84424 --- /dev/null +++ b/configs/_base_/models/mvit/mvitv2-large.py @@ -0,0 +1,23 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='MViT', + arch='large', + drop_path_rate=0.5, + dim_mul_in_attention=False), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + in_channels=1152, + num_classes=1000, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/mvit/mvitv2-small.py b/configs/_base_/models/mvit/mvitv2-small.py new file mode 100644 index 0000000..df895f2 --- /dev/null +++ b/configs/_base_/models/mvit/mvitv2-small.py @@ -0,0 +1,19 @@ +model = dict( + type='ImageClassifier', + backbone=dict(type='MViT', arch='small', drop_path_rate=0.1), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + in_channels=768, + num_classes=1000, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/mvit/mvitv2-tiny.py b/configs/_base_/models/mvit/mvitv2-tiny.py new file mode 100644 index 0000000..836f04b --- /dev/null +++ b/configs/_base_/models/mvit/mvitv2-tiny.py @@ -0,0 +1,19 @@ +model = dict( + type='ImageClassifier', + backbone=dict(type='MViT', arch='tiny', drop_path_rate=0.1), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + in_channels=768, + num_classes=1000, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/_base_/models/poolformer/poolformer_m36.py b/configs/_base_/models/poolformer/poolformer_m36.py new file mode 100644 index 0000000..276a721 --- /dev/null +++ b/configs/_base_/models/poolformer/poolformer_m36.py @@ -0,0 +1,22 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PoolFormer', + arch='m36', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/poolformer/poolformer_m48.py b/configs/_base_/models/poolformer/poolformer_m48.py new file mode 100644 index 0000000..8c006ac --- /dev/null +++ b/configs/_base_/models/poolformer/poolformer_m48.py @@ -0,0 +1,22 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PoolFormer', + arch='m48', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/poolformer/poolformer_s12.py b/configs/_base_/models/poolformer/poolformer_s12.py new file mode 100644 index 0000000..b7b3600 --- /dev/null +++ b/configs/_base_/models/poolformer/poolformer_s12.py @@ -0,0 +1,22 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PoolFormer', + arch='s12', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/poolformer/poolformer_s24.py b/configs/_base_/models/poolformer/poolformer_s24.py new file mode 100644 index 0000000..822ab5b --- /dev/null +++ b/configs/_base_/models/poolformer/poolformer_s24.py @@ -0,0 +1,22 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PoolFormer', + arch='s24', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/poolformer/poolformer_s36.py b/configs/_base_/models/poolformer/poolformer_s36.py new file mode 100644 index 0000000..489f222 --- /dev/null +++ b/configs/_base_/models/poolformer/poolformer_s36.py @@ -0,0 +1,22 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PoolFormer', + arch='s36', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + 
neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/regnet/regnetx_1.6gf.py b/configs/_base_/models/regnet/regnetx_1.6gf.py new file mode 100644 index 0000000..b81f0ad --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_1.6gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_1.6gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=912, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_12gf.py b/configs/_base_/models/regnet/regnetx_12gf.py new file mode 100644 index 0000000..383d4f8 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_12gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_12gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2240, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_3.2gf.py b/configs/_base_/models/regnet/regnetx_3.2gf.py new file mode 100644 index 0000000..67d4541 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_3.2gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_3.2gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1008, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_4.0gf.py b/configs/_base_/models/regnet/regnetx_4.0gf.py new file mode 100644 index 0000000..01419c6 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_4.0gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_4.0gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1360, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_400mf.py b/configs/_base_/models/regnet/regnetx_400mf.py new file mode 100644 index 0000000..ef518b9 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_400mf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_400mf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_6.4gf.py b/configs/_base_/models/regnet/regnetx_6.4gf.py new file mode 100644 index 0000000..44e6222 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_6.4gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_6.4gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1624, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_8.0gf.py b/configs/_base_/models/regnet/regnetx_8.0gf.py new file mode 100644 index 
0000000..2929826 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_8.0gf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_8.0gf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1920, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/regnet/regnetx_800mf.py b/configs/_base_/models/regnet/regnetx_800mf.py new file mode 100644 index 0000000..210f760 --- /dev/null +++ b/configs/_base_/models/regnet/regnetx_800mf.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='RegNet', arch='regnetx_800mf'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=672, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/replknet-31B_in1k.py b/configs/_base_/models/replknet-31B_in1k.py new file mode 100644 index 0000000..0cc5095 --- /dev/null +++ b/configs/_base_/models/replknet-31B_in1k.py @@ -0,0 +1,25 @@ +from mmpretrain.models import build_classifier + +model = dict( + type='ImageClassifier', + backbone=dict( + type='RepLKNet', + arch='31B', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) + +if __name__ == '__main__': + # model.pop('type') + model = build_classifier(model) + model.eval() + print('------------------- training-time model -------------') + for i in model.state_dict().keys(): + print(i) diff --git a/configs/_base_/models/replknet-31L_in1k.py b/configs/_base_/models/replknet-31L_in1k.py new file mode 100644 index 0000000..7830fb0 --- /dev/null +++ b/configs/_base_/models/replknet-31L_in1k.py @@ -0,0 +1,15 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='RepLKNet', + arch='31L', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/replknet-XL_in1k.py b/configs/_base_/models/replknet-XL_in1k.py new file mode 100644 index 0000000..b63f345 --- /dev/null +++ b/configs/_base_/models/replknet-XL_in1k.py @@ -0,0 +1,15 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='RepLKNet', + arch='XL', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/repmlp-base_224.py b/configs/_base_/models/repmlp-base_224.py new file mode 100644 index 0000000..7db0077 --- /dev/null +++ b/configs/_base_/models/repmlp-base_224.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RepMLPNet', + arch='B', + img_size=224, + out_indices=(3, ), + reparam_conv_kernels=(1, 3), + deploy=False), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/repvgg-A0_in1k.py b/configs/_base_/models/repvgg-A0_in1k.py new file mode 100644 index 
0000000..093ffb7 --- /dev/null +++ b/configs/_base_/models/repvgg-A0_in1k.py @@ -0,0 +1,15 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='RepVGG', + arch='A0', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py b/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py new file mode 100644 index 0000000..d88e687 --- /dev/null +++ b/configs/_base_/models/repvgg-B3_lbs-mixup_in1k.py @@ -0,0 +1,22 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='RepVGG', + arch='B3', + out_indices=(3, ), + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2560, + loss=dict( + type='LabelSmoothLoss', + loss_weight=1.0, + label_smooth_val=0.1, + mode='classy_vision', + num_classes=1000), + topk=(1, 5), + ), + train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), +) diff --git a/configs/_base_/models/res2net101-w26-s4.py b/configs/_base_/models/res2net101-w26-s4.py new file mode 100644 index 0000000..3bf64c5 --- /dev/null +++ b/configs/_base_/models/res2net101-w26-s4.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=101, + scales=4, + base_width=26, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/res2net50-w14-s8.py b/configs/_base_/models/res2net50-w14-s8.py new file mode 100644 index 0000000..5875142 --- /dev/null +++ b/configs/_base_/models/res2net50-w14-s8.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=8, + base_width=14, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/res2net50-w26-s4.py b/configs/_base_/models/res2net50-w26-s4.py new file mode 100644 index 0000000..be8fdb5 --- /dev/null +++ b/configs/_base_/models/res2net50-w26-s4.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=4, + base_width=26, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/res2net50-w26-s6.py b/configs/_base_/models/res2net50-w26-s6.py new file mode 100644 index 0000000..281b136 --- /dev/null +++ b/configs/_base_/models/res2net50-w26-s6.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=6, + base_width=26, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/res2net50-w26-s8.py b/configs/_base_/models/res2net50-w26-s8.py new file mode 100644 index 0000000..b4f62f3 --- /dev/null +++ 
b/configs/_base_/models/res2net50-w26-s8.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=8, + base_width=26, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/res2net50-w48-s2.py b/configs/_base_/models/res2net50-w48-s2.py new file mode 100644 index 0000000..8675c91 --- /dev/null +++ b/configs/_base_/models/res2net50-w48-s2.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Res2Net', + depth=50, + scales=2, + base_width=48, + deep_stem=False, + avg_down=False, + ), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnest101.py b/configs/_base_/models/resnest101.py new file mode 100644 index 0000000..3780c15 --- /dev/null +++ b/configs/_base_/models/resnest101.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeSt', + depth=101, + num_stages=4, + stem_channels=128, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + num_classes=1000, + reduction='mean', + loss_weight=1.0), + topk=(1, 5), + cal_acc=False), + train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), +) diff --git a/configs/_base_/models/resnest200.py b/configs/_base_/models/resnest200.py new file mode 100644 index 0000000..40d8f03 --- /dev/null +++ b/configs/_base_/models/resnest200.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeSt', + depth=200, + num_stages=4, + stem_channels=128, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + num_classes=1000, + reduction='mean', + loss_weight=1.0), + topk=(1, 5), + cal_acc=False), + train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), +) diff --git a/configs/_base_/models/resnest269.py b/configs/_base_/models/resnest269.py new file mode 100644 index 0000000..c37626f --- /dev/null +++ b/configs/_base_/models/resnest269.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeSt', + depth=269, + num_stages=4, + stem_channels=128, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + num_classes=1000, + reduction='mean', + loss_weight=1.0), + topk=(1, 5), + cal_acc=False), + train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), +) diff --git a/configs/_base_/models/resnest50.py b/configs/_base_/models/resnest50.py new file mode 100644 index 0000000..51c90e8 --- /dev/null +++ b/configs/_base_/models/resnest50.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeSt', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + 
neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + num_classes=1000, + reduction='mean', + loss_weight=1.0), + topk=(1, 5), + cal_acc=False), + train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), +) diff --git a/configs/_base_/models/resnet101.py b/configs/_base_/models/resnet101.py new file mode 100644 index 0000000..1147cd4 --- /dev/null +++ b/configs/_base_/models/resnet101.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet101_cifar.py b/configs/_base_/models/resnet101_cifar.py new file mode 100644 index 0000000..a84d470 --- /dev/null +++ b/configs/_base_/models/resnet101_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/resnet152.py b/configs/_base_/models/resnet152.py new file mode 100644 index 0000000..94a718c --- /dev/null +++ b/configs/_base_/models/resnet152.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=152, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet152_cifar.py b/configs/_base_/models/resnet152_cifar.py new file mode 100644 index 0000000..55c0cc6 --- /dev/null +++ b/configs/_base_/models/resnet152_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=152, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/resnet18.py b/configs/_base_/models/resnet18.py new file mode 100644 index 0000000..7c66758 --- /dev/null +++ b/configs/_base_/models/resnet18.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=18, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet18_cifar.py b/configs/_base_/models/resnet18_cifar.py new file mode 100644 index 0000000..7b9cf1e --- /dev/null +++ b/configs/_base_/models/resnet18_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=18, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + 
neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/resnet34.py b/configs/_base_/models/resnet34.py new file mode 100644 index 0000000..100ee28 --- /dev/null +++ b/configs/_base_/models/resnet34.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=34, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet34_cifar.py b/configs/_base_/models/resnet34_cifar.py new file mode 100644 index 0000000..55d033b --- /dev/null +++ b/configs/_base_/models/resnet34_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=34, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/resnet34_gem.py b/configs/_base_/models/resnet34_gem.py new file mode 100644 index 0000000..5c0e0d3 --- /dev/null +++ b/configs/_base_/models/resnet34_gem.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=34, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GeneralizedMeanPooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet50.py b/configs/_base_/models/resnet50.py new file mode 100644 index 0000000..129a2bb --- /dev/null +++ b/configs/_base_/models/resnet50.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet50_cifar.py b/configs/_base_/models/resnet50_cifar.py new file mode 100644 index 0000000..33b66d5 --- /dev/null +++ b/configs/_base_/models/resnet50_cifar.py @@ -0,0 +1,16 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/resnet50_cifar_cutmix.py b/configs/_base_/models/resnet50_cifar_cutmix.py new file mode 100644 index 0000000..73c38be --- /dev/null +++ b/configs/_base_/models/resnet50_cifar_cutmix.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, 
use_soft=True)), + train_cfg=dict( + augments=dict(type='BatchCutMix', alpha=1.0, num_classes=10, + prob=1.0))) diff --git a/configs/_base_/models/resnet50_cifar_mixup.py b/configs/_base_/models/resnet50_cifar_mixup.py new file mode 100644 index 0000000..f165c24 --- /dev/null +++ b/configs/_base_/models/resnet50_cifar_mixup.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=10, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)), + train_cfg=dict(augments=dict(type='Mixup', alpha=1.)), +) diff --git a/configs/_base_/models/resnet50_cutmix.py b/configs/_base_/models/resnet50_cutmix.py new file mode 100644 index 0000000..fb79088 --- /dev/null +++ b/configs/_base_/models/resnet50_cutmix.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)), + train_cfg=dict( + augments=dict( + type='BatchCutMix', alpha=1.0, num_classes=1000, prob=1.0))) diff --git a/configs/_base_/models/resnet50_label_smooth.py b/configs/_base_/models/resnet50_label_smooth.py new file mode 100644 index 0000000..b6f7937 --- /dev/null +++ b/configs/_base_/models/resnet50_label_smooth.py @@ -0,0 +1,18 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnet50_mixup.py b/configs/_base_/models/resnet50_mixup.py new file mode 100644 index 0000000..23130a6 --- /dev/null +++ b/configs/_base_/models/resnet50_mixup.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='MultiLabelLinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, use_soft=True)), + train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), +) diff --git a/configs/_base_/models/resnetv1c50.py b/configs/_base_/models/resnetv1c50.py new file mode 100644 index 0000000..3b973e2 --- /dev/null +++ b/configs/_base_/models/resnetv1c50.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnetv1d101.py b/configs/_base_/models/resnetv1d101.py new file mode 100644 index 0000000..1e56223 --- /dev/null +++ b/configs/_base_/models/resnetv1d101.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + 
backbone=dict( + type='ResNetV1d', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnetv1d152.py b/configs/_base_/models/resnetv1d152.py new file mode 100644 index 0000000..58cc73b --- /dev/null +++ b/configs/_base_/models/resnetv1d152.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNetV1d', + depth=152, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnetv1d50.py b/configs/_base_/models/resnetv1d50.py new file mode 100644 index 0000000..015aaa3 --- /dev/null +++ b/configs/_base_/models/resnetv1d50.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNetV1d', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnext101_32x4d.py b/configs/_base_/models/resnext101_32x4d.py new file mode 100644 index 0000000..1c89fb6 --- /dev/null +++ b/configs/_base_/models/resnext101_32x4d.py @@ -0,0 +1,19 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeXt', + depth=101, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnext101_32x8d.py b/configs/_base_/models/resnext101_32x8d.py new file mode 100644 index 0000000..2bb63f3 --- /dev/null +++ b/configs/_base_/models/resnext101_32x8d.py @@ -0,0 +1,19 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeXt', + depth=101, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=8, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnext152_32x4d.py b/configs/_base_/models/resnext152_32x4d.py new file mode 100644 index 0000000..d392eff --- /dev/null +++ b/configs/_base_/models/resnext152_32x4d.py @@ -0,0 +1,19 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNeXt', + depth=152, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/resnext50_32x4d.py b/configs/_base_/models/resnext50_32x4d.py new file mode 100644 index 0000000..0604262 --- /dev/null +++ b/configs/_base_/models/resnext50_32x4d.py @@ -0,0 +1,19 @@ +# model settings +model = dict( + type='ImageClassifier', 
+ backbone=dict( + type='ResNeXt', + depth=50, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/revvit/revvit-base.py b/configs/_base_/models/revvit/revvit-base.py new file mode 100644 index 0000000..85b7af4 --- /dev/null +++ b/configs/_base_/models/revvit/revvit-base.py @@ -0,0 +1,27 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RevVisionTransformer', + arch='deit-base', + img_size=224, + patch_size=16, + out_type='avg_featmap', + ), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/revvit/revvit-small.py b/configs/_base_/models/revvit/revvit-small.py new file mode 100644 index 0000000..dd1a0b2 --- /dev/null +++ b/configs/_base_/models/revvit/revvit-small.py @@ -0,0 +1,27 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RevVisionTransformer', + arch='deit-small', + img_size=224, + patch_size=16, + out_type='avg_featmap', + ), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/seresnet101.py b/configs/_base_/models/seresnet101.py new file mode 100644 index 0000000..137a6f9 --- /dev/null +++ b/configs/_base_/models/seresnet101.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SEResNet', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/seresnet50.py b/configs/_base_/models/seresnet50.py new file mode 100644 index 0000000..e5f6bfc --- /dev/null +++ b/configs/_base_/models/seresnet50.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SEResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/seresnext101_32x4d.py b/configs/_base_/models/seresnext101_32x4d.py new file mode 100644 index 0000000..cc8a62c --- /dev/null +++ b/configs/_base_/models/seresnext101_32x4d.py @@ -0,0 +1,20 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SEResNeXt', + depth=101, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + se_ratio=16, + 
style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/seresnext50_32x4d.py b/configs/_base_/models/seresnext50_32x4d.py new file mode 100644 index 0000000..0cdf7cb --- /dev/null +++ b/configs/_base_/models/seresnext50_32x4d.py @@ -0,0 +1,20 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SEResNeXt', + depth=50, + num_stages=4, + out_indices=(3, ), + groups=32, + width_per_group=4, + se_ratio=16, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/shufflenet_v1_1x.py b/configs/_base_/models/shufflenet_v1_1x.py new file mode 100644 index 0000000..f0f9d1f --- /dev/null +++ b/configs/_base_/models/shufflenet_v1_1x.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ShuffleNetV1', groups=3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=960, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/shufflenet_v2_1x.py b/configs/_base_/models/shufflenet_v2_1x.py new file mode 100644 index 0000000..190800e --- /dev/null +++ b/configs/_base_/models/shufflenet_v2_1x.py @@ -0,0 +1,12 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='ShuffleNetV2', widen_factor=1.0), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/swin_transformer/base_224.py b/configs/_base_/models/swin_transformer/base_224.py new file mode 100644 index 0000000..b7c277f --- /dev/null +++ b/configs/_base_/models/swin_transformer/base_224.py @@ -0,0 +1,23 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', arch='base', img_size=224, drop_path_rate=0.5), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/swin_transformer/base_384.py b/configs/_base_/models/swin_transformer/base_384.py new file mode 100644 index 0000000..ce78981 --- /dev/null +++ b/configs/_base_/models/swin_transformer/base_384.py @@ -0,0 +1,16 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', + arch='base', + img_size=384, + stage_cfgs=dict(block_cfgs=dict(window_size=12))), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/swin_transformer/large_224.py b/configs/_base_/models/swin_transformer/large_224.py new file mode 100644 index 0000000..747d00e --- /dev/null +++ b/configs/_base_/models/swin_transformer/large_224.py @@ -0,0 +1,12 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict(type='SwinTransformer', arch='large', img_size=224), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/swin_transformer/large_384.py b/configs/_base_/models/swin_transformer/large_384.py new file mode 100644 index 0000000..7026f81 --- /dev/null +++ b/configs/_base_/models/swin_transformer/large_384.py @@ -0,0 +1,16 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', + arch='large', + img_size=384, + stage_cfgs=dict(block_cfgs=dict(window_size=12))), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/swin_transformer/small_224.py b/configs/_base_/models/swin_transformer/small_224.py new file mode 100644 index 0000000..d87d9d9 --- /dev/null +++ b/configs/_base_/models/swin_transformer/small_224.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', arch='small', img_size=224, + drop_path_rate=0.3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/swin_transformer/tiny_224.py b/configs/_base_/models/swin_transformer/tiny_224.py new file mode 100644 index 0000000..f1781cf --- /dev/null +++ b/configs/_base_/models/swin_transformer/tiny_224.py @@ -0,0 +1,23 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', arch='tiny', img_size=224, drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/swin_transformer/tiny_base_224.py b/configs/_base_/models/swin_transformer/tiny_base_224.py new file mode 100644 index 0000000..e353b8c --- /dev/null +++ b/configs/_base_/models/swin_transformer/tiny_base_224.py @@ -0,0 +1,23 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', arch='base', img_size=224, drop_path_rate=0.5), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=200, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/swin_transformer/tiny_large_224.py b/configs/_base_/models/swin_transformer/tiny_large_224.py new file mode 100644 index 0000000..c9e3f91 --- /dev/null +++ b/configs/_base_/models/swin_transformer/tiny_large_224.py @@ -0,0 +1,12 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict(type='SwinTransformer', arch='large', img_size=224), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=200, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/swin_transformer_v2/base_256.py b/configs/_base_/models/swin_transformer_v2/base_256.py new file mode 100644 index 0000000..66594db --- /dev/null +++ b/configs/_base_/models/swin_transformer_v2/base_256.py @@ -0,0 +1,26 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformerV2', + arch='base', + img_size=256, + drop_path_rate=0.5), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/swin_transformer_v2/base_384.py b/configs/_base_/models/swin_transformer_v2/base_384.py new file mode 100644 index 0000000..5fb9aea --- /dev/null +++ b/configs/_base_/models/swin_transformer_v2/base_384.py @@ -0,0 +1,17 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformerV2', + arch='base', + img_size=384, + drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False)) diff --git a/configs/_base_/models/swin_transformer_v2/large_256.py b/configs/_base_/models/swin_transformer_v2/large_256.py new file mode 100644 index 0000000..fe557c3 --- /dev/null +++ b/configs/_base_/models/swin_transformer_v2/large_256.py @@ -0,0 +1,16 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformerV2', + arch='large', + img_size=256, + drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/swin_transformer_v2/large_384.py b/configs/_base_/models/swin_transformer_v2/large_384.py new file mode 100644 index 0000000..a626c40 --- /dev/null +++ b/configs/_base_/models/swin_transformer_v2/large_384.py @@ -0,0 +1,16 @@ +# model settings +# Only for evaluation +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformerV2', + arch='large', + img_size=384, + drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1536, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) diff --git a/configs/_base_/models/swin_transformer_v2/small_256.py b/configs/_base_/models/swin_transformer_v2/small_256.py new file mode 100644 index 0000000..0ec706f --- /dev/null +++ b/configs/_base_/models/swin_transformer_v2/small_256.py @@ -0,0 +1,26 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformerV2', + arch='small', + img_size=256, + drop_path_rate=0.3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/swin_transformer_v2/tiny_256.py b/configs/_base_/models/swin_transformer_v2/tiny_256.py new file mode 100644 index 0000000..61055a1 --- /dev/null +++ b/configs/_base_/models/swin_transformer_v2/tiny_256.py @@ -0,0 +1,26 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformerV2', + arch='tiny', + img_size=256, + drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/t2t-vit-t-14.py b/configs/_base_/models/t2t-vit-t-14.py new file mode 100644 index 0000000..58ea660 --- /dev/null +++ b/configs/_base_/models/t2t-vit-t-14.py @@ -0,0 +1,42 @@ +# model settings +embed_dims = 384 +num_classes = 1000 + +model = dict( + type='ImageClassifier', + backbone=dict( + type='T2T_ViT', + img_size=224, + in_channels=3, + embed_dims=embed_dims, + t2t_cfg=dict( + token_dims=64, + use_performer=False, + ), + num_layers=14, + layer_cfgs=dict( + num_heads=6, + feedforward_channels=3 * embed_dims, # mlp_ratio = 3 + ), + drop_path_rate=0.1, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=num_classes, + in_channels=embed_dims, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) diff --git a/configs/_base_/models/t2t-vit-t-19.py b/configs/_base_/models/t2t-vit-t-19.py new file mode 100644 index 0000000..51741c7 --- /dev/null +++ b/configs/_base_/models/t2t-vit-t-19.py @@ -0,0 +1,42 @@ +# model settings +embed_dims = 448 +num_classes = 1000 + +model = dict( + type='ImageClassifier', + backbone=dict( + type='T2T_ViT', + img_size=224, + in_channels=3, + embed_dims=embed_dims, + t2t_cfg=dict( + token_dims=64, + use_performer=False, + ), + num_layers=19, + layer_cfgs=dict( + num_heads=7, + feedforward_channels=3 * embed_dims, # mlp_ratio = 3 + ), + drop_path_rate=0.1, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=num_classes, + in_channels=embed_dims, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) diff --git a/configs/_base_/models/t2t-vit-t-24.py b/configs/_base_/models/t2t-vit-t-24.py new file mode 100644 index 0000000..ad772cf --- /dev/null +++ b/configs/_base_/models/t2t-vit-t-24.py @@ -0,0 +1,42 @@ +# model settings +embed_dims = 512 +num_classes = 1000 + +model = dict( + type='ImageClassifier', + backbone=dict( + type='T2T_ViT', + img_size=224, + in_channels=3, + embed_dims=embed_dims, + t2t_cfg=dict( + token_dims=64, + use_performer=False, + ), + num_layers=24, + layer_cfgs=dict( + num_heads=8, + feedforward_channels=3 * embed_dims, # mlp_ratio = 3 + ), + drop_path_rate=0.1, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=num_classes, + in_channels=embed_dims, + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + ), + topk=(1, 5), + init_cfg=dict(type='TruncNormal', layer='Linear', std=.02)), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) diff --git a/configs/_base_/models/tiny-vit-large-p16.py b/configs/_base_/models/tiny-vit-large-p16.py new 
file mode 100644 index 0000000..8e4e7f6 --- /dev/null +++ b/configs/_base_/models/tiny-vit-large-p16.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='l', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=200, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/tinyvit/tinyvit-11m.py b/configs/_base_/models/tinyvit/tinyvit-11m.py new file mode 100644 index 0000000..6c046e3 --- /dev/null +++ b/configs/_base_/models/tinyvit/tinyvit-11m.py @@ -0,0 +1,25 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='TinyViT', + arch='11m', + img_size=(224, 224), + window_size=[7, 7, 14, 7], + out_indices=(3, ), + drop_path_rate=0.1, + gap_before_final_norm=True, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.), + ]), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=448, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/tinyvit/tinyvit-21m.py b/configs/_base_/models/tinyvit/tinyvit-21m.py new file mode 100644 index 0000000..7f362f8 --- /dev/null +++ b/configs/_base_/models/tinyvit/tinyvit-21m.py @@ -0,0 +1,25 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='TinyViT', + arch='21m', + img_size=(224, 224), + window_size=[7, 7, 14, 7], + out_indices=(3, ), + drop_path_rate=0.2, + gap_before_final_norm=True, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.), + ]), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=576, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/tinyvit/tinyvit-5m.py b/configs/_base_/models/tinyvit/tinyvit-5m.py new file mode 100644 index 0000000..923ebd9 --- /dev/null +++ b/configs/_base_/models/tinyvit/tinyvit-5m.py @@ -0,0 +1,25 @@ +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='TinyViT', + arch='5m', + img_size=(224, 224), + window_size=[7, 7, 14, 7], + out_indices=(3, ), + drop_path_rate=0.0, + gap_before_final_norm=True, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['LayerNorm'], val=1., bias=0.), + ]), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=320, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) diff --git a/configs/_base_/models/tnt_s_patch16_224.py b/configs/_base_/models/tnt_s_patch16_224.py new file mode 100644 index 0000000..5e13d07 --- /dev/null +++ b/configs/_base_/models/tnt_s_patch16_224.py @@ -0,0 +1,29 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='TNT', + arch='s', + img_size=224, + patch_size=16, + in_channels=3, + ffn_ratio=4, + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.1, + first_stride=4, + num_fcs=2, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ]), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + topk=(1, 5), + init_cfg=dict(type='TruncNormal', layer='Linear', std=.02))) diff --git a/configs/_base_/models/twins_pcpvt_base.py b/configs/_base_/models/twins_pcpvt_base.py new file mode 100644 index 0000000..14e46ba --- /dev/null +++ b/configs/_base_/models/twins_pcpvt_base.py @@ -0,0 +1,31 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PCPVT', + arch='base', + in_channels=3, + out_indices=(3, ), + qkv_bias=True, + norm_cfg=dict(type='LN', eps=1e-06), + norm_after_stage=[False, False, False, True], + drop_rate=0.0, + attn_drop_rate=0., + drop_path_rate=0.3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/twins_svt_base.py b/configs/_base_/models/twins_svt_base.py new file mode 100644 index 0000000..a37385b --- /dev/null +++ b/configs/_base_/models/twins_svt_base.py @@ -0,0 +1,31 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='SVT', + arch='base', + in_channels=3, + out_indices=(3, ), + qkv_bias=True, + norm_cfg=dict(type='LN'), + norm_after_stage=[False, False, False, True], + drop_rate=0.0, + attn_drop_rate=0., + drop_path_rate=0.3), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/van/van_base.py b/configs/_base_/models/van/van_base.py new file mode 100644 index 0000000..0064592 --- /dev/null +++ b/configs/_base_/models/van/van_base.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VAN', arch='base', drop_path_rate=0.1), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False)) diff --git a/configs/_base_/models/van/van_large.py b/configs/_base_/models/van/van_large.py new file mode 100644 index 0000000..4ebafab --- /dev/null +++ b/configs/_base_/models/van/van_large.py @@ -0,0 +1,13 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VAN', arch='large', drop_path_rate=0.2), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. 
+ loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False)) diff --git a/configs/_base_/models/van/van_small.py b/configs/_base_/models/van/van_small.py new file mode 100644 index 0000000..29393c6 --- /dev/null +++ b/configs/_base_/models/van/van_small.py @@ -0,0 +1,22 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VAN', arch='small', drop_path_rate=0.1), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/van/van_tiny.py b/configs/_base_/models/van/van_tiny.py new file mode 100644 index 0000000..9cf5b28 --- /dev/null +++ b/configs/_base_/models/van/van_tiny.py @@ -0,0 +1,22 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VAN', arch='tiny', drop_path_rate=0.1), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=256, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + cal_acc=False), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/vgg11.py b/configs/_base_/models/vgg11.py new file mode 100644 index 0000000..2b6ee14 --- /dev/null +++ b/configs/_base_/models/vgg11.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=11, num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg11bn.py b/configs/_base_/models/vgg11bn.py new file mode 100644 index 0000000..cb4c64e --- /dev/null +++ b/configs/_base_/models/vgg11bn.py @@ -0,0 +1,11 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', depth=11, norm_cfg=dict(type='BN'), num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg13.py b/configs/_base_/models/vgg13.py new file mode 100644 index 0000000..a938910 --- /dev/null +++ b/configs/_base_/models/vgg13.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=13, num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg13bn.py b/configs/_base_/models/vgg13bn.py new file mode 100644 index 0000000..b12173b --- /dev/null +++ b/configs/_base_/models/vgg13bn.py @@ -0,0 +1,11 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', depth=13, norm_cfg=dict(type='BN'), num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', 
loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg16.py b/configs/_base_/models/vgg16.py new file mode 100644 index 0000000..93ce864 --- /dev/null +++ b/configs/_base_/models/vgg16.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=16, num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg16bn.py b/configs/_base_/models/vgg16bn.py new file mode 100644 index 0000000..765e34f --- /dev/null +++ b/configs/_base_/models/vgg16bn.py @@ -0,0 +1,11 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', depth=16, norm_cfg=dict(type='BN'), num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg19.py b/configs/_base_/models/vgg19.py new file mode 100644 index 0000000..6f4ab06 --- /dev/null +++ b/configs/_base_/models/vgg19.py @@ -0,0 +1,10 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='VGG', depth=19, num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vgg19bn.py b/configs/_base_/models/vgg19bn.py new file mode 100644 index 0000000..c468b5d --- /dev/null +++ b/configs/_base_/models/vgg19bn.py @@ -0,0 +1,11 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', depth=19, norm_cfg=dict(type='BN'), num_classes=1000), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vig/pyramid_vig_base.py b/configs/_base_/models/vig/pyramid_vig_base.py new file mode 100644 index 0000000..a258457 --- /dev/null +++ b/configs/_base_/models/vig/pyramid_vig_base.py @@ -0,0 +1,32 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PyramidVig', + arch='base', + k=9, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='BN'), + graph_conv_type='mr', + graph_conv_bias=True, + epsilon=0.2, + use_stochastic=False, + drop_path=0.1, + norm_eval=False, + frozen_stages=0), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='VigClsHead', + num_classes=1000, + in_channels=1024, + hidden_dim=1024, + act_cfg=dict(type='GELU'), + dropout=0., + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/vig/pyramid_vig_medium.py b/configs/_base_/models/vig/pyramid_vig_medium.py new file mode 100644 index 0000000..a551aba --- /dev/null +++ b/configs/_base_/models/vig/pyramid_vig_medium.py @@ -0,0 +1,32 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PyramidVig', + arch='medium', + k=9, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='BN'), + graph_conv_type='mr', + graph_conv_bias=True, + epsilon=0.2, + use_stochastic=False, + drop_path=0.1, + norm_eval=False, + frozen_stages=0), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='VigClsHead', + num_classes=1000, + in_channels=768, + hidden_dim=1024, + act_cfg=dict(type='GELU'), + dropout=0., + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + ), + train_cfg=dict(augments=[ 
+ dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/vig/pyramid_vig_small.py b/configs/_base_/models/vig/pyramid_vig_small.py new file mode 100644 index 0000000..940275e --- /dev/null +++ b/configs/_base_/models/vig/pyramid_vig_small.py @@ -0,0 +1,32 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PyramidVig', + arch='small', + k=9, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='BN'), + graph_conv_type='mr', + graph_conv_bias=True, + epsilon=0.2, + use_stochastic=False, + drop_path=0.1, + norm_eval=False, + frozen_stages=0), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='VigClsHead', + num_classes=1000, + in_channels=640, + hidden_dim=1024, + act_cfg=dict(type='GELU'), + dropout=0., + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/vig/pyramid_vig_tiny.py b/configs/_base_/models/vig/pyramid_vig_tiny.py new file mode 100644 index 0000000..fea0734 --- /dev/null +++ b/configs/_base_/models/vig/pyramid_vig_tiny.py @@ -0,0 +1,32 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='PyramidVig', + arch='tiny', + k=9, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='BN'), + graph_conv_type='mr', + graph_conv_bias=True, + epsilon=0.2, + use_stochastic=False, + drop_path=0.1, + norm_eval=False, + frozen_stages=0), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='VigClsHead', + num_classes=1000, + in_channels=384, + hidden_dim=1024, + act_cfg=dict(type='GELU'), + dropout=0., + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/vig/vig_base.py b/configs/_base_/models/vig/vig_base.py new file mode 100644 index 0000000..6c5f293 --- /dev/null +++ b/configs/_base_/models/vig/vig_base.py @@ -0,0 +1,33 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Vig', + arch='base', + k=9, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='BN'), + graph_conv_type='mr', + graph_conv_bias=True, + epsilon=0.2, + use_dilation=True, + use_stochastic=False, + drop_path=0.1, + relative_pos=False, + norm_eval=False, + frozen_stages=0), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='VigClsHead', + num_classes=1000, + in_channels=640, + hidden_dim=1024, + act_cfg=dict(type='GELU'), + dropout=0., + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/vig/vig_small.py b/configs/_base_/models/vig/vig_small.py new file mode 100644 index 0000000..93587ff --- /dev/null +++ b/configs/_base_/models/vig/vig_small.py @@ -0,0 +1,33 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Vig', + arch='small', + k=9, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='BN'), + graph_conv_type='mr', + graph_conv_bias=True, + epsilon=0.2, + use_dilation=True, + use_stochastic=False, + drop_path=0.1, + relative_pos=False, + norm_eval=False, + frozen_stages=0), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='VigClsHead', + num_classes=1000, + in_channels=320, + hidden_dim=1024, + act_cfg=dict(type='GELU'), + dropout=0., + 
loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/vig/vig_tiny.py b/configs/_base_/models/vig/vig_tiny.py new file mode 100644 index 0000000..c50bac2 --- /dev/null +++ b/configs/_base_/models/vig/vig_tiny.py @@ -0,0 +1,33 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='Vig', + arch='tiny', + k=9, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='BN'), + graph_conv_type='mr', + graph_conv_bias=True, + epsilon=0.2, + use_dilation=True, + use_stochastic=False, + drop_path=0.1, + relative_pos=False, + norm_eval=False, + frozen_stages=0), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='VigClsHead', + num_classes=1000, + in_channels=192, + hidden_dim=1024, + act_cfg=dict(type='GELU'), + dropout=0., + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) diff --git a/configs/_base_/models/vit-base-p16.py b/configs/_base_/models/vit-base-p16.py new file mode 100644 index 0000000..bb42bed --- /dev/null +++ b/configs/_base_/models/vit-base-p16.py @@ -0,0 +1,25 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='b', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, + mode='classy_vision'), + )) diff --git a/configs/_base_/models/vit-base-p32.py b/configs/_base_/models/vit-base-p32.py new file mode 100644 index 0000000..ad550ef --- /dev/null +++ b/configs/_base_/models/vit-base-p32.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='b', + img_size=224, + patch_size=32, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vit-large-p16.py b/configs/_base_/models/vit-large-p16.py new file mode 100644 index 0000000..9716230 --- /dev/null +++ b/configs/_base_/models/vit-large-p16.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='l', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/vit-large-p32.py b/configs/_base_/models/vit-large-p32.py new file mode 100644 index 0000000..f9491bb --- /dev/null +++ b/configs/_base_/models/vit-large-p32.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='l', + img_size=224, + patch_size=32, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + 
neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/models/wide-resnet50.py b/configs/_base_/models/wide-resnet50.py new file mode 100644 index 0000000..a2913b9 --- /dev/null +++ b/configs/_base_/models/wide-resnet50.py @@ -0,0 +1,20 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(3, ), + stem_channels=64, + base_channels=128, + expansion=2, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + )) diff --git a/configs/_base_/schedules/cifar10_bs128.py b/configs/_base_/schedules/cifar10_bs128.py new file mode 100644 index 0000000..fadb6c1 --- /dev/null +++ b/configs/_base_/schedules/cifar10_bs128.py @@ -0,0 +1,15 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)) +# learning policy +param_scheduler = dict( + type='MultiStepLR', by_epoch=True, milestones=[100, 150], gamma=0.1) + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=200, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=128) diff --git a/configs/_base_/schedules/cub_bs64.py b/configs/_base_/schedules/cub_bs64.py new file mode 100644 index 0000000..1d0b4be --- /dev/null +++ b/configs/_base_/schedules/cub_bs64.py @@ -0,0 +1,34 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005, nesterov=True)) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=0.01, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=95, + by_epoch=True, + begin=5, + end=100, + ) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=64) diff --git a/configs/_base_/schedules/imagenet_bs1024_adamw_conformer.py b/configs/_base_/schedules/imagenet_bs1024_adamw_conformer.py new file mode 100644 index 0000000..2285d0e --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs1024_adamw_conformer.py @@ -0,0 +1,43 @@ +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + # for batch in each gpu is 128, 8 gpu + # lr = 5e-4 * 128 * 8 / 512 = 0.001 + lr=5e-4 * 128 * 8 / 512, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + }), +) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=295, + eta_min=1e-5, + by_epoch=True, + begin=5, + end=300) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/_base_/schedules/imagenet_bs1024_adamw_hivit.py b/configs/_base_/schedules/imagenet_bs1024_adamw_hivit.py new file mode 100644 index 0000000..5b2df97 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs1024_adamw_hivit.py @@ -0,0 +1,41 @@ +# for batch in each gpu is 128, 8 gpu +# lr = 5e-4 * 128 * 8 / 512 = 0.001 +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4 * 1024 / 512, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + flat_decay_mult=0.0, + custom_keys={ + '.pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0) + }), +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=20, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=20) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/_base_/schedules/imagenet_bs1024_adamw_revvit.py b/configs/_base_/schedules/imagenet_bs1024_adamw_revvit.py new file mode 100644 index 0000000..87fd202 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs1024_adamw_revvit.py @@ -0,0 +1,41 @@ +# for batch in each gpu is 256, 8 gpu +# lr = 5e-4 * 256 * 8 / 512 = 0.002 +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4 * 2048 / 512, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=1.0), +) +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-8 / 2e-3, + by_epoch=True, + end=70, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=70) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py b/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py new file mode 100644 index 0000000..fd06cc1 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py @@ -0,0 +1,41 @@ +# for batch in each gpu is 128, 8 gpu +# lr = 5e-4 * 128 * 8 / 512 = 0.001 +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4 * 1024 / 512, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + flat_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0) + }), +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=20, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=20) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/_base_/schedules/imagenet_bs1024_coslr.py b/configs/_base_/schedules/imagenet_bs1024_coslr.py new file mode 100644 index 0000000..285884d --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs1024_coslr.py @@ -0,0 +1,18 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.8, momentum=0.9, weight_decay=5e-5)) + +# learning policy +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=True, begin=0, end=5), + dict(type='CosineAnnealingLR', T_max=95, by_epoch=True, begin=5, end=100) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size.
+auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py b/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py new file mode 100644 index 0000000..cf38d47 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py @@ -0,0 +1,20 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.5, momentum=0.9, weight_decay=0.00004), + paramwise_cfg=dict(norm_decay_mult=0), +) + +# learning policy +param_scheduler = [ + dict(type='ConstantLR', factor=0.1, by_epoch=False, begin=0, end=5000), + dict(type='PolyLR', eta_min=0, by_epoch=False, begin=5000) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/_base_/schedules/imagenet_bs2048.py b/configs/_base_/schedules/imagenet_bs2048.py new file mode 100644 index 0000000..1cfbfbe --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs2048.py @@ -0,0 +1,21 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='SGD', lr=0.8, momentum=0.9, weight_decay=0.0001, nesterov=True)) + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', start_factor=0.25, by_epoch=False, begin=0, end=2500), + dict( + type='MultiStepLR', by_epoch=True, milestones=[30, 60, 90], gamma=0.1) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/_base_/schedules/imagenet_bs2048_AdamW.py b/configs/_base_/schedules/imagenet_bs2048_AdamW.py new file mode 100644 index 0000000..bbfae8e --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs2048_AdamW.py @@ -0,0 +1,39 @@ +# optimizer +# In ClassyVision, the lr is set to 0.003 for bs4096. +# In this implementation(bs2048), lr = 0.003 / 4096 * (32bs * 64gpus) = 0.0015 +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=0.0015, weight_decay=0.3), + # specific to vit pretrain + paramwise_cfg=dict(custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + }), +) + +# learning policy +warmup_epochs = 15 # about 10000 iterations for ImageNet-1k +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=warmup_epochs, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + eta_min=1e-5, + by_epoch=True, + begin=warmup_epochs) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/_base_/schedules/imagenet_bs2048_adamw_levit.py b/configs/_base_/schedules/imagenet_bs2048_adamw_levit.py new file mode 100644 index 0000000..25a536e --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs2048_adamw_levit.py @@ -0,0 +1,40 @@ +# for batch in each gpu is 256, 8 gpu +# lr = 5e-4 * 256 * 8 / 512 = 0.002 +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=0.002, + weight_decay=0.025, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.attention_biases': dict(decay_mult=0.0), + }), +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-6 / 0.002, + by_epoch=True, + end=5, + # update by iter + convert_to_iter_based=True, + ), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=5) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=1000) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/_base_/schedules/imagenet_bs2048_coslr.py b/configs/_base_/schedules/imagenet_bs2048_coslr.py new file mode 100644 index 0000000..b8551f5 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs2048_coslr.py @@ -0,0 +1,35 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='SGD', lr=0.8, momentum=0.9, weight_decay=0.0001, nesterov=True)) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=0.25, + by_epoch=True, + begin=0, + # about 2500 iterations for ImageNet-1k + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=95, + by_epoch=True, + begin=5, + end=100, + ) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/_base_/schedules/imagenet_bs2048_rsb.py b/configs/_base_/schedules/imagenet_bs2048_rsb.py new file mode 100644 index 0000000..f0d2d79 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs2048_rsb.py @@ -0,0 +1,32 @@ +# optimizer +optim_wrapper = dict(optimizer=dict(type='Lamb', lr=0.005, weight_decay=0.02)) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=95, + eta_min=1.0e-6, + by_epoch=True, + begin=5, + end=100) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/_base_/schedules/imagenet_bs256.py b/configs/_base_/schedules/imagenet_bs256.py new file mode 100644 index 0000000..3f92273 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs256.py @@ -0,0 +1,16 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)) + +# learning policy +param_scheduler = dict( + type='MultiStepLR', by_epoch=True, milestones=[30, 60, 90], gamma=0.1) + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=256) diff --git a/configs/_base_/schedules/imagenet_bs256_140e.py b/configs/_base_/schedules/imagenet_bs256_140e.py new file mode 100644 index 0000000..e65bf52 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs256_140e.py @@ -0,0 +1,16 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)) + +# learning policy +param_scheduler = dict( + type='MultiStepLR', by_epoch=True, milestones=[40, 80, 120], gamma=0.1) + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=140, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=256) diff --git a/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py b/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py new file mode 100644 index 0000000..c8d94a7 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs256_200e_coslr_warmup.py @@ -0,0 +1,34 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=0.25, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True, + ), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=195, + by_epoch=True, + begin=5, + end=200, + ) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=200, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=256) diff --git a/configs/_base_/schedules/imagenet_bs256_coslr.py b/configs/_base_/schedules/imagenet_bs256_coslr.py new file mode 100644 index 0000000..44e2c8b --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs256_coslr.py @@ -0,0 +1,16 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)) + +# learning policy +param_scheduler = dict( + type='CosineAnnealingLR', T_max=100, by_epoch=True, begin=0, end=100) + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=256) diff --git a/configs/_base_/schedules/imagenet_bs256_coslr_coswd_300e.py b/configs/_base_/schedules/imagenet_bs256_coslr_coswd_300e.py new file mode 100644 index 0000000..318e031 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs256_coslr_coswd_300e.py @@ -0,0 +1,40 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=0.001, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=295, + eta_min=1.0e-6, + by_epoch=True, + begin=5, + end=300), + dict( + type='CosineAnnealingParamScheduler', + param_name='weight_decay', + eta_min=0.00001, + by_epoch=True, + begin=0, + end=300) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=256) diff --git a/configs/_base_/schedules/imagenet_bs256_epochstep.py b/configs/_base_/schedules/imagenet_bs256_epochstep.py new file mode 100644 index 0000000..b8c2b90 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs256_epochstep.py @@ -0,0 +1,15 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.045, momentum=0.9, weight_decay=0.00004)) + +# learning policy +param_scheduler = dict(type='StepLR', by_epoch=True, step_size=1, gamma=0.98) + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=256) diff --git a/configs/_base_/schedules/imagenet_bs4096_AdamW.py b/configs/_base_/schedules/imagenet_bs4096_AdamW.py new file mode 100644 index 0000000..84b1f39 --- /dev/null +++ b/configs/_base_/schedules/imagenet_bs4096_AdamW.py @@ -0,0 +1,39 @@ +# optimizer +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=0.003, weight_decay=0.3), + # specific to vit pretrain + paramwise_cfg=dict(custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + }), +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=30, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=270, + by_epoch=True, + begin=30, + end=300, + ) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/_base_/schedules/imagenet_lars_coslr_200e.py b/configs/_base_/schedules/imagenet_lars_coslr_200e.py new file mode 100644 index 0000000..baba55c --- /dev/null +++ b/configs/_base_/schedules/imagenet_lars_coslr_200e.py @@ -0,0 +1,20 @@ +# optimizer wrapper +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='LARS', lr=4.8, weight_decay=1e-6, momentum=0.9)) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', T_max=190, by_epoch=True, begin=10, end=200) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=200) diff --git a/configs/_base_/schedules/imagenet_lars_coslr_90e.py b/configs/_base_/schedules/imagenet_lars_coslr_90e.py new file mode 100644 index 0000000..6e7875a --- /dev/null +++ b/configs/_base_/schedules/imagenet_lars_coslr_90e.py @@ -0,0 +1,14 @@ +# optimizer wrapper +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='LARS', lr=1.6, momentum=0.9, weight_decay=0.)) + +# learning rate scheduler +param_scheduler = [ + dict(type='CosineAnnealingLR', T_max=90, by_epoch=True, begin=0, end=90) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=90) +val_cfg = dict() +test_cfg = dict() diff --git a/configs/_base_/schedules/imagenet_sgd_coslr_100e.py b/configs/_base_/schedules/imagenet_sgd_coslr_100e.py new file mode 100644 index 0000000..08e9a3e --- /dev/null +++ b/configs/_base_/schedules/imagenet_sgd_coslr_100e.py @@ -0,0 +1,14 @@ +# optimizer wrapper +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.3, momentum=0.9, weight_decay=1e-6)) + +# learning rate scheduler +param_scheduler = [ + dict(type='CosineAnnealingLR', T_max=100, by_epoch=True, begin=0, end=100) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100) +val_cfg = dict() +test_cfg = dict() diff --git a/configs/_base_/schedules/imagenet_sgd_coslr_200e.py b/configs/_base_/schedules/imagenet_sgd_coslr_200e.py new file mode 100644 index 0000000..f38e498 --- /dev/null +++ b/configs/_base_/schedules/imagenet_sgd_coslr_200e.py @@ -0,0 +1,12 @@ +# optimizer wrapper +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.03, weight_decay=1e-4, momentum=0.9)) + +# learning rate scheduler +param_scheduler = [ + dict(type='CosineAnnealingLR', T_max=200, by_epoch=True, begin=0, end=200) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=200) diff --git a/configs/_base_/schedules/imagenet_sgd_steplr_100e.py b/configs/_base_/schedules/imagenet_sgd_steplr_100e.py new file mode 100644 index 0000000..75b725c --- /dev/null +++ b/configs/_base_/schedules/imagenet_sgd_steplr_100e.py @@ -0,0 +1,14 @@ +# optimizer wrapper +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=1e-4)) + +# learning rate scheduler +param_scheduler = [ + dict(type='MultiStepLR', by_epoch=True, milestones=[60, 80], gamma=0.1) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100) +val_cfg = dict() +test_cfg = dict() diff --git a/configs/arcface/README.md b/configs/arcface/README.md new file mode 100644 index 0000000..6b2ee6a --- /dev/null +++ b/configs/arcface/README.md @@ -0,0 +1,80 @@ +# ArcFace + +> [ArcFace: Additive Angular Margin Loss for Deep Face 
Recognition](https://arxiv.org/abs/1801.07698) + + + +## Abstract + +Recently, a popular line of research in face recognition is adopting margins in the well-established softmax loss function to maximize class separability. In this paper, we first introduce an Additive Angular Margin Loss (ArcFace), which not only has a clear geometric interpretation but also significantly enhances the discriminative power. Since ArcFace is susceptible to the massive label noise, we further propose sub-center ArcFace, in which each class contains K sub-centers and training samples only need to be close to any of the K positive sub-centers. Sub-center ArcFace encourages one dominant sub-class that contains the majority of clean faces and non-dominant sub-classes that include hard or noisy faces. Based on this self-propelled isolation, we boost the performance through automatically purifying raw web faces under massive real-world noise. Besides discriminative feature embedding, we also explore the inverse problem, mapping feature vectors to face images. Without training any additional generator or discriminator, the pre-trained ArcFace model can generate identity-preserved face images for both subjects inside and outside the training data only by using the network gradient and Batch Normalization (BN) priors. Extensive experiments demonstrate that ArcFace can enhance the discriminative feature embedding as well as strengthen the generative face synthesis. + +
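As a concrete reference for the additive angular margin described above, the following is a minimal PyTorch-style sketch of how ArcFace logits can be computed before a standard cross-entropy loss. It is only an illustration under assumed default hyper-parameters (`scale=64`, `margin=0.5`), not the `ArcFaceClsHead` implementation used by the configs in this folder.

```python
import torch
import torch.nn.functional as F


def arcface_logits(features, weight, labels, scale=64.0, margin=0.5):
    """Minimal sketch of ArcFace-style logits.

    features: (N, C) embeddings, weight: (num_classes, C), labels: (N,).
    """
    # cosine similarity between L2-normalized embeddings and class centers
    cosine = F.linear(F.normalize(features), F.normalize(weight))
    theta = torch.acos(cosine.clamp(-1 + 1e-7, 1 - 1e-7))
    # add the angular margin only to the target-class angle
    target = F.one_hot(labels, num_classes=weight.size(0)).bool()
    logits = torch.where(target, torch.cos(theta + margin), cosine)
    return scale * logits


# loss = F.cross_entropy(arcface_logits(feats, class_weight, labels), labels)
```

In this repository the same idea is provided by the `ArcFaceClsHead` used in the config below.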
+ +## How to use it? + + + +**Retrieve image** + +```python +from mmpretrain import ImageRetrievalInferencer + +inferencer = ImageRetrievalInferencer('resnet50-arcface_inshop', prototype='demo/') +predict = inferencer('demo/dog.jpg', topk=2)[0] +print(predict[0]) +print(predict[1]) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('resnet50-arcface_inshop', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/arcface/resnet50-arcface_8xb32_inshop.py +``` + +Test: + +```shell +python tools/test.py configs/arcface/resnet50-arcface_8xb32_inshop.py https://download.openmmlab.com/mmclassification/v0/arcface/resnet50-arcface_inshop_20230202-b766fe7f.pth +``` + + + +## Models and results + +### Image Retrieval on InShop + +| Model | Pretrain | Params(M) | Flops(G) | Recall@1 | mAP@10 | Config | Download | +| :-----------------------: | :------------------------------------------------: | :-------: | :------: | :------: | :----: | :------------------------------------------: | :------------------------------------------------: | +| `resnet50-arcface_inshop` | [ImageNet-21k-mill](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth) | 31.69 | 16.48 | 90.18 | 69.30 | [config](./resnet50-arcface_8xb32_inshop.py) | [model](https://download.openmmlab.com/mmclassification/v0/arcface/resnet50-arcface_inshop_20230202-b766fe7f.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/arcface/resnet50-arcface_inshop_20230202-b766fe7f.log) | + +## Citation + +```bibtex +@inproceedings{deng2018arcface, +title={ArcFace: Additive Angular Margin Loss for Deep Face Recognition}, +author={Deng, Jiankang and Guo, Jia and Niannan, Xue and Zafeiriou, Stefanos}, +booktitle={CVPR}, +year={2019} +} +``` diff --git a/configs/arcface/metafile.yml b/configs/arcface/metafile.yml new file mode 100644 index 0000000..050aba5 --- /dev/null +++ b/configs/arcface/metafile.yml @@ -0,0 +1,28 @@ +Collections: + - Name: ArcFace + Metadata: + Training Data: InShop + Architecture: + - Additive Angular Margin Loss + Paper: + URL: https://arxiv.org/abs/1801.07698 + Title: 'ArcFace: Additive Angular Margin Loss for Deep Face Recognition' + README: configs/arcface/README.md + Code: + Version: v1.0.0rc3 + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc3/mmcls/models/heads/margin_head.py + +Models: + - Name: resnet50-arcface_inshop + Metadata: + FLOPs: 16571226112 + Parameters: 31693888 + In Collection: ArcFace + Results: + - Dataset: InShop + Metrics: + Recall@1: 90.18 + mAP@10: 69.30 + Task: Image Retrieval + Weights: https://download.openmmlab.com/mmclassification/v0/arcface/resnet50-arcface_inshop_20230202-b766fe7f.pth + Config: configs/arcface/resnet50-arcface_8xb32_inshop.py diff --git a/configs/arcface/resnet50-arcface_8xb32_inshop.py b/configs/arcface/resnet50-arcface_8xb32_inshop.py new file mode 100644 index 0000000..cc351e7 --- /dev/null +++ b/configs/arcface/resnet50-arcface_8xb32_inshop.py @@ -0,0 +1,71 @@ +_base_ = [ + '../_base_/datasets/inshop_bs32_448.py', + '../_base_/schedules/cub_bs64.py', + '../_base_/default_runtime.py', +] + +pretrained = 
'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth' # noqa +model = dict( + type='ImageToImageRetriever', + image_encoder=[ + dict( + type='ResNet', + depth=50, + init_cfg=dict( + type='Pretrained', checkpoint=pretrained, prefix='backbone')), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='ArcFaceClsHead', + num_classes=3997, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + init_cfg=None), + prototype={{_base_.gallery_dataloader}}) + +# runtime settings +default_hooks = dict( + # log every 20 intervals + logger=dict(type='LoggerHook', interval=20), + # save last three checkpoints + checkpoint=dict( + type='CheckpointHook', + save_best='auto', + interval=1, + max_keep_ckpts=3, + rule='greater')) + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0005, nesterov=True)) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=0.01, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=45, + by_epoch=True, + begin=5, + end=50, + ) +] + +train_cfg = dict(by_epoch=True, max_epochs=50, val_interval=1) + +auto_scale_lr = dict(enable=True, base_batch_size=256) + +custom_hooks = [ + dict(type='PrepareProtoBeforeValLoopHook'), + dict(type='SyncBuffersHook') +] diff --git a/configs/barlowtwins/README.md b/configs/barlowtwins/README.md new file mode 100644 index 0000000..515d138 --- /dev/null +++ b/configs/barlowtwins/README.md @@ -0,0 +1,85 @@ +# BarlowTwins + +> [Barlow Twins: Self-Supervised Learning via Redundancy Reduction](https://arxiv.org/abs/2103.03230) + + + +## Abstract + +Self-supervised learning (SSL) is rapidly closing the gap with supervised methods on large computer vision benchmarks. A successful approach to SSL is to learn embeddings which are invariant to distortions of the input sample. However, a recurring issue with this approach is the existence of trivial constant solutions. Most current methods avoid such solutions by careful implementation details. We propose an objective function that naturally avoids collapse by measuring the cross-correlation matrix between the outputs of two identical networks fed with distorted versions of a sample, and making it as close to the identity matrix as possible. This causes the embedding vectors of distorted versions of a sample to be similar, while minimizing the redundancy between the components of these vectors. The method is called Barlow Twins, owing to neuroscientist H. Barlow's redundancy-reduction principle applied to a pair of identical networks. Barlow Twins does not require large batches nor asymmetry between the network twins such as a predictor network, gradient stopping, or a moving average on the weight updates. Intriguingly it benefits from very high-dimensional output vectors. Barlow Twins outperforms previous methods on ImageNet for semi-supervised classification in the low-data regime, and is on par with current state of the art for ImageNet classification with a linear classifier head, and for transfer tasks of classification and object detection. + +
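To make the redundancy-reduction objective above concrete, here is a minimal sketch of the Barlow Twins loss: the empirical cross-correlation matrix between the two embeddings is pushed toward the identity. This is not the `CrossCorrelationLoss` used by the configs below, and the trade-off weight `lambda_=5e-3` is the value commonly quoted for the method, assumed here for illustration.

```python
import torch


def barlow_twins_loss(z1, z2, lambda_=5e-3):
    """Minimal sketch of the Barlow Twins objective.

    z1, z2: (N, D) projector outputs for two distorted views of the same batch.
    """
    n, _ = z1.shape
    # standardize each embedding dimension over the batch
    z1 = (z1 - z1.mean(0)) / (z1.std(0) + 1e-6)
    z2 = (z2 - z2.mean(0)) / (z2.std(0) + 1e-6)
    # empirical cross-correlation matrix, shape (D, D)
    c = z1.T @ z2 / n
    on_diag = (torch.diagonal(c) - 1).pow(2).sum()
    off_diag = (c - torch.diag_embed(torch.diagonal(c))).pow(2).sum()
    # push the diagonal toward 1 and the off-diagonal toward 0
    return on_diag + lambda_ * off_diag
```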
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('resnet50_barlowtwins-pre_8xb32-linear-coslr-100e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('barlowtwins_resnet50_8xb256-coslr-300e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/barlowtwins/benchmarks/resnet50_8xb32-linear-coslr-100e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k/resnet50_linear-8xb32-coslr-100e_in1k/resnet50_linear-8xb32-coslr-100e_in1k_20220825-52fde35f.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :-------------------------------------------- | :--------: | :-------: | :------------------------------------------------------: | :------------------------------------------------------------------------------: | +| `barlowtwins_resnet50_8xb256-coslr-300e_in1k` | 174.54 | 4.11 | [config](barlowtwins_resnet50_8xb256-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k/barlowtwins_resnet50_8xb256-coslr-300e_in1k_20220825-57307488.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k/barlowtwins_resnet50_8xb256-coslr-300e_in1k_20220825-57307488.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `resnet50_barlowtwins-pre_8xb32-linear-coslr-100e_in1k` | [BARLOWTWINS](https://download.openmmlab.com/mmselfsup/1.x/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k/barlowtwins_resnet50_8xb256-coslr-300e_in1k_20220825-57307488.pth) | 25.56 | 4.11 | 71.80 | [config](benchmarks/resnet50_8xb32-linear-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k/resnet50_linear-8xb32-coslr-100e_in1k/resnet50_linear-8xb32-coslr-100e_in1k_20220825-52fde35f.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k/resnet50_linear-8xb32-coslr-100e_in1k/resnet50_linear-8xb32-coslr-100e_in1k_20220825-52fde35f.json) | + +## Citation + +```bibtex +@inproceedings{zbontar2021barlow, + title={Barlow twins: Self-supervised learning via redundancy reduction}, + author={Zbontar, Jure and Jing, Li and Misra, Ishan and LeCun, Yann and Deny, St{\'e}phane}, + booktitle={International Conference on Machine Learning}, + year={2021}, +} +``` diff --git a/configs/barlowtwins/barlowtwins_resnet50_8xb256-coslr-1000e_in1k.py 
b/configs/barlowtwins/barlowtwins_resnet50_8xb256-coslr-1000e_in1k.py new file mode 100644 index 0000000..f12dd2e --- /dev/null +++ b/configs/barlowtwins/barlowtwins_resnet50_8xb256-coslr-1000e_in1k.py @@ -0,0 +1,70 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_byol.py', + '../_base_/default_runtime.py', +] +# datasets +train_dataloader = dict(batch_size=256) + +# model settings +model = dict( + type='BarlowTwins', + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=True), + neck=dict( + type='NonLinearNeck', + in_channels=2048, + hid_channels=8192, + out_channels=8192, + num_layers=3, + with_last_bn=False, + with_last_bn_affine=False, + with_avg_pool=True, + init_cfg=dict( + type='Kaiming', distribution='uniform', layer=['Linear'])), + head=dict( + type='LatentCrossCorrelationHead', + in_channels=8192, + loss=dict(type='CrossCorrelationLoss'))) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='LARS', lr=1.6, momentum=0.9, weight_decay=1e-6), + paramwise_cfg=dict( + custom_keys={ + 'bn': dict(decay_mult=0, lr_mult=0.024, lars_exclude=True), + 'bias': dict(decay_mult=0, lr_mult=0.024, lars_exclude=True), + # bn layer in ResNet block downsample module + 'downsample.1': dict( + decay_mult=0, lr_mult=0.024, lars_exclude=True), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.6e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=990, + eta_min=0.0016, + by_epoch=True, + begin=10, + end=1000, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1000) +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k.py b/configs/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k.py new file mode 100644 index 0000000..74a7f2b --- /dev/null +++ b/configs/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k.py @@ -0,0 +1,70 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_byol.py', + '../_base_/default_runtime.py', +] +# datasets +train_dataloader = dict(batch_size=256) + +# model settings +model = dict( + type='BarlowTwins', + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=True), + neck=dict( + type='NonLinearNeck', + in_channels=2048, + hid_channels=8192, + out_channels=8192, + num_layers=3, + with_last_bn=False, + with_last_bn_affine=False, + with_avg_pool=True, + init_cfg=dict( + type='Kaiming', distribution='uniform', layer=['Linear'])), + head=dict( + type='LatentCrossCorrelationHead', + in_channels=8192, + loss=dict(type='CrossCorrelationLoss'))) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='LARS', lr=1.6, momentum=0.9, weight_decay=1e-6), + paramwise_cfg=dict( + custom_keys={ + 'bn': dict(decay_mult=0, lr_mult=0.024, lars_exclude=True), + 'bias': dict(decay_mult=0, lr_mult=0.024, lars_exclude=True), + # bn layer in ResNet block downsample module + 'downsample.1': dict( + decay_mult=0, lr_mult=0.024, lars_exclude=True), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1.6e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=290, + eta_min=0.0016, + by_epoch=True, + begin=10, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/barlowtwins/benchmarks/resnet50_8xb32-linear-coslr-100e_in1k.py b/configs/barlowtwins/benchmarks/resnet50_8xb32-linear-coslr-100e_in1k.py new file mode 100644 index 0000000..2f4e4f5 --- /dev/null +++ b/configs/barlowtwins/benchmarks/resnet50_8xb32-linear-coslr-100e_in1k.py @@ -0,0 +1,15 @@ +_base_ = [ + '../../_base_/models/resnet50.py', + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_sgd_coslr_100e.py', + '../../_base_/default_runtime.py', +] + +model = dict( + backbone=dict( + frozen_stages=4, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.'))) + +# runtime settings +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) diff --git a/configs/barlowtwins/metafile.yml b/configs/barlowtwins/metafile.yml new file mode 100644 index 0000000..705080e --- /dev/null +++ b/configs/barlowtwins/metafile.yml @@ -0,0 +1,44 @@ +Collections: + - Name: BarlowTwins + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - LARS + Training Resources: 8x A100 GPUs + Architecture: + - ResNet + - BarlowTwins + Paper: + Title: 'Barlow Twins: Self-Supervised Learning via Redundancy Reduction' + URL: https://arxiv.org/abs/2103.03230 + README: configs/barlowtwins/README.md + +Models: + - Name: barlowtwins_resnet50_8xb256-coslr-300e_in1k + Metadata: + Epochs: 300 + Batch Size: 2048 + FLOPs: 4109364224 + Parameters: 174535744 + Training Data: ImageNet-1k + In Collection: BarlowTwins + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k/barlowtwins_resnet50_8xb256-coslr-300e_in1k_20220825-57307488.pth + Config: configs/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k.py + Downstream: + - resnet50_barlowtwins-pre_8xb32-linear-coslr-100e_in1k + - Name: resnet50_barlowtwins-pre_8xb32-linear-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 256 + FLOPs: 4109464576 + Parameters: 25557032 + Training Data: ImageNet-1k + In Collection: BarlowTwins + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 71.8 + Weights: https://download.openmmlab.com/mmselfsup/1.x/barlowtwins/barlowtwins_resnet50_8xb256-coslr-300e_in1k/resnet50_linear-8xb32-coslr-100e_in1k/resnet50_linear-8xb32-coslr-100e_in1k_20220825-52fde35f.pth + Config: configs/barlowtwins/benchmarks/resnet50_8xb32-linear-coslr-100e_in1k.py diff --git a/configs/beit/README.md b/configs/beit/README.md new file mode 100644 index 0000000..404e652 --- /dev/null +++ b/configs/beit/README.md @@ -0,0 +1,88 @@ +# BEiT + +> [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) + + + +## Abstract + +We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation from Image Transformers. Following BERT developed in the natural language processing area, we propose a masked image modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e, image patches (such as 16x16 pixels), and visual tokens (i.e., discrete tokens). We first "tokenize" the original image into visual tokens. Then we randomly mask some image patches and fed them into the backbone Transformer. The pre-training objective is to recover the original visual tokens based on the corrupted image patches. 
After pre-training BEiT, we directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder. Experimental results on image classification and semantic segmentation show that our model achieves competitive results with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K, significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains 86.3% only using ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%). + +
+ +
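+In code terms, the pre-training objective above reduces to a cross-entropy loss computed only at the masked positions: each masked patch must predict the discrete visual token that the image tokenizer assigned to it. Below is a minimal, illustrative sketch of that loss; the function name and tensor shapes are assumptions for illustration, and the actual implementation is the `BEiT` algorithm configured in this folder (with a DALL-E target generator and `BEiTV1Head`).
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+def masked_token_loss(patch_logits: torch.Tensor, visual_tokens: torch.Tensor,
+                      mask: torch.Tensor) -> torch.Tensor:
+    """Illustrative BEiT-style masked image modeling loss.
+
+    patch_logits: (B, N, vocab_size) predictions for every patch position.
+    visual_tokens: (B, N) token ids produced by the image tokenizer.
+    mask: (B, N) bool tensor, True where the input patch was masked.
+    """
+    # Only the corrupted (masked) patches contribute to the loss.
+    return F.cross_entropy(patch_logits[mask], visual_tokens[mask])
+```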
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('beit-base-p16_beit-pre_8xb128-coslr-100e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('beit_beit-base-p16_8xb256-amp-coslr-300e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/beit/beit_beit-base-p16_8xb256-amp-coslr-300e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/beit/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221128-0ca393e9.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :---------------------------------------------- | :--------: | :-------: | :--------------------------------------------------------: | :--------------------------------------------------------------------------: | +| `beit_beit-base-p16_8xb256-amp-coslr-300e_in1k` | 86.53 | 17.58 | [config](beit_beit-base-p16_8xb256-amp-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/beit/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221128-ab79e626.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/beit/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221128-ab79e626.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------------------------------- | :----------------------------------------: | :--------: | :-------: | :-------: | :-------: | :--------------------------------------: | :----------------------------------------: | +| `beit-base-p16_beit-pre_8xb128-coslr-100e_in1k` | [BEIT](https://download.openmmlab.com/mmselfsup/1.x/beit/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221128-ab79e626.pth) | 86.53 | 17.58 | 83.10 | N/A | [config](benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/beit/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221128-0ca393e9.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/beit/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221128-0ca393e9.json) | +| `beit-base-p16_beit-in21k-pre_3rdparty_in1k`\* | BEIT ImageNet-21k | 86.53 | 17.58 | 85.28 | 97.59 | [config](benchmarks/beit-base-p16_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/beit/beit-base_3rdparty_in1k_20221114-c0a4df23.pth) | + +*Models with * are converted from the [official repo](https://github.com/microsoft/unilm/tree/master/beit). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{bao2022beit, + title={{BE}iT: {BERT} Pre-Training of Image Transformers}, + author={Hangbo Bao and Li Dong and Songhao Piao and Furu Wei}, + booktitle={International Conference on Learning Representations}, + year={2022}, +} +``` diff --git a/configs/beit/beit_beit-base-p16_8xb256-amp-coslr-300e_in1k.py b/configs/beit/beit_beit-base-p16_8xb256-amp-coslr-300e_in1k.py new file mode 100644 index 0000000..5786f79 --- /dev/null +++ b/configs/beit/beit_beit-base-p16_8xb256-amp-coslr-300e_in1k.py @@ -0,0 +1,130 @@ +_base_ = '../_base_/default_runtime.py' + +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='TwoNormDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + second_mean=[-31.875, -31.875, -31.875], + second_std=[318.75, 318.75, 318.75], + to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.4, + hue=0.), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandomResizedCropAndInterpolationWithTwoPic', + size=224, + second_size=112, + interpolation='bicubic', + second_interpolation='lanczos', + scale=(0.08, 1.0)), + dict( + type='BEiTMaskGenerator', + input_size=(14, 14), + num_masking_patches=75, + max_num_patches=None, + min_num_patches=16), + dict(type='PackInputs') +] +train_dataloader = dict( + batch_size=256, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='meta/train.txt', + data_prefix=dict(img_path='train/'), + pipeline=train_pipeline)) + +# model settings +model = dict( + type='BEiT', + backbone=dict( + type='BEiTPretrainViT', + arch='base', + patch_size=16, + drop_path_rate=0.1, + final_norm=True, + out_type='raw', + layer_scale_init_value=0.1, + init_cfg=[ + dict(type='TruncNormal', std=0.02, layer='Linear'), + dict(type='TruncNormal', std=0.02, layer='Conv2d'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]), + neck=None, + head=dict( + type='BEiTV1Head', + embed_dims=768, + num_embed=8192, + loss=dict(type='CrossEntropyLoss')), + target_generator=dict( + type='DALL-E', + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/dalle_encoder.pth', # noqa: E501 + ))) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', lr=1.5e-3, betas=(0.9, 0.999), weight_decay=0.05), + clip_grad=dict(max_norm=3.0), + paramwise_cfg=dict( + custom_keys={ + # the following configurations are designed for BEiT + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + 'q_bias': dict(decay_mult=0.0), + 'v_bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.gamma': dict(decay_mult=0.0), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=1e-5, + by_epoch=True, + begin=10, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 
checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py b/configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py new file mode 100644 index 0000000..dbab34f --- /dev/null +++ b/configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py @@ -0,0 +1,127 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +data_preprocessor = dict( + num_classes=1000, + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + to_rgb=True, +) + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='BEiTViT', + arch='base', + img_size=224, + patch_size=16, + drop_path_rate=0.1, + out_type='avg_featmap', + use_abs_pos_emb=False, + use_rel_pos_bias=True, + use_shared_rel_pos_bias=False, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=0.02)]), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict(batch_size=128, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=128, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer wrapper +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=4e-3, weight_decay=0.05, betas=(0.9, 0.999)), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + _delete_=True, + layer_decay_rate=0.65, + custom_keys={ + # the following configurations are designed for BEiT + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + 'q_bias': dict(decay_mult=0.0), + 'v_bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.gamma': dict(decay_mult=0.0), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=20, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + by_epoch=True, + begin=20, + end=100, + eta_min=1e-6, + convert_to_iter_based=True) +] + +# runtime settings +default_hooks = dict( + 
# save checkpoint per epoch. + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=2)) + +train_cfg = dict(by_epoch=True, max_epochs=100) + +randomness = dict(seed=0) diff --git a/configs/beit/benchmarks/beit-base-p16_8xb64_in1k.py b/configs/beit/benchmarks/beit-base-p16_8xb64_in1k.py new file mode 100644 index 0000000..8380b69 --- /dev/null +++ b/configs/beit/benchmarks/beit-base-p16_8xb64_in1k.py @@ -0,0 +1,43 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +model = dict( + type='ImageClassifier', + backbone=dict( + type='BEiTViT', + arch='base', + img_size=224, + patch_size=16, + out_type='avg_featmap', + use_abs_pos_emb=False, + use_rel_pos_bias=True, + use_shared_rel_pos_bias=False, + ), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/beit/metafile.yml b/configs/beit/metafile.yml new file mode 100644 index 0000000..e4524fa --- /dev/null +++ b/configs/beit/metafile.yml @@ -0,0 +1,69 @@ +Collections: + - Name: BEiT + Metadata: + Architecture: + - Attention Dropout + - Convolution + - Dense Connections + - Dropout + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + - Tanh Activation + Paper: + Title: 'BEiT: BERT Pre-Training of Image Transformers' + URL: https://arxiv.org/abs/2106.08254 + README: configs/beit/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/beit.py + Version: v1.0.0rc4 + +Models: + - Name: beit_beit-base-p16_8xb256-amp-coslr-300e_in1k + Metadata: + Epochs: 300 + Batch Size: 2048 + FLOPs: 17581219584 + Parameters: 86530984 + Training Data: ImageNet-1k + In Collection: BEiT + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/beit/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221128-ab79e626.pth + Config: configs/beit/beit_beit-base-p16_8xb256-amp-coslr-300e_in1k.py + Downstream: + - beit-base-p16_beit-pre_8xb128-coslr-100e_in1k + - Name: beit-base-p16_beit-pre_8xb128-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 1024 + FLOPs: 17581219584 + Parameters: 86530984 + Training Data: ImageNet-1k + In Collection: BEiT + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.1 + Weights: https://download.openmmlab.com/mmselfsup/1.x/beit/beit_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221128-0ca393e9.pth + Config: configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py + - Name: beit-base-p16_beit-in21k-pre_3rdparty_in1k + Metadata: + FLOPs: 17581219584 + Parameters: 86530984 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: BEiT + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.28 + Top 5 Accuracy: 97.59 + Weights: 
https://download.openmmlab.com/mmclassification/v0/beit/beit-base_3rdparty_in1k_20221114-c0a4df23.pth + Config: configs/beit/benchmarks/beit-base-p16_8xb64_in1k.py + Converted From: + Weights: https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth + Code: https://github.com/microsoft/unilm/tree/master/beit diff --git a/configs/beitv2/README.md b/configs/beitv2/README.md new file mode 100644 index 0000000..5447e2d --- /dev/null +++ b/configs/beitv2/README.md @@ -0,0 +1,90 @@ +# BEiTv2 + +> [BEiT v2: Masked Image Modeling with Vector-Quantized Visual Tokenizers](https://arxiv.org/abs/2208.06366) + + + +## Abstract + +Masked image modeling (MIM) has demonstrated impressive results in self-supervised representation learning by recovering corrupted image patches. However, most existing studies operate on low-level image pixels, which hinders the exploitation of high-level semantics for representation models. In this work, we propose to use a semantic-rich visual tokenizer as the reconstruction target for masked prediction, providing a systematic way to promote MIM from pixel-level to semantic-level. Specifically, we propose vector-quantized knowledge distillation to train the tokenizer, which discretizes a continuous semantic space to compact codes. We then pretrain vision Transformers by predicting the original visual tokens for the masked image patches. Furthermore, we introduce a patch aggregation strategy which associates discrete image patches to enhance global semantic representation. Experiments on image classification and semantic segmentation show that BEiT v2 outperforms all compared MIM methods. On ImageNet-1K (224 size), the base-size BEiT v2 achieves 85.5% top-1 accuracy for fine-tuning and 80.1% top-1 accuracy for linear probing. The large-size BEiT v2 obtains 87.3% top-1 accuracy for ImageNet-1K (224 size) fine-tuning, and 56.7% mIoU on ADE20K for semantic segmentation. + +
+ +
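+The key difference from BEiT v1 is where the prediction targets come from: instead of the DALL-E tokenizer, a VQ-KD tokenizer maps patch features from a frozen encoder to their nearest entry in a learned codebook, and those token ids become the masked-prediction targets. A rough, illustrative sketch of that quantization step is shown below; the function name and shapes are assumptions, and the real target generator is the `VQKD` module configured in this folder.
+
+```python
+import torch
+
+
+def quantize_patches(patch_features: torch.Tensor, codebook: torch.Tensor) -> torch.Tensor:
+    """patch_features: (B, N, C) features from the frozen tokenizer encoder.
+    codebook: (K, C) learned visual vocabulary (num_embed=8192 in these configs).
+    Returns (B, N) discrete token ids used as the MIM targets.
+    """
+    # Nearest-neighbour lookup in the codebook: one token id per patch.
+    dists = torch.cdist(patch_features, codebook.expand(patch_features.size(0), -1, -1))
+    return dists.argmin(dim=-1)
+```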
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('beit-base-p16_beitv2-pre_8xb128-coslr-100e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/beitv2/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/beitv2/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221212-d1c0789e.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :------------------------------------------------ | :--------: | :-------: | :----------------------------------------------------------: | :----------------------------------------------------------------------: | +| `beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k` | 192.81 | 17.58 | [config](beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/beitv2/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221212-a157be30.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/beitv2/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221212-a157be30.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------------------------------- | :----------------------------------------: | :--------: | :-------: | :-------: | :-------: | :--------------------------------------: | :----------------------------------------: | +| `beit-base-p16_beitv2-pre_8xb128-coslr-100e_in1k` | [BEITV2](https://download.openmmlab.com/mmselfsup/1.x/beitv2/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221212-a157be30.pth) | 86.53 | 17.58 | 85.00 | N/A | [config](benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/beitv2/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221212-d1c0789e.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/beitv2/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221212-d1c0789e.json) | +| `beit-base-p16_beitv2-in21k-pre_3rdparty_in1k`\* | BEITV2 ImageNet-21k | 86.53 | 17.58 | 86.47 | 97.99 | [config](benchmarks/beit-base-p16_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/beit/beitv2-base_3rdparty_in1k_20221114-73e11905.pth) | + +*Models with * are converted from the [official repo](https://github.com/microsoft/unilm/tree/master/beit2). 
The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{beitv2, + title={{BEiT v2}: Masked Image Modeling with Vector-Quantized Visual Tokenizers}, + author={Zhiliang Peng and Li Dong and Hangbo Bao and Qixiang Ye and Furu Wei}, + year={2022}, + eprint={2208.06366}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-1600e_in1k.py b/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-1600e_in1k.py new file mode 100644 index 0000000..c4a2070 --- /dev/null +++ b/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-1600e_in1k.py @@ -0,0 +1,119 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs256_beitv2.py', + '../_base_/default_runtime.py', +] + +# model settings +vqkd_encoder = dict( + arch='base', + img_size=224, + patch_size=16, + in_channels=3, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + out_type='featmap', + with_cls_token=True, + frozen_stages=-1, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + use_shared_rel_pos_bias=False, + layer_scale_init_value=0., + interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None) + +layer_scale_init_value = 0.1 +drop_path_rate = 0.1 # 0. for 300 epochs and 0.1 for 1600 epochs. +model = dict( + type='BEiT', + backbone=dict( + type='BEiTPretrainViT', + arch='base', + patch_size=16, + out_indices=[-4, -1], + drop_path_rate=drop_path_rate, + final_norm=False, + out_type='raw', + layer_scale_init_value=layer_scale_init_value, + init_cfg=[ + dict(type='TruncNormal', std=0.02, layer='Linear'), + dict(type='TruncNormal', std=0.02, layer='Conv2d'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]), + neck=dict( + type='BEiTV2Neck', + num_layers=2, + early_layers=9, + backbone_arch='base', + drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value, + ), + head=dict( + type='BEiTV2Head', + embed_dims=768, + num_embed=8192, + loss=dict(type='CrossEntropyLoss')), + target_generator=dict( + type='VQKD', + encoder_config=vqkd_encoder, + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa + 'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/vqkd_encoder.pth' # noqa + ))) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + # betas: (0.9, 0.98) for 300 epochs and (0.9, 0.999) for 1600 epochs. 
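+    # This file is the 1600-epoch schedule, so betas=(0.9, 0.999) is used below
+    # (and drop_path_rate=0.1 above).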
+ optimizer=dict( + type='AdamW', lr=1.5e-3, betas=(0.9, 0.999), weight_decay=0.05), + clip_grad=dict(max_norm=3.0), + paramwise_cfg=dict( + custom_keys={ + # the following configurations are designed for BEiT + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + 'q_bias': dict(decay_mult=0.0), + 'v_bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.gamma': dict(decay_mult=0.0), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=1e-5, + by_epoch=True, + begin=10, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1600) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k.py b/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k.py new file mode 100644 index 0000000..fddeccf --- /dev/null +++ b/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k.py @@ -0,0 +1,119 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs256_beitv2.py', + '../_base_/default_runtime.py', +] + +# model settings +vqkd_encoder = dict( + arch='base', + img_size=224, + patch_size=16, + in_channels=3, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + out_type='featmap', + with_cls_token=True, + frozen_stages=-1, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + use_shared_rel_pos_bias=False, + layer_scale_init_value=0., + interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None) + +layer_scale_init_value = 0.1 +drop_path_rate = 0. # 0. for 300 epochs and 0.1 for 1600 epochs. +model = dict( + type='BEiT', + backbone=dict( + type='BEiTPretrainViT', + arch='base', + patch_size=16, + out_indices=[-4, -1], + drop_path_rate=drop_path_rate, + final_norm=False, + out_type='raw', + layer_scale_init_value=layer_scale_init_value, + init_cfg=[ + dict(type='TruncNormal', std=0.02, layer='Linear'), + dict(type='TruncNormal', std=0.02, layer='Conv2d'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]), + neck=dict( + type='BEiTV2Neck', + num_layers=2, + early_layers=9, + backbone_arch='base', + drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value, + ), + head=dict( + type='BEiTV2Head', + embed_dims=768, + num_embed=8192, + loss=dict(type='CrossEntropyLoss')), + target_generator=dict( + type='VQKD', + encoder_config=vqkd_encoder, + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa + 'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/vqkd_encoder.pth' # noqa + ))) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + # betas: (0.9, 0.98) for 300 epochs and (0.9, 0.999) for 1600 epochs. 
+ optimizer=dict( + type='AdamW', lr=1.5e-3, betas=(0.9, 0.98), weight_decay=0.05), + clip_grad=dict(max_norm=3.0), + paramwise_cfg=dict( + custom_keys={ + # the following configurations are designed for BEiT + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + 'q_bias': dict(decay_mult=0.0), + 'v_bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.gamma': dict(decay_mult=0.0), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=1e-5, + by_epoch=True, + begin=10, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/beitv2/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py b/configs/beitv2/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py new file mode 100644 index 0000000..a2c55a7 --- /dev/null +++ b/configs/beitv2/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py @@ -0,0 +1,122 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='BEiTViT', + arch='base', + img_size=224, + patch_size=16, + # 0.2 for 1600 epochs pretrained models and 0.1 for 300 epochs. 
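+        # The README pairs this fine-tuning config with the 300-epoch pretrained
+        # checkpoint, hence 0.1 here (and layer_decay_rate=0.65 further below).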
+ drop_path_rate=0.1, + out_type='avg_featmap', + use_abs_pos_emb=False, + use_rel_pos_bias=True, + use_shared_rel_pos_bias=False, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=0.02)]), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict(batch_size=128, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=128, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer wrapper +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=5e-4, weight_decay=0.05, betas=(0.9, 0.999)), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + _delete_=True, + # 0.6 for 1600 epochs pretrained models and 0.65 for 300 epochs + layer_decay_rate=0.65, + custom_keys={ + # the following configurations are designed for BEiT + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + 'q_bias': dict(decay_mult=0.0), + 'v_bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.gamma': dict(decay_mult=0.0), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=20, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + by_epoch=True, + begin=20, + end=100, + eta_min=1e-6, + convert_to_iter_based=True) +] + +# runtime settings +default_hooks = dict( + # save checkpoint per epoch. 
+ checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=2)) + +train_cfg = dict(by_epoch=True, max_epochs=100) + +randomness = dict(seed=0) diff --git a/configs/beitv2/benchmarks/beit-base-p16_8xb64_in1k.py b/configs/beitv2/benchmarks/beit-base-p16_8xb64_in1k.py new file mode 100644 index 0000000..17ed4ff --- /dev/null +++ b/configs/beitv2/benchmarks/beit-base-p16_8xb64_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='BEiTViT', + arch='base', + img_size=224, + patch_size=16, + out_type='avg_featmap', + use_abs_pos_emb=False, + use_rel_pos_bias=True, + use_shared_rel_pos_bias=False, + ), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/beitv2/metafile.yml b/configs/beitv2/metafile.yml new file mode 100644 index 0000000..74c3885 --- /dev/null +++ b/configs/beitv2/metafile.yml @@ -0,0 +1,69 @@ +Collections: + - Name: BEiTv2 + Metadata: + Architecture: + - Attention Dropout + - Convolution + - Dense Connections + - Dropout + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + - Tanh Activation + Paper: + Title: 'BEiT v2: Masked Image Modeling with Vector-Quantized Visual Tokenizers' + URL: https://arxiv.org/abs/2208.06366 + README: configs/beitv2/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/beit.py + Version: v1.0.0rc4 + +Models: + - Name: beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k + Metadata: + Epochs: 300 + Batch Size: 2048 + FLOPs: 17581223424 + Parameters: 192811376 + Training Data: ImageNet-1k + In Collection: BEiTv2 + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/beitv2/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221212-a157be30.pth + Config: configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k.py + Downstream: + - beit-base-p16_beitv2-pre_8xb128-coslr-100e_in1k + - Name: beit-base-p16_beitv2-pre_8xb128-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 1024 + FLOPs: 17581219584 + Parameters: 86530984 + Training Data: ImageNet-1k + In Collection: BEiTv2 + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.0 + Weights: https://download.openmmlab.com/mmselfsup/1.x/beitv2/beitv2_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221212-d1c0789e.pth + Config: configs/beitv2/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py + - Name: beit-base-p16_beitv2-in21k-pre_3rdparty_in1k + Metadata: + FLOPs: 17581219584 + Parameters: 86530984 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: BEiTv2 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 86.47 + Top 5 Accuracy: 97.99 + Weights: https://download.openmmlab.com/mmclassification/v0/beit/beitv2-base_3rdparty_in1k_20221114-73e11905.pth + Config: configs/beitv2/benchmarks/beit-base-p16_8xb64_in1k.py + 
Converted From: + Weights: https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21kto1k.pth + Code: https://github.com/microsoft/unilm/tree/master/beit2 diff --git a/configs/blip/README.md b/configs/blip/README.md new file mode 100644 index 0000000..1a8dce3 --- /dev/null +++ b/configs/blip/README.md @@ -0,0 +1,128 @@ +# BLIP + +> [BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation](https://arxiv.org/abs/2201.12086) + + + +## Abstract + +Vision-Language Pre-training (VLP) has advanced the performance for many vision-language tasks. However, most existing pre-trained models only excel in either understanding-based tasks or generation-based tasks. Furthermore, performance improvement has been largely achieved by scaling up the dataset with noisy image-text pairs collected from the web, which is a suboptimal source of supervision. In this paper, we propose BLIP, a new VLP framework which transfers flexibly to both vision-language understanding and generation tasks. BLIP effectively utilizes the noisy web data by bootstrapping the captions, where a captioner generates synthetic captions and a filter removes the noisy ones. We achieve state-of-the-art results on a wide range of vision-language tasks, such as image-text retrieval (+2.7% in average recall@1), image captioning (+2.8% in CIDEr), and VQA (+1.6% in VQA score). BLIP also demonstrates strong generalization ability when directly transferred to video-language tasks in a zero-shot manner. + +
+ +
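+The caption bootstrapping described above (CapFilt) is conceptually a simple loop: a captioner proposes a synthetic caption for each web image, and a filter keeps only the image-text pairs that actually match. Below is a minimal, illustrative sketch of that idea; the two callables are hypothetical stand-ins, not mmpretrain APIs.
+
+```python
+from typing import Callable, Iterable, List, Tuple
+
+
+def capfilt(pairs: Iterable[Tuple[str, str]],
+            generate_caption: Callable[[str], str],
+            is_matched: Callable[[str, str], bool]) -> List[Tuple[str, str]]:
+    """pairs: (image_path, noisy_web_text) tuples collected from the web."""
+    cleaned = []
+    for image, web_text in pairs:
+        synthetic = generate_caption(image)   # the captioner proposes a synthetic caption
+        for text in (web_text, synthetic):
+            if is_matched(image, text):       # the filter removes noisy pairs
+                cleaned.append((image, text))
+    return cleaned
+```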
+ +## How to use it? + + + +**Use the model** + +```python +from mmpretrain import inference_model + +result = inference_model('blip-base_3rdparty_caption', 'demo/cat-dog.png') +print(result) +# {'pred_caption': 'a puppy and a cat sitting on a blanket'} +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/blip/blip-base_8xb32_caption.py https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_coco-caption_20230419-a5b71af3.pth +``` + + + +## Models and results + +### Image Caption on COCO + +| Model | Params (M) | BLEU-4 | CIDER | Config | Download | +| :----------------------------- | :--------: | :----: | :----: | :------------------------------------: | :------------------------------------------------------------------------------------------------------------: | +| `blip-base_3rdparty_caption`\* | 223.97 | 40.12 | 132.82 | [config](./blip-base_8xb32_caption.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_coco-caption_20230419-a5b71af3.pth) | + +### Image Caption on NoCaps + +| Model | Params (M) | SPICE | CIDER | Config | Download | +| :----------------------------- | :--------: | :---: | :----: | :-----------------------------------: | :--------------------------------------------------------------------------------------------------------------: | +| `blip-base_3rdparty_caption`\* | 223.97 | 14.69 | 109.12 | [config](./blip-base_8xb32_nocaps.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_coco-caption_20230419-a5b71af3.pth) | + +### Image Caption on Flickr30k + +| Model | Params (M) | SPICE | CIDER | Config | Download | +| :----------------------------- | :--------: | :---: | :---: | :----------------------------------------------: | :----------------------------------------------------------------------------------------------------: | +| `blip-base_3rdparty_caption`\* | 223.97 | 15.58 | 68.89 | [config](./blip-base_8xb32_caption_flickr30k.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_coco-caption_20230419-a5b71af3.pth) | + +### Visual Grounding on RefCOCO + +| Model | Params (M) | Accuracy (testA) | Accuracy (testB) | Config | Download | +| :------------------------ | :--------: | :--------------: | :--------------: | :----------------------------------: | :-----------------------------------------------------------------------------------------------: | +| `blip-base_8xb16_refcoco` | 498.49 | 86.14 | 77.33 | [config](blip-base_8xb16_refcoco.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_8xb16_refcoco_20230508-d2d10f4c.pth) \| [log](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_8xb16_refcoco_20230508-d2d10f4c.json) | + +### Visual Question Answering on VQAv2 + +| Model | Params (M) | Accuracy | Config | Download | +| :------------------------- | :--------: | :------: | :--------------------------------: | :-------------------------------------------------------------------------------------------------------------------: | +| `blip-base_3rdparty_vqa`\* | 361.48 | 78.20 | [config](./blip-base_8xb32_vqa.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty-capflit_vqa_20230505-81488941.pth) | + +### Visual Question Answering on OK-VQA + +| Model | Params (M) | Accuracy | Config | Download | +| 
:------------------------- | :--------: | :------: | :----------------------------------: | :-------------------------------------------------------------------------------------------------------------------: | +| `blip-base_3rdparty_vqa`\* | 361.48 | 40.59# | [config](./blip-base_8xb32_okvqa.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty-capflit_vqa_20230505-81488941.pth) | + +### Visual Question Answering on OCR-VQA + +| Model | Params (M) | Accuracy | Config | Download | +| :------------------------- | :--------: | :------: | :-----------------------------------: | :-------------------------------------------------------------------------------------------------------------------: | +| `blip-base_3rdparty_vqa`\* | 361.48 | 28.30# | [config](./blip-base_8xb32_ocrvqa.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty-capflit_vqa_20230505-81488941.pth) | + +### Image-To-Text Retrieval on COCO + +| Model | Params (M) | Recall@1 | Recall@5 | Config | Download | +| :------------------------------- | :--------: | :------: | :------: | :--------------------------------------: | :----------------------------------------------------------------------------------------------------: | +| `blip-base_3rdparty_retrieval`\* | 447.49 | 82.52 | 95.34 | [config](./blip-base_8xb32_retrieval.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_coco-retrieval_20230419-a1804d2c.pth) | + +### Text-To-Image Retrieval on COCO + +| Model | Params (M) | Recall@1 | Recall@5 | Config | Download | +| :------------------------------- | :--------: | :------: | :------: | :--------------------------------------: | :----------------------------------------------------------------------------------------------------: | +| `blip-base_3rdparty_retrieval`\* | 447.49 | 64.82 | 86.28 | [config](./blip-base_8xb32_retrieval.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_coco-retrieval_20230419-a1804d2c.pth) | + +### Image-To-Text Retrieval on Flickr30k + +| Model | Params (M) | Recall@1 | Recall@5 | Config | Download | +| :------------------------------- | :--------: | :------: | :------: | :------------------------------------------------: | :------------------------------------------------------------------------------------------: | +| `blip-base_3rdparty_retrieval`\* | 447.49 | 95.10# | 99.60# | [config](./blip-base_8xb32_retrieval_flickr30k.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_coco-retrieval_20230419-a1804d2c.pth) | + +### Text-To-Image Retrieval on Flickr30k + +| Model | Params (M) | Recall@1 | Recall@5 | Config | Download | +| :------------------------------- | :--------: | :------: | :------: | :------------------------------------------------: | :------------------------------------------------------------------------------------------: | +| `blip-base_3rdparty_retrieval`\* | 447.49 | 85.26# | 96.58# | [config](./blip-base_8xb32_retrieval_flickr30k.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_coco-retrieval_20230419-a1804d2c.pth) | + +### NLVR on NLVR2 + +| Model | Params (M) | Top-1 (%) | Config | Download | +| :-------------------------- | :--------: | :-------: | :---------------------------------: | :------------------------------------------------------------------------------------------------------------: | +| `blip-base_3rdparty_nlvr`\* | 259.37 | 82.33 | 
[config](./blip-base_8xb32_nlvr.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_nlvr_20230427-3b14d33f.pth) | + +*Models with * are converted from the [official repo](https://github.com/salesforce/LAVIS). The config files of these models are only for inference. We haven't reproduce the training results.* + +*Results with # denote zero-shot evaluation. The corresponding model hasn't been finetuned on that dataset.* + +## Citation + +```bibtex +@inproceedings{li2022blip, + title={BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation}, + author={Junnan Li and Dongxu Li and Caiming Xiong and Steven Hoi}, + year={2022}, + booktitle={ICML}, +} +``` diff --git a/configs/blip/blip-base_8xb16_refcoco.py b/configs/blip/blip-base_8xb16_refcoco.py new file mode 100644 index 0000000..b498614 --- /dev/null +++ b/configs/blip/blip-base_8xb16_refcoco.py @@ -0,0 +1,62 @@ +_base_ = [ + '../_base_/datasets/refcoco.py', + '../_base_/default_runtime.py', +] + +med_config = { + 'architectures': ['BertModel'], + 'attention_probs_dropout_prob': 0.1, + 'hidden_act': 'gelu', + 'hidden_dropout_prob': 0.1, + 'hidden_size': 768, + 'initializer_range': 0.02, + 'intermediate_size': 3072, + 'layer_norm_eps': 1e-12, + 'max_position_embeddings': 512, + 'model_type': 'bert', + 'num_attention_heads': 12, + 'num_hidden_layers': 12, + 'pad_token_id': 0, + 'add_type_embeddings': False, + 'vocab_size': 30524, + 'encoder_width': 768, + 'add_cross_attention': True +} + +model = dict( + type='BlipGrounding', + visual_encoder=dict( + type='VisionTransformer', + arch='b', + img_size=384, + patch_size=16, + out_type='raw', + ), + text_encoder=dict( + type='XBertEncoder', + med_config=med_config, + ), + multimodal_encoder=dict( + type='XBertEncoder', + med_config=med_config, + ), + tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'), + head=dict( + type='GroundingHead', + decoder=dict( + type='XBertLMHeadDecoder', + med_config=med_config, + ), + box_l1_loss_coeff=4.0, + box_giou_loss_coeff=2.0, + ), +) + +# schedule settings +optimizer = dict(type='AdamW', lr=1.5e-5, weight_decay=0.02) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) +param_scheduler = [dict(type='CosineAnnealingLR', by_epoch=True)] + +train_cfg = dict(by_epoch=True, max_epochs=120) +val_cfg = dict() +test_cfg = dict() diff --git a/configs/blip/blip-base_8xb32_caption.py b/configs/blip/blip-base_8xb32_caption.py new file mode 100644 index 0000000..1e24e9e --- /dev/null +++ b/configs/blip/blip-base_8xb32_caption.py @@ -0,0 +1,59 @@ +_base_ = [ + '../_base_/datasets/coco_caption.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='BlipCaption', + vision_encoder=dict( + type='VisionTransformer', + arch='b', + img_size=384, + patch_size=16, + out_type='raw', + ), + tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'), + decoder_head=dict( + type='SeqGenerationHead', + decoder=dict( + type='XBertLMHeadDecoder', + med_config=dict( + architectures=['BertModel'], + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + layer_norm_eps=1e-12, + max_position_embeddings=512, + model_type='bert', + num_attention_heads=12, + num_hidden_layers=12, + pad_token_id=0, + add_type_embeddings=False, + vocab_size=30524, + encoder_width=768, + add_cross_attention=True), + ), + ), + prompt='a picture of ', + 
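+    # 'a picture of ' is used as the decoding prompt for caption generation;
+    # max_txt_len below limits the generated text length (in tokens).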
max_txt_len=20, +) + +# schedule settings +optim_wrapper = dict(optimizer=dict(type='AdamW', lr=1e-5, weight_decay=0.05)) + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + by_epoch=True, + begin=0, + end=10, + ) +] + +train_cfg = dict(max_epochs=10) +val_cfg = dict() +test_cfg = dict() diff --git a/configs/blip/blip-base_8xb32_caption_flickr30k.py b/configs/blip/blip-base_8xb32_caption_flickr30k.py new file mode 100644 index 0000000..9fe6ec5 --- /dev/null +++ b/configs/blip/blip-base_8xb32_caption_flickr30k.py @@ -0,0 +1,59 @@ +_base_ = [ + '../_base_/datasets/flickr30k_caption.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='BlipCaption', + vision_encoder=dict( + type='VisionTransformer', + arch='b', + img_size=384, + patch_size=16, + out_type='raw', + ), + tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'), + decoder_head=dict( + type='SeqGenerationHead', + decoder=dict( + type='XBertLMHeadDecoder', + med_config=dict( + architectures=['BertModel'], + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + layer_norm_eps=1e-12, + max_position_embeddings=512, + model_type='bert', + num_attention_heads=12, + num_hidden_layers=12, + pad_token_id=0, + add_type_embeddings=False, + vocab_size=30524, + encoder_width=768, + add_cross_attention=True), + ), + ), + prompt='a picture of ', + max_txt_len=20, +) + +# schedule settings +optim_wrapper = dict(optimizer=dict(type='AdamW', lr=1e-5, weight_decay=0.05)) + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + by_epoch=True, + begin=0, + end=10, + ) +] + +train_cfg = dict(max_epochs=10) +val_cfg = dict() +test_cfg = dict() diff --git a/configs/blip/blip-base_8xb32_nlvr.py b/configs/blip/blip-base_8xb32_nlvr.py new file mode 100644 index 0000000..0a6cfe1 --- /dev/null +++ b/configs/blip/blip-base_8xb32_nlvr.py @@ -0,0 +1,59 @@ +_base_ = [ + '../_base_/datasets/nlvr2.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='BlipNLVR', + vision_backbone=dict( + type='VisionTransformer', + arch='b', + img_size=384, + patch_size=16, + out_type='raw', + ), + tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'), + multimodal_backbone=dict( + type='BertModel', + config=dict( + architectures=['BertModel'], + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + layer_norm_eps=1e-12, + max_position_embeddings=512, + model_type='bert', + num_attention_heads=12, + num_hidden_layers=12, + pad_token_id=0, + add_type_embeddings=False, + vocab_size=30524, + encoder_width=768, + add_cross_attention=True, + nlvr=True), + add_pooling_layer=False), +) + +# optimizer +optimizer = dict(type='AdamW', lr=2e-5, weight_decay=0.05) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + by_epoch=True, + begin=0, + end=10, + ) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=10) +val_cfg = dict() +test_cfg = dict() + +default_hooks = dict(logger=dict(interval=1)) diff --git a/configs/blip/blip-base_8xb32_nocaps.py b/configs/blip/blip-base_8xb32_nocaps.py new file mode 100644 index 0000000..c47c56a --- /dev/null +++ b/configs/blip/blip-base_8xb32_nocaps.py @@ -0,0 +1,46 @@ +_base_ = [ + '../_base_/datasets/nocaps.py', + '../_base_/default_runtime.py', +] + +# 
model settings +model = dict( + type='BlipCaption', + vision_encoder=dict( + type='VisionTransformer', + arch='b', + img_size=384, + patch_size=16, + out_type='raw', + ), + tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'), + decoder_head=dict( + type='SeqGenerationHead', + decoder=dict( + type='XBertLMHeadDecoder', + med_config=dict( + architectures=['BertModel'], + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + layer_norm_eps=1e-12, + max_position_embeddings=512, + model_type='bert', + num_attention_heads=12, + num_hidden_layers=12, + pad_token_id=0, + add_type_embeddings=False, + vocab_size=30524, + encoder_width=768, + add_cross_attention=True), + ), + ), + prompt='a picture of ', + max_txt_len=20, +) + +val_cfg = dict() +test_cfg = dict() diff --git a/configs/blip/blip-base_8xb32_ocrvqa.py b/configs/blip/blip-base_8xb32_ocrvqa.py new file mode 100644 index 0000000..117d597 --- /dev/null +++ b/configs/blip/blip-base_8xb32_ocrvqa.py @@ -0,0 +1,75 @@ +_base_ = [ + '../_base_/datasets/ocrvqa.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='BlipVQA', + tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'), + vision_backbone=dict( + type='VisionTransformer', + arch='b', + img_size=480, + patch_size=16, + out_type='raw'), + multimodal_backbone=dict( + type='XBertEncoder', + med_config=dict( + architectures=['BertModel'], + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + layer_norm_eps=1e-12, + max_position_embeddings=512, + model_type='bert', + num_attention_heads=12, + num_hidden_layers=12, + pad_token_id=0, + add_type_embeddings=False, + vocab_size=30524, + encoder_width=768, + add_cross_attention=True), + ), + head=dict( + type='VQAGenerationHead', + decoder=dict( + type='XBertLMHeadDecoder', + med_config=dict( + architectures=['BertModel'], + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + layer_norm_eps=1e-12, + max_position_embeddings=512, + model_type='bert', + num_attention_heads=12, + num_hidden_layers=12, + pad_token_id=0, + add_type_embeddings=False, + vocab_size=30524, + encoder_width=768, + add_cross_attention=True), + ), + inference_method='generate', + ), +) + +# schedule settings +optimizer = dict(type='AdamW', lr=2e-5, weight_decay=0.05) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) + +param_scheduler = [dict(type='CosineAnnealingLR', by_epoch=True)] + +train_cfg = dict(max_epochs=10, by_epoch=True) +val_cfg = dict() +test_cfg = dict() + +# runtime settings +randomness = dict(seed=42) diff --git a/configs/blip/blip-base_8xb32_okvqa.py b/configs/blip/blip-base_8xb32_okvqa.py new file mode 100644 index 0000000..548775c --- /dev/null +++ b/configs/blip/blip-base_8xb32_okvqa.py @@ -0,0 +1,75 @@ +_base_ = [ + '../_base_/datasets/coco_okvqa.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='BlipVQA', + tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'), + vision_backbone=dict( + type='VisionTransformer', + arch='b', + img_size=480, + patch_size=16, + out_type='raw'), + multimodal_backbone=dict( + type='XBertEncoder', + med_config=dict( + architectures=['BertModel'], + attention_probs_dropout_prob=0.1, + hidden_act='gelu', 
+ hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + layer_norm_eps=1e-12, + max_position_embeddings=512, + model_type='bert', + num_attention_heads=12, + num_hidden_layers=12, + pad_token_id=0, + add_type_embeddings=False, + vocab_size=30524, + encoder_width=768, + add_cross_attention=True), + ), + head=dict( + type='VQAGenerationHead', + decoder=dict( + type='XBertLMHeadDecoder', + med_config=dict( + architectures=['BertModel'], + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + layer_norm_eps=1e-12, + max_position_embeddings=512, + model_type='bert', + num_attention_heads=12, + num_hidden_layers=12, + pad_token_id=0, + add_type_embeddings=False, + vocab_size=30524, + encoder_width=768, + add_cross_attention=True), + ), + inference_method='generate', + ), +) + +# schedule settings +optimizer = dict(type='AdamW', lr=2e-5, weight_decay=0.05) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) + +param_scheduler = [dict(type='CosineAnnealingLR', by_epoch=True)] + +train_cfg = dict(max_epochs=10, by_epoch=True) +val_cfg = dict() +test_cfg = dict() + +# runtime settings +randomness = dict(seed=42) diff --git a/configs/blip/blip-base_8xb32_retrieval.py b/configs/blip/blip-base_8xb32_retrieval.py new file mode 100644 index 0000000..645f88f --- /dev/null +++ b/configs/blip/blip-base_8xb32_retrieval.py @@ -0,0 +1,83 @@ +_base_ = [ + '../_base_/datasets/coco_retrieval.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='BlipRetrieval', + tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'), + vision_backbone=dict( + type='VisionTransformer', + arch='b', + img_size=384, + patch_size=16, + out_type='raw', + ), + text_backbone=dict( + type='XBertEncoder', + med_config=dict( + architectures=['BertModel'], + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + layer_norm_eps=1e-12, + max_position_embeddings=512, + model_type='bert', + num_attention_heads=12, + num_hidden_layers=12, + pad_token_id=0, + add_type_embeddings=False, + vocab_size=30524, + encoder_width=768, + add_cross_attention=True), + ), + vision_neck=dict( + type='Linear', + in_features=768, + out_features=256, + ), + text_neck=dict( + type='Linear', + in_features=768, + out_features=256, + ), + head=dict( + type='ITCHead', + embed_dim=256, + ), + multimodal_head=dict( + type='ITMHead', + hidden_size=768, + with_pooler=False, + ), + topk=256, + max_txt_len=35, +) + +# optimizer +optimizer = dict(type='AdamW', lr=2e-5, weight_decay=0.04) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) + +# learning rate scheduler +param_scheduler = [dict(type='CosineAnnealingLR', by_epoch=True)] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=6) +val_cfg = dict(type='RetrievalValLoop') +test_cfg = dict(type='RetrievalTestLoop') + +randomness = dict(seed=42) + +default_hooks = dict(logger=dict(interval=1)) + +custom_hooks = [ + dict( + type='WarmupParamHook', + param_name='alpha', + module_name='head', + warmup_epochs=2) +] diff --git a/configs/blip/blip-base_8xb32_retrieval_flickr30k.py b/configs/blip/blip-base_8xb32_retrieval_flickr30k.py new file mode 100644 index 0000000..0d2e78e --- /dev/null +++ b/configs/blip/blip-base_8xb32_retrieval_flickr30k.py @@ -0,0 +1,83 @@ +_base_ = [ + 
'../_base_/datasets/flickr30k_retrieval.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='BlipRetrieval', + tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'), + vision_backbone=dict( + type='VisionTransformer', + arch='b', + img_size=384, + patch_size=16, + out_type='raw', + ), + text_backbone=dict( + type='XBertEncoder', + med_config=dict( + architectures=['BertModel'], + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + layer_norm_eps=1e-12, + max_position_embeddings=512, + model_type='bert', + num_attention_heads=12, + num_hidden_layers=12, + pad_token_id=0, + add_type_embeddings=False, + vocab_size=30524, + encoder_width=768, + add_cross_attention=True), + ), + vision_neck=dict( + type='Linear', + in_features=768, + out_features=256, + ), + text_neck=dict( + type='Linear', + in_features=768, + out_features=256, + ), + head=dict( + type='ITCHead', + embed_dim=256, + ), + multimodal_head=dict( + type='ITMHead', + hidden_size=768, + with_pooler=False, + ), + topk=256, + max_txt_len=35, +) + +# optimizer +optimizer = dict(type='AdamW', lr=2e-5, weight_decay=0.04) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) + +# learning rate scheduler +param_scheduler = [dict(type='CosineAnnealingLR', by_epoch=True)] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=6) +val_cfg = dict(type='RetrievalValLoop') +test_cfg = dict(type='RetrievalTestLoop') + +randomness = dict(seed=42) + +default_hooks = dict(logger=dict(interval=1)) + +custom_hooks = [ + dict( + type='WarmupParamHook', + param_name='alpha', + module_name='head', + warmup_epochs=2) +] diff --git a/configs/blip/blip-base_8xb32_vqa.py b/configs/blip/blip-base_8xb32_vqa.py new file mode 100644 index 0000000..2aa3f25 --- /dev/null +++ b/configs/blip/blip-base_8xb32_vqa.py @@ -0,0 +1,76 @@ +_base_ = [ + '../_base_/datasets/coco_vg_vqa.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='BlipVQA', + tokenizer=dict(type='BlipTokenizer', name_or_path='bert-base-uncased'), + vision_backbone=dict( + type='VisionTransformer', + arch='b', + img_size=480, + patch_size=16, + out_type='raw'), + multimodal_backbone=dict( + type='XBertEncoder', + med_config=dict( + architectures=['BertModel'], + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + layer_norm_eps=1e-12, + max_position_embeddings=512, + model_type='bert', + num_attention_heads=12, + num_hidden_layers=12, + pad_token_id=0, + add_type_embeddings=False, + vocab_size=30524, + encoder_width=768, + add_cross_attention=True), + ), + head=dict( + type='VQAGenerationHead', + decoder=dict( + type='XBertLMHeadDecoder', + med_config=dict( + architectures=['BertModel'], + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + layer_norm_eps=1e-12, + max_position_embeddings=512, + model_type='bert', + num_attention_heads=12, + num_hidden_layers=12, + pad_token_id=0, + add_type_embeddings=False, + vocab_size=30524, + encoder_width=768, + add_cross_attention=True), + ), + inference_method='rank', # or 'generate' + answer_list_path= + 'https://storage.googleapis.com/sfr-vision-language-research/datasets/answer_list.json', # noqa: E501 + ), +) + +# schedule settings +optimizer = 
dict(type='AdamW', lr=2e-5, weight_decay=0.05) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) + +param_scheduler = [dict(type='CosineAnnealingLR', by_epoch=True)] + +train_cfg = dict(max_epochs=10, by_epoch=True) +test_cfg = dict() + +# runtime settings +randomness = dict(seed=42) diff --git a/configs/blip/metafile.yml b/configs/blip/metafile.yml new file mode 100644 index 0000000..8877e81 --- /dev/null +++ b/configs/blip/metafile.yml @@ -0,0 +1,99 @@ +Collections: + - Name: BLIP + Metadata: + Training Data: + - COCO + - VG + - Conceptual Captions + - Conceptual 12M + - SBU captions + Architecture: + - Transformer + Training Resources: 8x A100 GPUs + Paper: + Title: 'BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language + Understanding and Generation' + URL: https://arxiv.org/abs/2201.12086 + README: configs/blip/README.md + +Models: + - Name: blip-base_8xb16_refcoco + Metadata: + FLOPs: null + Parameters: 498488636 + In Collection: BLIP + Results: + - Task: Visual Grounding + Dataset: RefCOCO + Metrics: + Accuracy (testA): 86.14 + Accuracy (testB): 77.33 + Weights: https://download.openmmlab.com/mmclassification/v1/blip/blip-base_8xb16_refcoco_20230508-d2d10f4c.pth + Config: configs/blip/blip-base_8xb16_refcoco.py + - Name: blip-base_3rdparty_caption + Metadata: + FLOPs: null + Parameters: 223971644 + In Collection: BLIP + Results: + - Dataset: COCO + Task: Image Caption + Metrics: + BLEU-4: 40.12 + CIDER: 132.82 + Weights: https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_coco-caption_20230419-a5b71af3.pth + Config: configs/blip/blip-base_8xb32_caption.py + Converted From: + Weights: https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP/blip_coco_caption_base.pth + Code: https://github.com/salesforce/LAVIS + - Name: blip-base_3rdparty_nlvr + Metadata: + FLOPs: null + Parameters: 259372034 + In Collection: BLIP + Results: + - Task: NLVR + Dataset: NLVR2 + Metrics: + Top 1 Accuracy: 82.33 + Weights: https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_nlvr_20230427-3b14d33f.pth + Config: configs/blip/blip-base_8xb32_nlvr.py + Converted From: + Weights: https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_nlvr.pth + Code: https://github.com/salesforce/LAVIS + - Name: blip-base_3rdparty_vqa + Metadata: + FLOPs: null + Parameters: 361478972 + In Collection: BLIP + Results: + - Task: Visual Question Answering + Dataset: VQAv2 + Metrics: + Accuracy: 78.2 + Weights: https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty-capflit_vqa_20230505-81488941.pth + Config: configs/blip/blip-base_8xb32_vqa.py + Converted From: + Weights: https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth + Code: https://github.com/salesforce/LAVIS + - Name: blip-base_3rdparty_retrieval + Metadata: + FLOPs: null + Parameters: 447486979 + In Collection: BLIP + Results: + - Task: Image-To-Text Retrieval + Dataset: COCO + Metrics: + Recall@1: 82.52 + Recall@5: 95.34 + - Task: Text-To-Image Retrieval + Dataset: COCO + Metrics: + Recall@1: 64.82 + Recall@5: 86.28 + Weights: https://download.openmmlab.com/mmclassification/v1/blip/blip-base_3rdparty_coco-retrieval_20230419-a1804d2c.pth + Config: configs/blip/blip-base_8xb32_retrieval.py + Converted From: + Weights: https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP/blip_coco_retrieval.pth + Code: https://github.com/salesforce/LAVIS diff --git 
a/configs/blip2/README.md b/configs/blip2/README.md new file mode 100644 index 0000000..68ce679 --- /dev/null +++ b/configs/blip2/README.md @@ -0,0 +1,74 @@ +# BLIP-2 + +> [BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](http://arxiv.org/abs/2301.12597) + + + +## Abstract + +The cost of vision-and-language pre-training has become increasingly prohibitive due to end-to-end training of large-scale models. This paper proposes BLIP-2, a generic and efficient pre-training strategy that bootstraps vision-language pre-training from off-the-shelf frozen pre-trained image encoders and frozen large language models. BLIP-2 bridges the modality gap with a lightweight Querying Transformer, which is pre-trained in two stages. The first stage bootstraps vision-language representation learning from a frozen image encoder. The second stage bootstraps vision-to-language generative learning from a frozen language model. BLIP-2 achieves state-of-the-art performance on various vision-language tasks, despite having significantly fewer trainable parameters than existing methods. For example, our model outperforms Flamingo80B by 8.7% on zero-shot VQAv2 with 54x fewer trainable parameters. We also demonstrate the model’s emerging capabilities of zero-shot image-to-text generation that can follow natural language instructions. + +
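+The following is a minimal, self-contained PyTorch sketch of the bridging idea
+described above, not the implementation used in this repo: a set of learnable query
+tokens cross-attends to frozen image features and is projected into the language
+model's embedding space. The sizes (1408-d vision features, 32 queries, 2560-d
+OPT-2.7B embeddings) follow the configs in this folder, while `nn.TransformerDecoder`
+merely stands in for the Q-Former's cross-attention blocks.
+
+```python
+import torch
+import torch.nn as nn
+
+
+class TinyQFormerBridge(nn.Module):
+    """Toy stand-in for the Q-Former bridge (illustrative only)."""
+
+    def __init__(self, num_queries=32, vision_dim=1408, hidden_dim=768, lm_dim=2560):
+        super().__init__()
+        # learnable query tokens, shared across all images
+        self.queries = nn.Parameter(torch.randn(1, num_queries, hidden_dim) * 0.02)
+        self.vision_proj = nn.Linear(vision_dim, hidden_dim)
+        layer = nn.TransformerDecoderLayer(
+            d_model=hidden_dim, nhead=12, batch_first=True)
+        self.blocks = nn.TransformerDecoder(layer, num_layers=2)
+        # project query outputs into the (frozen) language model's embedding space
+        self.lm_proj = nn.Linear(hidden_dim, lm_dim)
+
+    def forward(self, image_feats):
+        # image_feats: (B, num_patches, vision_dim) from a frozen image encoder
+        memory = self.vision_proj(image_feats)
+        queries = self.queries.expand(image_feats.size(0), -1, -1)
+        queries = self.blocks(tgt=queries, memory=memory)
+        # (B, num_queries, lm_dim): a soft visual prompt for the frozen LM
+        return self.lm_proj(queries)
+
+
+bridge = TinyQFormerBridge()
+fake_vit_tokens = torch.randn(2, 257, 1408)  # e.g. patch tokens from a frozen ViT
+print(bridge(fake_vit_tokens).shape)  # torch.Size([2, 32, 2560])
+```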
+ +
+ +## How to use it? + + + +**Use the model** + +```python +from mmpretrain import inference_model + +result = inference_model('blip2-opt2.7b_3rdparty-zeroshot_caption', 'demo/cat-dog.png') +print(result) +# {'pred_caption': 'a dog and a cat sitting on a blanket'} +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/blip2/blip2_8xb32_retrieval.py https://download.openmmlab.com/mmclassification/v1/blip2/blip2_3rdparty_pretrain_20230505-f7ef4390.pth +``` + + + +## Models and results + +### Image Caption on COCO + +| Model | Params (M) | BLEU-4 | CIDER | Config | Download | +| :------------------------------------------ | :--------: | :----: | :----: | :----------------------------------------: | :-------------------------------------------------------------------------------------------: | +| `blip2-opt2.7b_3rdparty-zeroshot_caption`\* | 3770.47 | 32.90 | 111.10 | [config](./blip2-opt2.7b_8xb32_caption.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip2/blip2-opt2.7b_3rdparty_pretrain_20230505-b51db4e1.pth) | + +### Visual Question Answering on VQAv2 + +| Model | Params (M) | Accuracy | Config | Download | +| :-------------------------------------- | :--------: | :------: | :------------------------------------: | :-------------------------------------------------------------------------------------------------------: | +| `blip2-opt2.7b_3rdparty-zeroshot_vqa`\* | 3770.47 | 53.50 | [config](./blip2-opt2.7b_8xb16_vqa.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip2/blip2-opt2.7b_3rdparty_pretrain_20230505-b51db4e1.pth) | + +### Image-To-Text Retrieval on COCO + +| Model | Params (M) | Recall@1 | Config | Download | +| :--------------------------- | :--------: | :------: | :----------------------------------: | :-------------------------------------------------------------------------------------------------------------: | +| `blip2_3rdparty_retrieval`\* | 1173.19 | 85.40 | [config](./blip2_8xb32_retrieval.py) | [model](https://download.openmmlab.com/mmclassification/v1/blip2/blip2_3rdparty_pretrain_20230505-f7ef4390.pth) | + +*Models with * are converted from the [official repo](https://github.com/salesforce/LAVIS). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{beitv2, + title={Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models}, + author={Li, Junnan and Li, Dongxu and Savarese, Silvio and Hoi, Steven}, + year={2023}, + eprint={2301.12597}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/blip2/blip2-opt2.7b_8xb16_gqa.py b/configs/blip2/blip2-opt2.7b_8xb16_gqa.py new file mode 100644 index 0000000..37fbd95 --- /dev/null +++ b/configs/blip2/blip2-opt2.7b_8xb16_gqa.py @@ -0,0 +1,87 @@ +_base_ = [ + '../_base_/datasets/gqa.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='Blip2VQA', + tokenizer=dict( + type='AutoTokenizer', name_or_path='facebook/opt-2.7b', + use_fast=False), + vision_backbone=dict( + type='BEiTViT', + # eva-g without the final layer + arch=dict( + embed_dims=1408, + num_layers=39, + num_heads=16, + feedforward_channels=6144, + ), + img_size=364, + patch_size=14, + out_indices=-2, + layer_scale_init_value=0.0, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + frozen_stages=39, + final_norm=False, + use_shared_rel_pos_bias=False, + out_type='raw'), + text_backbone=dict( + type='OPTForCausalLM', name_or_path='facebook/opt-2.7b'), + multimodal_backbone=dict( + type='Qformer', + model_style='bert-base-uncased', + vision_model_width=1408, + add_cross_attention=True, + cross_attention_freq=2, + num_query_token=32), + vision_neck=dict( + type='LinearClsHead', + in_channels=768, + num_classes=2560, + ), + prompt='Question: {} Short Answer:', + max_txt_len=10) + +# data settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='PackInputs', algorithm_keys=['question', 'gt_answer']), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(224, 224), + interpolation='bicubic', + backend='pillow'), + dict( + type='CleanCaption', + keys=['question'], + ), + dict(type='PackInputs', algorithm_keys=['question', 'gt_answer']), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# schedule settings +optim_wrapper = dict(optimizer=dict(type='AdamW', lr=1e-5, weight_decay=0.05)) + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + by_epoch=True, + begin=0, + end=10, + ) +] + +train_cfg = dict(max_epochs=10) +val_cfg = dict() +test_cfg = dict() diff --git a/configs/blip2/blip2-opt2.7b_8xb16_vqa.py b/configs/blip2/blip2-opt2.7b_8xb16_vqa.py new file mode 100644 index 0000000..13a808d --- /dev/null +++ b/configs/blip2/blip2-opt2.7b_8xb16_vqa.py @@ -0,0 +1,95 @@ +_base_ = [ + '../_base_/datasets/coco_vqa.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='Blip2VQA', + tokenizer=dict( + type='AutoTokenizer', name_or_path='facebook/opt-2.7b', + use_fast=False), + vision_backbone=dict( + type='BEiTViT', + # eva-g without the final layer + arch=dict( + embed_dims=1408, + num_layers=39, + num_heads=16, + feedforward_channels=6144, + ), + img_size=364, + patch_size=14, + out_indices=-2, + layer_scale_init_value=0.0, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + frozen_stages=39, + final_norm=False, + use_shared_rel_pos_bias=False, + out_type='raw'), + text_backbone=dict( + type='OPTForCausalLM', name_or_path='facebook/opt-2.7b'), + multimodal_backbone=dict( + type='Qformer', + model_style='bert-base-uncased', + 
vision_model_width=1408, + add_cross_attention=True, + cross_attention_freq=2, + num_query_token=32), + vision_neck=dict( + type='LinearClsHead', + in_channels=768, + num_classes=2560, + ), + prompt='Question: {} Answer:', + max_txt_len=10) + +# data settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=['question_id', 'image_id'], + ), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(224, 224), + interpolation='bicubic', + backend='pillow'), + dict( + type='CleanCaption', + keys=['question'], + ), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight'], + meta_keys=['question_id', 'image_id'], + ), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# schedule settings +optim_wrapper = dict(optimizer=dict(type='AdamW', lr=1e-5, weight_decay=0.05)) + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + by_epoch=True, + begin=0, + end=10, + ) +] + +train_cfg = dict(max_epochs=10) +val_cfg = dict() +test_cfg = dict() diff --git a/configs/blip2/blip2-opt2.7b_8xb32_caption.py b/configs/blip2/blip2-opt2.7b_8xb32_caption.py new file mode 100644 index 0000000..52d0a63 --- /dev/null +++ b/configs/blip2/blip2-opt2.7b_8xb32_caption.py @@ -0,0 +1,76 @@ +_base_ = [ + '../_base_/datasets/coco_caption.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='Blip2Caption', + tokenizer=dict( + type='AutoTokenizer', name_or_path='facebook/opt-2.7b', + use_fast=False), + vision_backbone=dict( + type='BEiTViT', + # eva-g without the final layer + arch=dict( + embed_dims=1408, + num_layers=39, + num_heads=16, + feedforward_channels=6144, + ), + img_size=364, + patch_size=14, + out_indices=-2, + layer_scale_init_value=0.0, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + frozen_stages=39, + final_norm=False, + use_shared_rel_pos_bias=False, + out_type='raw'), + text_backbone=dict( + type='OPTForCausalLM', name_or_path='facebook/opt-2.7b'), + multimodal_backbone=dict( + type='Qformer', + model_style='bert-base-uncased', + vision_model_width=1408, + add_cross_attention=True, + cross_attention_freq=2, + num_query_token=32), + vision_neck=dict( + type='LinearClsHead', + in_channels=768, + num_classes=2560, + ), + prompt='a photo of', + max_txt_len=30) + +# schedule settings +optim_wrapper = dict(optimizer=dict(type='AdamW', lr=1e-5, weight_decay=0.05)) + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + by_epoch=True, + begin=0, + end=10, + ) +] + +train_cfg = dict(by_epoch=True, max_epochs=10) +val_cfg = dict() +test_cfg = dict() + +# dataset settings +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(364, 364), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id']), +] + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader diff --git a/configs/blip2/blip2_8xb32_retrieval.py b/configs/blip2/blip2_8xb32_retrieval.py new file mode 100644 index 0000000..75cb66c --- /dev/null +++ b/configs/blip2/blip2_8xb32_retrieval.py @@ -0,0 +1,82 @@ +_base_ = [ + '../_base_/datasets/coco_retrieval.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='Blip2Retrieval', + 
tokenizer=dict(type='Blip2Tokenizer', name_or_path='bert-base-uncased'), + vision_backbone=dict( + type='BEiTViT', + # eva-g without the final layer + arch=dict( + embed_dims=1408, + num_layers=39, + num_heads=16, + feedforward_channels=6144, + ), + img_size=364, + patch_size=14, + layer_scale_init_value=0.0, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + final_norm=False, + use_shared_rel_pos_bias=False, + out_type='raw'), + multimodal_backbone=dict( + type='Qformer', + model_style='bert-base-uncased', + vision_model_width=1408, + add_cross_attention=True, + cross_attention_freq=2, + num_query_token=32), + vision_neck=dict( + type='LinearClsHead', + in_channels=768, + num_classes=256, + ), + text_neck=dict( + type='LinearClsHead', + in_channels=768, + num_classes=256, + ), + multimodal_head=dict( + type='ITMHead', + hidden_size=768, + with_pooler=False, + ), + topk=128, + max_txt_len=35, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(364, 364), + interpolation='bicubic', + backend='pillow'), + dict(type='CleanCaption', keys='text'), + dict( + type='PackInputs', + algorithm_keys=['text', 'gt_text_id', 'gt_image_id'], + meta_keys=['image_id']), +] + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer +optimizer = dict(type='AdamW', lr=2e-5, weight_decay=0.04) +optim_wrapper = dict(type='OptimWrapper', optimizer=optimizer) + +# learning rate scheduler +param_scheduler = [dict(type='CosineAnnealingLR', by_epoch=True)] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=6) +val_cfg = dict(type='RetrievalValLoop') +test_cfg = dict(type='RetrievalTestLoop') + +randomness = dict(seed=42) diff --git a/configs/blip2/metafile.yml b/configs/blip2/metafile.yml new file mode 100644 index 0000000..b822103 --- /dev/null +++ b/configs/blip2/metafile.yml @@ -0,0 +1,71 @@ +Collections: + - Name: BLIP-2 + Metadata: + Training Data: + - COCO + - VG + - CC3M + - CC12M + - SBU + - LAION-400M + Training Resources: 8x A100 GPUs + Architecture: + - Transformer + - Q-Former + Paper: + Title: 'BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image + Encoders and Large Language Models' + URL: https://arxiv.org/abs/2301.12597 + README: configs/blip2/README.md + +Models: + - Name: blip2_3rdparty_retrieval + Metadata: + FLOPs: null + Parameters: 1173191358 + In Collection: BLIP-2 + Results: + - Task: Image-To-Text Retrieval + Dataset: COCO + Metrics: + Recall@1: 85.4 + - Task: Text-To-Image Retrieval + Dataset: COCO + Metrics: + Recall@1: 68.3 + Weights: https://download.openmmlab.com/mmclassification/v1/blip2/blip2_3rdparty_pretrain_20230505-f7ef4390.pth + Config: configs/blip2/blip2_8xb32_retrieval.py + Converted From: + Weights: https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_opt2.7b.pth + Code: https://github.com/salesforce/LAVIS + - Name: blip2-opt2.7b_3rdparty-zeroshot_vqa + Metadata: + FLOPs: null + Parameters: 3770465152 + In Collection: BLIP-2 + Results: + - Task: Visual Question Answering + Dataset: VQAv2 + Metrics: + Accuracy: 53.5 + Weights: https://download.openmmlab.com/mmclassification/v1/blip2/blip2-opt2.7b_3rdparty_pretrain_20230505-b51db4e1.pth + Config: configs/blip2/blip2-opt2.7b_8xb16_vqa.py + Converted From: + Weights: https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_opt2.7b.pth + Code: https://github.com/salesforce/LAVIS + - Name: 
blip2-opt2.7b_3rdparty-zeroshot_caption + Metadata: + FLOPs: null + Parameters: 3770465152 + In Collection: BLIP-2 + Results: + - Task: Image Caption + Dataset: COCO + Metrics: + BLEU-4: 32.90 + CIDER: 111.10 + Weights: https://download.openmmlab.com/mmclassification/v1/blip2/blip2-opt2.7b_3rdparty_pretrain_20230505-b51db4e1.pth + Config: configs/blip2/blip2-opt2.7b_8xb32_caption.py + Converted From: + Weights: https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_opt2.7b.pth + Code: https://github.com/salesforce/LAVIS diff --git a/configs/byol/README.md b/configs/byol/README.md new file mode 100644 index 0000000..2bfc8d0 --- /dev/null +++ b/configs/byol/README.md @@ -0,0 +1,85 @@ +# BYOL + +> [Bootstrap your own latent: A new approach to self-supervised Learning](https://arxiv.org/abs/2006.07733) + + + +## Abstract + +**B**ootstrap **Y**our **O**wn **L**atent (BYOL) is a new approach to self-supervised image representation learning. BYOL relies on two neural networks, referred to as online and target networks, that interact and learn from each other. From an augmented view of an image, we train the online network to predict the target network representation of the same image under a different augmented view. At the same time, we update the target network with a slow-moving average of the online network. + +
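+The snippet below is a toy sketch of the online/target interplay described above, not
+the `BYOL` model implemented in this repo: the target network receives no gradients
+and instead tracks a slow-moving average of the online network. It assumes the
+convention suggested by `base_momentum=0.01` in the config below, i.e. `momentum` is
+the fraction taken from the online weights at each step, and it omits the projector
+and predictor heads of the real model.
+
+```python
+import copy
+
+import torch
+import torch.nn.functional as F
+
+
+def ema_update(online, target, momentum=0.01):
+    # target <- (1 - momentum) * target + momentum * online
+    with torch.no_grad():
+        for p_o, p_t in zip(online.parameters(), target.parameters()):
+            p_t.mul_(1.0 - momentum).add_(p_o, alpha=momentum)
+
+
+def byol_loss(prediction, target_projection):
+    # negative cosine similarity, written in the usual 2 - 2 * cos form
+    return 2 - 2 * F.cosine_similarity(prediction, target_projection, dim=-1).mean()
+
+
+online = torch.nn.Linear(8, 4)   # stand-in for backbone + neck (+ predictor)
+target = copy.deepcopy(online)   # the target starts as a copy and is never trained directly
+for p in target.parameters():
+    p.requires_grad_(False)
+
+view1, view2 = torch.randn(16, 8), torch.randn(16, 8)  # two augmented views
+loss = byol_loss(online(view1), target(view2))
+loss.backward()
+ema_update(online, target)
+```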
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('resnet50_byol-pre_8xb512-linear-coslr-90e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('byol_resnet50_16xb256-coslr-200e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/byol/byol_resnet50_16xb256-coslr-200e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/byol/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-7596c6f5.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :-------------------------------------- | :--------: | :-------: | :------------------------------------------------: | :------------------------------------------------------------------------------------------: | +| `byol_resnet50_16xb256-coslr-200e_in1k` | 68.02 | 4.11 | [config](byol_resnet50_16xb256-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `resnet50_byol-pre_8xb512-linear-coslr-90e_in1k` | [BYOL](https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth) | 25.56 | 4.11 | 71.80 | [config](benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-7596c6f5.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-7596c6f5.json) | + +## Citation + +```bibtex +@inproceedings{grill2020bootstrap, + title={Bootstrap your own latent: A new approach to self-supervised learning}, + author={Grill, Jean-Bastien and Strub, Florian and Altch{\'e}, Florent and Tallec, Corentin and Richemond, Pierre H and Buchatskaya, Elena and Doersch, Carl and Pires, Bernardo Avila and Guo, Zhaohan Daniel and Azar, Mohammad Gheshlaghi and others}, + booktitle={NeurIPS}, + year={2020} +} +``` diff --git a/configs/byol/benchmarks/mask-rcnn_r50-c4_ms-1x_coco.py b/configs/byol/benchmarks/mask-rcnn_r50-c4_ms-1x_coco.py new file mode 100644 index 
0000000..4949db1 --- /dev/null +++ b/configs/byol/benchmarks/mask-rcnn_r50-c4_ms-1x_coco.py @@ -0,0 +1,46 @@ +_base_ = 'mmdet::mask_rcnn/mask-rcnn_r50-caffe-c4_1x_coco.py' +# https://github.com/open-mmlab/mmdetection/blob/dev-3.x/configs/mask_rcnn/mask-rcnn_r50-caffe-c4_1x_coco.py + +data_preprocessor = dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_mask=True, + pad_size_divisor=32) + +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + data_preprocessor=data_preprocessor, + backbone=dict( + frozen_stages=-1, + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + roi_head=dict( + shared_head=dict( + type='ResLayerExtraNorm', + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch'))) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=1) + +custom_imports = dict( + imports=['mmpretrain.models.utils.res_layer_extra_norm'], + allow_failed_imports=False) diff --git a/configs/byol/benchmarks/mask-rcnn_r50_fpn_ms-1x_coco.py b/configs/byol/benchmarks/mask-rcnn_r50_fpn_ms-1x_coco.py new file mode 100644 index 0000000..1341f15 --- /dev/null +++ b/configs/byol/benchmarks/mask-rcnn_r50_fpn_ms-1x_coco.py @@ -0,0 +1,24 @@ +_base_ = 'mmdet::mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py' +# https://github.com/open-mmlab/mmdetection/blob/dev-3.x/configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py + +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + backbone=dict(frozen_stages=-1, norm_cfg=norm_cfg, norm_eval=False), + neck=dict(norm_cfg=norm_cfg), + roi_head=dict( + bbox_head=dict(type='Shared4Conv1FCBBoxHead', norm_cfg=norm_cfg), + mask_head=dict(norm_cfg=norm_cfg))) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', with_bbox=True, with_mask=True), + dict( + type='RandomChoiceResize', + scales=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), + (1333, 768), (1333, 800)], + keep_ratio=True), + dict(type='RandomFlip', prob=0.5), + dict(type='PackDetInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) diff --git a/configs/byol/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py b/configs/byol/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py new file mode 100644 index 0000000..2b5074c --- /dev/null +++ b/configs/byol/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py @@ -0,0 +1,18 @@ +_base_ = [ + '../../_base_/models/resnet50.py', + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_lars_coslr_90e.py', + '../../_base_/default_runtime.py', +] + +model = dict( + backbone=dict( + frozen_stages=4, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.'))) + +# dataset summary +train_dataloader = dict(batch_size=512) + +# runtime settings +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) diff --git a/configs/byol/byol_resnet50_16xb256-coslr-200e_in1k.py b/configs/byol/byol_resnet50_16xb256-coslr-200e_in1k.py new file mode 100644 index 0000000..8dd3fd8 
--- /dev/null +++ b/configs/byol/byol_resnet50_16xb256-coslr-200e_in1k.py @@ -0,0 +1,60 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_byol.py', + '../_base_/schedules/imagenet_lars_coslr_200e.py', + '../_base_/default_runtime.py', +] + +train_dataloader = dict(batch_size=256) + +# model settings +model = dict( + type='BYOL', + base_momentum=0.01, + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=False), + neck=dict( + type='NonLinearNeck', + in_channels=2048, + hid_channels=4096, + out_channels=256, + num_layers=2, + with_bias=True, + with_last_bn=False, + with_avg_pool=True), + head=dict( + type='LatentPredictHead', + predictor=dict( + type='NonLinearNeck', + in_channels=256, + hid_channels=4096, + out_channels=256, + num_layers=2, + with_bias=True, + with_last_bn=False, + with_avg_pool=False), + loss=dict(type='CosineSimilarityLoss')), +) + +# optimizer +optimizer = dict(type='LARS', lr=4.8, momentum=0.9, weight_decay=1e-6) +optim_wrapper = dict( + type='OptimWrapper', + optimizer=optimizer, + paramwise_cfg=dict( + custom_keys={ + 'bn': dict(decay_mult=0, lars_exclude=True), + 'bias': dict(decay_mult=0, lars_exclude=True), + # bn layer in ResNet block downsample module + 'downsample.1': dict(decay_mult=0, lars_exclude=True), + }), +) + +# runtime settings +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/byol/metafile.yml b/configs/byol/metafile.yml new file mode 100644 index 0000000..09aacad --- /dev/null +++ b/configs/byol/metafile.yml @@ -0,0 +1,44 @@ +Collections: + - Name: BYOL + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - LARS + Training Resources: 8x V100 GPUs (b256), 16x A100-80G GPUs (b4096) + Architecture: + - ResNet + - BYOL + Paper: + Title: 'Bootstrap your own latent: A new approach to self-supervised Learning' + URL: https://arxiv.org/abs/2006.07733 + README: configs/byol/README.md + +Models: + - Name: byol_resnet50_16xb256-coslr-200e_in1k + Metadata: + Epochs: 200 + Batch Size: 4096 + FLOPs: 4109364224 + Parameters: 68024448 + Training Data: ImageNet-1k + In Collection: BYOL + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth + Config: configs/byol/byol_resnet50_16xb256-coslr-200e_in1k.py + Downstream: + - resnet50_byol-pre_8xb512-linear-coslr-90e_in1k + - Name: resnet50_byol-pre_8xb512-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 4096 + FLOPs: 4109464576 + Parameters: 25557032 + Training Data: ImageNet-1k + In Collection: BYOL + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 71.8 + Weights: https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-7596c6f5.pth + Config: configs/byol/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py diff --git a/configs/cae/README.md b/configs/cae/README.md new file mode 100644 index 0000000..dc1c818 --- /dev/null +++ b/configs/cae/README.md @@ -0,0 +1,86 @@ +# CAE + +> [Context Autoencoder for Self-Supervised Representation Learning](https://arxiv.org/abs/2202.03026) + + + +## Abstract + +We present a novel masked image modeling (MIM) approach, context autoencoder (CAE), for self-supervised 
learning. We randomly partition the image into two sets: visible patches and masked patches. The CAE architecture consists of: (i) an encoder that takes visible patches as input and outputs their latent representations, (ii) a latent context regressor that predicts the masked patch representations from the visible patch representations that are not updated in this regressor, (iii) a decoder that takes the estimated masked patch representations as input and makes predictions for the masked patches, and (iv) an alignment module that aligns the masked patch representation estimation with the masked patch representations computed from the encoder. In comparison to previous MIM methods that couple the encoding and decoding roles, e.g., using a single module in BEiT, our approach attempts to separate the encoding role (content understanding) from the decoding role (making predictions for masked patches) using different modules, improving the content understanding capability. In addition, our approach makes predictions from the visible patches to the masked patches in the latent representation space that is expected to take on semantics. In addition, we present the explanations about why contrastive pretraining and supervised pretraining perform similarly and why MIM potentially performs better. We demonstrate the effectiveness of our CAE through superior transfer performance in downstream tasks: semantic segmentation, and object detection and instance segmentation. + +
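+As a toy illustration of the visible/masked split described above (and not the
+pipeline used in this repo), the snippet below masks 75 of the 14x14 = 196 patches
+uniformly at random; the actual config further down uses `BEiTMaskGenerator`, which
+samples block-wise masks rather than independent patches.
+
+```python
+import torch
+
+
+def random_patch_mask(batch_size=2, num_patches=196, num_masked=75):
+    """Return a boolean mask where True marks a masked patch."""
+    noise = torch.rand(batch_size, num_patches)
+    ids = noise.argsort(dim=1)  # a random permutation of patch indices per sample
+    mask = torch.zeros(batch_size, num_patches, dtype=torch.bool)
+    rows = torch.arange(batch_size).unsqueeze(1)
+    mask[rows, ids[:, :num_masked]] = True
+    return mask
+
+
+mask = random_patch_mask()
+print(mask.shape, mask.sum(dim=1))  # each sample masks exactly 75 patches
+```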
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('beit-base-p16_cae-pre_8xb128-coslr-100e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('cae_beit-base-p16_8xb256-amp-coslr-300e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/cae/cae_beit-base-p16_8xb256-amp-coslr-300e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/cae/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_16xb128-fp16-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k_20220825-f3d234cd.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :--------------------------------------------- | :--------: | :-------: | :-------------------------------------------------------: | :----------------------------------------------------------------------------: | +| `cae_beit-base-p16_8xb256-amp-coslr-300e_in1k` | 288.43 | 17.58 | [config](cae_beit-base-p16_8xb256-amp-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221230-808170f3.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221230-808170f3.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `beit-base-p16_cae-pre_8xb128-coslr-100e_in1k` | [CAE](https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221230-808170f3.pth) | 86.68 | 17.58 | 83.20 | [config](benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_16xb128-fp16-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k_20220825-f3d234cd.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_16xb128-fp16-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k_20220825-f3d234cd.json) | + +## Citation + +```bibtex +@article{CAE, + title={Context Autoencoder for Self-Supervised Representation Learning}, + author={Xiaokang Chen, Mingyu Ding, Xiaodi Wang, Ying Xin, Shentong Mo, + Yunhao Wang, Shumin Han, Ping Luo, Gang Zeng, Jingdong Wang}, + journal={ArXiv}, + year={2022} +} +``` diff --git a/configs/cae/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py b/configs/cae/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py new file mode 100644 index 0000000..e7083ce --- 
/dev/null +++ b/configs/cae/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py @@ -0,0 +1,130 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] +# CAE fine-tuning setting + +# dataset +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline), batch_size=128) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline), batch_size=128) + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='BEiTViT', + arch='base', + img_size=224, + patch_size=16, + final_norm=False, # do not use final norm + drop_path_rate=0.1, + layer_scale_init_value=0.1, + out_type='avg_featmap', + use_abs_pos_emb=True, + use_rel_pos_bias=True, + use_shared_rel_pos_bias=False, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=dict(type='TruncNormal', layer='Linear', std=2e-5)), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# optimizer wrapper +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=8e-3, betas=(0.9, 0.999), weight_decay=0.05), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.65, + custom_keys={ + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=95, + by_epoch=True, + begin=5, + end=100, + eta_min=1e-6, + convert_to_iter_based=True) +] + +default_hooks = dict( + # save checkpoint per epoch. 
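+    # max_keep_ckpts=3 keeps only the three most recent checkpoints on disk.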
+ checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +train_cfg = dict(by_epoch=True, max_epochs=100) + +randomness = dict(seed=0) diff --git a/configs/cae/cae_beit-base-p16_8xb256-amp-coslr-300e_in1k.py b/configs/cae/cae_beit-base-p16_8xb256-amp-coslr-300e_in1k.py new file mode 100644 index 0000000..725b0f0 --- /dev/null +++ b/configs/cae/cae_beit-base-p16_8xb256-amp-coslr-300e_in1k.py @@ -0,0 +1,115 @@ +_base_ = '../_base_/default_runtime.py' + +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='TwoNormDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + second_mean=[-31.875, -31.875, -31.875], + second_std=[318.75, 318.75, 318.75], + to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomFlip', prob=0.5), + dict( + type='RandomResizedCropAndInterpolationWithTwoPic', + size=224, + second_size=112, + interpolation='bicubic', + second_interpolation='lanczos', + scale=(0.08, 1.0)), + dict( + type='BEiTMaskGenerator', + input_size=(14, 14), + num_masking_patches=75, + max_num_patches=None, + min_num_patches=16), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=256, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='meta/train.txt', + data_prefix=dict(img_path='train/'), + pipeline=train_pipeline)) + +# model settings +model = dict( + type='CAE', + backbone=dict( + type='CAEPretrainViT', + arch='b', + patch_size=16, + layer_scale_init_value=0.1, + bias='qv_bias'), + neck=dict( + type='CAENeck', + embed_dims=768, + num_heads=12, + regressor_depth=4, + decoder_depth=4, + mlp_ratio=4, + layer_scale_init_value=0.1, + ), + head=dict(type='CAEHead', loss=dict(type='CAELoss', lambd=2)), + target_generator=dict( + type='DALL-E', + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/dalle_encoder.pth', # noqa: E501 + )), + base_momentum=0.0) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', lr=1.5e-3, betas=(0.9, 0.999), weight_decay=0.05), + clip_grad=dict(max_norm=3.0), + paramwise_cfg=dict( + bias_decay_mult=0.0, norm_decay_mult=0.0, flat_decay_mult=0.0)) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=290, + eta_min=1e-5, + by_epoch=True, + begin=10, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
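+# (linear scaling rule: when auto LR scaling is enabled, the optimizer lr is
+# multiplied by the actual total batch size / 2048)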
+auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/cae/metafile.yml b/configs/cae/metafile.yml new file mode 100644 index 0000000..83f46f9 --- /dev/null +++ b/configs/cae/metafile.yml @@ -0,0 +1,43 @@ +Collections: + - Name: CAE + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + Training Resources: 8x A100-80G GPUs + Architecture: + - ViT + Paper: + Title: Context Autoencoder for Self-Supervised Representation Learning + URL: https://arxiv.org/abs/2202.03026 + README: configs/cae/README.md + +Models: + - Name: cae_beit-base-p16_8xb256-amp-coslr-300e_in1k + Metadata: + Epochs: 300 + Batch Size: 2048 + FLOPs: 17581976064 + Parameters: 288429952 + Training Data: ImageNet-1k + In Collection: CAE + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k/cae_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221230-808170f3.pth + Config: configs/cae/cae_beit-base-p16_8xb256-amp-coslr-300e_in1k.py + Downstream: + - beit-base-p16_cae-pre_8xb128-coslr-100e_in1k + - Name: beit-base-p16_cae-pre_8xb128-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 1024 + FLOPs: 17581219584 + Parameters: 86682280 + Training Data: ImageNet-1k + In Collection: CAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.2 + Weights: https://download.openmmlab.com/mmselfsup/1.x/cae/cae_vit-base-p16_16xb128-fp16-coslr-300e_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k/vit-base-p16_ft-8xb128-coslr-100e-rpe_in1k_20220825-f3d234cd.pth + Config: configs/cae/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py diff --git a/configs/chinese_clip/README.md b/configs/chinese_clip/README.md new file mode 100644 index 0000000..acb37e7 --- /dev/null +++ b/configs/chinese_clip/README.md @@ -0,0 +1,69 @@ +# ChineseCLIP + +> [Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese](https://arxiv.org/abs/2211.01335) + + + +## Abstract + +The tremendous success of CLIP (Radford et al., 2021) has promoted the research and application of contrastive learning for vision-language pretraining. In this work, we construct a large-scale dataset of image-text pairs in Chinese, where most data are retrieved from publicly available datasets, and we pretrain Chinese CLIP models on the new dataset. We develop 5 Chinese CLIP models of multiple sizes, spanning from 77 to 958 million parameters. Furthermore, we propose a two-stage pretraining method, where the model is first trained with the image encoder frozen and then trained with all parameters being optimized, to achieve enhanced model performance. Our comprehensive experiments demonstrate that Chinese CLIP can achieve the state-of-the-art performance on MUGE, Flickr30K-CN, and COCO-CN in the setups of zero-shot learning and finetuning, and it is able to achieve competitive performance in zero-shot image classification based on the evaluation on the ELEVATER benchmark (Li et al., 2022). We have released our codes, models, and demos in https://github.com/OFA-Sys/Chinese-CLIP + +
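+The sketch below illustrates, independently of this repo's code, how zero-shot
+classification with text prototypes works: each class name (or prompt) is embedded
+once by the text encoder, and an image is assigned to the class whose prototype has
+the highest cosine similarity. The 1024-d embedding size matches `proj_dim` in the
+ResNet-50 config below, and the fixed temperature merely stands in for the learned
+logit scale.
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+def zero_shot_logits(image_emb, text_emb, temperature=0.01):
+    # cosine similarity between L2-normalised image and class-prototype embeddings
+    image_emb = F.normalize(image_emb, dim=-1)
+    text_emb = F.normalize(text_emb, dim=-1)
+    return image_emb @ text_emb.t() / temperature
+
+
+image_emb = torch.randn(4, 1024)   # image features after the projection head
+text_emb = torch.randn(100, 1024)  # one embedded prompt per CIFAR-100 class
+print(zero_shot_logits(image_emb, text_emb).argmax(dim=-1))  # predicted class ids
+```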
+ +
+ +## How to use it? + + + +**Use the model for zero-shot classification** + +```python +from mmpretrain import ImageClassificationInferencer + +inferencer = ImageClassificationInferencer( + 'cn-clip_resnet50_zeroshot-cls_cifar100', + pretrained=True, + classes=['鸟', '狗', '猫', '蛇'], + text_prototype=['鸟', '狗', '猫', '蛇'], +) + +prediction = inferencer('./demo/bird.JPEG')[0] +print('Results:', prediction['pred_class']) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/chinese_clip/cn-clip_resnet50_zeroshot-cls_cifar100.py https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/cn-clip_resnet50_3rdparty_20230519-6a2b3eb2.pth +``` + + + +## Models and results + +### Image Classification on CIFAR100 + +| Model | Params (M) | Top-1 (%) | Config | Download | +| :---------------------------------------------- | :--------: | :-------: | :------------------------------------------------------: | :----------------------------------------------------------------------------: | +| `cn-clip_resnet50_zeroshot-cls_cifar100`\* | 77.00 | 40.70 | [config](cn-clip_resnet50_zeroshot-cls_cifar100.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/cn-clip_resnet50_3rdparty_20230519-6a2b3eb2.pth) | +| `cn-clip_vit-base-p16_zeroshot-cls_cifar100`\* | 188.00 | 64.50 | [config](cn-clip_vit-base-p16_zeroshot-cls_cifar100.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/cn-clip_vit-base-p16_3rdparty_20230519-37fbc59e.pth) | +| `cn-clip_vit-large-p14_zeroshot-cls_cifar100`\* | 406.00 | 74.80 | [config](cn-clip_vit-large-p14_zeroshot-cls_cifar100.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/cn-clip_vit-large-p14_3rdparty_20230519-3f844503.pth) | +| `cn-clip_vit-huge-p14_zeroshot-cls_cifar100`\* | 958.00 | 79.10 | [config](cn-clip_vit-huge-p14_zeroshot-cls_cifar100.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/cn-clip_vit-huge-p14_3rdparty_20230519-e4f49b00.pth) | + +*Models with * are converted from the [official repo](https://github.com/OFA-Sys/Chinese-CLIP). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{chinese-clip, + title={Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese}, + author={Yang, An and Pan, Junshu and Lin, Junyang and Men, Rui and Zhang, Yichang and Zhou, Jingren and Zhou, Chang}, + journal={arXiv preprint arXiv:2211.01335}, + year={2022} +} +``` diff --git a/configs/chinese_clip/cn-clip_resnet50_zeroshot-cls_cifar100.py b/configs/chinese_clip/cn-clip_resnet50_zeroshot-cls_cifar100.py new file mode 100644 index 0000000..e109a5b --- /dev/null +++ b/configs/chinese_clip/cn-clip_resnet50_zeroshot-cls_cifar100.py @@ -0,0 +1,72 @@ +_base_ = '../_base_/default_runtime.py' + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=False, +) + +test_pipeline = [ + dict(type='Resize', scale=(224, 224), interpolation='bicubic'), + dict( + type='PackInputs', + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='CIFAR100', + data_root='data/cifar100', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='Accuracy', topk=(1, )) + +# schedule settings +train_cfg = None +val_cfg = None +test_cfg = dict() + +# model settings +model = dict( + type='ChineseCLIP', + vision_backbone=dict( + type='ModifiedResNet', + depth=50, + base_channels=64, + input_size=224, + num_attn_heads=32, + output_dim=1024, + ), + text_backbone=dict( + type='BertModelCN', + config=dict( + vocab_size=21128, + pad_token_id=0, + add_type_embeddings=True, + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + max_position_embeddings=512, + num_attention_heads=12, + num_hidden_layers=3, + type_vocab_size=2, + layer_norm_eps=1e-12)), + tokenizer=dict( + type='FullTokenizer', + vocab_file= # noqa + 'https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/vocab.txt' + ), + proj_dim=1024, + text_prototype='cifar100', +) diff --git a/configs/chinese_clip/cn-clip_vit-base-p16_zeroshot-cls_cifar100.py b/configs/chinese_clip/cn-clip_vit-base-p16_zeroshot-cls_cifar100.py new file mode 100644 index 0000000..1c0ad1c --- /dev/null +++ b/configs/chinese_clip/cn-clip_vit-base-p16_zeroshot-cls_cifar100.py @@ -0,0 +1,76 @@ +_base_ = '../_base_/default_runtime.py' + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=False, +) + +test_pipeline = [ + dict(type='Resize', scale=(224, 224), interpolation='bicubic'), + dict( + type='PackInputs', + algorithm_keys=['text'], + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='CIFAR100', + data_root='data/cifar100', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='Accuracy', topk=(1, )) + +# schedule settings +train_cfg = None +val_cfg = None +test_cfg = dict() + +# model settings +model = dict( + type='ChineseCLIP', + vision_backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + 
norm_cfg=dict(type='LN', eps=1e-5), + final_norm=True, + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + pre_norm=True, + out_type='cls_token', + ), + text_backbone=dict( + type='BertModelCN', + config=dict( + vocab_size=21128, + pad_token_id=0, + add_type_embeddings=True, + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + max_position_embeddings=512, + num_attention_heads=12, + num_hidden_layers=12, + type_vocab_size=2, + layer_norm_eps=1e-12)), + tokenizer=dict( + type='FullTokenizer', + vocab_file= # noqa + 'https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/vocab.txt' + ), + proj_dim=512, + text_prototype='cifar100', +) diff --git a/configs/chinese_clip/cn-clip_vit-huge-p14_zeroshot-cls_cifar100.py b/configs/chinese_clip/cn-clip_vit-huge-p14_zeroshot-cls_cifar100.py new file mode 100644 index 0000000..83aae12 --- /dev/null +++ b/configs/chinese_clip/cn-clip_vit-huge-p14_zeroshot-cls_cifar100.py @@ -0,0 +1,75 @@ +_base_ = '../_base_/default_runtime.py' + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=False, +) + +test_pipeline = [ + dict(type='Resize', scale=(224, 224), interpolation='bicubic'), + dict( + type='PackInputs', + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='CIFAR100', + data_root='data/cifar100', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='Accuracy', topk=(1, )) + +# schedule settings +train_cfg = None +val_cfg = None +test_cfg = dict() + +# model settings +model = dict( + type='ChineseCLIP', + vision_backbone=dict( + type='VisionTransformer', + arch='huge', + img_size=224, + patch_size=14, + norm_cfg=dict(type='LN', eps=1e-5), + final_norm=True, + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + pre_norm=True, + out_type='cls_token', + ), + text_backbone=dict( + type='BertModelCN', + config=dict( + vocab_size=21128, + pad_token_id=0, + add_type_embeddings=True, + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=1024, + initializer_range=0.02, + intermediate_size=4096, + max_position_embeddings=512, + num_attention_heads=16, + num_hidden_layers=24, + type_vocab_size=2, + layer_norm_eps=1e-12)), + tokenizer=dict( + type='FullTokenizer', + vocab_file= # noqa + 'https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/vocab.txt' + ), + proj_dim=1024, + text_prototype='cifar100', +) diff --git a/configs/chinese_clip/cn-clip_vit-large-p14_zeroshot-cls_cifar100.py b/configs/chinese_clip/cn-clip_vit-large-p14_zeroshot-cls_cifar100.py new file mode 100644 index 0000000..35f0b6f --- /dev/null +++ b/configs/chinese_clip/cn-clip_vit-large-p14_zeroshot-cls_cifar100.py @@ -0,0 +1,75 @@ +_base_ = '../_base_/default_runtime.py' + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=False, +) + +test_pipeline = [ + dict(type='Resize', scale=(224, 224), interpolation='bicubic'), + dict( + type='PackInputs', + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict( 
+ batch_size=32, + num_workers=8, + dataset=dict( + type='CIFAR100', + data_root='data/cifar100', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='Accuracy', topk=(1, )) + +# schedule settings +train_cfg = None +val_cfg = None +test_cfg = dict() + +# model settings +model = dict( + type='ChineseCLIP', + vision_backbone=dict( + type='VisionTransformer', + arch='large', + img_size=224, + patch_size=14, + norm_cfg=dict(type='LN', eps=1e-5), + final_norm=True, + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + pre_norm=True, + out_type='cls_token', + ), + text_backbone=dict( + type='BertModelCN', + config=dict( + vocab_size=21128, + pad_token_id=0, + add_type_embeddings=True, + attention_probs_dropout_prob=0.1, + hidden_act='gelu', + hidden_dropout_prob=0.1, + hidden_size=768, + initializer_range=0.02, + intermediate_size=3072, + max_position_embeddings=512, + num_attention_heads=12, + num_hidden_layers=12, + type_vocab_size=2, + layer_norm_eps=1e-12)), + tokenizer=dict( + type='FullTokenizer', + vocab_file= # noqa + 'https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/vocab.txt' + ), + proj_dim=768, + text_prototype='cifar100', +) diff --git a/configs/chinese_clip/metafile.yml b/configs/chinese_clip/metafile.yml new file mode 100644 index 0000000..40ebb49 --- /dev/null +++ b/configs/chinese_clip/metafile.yml @@ -0,0 +1,79 @@ +Collections: + - Name: ChineseCLIP + Metadata: + Training Data: + - LAION-5B + - WuKong + - VisualGenome + - MSCOCO + Architecture: + - Transformer + Paper: + Title: 'Chinese CLIP: Contrastive Vision-Language Pretraining in Chinese' + URL: https://arxiv.org/abs/2211.01335 + README: configs/chinese_clip/README.md + +Models: + - Name: cn-clip_resnet50_zeroshot-cls_cifar100 + Metadata: + FLOPs: null + Parameters: 77000000 + In Collection: ChineseCLIP + Results: + - Task: Image Classification + Dataset: CIFAR100 + Metrics: + Top 1 Accuracy: 40.7 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/cn-clip_resnet50_3rdparty_20230519-6a2b3eb2.pth + Config: configs/chinese_clip/cn-clip_resnet50_zeroshot-cls_cifar100.py + Converted From: + Weights: https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/checkpoints/clip_cn_rn50.pt + Code: https://github.com/OFA-Sys/Chinese-CLIP + + - Name: cn-clip_vit-base-p16_zeroshot-cls_cifar100 + Metadata: + FLOPs: null + Parameters: 188000000 + In Collection: ChineseCLIP + Results: + - Task: Image Classification + Dataset: CIFAR100 + Metrics: + Top 1 Accuracy: 64.5 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/cn-clip_vit-base-p16_3rdparty_20230519-37fbc59e.pth + Config: configs/chinese_clip/cn-clip_vit-base-p16_zeroshot-cls_cifar100.py + Converted From: + Weights: https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/checkpoints/clip_cn_vit-b-16.pt + Code: https://github.com/OFA-Sys/Chinese-CLIP + + - Name: cn-clip_vit-large-p14_zeroshot-cls_cifar100 + Metadata: + FLOPs: null + Parameters: 406000000 + In Collection: ChineseCLIP + Results: + - Task: Image Classification + Dataset: CIFAR100 + Metrics: + Top 1 Accuracy: 74.8 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/cn-clip_vit-large-p14_3rdparty_20230519-3f844503.pth + Config: configs/chinese_clip/cn-clip_vit-large-p14_zeroshot-cls_cifar100.py + Converted From: + Weights: https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/checkpoints/clip_cn_vit-l-14.pt + Code: https://github.com/OFA-Sys/Chinese-CLIP + + - Name: 
cn-clip_vit-huge-p14_zeroshot-cls_cifar100 + Metadata: + FLOPs: null + Parameters: 958000000 + In Collection: ChineseCLIP + Results: + - Task: Image Classification + Dataset: CIFAR100 + Metrics: + Top 1 Accuracy: 79.1 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/chinese_clip/cn-clip_vit-huge-p14_3rdparty_20230519-e4f49b00.pth + Config: configs/chinese_clip/cn-clip_vit-huge-p14_zeroshot-cls_cifar100.py + Converted From: + Weights: https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/checkpoints/clip_cn_vit-h-14.pt + Code: https://github.com/OFA-Sys/Chinese-CLIP diff --git a/configs/clip/README.md b/configs/clip/README.md new file mode 100644 index 0000000..7a14be4 --- /dev/null +++ b/configs/clip/README.md @@ -0,0 +1,90 @@ +# CLIP + +> [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) + + + +## Abstract + +State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at this https URL. + +
+ +
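The zero-shot transfer described above comes down to matching an image embedding against text embeddings of class-name prompts, which is what the `clip_*_zeroshot-cls_*` configs in this folder wire together through `text_prototype` and `text_prompt`. The snippet below is only a rough sketch of that matching step, using random tensors in place of the real encoders (the 512-d joint space and the fixed logit scale of 100 are illustrative assumptions, not values read from these configs):

```python
import torch
import torch.nn.functional as F

# Stand-ins for the two encoders: in the real model these embeddings come from
# the vision backbone + projection and the text transformer + projection.
image_emb = torch.randn(1, 512)                    # one image in the joint space
class_prompts = ['a photo of a cat', 'a photo of a dog', 'a photo of a plane']
text_emb = torch.randn(len(class_prompts), 512)    # one embedding per class prompt

# Zero-shot classification: cosine similarity between the image and every
# class prompt, scaled and turned into a distribution over classes.
image_emb = F.normalize(image_emb, dim=-1)
text_emb = F.normalize(text_emb, dim=-1)
logits = 100.0 * image_emb @ text_emb.t()          # 100.0 stands in for the learned scale
probs = logits.softmax(dim=-1)
print(class_prompts[probs.argmax().item()], float(probs.max()))
```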
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('vit-base-p32_clip-laion2b-in12k-pre_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('vit-base-p32_clip-laion2b-in12k-pre_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/clip/vit-base-p32_pt-64xb64_in1k.py https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_laion2b-in12k-pre_3rdparty_in1k_20221220-b384e830.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------------------------- | :-----------------------: | :--------: | :-------: | :-------: | :-------: | :--------------------------------------------: | :----------------------------------------------: | +| `vit-base-p32_clip-laion2b-in12k-pre_3rdparty_in1k`\* | CLIP LAION2B ImageNet-12k | 88.22 | 4.36 | 83.06 | 96.49 | [config](vit-base-p32_pt-64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_laion2b-in12k-pre_3rdparty_in1k_20221220-b384e830.pth) | +| `vit-base-p32_clip-laion2b-pre_3rdparty_in1k`\* | CLIP LAION2B | 88.22 | 4.36 | 82.46 | 96.12 | [config](vit-base-p32_pt-64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_laion2b-pre_3rdparty_in1k_20221220-194df57f.pth) | +| `vit-base-p32_clip-openai-pre_3rdparty_in1k`\* | CLIP OPENAI | 88.22 | 4.36 | 81.77 | 95.89 | [config](vit-base-p32_pt-64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_openai-pre_3rdparty_in1k_20221220-a0182ba9.pth) | +| `vit-base-p32_clip-laion2b-in12k-pre_3rdparty_in1k-384px`\* | CLIP LAION2B ImageNet-12k | 88.22 | 12.66 | 85.39 | 97.67 | [config](vit-base-p32_pt-64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_laion2b-in12k-pre_3rdparty_in1k-384px_20221220-c7757552.pth) | +| `vit-base-p32_clip-openai-in12k-pre_3rdparty_in1k-384px`\* | CLIP OPENAI ImageNet-12k | 88.22 | 12.66 | 85.13 | 97.42 | [config](vit-base-p32_pt-64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_openai-in12k-pre_3rdparty_in1k-384px_20221220-dc2e49ea.pth) | +| `vit-base-p16_clip-laion2b-in12k-pre_3rdparty_in1k`\* | CLIP LAION2B ImageNet-12k | 86.57 | 16.86 | 86.02 | 97.76 | [config](vit-base-p16_pt-64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_laion2b-in12k-pre_3rdparty_in1k_20221220-a5e31f8c.pth) | +| `vit-base-p16_clip-laion2b-pre_3rdparty_in1k`\* | CLIP LAION2B | 86.57 | 16.86 | 85.49 | 97.59 | [config](vit-base-p16_pt-64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_laion2b-pre_3rdparty_in1k_20221220-5e24ff58.pth) | +| `vit-base-p16_clip-openai-in12k-pre_3rdparty_in1k`\* | CLIP OPENAI ImageNet-12k | 86.57 | 16.86 | 85.99 | 97.72 | 
[config](vit-base-p16_pt-64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_openai-in12k-pre_3rdparty_in1k_20221220-90d930a8.pth) | +| `vit-base-p16_clip-openai-pre_3rdparty_in1k`\* | CLIP OPENAI | 86.57 | 16.86 | 85.30 | 97.50 | [config](vit-base-p16_pt-64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_openai-pre_3rdparty_in1k_20221220-c7d9c899.pth) | +| `vit-base-p32_clip-laion2b-in12k-pre_3rdparty_in1k-448px`\* | CLIP LAION2B ImageNet-12k | 88.22 | 17.20 | 85.76 | 97.63 | [config](vit-base-p32_pt-64xb64_in1k-448px.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_laion2b-in12k-pre_3rdparty_in1k-448px_20221220-ca404a7d.pth) | +| `vit-base-p16_clip-laion2b-in12k-pre_3rdparty_in1k-384px`\* | CLIP LAION2B ImageNet-12k | 86.57 | 49.37 | 87.17 | 98.02 | [config](vit-base-p16_pt-64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_laion2b-in12k-pre_3rdparty_in1k-384px_20221220-84ed0cc0.pth) | +| `vit-base-p16_clip-laion2b-pre_3rdparty_in1k-384px`\* | CLIP LAION2B | 86.57 | 49.37 | 86.52 | 97.97 | [config](vit-base-p16_pt-64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_laion2b-pre_3rdparty_in1k-384px_20221220-558ed826.pth) | +| `vit-base-p16_clip-openai-in12k-pre_3rdparty_in1k-384px`\* | CLIP OPENAI ImageNet-12k | 86.57 | 49.37 | 86.87 | 98.05 | [config](vit-base-p16_pt-64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_openai-in12k-pre_3rdparty_in1k-384px_20221220-8df86b74.pth) | +| `vit-base-p16_clip-openai-pre_3rdparty_in1k-384px`\* | CLIP OPENAI | 86.57 | 49.37 | 86.25 | 97.90 | [config](vit-base-p16_pt-64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_openai-pre_3rdparty_in1k-384px_20221220-eb012e87.pth) | + +*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@InProceedings{pmlr-v139-radford21a, +title = {Learning Transferable Visual Models From Natural Language Supervision}, +author = {Radford, Alec and Kim, Jong Wook and Hallacy, Chris and Ramesh, Aditya and Goh, Gabriel and Agarwal, Sandhini and Sastry, Girish and Askell, Amanda and Mishkin, Pamela and Clark, Jack and Krueger, Gretchen and Sutskever, Ilya}, +booktitle = {Proceedings of the 38th International Conference on Machine Learning}, +year = {2021}, +series = {Proceedings of Machine Learning Research}, +publisher = {PMLR}, +} +``` diff --git a/configs/clip/clip_vit-base-p16_zeroshot-cls_cifar100.py b/configs/clip/clip_vit-base-p16_zeroshot-cls_cifar100.py new file mode 100644 index 0000000..dd684a5 --- /dev/null +++ b/configs/clip/clip_vit-base-p16_zeroshot-cls_cifar100.py @@ -0,0 +1,68 @@ +_base_ = '../_base_/default_runtime.py' + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=False, +) + +test_pipeline = [ + dict(type='Resize', scale=(224, 224), interpolation='bicubic'), + dict( + type='PackInputs', + algorithm_keys=['text'], + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='CIFAR100', + data_root='data/cifar100', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# schedule settings +train_cfg = None +val_cfg = None +test_cfg = dict() + +# model settings +model = dict( + type='CLIPZeroShot', + vision_backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_rate=0., + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + pre_norm=True, + ), + projection=dict(type='CLIPProjection', in_channels=768, out_channels=512), + text_backbone=dict( + type='CLIPTransformer', + width=512, + layers=12, + heads=8, + attn_mask=True, + ), + tokenizer=dict( + type='AutoTokenizer', + name_or_path='openai/clip-vit-base-patch16', + use_fast=False), + vocab_size=49408, + transformer_width=512, + proj_dim=512, + text_prototype='cifar100', + text_prompt='openai_cifar100', + context_length=77, +) diff --git a/configs/clip/clip_vit-base-p16_zeroshot-cls_in1k.py b/configs/clip/clip_vit-base-p16_zeroshot-cls_in1k.py new file mode 100644 index 0000000..80c4fde --- /dev/null +++ b/configs/clip/clip_vit-base-p16_zeroshot-cls_in1k.py @@ -0,0 +1,69 @@ +_base_ = '../_base_/default_runtime.py' + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(224, 224), interpolation='bicubic'), + dict( + type='PackInputs', + algorithm_keys=['text'], + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='ImageNet', + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# schedule settings +train_cfg = None +val_cfg = None +test_cfg = dict() + +# model settings +model 
= dict( + type='CLIPZeroShot', + vision_backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_rate=0., + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + pre_norm=True, + ), + projection=dict(type='CLIPProjection', in_channels=768, out_channels=512), + text_backbone=dict( + type='CLIPTransformer', + width=512, + layers=12, + heads=8, + attn_mask=True, + ), + tokenizer=dict( + type='AutoTokenizer', + name_or_path='openai/clip-vit-base-patch16', + use_fast=False), + vocab_size=49408, + transformer_width=512, + proj_dim=512, + text_prototype='imagenet', + text_prompt='openai_imagenet_sub', # openai_imagenet, openai_imagenet_sub + context_length=77, +) diff --git a/configs/clip/clip_vit-large-p14_zeroshot-cls_cifar100.py b/configs/clip/clip_vit-large-p14_zeroshot-cls_cifar100.py new file mode 100644 index 0000000..a6dd7c1 --- /dev/null +++ b/configs/clip/clip_vit-large-p14_zeroshot-cls_cifar100.py @@ -0,0 +1,68 @@ +_base_ = '../_base_/default_runtime.py' + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=False, +) + +test_pipeline = [ + dict(type='Resize', scale=(224, 224), interpolation='bicubic'), + dict( + type='PackInputs', + algorithm_keys=['text'], + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='CIFAR100', + data_root='data/cifar100', + split='test', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# schedule settings +train_cfg = None +val_cfg = None +test_cfg = dict() + +# model settings +model = dict( + type='CLIPZeroShot', + vision_backbone=dict( + type='VisionTransformer', + arch='large', + img_size=224, + patch_size=14, + drop_rate=0., + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + pre_norm=True, + ), + projection=dict(type='CLIPProjection', in_channels=1024, out_channels=768), + text_backbone=dict( + type='CLIPTransformer', + width=768, + layers=12, + heads=12, + attn_mask=True, + ), + tokenizer=dict( + type='AutoTokenizer', + name_or_path='openai/clip-vit-large-patch14', + use_fast=False), + vocab_size=49408, + transformer_width=768, + proj_dim=768, + text_prototype='cifar100', + text_prompt='openai_cifar100', + context_length=77, +) diff --git a/configs/clip/clip_vit-large-p14_zeroshot-cls_in1k.py b/configs/clip/clip_vit-large-p14_zeroshot-cls_in1k.py new file mode 100644 index 0000000..1050001 --- /dev/null +++ b/configs/clip/clip_vit-large-p14_zeroshot-cls_in1k.py @@ -0,0 +1,69 @@ +_base_ = '../_base_/default_runtime.py' + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(224, 224), interpolation='bicubic'), + dict( + type='PackInputs', + algorithm_keys=['text'], + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict( + batch_size=32, + num_workers=8, + dataset=dict( + type='ImageNet', + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = dict(type='Accuracy', topk=(1, 5)) + +# 
schedule settings +train_cfg = None +val_cfg = None +test_cfg = dict() + +# model settings +model = dict( + type='CLIPZeroShot', + vision_backbone=dict( + type='VisionTransformer', + arch='large', + img_size=224, + patch_size=14, + drop_rate=0., + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + pre_norm=True, + ), + projection=dict(type='CLIPProjection', in_channels=1024, out_channels=768), + text_backbone=dict( + type='CLIPTransformer', + width=768, + layers=12, + heads=12, + attn_mask=True, + ), + tokenizer=dict( + type='AutoTokenizer', + name_or_path='openai/clip-vit-large-patch14', + use_fast=False), + vocab_size=49408, + transformer_width=768, + proj_dim=768, + text_prototype='imagenet', + text_prompt='openai_imagenet_sub', # openai_imagenet, openai_imagenet_sub + context_length=77, +) diff --git a/configs/clip/metafile.yml b/configs/clip/metafile.yml new file mode 100644 index 0000000..a82eea4 --- /dev/null +++ b/configs/clip/metafile.yml @@ -0,0 +1,308 @@ +Collections: + - Name: CLIP + Metadata: + Architecture: + - Attention Dropout + - Convolution + - Dense Connections + - Dropout + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + - Tanh Activation + Paper: + Title: Learning Transferable Visual Models From Natural Language Supervision + URL: https://arxiv.org/abs/2103.00020 + README: configs/clip/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/vision_transformer.py + Version: v1.0.0 + +Models: + - Name: vit-base-p32_clip-openai-pre_3rdparty_in1k + Metadata: + FLOPs: 4364335104 + Parameters: 88225000 + Training Data: + - OpenAI + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.77 + Top 5 Accuracy: 95.89 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_openai-pre_3rdparty_in1k_20221220-a0182ba9.pth + Config: configs/clip/vit-base-p32_pt-64xb64_in1k.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch32_clip_224.openai_ft_in1k + - Name: vit-base-p32_clip-laion2b-pre_3rdparty_in1k + Metadata: + FLOPs: 4364335104 + Parameters: 88225000 + Training Data: + - LAION-2B + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.46 + Top 5 Accuracy: 96.12 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_laion2b-pre_3rdparty_in1k_20221220-194df57f.pth + Config: configs/clip/vit-base-p32_pt-64xb64_in1k.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in1k + - Name: vit-base-p32_clip-laion2b-in12k-pre_3rdparty_in1k + Metadata: + FLOPs: 4364335104 + Parameters: 88225000 + Training Data: + - LAION-2B + - ImageNet-12k + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.06 + Top 5 Accuracy: 96.49 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_laion2b-in12k-pre_3rdparty_in1k_20221220-b384e830.pth + Config: configs/clip/vit-base-p32_pt-64xb64_in1k.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch32_clip_224.laion2b_ft_in12k_in1k + - Name: 
vit-base-p32_clip-openai-in12k-pre_3rdparty_in1k-384px + Metadata: + FLOPs: 12661054464 + Parameters: 88225000 + Training Data: + - OpenAI + - ImageNet-12k + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.13 + Top 5 Accuracy: 97.42 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_openai-in12k-pre_3rdparty_in1k-384px_20221220-dc2e49ea.pth + Config: configs/clip/vit-base-p32_pt-64xb64_in1k-384px.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch32_clip_384.openai_ft_in12k_in1k + - Name: vit-base-p32_clip-laion2b-in12k-pre_3rdparty_in1k-384px + Metadata: + FLOPs: 12661054464 + Parameters: 88225000 + Training Data: + - LAION-2B + - ImageNet-12k + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.39 + Top 5 Accuracy: 97.67 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_laion2b-in12k-pre_3rdparty_in1k-384px_20221220-c7757552.pth + Config: configs/clip/vit-base-p32_pt-64xb64_in1k-384px.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch32_clip_384.laion2b_ft_in12k_in1k + - Name: vit-base-p16_clip-openai-pre_3rdparty_in1k + Metadata: + FLOPs: 16855600128 + Parameters: 86568424 + Training Data: + - OpenAI + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.3 + Top 5 Accuracy: 97.5 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_openai-pre_3rdparty_in1k_20221220-c7d9c899.pth + Config: configs/clip/vit-base-p16_pt-64xb64_in1k.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in1k + - Name: vit-base-p16_clip-laion2b-pre_3rdparty_in1k + Metadata: + FLOPs: 16855600128 + Parameters: 86568424 + Training Data: + - LAION-2B + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.49 + Top 5 Accuracy: 97.59 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_laion2b-pre_3rdparty_in1k_20221220-5e24ff58.pth + Config: configs/clip/vit-base-p16_pt-64xb64_in1k.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in1k + - Name: vit-base-p16_clip-openai-in12k-pre_3rdparty_in1k + Metadata: + FLOPs: 16855600128 + Parameters: 86568424 + Training Data: + - OpenAI + - ImageNet-12k + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.99 + Top 5 Accuracy: 97.72 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_openai-in12k-pre_3rdparty_in1k_20221220-90d930a8.pth + Config: configs/clip/vit-base-p16_pt-64xb64_in1k.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch16_clip_224.openai_ft_in12k_in1k + - Name: vit-base-p16_clip-laion2b-in12k-pre_3rdparty_in1k + Metadata: + FLOPs: 16855600128 + Parameters: 86568424 + Training Data: + - LAION-2B + - ImageNet-12k + - ImageNet-1k + In Collection: CLIP + 
Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.02 + Top 5 Accuracy: 97.76 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_laion2b-in12k-pre_3rdparty_in1k_20221220-a5e31f8c.pth + Config: configs/clip/vit-base-p16_pt-64xb64_in1k.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch16_clip_224.laion2b_ft_in12k_in1k + - Name: vit-base-p32_clip-laion2b-in12k-pre_3rdparty_in1k-448px + Metadata: + FLOPs: 17202416640 + Parameters: 88225000 + Training Data: + - LAION-2B + - ImageNet-12k + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.76 + Top 5 Accuracy: 97.63 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p32_laion2b-in12k-pre_3rdparty_in1k-448px_20221220-ca404a7d.pth + Config: configs/clip/vit-base-p32_pt-64xb64_in1k-448px.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch32_clip_448.laion2b_ft_in12k_in1k + - Name: vit-base-p16_clip-openai-pre_3rdparty_in1k-384px + Metadata: + FLOPs: 49370078208 + Parameters: 86568424 + Training Data: + - OpenAI + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.25 + Top 5 Accuracy: 97.9 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_openai-pre_3rdparty_in1k-384px_20221220-eb012e87.pth + Config: configs/clip/vit-base-p16_pt-64xb64_in1k-384px.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in1k + - Name: vit-base-p16_clip-laion2b-pre_3rdparty_in1k-384px + Metadata: + FLOPs: 49370078208 + Parameters: 86568424 + Training Data: + - LAION-2B + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.52 + Top 5 Accuracy: 97.97 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_laion2b-pre_3rdparty_in1k-384px_20221220-558ed826.pth + Config: configs/clip/vit-base-p16_pt-64xb64_in1k-384px.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in1k + - Name: vit-base-p16_clip-openai-in12k-pre_3rdparty_in1k-384px + Metadata: + FLOPs: 49370078208 + Parameters: 86568424 + Training Data: + - OpenAI + - ImageNet-12k + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.87 + Top 5 Accuracy: 98.05 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_openai-in12k-pre_3rdparty_in1k-384px_20221220-8df86b74.pth + Config: configs/clip/vit-base-p16_pt-64xb64_in1k-384px.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch16_clip_384.openai_ft_in12k_in1k + - Name: vit-base-p16_clip-laion2b-in12k-pre_3rdparty_in1k-384px + Metadata: + FLOPs: 49370078208 + Parameters: 86568424 + Training Data: + - LAION-2B + - ImageNet-12k + - ImageNet-1k + In Collection: CLIP + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.17 + Top 5 Accuracy: 98.02 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/clip/clip-vit-base-p16_laion2b-in12k-pre_3rdparty_in1k-384px_20221220-84ed0cc0.pth + Config: configs/clip/vit-base-p16_pt-64xb64_in1k-384px.py + Converted From: + Code: https://github.com/rwightman/pytorch-image-models + Weights: https://huggingface.co/timm/vit_base_patch16_clip_384.laion2b_ft_in12k_in1k + - Name: vit-large-p14_clip-openai-pre_3rdparty + Metadata: + FLOPs: 59696580608 + Parameters: 303302656 + Training Data: + - OpenAI + In Collection: CLIP + Weights: https://download.openmmlab.com/mmclassification/v0/clip/vit-large-p14_clip-openai-pre_3rdparty_20230517-95e2af0b.pth + Config: configs/clip/vit-large-p14_headless.py + Converted From: + Code: https://github.com/mlfoundations/open_clip + Weights: https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt diff --git a/configs/clip/vit-base-p16_pt-64xb64_in1k-384px.py b/configs/clip/vit-base-p16_pt-64xb64_in1k-384px.py new file mode 100644 index 0000000..14046ce --- /dev/null +++ b/configs/clip/vit-base-p16_pt-64xb64_in1k-384px.py @@ -0,0 +1,40 @@ +_base_ = [ + '../_base_/models/vit-base-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict(backbone=dict(pre_norm=True)) + +# data settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=384, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/clip/vit-base-p16_pt-64xb64_in1k-448px.py b/configs/clip/vit-base-p16_pt-64xb64_in1k-448px.py new file mode 100644 index 0000000..02af585 --- /dev/null +++ b/configs/clip/vit-base-p16_pt-64xb64_in1k-448px.py @@ -0,0 +1,40 @@ +_base_ = [ + '../_base_/models/vit-base-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict(backbone=dict(pre_norm=True)) + +# data settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=448, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=448, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=448), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/clip/vit-base-p16_pt-64xb64_in1k.py b/configs/clip/vit-base-p16_pt-64xb64_in1k.py new file mode 100644 index 0000000..cd018ba --- 
/dev/null +++ b/configs/clip/vit-base-p16_pt-64xb64_in1k.py @@ -0,0 +1,40 @@ +_base_ = [ + '../_base_/models/vit-base-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict(backbone=dict(pre_norm=True)) + +# data settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=224, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/clip/vit-base-p32_pt-64xb64_in1k-384px.py b/configs/clip/vit-base-p32_pt-64xb64_in1k-384px.py new file mode 100644 index 0000000..d1acf78 --- /dev/null +++ b/configs/clip/vit-base-p32_pt-64xb64_in1k-384px.py @@ -0,0 +1,40 @@ +_base_ = [ + '../_base_/models/vit-base-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict(backbone=dict(pre_norm=True)) + +# data settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=384, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/clip/vit-base-p32_pt-64xb64_in1k-448px.py b/configs/clip/vit-base-p32_pt-64xb64_in1k-448px.py new file mode 100644 index 0000000..0f50391 --- /dev/null +++ b/configs/clip/vit-base-p32_pt-64xb64_in1k-448px.py @@ -0,0 +1,40 @@ +_base_ = [ + '../_base_/models/vit-base-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict(backbone=dict(pre_norm=True)) + +# data settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=448, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=448, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=448), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule setting 
+optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/clip/vit-base-p32_pt-64xb64_in1k.py b/configs/clip/vit-base-p32_pt-64xb64_in1k.py new file mode 100644 index 0000000..abbb500 --- /dev/null +++ b/configs/clip/vit-base-p32_pt-64xb64_in1k.py @@ -0,0 +1,40 @@ +_base_ = [ + '../_base_/models/vit-base-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict(backbone=dict(pre_norm=True)) + +# data settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=224, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/clip/vit-large-p14_headless.py b/configs/clip/vit-large-p14_headless.py new file mode 100644 index 0000000..c9b965d --- /dev/null +++ b/configs/clip/vit-large-p14_headless.py @@ -0,0 +1,34 @@ +_base_ = ['../_base_/default_runtime.py'] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='l', + img_size=224, + patch_size=16, + drop_rate=0.1, + pre_norm=True, + ), +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +test_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type='ImageNet', + data_root='data/imagenet', + ann_file='meta/val.txt', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +test_evaluator = None diff --git a/configs/conformer/README.md b/configs/conformer/README.md new file mode 100644 index 0000000..04b5d47 --- /dev/null +++ b/configs/conformer/README.md @@ -0,0 +1,84 @@ +# Conformer + +> [Conformer: Local Features Coupling Global Representations for Visual Recognition](https://arxiv.org/abs/2105.03889) + + + +## Abstract + +Within Convolutional Neural Network (CNN), the convolution operations are good at extracting local features but experience difficulty to capture global representations. Within visual transformer, the cascaded self-attention modules can capture long-distance feature dependencies but unfortunately deteriorate local feature details. In this paper, we propose a hybrid network structure, termed Conformer, to take advantage of convolutional operations and self-attention mechanisms for enhanced representation learning. Conformer roots in the Feature Coupling Unit (FCU), which fuses local features and global representations under different resolutions in an interactive fashion. Conformer adopts a concurrent structure so that local features and global representations are retained to the maximum extent. Experiments show that Conformer, under the comparable parameter complexity, outperforms the visual transformer (DeiT-B) by 2.3% on ImageNet. 
On MSCOCO, it outperforms ResNet-101 by 3.7% and 3.6% mAPs for object detection and instance segmentation, respectively, demonstrating the great potential to be a general backbone network. + +
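The Feature Coupling Unit mentioned above repeatedly exchanges information between the CNN feature map and the transformer tokens. The toy module below is only a conceptual sketch of such a coupling step, not the official FCU: the 1x1 projections, pooling to the patch grid, and bilinear upsampling are assumptions chosen to make the shapes line up, and the class token is ignored.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToyCouplingUnit(nn.Module):
    """Rough sketch of fusing a CNN feature map with transformer patch tokens."""

    def __init__(self, cnn_channels=256, embed_dim=384, grid=14):
        super().__init__()
        self.grid = grid
        self.cnn_to_token = nn.Conv2d(cnn_channels, embed_dim, kernel_size=1)
        self.token_to_cnn = nn.Conv2d(embed_dim, cnn_channels, kernel_size=1)
        self.norm = nn.LayerNorm(embed_dim)

    def forward(self, feat, tokens):
        # feat: (B, C, H, W) local features; tokens: (B, N, D) with N = grid * grid
        B, C, H, W = feat.shape
        # CNN -> tokens: project to D channels, pool to the patch grid, flatten, add.
        t = F.adaptive_avg_pool2d(self.cnn_to_token(feat), self.grid)
        tokens = tokens + self.norm(t.flatten(2).transpose(1, 2))
        # tokens -> CNN: reshape to the grid, project back to C, upsample, add.
        g = tokens.transpose(1, 2).reshape(B, -1, self.grid, self.grid)
        feat = feat + F.interpolate(
            self.token_to_cnn(g), size=(H, W), mode='bilinear', align_corners=False)
        return feat, tokens

feat, tokens = torch.randn(2, 256, 56, 56), torch.randn(2, 14 * 14, 384)
feat, tokens = ToyCouplingUnit()(feat, tokens)
print(feat.shape, tokens.shape)
```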
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('conformer-tiny-p16_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('conformer-tiny-p16_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/conformer/conformer-small-p32_8xb128_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/conformer/conformer-tiny-p16_8xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/conformer/conformer-tiny-p16_3rdparty_8xb128_in1k_20211206-f6860372.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :------------------------------------------: | :--------------------------------------------------------------------: | +| `conformer-tiny-p16_3rdparty_in1k`\* | From scratch | 23.52 | 4.90 | 81.31 | 95.60 | [config](conformer-tiny-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-tiny-p16_3rdparty_8xb128_in1k_20211206-f6860372.pth) | +| `conformer-small-p16_3rdparty_in1k`\* | From scratch | 37.67 | 10.31 | 83.32 | 96.46 | [config](conformer-small-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p16_3rdparty_8xb128_in1k_20211206-3065dcf5.pth) | +| `conformer-small-p32_8xb128_in1k` | From scratch | 38.85 | 7.09 | 81.96 | 96.02 | [config](conformer-small-p32_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p32_8xb128_in1k_20211206-947a0816.pth) | +| `conformer-base-p16_3rdparty_in1k`\* | From scratch | 83.29 | 22.89 | 83.82 | 96.59 | [config](conformer-base-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-base-p16_3rdparty_8xb128_in1k_20211206-bfdf8637.pth) | + +*Models with * are converted from the [official repo](https://github.com/pengzhiliang/Conformer/blob/main/models.py#L89). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{peng2021conformer, + title={Conformer: Local Features Coupling Global Representations for Visual Recognition}, + author={Zhiliang Peng and Wei Huang and Shanzhi Gu and Lingxi Xie and Yaowei Wang and Jianbin Jiao and Qixiang Ye}, + journal={arXiv preprint arXiv:2105.03889}, + year={2021}, +} +``` diff --git a/configs/conformer/conformer-base-p16_8xb128_in1k.py b/configs/conformer/conformer-base-p16_8xb128_in1k.py new file mode 100644 index 0000000..a44f56f --- /dev/null +++ b/configs/conformer/conformer-base-p16_8xb128_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/conformer/base-p16.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_conformer.py', + '../_base_/default_runtime.py' +] + +train_dataloader = dict(batch_size=128) diff --git a/configs/conformer/conformer-small-p16_8xb128_in1k.py b/configs/conformer/conformer-small-p16_8xb128_in1k.py new file mode 100644 index 0000000..a937f4f --- /dev/null +++ b/configs/conformer/conformer-small-p16_8xb128_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/conformer/small-p16.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_conformer.py', + '../_base_/default_runtime.py' +] + +train_dataloader = dict(batch_size=128) diff --git a/configs/conformer/conformer-small-p32_8xb128_in1k.py b/configs/conformer/conformer-small-p32_8xb128_in1k.py new file mode 100644 index 0000000..0b07ce2 --- /dev/null +++ b/configs/conformer/conformer-small-p32_8xb128_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/conformer/small-p32.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_conformer.py', + '../_base_/default_runtime.py' +] + +train_dataloader = dict(batch_size=128) diff --git a/configs/conformer/conformer-tiny-p16_8xb128_in1k.py b/configs/conformer/conformer-tiny-p16_8xb128_in1k.py new file mode 100644 index 0000000..f88c6c3 --- /dev/null +++ b/configs/conformer/conformer-tiny-p16_8xb128_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/conformer/tiny-p16.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_conformer.py', + '../_base_/default_runtime.py' +] + +train_dataloader = dict(batch_size=128) diff --git a/configs/conformer/metafile.yml b/configs/conformer/metafile.yml new file mode 100644 index 0000000..c0821ba --- /dev/null +++ b/configs/conformer/metafile.yml @@ -0,0 +1,78 @@ +Collections: + - Name: Conformer + Metadata: + Training Data: ImageNet-1k + Architecture: + - Layer Normalization + - Scaled Dot-Product Attention + - Dropout + Paper: + URL: https://arxiv.org/abs/2105.03889 + Title: "Conformer: Local Features Coupling Global Representations for Visual Recognition" + README: configs/conformer/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.19.0/mmcls/models/backbones/conformer.py + Version: v0.19.0 + +Models: + - Name: conformer-tiny-p16_3rdparty_in1k + In Collection: Conformer + Config: configs/conformer/conformer-tiny-p16_8xb128_in1k.py + Metadata: + FLOPs: 4899611328 + Parameters: 23524704 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.31 + Top 5 Accuracy: 95.60 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/conformer/conformer-tiny-p16_3rdparty_8xb128_in1k_20211206-f6860372.pth + Converted From: + Weights: 
https://drive.google.com/file/d/19SxGhKcWOR5oQSxNUWUM2MGYiaWMrF1z/view?usp=sharing + Code: https://github.com/pengzhiliang/Conformer/blob/main/models.py#L65 + - Name: conformer-small-p16_3rdparty_in1k + In Collection: Conformer + Config: configs/conformer/conformer-small-p16_8xb128_in1k.py + Metadata: + FLOPs: 10311309312 + Parameters: 37673424 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.32 + Top 5 Accuracy: 96.46 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p16_3rdparty_8xb128_in1k_20211206-3065dcf5.pth + Converted From: + Weights: https://drive.google.com/file/d/1mpOlbLaVxOfEwV4-ha78j_1Ebqzj2B83/view?usp=sharing + Code: https://github.com/pengzhiliang/Conformer/blob/main/models.py#L73 + - Name: conformer-small-p32_8xb128_in1k + In Collection: Conformer + Config: configs/conformer/conformer-small-p32_8xb128_in1k.py + Metadata: + FLOPs: 7087281792 + Parameters: 38853072 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.96 + Top 5 Accuracy: 96.02 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-p32_8xb128_in1k_20211206-947a0816.pth + - Name: conformer-base-p16_3rdparty_in1k + In Collection: Conformer + Config: configs/conformer/conformer-base-p16_8xb128_in1k.py + Metadata: + FLOPs: 22892078080 + Parameters: 83289136 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.82 + Top 5 Accuracy: 96.59 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/conformer/conformer-base-p16_3rdparty_8xb128_in1k_20211206-bfdf8637.pth + Converted From: + Weights: https://drive.google.com/file/d/1oeQ9LSOGKEUaYGu7WTlUGl3KDsQIi0MA/view?usp=sharing + Code: https://github.com/pengzhiliang/Conformer/blob/main/models.py#L89 diff --git a/configs/convmixer/README.md b/configs/convmixer/README.md new file mode 100644 index 0000000..a87d27f --- /dev/null +++ b/configs/convmixer/README.md @@ -0,0 +1,79 @@ +# ConvMixer + +> [Patches Are All You Need?](https://arxiv.org/abs/2201.09792) + + + +## Abstract + +Although convolutional networks have been the dominant architecture for vision tasks for many years, recent experiments have shown that Transformer-based models, most notably the Vision Transformer (ViT), may exceed their performance in some settings. However, due to the quadratic runtime of the self-attention layers in Transformers, ViTs require the use of patch embeddings, which group together small regions of the image into single input features, in order to be applied to larger image sizes. This raises a question: Is the performance of ViTs due to the inherently-more-powerful Transformer architecture, or is it at least partly due to using patches as the input representation? In this paper, we present some evidence for the latter: specifically, we propose the ConvMixer, an extremely simple model that is similar in spirit to the ViT and the even-more-basic MLP-Mixer in that it operates directly on patches as input, separates the mixing of spatial and channel dimensions, and maintains equal size and resolution throughout the network. In contrast, however, the ConvMixer uses only standard convolutions to achieve the mixing steps. Despite its simplicity, we show that the ConvMixer outperforms the ViT, MLP-Mixer, and some of their variants for similar parameter counts and data set sizes, in addition to outperforming classical vision models such as the ResNet. + +
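The architecture sketched in the abstract is short enough to write out: a convolutional patch embedding, then a stack of blocks that mix spatial information with a residual depthwise convolution and channel information with a pointwise convolution, each followed by GELU and BatchNorm. Below is a compact PyTorch sketch along those lines; the default sizes roughly match the 768/32 variant, but this is an illustration of the paper's description, not mmpretrain's `ConvMixer` backbone.

```python
import torch
import torch.nn as nn

class Residual(nn.Module):
    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        return x + self.fn(x)

def conv_mixer(dim=768, depth=32, kernel_size=7, patch_size=7, n_classes=1000):
    # Patch embedding, then `depth` blocks of residual depthwise conv (spatial
    # mixing) and pointwise conv (channel mixing), each followed by GELU + BN.
    def act_bn():
        return [nn.GELU(), nn.BatchNorm2d(dim)]

    blocks = []
    for _ in range(depth):
        blocks += [
            Residual(nn.Sequential(
                nn.Conv2d(dim, dim, kernel_size, groups=dim, padding='same'),
                *act_bn())),
            nn.Conv2d(dim, dim, kernel_size=1), *act_bn(),
        ]
    return nn.Sequential(
        nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size), *act_bn(),
        *blocks,
        nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(dim, n_classes))

# depth=2 just to keep the smoke test fast; the released models use 20 or 32 blocks.
print(conv_mixer(depth=2)(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 1000])
```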
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('convmixer-768-32_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('convmixer-768-32_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/convmixer/convmixer-768-32_10xb64_in1k.py https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-768-32_3rdparty_10xb64_in1k_20220323-bca1f7b8.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------------: | :------------------------------------------------------------------------: | +| `convmixer-768-32_3rdparty_in1k`\* | From scratch | 21.11 | 19.62 | 80.16 | 95.08 | [config](convmixer-768-32_10xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-768-32_3rdparty_10xb64_in1k_20220323-bca1f7b8.pth) | +| `convmixer-1024-20_3rdparty_in1k`\* | From scratch | 24.38 | 5.55 | 76.94 | 93.36 | [config](convmixer-1024-20_10xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1024-20_3rdparty_10xb64_in1k_20220323-48f8aeba.pth) | +| `convmixer-1536-20_3rdparty_in1k`\* | From scratch | 51.63 | 48.71 | 81.37 | 95.61 | [config](convmixer-1536-20_10xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1536_20_3rdparty_10xb64_in1k_20220323-ea5786f3.pth) | + +*Models with * are converted from the [official repo](https://github.com/locuslab/convmixer). The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@misc{trockman2022patches, + title={Patches Are All You Need?}, + author={Asher Trockman and J. 
Zico Kolter}, + year={2022}, + eprint={2201.09792}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/convmixer/convmixer-1024-20_10xb64_in1k.py b/configs/convmixer/convmixer-1024-20_10xb64_in1k.py new file mode 100644 index 0000000..0dbc664 --- /dev/null +++ b/configs/convmixer/convmixer-1024-20_10xb64_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/convmixer/convmixer-1024-20.py', + '../_base_/datasets/imagenet_bs64_convmixer_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=0.01), + clip_grad=dict(max_norm=5.0), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + begin=0, + end=20, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=130, + eta_min=1e-5, + by_epoch=True, + begin=20, + end=150) +] + +train_cfg = dict(by_epoch=True, max_epochs=150) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (10 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=640) diff --git a/configs/convmixer/convmixer-1536-20_10xb64_in1k.py b/configs/convmixer/convmixer-1536-20_10xb64_in1k.py new file mode 100644 index 0000000..3c8cc95 --- /dev/null +++ b/configs/convmixer/convmixer-1536-20_10xb64_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/models/convmixer/convmixer-1536-20.py', + '../_base_/datasets/imagenet_bs64_convmixer_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=0.01), + clip_grad=dict(max_norm=5.0), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + begin=0, + end=20, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=130, + eta_min=1e-5, + by_epoch=True, + begin=20, + end=150) +] + +train_cfg = dict(by_epoch=True, max_epochs=150) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (10 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=640) diff --git a/configs/convmixer/convmixer-768-32_10xb64_in1k.py b/configs/convmixer/convmixer-768-32_10xb64_in1k.py new file mode 100644 index 0000000..d872d44 --- /dev/null +++ b/configs/convmixer/convmixer-768-32_10xb64_in1k.py @@ -0,0 +1,19 @@ +_base_ = [ + '../_base_/models/convmixer/convmixer-768-32.py', + '../_base_/datasets/imagenet_bs64_convmixer_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=0.01), + clip_grad=dict(max_norm=5.0), +) + +train_cfg = dict(by_epoch=True, max_epochs=300) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (10 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=640) diff --git a/configs/convmixer/metafile.yml b/configs/convmixer/metafile.yml new file mode 100644 index 0000000..f9dcdc7 --- /dev/null +++ b/configs/convmixer/metafile.yml @@ -0,0 +1,61 @@ +Collections: + - Name: ConvMixer + Metadata: + Training Data: ImageNet-1k + Architecture: + - 1x1 Convolution + - LayerScale + Paper: + URL: https://arxiv.org/abs/2201.09792 + Title: Patches Are All You Need? + README: configs/convmixer/README.md + +Models: + - Name: convmixer-768-32_3rdparty_in1k + Metadata: + FLOPs: 19623051264 + Parameters: 21110248 + In Collection: ConvMixer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.16 + Top 5 Accuracy: 95.08 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-768-32_3rdparty_10xb64_in1k_20220323-bca1f7b8.pth + Config: configs/convmixer/convmixer-768-32_10xb64_in1k.py + Converted From: + Weights: https://github.com/tmp-iclr/convmixer/releases/download/v1.0/convmixer_768_32_ks7_p7_relu.pth.tar + Code: https://github.com/locuslab/convmixer + - Name: convmixer-1024-20_3rdparty_in1k + Metadata: + FLOPs: 5550112768 + Parameters: 24383464 + In Collection: ConvMixer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.94 + Top 5 Accuracy: 93.36 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1024-20_3rdparty_10xb64_in1k_20220323-48f8aeba.pth + Config: configs/convmixer/convmixer-1024-20_10xb64_in1k.py + Converted From: + Weights: https://github.com/tmp-iclr/convmixer/releases/download/v1.0/convmixer_1024_20_ks9_p14.pth.tar + Code: https://github.com/locuslab/convmixer + - Name: convmixer-1536-20_3rdparty_in1k + Metadata: + FLOPs: 48713170944 + Parameters: 51625960 + In Collection: ConvMixer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.37 + Top 5 Accuracy: 95.61 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convmixer/convmixer-1536_20_3rdparty_10xb64_in1k_20220323-ea5786f3.pth + Config: configs/convmixer/convmixer-1536-20_10xb64_in1k.py + Converted From: + Weights: https://github.com/tmp-iclr/convmixer/releases/download/v1.0/convmixer_1536_20_ks9_p7.pth.tar + Code: https://github.com/locuslab/convmixer diff --git a/configs/convnext/README.md b/configs/convnext/README.md new file mode 100644 index 0000000..2e6e14c --- /dev/null +++ b/configs/convnext/README.md @@ -0,0 +1,123 @@ +# ConvNeXt + +> [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545v1) + + + +## Introduction + +**ConvNeXt** is initially described in [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545v1), which is a pure convolutional model (ConvNet), inspired by the design of Vision Transformers. The ConvNeXt has the pyramid structure and achieve competitive performance on various vision tasks, with simplicity and efficiency. + +
+
+## Abstract
+
+<details>
+
+<summary>Show the paper's abstract</summary>
+
+<br>
+The "Roaring 20s" of visual recognition began with the introduction of Vision Transformers (ViTs), which quickly superseded ConvNets as the state-of-the-art image classification model. A vanilla ViT, on the other hand, faces difficulties when applied to general computer vision tasks such as object detection and semantic segmentation. It is the hierarchical Transformers (e.g., Swin Transformers) that reintroduced several ConvNet priors, making Transformers practically viable as a generic vision backbone and demonstrating remarkable performance on a wide variety of vision tasks. However, the effectiveness of such hybrid approaches is still largely credited to the intrinsic superiority of Transformers, rather than the inherent inductive biases of convolutions. In this work, we reexamine the design spaces and test the limits of what a pure ConvNet can achieve. We gradually "modernize" a standard ResNet toward the design of a vision Transformer, and discover several key components that contribute to the performance difference along the way. The outcome of this exploration is a family of pure ConvNet models dubbed ConvNeXt. Constructed entirely from standard ConvNet modules, ConvNeXts compete favorably with Transformers in terms of accuracy and scalability, achieving 87.8% ImageNet top-1 accuracy and outperforming Swin Transformers on COCO detection and ADE20K segmentation, while maintaining the simplicity and efficiency of standard ConvNets. +
+</details>
+
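Every config in this folder follows the same pattern: it composes `_base_` fragments (model, dataset, schedule, runtime) and overrides only a handful of fields such as the batch size, learning rate, EMA hook and `auto_scale_lr`. A rough sketch of how such a config resolves, assuming `mmengine` is installed and the snippet is run from the repository root:

```python
from mmengine.config import Config

# `_base_` fragments are merged automatically when the file is loaded.
cfg = Config.fromfile('configs/convnext/convnext-tiny_32xb128_in1k.py')

# Fields overridden in this file take precedence over the inherited values.
print(cfg.train_dataloader.batch_size)    # 128
print(cfg.optim_wrapper.optimizer.lr)     # 0.004
print(cfg.auto_scale_lr.base_batch_size)  # 4096
```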
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('convnext-tiny_32xb128_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('convnext-tiny_32xb128_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/convnext/convnext-tiny_32xb128_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/convnext/convnext-tiny_32xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_32xb128_in1k_20221207-998cf3e9.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :--------------------------------- | :--------: | :-------: | :---------------------------------------: | :--------------------------------------------------------------------------------------------------------: | +| `convnext-base_3rdparty_in21k`\* | 88.59 | 15.36 | [config](convnext-base_32xb128_in21k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_in21k_20220124-13b83eec.pth) | +| `convnext-large_3rdparty_in21k`\* | 197.77 | 34.37 | [config](convnext-large_64xb64_in21k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_in21k_20220124-41b5a79f.pth) | +| `convnext-xlarge_3rdparty_in21k`\* | 350.20 | 60.93 | [config](convnext-xlarge_64xb64_in21k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_3rdparty_in21k_20220124-f909bad7.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :--------------------------------------------: | :------------------------------------------------------: | +| `convnext-tiny_32xb128_in1k` | From scratch | 28.59 | 4.46 | 82.14 | 96.06 | [config](convnext-tiny_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_32xb128_in1k_20221207-998cf3e9.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_32xb128_in1k_20221207-998cf3e9.json) | +| `convnext-tiny_32xb128-noema_in1k` | From scratch | 28.59 | 4.46 | 81.95 | 95.89 | [config](convnext-tiny_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_32xb128-noema_in1k_20221208-5d4509c7.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_32xb128_in1k_20221207-998cf3e9.json) | +| `convnext-tiny_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 28.59 | 4.46 | 82.90 | 96.62 | [config](convnext-tiny_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_in21k-pre_3rdparty_in1k_20221219-7501e534.pth) | +| `convnext-tiny_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 28.59 | 13.14 | 84.11 | 97.14 | [config](convnext-tiny_32xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_in21k-pre_3rdparty_in1k-384px_20221219-c1182362.pth) | +| `convnext-small_32xb128_in1k` | From scratch | 50.22 | 8.69 | 83.16 | 96.56 | [config](convnext-small_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_32xb128_in1k_20221207-4ab7052c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_32xb128_in1k_20221207-4ab7052c.json) | +| `convnext-small_32xb128-noema_in1k` | From scratch | 50.22 | 8.69 | 83.21 | 96.48 | [config](convnext-small_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_32xb128-noema_in1k_20221208-4a618995.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_32xb128_in1k_20221207-4ab7052c.json) | +| `convnext-small_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 50.22 | 8.69 | 84.59 | 97.41 | [config](convnext-small_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_in21k-pre_3rdparty_in1k_20221219-aeca4c93.pth) | +| `convnext-small_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 50.22 | 25.58 | 85.75 | 97.88 | [config](convnext-small_32xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_in21k-pre_3rdparty_in1k-384px_20221219-96f0bb87.pth) | +| `convnext-base_32xb128_in1k` | From scratch | 88.59 | 15.36 | 83.66 | 96.74 | [config](convnext-base_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_32xb128_in1k_20221207-fbdb5eb9.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_32xb128_in1k_20221207-fbdb5eb9.json) | +| `convnext-base_32xb128-noema_in1k` | From scratch | 88.59 | 15.36 | 83.64 | 96.61 | [config](convnext-base_32xb128_in1k.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_32xb128-noema_in1k_20221208-f8182678.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_32xb128_in1k_20221207-fbdb5eb9.json) | +| `convnext-base_3rdparty_in1k`\* | From scratch | 88.59 | 15.36 | 83.85 | 96.74 | [config](convnext-base_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128_in1k_20220124-d0915162.pth) | +| `convnext-base_3rdparty-noema_in1k`\* | From scratch | 88.59 | 15.36 | 83.71 | 96.60 | [config](convnext-base_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128-noema_in1k_20220222-dba4f95f.pth) | +| `convnext-base_3rdparty_in1k-384px`\* | From scratch | 88.59 | 45.21 | 85.10 | 97.34 | [config](convnext-base_32xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_in1k-384px_20221219-c8f1dc2b.pth) | +| `convnext-base_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 88.59 | 15.36 | 85.81 | 97.86 | [config](convnext-base_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_in21k-pre-3rdparty_32xb128_in1k_20220124-eb2d6ada.pth) | +| `convnext-base_in21k-pre-3rdparty_in1k-384px`\* | From scratch | 88.59 | 45.21 | 86.82 | 98.25 | [config](convnext-base_32xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_in21k-pre-3rdparty_in1k-384px_20221219-4570f792.pth) | +| `convnext-large_3rdparty_in1k`\* | From scratch | 197.77 | 34.37 | 84.30 | 96.89 | [config](convnext-large_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_64xb64_in1k_20220124-f8a0ded0.pth) | +| `convnext-large_3rdparty_in1k-384px`\* | From scratch | 197.77 | 101.10 | 85.50 | 97.59 | [config](convnext-large_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_in1k-384px_20221219-6dd29d10.pth) | +| `convnext-large_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 197.77 | 34.37 | 86.61 | 98.04 | [config](convnext-large_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_in21k-pre-3rdparty_64xb64_in1k_20220124-2412403d.pth) | +| `convnext-large_in21k-pre-3rdparty_in1k-384px`\* | From scratch | 197.77 | 101.10 | 87.46 | 98.37 | [config](convnext-large_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_in21k-pre-3rdparty_in1k-384px_20221219-6d38dd66.pth) | +| `convnext-xlarge_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 350.20 | 60.93 | 86.97 | 98.20 | [config](convnext-xlarge_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_in21k-pre-3rdparty_64xb64_in1k_20220124-76b6863d.pth) | +| `convnext-xlarge_in21k-pre-3rdparty_in1k-384px`\* | From scratch | 350.20 | 179.20 | 87.76 | 98.55 | [config](convnext-xlarge_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_in21k-pre-3rdparty_in1k-384px_20221219-b161bc14.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@Article{liu2022convnet, + author = {Zhuang Liu and Hanzi Mao and Chao-Yuan Wu and Christoph Feichtenhofer and Trevor Darrell and Saining Xie}, + title = {A ConvNet for the 2020s}, + journal = {arXiv preprint arXiv:2201.03545}, + year = {2022}, +} +``` diff --git a/configs/convnext/convnext-base_32xb128_in1k-384px.py b/configs/convnext/convnext-base_32xb128_in1k-384px.py new file mode 100644 index 0000000..6554694 --- /dev/null +++ b/configs/convnext/convnext-base_32xb128_in1k-384px.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/convnext/convnext-base.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=128) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/convnext-base_32xb128_in1k.py b/configs/convnext/convnext-base_32xb128_in1k.py new file mode 100644 index 0000000..5ae8ec4 --- /dev/null +++ b/configs/convnext/convnext-base_32xb128_in1k.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/convnext/convnext-base.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=128) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=None, +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/convnext-base_32xb128_in21k.py b/configs/convnext/convnext-base_32xb128_in21k.py new file mode 100644 index 0000000..c343526 --- /dev/null +++ b/configs/convnext/convnext-base_32xb128_in21k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/convnext/convnext-base.py', + '../_base_/datasets/imagenet21k_bs128.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# model setting +model = dict(head=dict(num_classes=21841)) + +# dataset setting +data_preprocessor = dict(num_classes=21841) +train_dataloader = dict(batch_size=128) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/convnext-large_64xb64_in1k-384px.py b/configs/convnext/convnext-large_64xb64_in1k-384px.py new file mode 100644 index 0000000..6698b9e --- /dev/null +++ b/configs/convnext/convnext-large_64xb64_in1k-384px.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/convnext/convnext-large.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=64) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (64 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/convnext-large_64xb64_in1k.py b/configs/convnext/convnext-large_64xb64_in1k.py new file mode 100644 index 0000000..8a78c58 --- /dev/null +++ b/configs/convnext/convnext-large_64xb64_in1k.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/convnext/convnext-large.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=64) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=None, +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (64 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/convnext-large_64xb64_in21k.py b/configs/convnext/convnext-large_64xb64_in21k.py new file mode 100644 index 0000000..420edab --- /dev/null +++ b/configs/convnext/convnext-large_64xb64_in21k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/convnext/convnext-base.py', + '../_base_/datasets/imagenet21k_bs128.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# model setting +model = dict(head=dict(num_classes=21841)) + +# dataset setting +data_preprocessor = dict(num_classes=21841) +train_dataloader = dict(batch_size=64) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/convnext-small_32xb128_in1k-384px.py b/configs/convnext/convnext-small_32xb128_in1k-384px.py new file mode 100644 index 0000000..729f00a --- /dev/null +++ b/configs/convnext/convnext-small_32xb128_in1k-384px.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/convnext/convnext-small.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=128) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/convnext-small_32xb128_in1k.py b/configs/convnext/convnext-small_32xb128_in1k.py new file mode 100644 index 0000000..b623e90 --- /dev/null +++ b/configs/convnext/convnext-small_32xb128_in1k.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/convnext/convnext-small.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=128) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=None, +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/convnext-tiny_32xb128_in1k-384px.py b/configs/convnext/convnext-tiny_32xb128_in1k-384px.py new file mode 100644 index 0000000..6513ad8 --- /dev/null +++ b/configs/convnext/convnext-tiny_32xb128_in1k-384px.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/convnext/convnext-tiny.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=128) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/convnext-tiny_32xb128_in1k.py b/configs/convnext/convnext-tiny_32xb128_in1k.py new file mode 100644 index 0000000..59d3004 --- /dev/null +++ b/configs/convnext/convnext-tiny_32xb128_in1k.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/convnext/convnext-tiny.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=128) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=None, +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/convnext-xlarge_64xb64_in1k-384px.py b/configs/convnext/convnext-xlarge_64xb64_in1k-384px.py new file mode 100644 index 0000000..6edc94d --- /dev/null +++ b/configs/convnext/convnext-xlarge_64xb64_in1k-384px.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/convnext/convnext-xlarge.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=64) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (64 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/convnext-xlarge_64xb64_in1k.py b/configs/convnext/convnext-xlarge_64xb64_in1k.py new file mode 100644 index 0000000..528894e --- /dev/null +++ b/configs/convnext/convnext-xlarge_64xb64_in1k.py @@ -0,0 +1,23 @@ +_base_ = [ + '../_base_/models/convnext/convnext-xlarge.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=64) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=None, +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (64 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/convnext-xlarge_64xb64_in21k.py b/configs/convnext/convnext-xlarge_64xb64_in21k.py new file mode 100644 index 0000000..420edab --- /dev/null +++ b/configs/convnext/convnext-xlarge_64xb64_in21k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/convnext/convnext-base.py', + '../_base_/datasets/imagenet21k_bs128.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# model setting +model = dict(head=dict(num_classes=21841)) + +# dataset setting +data_preprocessor = dict(num_classes=21841) +train_dataloader = dict(batch_size=64) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/convnext/metafile.yml b/configs/convnext/metafile.yml new file mode 100644 index 0000000..1689662 --- /dev/null +++ b/configs/convnext/metafile.yml @@ -0,0 +1,410 @@ +Collections: + - Name: ConvNeXt + Metadata: + Training Data: ImageNet-1k + Architecture: + - 1x1 Convolution + - LayerScale + Paper: + URL: https://arxiv.org/abs/2201.03545v1 + Title: A ConvNet for the 2020s + README: configs/convnext/README.md + Code: + Version: v0.20.1 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.20.1/mmcls/models/backbones/convnext.py + +Models: + - Name: convnext-tiny_32xb128_in1k + Metadata: + FLOPs: 4457472768 + Parameters: 28589128 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.14 + Top 5 Accuracy: 96.06 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_32xb128_in1k_20221207-998cf3e9.pth + Config: configs/convnext/convnext-tiny_32xb128_in1k.py + - Name: convnext-tiny_32xb128-noema_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 4457472768 + Parameters: 28589128 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.95 + Top 5 Accuracy: 95.89 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_32xb128-noema_in1k_20221208-5d4509c7.pth + Config: configs/convnext/convnext-tiny_32xb128_in1k.py + - Name: convnext-tiny_in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 4457472768 + Parameters: 28589128 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.90 + Top 5 Accuracy: 96.62 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_in21k-pre_3rdparty_in1k_20221219-7501e534.pth + Config: configs/convnext/convnext-tiny_32xb128_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-tiny_in21k-pre_3rdparty_in1k-384px + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 13135236864 + Parameters: 28589128 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.11 + Top 5 Accuracy: 97.14 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_in21k-pre_3rdparty_in1k-384px_20221219-c1182362.pth + 
Config: configs/convnext/convnext-tiny_32xb128_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_tiny_22k_1k_384.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-small_32xb128_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 8687008512 + Parameters: 50223688 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.16 + Top 5 Accuracy: 96.56 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_32xb128_in1k_20221207-4ab7052c.pth + Config: configs/convnext/convnext-small_32xb128_in1k.py + - Name: convnext-small_32xb128-noema_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 8687008512 + Parameters: 50223688 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.21 + Top 5 Accuracy: 96.48 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_32xb128-noema_in1k_20221208-4a618995.pth + Config: configs/convnext/convnext-small_32xb128_in1k.py + - Name: convnext-small_in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 8687008512 + Parameters: 50223688 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.59 + Top 5 Accuracy: 97.41 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_in21k-pre_3rdparty_in1k_20221219-aeca4c93.pth + Config: configs/convnext/convnext-small_32xb128_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-small_in21k-pre_3rdparty_in1k-384px + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 25580818176 + Parameters: 50223688 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.75 + Top 5 Accuracy: 97.88 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_in21k-pre_3rdparty_in1k-384px_20221219-96f0bb87.pth + Config: configs/convnext/convnext-small_32xb128_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_small_22k_1k_384.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-base_32xb128_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 15359124480 + Parameters: 88591464 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.66 + Top 5 Accuracy: 96.74 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_32xb128_in1k_20221207-fbdb5eb9.pth + Config: configs/convnext/convnext-base_32xb128_in1k.py + - Name: convnext-base_32xb128-noema_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 15359124480 + Parameters: 88591464 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.64 + Top 5 Accuracy: 96.61 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_32xb128-noema_in1k_20221208-f8182678.pth + Config: configs/convnext/convnext-base_32xb128_in1k.py + - Name: convnext-base_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 15359124480 + Parameters: 88591464 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 
Accuracy: 83.85 + Top 5 Accuracy: 96.74 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128_in1k_20220124-d0915162.pth + Config: configs/convnext/convnext-base_32xb128_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-base_3rdparty-noema_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 15359124480 + Parameters: 88591464 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.71 + Top 5 Accuracy: 96.60 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128-noema_in1k_20220222-dba4f95f.pth + Config: configs/convnext/convnext-base_32xb128_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-base_3rdparty_in1k-384px + Metadata: + Training Data: ImageNet-1k + FLOPs: 45205885952 + Parameters: 88591464 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.10 + Top 5 Accuracy: 97.34 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_in1k-384px_20221219-c8f1dc2b.pth + Config: configs/convnext/convnext-base_32xb128_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_384.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-base_3rdparty_in21k + Metadata: + Training Data: ImageNet-21k + FLOPs: 15359124480 + Parameters: 88591464 + In Collection: ConvNeXt + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_in21k_20220124-13b83eec.pth + Config: configs/convnext/convnext-base_32xb128_in21k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-base_in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 15359124480 + Parameters: 88591464 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.81 + Top 5 Accuracy: 97.86 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_in21k-pre-3rdparty_32xb128_in1k_20220124-eb2d6ada.pth + Config: configs/convnext/convnext-base_32xb128_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-base_in21k-pre-3rdparty_in1k-384px + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 45205885952 + Parameters: 88591464 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.82 + Top 5 Accuracy: 98.25 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_in21k-pre-3rdparty_in1k-384px_20221219-4570f792.pth + Config: configs/convnext/convnext-base_32xb128_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_1k_384.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-large_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 34368026112 + 
Parameters: 197767336 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.30 + Top 5 Accuracy: 96.89 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_64xb64_in1k_20220124-f8a0ded0.pth + Config: configs/convnext/convnext-large_64xb64_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-large_3rdparty_in1k-384px + Metadata: + Training Data: ImageNet-1k + FLOPs: 101103214080 + Parameters: 197767336 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.50 + Top 5 Accuracy: 97.59 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_in1k-384px_20221219-6dd29d10.pth + Config: configs/convnext/convnext-large_64xb64_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_384.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-large_3rdparty_in21k + Metadata: + Training Data: ImageNet-21k + FLOPs: 34368026112 + Parameters: 197767336 + In Collection: ConvNeXt + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_3rdparty_in21k_20220124-41b5a79f.pth + Config: configs/convnext/convnext-large_64xb64_in21k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-large_in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 34368026112 + Parameters: 197767336 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.61 + Top 5 Accuracy: 98.04 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_in21k-pre-3rdparty_64xb64_in1k_20220124-2412403d.pth + Config: configs/convnext/convnext-large_64xb64_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-large_in21k-pre-3rdparty_in1k-384px + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 101103214080 + Parameters: 197767336 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.46 + Top 5 Accuracy: 98.37 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_in21k-pre-3rdparty_in1k-384px_20221219-6d38dd66.pth + Config: configs/convnext/convnext-large_64xb64_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_1k_384.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-xlarge_3rdparty_in21k + Metadata: + Training Data: ImageNet-21k + FLOPs: 60929820672 + Parameters: 350196968 + In Collection: ConvNeXt + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_3rdparty_in21k_20220124-f909bad7.pth + Config: configs/convnext/convnext-xlarge_64xb64_in21k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-xlarge_in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - 
ImageNet-1k + FLOPs: 60929820672 + Parameters: 350196968 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.97 + Top 5 Accuracy: 98.20 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_in21k-pre-3rdparty_64xb64_in1k_20220124-76b6863d.pth + Config: configs/convnext/convnext-xlarge_64xb64_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_224_ema.pth + Code: https://github.com/facebookresearch/ConvNeXt + - Name: convnext-xlarge_in21k-pre-3rdparty_in1k-384px + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 179196798976 + Parameters: 350196968 + In Collection: ConvNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.76 + Top 5 Accuracy: 98.55 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_in21k-pre-3rdparty_in1k-384px_20221219-b161bc14.pth + Config: configs/convnext/convnext-xlarge_64xb64_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_1k_384_ema.pth + Code: https://github.com/facebookresearch/ConvNeXt diff --git a/configs/convnext_v2/README.md b/configs/convnext_v2/README.md new file mode 100644 index 0000000..e561387 --- /dev/null +++ b/configs/convnext_v2/README.md @@ -0,0 +1,107 @@ +# ConvNeXt V2 + +> [Co-designing and Scaling ConvNets with Masked Autoencoders](http://arxiv.org/abs/2301.00808) + + + +## Abstract + +Driven by improved architectures and better representation learning frameworks, the field of visual recognition has enjoyed rapid modernization and performance boost in the early 2020s. For example, modern ConvNets, represented by ConvNeXt, have demonstrated strong performance in various scenarios. While these models were originally designed for supervised learning with ImageNet labels, they can also potentially benefit from self-supervised learning techniques such as masked autoencoders (MAE). However, we found that simply combining these two approaches leads to subpar performance. In this paper, we propose a fully convolutional masked autoencoder framework and a new Global Response Normalization (GRN) layer that can be added to the ConvNeXt architecture to enhance inter-channel feature competition. This co-design of self-supervised learning techniques and architectural improvement results in a new model family called ConvNeXt V2, which significantly improves the performance of pure ConvNets on various recognition benchmarks, including ImageNet classification, COCO detection, and ADE20K segmentation. We also provide pre-trained ConvNeXt V2 models of various sizes, ranging from an efficient 3.7M-parameter Atto model with 76.7% top-1 accuracy on ImageNet, to a 650M Huge model that achieves a state-of-the-art 88.9% accuracy using only public training data. + +
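Since the Global Response Normalization (GRN) layer is the key architectural change described above, here is a minimal PyTorch sketch of GRN following the paper's formulation (global L2 aggregation per channel, divisive normalization across channels, then a learnable affine transform with a residual connection). The code is illustrative only, assumes channels-last inputs, and is not the layer implemented in mmpretrain.

```python
import torch
import torch.nn as nn


class GRN(nn.Module):
    """Illustrative Global Response Normalization for channels-last tensors."""

    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
        self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (N, H, W, C). Aggregate a global response per channel (L2 over H, W).
        gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True)      # (N, 1, 1, C)
        # Normalize each channel's response by the mean response across channels.
        nx = gx / (gx.mean(dim=-1, keepdim=True) + self.eps)    # (N, 1, 1, C)
        # Calibrate the features and keep an identity (residual) path.
        return self.gamma * (x * nx) + self.beta + x


grn = GRN(dim=64)
print(grn(torch.rand(2, 14, 14, 64)).shape)  # torch.Size([2, 14, 14, 64])
```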
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('convnext-v2-atto_fcmae-pre_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('convnext-v2-atto_3rdparty-fcmae_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/convnext_v2/convnext-v2-atto_32xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-atto_fcmae-pre_3rdparty_in1k_20230104-23765f83.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :---------------------------------------- | :--------: | :-------: | :----------------------------------------: | :------------------------------------------------------------------------------------------------: | +| `convnext-v2-atto_3rdparty-fcmae_in1k`\* | 3.71 | 0.55 | [config](convnext-v2-atto_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-atto_3rdparty-fcmae_in1k_20230104-07514db4.pth) | +| `convnext-v2-femto_3rdparty-fcmae_in1k`\* | 5.23 | 0.78 | [config](convnext-v2-femto_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-femto_3rdparty-fcmae_in1k_20230104-adbe2082.pth) | +| `convnext-v2-pico_3rdparty-fcmae_in1k`\* | 9.07 | 1.37 | [config](convnext-v2-pico_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-pico_3rdparty-fcmae_in1k_20230104-147b1b59.pth) | +| `convnext-v2-nano_3rdparty-fcmae_in1k`\* | 15.62 | 2.45 | [config](convnext-v2-nano_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-nano_3rdparty-fcmae_in1k_20230104-3dd1f29e.pth) | +| `convnext-v2-tiny_3rdparty-fcmae_in1k`\* | 28.64 | 4.47 | [config](convnext-v2-tiny_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-tiny_3rdparty-fcmae_in1k_20230104-80513adc.pth) | +| `convnext-v2-base_3rdparty-fcmae_in1k`\* | 88.72 | 15.38 | [config](convnext-v2-base_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_3rdparty-fcmae_in1k_20230104-8a798eaf.pth) | +| `convnext-v2-large_3rdparty-fcmae_in1k`\* | 197.96 | 34.40 | [config](convnext-v2-large_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-large_3rdparty-fcmae_in1k_20230104-bf38df92.pth) | +| `convnext-v2-huge_3rdparty-fcmae_in1k`\* | 660.29 | 115.00 | [config](convnext-v2-huge_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-huge_3rdparty-fcmae_in1k_20230104-fe43ae6c.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt-V2). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------------------------- | :----------------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------------------: | :------------------------------------------------: | +| `convnext-v2-atto_fcmae-pre_3rdparty_in1k`\* | FCMAE | 3.71 | 0.55 | 76.64 | 93.04 | [config](convnext-v2-atto_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-atto_fcmae-pre_3rdparty_in1k_20230104-23765f83.pth) | +| `convnext-v2-femto_fcmae-pre_3rdparty_in1k`\* | FCMAE | 5.23 | 0.78 | 78.48 | 93.98 | [config](convnext-v2-femto_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-femto_fcmae-pre_3rdparty_in1k_20230104-92a75d75.pth) | +| `convnext-v2-pico_fcmae-pre_3rdparty_in1k`\* | FCMAE | 9.07 | 1.37 | 80.31 | 95.08 | [config](convnext-v2-pico_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-pico_fcmae-pre_3rdparty_in1k_20230104-d20263ca.pth) | +| `convnext-v2-nano_fcmae-pre_3rdparty_in1k`\* | FCMAE | 15.62 | 2.45 | 81.86 | 95.75 | [config](convnext-v2-nano_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-nano_fcmae-pre_3rdparty_in1k_20230104-fe1aaaf2.pth) | +| `convnext-v2-nano_fcmae-in21k-pre_3rdparty_in1k`\* | FCMAE ImageNet-21k | 15.62 | 2.45 | 82.04 | 96.16 | [config](convnext-v2-nano_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-nano_fcmae-in21k-pre_3rdparty_in1k_20230104-91fa8ae2.pth) | +| `convnext-v2-tiny_fcmae-pre_3rdparty_in1k`\* | FCMAE | 28.64 | 4.47 | 82.94 | 96.29 | [config](convnext-v2-tiny_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-tiny_fcmae-pre_3rdparty_in1k_20230104-471a86de.pth) | +| `convnext-v2-tiny_fcmae-in21k-pre_3rdparty_in1k`\* | FCMAE ImageNet-21k | 28.64 | 4.47 | 83.89 | 96.96 | [config](convnext-v2-tiny_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-tiny_fcmae-in21k-pre_3rdparty_in1k_20230104-8cc8b8f2.pth) | +| `convnext-v2-nano_fcmae-in21k-pre_3rdparty_in1k-384px`\* | FCMAE ImageNet-21k | 15.62 | 7.21 | 83.36 | 96.75 | [config](convnext-v2-nano_32xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-nano_fcmae-in21k-pre_3rdparty_in1k-384px_20230104-f951ae87.pth) | +| `convnext-v2-tiny_fcmae-in21k-pre_3rdparty_in1k-384px`\* | FCMAE ImageNet-21k | 28.64 | 13.14 | 85.09 | 97.63 | [config](convnext-v2-tiny_32xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-tiny_fcmae-in21k-pre_3rdparty_in1k-384px_20230104-d8579f84.pth) | +| `convnext-v2-base_fcmae-pre_3rdparty_in1k`\* | FCMAE | 88.72 | 15.38 | 84.87 | 97.08 | [config](convnext-v2-base_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_fcmae-pre_3rdparty_in1k_20230104-00a70fa4.pth) | +| `convnext-v2-base_fcmae-in21k-pre_3rdparty_in1k`\* | FCMAE ImageNet-21k | 88.72 | 15.38 | 86.74 | 98.02 | [config](convnext-v2-base_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_fcmae-in21k-pre_3rdparty_in1k_20230104-c48d16a5.pth) | +| 
`convnext-v2-large_fcmae-pre_3rdparty_in1k`\* | FCMAE | 197.96 | 34.40 | 85.76 | 97.59 | [config](convnext-v2-large_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-large_fcmae-pre_3rdparty_in1k_20230104-ef393013.pth) | +| `convnext-v2-large_fcmae-in21k-pre_3rdparty_in1k`\* | FCMAE ImageNet-21k | 197.96 | 34.40 | 87.26 | 98.24 | [config](convnext-v2-large_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-large_fcmae-in21k-pre_3rdparty_in1k_20230104-d9c4dc0c.pth) | +| `convnext-v2-base_fcmae-in21k-pre_3rdparty_in1k-384px`\* | FCMAE ImageNet-21k | 88.72 | 45.21 | 87.63 | 98.42 | [config](convnext-v2-base_32xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_fcmae-in21k-pre_3rdparty_in1k-384px_20230104-379425cc.pth) | +| `convnext-v2-large_fcmae-in21k-pre_3rdparty_in1k-384px`\* | FCMAE ImageNet-21k | 197.96 | 101.10 | 88.18 | 98.52 | [config](convnext-v2-large_32xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-large_fcmae-in21k-pre_3rdparty_in1k-384px_20230104-9139a1f3.pth) | +| `convnext-v2-huge_fcmae-pre_3rdparty_in1k`\* | FCMAE | 660.29 | 115.00 | 86.25 | 97.75 | [config](convnext-v2-huge_32xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-huge_fcmae-pre_3rdparty_in1k_20230104-f795e5b8.pth) | +| `convnext-v2-huge_fcmae-in21k-pre_3rdparty_in1k-384px`\* | FCMAE ImageNet-21k | 660.29 | 337.96 | 88.68 | 98.73 | [config](convnext-v2-huge_32xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-huge_fcmae-in21k-pre_3rdparty_in1k-384px_20230104-02a4eb35.pth) | +| `convnext-v2-huge_fcmae-in21k-pre_3rdparty_in1k-512px`\* | FCMAE ImageNet-21k | 660.29 | 600.81 | 88.86 | 98.74 | [config](convnext-v2-huge_32xb32_in1k-512px.py) | [model](https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-huge_fcmae-in21k-pre_3rdparty_in1k-512px_20230104-ce32e63c.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt-V2). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{Woo2023ConvNeXtV2, + title={ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders}, + author={Sanghyun Woo, Shoubhik Debnath, Ronghang Hu, Xinlei Chen, Zhuang Liu, In So Kweon and Saining Xie}, + year={2023}, + journal={arXiv preprint arXiv:2301.00808}, +} +``` diff --git a/configs/convnext_v2/convnext-v2-atto_32xb32_in1k.py b/configs/convnext_v2/convnext-v2-atto_32xb32_in1k.py new file mode 100644 index 0000000..68f34c9 --- /dev/null +++ b/configs/convnext_v2/convnext-v2-atto_32xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/convnext_v2/atto.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=8e-4, weight_decay=0.3), + clip_grad=None, +) + +# learning policy +param_scheduler = [dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True)] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git a/configs/convnext_v2/convnext-v2-base_32xb32_in1k-384px.py b/configs/convnext_v2/convnext-v2-base_32xb32_in1k-384px.py new file mode 100644 index 0000000..70b7f18 --- /dev/null +++ b/configs/convnext_v2/convnext-v2-base_32xb32_in1k-384px.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/convnext_v2/base.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=2.5e-3), + clip_grad=None, +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=20, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=20) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git a/configs/convnext_v2/convnext-v2-base_32xb32_in1k.py b/configs/convnext_v2/convnext-v2-base_32xb32_in1k.py new file mode 100644 index 0000000..b66b375 --- /dev/null +++ b/configs/convnext_v2/convnext-v2-base_32xb32_in1k.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/convnext_v2/base.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=2.5e-3), + clip_grad=None, +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=20, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=20) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git 
a/configs/convnext_v2/convnext-v2-femto_32xb32_in1k.py b/configs/convnext_v2/convnext-v2-femto_32xb32_in1k.py new file mode 100644 index 0000000..053e194 --- /dev/null +++ b/configs/convnext_v2/convnext-v2-femto_32xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/convnext_v2/femto.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=8e-4, weight_decay=0.3), + clip_grad=None, +) + +# learning policy +param_scheduler = [dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True)] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git a/configs/convnext_v2/convnext-v2-huge_32xb32_in1k-384px.py b/configs/convnext_v2/convnext-v2-huge_32xb32_in1k-384px.py new file mode 100644 index 0000000..b734b27 --- /dev/null +++ b/configs/convnext_v2/convnext-v2-huge_32xb32_in1k-384px.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/convnext_v2/huge.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=2.5e-3), + clip_grad=None, +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=20, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=20) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git a/configs/convnext_v2/convnext-v2-huge_32xb32_in1k-512px.py b/configs/convnext_v2/convnext-v2-huge_32xb32_in1k-512px.py new file mode 100644 index 0000000..7c63b02 --- /dev/null +++ b/configs/convnext_v2/convnext-v2-huge_32xb32_in1k-512px.py @@ -0,0 +1,54 @@ +_base_ = [ + '../_base_/models/convnext_v2/huge.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=512, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=512, backend='pillow', interpolation='bicubic'), + dict(type='PackInputs'), +] + +train_dataloader = dict(batch_size=32, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=2.5e-3), + clip_grad=None, +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=20, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=20) +] + +# train, 
val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git a/configs/convnext_v2/convnext-v2-huge_32xb32_in1k.py b/configs/convnext_v2/convnext-v2-huge_32xb32_in1k.py new file mode 100644 index 0000000..18621f3 --- /dev/null +++ b/configs/convnext_v2/convnext-v2-huge_32xb32_in1k.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/convnext_v2/huge.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=2.5e-3), + clip_grad=None, +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=20, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=20) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git a/configs/convnext_v2/convnext-v2-large_32xb32_in1k-384px.py b/configs/convnext_v2/convnext-v2-large_32xb32_in1k-384px.py new file mode 100644 index 0000000..b08b12e --- /dev/null +++ b/configs/convnext_v2/convnext-v2-large_32xb32_in1k-384px.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/convnext_v2/large.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=2.5e-3), + clip_grad=None, +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=20, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=20) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git a/configs/convnext_v2/convnext-v2-large_32xb32_in1k.py b/configs/convnext_v2/convnext-v2-large_32xb32_in1k.py new file mode 100644 index 0000000..e9695d0 --- /dev/null +++ b/configs/convnext_v2/convnext-v2-large_32xb32_in1k.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/convnext_v2/large.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=2.5e-3), + clip_grad=None, +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=20, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=20) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git 
a/configs/convnext_v2/convnext-v2-nano_32xb32_in1k-384px.py b/configs/convnext_v2/convnext-v2-nano_32xb32_in1k-384px.py new file mode 100644 index 0000000..a9b36dc --- /dev/null +++ b/configs/convnext_v2/convnext-v2-nano_32xb32_in1k-384px.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/convnext_v2/nano.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=8e-4, weight_decay=0.3), + clip_grad=None, +) + +# learning policy +param_scheduler = [dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True)] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git a/configs/convnext_v2/convnext-v2-nano_32xb32_in1k.py b/configs/convnext_v2/convnext-v2-nano_32xb32_in1k.py new file mode 100644 index 0000000..9a7c9e3 --- /dev/null +++ b/configs/convnext_v2/convnext-v2-nano_32xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/convnext_v2/nano.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=8e-4, weight_decay=0.3), + clip_grad=None, +) + +# learning policy +param_scheduler = [dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True)] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git a/configs/convnext_v2/convnext-v2-pico_32xb32_in1k.py b/configs/convnext_v2/convnext-v2-pico_32xb32_in1k.py new file mode 100644 index 0000000..e2cc52f --- /dev/null +++ b/configs/convnext_v2/convnext-v2-pico_32xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/convnext_v2/pico.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=8e-4, weight_decay=0.3), + clip_grad=None, +) + +# learning policy +param_scheduler = [dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True)] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git a/configs/convnext_v2/convnext-v2-tiny_32xb32_in1k-384px.py b/configs/convnext_v2/convnext-v2-tiny_32xb32_in1k-384px.py new file mode 100644 index 0000000..a19fd6c --- /dev/null +++ b/configs/convnext_v2/convnext-v2-tiny_32xb32_in1k-384px.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/convnext_v2/tiny.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=3.2e-3), + clip_grad=None, +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=40, + # update by iter + 
convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=40) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git a/configs/convnext_v2/convnext-v2-tiny_32xb32_in1k.py b/configs/convnext_v2/convnext-v2-tiny_32xb32_in1k.py new file mode 100644 index 0000000..c6fbd0f --- /dev/null +++ b/configs/convnext_v2/convnext-v2-tiny_32xb32_in1k.py @@ -0,0 +1,35 @@ +_base_ = [ + '../_base_/models/convnext_v2/tiny.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=3.2e-3), + clip_grad=None, +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=40, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=40) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=1e-4, priority='ABOVE_NORMAL')] diff --git a/configs/convnext_v2/metafile.yml b/configs/convnext_v2/metafile.yml new file mode 100644 index 0000000..86baa58 --- /dev/null +++ b/configs/convnext_v2/metafile.yml @@ -0,0 +1,433 @@ +Collections: + - Name: ConvNeXt V2 + Metadata: + Architecture: + - Global Response Normalization + Paper: + Title: Co-designing and Scaling ConvNets with Masked Autoencoders + URL: http://arxiv.org/abs/2301.00808 + README: configs/convnext_v2/README.md + +Models: + - Name: convnext-v2-atto_3rdparty-fcmae_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 551718080 + Parameters: 3708400 + In Collection: ConvNeXt V2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-atto_3rdparty-fcmae_in1k_20230104-07514db4.pth + Config: configs/convnext_v2/convnext-v2-atto_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_atto_1k_224_fcmae.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-atto_fcmae-pre_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 551718080 + Parameters: 3708400 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.64 + Top 5 Accuracy: 93.04 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-atto_fcmae-pre_3rdparty_in1k_20230104-23765f83.pth + Config: configs/convnext_v2/convnext-v2-atto_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_atto_1k_224_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-femto_3rdparty-fcmae_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 784892544 + Parameters: 5233240 + In Collection: ConvNeXt V2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-femto_3rdparty-fcmae_in1k_20230104-adbe2082.pth + Config: configs/convnext_v2/convnext-v2-femto_32xb32_in1k.py + Converted From: + Weights: 
https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_femto_1k_224_fcmae.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-femto_fcmae-pre_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 784892544 + Parameters: 5233240 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.48 + Top 5 Accuracy: 93.98 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-femto_fcmae-pre_3rdparty_in1k_20230104-92a75d75.pth + Config: configs/convnext_v2/convnext-v2-femto_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_femto_1k_224_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-pico_3rdparty-fcmae_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 1374072320 + Parameters: 9066280 + In Collection: ConvNeXt V2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-pico_3rdparty-fcmae_in1k_20230104-147b1b59.pth + Config: configs/convnext_v2/convnext-v2-pico_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_pico_1k_224_fcmae.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-pico_fcmae-pre_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 1374072320 + Parameters: 9066280 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.31 + Top 5 Accuracy: 95.08 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-pico_fcmae-pre_3rdparty_in1k_20230104-d20263ca.pth + Config: configs/convnext_v2/convnext-v2-pico_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_pico_1k_224_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-nano_3rdparty-fcmae_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 2454926720 + Parameters: 15623800 + In Collection: ConvNeXt V2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-nano_3rdparty-fcmae_in1k_20230104-3dd1f29e.pth + Config: configs/convnext_v2/convnext-v2-nano_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_nano_1k_224_fcmae.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-nano_fcmae-pre_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 2454926720 + Parameters: 15623800 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.86 + Top 5 Accuracy: 95.75 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-nano_fcmae-pre_3rdparty_in1k_20230104-fe1aaaf2.pth + Config: configs/convnext_v2/convnext-v2-nano_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_nano_1k_224_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-nano_fcmae-in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 2454926720 + Parameters: 15623800 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.04 + Top 5 Accuracy: 96.16 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-nano_fcmae-in21k-pre_3rdparty_in1k_20230104-91fa8ae2.pth + Config: configs/convnext_v2/convnext-v2-nano_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_224_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-tiny_3rdparty-fcmae_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 4469631744 + Parameters: 28635496 + In Collection: ConvNeXt V2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-tiny_3rdparty-fcmae_in1k_20230104-80513adc.pth + Config: configs/convnext_v2/convnext-v2-tiny_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_tiny_1k_224_fcmae.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-tiny_fcmae-pre_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 4469631744 + Parameters: 28635496 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.94 + Top 5 Accuracy: 96.29 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-tiny_fcmae-pre_3rdparty_in1k_20230104-471a86de.pth + Config: configs/convnext_v2/convnext-v2-tiny_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_tiny_1k_224_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-tiny_fcmae-in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 4469631744 + Parameters: 28635496 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.89 + Top 5 Accuracy: 96.96 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-tiny_fcmae-in21k-pre_3rdparty_in1k_20230104-8cc8b8f2.pth + Config: configs/convnext_v2/convnext-v2-tiny_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_224_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-nano_fcmae-in21k-pre_3rdparty_in1k-384px + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 7214472320 + Parameters: 15623800 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.36 + Top 5 Accuracy: 96.75 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-nano_fcmae-in21k-pre_3rdparty_in1k-384px_20230104-f951ae87.pth + Config: configs/convnext_v2/convnext-v2-nano_32xb32_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_nano_22k_384_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-tiny_fcmae-in21k-pre_3rdparty_in1k-384px + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 13135236864 + Parameters: 28635496 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.09 + Top 5 Accuracy: 97.63 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-tiny_fcmae-in21k-pre_3rdparty_in1k-384px_20230104-d8579f84.pth + Config: configs/convnext_v2/convnext-v2-tiny_32xb32_in1k-384px.py + Converted From: + Weights: 
https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_tiny_22k_384_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-base_3rdparty-fcmae_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 15382561792 + Parameters: 88717800 + In Collection: ConvNeXt V2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_3rdparty-fcmae_in1k_20230104-8a798eaf.pth + Config: configs/convnext_v2/convnext-v2-base_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_base_1k_224_fcmae.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-base_fcmae-pre_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 15382561792 + Parameters: 88717800 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.87 + Top 5 Accuracy: 97.08 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_fcmae-pre_3rdparty_in1k_20230104-00a70fa4.pth + Config: configs/convnext_v2/convnext-v2-base_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_base_1k_224_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-base_fcmae-in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 15382561792 + Parameters: 88717800 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.74 + Top 5 Accuracy: 98.02 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_fcmae-in21k-pre_3rdparty_in1k_20230104-c48d16a5.pth + Config: configs/convnext_v2/convnext-v2-base_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_224_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-large_3rdparty-fcmae_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 34403182080 + Parameters: 197956840 + In Collection: ConvNeXt V2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-large_3rdparty-fcmae_in1k_20230104-bf38df92.pth + Config: configs/convnext_v2/convnext-v2-large_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_large_1k_224_fcmae.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-large_fcmae-pre_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 34403182080 + Parameters: 197956840 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.76 + Top 5 Accuracy: 97.59 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-large_fcmae-pre_3rdparty_in1k_20230104-ef393013.pth + Config: configs/convnext_v2/convnext-v2-large_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_large_1k_224_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-large_fcmae-in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 34403182080 + Parameters: 197956840 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.26 + Top 5 Accuracy: 98.24 + 
Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-large_fcmae-in21k-pre_3rdparty_in1k_20230104-d9c4dc0c.pth + Config: configs/convnext_v2/convnext-v2-large_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_224_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-base_fcmae-in21k-pre_3rdparty_in1k-384px + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 45205885952 + Parameters: 88717800 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.63 + Top 5 Accuracy: 98.42 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-base_fcmae-in21k-pre_3rdparty_in1k-384px_20230104-379425cc.pth + Config: configs/convnext_v2/convnext-v2-base_32xb32_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_base_22k_384_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-large_fcmae-in21k-pre_3rdparty_in1k-384px + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 101103214080 + Parameters: 197956840 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 88.18 + Top 5 Accuracy: 98.52 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-large_fcmae-in21k-pre_3rdparty_in1k-384px_20230104-9139a1f3.pth + Config: configs/convnext_v2/convnext-v2-large_32xb32_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_large_22k_384_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-huge_3rdparty-fcmae_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 114998639360 + Parameters: 660289640 + In Collection: ConvNeXt V2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-huge_3rdparty-fcmae_in1k_20230104-fe43ae6c.pth + Config: configs/convnext_v2/convnext-v2-huge_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/pt_only/convnextv2_huge_1k_224_fcmae.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-huge_fcmae-pre_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 114998639360 + Parameters: 660289640 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.25 + Top 5 Accuracy: 97.75 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-huge_fcmae-pre_3rdparty_in1k_20230104-f795e5b8.pth + Config: configs/convnext_v2/convnext-v2-huge_32xb32_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im1k/convnextv2_huge_1k_224_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-huge_fcmae-in21k-pre_3rdparty_in1k-384px + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 337955157760 + Parameters: 660289640 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 88.68 + Top 5 Accuracy: 98.73 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-huge_fcmae-in21k-pre_3rdparty_in1k-384px_20230104-02a4eb35.pth + Config: 
configs/convnext_v2/convnext-v2-huge_32xb32_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_384_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 + - Name: convnext-v2-huge_fcmae-in21k-pre_3rdparty_in1k-512px + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 600809158400 + Parameters: 660289640 + In Collection: ConvNeXt V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 88.86 + Top 5 Accuracy: 98.74 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/convnext-v2/convnext-v2-huge_fcmae-in21k-pre_3rdparty_in1k-512px_20230104-ce32e63c.pth + Config: configs/convnext_v2/convnext-v2-huge_32xb32_in1k-512px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/convnext/convnextv2/im22k/convnextv2_huge_22k_512_ema.pt + Code: https://github.com/facebookresearch/ConvNeXt-V2 diff --git a/configs/cspnet/README.md b/configs/cspnet/README.md new file mode 100644 index 0000000..f3b145b --- /dev/null +++ b/configs/cspnet/README.md @@ -0,0 +1,78 @@ +# CSPNet + +> [CSPNet: A New Backbone that can Enhance Learning Capability of CNN](https://arxiv.org/abs/1911.11929) + + + +## Abstract + +Neural networks have enabled state-of-the-art approaches to achieve incredible results on computer vision tasks such as object detection. However, such success greatly relies on costly computation resources, which hinders people with cheap devices from appreciating the advanced technology. In this paper, we propose Cross Stage Partial Network (CSPNet) to mitigate the problem that previous works require heavy inference computations from the network architecture perspective. We attribute the problem to the duplicate gradient information within network optimization. The proposed networks respect the variability of the gradients by integrating feature maps from the beginning and the end of a network stage, which, in our experiments, reduces computations by 20% with equivalent or even superior accuracy on the ImageNet dataset, and significantly outperforms state-of-the-art approaches in terms of AP50 on the MS COCO object detection dataset. The CSPNet is easy to implement and general enough to cope with architectures based on ResNet, ResNeXt, and DenseNet. Source code is at this https URL. + +
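+To make the cross-stage idea above concrete, here is a minimal PyTorch sketch of a CSP-style stage: the input channels are split, only one half passes through the stage's blocks, and the two halves are re-merged by a 1x1 transition. This illustrates the principle only; it is not the `CSPDarkNet`/`CSPResNet` blocks implemented in this repo, and the block count and layer choices below are arbitrary.
+
+```python
+import torch
+import torch.nn as nn
+
+
+class CSPStage(nn.Module):
+    """Split channels, process only half of them, then fuse both halves."""
+
+    def __init__(self, channels, num_blocks=2):
+        super().__init__()
+        half = channels // 2
+        # the "partial" branch that pays the compute cost
+        self.blocks = nn.Sequential(*[
+            nn.Sequential(
+                nn.Conv2d(half, half, 3, padding=1, bias=False),
+                nn.BatchNorm2d(half),
+                nn.ReLU(inplace=True),
+            ) for _ in range(num_blocks)
+        ])
+        # 1x1 transition merging the processed half with the bypassed half
+        self.fuse = nn.Conv2d(channels, channels, 1, bias=False)
+
+    def forward(self, x):
+        bypass, partial = torch.chunk(x, 2, dim=1)  # cross-stage split
+        partial = self.blocks(partial)              # only half goes through the blocks
+        return self.fuse(torch.cat([bypass, partial], dim=1))
+
+
+print(CSPStage(64)(torch.rand(1, 64, 56, 56)).shape)  # torch.Size([1, 64, 56, 56])
+```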
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('cspdarknet50_3rdparty_8xb32_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('cspdarknet50_3rdparty_8xb32_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/cspnet/cspdarknet50_8xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/cspnet/cspdarknet50_3rdparty_8xb32_in1k_20220329-bd275287.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------: | :-----------------------------------------------------------------------------: | +| `cspdarknet50_3rdparty_8xb32_in1k`\* | From scratch | 27.64 | 5.04 | 80.05 | 95.07 | [config](cspdarknet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspdarknet50_3rdparty_8xb32_in1k_20220329-bd275287.pth) | +| `cspresnet50_3rdparty_8xb32_in1k`\* | From scratch | 21.62 | 3.48 | 79.55 | 94.68 | [config](cspresnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnet50_3rdparty_8xb32_in1k_20220329-dd6dddfb.pth) | +| `cspresnext50_3rdparty_8xb32_in1k`\* | From scratch | 20.57 | 3.11 | 79.96 | 94.96 | [config](cspresnext50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnext50_3rdparty_8xb32_in1k_20220329-2cc84d21.pth) | + +*Models with * are converted from the [official repo](https://github.com/rwightman/pytorch-image-models). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{wang2020cspnet, + title={CSPNet: A new backbone that can enhance learning capability of CNN}, + author={Wang, Chien-Yao and Liao, Hong-Yuan Mark and Wu, Yueh-Hua and Chen, Ping-Yang and Hsieh, Jun-Wei and Yeh, I-Hau}, + booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops}, + pages={390--391}, + year={2020} +} +``` diff --git a/configs/cspnet/cspdarknet50_8xb32_in1k.py b/configs/cspnet/cspdarknet50_8xb32_in1k.py new file mode 100644 index 0000000..8511481 --- /dev/null +++ b/configs/cspnet/cspdarknet50_8xb32_in1k.py @@ -0,0 +1,45 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='CSPDarkNet', depth=53), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=288, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=256), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/cspnet/cspresnet50_8xb32_in1k.py b/configs/cspnet/cspresnet50_8xb32_in1k.py new file mode 100644 index 0000000..d149637 --- /dev/null +++ b/configs/cspnet/cspresnet50_8xb32_in1k.py @@ -0,0 +1,45 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='CSPResNet', depth=50), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=288, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=256), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/cspnet/cspresnext50_8xb32_in1k.py b/configs/cspnet/cspresnext50_8xb32_in1k.py new file mode 100644 index 0000000..1f8c15c --- /dev/null +++ b/configs/cspnet/cspresnext50_8xb32_in1k.py @@ -0,0 +1,45 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='ImageClassifier', + 
backbone=dict(type='CSPResNeXt', depth=50), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/cspnet/metafile.yml b/configs/cspnet/metafile.yml new file mode 100644 index 0000000..3103632 --- /dev/null +++ b/configs/cspnet/metafile.yml @@ -0,0 +1,64 @@ +Collections: + - Name: CSPNet + Metadata: + Training Data: ImageNet-1k + Architecture: + - Cross Stage Partia Stage + Paper: + URL: https://arxiv.org/abs/1911.11929 + Title: 'CSPNet: A New Backbone that can Enhance Learning Capability of CNN' + README: configs/cspnet/README.md + Code: + Version: v0.22.0 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.22.0/mmcls/models/backbones/cspnet.py + +Models: + - Name: cspdarknet50_3rdparty_8xb32_in1k + Metadata: + FLOPs: 5040000000 + Parameters: 27640000 + In Collection: CSPNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.05 + Top 5 Accuracy: 95.07 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/cspnet/cspdarknet50_3rdparty_8xb32_in1k_20220329-bd275287.pth + Config: configs/cspnet/cspdarknet50_8xb32_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspdarknet53_ra_256-d05c7c21.pth + Code: https://github.com/rwightman/pytorch-image-models + - Name: cspresnet50_3rdparty_8xb32_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 3480000000 + Parameters: 21620000 + In Collection: CSPNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.55 + Top 5 Accuracy: 94.68 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnet50_3rdparty_8xb32_in1k_20220329-dd6dddfb.pth + Config: configs/cspnet/cspresnet50_8xb32_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnet50_ra-d3e8d487.pth + Code: https://github.com/rwightman/pytorch-image-models + - Name: cspresnext50_3rdparty_8xb32_in1k + Metadata: + FLOPs: 3110000000 + Parameters: 20570000 + In Collection: CSPNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.96 + Top 5 Accuracy: 94.96 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnext50_3rdparty_8xb32_in1k_20220329-2cc84d21.pth + Config: configs/cspnet/cspresnext50_8xb32_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/cspresnext50_ra_224-648b4713.pth + Code: https://github.com/rwightman/pytorch-image-models diff --git a/configs/csra/README.md b/configs/csra/README.md new file mode 100644 index 0000000..99b2957 --- /dev/null +++ b/configs/csra/README.md @@ -0,0 +1,73 @@ 
+# CSRA + +> [Residual Attention: A Simple but Effective Method for Multi-Label Recognition](https://arxiv.org/abs/2108.02456) + + + +## Abstract + +Multi-label image recognition is a challenging computer vision task of practical use. Progresses in this area, however, are often characterized by complicated methods, heavy computations, and lack of intuitive explanations. To effectively capture different spatial regions occupied by objects from different categories, we propose an embarrassingly simple module, named class-specific residual attention (CSRA). CSRA generates class-specific features for every category by proposing a simple spatial attention score, and then combines it with the class-agnostic average pooling feature. CSRA achieves state-of-the-art results on multilabel recognition, and at the same time is much simpler than them. Furthermore, with only 4 lines of code, CSRA also leads to consistent improvement across many diverse pretrained models and datasets without any extra training. CSRA is both easy to implement and light in computations, which also enjoys intuitive explanations and visualizations. + +
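+Since the paper advertises the module as "only 4 lines of code", a short sketch helps. The function below assumes a backbone feature map `feat` of shape (B, d, H, W) and a 1x1 convolution `fc` that produces per-location class scores; `lam` and the temperature `T` follow the paper's notation. It shows a single attention head; the `CSRAClsHead` used by the config in this folder is built around the same computation (there with `num_heads=1`, `lam=0.1`).
+
+```python
+import torch
+import torch.nn as nn
+
+
+def csra_logits(feat, fc, lam=0.1, T=1.0):
+    """feat: (B, d, H, W) feature map; fc: 1x1 conv giving per-location class scores."""
+    score = fc(feat).flatten(2)                    # (B, num_classes, H*W)
+    base = score.mean(dim=2)                       # class-agnostic average pooling
+    attn = torch.softmax(score * T, dim=2)         # class-specific spatial attention
+    return base + lam * (attn * score).sum(dim=2)  # residual combination
+
+
+fc = nn.Conv2d(2048, 20, kernel_size=1, bias=False)        # ResNet-101 features, 20 VOC classes
+print(csra_logits(torch.rand(2, 2048, 14, 14), fc).shape)  # torch.Size([2, 20])
+```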
+ +## How to use it? + + + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('resnet101-csra_1xb16_voc07-448px', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/csra/resnet101-csra_1xb16_voc07-448px.py +``` + +Test: + +```shell +python tools/test.py configs/csra/resnet101-csra_1xb16_voc07-448px.py https://download.openmmlab.com/mmclassification/v0/csra/resnet101-csra_1xb16_voc07-448px_20220722-29efb40a.pth +``` + + + +## Models and results + +### Multi-Label Classification on PASCAL VOC 2007 + +| Model | Pretrain | Params (M) | Flops (G) | CF1 | OF1 | mAP | Config | Download | +| :--------------------------------- | :----------: | :--------: | :-------: | :---: | :---: | :---: | :-------------------------------------------: | :-------------------------------------------------------------------------: | +| `resnet101-csra_1xb16_voc07-448px` | From scratch | 23.55 | 4.12 | 89.16 | 90.80 | 94.98 | [config](resnet101-csra_1xb16_voc07-448px.py) | [model](https://download.openmmlab.com/mmclassification/v0/csra/resnet101-csra_1xb16_voc07-448px_20220722-29efb40a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/csra/resnet101-csra_1xb16_voc07-448px_20220722-29efb40a.json) | + +## Citation + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2108.02456, + doi = {10.48550/ARXIV.2108.02456}, + url = {https://arxiv.org/abs/2108.02456}, + author = {Zhu, Ke and Wu, Jianxin}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {Residual Attention: A Simple but Effective Method for Multi-Label Recognition}, + publisher = {arXiv}, + year = {2021}, + copyright = {arXiv.org perpetual, non-exclusive license} +} +``` diff --git a/configs/csra/metafile.yml b/configs/csra/metafile.yml new file mode 100644 index 0000000..112f50c --- /dev/null +++ b/configs/csra/metafile.yml @@ -0,0 +1,29 @@ +Collections: + - Name: CSRA + Metadata: + Training Data: PASCAL VOC 2007 + Architecture: + - Class-specific Residual Attention + Paper: + URL: https://arxiv.org/abs/2108.02456 + Title: 'Residual Attention: A Simple but Effective Method for Multi-Label Recognition' + README: configs/csra/README.md + Code: + Version: v0.24.0 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.24.0/mmcls/models/heads/multi_label_csra_head.py + +Models: + - Name: resnet101-csra_1xb16_voc07-448px + Metadata: + FLOPs: 4120000000 + Parameters: 23550000 + In Collection: CSRA + Results: + - Dataset: PASCAL VOC 2007 + Metrics: + mAP: 94.98 + OF1: 90.80 + CF1: 89.16 + Task: Multi-Label Classification + Weights: https://download.openmmlab.com/mmclassification/v0/csra/resnet101-csra_1xb16_voc07-448px_20220722-29efb40a.pth + Config: configs/csra/resnet101-csra_1xb16_voc07-448px.py diff --git a/configs/csra/resnet101-csra_1xb16_voc07-448px.py b/configs/csra/resnet101-csra_1xb16_voc07-448px.py new file mode 100644 index 0000000..85135ae --- /dev/null +++ b/configs/csra/resnet101-csra_1xb16_voc07-448px.py @@ -0,0 +1,78 @@ +_base_ = ['../_base_/datasets/voc_bs16.py', '../_base_/default_runtime.py'] + +# Pre-trained Checkpoint Path +checkpoint = 
'https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth' # noqa +# If you want to use the pre-trained weight of ResNet101-CutMix from the +# originary repo(https://github.com/Kevinz-code/CSRA). Script of +# 'tools/model_converters/torchvision_to_mmpretrain.py' can help you convert +# weight into mmpretrain format. The mAP result would hit 95.5 by using the +# weight. checkpoint = 'PATH/TO/PRE-TRAINED_WEIGHT' + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=101, + num_stages=4, + out_indices=(3, ), + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint, prefix='backbone')), + neck=None, + head=dict( + type='CSRAClsHead', + num_classes=20, + in_channels=2048, + num_heads=1, + lam=0.1, + loss=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))) + +# dataset setting +data_preprocessor = dict( + # RGB format normalization parameters + mean=[0, 0, 0], + std=[255, 255, 255]) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=448, crop_ratio_range=(0.7, 1.0)), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=448), + dict( + type='PackInputs', + # `gt_label_difficult` is needed for VOC evaluation + meta_keys=('sample_idx', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction', + 'gt_label_difficult')), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer +# the lr of classifier.head is 10 * base_lr, which help convergence. +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.0002, momentum=0.9, weight_decay=0.0001), + paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10)})) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-7, + by_epoch=True, + begin=0, + end=1, + convert_to_iter_based=True), + dict(type='StepLR', by_epoch=True, step_size=6, gamma=0.1) +] + +train_cfg = dict(by_epoch=True, max_epochs=20, val_interval=1) +val_cfg = dict() +test_cfg = dict() diff --git a/configs/davit/README.md b/configs/davit/README.md new file mode 100644 index 0000000..1be19d9 --- /dev/null +++ b/configs/davit/README.md @@ -0,0 +1,77 @@ +# DaViT + +> [DaViT: Dual Attention Vision Transformers](https://arxiv.org/abs/2204.03645v1) + + + +## Abstract + +In this work, we introduce Dual Attention Vision Transformers (DaViT), a simple yet effective vision transformer architecture that is able to capture global context while maintaining computational efficiency. We propose approaching the problem from an orthogonal angle: exploiting self-attention mechanisms with both "spatial tokens" and "channel tokens". With spatial tokens, the spatial dimension defines the token scope, and the channel dimension defines the token feature dimension. With channel tokens, we have the inverse: the channel dimension defines the token scope, and the spatial dimension defines the token feature dimension. We further group tokens along the sequence direction for both spatial and channel tokens to maintain the linear complexity of the entire model. 
We show that these two self-attentions complement each other: (i) since each channel token contains an abstract representation of the entire image, the channel attention naturally captures global interactions and representations by taking all spatial positions into account when computing attention scores between channels; (ii) the spatial attention refines the local representations by performing fine-grained interactions across spatial locations, which in turn helps the global information modeling in channel attention. Extensive experiments show our DaViT achieves state-of-the-art performance on four different tasks with efficient computations. Without extra data, DaViT-Tiny, DaViT-Small, and DaViT-Base achieve 82.8%, 84.2%, and 84.6% top-1 accuracy on ImageNet-1K with 28.3M, 49.7M, and 87.9M parameters, respectively. When we further scale up DaViT with 1.5B weakly supervised image and text pairs, DaViT-Gaint reaches 90.4% top-1 accuracy on ImageNet-1K. + +
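+The "channel token" half of the dual attention can be sketched as follows: per head, the channels act as tokens and the spatial positions act as the feature dimension, so every attended token aggregates the whole image and carries global context. This is only an illustration of the idea, not the DaViT backbone in this repo; the head count and the 1/sqrt(N) scaling are arbitrary choices here, and the token grouping that keeps the overall cost linear is omitted.
+
+```python
+import torch
+import torch.nn as nn
+
+
+class ChannelAttention(nn.Module):
+    """Self-attention over channel tokens (per head): global by construction."""
+
+    def __init__(self, dim, num_heads=4):
+        super().__init__()
+        self.num_heads = num_heads
+        self.qkv = nn.Linear(dim, dim * 3, bias=False)
+        self.proj = nn.Linear(dim, dim)
+
+    def forward(self, x):                           # x: (B, N, C) spatial tokens
+        B, N, C = x.shape
+        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
+        q, k, v = qkv.permute(2, 0, 3, 4, 1)         # each: (B, heads, C/heads, N)
+        attn = (q @ k.transpose(-2, -1)) * N**-0.5   # (B, heads, C/heads, C/heads)
+        attn = attn.softmax(dim=-1)
+        out = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C)
+        return self.proj(out)
+
+
+tokens = torch.rand(2, 49, 96)              # 7x7 spatial tokens, 96 channels
+print(ChannelAttention(96)(tokens).shape)   # torch.Size([2, 49, 96])
+```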
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('davit-tiny_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('davit-tiny_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/davit/davit-tiny_4xb256_in1k.py https://download.openmmlab.com/mmclassification/v0/davit/davit-tiny_3rdparty_in1k_20221116-700fdf7d.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------: | :------------------------------------------------------------------------------------: | +| `davit-tiny_3rdparty_in1k`\* | From scratch | 28.36 | 4.54 | 82.24 | 96.13 | [config](davit-tiny_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/davit/davit-tiny_3rdparty_in1k_20221116-700fdf7d.pth) | +| `davit-small_3rdparty_in1k`\* | From scratch | 49.75 | 8.80 | 83.61 | 96.75 | [config](davit-small_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/davit/davit-small_3rdparty_in1k_20221116-51a849a6.pth) | +| `davit-base_3rdparty_in1k`\* | From scratch | 87.95 | 15.51 | 84.09 | 96.82 | [config](davit-base_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/davit/davit-base_3rdparty_in1k_20221116-19e0d956.pth) | + +*Models with * are converted from the [official repo](https://github.com/dingmyu/davit/blob/main/mmdet/mmdet/models/backbones/davit.py#L355). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{ding2022davit, + title={DaViT: Dual Attention Vision Transformer}, + author={Ding, Mingyu and Xiao, Bin and Codella, Noel and Luo, Ping and Wang, Jingdong and Yuan, Lu}, + booktitle={ECCV}, + year={2022}, +} +``` diff --git a/configs/davit/davit-base_4xb256_in1k.py b/configs/davit/davit-base_4xb256_in1k.py new file mode 100644 index 0000000..071702f --- /dev/null +++ b/configs/davit/davit-base_4xb256_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/davit/davit-base.py', + '../_base_/datasets/imagenet_bs256_davit_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# data settings +train_dataloader = dict(batch_size=256) diff --git a/configs/davit/davit-small_4xb256_in1k.py b/configs/davit/davit-small_4xb256_in1k.py new file mode 100644 index 0000000..e341031 --- /dev/null +++ b/configs/davit/davit-small_4xb256_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/davit/davit-small.py', + '../_base_/datasets/imagenet_bs256_davit_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# data settings +train_dataloader = dict(batch_size=256) diff --git a/configs/davit/davit-tiny_4xb256_in1k.py b/configs/davit/davit-tiny_4xb256_in1k.py new file mode 100644 index 0000000..a16d87f --- /dev/null +++ b/configs/davit/davit-tiny_4xb256_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/davit/davit-tiny.py', + '../_base_/datasets/imagenet_bs256_davit_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# data settings +train_dataloader = dict(batch_size=256) diff --git a/configs/davit/metafile.yml b/configs/davit/metafile.yml new file mode 100644 index 0000000..588c18f --- /dev/null +++ b/configs/davit/metafile.yml @@ -0,0 +1,71 @@ +Collections: + - Name: DaViT + Metadata: + Architecture: + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + Paper: + URL: https://arxiv.org/abs/2204.03645v1 + Title: 'DaViT: Dual Attention Vision Transformers' + README: configs/davit/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc3/mmcls/models/backbones/davit.py + Version: v1.0.0rc3 + +Models: + - Name: davit-tiny_3rdparty_in1k + In Collection: DaViT + Metadata: + FLOPs: 4539698688 + Parameters: 28360168 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 82.24 + Top 5 Accuracy: 96.13 + Weights: https://download.openmmlab.com/mmclassification/v0/davit/davit-tiny_3rdparty_in1k_20221116-700fdf7d.pth + Converted From: + Weights: https://drive.google.com/file/d/1RSpi3lxKaloOL5-or20HuG975tbPwxRZ/view?usp=sharing + Code: https://github.com/dingmyu/davit/blob/main/mmdet/mmdet/models/backbones/davit.py#L355 + Config: configs/davit/davit-tiny_4xb256_in1k.py + - Name: davit-small_3rdparty_in1k + In Collection: DaViT + Metadata: + FLOPs: 8799942144 + Parameters: 49745896 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 83.61 + Top 5 Accuracy: 96.75 + Weights: https://download.openmmlab.com/mmclassification/v0/davit/davit-small_3rdparty_in1k_20221116-51a849a6.pth + Converted From: + Weights: https://drive.google.com/file/d/1q976ruj45mt0RhO9oxhOo6EP_cmj4ahQ/view?usp=sharing + Code: https://github.com/dingmyu/davit/blob/main/mmdet/mmdet/models/backbones/davit.py#L355 
+ Config: configs/davit/davit-small_4xb256_in1k.py + - Name: davit-base_3rdparty_in1k + In Collection: DaViT + Metadata: + FLOPs: 15509702656 + Parameters: 87954408 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 84.09 + Top 5 Accuracy: 96.82 + Weights: https://download.openmmlab.com/mmclassification/v0/davit/davit-base_3rdparty_in1k_20221116-19e0d956.pth + Converted From: + Weights: https://drive.google.com/file/d/1u9sDBEueB-YFuLigvcwf4b2YyA4MIVsZ/view?usp=sharing + Code: https://github.com/dingmyu/davit/blob/main/mmdet/mmdet/models/backbones/davit.py#L355 + Config: configs/davit/davit-base_4xb256_in1k.py diff --git a/configs/deit/README.md b/configs/deit/README.md new file mode 100644 index 0000000..ee43414 --- /dev/null +++ b/configs/deit/README.md @@ -0,0 +1,97 @@ +# DeiT + +> [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) + + + +## Abstract + +Recently, neural networks purely based on attention were shown to address image understanding tasks such as image classification. However, these visual transformers are pre-trained with hundreds of millions of images using an expensive infrastructure, thereby limiting their adoption. In this work, we produce a competitive convolution-free transformer by training on Imagenet only. We train them on a single computer in less than 3 days. Our reference vision transformer (86M parameters) achieves top-1 accuracy of 83.1% (single-crop evaluation) on ImageNet with no external data. More importantly, we introduce a teacher-student strategy specific to transformers. It relies on a distillation token ensuring that the student learns from the teacher through attention. We show the interest of this token-based distillation, especially when using a convnet as a teacher. This leads us to report results competitive with convnets for both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our code and models. + +
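+The distillation-token objective described in the abstract can be written down directly. The snippet below shows the hard-label variant from the paper: the class-token logits are supervised by the ground-truth label, the distillation-token logits by the teacher's argmax, with equal weights. Names and the 0.5/0.5 weighting follow the paper's description; as the warning further down notes, MMPretrain provides the distilled checkpoints for inference only, so this is purely illustrative.
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+def deit_hard_distill_loss(cls_logits, dist_logits, teacher_logits, target):
+    """Class token learns from the label, distillation token from the teacher."""
+    teacher_label = teacher_logits.argmax(dim=1)             # hard teacher decision
+    loss_cls = F.cross_entropy(cls_logits, target)           # supervised term
+    loss_dist = F.cross_entropy(dist_logits, teacher_label)  # distillation term
+    return 0.5 * loss_cls + 0.5 * loss_dist
+
+
+cls_logits, dist_logits = torch.randn(4, 1000), torch.randn(4, 1000)
+teacher_logits = torch.randn(4, 1000)            # e.g. from a convnet teacher
+target = torch.randint(0, 1000, (4,))
+print(deit_hard_distill_loss(cls_logits, dist_logits, teacher_logits, target))
+```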
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('deit-tiny_4xb256_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('deit-tiny_4xb256_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/deit/deit-tiny_4xb256_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/deit/deit-tiny_4xb256_in1k.py https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :------------------------------------------------: | :--------------------------------------------------: | +| `deit-tiny_4xb256_in1k` | From scratch | 5.72 | 1.26 | 74.50 | 92.24 | [config](deit-tiny_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0.json) | +| `deit-tiny-distilled_3rdparty_in1k`\* | From scratch | 5.91 | 1.27 | 74.51 | 91.90 | [config](deit-tiny-distilled_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny-distilled_3rdparty_pt-4xb256_in1k_20211216-c429839a.pth) | +| `deit-small_4xb256_in1k` | From scratch | 22.05 | 4.61 | 80.69 | 95.06 | [config](deit-small_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-small_pt-4xb256_in1k_20220218-9425b9bb.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-small_pt-4xb256_in1k_20220218-9425b9bb.json) | +| `deit-small-distilled_3rdparty_in1k`\* | From scratch | 22.44 | 4.63 | 81.17 | 95.40 | [config](deit-small-distilled_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-small-distilled_3rdparty_pt-4xb256_in1k_20211216-4de1d725.pth) | +| `deit-base_16xb64_in1k` | From scratch | 86.57 | 17.58 | 81.76 | 95.81 | [config](deit-base_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-db63c16c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-db63c16c.json) | +| `deit-base_3rdparty_in1k`\* | From scratch | 86.57 | 17.58 | 81.79 | 95.59 | [config](deit-base_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_pt-16xb64_in1k_20211124-6f40c188.pth) | +| `deit-base-distilled_3rdparty_in1k`\* | From scratch | 87.34 | 17.67 | 83.33 | 96.49 | [config](deit-base-distilled_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_pt-16xb64_in1k_20211216-42891296.pth) | +| `deit-base_224px-pre_3rdparty_in1k-384px`\* | 224px | 86.86 | 55.54 | 83.04 | 96.31 | 
[config](deit-base_16xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_ft-16xb32_in1k-384px_20211124-822d02f2.pth) | +| `deit-base-distilled_224px-pre_3rdparty_in1k-384px`\* | 224px | 87.63 | 55.65 | 85.55 | 97.35 | [config](deit-base-distilled_16xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_ft-16xb32_in1k-384px_20211216-e48d6000.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L168). The config files of these models are only for inference. We haven't reproduce the training results.* + +```{warning} +MMPretrain doesn't support training the distilled version DeiT. +And we provide distilled version checkpoints for inference only. +``` + +## Citation + +```bibtex +@InProceedings{pmlr-v139-touvron21a, + title = {Training data-efficient image transformers & distillation through attention}, + author = {Touvron, Hugo and Cord, Matthieu and Douze, Matthijs and Massa, Francisco and Sablayrolles, Alexandre and Jegou, Herve}, + booktitle = {International Conference on Machine Learning}, + pages = {10347--10357}, + year = {2021}, + volume = {139}, + month = {July} +} +``` diff --git a/configs/deit/deit-base-distilled_16xb32_in1k-384px.py b/configs/deit/deit-base-distilled_16xb32_in1k-384px.py new file mode 100644 index 0000000..60d3112 --- /dev/null +++ b/configs/deit/deit-base-distilled_16xb32_in1k-384px.py @@ -0,0 +1,37 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='DistilledVisionTransformer', + arch='deit-base', + img_size=384, + patch_size=16, + ), + neck=None, + head=dict( + type='DeiTClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + # Change to the path of the pretrained model + # init_cfg=dict(type='Pretrained', checkpoint=''), +) + +# dataset settings +train_dataloader = dict(batch_size=32) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (16 GPUs) x (32 samples per GPU) +auto_scale_lr = dict(base_batch_size=512) diff --git a/configs/deit/deit-base-distilled_16xb64_in1k.py b/configs/deit/deit-base-distilled_16xb64_in1k.py new file mode 100644 index 0000000..207bf25 --- /dev/null +++ b/configs/deit/deit-base-distilled_16xb64_in1k.py @@ -0,0 +1,46 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='DistilledVisionTransformer', + arch='deit-base', + img_size=224, + patch_size=16), + neck=None, + head=dict( + type='DeiTClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=64) + +# schedule settings +optim_wrapper = dict( + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=5.0), +) diff --git a/configs/deit/deit-base_16xb32_in1k-384px.py b/configs/deit/deit-base_16xb32_in1k-384px.py new file mode 100644 index 0000000..762b460 --- /dev/null +++ b/configs/deit/deit-base_16xb32_in1k-384px.py @@ -0,0 +1,37 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='deit-base', + img_size=384, + patch_size=16, + ), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + # Change to the path of the pretrained model + # init_cfg=dict(type='Pretrained', checkpoint=''), +) + +# dataset settings +train_dataloader = dict(batch_size=32) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (16 GPUs) x (32 samples per GPU) +auto_scale_lr = dict(base_batch_size=512) diff --git a/configs/deit/deit-base_16xb64_in1k.py b/configs/deit/deit-base_16xb64_in1k.py new file mode 100644 index 0000000..66f03a9 --- /dev/null +++ b/configs/deit/deit-base_16xb64_in1k.py @@ -0,0 +1,50 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='deit-base', + img_size=224, + patch_size=16, + drop_path_rate=0.1), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=64) + +# schedule settings +optim_wrapper = dict( + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=5.0), +) + +# runtime settings +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/deit/deit-small-distilled_4xb256_in1k.py b/configs/deit/deit-small-distilled_4xb256_in1k.py new file mode 100644 index 0000000..9c7c58c --- /dev/null +++ b/configs/deit/deit-small-distilled_4xb256_in1k.py @@ -0,0 +1,46 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='DistilledVisionTransformer', + arch='deit-small', + img_size=224, + patch_size=16), + neck=None, + head=dict( + type='DeiTClsHead', + num_classes=1000, + in_channels=384, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) + +# data settings +train_dataloader = dict(batch_size=256) + +# schedule settings +optim_wrapper = dict( + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=5.0), +) diff --git a/configs/deit/deit-small_4xb256_in1k.py b/configs/deit/deit-small_4xb256_in1k.py new file mode 100644 index 0000000..b96d84e --- /dev/null +++ b/configs/deit/deit-small_4xb256_in1k.py @@ -0,0 +1,48 @@ +# In small and tiny arch, remove drop path and EMA hook comparing with the +# original config +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='deit-small', + img_size=224, + patch_size=16), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=384, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), 
+ init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) + +# data settings +train_dataloader = dict(batch_size=256) + +# schedule settings +optim_wrapper = dict( + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=5.0), +) diff --git a/configs/deit/deit-tiny-distilled_4xb256_in1k.py b/configs/deit/deit-tiny-distilled_4xb256_in1k.py new file mode 100644 index 0000000..00a9c4b --- /dev/null +++ b/configs/deit/deit-tiny-distilled_4xb256_in1k.py @@ -0,0 +1,47 @@ +# The distillation config is only for evaluation. +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='DistilledVisionTransformer', + arch='deit-tiny', + img_size=224, + patch_size=16), + neck=None, + head=dict( + type='DeiTClsHead', + num_classes=1000, + in_channels=192, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) + +# data settings +train_dataloader = dict(batch_size=256) + +# schedule settings +optim_wrapper = dict( + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=5.0), +) diff --git a/configs/deit/deit-tiny_4xb256_in1k.py b/configs/deit/deit-tiny_4xb256_in1k.py new file mode 100644 index 0000000..486669e --- /dev/null +++ b/configs/deit/deit-tiny_4xb256_in1k.py @@ -0,0 +1,48 @@ +# In small and tiny arch, remove drop path and EMA hook comparing with the +# original config +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='deit-tiny', + img_size=224, + patch_size=16), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=192, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ]), +) + +# data settings +train_dataloader = dict(batch_size=256) + +# schedule settings +optim_wrapper = dict( + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=5.0), +) diff --git a/configs/deit/metafile.yml b/configs/deit/metafile.yml new file mode 100644 index 0000000..f6f0c5e --- /dev/null +++ b/configs/deit/metafile.yml @@ -0,0 +1,153 @@ +Collections: + - Name: DeiT + Metadata: + Training Data: ImageNet-1k + Architecture: + - Layer Normalization + - Scaled Dot-Product Attention + - Attention Dropout + - 
Multi-Head Attention + Paper: + Title: Training data-efficient image transformers & distillation through attention + URL: https://arxiv.org/abs/2012.12877 + README: configs/deit/README.md + Code: + URL: v0.19.0 + Version: https://github.com/open-mmlab/mmpretrain/blob/v0.19.0/mmcls/models/backbones/deit.py + +Models: + - Name: deit-tiny_4xb256_in1k + Metadata: + FLOPs: 1258219200 + Parameters: 5717416 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.5 + Top 5 Accuracy: 92.24 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0.pth + Config: configs/deit/deit-tiny_4xb256_in1k.py + - Name: deit-tiny-distilled_3rdparty_in1k + Metadata: + FLOPs: 1265371776 + Parameters: 5910800 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.51 + Top 5 Accuracy: 91.9 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny-distilled_3rdparty_pt-4xb256_in1k_20211216-c429839a.pth + Config: configs/deit/deit-tiny-distilled_4xb256_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth + Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L108 + - Name: deit-small_4xb256_in1k + Metadata: + FLOPs: 4607954304 + Parameters: 22050664 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.69 + Top 5 Accuracy: 95.06 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-small_pt-4xb256_in1k_20220218-9425b9bb.pth + Config: configs/deit/deit-small_4xb256_in1k.py + - Name: deit-small-distilled_3rdparty_in1k + Metadata: + FLOPs: 4632876288 + Parameters: 22436432 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.17 + Top 5 Accuracy: 95.4 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-small-distilled_3rdparty_pt-4xb256_in1k_20211216-4de1d725.pth + Config: configs/deit/deit-small-distilled_4xb256_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth + Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L123 + - Name: deit-base_16xb64_in1k + Metadata: + FLOPs: 17581972224 + Parameters: 86567656 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.76 + Top 5 Accuracy: 95.81 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-db63c16c.pth + Config: configs/deit/deit-base_16xb64_in1k.py + - Name: deit-base_3rdparty_in1k + Metadata: + FLOPs: 17581972224 + Parameters: 86567656 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.79 + Top 5 Accuracy: 95.59 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_pt-16xb64_in1k_20211124-6f40c188.pth + Config: configs/deit/deit-base_16xb64_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth + Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L93 + - Name: deit-base-distilled_3rdparty_in1k + Metadata: + FLOPs: 17674283520 + Parameters: 87338192 + In Collection: 
DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.33 + Top 5 Accuracy: 96.49 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_pt-16xb64_in1k_20211216-42891296.pth + Config: configs/deit/deit-base-distilled_16xb64_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth + Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L138 + - Name: deit-base_224px-pre_3rdparty_in1k-384px + Metadata: + FLOPs: 55538974464 + Parameters: 86859496 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.04 + Top 5 Accuracy: 96.31 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base_3rdparty_ft-16xb32_in1k-384px_20211124-822d02f2.pth + Config: configs/deit/deit-base_16xb32_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth + Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L153 + - Name: deit-base-distilled_224px-pre_3rdparty_in1k-384px + Metadata: + FLOPs: 55645294080 + Parameters: 87630032 + In Collection: DeiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.55 + Top 5 Accuracy: 97.35 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/deit/deit-base-distilled_3rdparty_ft-16xb32_in1k-384px_20211216-e48d6000.pth + Config: configs/deit/deit-base-distilled_16xb32_in1k-384px.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth + Code: https://github.com/facebookresearch/deit/blob/f5123946205daf72a88783dae94cabff98c49c55/models.py#L168 diff --git a/configs/deit3/README.md b/configs/deit3/README.md new file mode 100644 index 0000000..18694b7 --- /dev/null +++ b/configs/deit3/README.md @@ -0,0 +1,90 @@ +# DeiT III: Revenge of the ViT + +> [DeiT III: Revenge of the ViT](https://arxiv.org/abs/2204.07118) + + + +## Abstract + +A Vision Transformer (ViT) is a simple neural architecture amenable to serve several computer vision tasks. It has limited built-in architectural priors, in contrast to more recent architectures that incorporate priors either about the input data or of specific tasks. Recent works show that ViTs benefit from self-supervised pre-training, in particular BerT-like pre-training like BeiT. In this paper, we revisit the supervised training of ViTs. Our procedure builds upon and simplifies a recipe introduced for training ResNet-50. It includes a new simple data-augmentation procedure with only 3 augmentations, closer to the practice in self-supervised learning. Our evaluations on Image classification (ImageNet-1k with and without pre-training on ImageNet-21k), transfer learning and semantic segmentation show that our procedure outperforms by a large margin previous fully supervised training recipes for ViT. It also reveals that the performance of our ViT trained with supervision is comparable to that of more recent architectures. Our results could serve as better baselines for recent self-supervised approaches demonstrated on ViT. + +
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('deit3-small-p16_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('deit3-small-p16_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/deit3/deit3-small-p16_64xb64_in1k.py https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-p16_3rdparty_in1k_20221008-0f7c70cf.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :--------------------------------------------: | :------------------------------------------------------: | +| `deit3-small-p16_3rdparty_in1k`\* | From scratch | 22.06 | 4.61 | 81.35 | 95.31 | [config](deit3-small-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-p16_3rdparty_in1k_20221008-0f7c70cf.pth) | +| `deit3-small-p16_3rdparty_in1k-384px`\* | From scratch | 22.21 | 15.52 | 83.43 | 96.68 | [config](deit3-small-p16_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-p16_3rdparty_in1k-384px_20221008-a2c1a0c7.pth) | +| `deit3-small-p16_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 22.06 | 4.61 | 83.06 | 96.77 | [config](deit3-small-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-p16_in21k-pre_3rdparty_in1k_20221009-dcd90827.pth) | +| `deit3-small-p16_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 22.21 | 15.52 | 84.84 | 97.48 | [config](deit3-small-p16_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-p16_in21k-pre_3rdparty_in1k-384px_20221009-de116dd7.pth) | +| `deit3-medium-p16_3rdparty_in1k`\* | From scratch | 38.85 | 8.00 | 82.99 | 96.22 | [config](deit3-medium-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-medium-p16_3rdparty_in1k_20221008-3b21284d.pth) | +| `deit3-medium-p16_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 38.85 | 8.00 | 84.56 | 97.19 | [config](deit3-medium-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-medium-p16_in21k-pre_3rdparty_in1k_20221009-472f11e2.pth) | +| `deit3-base-p16_3rdparty_in1k`\* | From scratch | 86.59 | 17.58 | 83.80 | 96.55 | [config](deit3-base-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-base-p16_3rdparty_in1k_20221008-60b8c8bf.pth) | +| `deit3-base-p16_3rdparty_in1k-384px`\* | From scratch | 86.88 | 55.54 | 85.08 | 97.25 | [config](deit3-base-p16_64xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-base-p16_3rdparty_in1k-384px_20221009-e19e36d4.pth) | +| `deit3-base-p16_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 86.59 | 17.58 | 85.70 | 97.75 | [config](deit3-base-p16_64xb64_in1k.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-base-p16_in21k-pre_3rdparty_in1k_20221009-87983ca1.pth) | +| `deit3-base-p16_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 86.88 | 55.54 | 86.73 | 98.11 | [config](deit3-base-p16_64xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-base-p16_in21k-pre_3rdparty_in1k-384px_20221009-5e4e37b9.pth) | +| `deit3-large-p16_3rdparty_in1k`\* | From scratch | 304.37 | 61.60 | 84.87 | 97.01 | [config](deit3-large-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-large-p16_3rdparty_in1k_20221009-03b427ea.pth) | +| `deit3-large-p16_3rdparty_in1k-384px`\* | From scratch | 304.76 | 191.21 | 85.82 | 97.60 | [config](deit3-large-p16_64xb16_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-large-p16_3rdparty_in1k-384px_20221009-4317ce62.pth) | +| `deit3-large-p16_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 304.37 | 61.60 | 86.97 | 98.24 | [config](deit3-large-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-large-p16_in21k-pre_3rdparty_in1k_20221009-d8d27084.pth) | +| `deit3-large-p16_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 304.76 | 191.21 | 87.73 | 98.51 | [config](deit3-large-p16_64xb16_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-large-p16_in21k-pre_3rdparty_in1k-384px_20221009-75fea03f.pth) | +| `deit3-huge-p14_3rdparty_in1k`\* | From scratch | 632.13 | 167.40 | 85.21 | 97.36 | [config](deit3-huge-p14_64xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-huge-p14_3rdparty_in1k_20221009-e107bcb7.pth) | +| `deit3-huge-p14_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 632.13 | 167.40 | 87.19 | 98.26 | [config](deit3-huge-p14_64xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-huge-p14_in21k-pre_3rdparty_in1k_20221009-19b8a535.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171). The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{Touvron2022DeiTIR, + title={DeiT III: Revenge of the ViT}, + author={Hugo Touvron and Matthieu Cord and Herve Jegou}, + journal={arXiv preprint arXiv:2204.07118}, + year={2022}, +} +``` diff --git a/configs/deit3/deit3-base-p16_64xb32_in1k-384px.py b/configs/deit3/deit3-base-p16_64xb32_in1k-384px.py new file mode 100644 index 0000000..b6c8a8c --- /dev/null +++ b/configs/deit3/deit3-base-p16_64xb32_in1k-384px.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/deit3/deit3-base-p16-384.py', + '../_base_/datasets/imagenet_bs64_deit3_384.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# dataset setting +train_dataloader = dict(batch_size=32) + +# schedule settings +optim_wrapper = dict(optimizer=dict(lr=1e-5, weight_decay=0.1)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
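+# Overriding `train_dataloader.batch_size` above changes the *actual* batch
+# size that the scaling factor is computed from; the reference value below
+# (derived from the 64xb32 layout in the file name) stays fixed.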
+# base_batch_size = (64 GPUs) x (32 samples per GPU)
+auto_scale_lr = dict(base_batch_size=2048)
diff --git a/configs/deit3/deit3-base-p16_64xb64_in1k.py b/configs/deit3/deit3-base-p16_64xb64_in1k.py
new file mode 100644
index 0000000..c69a64c
--- /dev/null
+++ b/configs/deit3/deit3-base-p16_64xb64_in1k.py
@@ -0,0 +1,17 @@
+_base_ = [
+    '../_base_/models/deit3/deit3-base-p16-224.py',
+    '../_base_/datasets/imagenet_bs64_deit3_224.py',
+    '../_base_/schedules/imagenet_bs4096_AdamW.py',
+    '../_base_/default_runtime.py'
+]
+
+# dataset setting
+train_dataloader = dict(batch_size=64)
+
+# schedule settings
+optim_wrapper = dict(optimizer=dict(lr=1e-5, weight_decay=0.1))
+
+# NOTE: `auto_scale_lr` is for automatically scaling LR
+# based on the actual training batch size.
+# base_batch_size = (64 GPUs) x (64 samples per GPU)
+auto_scale_lr = dict(base_batch_size=4096)
diff --git a/configs/deit3/deit3-huge-p14_64xb32_in1k.py b/configs/deit3/deit3-huge-p14_64xb32_in1k.py
new file mode 100644
index 0000000..f8cae07
--- /dev/null
+++ b/configs/deit3/deit3-huge-p14_64xb32_in1k.py
@@ -0,0 +1,17 @@
+_base_ = [
+    '../_base_/models/deit3/deit3-huge-p14-224.py',
+    '../_base_/datasets/imagenet_bs64_deit3_224.py',
+    '../_base_/schedules/imagenet_bs4096_AdamW.py',
+    '../_base_/default_runtime.py'
+]
+
+# dataset setting
+train_dataloader = dict(batch_size=32)
+
+# schedule settings
+optim_wrapper = dict(optimizer=dict(lr=1e-5, weight_decay=0.1))
+
+# NOTE: `auto_scale_lr` is for automatically scaling LR
+# based on the actual training batch size.
+# base_batch_size = (64 GPUs) x (32 samples per GPU)
+auto_scale_lr = dict(base_batch_size=2048)
diff --git a/configs/deit3/deit3-large-p16_64xb16_in1k-384px.py b/configs/deit3/deit3-large-p16_64xb16_in1k-384px.py
new file mode 100644
index 0000000..84fb0fe
--- /dev/null
+++ b/configs/deit3/deit3-large-p16_64xb16_in1k-384px.py
@@ -0,0 +1,17 @@
+_base_ = [
+    '../_base_/models/deit3/deit3-large-p16-384.py',
+    '../_base_/datasets/imagenet_bs64_deit3_384.py',
+    '../_base_/schedules/imagenet_bs4096_AdamW.py',
+    '../_base_/default_runtime.py'
+]
+
+# dataset setting
+train_dataloader = dict(batch_size=16)
+
+# schedule settings
+optim_wrapper = dict(optimizer=dict(lr=1e-5, weight_decay=0.1))
+
+# NOTE: `auto_scale_lr` is for automatically scaling LR
+# based on the actual training batch size.
+# base_batch_size = (64 GPUs) x (16 samples per GPU)
+auto_scale_lr = dict(base_batch_size=1024)
diff --git a/configs/deit3/deit3-large-p16_64xb64_in1k.py b/configs/deit3/deit3-large-p16_64xb64_in1k.py
new file mode 100644
index 0000000..a67ac21
--- /dev/null
+++ b/configs/deit3/deit3-large-p16_64xb64_in1k.py
@@ -0,0 +1,17 @@
+_base_ = [
+    '../_base_/models/deit3/deit3-large-p16-224.py',
+    '../_base_/datasets/imagenet_bs64_deit3_224.py',
+    '../_base_/schedules/imagenet_bs4096_AdamW.py',
+    '../_base_/default_runtime.py'
+]
+
+# dataset setting
+train_dataloader = dict(batch_size=64)
+
+# schedule settings
+optim_wrapper = dict(optimizer=dict(lr=1e-5, weight_decay=0.1))
+
+# NOTE: `auto_scale_lr` is for automatically scaling LR
+# based on the actual training batch size.
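+# A typical multi-GPU launch (the GPU count is only an example) would be
+#   bash tools/dist_train.sh configs/deit3/deit3-large-p16_64xb64_in1k.py 8 --auto-scale-lr
+# which gives an actual batch size of 8 x 64 = 512 and therefore an LR
+# scaling factor of 512 / 4096 = 1/8.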
+# base_batch_size = (64 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/deit3/deit3-medium-p16_64xb64_in1k.py b/configs/deit3/deit3-medium-p16_64xb64_in1k.py new file mode 100644 index 0000000..def48e6 --- /dev/null +++ b/configs/deit3/deit3-medium-p16_64xb64_in1k.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/deit3/deit3-medium-p16-224.py', + '../_base_/datasets/imagenet_bs64_deit3_224.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# dataset setting +train_dataloader = dict(batch_size=64) + +# schedule settings +optim_wrapper = dict(optimizer=dict(lr=1e-5, weight_decay=0.1)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (64 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/deit3/deit3-small-p16_64xb64_in1k-384px.py b/configs/deit3/deit3-small-p16_64xb64_in1k-384px.py new file mode 100644 index 0000000..e6b3e89 --- /dev/null +++ b/configs/deit3/deit3-small-p16_64xb64_in1k-384px.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/deit3/deit3-small-p16-384.py', + '../_base_/datasets/imagenet_bs64_deit3_384.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# dataset setting +train_dataloader = dict(batch_size=64) + +# schedule settings +optim_wrapper = dict(optimizer=dict(lr=1e-5, weight_decay=0.1)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (64 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/deit3/deit3-small-p16_64xb64_in1k.py b/configs/deit3/deit3-small-p16_64xb64_in1k.py new file mode 100644 index 0000000..58b0a2f --- /dev/null +++ b/configs/deit3/deit3-small-p16_64xb64_in1k.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/deit3/deit3-small-p16-224.py', + '../_base_/datasets/imagenet_bs64_deit3_224.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# dataset setting +train_dataloader = dict(batch_size=64) + +# schedule settings +optim_wrapper = dict(optimizer=dict(lr=1e-5, weight_decay=0.1)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
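+# At the nominal 64 x 64 = 4096 setup the scaling factor is 4096 / 4096 = 1,
+# i.e. the lr=1e-5 set above is used unchanged; the factor only deviates
+# from 1 when the GPU count or per-GPU batch size changes.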
+# base_batch_size = (64 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/deit3/metafile.yml b/configs/deit3/metafile.yml new file mode 100644 index 0000000..6f50fdc --- /dev/null +++ b/configs/deit3/metafile.yml @@ -0,0 +1,310 @@ +Collections: + - Name: DeiT3 + Metadata: + Architecture: + - Attention Dropout + - Convolution + - Dense Connections + - Dropout + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + - Tanh Activation + Paper: + URL: https://arxiv.org/abs/2204.07118 + Title: 'DeiT III: Revenge of the ViT' + README: configs/deit3/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc2/mmcls/models/backbones/deit3.py + Version: v1.0.0rc2 + +Models: + - Name: deit3-small-p16_3rdparty_in1k + In Collection: DeiT3 + Metadata: + FLOPs: 4607954304 + Parameters: 22059496 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 81.35 + Top 5 Accuracy: 95.31 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-p16_3rdparty_in1k_20221008-0f7c70cf.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_small_224_1k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-small-p16_64xb64_in1k.py + - Name: deit3-small-p16_3rdparty_in1k-384px + In Collection: DeiT3 + Metadata: + FLOPs: 15517663104 + Parameters: 22205416 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 83.43 + Top 5 Accuracy: 96.68 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-p16_3rdparty_in1k-384px_20221008-a2c1a0c7.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_small_384_1k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-small-p16_64xb64_in1k-384px.py + - Name: deit3-small-p16_in21k-pre_3rdparty_in1k + In Collection: DeiT3 + Metadata: + FLOPs: 4607954304 + Parameters: 22059496 + Training Data: + - ImageNet-21k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 83.06 + Top 5 Accuracy: 96.77 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-p16_in21k-pre_3rdparty_in1k_20221009-dcd90827.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_small_224_21k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-small-p16_64xb64_in1k.py + - Name: deit3-small-p16_in21k-pre_3rdparty_in1k-384px + In Collection: DeiT3 + Metadata: + FLOPs: 15517663104 + Parameters: 22205416 + Training Data: + - ImageNet-21k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 84.84 + Top 5 Accuracy: 97.48 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-p16_in21k-pre_3rdparty_in1k-384px_20221009-de116dd7.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_small_384_21k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-small-p16_64xb64_in1k-384px.py + - Name: deit3-medium-p16_3rdparty_in1k + In Collection: DeiT3 + Metadata: + FLOPs: 8003064320 + Parameters: 38849512 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 
1 Accuracy: 82.99 + Top 5 Accuracy: 96.22 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-medium-p16_3rdparty_in1k_20221008-3b21284d.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_1k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-medium-p16_64xb64_in1k.py + - Name: deit3-medium-p16_in21k-pre_3rdparty_in1k + In Collection: DeiT3 + Metadata: + FLOPs: 8003064320 + Parameters: 38849512 + Training Data: + - ImageNet-21k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 84.56 + Top 5 Accuracy: 97.19 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-medium-p16_in21k-pre_3rdparty_in1k_20221009-472f11e2.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_21k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-medium-p16_64xb64_in1k.py + - Name: deit3-base-p16_3rdparty_in1k + In Collection: DeiT3 + Metadata: + FLOPs: 17581972224 + Parameters: 86585320 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 83.80 + Top 5 Accuracy: 96.55 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-base-p16_3rdparty_in1k_20221008-60b8c8bf.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_base_224_1k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-base-p16_64xb64_in1k.py + - Name: deit3-base-p16_3rdparty_in1k-384px + In Collection: DeiT3 + Metadata: + FLOPs: 55538974464 + Parameters: 86877160 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.08 + Top 5 Accuracy: 97.25 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-base-p16_3rdparty_in1k-384px_20221009-e19e36d4.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_base_384_1k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-base-p16_64xb32_in1k-384px.py + - Name: deit3-base-p16_in21k-pre_3rdparty_in1k + In Collection: DeiT3 + Metadata: + FLOPs: 17581972224 + Parameters: 86585320 + Training Data: + - ImageNet-21k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.70 + Top 5 Accuracy: 97.75 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-base-p16_in21k-pre_3rdparty_in1k_20221009-87983ca1.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_base_224_21k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-base-p16_64xb64_in1k.py + - Name: deit3-base-p16_in21k-pre_3rdparty_in1k-384px + In Collection: DeiT3 + Metadata: + FLOPs: 55538974464 + Parameters: 86877160 + Training Data: + - ImageNet-21k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 86.73 + Top 5 Accuracy: 98.11 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-base-p16_in21k-pre_3rdparty_in1k-384px_20221009-5e4e37b9.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_base_384_21k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: 
configs/deit3/deit3-base-p16_64xb32_in1k-384px.py + - Name: deit3-large-p16_3rdparty_in1k + In Collection: DeiT3 + Metadata: + FLOPs: 61603111936 + Parameters: 304374760 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 84.87 + Top 5 Accuracy: 97.01 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-large-p16_3rdparty_in1k_20221009-03b427ea.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_large_224_1k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-large-p16_64xb64_in1k.py + - Name: deit3-large-p16_3rdparty_in1k-384px + In Collection: DeiT3 + Metadata: + FLOPs: 191210034176 + Parameters: 304763880 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.82 + Top 5 Accuracy: 97.60 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-large-p16_3rdparty_in1k-384px_20221009-4317ce62.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_large_384_1k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-large-p16_64xb16_in1k-384px.py + - Name: deit3-large-p16_in21k-pre_3rdparty_in1k + In Collection: DeiT3 + Metadata: + FLOPs: 61603111936 + Parameters: 304374760 + Training Data: + - ImageNet-21k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 86.97 + Top 5 Accuracy: 98.24 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-large-p16_in21k-pre_3rdparty_in1k_20221009-d8d27084.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_large_224_21k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-large-p16_64xb64_in1k.py + - Name: deit3-large-p16_in21k-pre_3rdparty_in1k-384px + In Collection: DeiT3 + Metadata: + FLOPs: 191210034176 + Parameters: 304763880 + Training Data: + - ImageNet-21k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 87.73 + Top 5 Accuracy: 98.51 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-large-p16_in21k-pre_3rdparty_in1k-384px_20221009-75fea03f.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_large_384_21k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-large-p16_64xb16_in1k-384px.py + - Name: deit3-huge-p14_3rdparty_in1k + In Collection: DeiT3 + Metadata: + FLOPs: 167400741120 + Parameters: 632126440 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.21 + Top 5 Accuracy: 97.36 + Weights: https://download.openmmlab.com/mmclassification/v0/deit3/deit3-huge-p14_3rdparty_in1k_20221009-e107bcb7.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_1k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-huge-p14_64xb32_in1k.py + - Name: deit3-huge-p14_in21k-pre_3rdparty_in1k + In Collection: DeiT3 + Metadata: + FLOPs: 167400741120 + Parameters: 632126440 + Training Data: + - ImageNet-21k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 87.19 + Top 5 Accuracy: 98.26 + Weights: 
https://download.openmmlab.com/mmclassification/v0/deit3/deit3-huge-p14_in21k-pre_3rdparty_in1k_20221009-19b8a535.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_1k.pth + Code: https://github.com/facebookresearch/deit/blob/main/models_v2.py#L171 + Config: configs/deit3/deit3-huge-p14_64xb32_in1k.py diff --git a/configs/densecl/README.md b/configs/densecl/README.md new file mode 100644 index 0000000..d1e1295 --- /dev/null +++ b/configs/densecl/README.md @@ -0,0 +1,85 @@ +# DenseCL + +> [Dense contrastive learning for self-supervised visual pre-training](https://arxiv.org/abs/2011.09157) + + + +## Abstract + +To date, most existing self-supervised learning methods are designed and optimized for image classification. These pre-trained models can be sub-optimal for dense prediction tasks due to the discrepancy between image-level prediction and pixel-level prediction. To fill this gap, we aim to design an effective, dense self-supervised learning method that directly works at the level of pixels (or local features) by taking into account the correspondence between local features. We present dense contrastive learning (DenseCL), which implements self-supervised learning by optimizing a pairwise contrastive (dis)similarity loss at the pixel level between two views of input images. + +
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('resnet50_densecl-pre_8xb32-linear-steplr-100e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('densecl_resnet50_8xb32-coslr-200e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/densecl/densecl_resnet50_8xb32-coslr-200e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/densecl/benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/densecl/densecl_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb32-steplr-100e_in1k/resnet50_linear-8xb32-steplr-100e_in1k_20220825-f0f0a579.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :--------------------------------------- | :--------: | :-------: | :-------------------------------------------------: | :----------------------------------------------------------------------------------------: | +| `densecl_resnet50_8xb32-coslr-200e_in1k` | 64.85 | 4.11 | [config](densecl_resnet50_8xb32-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/densecl/densecl_resnet50_8xb32-coslr-200e_in1k/densecl_resnet50_8xb32-coslr-200e_in1k_20220825-3078723b.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/densecl/densecl_resnet50_8xb32-coslr-200e_in1k/densecl_resnet50_8xb32-coslr-200e_in1k_20220825-3078723b.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `resnet50_densecl-pre_8xb32-linear-steplr-100e_in1k` | [DENSECL](https://download.openmmlab.com/mmselfsup/1.x/densecl/densecl_resnet50_8xb32-coslr-200e_in1k/densecl_resnet50_8xb32-coslr-200e_in1k_20220825-3078723b.pth) | 25.56 | 4.11 | 63.50 | [config](benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/densecl/densecl_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb32-steplr-100e_in1k/resnet50_linear-8xb32-steplr-100e_in1k_20220825-f0f0a579.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/densecl/densecl_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb32-steplr-100e_in1k/resnet50_linear-8xb32-steplr-100e_in1k_20220825-f0f0a579.json) | + +## Citation + +```bibtex +@inproceedings{wang2021dense, + title={Dense contrastive learning for self-supervised visual pre-training}, + author={Wang, Xinlong and Zhang, Rufeng and Shen, Chunhua and Kong, Tao and Li, Lei}, + booktitle={CVPR}, + year={2021} +} +``` diff --git a/configs/densecl/benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py b/configs/densecl/benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py new file mode 100644 index 0000000..37795d9 --- /dev/null +++ 
b/configs/densecl/benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py @@ -0,0 +1,20 @@ +_base_ = [ + '../../_base_/models/resnet50.py', + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_sgd_steplr_100e.py', + '../../_base_/default_runtime.py', +] + +model = dict( + backbone=dict( + frozen_stages=4, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.'))) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)) + +# runtime settings +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) diff --git a/configs/densecl/densecl_resnet50_8xb32-coslr-200e_in1k.py b/configs/densecl/densecl_resnet50_8xb32-coslr-200e_in1k.py new file mode 100644 index 0000000..8a3959f --- /dev/null +++ b/configs/densecl/densecl_resnet50_8xb32-coslr-200e_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_mocov2.py', + '../_base_/schedules/imagenet_sgd_coslr_200e.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='DenseCL', + queue_len=65536, + feat_dim=128, + momentum=0.001, + loss_lambda=0.5, + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='BN'), + zero_init_residual=False), + neck=dict( + type='DenseCLNeck', + in_channels=2048, + hid_channels=2048, + out_channels=128, + num_grid=None), + head=dict( + type='ContrastiveHead', + loss=dict(type='CrossEntropyLoss'), + temperature=0.2), +) +find_unused_parameters = True + +# runtime settings +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
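+# base_batch_size = (8 GPUs) x (32 samples per GPU), matching the 8xb32
+# layout in the file name.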
+auto_scale_lr = dict(base_batch_size=256) diff --git a/configs/densecl/metafile.yml b/configs/densecl/metafile.yml new file mode 100644 index 0000000..2444991 --- /dev/null +++ b/configs/densecl/metafile.yml @@ -0,0 +1,44 @@ +Collections: + - Name: DenseCL + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + Paper: + Title: Dense contrastive learning for self-supervised visual pre-training + URL: https://arxiv.org/abs/2011.09157 + README: configs/densecl/README.md + +Models: + - Name: densecl_resnet50_8xb32-coslr-200e_in1k + Metadata: + Epochs: 200 + Batch Size: 256 + FLOPs: 4109364224 + Parameters: 64850560 + Training Data: ImageNet-1k + In Collection: DenseCL + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/densecl/densecl_resnet50_8xb32-coslr-200e_in1k/densecl_resnet50_8xb32-coslr-200e_in1k_20220825-3078723b.pth + Config: configs/densecl/densecl_resnet50_8xb32-coslr-200e_in1k.py + Downstream: + - resnet50_densecl-pre_8xb32-linear-steplr-100e_in1k + - Name: resnet50_densecl-pre_8xb32-linear-steplr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 256 + FLOPs: 4109464576 + Parameters: 25557032 + Training Data: ImageNet-1k + In Collection: DenseCL + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 63.5 + Weights: https://download.openmmlab.com/mmselfsup/1.x/densecl/densecl_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb32-steplr-100e_in1k/resnet50_linear-8xb32-steplr-100e_in1k_20220825-f0f0a579.pth + Config: configs/densecl/benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py diff --git a/configs/densenet/README.md b/configs/densenet/README.md new file mode 100644 index 0000000..fe40fdd --- /dev/null +++ b/configs/densenet/README.md @@ -0,0 +1,82 @@ +# DenseNet + +> [Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993) + + + +## Abstract + +Recent work has shown that convolutional networks can be substantially deeper, more accurate, and efficient to train if they contain shorter connections between layers close to the input and those close to the output. In this paper, we embrace this observation and introduce the Dense Convolutional Network (DenseNet), which connects each layer to every layer in a feed-forward fashion. Whereas traditional convolutional networks with L layers have L connections - one between each layer and its subsequent layer - our network has L(L+1)/2 direct connections. For each layer, the feature-maps of all preceding layers are used as inputs, and its own feature-maps are used as inputs into all subsequent layers. DenseNets have several compelling advantages: they alleviate the vanishing-gradient problem, strengthen feature propagation, encourage feature reuse, and substantially reduce the number of parameters. We evaluate our proposed architecture on four highly competitive object recognition benchmark tasks (CIFAR-10, CIFAR-100, SVHN, and ImageNet). DenseNets obtain significant improvements over the state-of-the-art on most of them, whilst requiring less computation to achieve high performance. + +
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('densenet121_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('densenet121_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/densenet/densenet121_4xb256_in1k.py https://download.openmmlab.com/mmclassification/v0/densenet/densenet121_4xb256_in1k_20220426-07450f99.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------: | :------------------------------------------------------------------------------------: | +| `densenet121_3rdparty_in1k`\* | From scratch | 7.98 | 2.88 | 74.96 | 92.21 | [config](densenet121_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet121_4xb256_in1k_20220426-07450f99.pth) | +| `densenet169_3rdparty_in1k`\* | From scratch | 14.15 | 3.42 | 76.08 | 93.11 | [config](densenet169_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet169_4xb256_in1k_20220426-a2889902.pth) | +| `densenet201_3rdparty_in1k`\* | From scratch | 20.01 | 4.37 | 77.32 | 93.64 | [config](densenet201_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet201_4xb256_in1k_20220426-05cae4ef.pth) | +| `densenet161_3rdparty_in1k`\* | From scratch | 28.68 | 7.82 | 77.61 | 93.83 | [config](densenet161_4xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet161_4xb256_in1k_20220426-ee6a80a9.pth) | + +*Models with * are converted from the [official repo](https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@misc{https://doi.org/10.48550/arxiv.1608.06993, + doi = {10.48550/ARXIV.1608.06993}, + url = {https://arxiv.org/abs/1608.06993}, + author = {Huang, Gao and Liu, Zhuang and van der Maaten, Laurens and Weinberger, Kilian Q.}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {Densely Connected Convolutional Networks}, + publisher = {arXiv}, + year = {2016}, + copyright = {arXiv.org perpetual, non-exclusive license} +} +``` diff --git a/configs/densenet/densenet121_4xb256_in1k.py b/configs/densenet/densenet121_4xb256_in1k.py new file mode 100644 index 0000000..dc9854f --- /dev/null +++ b/configs/densenet/densenet121_4xb256_in1k.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/densenet/densenet121.py', + '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_dataloader = dict(batch_size=256) + +# schedule settings +train_cfg = dict(by_epoch=True, max_epochs=90) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (4 GPUs) x (256 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/densenet/densenet161_4xb256_in1k.py b/configs/densenet/densenet161_4xb256_in1k.py new file mode 100644 index 0000000..a28a278 --- /dev/null +++ b/configs/densenet/densenet161_4xb256_in1k.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/densenet/densenet161.py', + '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_dataloader = dict(batch_size=256) + +# schedule settings +train_cfg = dict(by_epoch=True, max_epochs=90) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (4 GPUs) x (256 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/densenet/densenet169_4xb256_in1k.py b/configs/densenet/densenet169_4xb256_in1k.py new file mode 100644 index 0000000..73469da --- /dev/null +++ b/configs/densenet/densenet169_4xb256_in1k.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/densenet/densenet169.py', + '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_dataloader = dict(batch_size=256) + +# schedule settings +train_cfg = dict(by_epoch=True, max_epochs=90) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (4 GPUs) x (256 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/densenet/densenet201_4xb256_in1k.py b/configs/densenet/densenet201_4xb256_in1k.py new file mode 100644 index 0000000..4a9b7b1 --- /dev/null +++ b/configs/densenet/densenet201_4xb256_in1k.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/densenet/densenet201.py', + '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_dataloader = dict(batch_size=256) + +# schedule settings +train_cfg = dict(by_epoch=True, max_epochs=90) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
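+# As in the other DenseNet configs above, scaling is computed relative to the
+# 4xb256 layout; enable it (e.g. via `--auto-scale-lr`, if your launcher
+# exposes that option) when training with a different GPU count or per-GPU
+# batch size.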
+# base_batch_size = (4 GPUs) x (256 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/densenet/metafile.yml b/configs/densenet/metafile.yml new file mode 100644 index 0000000..40575ac --- /dev/null +++ b/configs/densenet/metafile.yml @@ -0,0 +1,76 @@ +Collections: + - Name: DenseNet + Metadata: + Training Data: ImageNet-1k + Architecture: + - DenseBlock + Paper: + URL: https://arxiv.org/abs/1608.06993 + Title: Densely Connected Convolutional Networks + README: configs/densenet/README.md + +Models: + - Name: densenet121_3rdparty_in1k + Metadata: + FLOPs: 2881695488 + Parameters: 7978856 + In Collection: DenseNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.96 + Top 5 Accuracy: 92.21 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/densenet/densenet121_4xb256_in1k_20220426-07450f99.pth + Config: configs/densenet/densenet121_4xb256_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/densenet121-a639ec97.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py + - Name: densenet169_3rdparty_in1k + Metadata: + FLOPs: 3416860160 + Parameters: 14149480 + In Collection: DenseNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.08 + Top 5 Accuracy: 93.11 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/densenet/densenet169_4xb256_in1k_20220426-a2889902.pth + Config: configs/densenet/densenet169_4xb256_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/densenet169-b2777c0a.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py + - Name: densenet201_3rdparty_in1k + Metadata: + FLOPs: 4365236736 + Parameters: 20013928 + In Collection: DenseNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.32 + Top 5 Accuracy: 93.64 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/densenet/densenet201_4xb256_in1k_20220426-05cae4ef.pth + Config: configs/densenet/densenet201_4xb256_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/densenet201-c1103571.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py + - Name: densenet161_3rdparty_in1k + Metadata: + FLOPs: 7816363968 + Parameters: 28681000 + In Collection: DenseNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.61 + Top 5 Accuracy: 93.83 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/densenet/densenet161_4xb256_in1k_20220426-ee6a80a9.pth + Config: configs/densenet/densenet161_4xb256_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/densenet161-8d451a50.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/densenet.py diff --git a/configs/dinov2/README.md b/configs/dinov2/README.md new file mode 100644 index 0000000..aa79d6b --- /dev/null +++ b/configs/dinov2/README.md @@ -0,0 +1,58 @@ +# DINOv2 + +> [DINOv2: Learning Robust Visual Features without Supervision](https://arxiv.org/abs/2304.07193) + + + +## Abstract + +The recent breakthroughs in natural language processing for model pretraining on large quantities of data have opened the way for similar foundation models in computer vision. These models could greatly simplify the use of images in any system by producing allpurpose visual features, i.e., features that work across image distributions and tasks without finetuning. 
This work shows that existing pretraining methods, especially self-supervised methods, can produce such features if trained on enough curated data from diverse sources. We revisit existing approaches and combine different techniques to scale our pretraining in terms of data and model size. Most of the technical contributions aim at accelerating and stabilizing the training at scale. In terms of data, we propose an automatic pipeline to build a dedicated, diverse, and curated image dataset instead of uncurated data, as typically done in the self-supervised literature. In terms of models, we train a ViT model (Dosovitskiy et al., 2020) with 1B parameters and distill it into a series of smaller models that surpass the best available all-purpose features, OpenCLIP (Ilharco et al., 2021) on most of the benchmarks at image and pixel levels. + +
+ +## How to use it? + + + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('vit-small-p14_dinov2-pre_3rdparty', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :------------------------------------ | :--------: | :-------: | :--------------------------------------------: | :------------------------------------------------------------------------------------------------: | +| `vit-small-p14_dinov2-pre_3rdparty`\* | 22.06 | 46.76 | [config](vit-small-p14_dinov2-pre_headless.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/dinov2/vit-small-p14_dinov2-pre_3rdparty_20230426-5641ca5a.pth) | +| `vit-base-p14_dinov2-pre_3rdparty`\* | 86.58 | 152.00 | [config](vit-base-p14_dinov2-pre_headless.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/dinov2/vit-base-p14_dinov2-pre_3rdparty_20230426-ba246503.pth) | +| `vit-large-p14_dinov2-pre_3rdparty`\* | 304.00 | 507.00 | [config](vit-large-p14_dinov2-pre_headless.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/dinov2/vit-large-p14_dinov2-pre_3rdparty_20230426-f3302d9e.pth) | +| `vit-giant-p14_dinov2-pre_3rdparty`\* | 1136.00 | 1784.00 | [config](vit-giant-p14_dinov2-pre_headless.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/dinov2/vit-giant-p14_dinov2-pre_3rdparty_20230426-2934a630.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/dinov2). The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@misc{oquab2023dinov2, + title={DINOv2: Learning Robust Visual Features without Supervision}, + author={Oquab, Maxime and Darcet, Timothée and Moutakanni, Theo and Vo, Huy V. 
and Szafraniec, Marc and Khalidov, Vasil and Fernandez, Pierre and Haziza, Daniel and Massa, Francisco and El-Nouby, Alaaeldin and Howes, Russell and Huang, Po-Yao and Xu, Hu and Sharma, Vasu and Li, Shang-Wen and Galuba, Wojciech and Rabbat, Mike and Assran, Mido and Ballas, Nicolas and Synnaeve, Gabriel and Misra, Ishan and Jegou, Herve and Mairal, Julien and Labatut, Patrick and Joulin, Armand and Bojanowski, Piotr}, + journal={arXiv:2304.07193}, + year={2023} +} +``` diff --git a/configs/dinov2/metafile.yml b/configs/dinov2/metafile.yml new file mode 100644 index 0000000..48f205a --- /dev/null +++ b/configs/dinov2/metafile.yml @@ -0,0 +1,73 @@ +Collections: + - Name: DINOv2 + Metadata: + Architecture: + - Dropout + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + Paper: + Title: 'DINOv2: Learning Robust Visual Features without Supervision' + URL: https://arxiv.org/abs/2304.07193 + README: configs/dinov2/README.md + Code: + URL: null + Version: null + +Models: + - Name: vit-small-p14_dinov2-pre_3rdparty + Metadata: + FLOPs: 46762000000 + Parameters: 22056000 + Training Data: + - LVD-142M + In Collection: DINOv2 + Results: null + Weights: https://download.openmmlab.com/mmpretrain/v1.0/dinov2/vit-small-p14_dinov2-pre_3rdparty_20230426-5641ca5a.pth + Config: configs/dinov2/vit-small-p14_dinov2-pre_headless.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/dinov2/dinov2_vits14/dinov2_vits14_pretrain.pth + Code: https://github.com/facebookresearch/dinov2 + + - Name: vit-base-p14_dinov2-pre_3rdparty + Metadata: + FLOPs: 152000000000 + Parameters: 86580000 + Training Data: + - LVD-142M + In Collection: DINOv2 + Results: null + Weights: https://download.openmmlab.com/mmpretrain/v1.0/dinov2/vit-base-p14_dinov2-pre_3rdparty_20230426-ba246503.pth + Config: configs/dinov2/vit-base-p14_dinov2-pre_headless.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/dinov2/dinov2_vitb14/dinov2_vitb14_pretrain.pth + Code: https://github.com/facebookresearch/dinov2 + + - Name: vit-large-p14_dinov2-pre_3rdparty + Metadata: + FLOPs: 507000000000 + Parameters: 304000000 + Training Data: + - LVD-142M + In Collection: DINOv2 + Results: null + Weights: https://download.openmmlab.com/mmpretrain/v1.0/dinov2/vit-large-p14_dinov2-pre_3rdparty_20230426-f3302d9e.pth + Config: configs/dinov2/vit-large-p14_dinov2-pre_headless.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_pretrain.pth + Code: https://github.com/facebookresearch/dinov2 + + - Name: vit-giant-p14_dinov2-pre_3rdparty + Metadata: + FLOPs: 1784000000000 + Parameters: 1136000000 + Training Data: + - LVD-142M + In Collection: DINOv2 + Results: null + Weights: https://download.openmmlab.com/mmpretrain/v1.0/dinov2/vit-giant-p14_dinov2-pre_3rdparty_20230426-2934a630.pth + Config: configs/dinov2/vit-giant-p14_dinov2-pre_headless.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/dinov2/dinov2_vitg14/dinov2_vitg14_pretrain.pth + Code: https://github.com/facebookresearch/dinov2 diff --git a/configs/dinov2/vit-base-p14_dinov2-pre_headless.py b/configs/dinov2/vit-base-p14_dinov2-pre_headless.py new file mode 100644 index 0000000..524dfe3 --- /dev/null +++ b/configs/dinov2/vit-base-p14_dinov2-pre_headless.py @@ -0,0 +1,20 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=518, + patch_size=14, + layer_scale_init_value=1e-5, + ), + neck=None, + head=None) + 
+data_preprocessor = dict( + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/dinov2/vit-giant-p14_dinov2-pre_headless.py b/configs/dinov2/vit-giant-p14_dinov2-pre_headless.py new file mode 100644 index 0000000..a127359 --- /dev/null +++ b/configs/dinov2/vit-giant-p14_dinov2-pre_headless.py @@ -0,0 +1,21 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='dinov2-giant', + img_size=518, + patch_size=14, + layer_scale_init_value=1e-5, + layer_cfgs=dict(ffn_type='swiglu_fused'), + ), + neck=None, + head=None) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/dinov2/vit-large-p14_dinov2-pre_headless.py b/configs/dinov2/vit-large-p14_dinov2-pre_headless.py new file mode 100644 index 0000000..4ec7bc6 --- /dev/null +++ b/configs/dinov2/vit-large-p14_dinov2-pre_headless.py @@ -0,0 +1,20 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='large', + img_size=518, + patch_size=14, + layer_scale_init_value=1e-5, + ), + neck=None, + head=None) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/dinov2/vit-small-p14_dinov2-pre_headless.py b/configs/dinov2/vit-small-p14_dinov2-pre_headless.py new file mode 100644 index 0000000..198c5e5 --- /dev/null +++ b/configs/dinov2/vit-small-p14_dinov2-pre_headless.py @@ -0,0 +1,20 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='dinov2-small', + img_size=518, + patch_size=14, + layer_scale_init_value=1e-5, + ), + neck=None, + head=None) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/edgenext/README.md b/configs/edgenext/README.md new file mode 100644 index 0000000..1c9686f --- /dev/null +++ b/configs/edgenext/README.md @@ -0,0 +1,80 @@ +# EdgeNeXt + +> [EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile Vision Applications](https://arxiv.org/abs/2206.10589) + + + +## Abstract + +In the pursuit of achieving ever-increasing accuracy, large and complex neural networks are usually developed. Such models demand high computational resources and therefore cannot be deployed on edge devices. It is of great interest to build resource-efficient general purpose networks due to their usefulness in several application areas. In this work, we strive to effectively combine the strengths of both CNN and Transformer models and propose a new efficient hybrid architecture EdgeNeXt. Specifically in EdgeNeXt, we introduce split depth-wise transpose attention (SDTA) encoder that splits input tensors into multiple channel groups and utilizes depth-wise convolution along with self-attention across channel dimensions to implicitly increase the receptive field and encode multi-scale features. 
Our extensive experiments on classification, detection and segmentation tasks, reveal the merits of the proposed approach, outperforming state-of-the-art methods with comparatively lower compute requirements. Our EdgeNeXt model with 1.3M parameters achieves 71.2% top-1 accuracy on ImageNet-1K, outperforming MobileViT with an absolute gain of 2.2% with 28% reduction in FLOPs. Further, our EdgeNeXt model with 5.6M parameters achieves 79.4% top-1 accuracy on ImageNet-1K. + +
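The channel ("transposed") self-attention mentioned above is what keeps the SDTA encoder cheap: the attention map is C × C, so its cost grows with the number of channels rather than with the number of spatial tokens. The module below is a simplified, self-contained sketch of that idea only; it deliberately omits the channel splitting and depth-wise convolutions of the actual SDTA block shipped in mmpretrain.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class ChannelSelfAttention(nn.Module):
    """Toy transposed (channel) self-attention.

    The attention map has shape (C, C), so its cost scales with the number of
    channels rather than with the number of spatial tokens N = H * W.
    """

    def __init__(self, dim: int):
        super().__init__()
        self.qkv = nn.Linear(dim, dim * 3, bias=False)
        self.proj = nn.Linear(dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (B, N, C) with N spatial tokens of C channels each.
        q, k, v = self.qkv(x).chunk(3, dim=-1)                 # each (B, N, C)
        q = F.normalize(q, dim=1)                              # normalise over tokens
        k = F.normalize(k, dim=1)
        attn = (q.transpose(-2, -1) @ k).softmax(dim=-1)       # (B, C, C)
        out = (attn @ v.transpose(-2, -1)).transpose(-2, -1)   # back to (B, N, C)
        return self.proj(out)


x = torch.rand(2, 14 * 14, 64)            # 2 images, 14 x 14 tokens, 64 channels
print(ChannelSelfAttention(64)(x).shape)  # torch.Size([2, 196, 64])
```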
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('edgenext-xxsmall_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('edgenext-xxsmall_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/edgenext/edgenext-xxsmall_8xb256_in1k.py https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-xxsmall_3rdparty_in1k_20220801-7ca8a81d.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :-----------------------------------------: | :----------------------------------------------------------------------: | +| `edgenext-xxsmall_3rdparty_in1k`\* | From scratch | 1.33 | 0.26 | 71.20 | 89.91 | [config](edgenext-xxsmall_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-xxsmall_3rdparty_in1k_20220801-7ca8a81d.pth) | +| `edgenext-xsmall_3rdparty_in1k`\* | From scratch | 2.34 | 0.53 | 74.86 | 92.31 | [config](edgenext-xsmall_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-xsmall_3rdparty_in1k_20220801-974f9fe7.pth) | +| `edgenext-small_3rdparty_in1k`\* | From scratch | 5.59 | 1.25 | 79.41 | 94.53 | [config](edgenext-small_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-small_3rdparty_in1k_20220801-d00db5f8.pth) | +| `edgenext-small-usi_3rdparty_in1k`\* | From scratch | 5.59 | 1.25 | 81.06 | 95.34 | [config](edgenext-small_8xb256-usi_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-small_3rdparty-usi_in1k_20220801-ae6d8dd3.pth) | +| `edgenext-base_3rdparty_in1k`\* | From scratch | 18.51 | 3.81 | 82.48 | 96.20 | [config](edgenext-base_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-base_3rdparty_in1k_20220801-9ade408b.pth) | +| `edgenext-base_3rdparty-usi_in1k`\* | From scratch | 18.51 | 3.81 | 83.67 | 96.70 | [config](edgenext-base_8xb256-usi_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-base_3rdparty-usi_in1k_20220801-909e8939.pth) | + +*Models with * are converted from the [official repo](https://github.com/mmaaz60/EdgeNeXt). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{Maaz2022EdgeNeXt, + title={EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile Vision Applications}, + author={Muhammad Maaz and Abdelrahman Shaker and Hisham Cholakkal and Salman Khan and Syed Waqas Zamir and Rao Muhammad Anwer and Fahad Shahbaz Khan}, + journal={2206.10589}, + year={2022} +} +``` diff --git a/configs/edgenext/edgenext-base_8xb256-usi_in1k.py b/configs/edgenext/edgenext-base_8xb256-usi_in1k.py new file mode 100644 index 0000000..13949de --- /dev/null +++ b/configs/edgenext/edgenext-base_8xb256-usi_in1k.py @@ -0,0 +1,19 @@ +_base_ = ['./edgenext-base_8xb256_in1k.py'] + +# dataset setting + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=269, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=256), + dict(type='PackInputs') +] + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +test_dataloader = val_dataloader diff --git a/configs/edgenext/edgenext-base_8xb256_in1k.py b/configs/edgenext/edgenext-base_8xb256_in1k.py new file mode 100644 index 0000000..5d0a75c --- /dev/null +++ b/configs/edgenext/edgenext-base_8xb256_in1k.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/edgenext/edgenext-base.py', + '../_base_/datasets/imagenet_bs64_edgenext_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=6e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/edgenext/edgenext-small_8xb256-usi_in1k.py b/configs/edgenext/edgenext-small_8xb256-usi_in1k.py new file mode 100644 index 0000000..d6bc904 --- /dev/null +++ b/configs/edgenext/edgenext-small_8xb256-usi_in1k.py @@ -0,0 +1,19 @@ +_base_ = ['./edgenext-small_8xb256_in1k.py'] + +# dataset setting + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=269, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=256), + dict(type='PackInputs') +] + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +test_dataloader = val_dataloader diff --git a/configs/edgenext/edgenext-small_8xb256_in1k.py b/configs/edgenext/edgenext-small_8xb256_in1k.py new file mode 100644 index 0000000..f1d99bd --- /dev/null +++ b/configs/edgenext/edgenext-small_8xb256_in1k.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/edgenext/edgenext-small.py', + '../_base_/datasets/imagenet_bs64_edgenext_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=6e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/edgenext/edgenext-xsmall_8xb256_in1k.py b/configs/edgenext/edgenext-xsmall_8xb256_in1k.py new file mode 100644 index 0000000..9d2326f --- /dev/null +++ b/configs/edgenext/edgenext-xsmall_8xb256_in1k.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/edgenext/edgenext-xsmall.py', + '../_base_/datasets/imagenet_bs64_edgenext_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=6e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/edgenext/edgenext-xxsmall_8xb256_in1k.py b/configs/edgenext/edgenext-xxsmall_8xb256_in1k.py new file mode 100644 index 0000000..507c3cb --- /dev/null +++ b/configs/edgenext/edgenext-xxsmall_8xb256_in1k.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/edgenext/edgenext-xxsmall.py', + '../_base_/datasets/imagenet_bs64_edgenext_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=6e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/edgenext/metafile.yml b/configs/edgenext/metafile.yml new file mode 100644 index 0000000..e69ac17 --- /dev/null +++ b/configs/edgenext/metafile.yml @@ -0,0 +1,118 @@ +Collections: + - Name: EdgeNeXt + Metadata: + Training Data: ImageNet-1k + Architecture: + - SDTA + - 1x1 Convolution + - Channel Self-attention + Paper: + URL: https://arxiv.org/abs/2206.10589 + Title: 'EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile Vision Applications' + README: configs/edgenext/README.md + Code: + Version: v1.0.0rc1 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.23.2/mmcls/models/backbones/edgenext.py + +Models: + - Name: edgenext-xxsmall_3rdparty_in1k + Metadata: + FLOPs: 255640144 + Parameters: 1327216 + In Collection: EdgeNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 71.20 + Top 5 Accuracy: 89.91 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-xxsmall_3rdparty_in1k_20220801-7ca8a81d.pth + Config: configs/edgenext/edgenext-xxsmall_8xb256_in1k.py + Converted From: + Weights: https://github.com/mmaaz60/EdgeNeXt/releases/download/v1.0/edgenext_xxsmall.pth + Code: https://github.com/mmaaz60/EdgeNeXt + - Name: edgenext-xsmall_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 529970560 + Parameters: 2336804 + In Collection: EdgeNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.86 + Top 5 Accuracy: 92.31 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-xsmall_3rdparty_in1k_20220801-974f9fe7.pth + Config: configs/edgenext/edgenext-xsmall_8xb256_in1k.py + Converted From: + Weights: 
https://github.com/mmaaz60/EdgeNeXt/releases/download/v1.0/edgenext_xsmall.pth + Code: https://github.com/mmaaz60/EdgeNeXt + - Name: edgenext-small_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 1249788000 + Parameters: 5586832 + In Collection: EdgeNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.41 + Top 5 Accuracy: 94.53 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-small_3rdparty_in1k_20220801-d00db5f8.pth + Config: configs/edgenext/edgenext-small_8xb256_in1k.py + Converted From: + Weights: https://github.com/mmaaz60/EdgeNeXt/releases/download/v1.0/edgenext_small.pth + Code: https://github.com/mmaaz60/EdgeNeXt + - Name: edgenext-small-usi_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 1249788000 + Parameters: 5586832 + In Collection: EdgeNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.06 + Top 5 Accuracy: 95.34 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-small_3rdparty-usi_in1k_20220801-ae6d8dd3.pth + Config: configs/edgenext/edgenext-small_8xb256-usi_in1k.py + Converted From: + Weights: https://github.com/mmaaz60/EdgeNeXt/releases/download/v1.1/edgenext_small_usi.pth + Code: https://github.com/mmaaz60/EdgeNeXt + - Name: edgenext-base_3rdparty_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 3814395280 + Parameters: 18511292 + In Collection: EdgeNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.48 + Top 5 Accuracy: 96.2 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-base_3rdparty_in1k_20220801-9ade408b.pth + Config: configs/edgenext/edgenext-base_8xb256_in1k.py + Converted From: + Weights: https://github.com/mmaaz60/EdgeNeXt/releases/download/v1.2/edgenext_base.pth + Code: https://github.com/mmaaz60/EdgeNeXt + - Name: edgenext-base_3rdparty-usi_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 3814395280 + Parameters: 18511292 + In Collection: EdgeNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.67 + Top 5 Accuracy: 96.7 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-base_3rdparty-usi_in1k_20220801-909e8939.pth + Config: configs/edgenext/edgenext-base_8xb256-usi_in1k.py + Converted From: + Weights: https://github.com/mmaaz60/EdgeNeXt/releases/download/v1.2/edgenext_base_usi.pth + Code: https://github.com/mmaaz60/EdgeNeXt diff --git a/configs/efficientformer/README.md b/configs/efficientformer/README.md new file mode 100644 index 0000000..537777e --- /dev/null +++ b/configs/efficientformer/README.md @@ -0,0 +1,88 @@ +# EfficientFormer + +> [EfficientFormer: Vision Transformers at MobileNet Speed](https://arxiv.org/abs/2206.01191) + + + +## Abstract + +Vision Transformers (ViT) have shown rapid progress in computer vision tasks, achieving promising results on various benchmarks. However, due to the massive number of parameters and model design, e.g., attention mechanism, ViT-based models are generally times slower than lightweight convolutional networks. Therefore, the deployment of ViT for real-time applications is particularly challenging, especially on resource-constrained hardware such as mobile devices. Recent efforts try to reduce the computation complexity of ViT through network architecture search or hybrid design with MobileNet block, yet the inference speed is still unsatisfactory. 
This leads to an important question: can transformers run as fast as MobileNet while obtaining high performance? To answer this, we first revisit the network architecture and operators used in ViT-based models and identify inefficient designs. Then we introduce a dimension-consistent pure transformer (without MobileNet blocks) as a design paradigm. Finally, we perform latency-driven slimming to get a series of final models dubbed EfficientFormer. Extensive experiments show the superiority of EfficientFormer in performance and speed on mobile devices. Our fastest model, EfficientFormer-L1, achieves 79.2% top-1 accuracy on ImageNet-1K with only 1.6 ms inference latency on iPhone 12 (compiled with CoreML), which runs as fast as MobileNetV2×1.4 (1.6 ms, 74.7% top-1), and our largest model, EfficientFormer-L7, obtains 83.3% accuracy with only 7.0 ms latency. Our work proves that properly designed transformers can reach extremely low latency on mobile devices while maintaining high performance. + +
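The latency numbers above come from CoreML-compiled models on an iPhone 12 and cannot be reproduced directly with the PyTorch checkpoints hosted here. As a rough sanity check, the sketch below (assuming the `get_model` API shown in the usage section) times the forward pass of the converted EfficientFormer-L1 on random input; treat the result only as a desktop proxy, not as the on-device latency reported in the paper.

```python
import time

import torch
from mmpretrain import get_model

# Build the model; use pretrained=True to load the converted checkpoint.
model = get_model('efficientformer-l1_3rdparty_8xb128_in1k', pretrained=False)
model.eval()

inputs = torch.rand(1, 3, 224, 224)

with torch.no_grad():
    for _ in range(10):                 # warm-up iterations
        model(inputs)
    start = time.perf_counter()
    for _ in range(50):
        model(inputs)
    elapsed = time.perf_counter() - start

print(f'mean forward latency: {elapsed / 50 * 1000:.2f} ms / image')
```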
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('efficientformer-l1_3rdparty_8xb128_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('efficientformer-l1_3rdparty_8xb128_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/efficientformer/efficientformer-l1_8xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l1_3rdparty_in1k_20220915-cc3e1ac6.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :-----------------------------------------: | :---------------------------------------------------------------: | +| `efficientformer-l1_3rdparty_8xb128_in1k`\* | From scratch | 12.28 | 1.30 | 80.46 | 94.99 | [config](efficientformer-l1_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l1_3rdparty_in1k_20220915-cc3e1ac6.pth) | +| `efficientformer-l3_3rdparty_8xb128_in1k`\* | From scratch | 31.41 | 3.74 | 82.45 | 96.18 | [config](efficientformer-l3_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l3_3rdparty_in1k_20220915-466793d6.pth) | +| `efficientformer-l7_3rdparty_8xb128_in1k`\* | From scratch | 82.23 | 10.16 | 83.40 | 96.60 | [config](efficientformer-l7_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l7_3rdparty_in1k_20220915-185e30af.pth) | + +*Models with * are converted from the [official repo](https://github.com/snap-research/EfficientFormer). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@misc{https://doi.org/10.48550/arxiv.2206.01191, + doi = {10.48550/ARXIV.2206.01191}, + + url = {https://arxiv.org/abs/2206.01191}, + + author = {Li, Yanyu and Yuan, Geng and Wen, Yang and Hu, Eric and Evangelidis, Georgios and Tulyakov, Sergey and Wang, Yanzhi and Ren, Jian}, + + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + + title = {EfficientFormer: Vision Transformers at MobileNet Speed}, + + publisher = {arXiv}, + + year = {2022}, + + copyright = {Creative Commons Attribution 4.0 International} +} +``` diff --git a/configs/efficientformer/efficientformer-l1_8xb128_in1k.py b/configs/efficientformer/efficientformer-l1_8xb128_in1k.py new file mode 100644 index 0000000..7f55dc6 --- /dev/null +++ b/configs/efficientformer/efficientformer-l1_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/efficientformer-l1.py', + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] diff --git a/configs/efficientformer/efficientformer-l3_8xb128_in1k.py b/configs/efficientformer/efficientformer-l3_8xb128_in1k.py new file mode 100644 index 0000000..d8be5ef --- /dev/null +++ b/configs/efficientformer/efficientformer-l3_8xb128_in1k.py @@ -0,0 +1,3 @@ +_base_ = './efficientformer-l1_8xb128_in1k.py' + +model = dict(backbone=dict(arch='l3'), head=dict(in_channels=512)) diff --git a/configs/efficientformer/efficientformer-l7_8xb128_in1k.py b/configs/efficientformer/efficientformer-l7_8xb128_in1k.py new file mode 100644 index 0000000..c225265 --- /dev/null +++ b/configs/efficientformer/efficientformer-l7_8xb128_in1k.py @@ -0,0 +1,3 @@ +_base_ = './efficientformer-l1_8xb128_in1k.py' + +model = dict(backbone=dict(arch='l7'), head=dict(in_channels=768)) diff --git a/configs/efficientformer/metafile.yml b/configs/efficientformer/metafile.yml new file mode 100644 index 0000000..5c70f07 --- /dev/null +++ b/configs/efficientformer/metafile.yml @@ -0,0 +1,67 @@ +Collections: + - Name: EfficientFormer + Metadata: + Training Data: ImageNet-1k + Architecture: + - Pooling + - 1x1 Convolution + - LayerScale + - MetaFormer + Paper: + URL: https://arxiv.org/abs/2206.01191 + Title: "EfficientFormer: Vision Transformers at MobileNet Speed" + README: configs/efficientformer/README.md + Code: + Version: v1.0.0rc1 + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc1/configs/efficientformer/metafile.yml + +Models: + - Name: efficientformer-l1_3rdparty_8xb128_in1k + Metadata: + FLOPs: 1304601088 # 1.3G + Parameters: 12278696 # 12M + In Collection: EfficientFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.46 + Top 5 Accuracy: 94.99 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l1_3rdparty_in1k_20220915-cc3e1ac6.pth + Config: configs/efficientformer/efficientformer-l1_8xb128_in1k.py + Converted From: + Weights: https://drive.google.com/file/d/11SbX-3cfqTOc247xKYubrAjBiUmr818y/view?usp=sharing + Code: https://github.com/snap-research/EfficientFormer + - Name: efficientformer-l3_3rdparty_8xb128_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 3737045760 # 3.7G + Parameters: 31406000 # 31M + In Collection: EfficientFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.45 + Top 5 Accuracy: 96.18 + Task: Image Classification 
+ Weights: https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l3_3rdparty_in1k_20220915-466793d6.pth + Config: configs/efficientformer/efficientformer-l3_8xb128_in1k.py + Converted From: + Weights: https://drive.google.com/file/d/1OyyjKKxDyMj-BcfInp4GlDdwLu3hc30m/view?usp=sharing + Code: https://github.com/snap-research/EfficientFormer + - Name: efficientformer-l7_3rdparty_8xb128_in1k + Metadata: + FLOPs: 10163951616 # 10.2G + Parameters: 82229328 # 82M + In Collection: EfficientFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.40 + Top 5 Accuracy: 96.60 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-l7_3rdparty_in1k_20220915-185e30af.pth + Config: configs/efficientformer/efficientformer-l7_8xb128_in1k.py + Converted From: + Weights: https://drive.google.com/file/d/1cVw-pctJwgvGafeouynqWWCwgkcoFMM5/view?usp=sharing + Code: https://github.com/snap-research/EfficientFormer diff --git a/configs/efficientnet/README.md b/configs/efficientnet/README.md new file mode 100644 index 0000000..c7b7b76 --- /dev/null +++ b/configs/efficientnet/README.md @@ -0,0 +1,122 @@ +# EfficientNet + +> [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946v5) + + + +## Introduction + +EfficientNets are a family of image classification models, which achieve state-of-the-art accuracy, yet being an order-of-magnitude smaller and faster than previous models. + +EfficientNets are based on AutoML and Compound Scaling. In particular, we first use [AutoML MNAS Mobile framework](https://ai.googleblog.com/2018/08/mnasnet-towards-automating-design-of.html) to develop a mobile-size baseline network, named as EfficientNet-B0; Then, we use the compound scaling method to scale up this baseline to obtain EfficientNet-B1 to B7. + +
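To make the compound scaling rule concrete: the paper scales depth, width, and input resolution together with a single coefficient φ, using grid-searched constants α = 1.2, β = 1.1, γ = 1.15, chosen so that α · β² · γ² ≈ 2 and FLOPs roughly double for each increment of φ. The sketch below simply evaluates these factors; the φ values are illustrative, since the released B1–B7 checkpoints use hand-tuned settings rather than exact integer φ.

```python
# Compound scaling factors from the paper: depth ~ alpha**phi,
# width ~ beta**phi, resolution ~ gamma**phi, with
# alpha * beta**2 * gamma**2 ~= 2, so FLOPs roughly double per step of phi.
alpha, beta, gamma = 1.2, 1.1, 1.15

for phi in range(8):  # phi = 0 corresponds to the EfficientNet-B0 baseline
    depth = alpha ** phi
    width = beta ** phi
    resolution = gamma ** phi
    flops = (alpha * beta ** 2 * gamma ** 2) ** phi
    print(f'phi={phi}: depth x{depth:.2f}, width x{width:.2f}, '
          f'resolution x{resolution:.2f}, ~FLOPs x{flops:.1f}')
```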
+ +## Abstract + +
+Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters. + +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('efficientnet-b0_3rdparty_8xb32_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('efficientnet-b0_3rdparty_8xb32_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/efficientnet/efficientnet-b0_8xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32_in1k_20220119-a7e2a0b1.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :--------------------------------------------: | :----------------------------------------------------: | +| `efficientnet-b0_3rdparty_8xb32_in1k`\* | From scratch | 5.29 | 0.42 | 76.74 | 93.17 | [config](efficientnet-b0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32_in1k_20220119-a7e2a0b1.pth) | +| `efficientnet-b0_3rdparty_8xb32-aa_in1k`\* | From scratch | 5.29 | 0.42 | 77.26 | 93.41 | [config](efficientnet-b0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa_in1k_20220119-8d939117.pth) | +| `efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k`\* | From scratch | 5.29 | 0.42 | 77.53 | 93.61 | [config](efficientnet-b0_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth) | +| `efficientnet-b0_3rdparty-ra-noisystudent_in1k`\* | From scratch | 5.29 | 0.42 | 77.63 | 94.00 | [config](efficientnet-b0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty-ra-noisystudent_in1k_20221103-75cd08d3.pth) | +| `efficientnet-b1_3rdparty_8xb32_in1k`\* | From scratch | 7.79 | 0.74 | 78.68 | 94.28 | [config](efficientnet-b1_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32_in1k_20220119-002556d9.pth) | +| `efficientnet-b1_3rdparty_8xb32-aa_in1k`\* | From scratch | 7.79 | 0.74 | 79.20 | 94.42 | [config](efficientnet-b1_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa_in1k_20220119-619d8ae3.pth) | +| `efficientnet-b1_3rdparty_8xb32-aa-advprop_in1k`\* | From scratch | 7.79 | 0.74 | 79.52 | 94.43 | [config](efficientnet-b1_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa-advprop_in1k_20220119-5715267d.pth) | +| `efficientnet-b1_3rdparty-ra-noisystudent_in1k`\* | From scratch | 7.79 | 0.74 | 81.44 | 95.83 | [config](efficientnet-b1_8xb32_in1k.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty-ra-noisystudent_in1k_20221103-756bcbc0.pth) | +| `efficientnet-b2_3rdparty_8xb32_in1k`\* | From scratch | 9.11 | 1.07 | 79.64 | 94.80 | [config](efficientnet-b2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32_in1k_20220119-ea374a30.pth) | +| `efficientnet-b2_3rdparty_8xb32-aa_in1k`\* | From scratch | 9.11 | 1.07 | 80.21 | 94.96 | [config](efficientnet-b2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa_in1k_20220119-dd61e80b.pth) | +| `efficientnet-b2_3rdparty_8xb32-aa-advprop_in1k`\* | From scratch | 9.11 | 1.07 | 80.45 | 95.07 | [config](efficientnet-b2_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa-advprop_in1k_20220119-1655338a.pth) | +| `efficientnet-b2_3rdparty-ra-noisystudent_in1k`\* | From scratch | 9.11 | 1.07 | 82.47 | 96.23 | [config](efficientnet-b2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty-ra-noisystudent_in1k_20221103-301ed299.pth) | +| `efficientnet-b3_3rdparty_8xb32_in1k`\* | From scratch | 12.23 | 1.95 | 81.01 | 95.34 | [config](efficientnet-b3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32_in1k_20220119-4b4d7487.pth) | +| `efficientnet-b3_3rdparty_8xb32-aa_in1k`\* | From scratch | 12.23 | 1.95 | 81.58 | 95.67 | [config](efficientnet-b3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth) | +| `efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k`\* | From scratch | 12.23 | 1.95 | 81.81 | 95.69 | [config](efficientnet-b3_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k_20220119-53b41118.pth) | +| `efficientnet-b3_3rdparty-ra-noisystudent_in1k`\* | From scratch | 12.23 | 1.95 | 84.02 | 96.89 | [config](efficientnet-b3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty-ra-noisystudent_in1k_20221103-a4ab5fd6.pth) | +| `efficientnet-b4_3rdparty_8xb32_in1k`\* | From scratch | 19.34 | 4.66 | 82.57 | 96.09 | [config](efficientnet-b4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32_in1k_20220119-81fd4077.pth) | +| `efficientnet-b4_3rdparty_8xb32-aa_in1k`\* | From scratch | 19.34 | 4.66 | 82.95 | 96.26 | [config](efficientnet-b4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa_in1k_20220119-45b8bd2b.pth) | +| `efficientnet-b4_3rdparty_8xb32-aa-advprop_in1k`\* | From scratch | 19.34 | 4.66 | 83.25 | 96.44 | [config](efficientnet-b4_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa-advprop_in1k_20220119-38c2238c.pth) | +| `efficientnet-b4_3rdparty-ra-noisystudent_in1k`\* | From scratch | 19.34 | 4.66 | 85.25 | 97.52 | [config](efficientnet-b4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty-ra-noisystudent_in1k_20221103-16ba8a2d.pth) | +| `efficientnet-b5_3rdparty_8xb32_in1k`\* | From scratch 
| 30.39 | 10.80 | 83.18 | 96.47 | [config](efficientnet-b5_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32_in1k_20220119-e9814430.pth) | +| `efficientnet-b5_3rdparty_8xb32-aa_in1k`\* | From scratch | 30.39 | 10.80 | 83.82 | 96.76 | [config](efficientnet-b5_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa_in1k_20220119-2cab8b78.pth) | +| `efficientnet-b5_3rdparty_8xb32-aa-advprop_in1k`\* | From scratch | 30.39 | 10.80 | 84.21 | 96.98 | [config](efficientnet-b5_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa-advprop_in1k_20220119-f57a895a.pth) | +| `efficientnet-b5_3rdparty-ra-noisystudent_in1k`\* | From scratch | 30.39 | 10.80 | 86.08 | 97.75 | [config](efficientnet-b5_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty-ra-noisystudent_in1k_20221103-111a185f.pth) | +| `efficientnet-b6_3rdparty_8xb32-aa_in1k`\* | From scratch | 43.04 | 19.97 | 84.05 | 96.82 | [config](efficientnet-b6_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa_in1k_20220119-45b03310.pth) | +| `efficientnet-b6_3rdparty_8xb32-aa-advprop_in1k`\* | From scratch | 43.04 | 19.97 | 84.74 | 97.14 | [config](efficientnet-b6_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa-advprop_in1k_20220119-bfe3485e.pth) | +| `efficientnet-b6_3rdparty-ra-noisystudent_in1k`\* | From scratch | 43.04 | 19.97 | 86.47 | 97.87 | [config](efficientnet-b6_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty-ra-noisystudent_in1k_20221103-7de7d2cc.pth) | +| `efficientnet-b7_3rdparty_8xb32-aa_in1k`\* | From scratch | 66.35 | 39.32 | 84.38 | 96.88 | [config](efficientnet-b7_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa_in1k_20220119-bf03951c.pth) | +| `efficientnet-b7_3rdparty_8xb32-aa-advprop_in1k`\* | From scratch | 66.35 | 39.32 | 85.14 | 97.23 | [config](efficientnet-b7_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa-advprop_in1k_20220119-c6dbff10.pth) | +| `efficientnet-b7_3rdparty-ra-noisystudent_in1k`\* | From scratch | 66.35 | 39.32 | 86.83 | 98.08 | [config](efficientnet-b7_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty-ra-noisystudent_in1k_20221103-a82894bc.pth) | +| `efficientnet-b8_3rdparty_8xb32-aa-advprop_in1k`\* | From scratch | 87.41 | 65.00 | 85.38 | 97.28 | [config](efficientnet-b8_8xb32-01norm_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b8_3rdparty_8xb32-aa-advprop_in1k_20220119-297ce1b7.pth) | +| `efficientnet-l2_3rdparty-ra-noisystudent_in1k-800px`\* | From scratch | 480.31 | 174.20 | 88.33 | 98.65 | [config](efficientnet-l2_8xb8_in1k-800px.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-l2_3rdparty-ra-noisystudent_in1k_20221103-be73be13.pth) | +| `efficientnet-l2_3rdparty-ra-noisystudent_in1k-475px`\* | From scratch | 480.31 | 484.98 | 88.18 | 98.55 | [config](efficientnet-l2_8xb32_in1k-475px.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-l2_3rdparty-ra-noisystudent_in1k-475px_20221103-5a0d8058.pth) | + +*Models with * are converted from the [official repo](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet). The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{tan2019efficientnet, + title={Efficientnet: Rethinking model scaling for convolutional neural networks}, + author={Tan, Mingxing and Le, Quoc}, + booktitle={International Conference on Machine Learning}, + pages={6105--6114}, + year={2019}, + organization={PMLR} +} +``` diff --git a/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py new file mode 100644 index 0000000..369d0a4 --- /dev/null +++ b/configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/efficientnet_b0.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b0_8xb32_in1k.py b/configs/efficientnet/efficientnet-b0_8xb32_in1k.py new file mode 100644 index 0000000..e4263da --- /dev/null +++ b/configs/efficientnet/efficientnet-b0_8xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/efficientnet_b0.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py new file mode 100644 index 0000000..0405cf5 --- /dev/null +++ b/configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/efficientnet_b1.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=240), + dict(type='RandomFlip', prob=0.5, 
direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=240), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b1_8xb32_in1k.py b/configs/efficientnet/efficientnet-b1_8xb32_in1k.py new file mode 100644 index 0000000..e5bf2e8 --- /dev/null +++ b/configs/efficientnet/efficientnet-b1_8xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/efficientnet_b1.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=240), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=240), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py new file mode 100644 index 0000000..da3f23b --- /dev/null +++ b/configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/efficientnet_b2.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=260), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=260), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b2_8xb32_in1k.py b/configs/efficientnet/efficientnet-b2_8xb32_in1k.py new file mode 100644 index 0000000..060a2ad --- /dev/null +++ b/configs/efficientnet/efficientnet-b2_8xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/efficientnet_b2.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=260), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=260), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py 
b/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py new file mode 100644 index 0000000..55729a9 --- /dev/null +++ b/configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/efficientnet_b3.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=300), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=300), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b3_8xb32_in1k.py b/configs/efficientnet/efficientnet-b3_8xb32_in1k.py new file mode 100644 index 0000000..d84de5a --- /dev/null +++ b/configs/efficientnet/efficientnet-b3_8xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/efficientnet_b3.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=300), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=300), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py new file mode 100644 index 0000000..a4dbfb2 --- /dev/null +++ b/configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/efficientnet_b4.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=380), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=380), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b4_8xb32_in1k.py b/configs/efficientnet/efficientnet-b4_8xb32_in1k.py new file mode 100644 index 0000000..08e246c --- /dev/null +++ b/configs/efficientnet/efficientnet-b4_8xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/efficientnet_b4.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + 
'../_base_/default_runtime.py', +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=380), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=380), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py new file mode 100644 index 0000000..0c646da --- /dev/null +++ b/configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/efficientnet_b5.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=456), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=456), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b5_8xb32_in1k.py b/configs/efficientnet/efficientnet-b5_8xb32_in1k.py new file mode 100644 index 0000000..af4fa4b --- /dev/null +++ b/configs/efficientnet/efficientnet-b5_8xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/efficientnet_b5.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=456), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=456), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py new file mode 100644 index 0000000..dd15054 --- /dev/null +++ b/configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/efficientnet_b6.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=528), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + 
dict(type='EfficientNetCenterCrop', crop_size=528), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b6_8xb32_in1k.py b/configs/efficientnet/efficientnet-b6_8xb32_in1k.py new file mode 100644 index 0000000..fae02ae --- /dev/null +++ b/configs/efficientnet/efficientnet-b6_8xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/efficientnet_b6.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=528), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=528), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py new file mode 100644 index 0000000..687dfd2 --- /dev/null +++ b/configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/efficientnet_b7.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=600), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=600), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b7_8xb32_in1k.py b/configs/efficientnet/efficientnet-b7_8xb32_in1k.py new file mode 100644 index 0000000..5d783bb --- /dev/null +++ b/configs/efficientnet/efficientnet-b7_8xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/efficientnet_b7.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=600), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=600), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py new file mode 100644 index 0000000..07d3692 --- /dev/null +++ 
b/configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/efficientnet_b8.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=672), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=672), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-b8_8xb32_in1k.py b/configs/efficientnet/efficientnet-b8_8xb32_in1k.py new file mode 100644 index 0000000..868986f --- /dev/null +++ b/configs/efficientnet/efficientnet-b8_8xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/efficientnet_b8.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=672), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=672), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-em_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-em_8xb32-01norm_in1k.py new file mode 100644 index 0000000..9de3b27 --- /dev/null +++ b/configs/efficientnet/efficientnet-em_8xb32-01norm_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/models/efficientnet_em.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=240), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=240), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-es_8xb32-01norm_in1k.py b/configs/efficientnet/efficientnet-es_8xb32-01norm_in1k.py new file mode 100644 index 0000000..e643d55 --- /dev/null +++ b/configs/efficientnet/efficientnet-es_8xb32-01norm_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/efficientnet_es.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + 
dict(type='EfficientNetRandomCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-l2_8xb32_in1k-475px.py b/configs/efficientnet/efficientnet-l2_8xb32_in1k-475px.py new file mode 100644 index 0000000..5606951 --- /dev/null +++ b/configs/efficientnet/efficientnet-l2_8xb32_in1k-475px.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/efficientnet_l2.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=475), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=475), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/efficientnet-l2_8xb8_in1k-800px.py b/configs/efficientnet/efficientnet-l2_8xb8_in1k-800px.py new file mode 100644 index 0000000..61bddfa --- /dev/null +++ b/configs/efficientnet/efficientnet-l2_8xb8_in1k-800px.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/efficientnet_l2.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=800), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=800), + dict(type='PackInputs'), +] + +train_dataloader = dict(batch_size=8, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(batch_size=8, dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet/metafile.yml b/configs/efficientnet/metafile.yml new file mode 100644 index 0000000..21130c4 --- /dev/null +++ b/configs/efficientnet/metafile.yml @@ -0,0 +1,551 @@ +Collections: + - Name: EfficientNet + Metadata: + Training Data: ImageNet-1k + Architecture: + - 1x1 Convolution + - Average Pooling + - Convolution + - Dense Connections + - Dropout + - Inverted Residual Block + - RMSProp + - Squeeze-and-Excitation Block + - Swish + Paper: + URL: https://arxiv.org/abs/1905.11946v5 + Title: "EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks" + README: configs/efficientnet/README.md + Code: + Version: v0.20.1 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.20.1/mmcls/models/backbones/efficientnet.py + +Models: + - Name: efficientnet-b0_3rdparty_8xb32_in1k + Metadata: + FLOPs: 420592480 + Parameters: 5288548 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.74 + Top 5 Accuracy: 93.17 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32_in1k_20220119-a7e2a0b1.pth + Config: configs/efficientnet/efficientnet-b0_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b0.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b0_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 420592480 + Parameters: 5288548 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.26 + Top 5 Accuracy: 93.41 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa_in1k_20220119-8d939117.pth + Config: configs/efficientnet/efficientnet-b0_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b0.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 420592480 + Parameters: 5288548 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.53 + Top 5 Accuracy: 93.61 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-advprop_in1k_20220119-26434485.pth + Config: configs/efficientnet/efficientnet-b0_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b0.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b0_3rdparty-ra-noisystudent_in1k + Metadata: + FLOPs: 420592480 + Parameters: 5288548 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.63 + Top 5 Accuracy: 94.00 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty-ra-noisystudent_in1k_20221103-75cd08d3.pth + Config: configs/efficientnet/efficientnet-b0_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/noisystudent/noisy_student_efficientnet-b0.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b1_3rdparty_8xb32_in1k + Metadata: + FLOPs: 744059920 + Parameters: 7794184 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.68 + Top 5 Accuracy: 94.28 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32_in1k_20220119-002556d9.pth + Config: configs/efficientnet/efficientnet-b1_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b1.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b1_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 744059920 + Parameters: 7794184 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.20 + Top 5 Accuracy: 94.42 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa_in1k_20220119-619d8ae3.pth + Config: configs/efficientnet/efficientnet-b1_8xb32_in1k.py + Converted 
From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b1.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b1_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 744059920 + Parameters: 7794184 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.52 + Top 5 Accuracy: 94.43 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa-advprop_in1k_20220119-5715267d.pth + Config: configs/efficientnet/efficientnet-b1_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b1.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b1_3rdparty-ra-noisystudent_in1k + Metadata: + FLOPs: 744059920 + Parameters: 7794184 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.44 + Top 5 Accuracy: 95.83 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty-ra-noisystudent_in1k_20221103-756bcbc0.pth + Config: configs/efficientnet/efficientnet-b1_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/noisystudent/noisy_student_efficientnet-b1.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b2_3rdparty_8xb32_in1k + Metadata: + FLOPs: 1066620392 + Parameters: 9109994 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.64 + Top 5 Accuracy: 94.80 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32_in1k_20220119-ea374a30.pth + Config: configs/efficientnet/efficientnet-b2_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b2.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b2_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 1066620392 + Parameters: 9109994 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.21 + Top 5 Accuracy: 94.96 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa_in1k_20220119-dd61e80b.pth + Config: configs/efficientnet/efficientnet-b2_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b2.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b2_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 1066620392 + Parameters: 9109994 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.45 + Top 5 Accuracy: 95.07 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa-advprop_in1k_20220119-1655338a.pth + Config: configs/efficientnet/efficientnet-b2_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b2.tar.gz + Code: 
https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b2_3rdparty-ra-noisystudent_in1k + Metadata: + FLOPs: 1066620392 + Parameters: 9109994 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.47 + Top 5 Accuracy: 96.23 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty-ra-noisystudent_in1k_20221103-301ed299.pth + Config: configs/efficientnet/efficientnet-b2_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/noisystudent/noisy_student_efficientnet-b2.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b3_3rdparty_8xb32_in1k + Metadata: + FLOPs: 1953798216 + Parameters: 12233232 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.01 + Top 5 Accuracy: 95.34 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32_in1k_20220119-4b4d7487.pth + Config: configs/efficientnet/efficientnet-b3_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b3.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b3_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 1953798216 + Parameters: 12233232 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.58 + Top 5 Accuracy: 95.67 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth + Config: configs/efficientnet/efficientnet-b3_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b3.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 1953798216 + Parameters: 12233232 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.81 + Top 5 Accuracy: 95.69 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa-advprop_in1k_20220119-53b41118.pth + Config: configs/efficientnet/efficientnet-b3_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b3.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b3_3rdparty-ra-noisystudent_in1k + Metadata: + FLOPs: 1953798216 + Parameters: 12233232 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.02 + Top 5 Accuracy: 96.89 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty-ra-noisystudent_in1k_20221103-a4ab5fd6.pth + Config: configs/efficientnet/efficientnet-b3_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/noisystudent/noisy_student_efficientnet-b3.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b4_3rdparty_8xb32_in1k + Metadata: + FLOPs: 4659080176 
+ Parameters: 19341616 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.57 + Top 5 Accuracy: 96.09 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32_in1k_20220119-81fd4077.pth + Config: configs/efficientnet/efficientnet-b4_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b4.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b4_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 4659080176 + Parameters: 19341616 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.95 + Top 5 Accuracy: 96.26 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa_in1k_20220119-45b8bd2b.pth + Config: configs/efficientnet/efficientnet-b4_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b4.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b4_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 4659080176 + Parameters: 19341616 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.25 + Top 5 Accuracy: 96.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa-advprop_in1k_20220119-38c2238c.pth + Config: configs/efficientnet/efficientnet-b4_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b4.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b4_3rdparty-ra-noisystudent_in1k + Metadata: + FLOPs: 4659080176 + Parameters: 19341616 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.25 + Top 5 Accuracy: 97.52 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty-ra-noisystudent_in1k_20221103-16ba8a2d.pth + Config: configs/efficientnet/efficientnet-b4_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/noisystudent/noisy_student_efficientnet-b4.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b5_3rdparty_8xb32_in1k + Metadata: + FLOPs: 10799472560 + Parameters: 30389784 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.18 + Top 5 Accuracy: 96.47 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32_in1k_20220119-e9814430.pth + Config: configs/efficientnet/efficientnet-b5_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckpts/efficientnet-b5.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b5_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 10799472560 + Parameters: 30389784 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.82 + Top 5 Accuracy: 96.76 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa_in1k_20220119-2cab8b78.pth + Config: configs/efficientnet/efficientnet-b5_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b5.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b5_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 10799472560 + Parameters: 30389784 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.21 + Top 5 Accuracy: 96.98 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa-advprop_in1k_20220119-f57a895a.pth + Config: configs/efficientnet/efficientnet-b5_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b5.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b5_3rdparty-ra-noisystudent_in1k + Metadata: + FLOPs: 10799472560 + Parameters: 30389784 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.08 + Top 5 Accuracy: 97.75 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty-ra-noisystudent_in1k_20221103-111a185f.pth + Config: configs/efficientnet/efficientnet-b5_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/noisystudent/noisy_student_efficientnet-b5.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b6_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 19971777560 + Parameters: 43040704 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.05 + Top 5 Accuracy: 96.82 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa_in1k_20220119-45b03310.pth + Config: configs/efficientnet/efficientnet-b6_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b6.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b6_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 19971777560 + Parameters: 43040704 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.74 + Top 5 Accuracy: 97.14 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa-advprop_in1k_20220119-bfe3485e.pth + Config: configs/efficientnet/efficientnet-b6_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b6.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b6_3rdparty-ra-noisystudent_in1k + Metadata: + FLOPs: 19971777560 + Parameters: 43040704 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.47 + Top 5 Accuracy: 97.87 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty-ra-noisystudent_in1k_20221103-7de7d2cc.pth + Config: 
configs/efficientnet/efficientnet-b6_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/noisystudent/noisy_student_efficientnet-b6.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b7_3rdparty_8xb32-aa_in1k + Metadata: + FLOPs: 39316473392 + Parameters: 66347960 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.38 + Top 5 Accuracy: 96.88 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa_in1k_20220119-bf03951c.pth + Config: configs/efficientnet/efficientnet-b7_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/efficientnet-b7.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b7_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 39316473392 + Parameters: 66347960 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.14 + Top 5 Accuracy: 97.23 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa-advprop_in1k_20220119-c6dbff10.pth + Config: configs/efficientnet/efficientnet-b7_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b7.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b7_3rdparty-ra-noisystudent_in1k + Metadata: + FLOPs: 39316473392 + Parameters: 66347960 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.83 + Top 5 Accuracy: 98.08 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty-ra-noisystudent_in1k_20221103-a82894bc.pth + Config: configs/efficientnet/efficientnet-b7_8xb32_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/noisystudent/noisy_student_efficientnet-b7.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-b8_3rdparty_8xb32-aa-advprop_in1k + Metadata: + FLOPs: 64999827816 + Parameters: 87413142 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.38 + Top 5 Accuracy: 97.28 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b8_3rdparty_8xb32-aa-advprop_in1k_20220119-297ce1b7.pth + Config: configs/efficientnet/efficientnet-b8_8xb32-01norm_in1k.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/advprop/efficientnet-b8.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-l2_3rdparty-ra-noisystudent_in1k-800px + Metadata: + FLOPs: 174203533416 + Parameters: 480309308 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 88.33 + Top 5 Accuracy: 98.65 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-l2_3rdparty-ra-noisystudent_in1k_20221103-be73be13.pth + Config: configs/efficientnet/efficientnet-l2_8xb8_in1k-800px.py + Converted From: + Weights: 
https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/noisystudent/noisy_student_efficientnet-l2.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet + - Name: efficientnet-l2_3rdparty-ra-noisystudent_in1k-475px + Metadata: + FLOPs: 484984099280 + Parameters: 480309308 + In Collection: EfficientNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 88.18 + Top 5 Accuracy: 98.55 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-l2_3rdparty-ra-noisystudent_in1k-475px_20221103-5a0d8058.pth + Config: configs/efficientnet/efficientnet-l2_8xb32_in1k-475px.py + Converted From: + Weights: https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/noisystudent/noisy_student_efficientnet-l2_475.tar.gz + Code: https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet diff --git a/configs/efficientnet_v2/README.md b/configs/efficientnet_v2/README.md new file mode 100644 index 0000000..9654218 --- /dev/null +++ b/configs/efficientnet_v2/README.md @@ -0,0 +1,98 @@ +# EfficientNetV2 + +> [EfficientNetV2: Smaller Models and Faster Training](https://arxiv.org/abs/2104.00298) + + + +## Abstract + +This paper introduces EfficientNetV2, a new family of convolutional networks that have faster training speed and better parameter efficiency than previous models. To develop this family of models, we use a combination of training-aware neural architecture search and scaling, to jointly optimize training speed and parameter efficiency. The models were searched from the search space enriched with new ops such as Fused-MBConv. Our experiments show that EfficientNetV2 models train much faster than state-of-the-art models while being up to 6.8x smaller. Our training can be further sped up by progressively increasing the image size during training, but it often causes a drop in accuracy. To compensate for this accuracy drop, we propose to adaptively adjust regularization (e.g., dropout and data augmentation) as well, such that we can achieve both fast training and good accuracy. With progressive learning, our EfficientNetV2 significantly outperforms previous models on ImageNet and CIFAR/Cars/Flowers datasets. By pretraining on the same ImageNet21k, our EfficientNetV2 achieves 87.3% top-1 accuracy on ImageNet ILSVRC2012, outperforming the recent ViT by 2.0% accuracy while training 5x-11x faster using the same computing resources. Code will be available at https://github.com/google/automl/tree/master/efficientnetv2. + +
+ +
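The progressive learning recipe summarized above couples the training image size with the regularization strength: early stages use small images and weak regularization, later stages use large images and strong regularization. The sketch below only illustrates that coupling; the stage count, image-size range, and regularization ranges are assumed values for demonstration, not the schedule used by the paper or by the configs in this folder.

```python
def progressive_schedule(stage, num_stages=4):
    """Illustrative progressive-learning schedule (all ranges are assumptions)."""
    t = stage / (num_stages - 1)
    image_size = int(128 + t * (300 - 128))   # small images early, large images late
    randaug_magnitude = 5 + t * (15 - 5)      # weaker -> stronger augmentation
    dropout = 0.1 + t * (0.3 - 0.1)           # lighter -> heavier dropout
    return image_size, randaug_magnitude, dropout


for stage in range(4):
    size, magnitude, drop = progressive_schedule(stage)
    print(f'stage {stage}: image_size={size}, '
          f'randaug_magnitude={magnitude:.1f}, dropout={drop:.2f}')
```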
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('efficientnetv2-b0_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('efficientnetv2-b0_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/efficientnet_v2/efficientnetv2-b0_8xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-b0_3rdparty_in1k_20221221-9ef6e736.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :----------------------------------- | :--------: | :-------: | :----------------------------------------: | :-----------------------------------------------------------------------------------------------------: | +| `efficientnetv2-s_3rdparty_in21k`\* | 48.16 | 3.31 | [config](efficientnetv2-s_8xb32_in21k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-s_3rdparty_in21k_20221220-c0572b56.pth) | +| `efficientnetv2-m_3rdparty_in21k`\* | 80.84 | 5.86 | [config](efficientnetv2-m_8xb32_in21k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-m_3rdparty_in21k_20221220-073e944c.pth) | +| `efficientnetv2-l_3rdparty_in21k`\* | 145.22 | 13.11 | [config](efficientnetv2-l_8xb32_in21k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-l_3rdparty_in21k_20221220-f28f91e1.pth) | +| `efficientnetv2-xl_3rdparty_in21k`\* | 234.82 | 18.86 | [config](efficientnetv2-xl_8xb32_in21k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-xl_3rdparty_in21k_20221220-b2c9329c.pth) | + +*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py). The config files of these models are only for inference. 
We haven't reproduced the training results.* + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------------------: | :---------------------------------------------------------: | +| `efficientnetv2-b0_3rdparty_in1k`\* | From scratch | 7.14 | 0.92 | 78.52 | 94.44 | [config](efficientnetv2-b0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-b0_3rdparty_in1k_20221221-9ef6e736.pth) | +| `efficientnetv2-b1_3rdparty_in1k`\* | From scratch | 8.14 | 1.44 | 79.80 | 94.89 | [config](efficientnetv2-b1_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-b1_3rdparty_in1k_20221221-6955d9ce.pth) | +| `efficientnetv2-b2_3rdparty_in1k`\* | From scratch | 10.10 | 1.99 | 80.63 | 95.30 | [config](efficientnetv2-b2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-b2_3rdparty_in1k_20221221-74f7d493.pth) | +| `efficientnetv2-b3_3rdparty_in1k`\* | From scratch | 14.36 | 3.50 | 82.03 | 95.88 | [config](efficientnetv2-b3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-b3_3rdparty_in1k_20221221-b6f07a36.pth) | +| `efficientnetv2-s_3rdparty_in1k`\* | From scratch | 21.46 | 9.72 | 83.82 | 96.67 | [config](efficientnetv2-s_8xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-s_3rdparty_in1k_20221220-f0eaff9d.pth) | +| `efficientnetv2-m_3rdparty_in1k`\* | From scratch | 54.14 | 26.88 | 85.01 | 97.26 | [config](efficientnetv2-m_8xb32_in1k-480px.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-m_3rdparty_in1k_20221220-9dc0c729.pth) | +| `efficientnetv2-l_3rdparty_in1k`\* | From scratch | 118.52 | 60.14 | 85.43 | 97.31 | [config](efficientnetv2-l_8xb32_in1k-480px.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-l_3rdparty_in1k_20221220-5c3bac0f.pth) | +| `efficientnetv2-s_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 21.46 | 9.72 | 84.29 | 97.26 | [config](efficientnetv2-s_8xb32_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-s_in21k-pre-3rdparty_in1k_20221220-7a7c8475.pth) | +| `efficientnetv2-m_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 54.14 | 26.88 | 85.47 | 97.76 | [config](efficientnetv2-m_8xb32_in1k-480px.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-m_in21k-pre-3rdparty_in1k_20221220-a1013a04.pth) | +| `efficientnetv2-l_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 118.52 | 60.14 | 86.31 | 97.99 | [config](efficientnetv2-l_8xb32_in1k-480px.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-l_in21k-pre-3rdparty_in1k_20221220-63df0efd.pth) | +| `efficientnetv2-xl_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 208.12 | 98.34 | 86.39 | 97.83 | [config](efficientnetv2-xl_8xb32_in1k-512px.py) | [model](https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-xl_in21k-pre-3rdparty_in1k_20221220-583ac18b.pth) | + +*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py). 
The config files of these models are only for inference. We haven't reproduced the training results.* + +## Citation + +```bibtex +@inproceedings{tan2021efficientnetv2, + title={Efficientnetv2: Smaller models and faster training}, + author={Tan, Mingxing and Le, Quoc}, + booktitle={International Conference on Machine Learning}, + pages={10096--10106}, + year={2021}, + organization={PMLR} +} +``` diff --git a/configs/efficientnet_v2/efficientnetv2-b0_8xb32_in1k.py b/configs/efficientnet_v2/efficientnetv2-b0_8xb32_in1k.py new file mode 100644 index 0000000..4dc23d4 --- /dev/null +++ b/configs/efficientnet_v2/efficientnetv2-b0_8xb32_in1k.py @@ -0,0 +1,58 @@ +_base_ = [ + '../_base_/models/efficientnet_v2/efficientnetv2_b0.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=192, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=224, crop_padding=0), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet_v2/efficientnetv2-b1_8xb32_in1k.py b/configs/efficientnet_v2/efficientnetv2-b1_8xb32_in1k.py new file mode 100644 index 0000000..fa187ff --- /dev/null +++ b/configs/efficientnet_v2/efficientnetv2-b1_8xb32_in1k.py @@ -0,0 +1,21 @@ +_base_ = ['./efficientnetv2-b0_8xb32_in1k.py'] + +# model setting +model = dict(backbone=dict(arch='b1'), head=dict(in_channels=1280, )) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=192), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=240, crop_padding=0), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet_v2/efficientnetv2-b2_8xb32_in1k.py b/configs/efficientnet_v2/efficientnetv2-b2_8xb32_in1k.py new file mode 100644 index 0000000..3ff5530 --- /dev/null +++ b/configs/efficientnet_v2/efficientnetv2-b2_8xb32_in1k.py @@ -0,0 +1,21 @@ +_base_ = ['./efficientnetv2-b0_8xb32_in1k.py'] + +# model setting +model = dict(backbone=dict(arch='b2'), head=dict(in_channels=1408, )) + +train_pipeline = [ 
dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=208), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=260, crop_padding=0), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet_v2/efficientnetv2-b3_8xb32_in1k.py b/configs/efficientnet_v2/efficientnetv2-b3_8xb32_in1k.py new file mode 100644 index 0000000..84fb29a --- /dev/null +++ b/configs/efficientnet_v2/efficientnetv2-b3_8xb32_in1k.py @@ -0,0 +1,21 @@ +_base_ = ['./efficientnetv2-b0_8xb32_in1k.py'] + +# model setting +model = dict(backbone=dict(arch='b3'), head=dict(in_channels=1536, )) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=240), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=300, crop_padding=0), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet_v2/efficientnetv2-l_8xb32_in1k-480px.py b/configs/efficientnet_v2/efficientnetv2-l_8xb32_in1k-480px.py new file mode 100644 index 0000000..c3606cf --- /dev/null +++ b/configs/efficientnet_v2/efficientnetv2-l_8xb32_in1k-480px.py @@ -0,0 +1,23 @@ +_base_ = [ + 'efficientnetv2-s_8xb32_in1k-384px.py', +] + +# model setting +model = dict(backbone=dict(arch='l'), ) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=384, crop_padding=0), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=480, crop_padding=0), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet_v2/efficientnetv2-l_8xb32_in21k.py b/configs/efficientnet_v2/efficientnetv2-l_8xb32_in21k.py new file mode 100644 index 0000000..179c720 --- /dev/null +++ b/configs/efficientnet_v2/efficientnetv2-l_8xb32_in21k.py @@ -0,0 +1,4 @@ +_base_ = ['./efficientnetv2-s_8xb32_in21k.py'] + +# model setting +model = dict(backbone=dict(arch='l'), ) diff --git a/configs/efficientnet_v2/efficientnetv2-m_8xb32_in1k-480px.py b/configs/efficientnet_v2/efficientnetv2-m_8xb32_in1k-480px.py new file mode 100644 index 0000000..c7bdd9b --- /dev/null +++ b/configs/efficientnet_v2/efficientnetv2-m_8xb32_in1k-480px.py @@ -0,0 +1,23 @@ +_base_ = [ + 'efficientnetv2-s_8xb32_in1k-384px.py', +] + +# model setting +model = dict(backbone=dict(arch='m'), ) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=384, crop_padding=0), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=480, crop_padding=0), + dict(type='PackInputs'), +] + 
+train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet_v2/efficientnetv2-m_8xb32_in21k.py b/configs/efficientnet_v2/efficientnetv2-m_8xb32_in21k.py new file mode 100644 index 0000000..f04d616 --- /dev/null +++ b/configs/efficientnet_v2/efficientnetv2-m_8xb32_in21k.py @@ -0,0 +1,4 @@ +_base_ = ['./efficientnetv2-s_8xb32_in21k.py'] + +# model setting +model = dict(backbone=dict(arch='m'), ) diff --git a/configs/efficientnet_v2/efficientnetv2-s_8xb32_in1k-384px.py b/configs/efficientnet_v2/efficientnetv2-s_8xb32_in1k-384px.py new file mode 100644 index 0000000..2bdee63 --- /dev/null +++ b/configs/efficientnet_v2/efficientnetv2-s_8xb32_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/models/efficientnet_v2/efficientnetv2_s.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=300, crop_padding=0), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=384, crop_padding=0), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet_v2/efficientnetv2-s_8xb32_in21k.py b/configs/efficientnet_v2/efficientnetv2-s_8xb32_in21k.py new file mode 100644 index 0000000..54f8a5a --- /dev/null +++ b/configs/efficientnet_v2/efficientnetv2-s_8xb32_in21k.py @@ -0,0 +1,43 @@ +_base_ = [ + '../_base_/models/efficientnet_v2/efficientnetv2_s.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# model setting +model = dict(head=dict(num_classes=21843)) + +# dataset settings +dataset_type = 'ImageNet21k' +data_preprocessor = dict( + num_classes=21843, + # RGB format normalization parameters + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=224, crop_padding=0), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) diff --git a/configs/efficientnet_v2/efficientnetv2-xl_8xb32_in1k-512px.py b/configs/efficientnet_v2/efficientnetv2-xl_8xb32_in1k-512px.py new file mode 100644 index 0000000..18f56ff --- /dev/null +++ b/configs/efficientnet_v2/efficientnetv2-xl_8xb32_in1k-512px.py @@ -0,0 +1,23 @@ +_base_ = [ + 
'efficientnetv2-s_8xb32_in1k-384px.py', +] + +# model setting +model = dict(backbone=dict(arch='xl'), ) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetRandomCrop', scale=384, crop_padding=0), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=512, crop_padding=0), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/efficientnet_v2/efficientnetv2-xl_8xb32_in21k.py b/configs/efficientnet_v2/efficientnetv2-xl_8xb32_in21k.py new file mode 100644 index 0000000..e2ee84c --- /dev/null +++ b/configs/efficientnet_v2/efficientnetv2-xl_8xb32_in21k.py @@ -0,0 +1,4 @@ +_base_ = ['./efficientnetv2-s_8xb32_in21k.py'] + +# model setting +model = dict(backbone=dict(arch='xl'), ) diff --git a/configs/efficientnet_v2/metafile.yml b/configs/efficientnet_v2/metafile.yml new file mode 100644 index 0000000..6c927dc --- /dev/null +++ b/configs/efficientnet_v2/metafile.yml @@ -0,0 +1,255 @@ +Collections: + - Name: EfficientNetV2 + Metadata: + Training Data: ImageNet-1k + Architecture: + - 1x1 Convolution + - Average Pooling + - Convolution + - Dense Connections + - Dropout + - Inverted Residual Block + - RMSProp + - Squeeze-and-Excitation Block + - Swish + Paper: + URL: https://arxiv.org/abs/2104.00298 + Title: "EfficientNetV2: Smaller Models and Faster Training" + README: configs/efficientnet_v2/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/beit.py + Version: v1.0.0rc4 + +Models: + - Name: efficientnetv2-b0_3rdparty_in1k + Metadata: + FLOPs: 919843360 + Parameters: 7139704 + In Collection: EfficientNetV2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.52 + Top 5 Accuracy: 94.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-b0_3rdparty_in1k_20221221-9ef6e736.pth + Config: configs/efficientnet_v2/efficientnetv2-b0_8xb32_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b0-c7cc451f.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-b1_3rdparty_in1k + Metadata: + FLOPs: 1438287552 + Parameters: 8141052 + In Collection: EfficientNetV2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.80 + Top 5 Accuracy: 94.89 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-b1_3rdparty_in1k_20221221-6955d9ce.pth + Config: configs/efficientnet_v2/efficientnetv2-b1_8xb32_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b1-be6e41b0.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-b2_3rdparty_in1k + Metadata: + FLOPs: 1986433080 + Parameters: 10096086 + In Collection: EfficientNetV2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.63 + Top 5 Accuracy: 95.30 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-b2_3rdparty_in1k_20221221-74f7d493.pth + Config: configs/efficientnet_v2/efficientnetv2-b2_8xb32_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b2-847de54e.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-b3_3rdparty_in1k + Metadata: + FLOPs: 3498068400 + Parameters: 14358406 + In Collection: EfficientNetV2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.03 + Top 5 Accuracy: 95.88 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-b3_3rdparty_in1k_20221221-b6f07a36.pth + Config: configs/efficientnet_v2/efficientnetv2-b3_8xb32_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_b3-57773f13.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-s_3rdparty_in1k + Metadata: + FLOPs: 9719420928 + Parameters: 21458488 + In Collection: EfficientNetV2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.82 + Top 5 Accuracy: 96.67 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-s_3rdparty_in1k_20221220-f0eaff9d.pth + Config: configs/efficientnet_v2/efficientnetv2-s_8xb32_in1k-384px.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s-eb54923e.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-m_3rdparty_in1k + Metadata: + FLOPs: 26880363584 + Parameters: 54139356 + In Collection: EfficientNetV2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.01 + Top 5 Accuracy: 97.26 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-m_3rdparty_in1k_20221220-9dc0c729.pth + Config: configs/efficientnet_v2/efficientnetv2-m_8xb32_in1k-480px.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m-cc09e0cd.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-l_3rdparty_in1k + Metadata: + FLOPs: 60142387008 + Parameters: 118515272 + In Collection: EfficientNetV2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.43 + Top 5 Accuracy: 97.31 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-l_3rdparty_in1k_20221220-5c3bac0f.pth + Config: configs/efficientnet_v2/efficientnetv2-l_8xb32_in1k-480px.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l-d664b728.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-s_in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 9719420928 + Parameters: 21458488 + In Collection: EfficientNetV2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.29 + Top 5 Accuracy: 97.26 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-s_in21k-pre-3rdparty_in1k_20221220-7a7c8475.pth + Config: configs/efficientnet_v2/efficientnetv2-s_8xb32_in1k-384px.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21ft1k-d7dafa41.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-m_in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 26880363584 + Parameters: 54139356 + In Collection: EfficientNetV2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.47 + Top 5 Accuracy: 97.76 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-m_in21k-pre-3rdparty_in1k_20221220-a1013a04.pth + Config: configs/efficientnet_v2/efficientnetv2-m_8xb32_in1k-480px.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21ft1k-bf41664a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-l_in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 60142387008 + Parameters: 118515272 + In Collection: EfficientNetV2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.31 + Top 5 Accuracy: 97.99 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-l_in21k-pre-3rdparty_in1k_20221220-63df0efd.pth + Config: configs/efficientnet_v2/efficientnetv2-l_8xb32_in1k-480px.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21ft1k-60127a9d.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-xl_in21k-pre_3rdparty_in1k + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 98341230592 + Parameters: 208119808 + In Collection: EfficientNetV2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.39 + Top 5 Accuracy: 97.83 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-xl_in21k-pre-3rdparty_in1k_20221220-583ac18b.pth + Config: configs/efficientnet_v2/efficientnetv2-xl_8xb32_in1k-512px.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21ft1k-06c35c48.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-s_3rdparty_in21k + Metadata: + FLOPs: 3309720768 + Parameters: 48158371 + In Collection: EfficientNetV2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-s_3rdparty_in21k_20221220-c0572b56.pth + Config: configs/efficientnet_v2/efficientnetv2-s_8xb32_in21k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_s_21k-6337ad01.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-m_3rdparty_in21k + Metadata: + FLOPs: 5861638208 + Parameters: 80839239 + In Collection: EfficientNetV2 + Results: null + Weights: 
https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-m_3rdparty_in21k_20221220-073e944c.pth + Config: configs/efficientnet_v2/efficientnetv2-m_8xb32_in21k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_m_21k-361418a2.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-l_3rdparty_in21k + Metadata: + FLOPs: 13114950464 + Parameters: 145215155 + In Collection: EfficientNetV2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-l_3rdparty_in21k_20221220-f28f91e1.pth + Config: configs/efficientnet_v2/efficientnetv2-l_8xb32_in21k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_l_21k-91a19ec9.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py + - Name: efficientnetv2-xl_3rdparty_in21k + Metadata: + FLOPs: 18855244288 + Parameters: 234819691 + In Collection: EfficientNetV2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/efficientnetv2/efficientnetv2-xl_3rdparty_in21k_20221220-b2c9329c.pth + Config: configs/efficientnet_v2/efficientnetv2-xl_8xb32_in21k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-effv2-weights/tf_efficientnetv2_xl_in21k-fd7e8abf.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/efficientnet.py diff --git a/configs/eva/README.md b/configs/eva/README.md new file mode 100644 index 0000000..6e49c8a --- /dev/null +++ b/configs/eva/README.md @@ -0,0 +1,101 @@ +# EVA + +> [EVA: Exploring the Limits of Masked Visual Representation Learning at Scale](https://arxiv.org/abs/2211.07636) + + + +## Abstract + +We launch EVA, a vision-centric foundation model to explore the limits of visual representation at scale using only publicly accessible data. EVA is a vanilla ViT pre-trained to reconstruct the masked out image-text aligned vision features conditioned on visible image patches. Via this pretext task, we can efficiently scale up EVA to one billion parameters, and sets new records on a broad range of representative vision downstream tasks, such as image recognition, video action recognition, object detection, instance segmentation and semantic segmentation without heavy supervised training. Moreover, we observe quantitative changes in scaling EVA result in qualitative changes in transfer learning performance that are not present in other models. For instance, EVA takes a great leap in the challenging large vocabulary instance segmentation task: our model achieves almost the same state-of-the-art performance on LVISv1.0 dataset with over a thousand categories and COCO dataset with only eighty categories. Beyond a pure vision encoder, EVA can also serve as a vision-centric, multi-modal pivot to connect images and text. We find initializing the vision tower of a giant CLIP from EVA can greatly stabilize the training and outperform the training from scratch counterpart with much fewer samples and less compute, providing a new direction for scaling up and accelerating the costly training of multi-modal foundation models. + +
+ +
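+
+EVA's pre-training objective is masked feature regression against a frozen CLIP vision tower. The snippet below is only a schematic of that objective, with made-up tensor shapes and a helper function of our own; the actual setup in this patch uses `MIMHead` with `CosineSimilarityLoss` and a `CLIPGenerator` target (see `eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k.py` below).
+
+```python
+import torch
+import torch.nn.functional as F
+
+def eva_mim_loss(pred_feats, clip_feats, mask):
+    """Schematic EVA objective: regress CLIP vision features at masked patches.
+
+    pred_feats: (B, L, C) features predicted by the ViT student + decoder
+    clip_feats: (B, L, C) targets from a frozen CLIP vision tower
+    mask:       (B, L) bool, True where a patch was masked out
+    """
+    pred = F.normalize(pred_feats[mask], dim=-1)
+    target = F.normalize(clip_feats[mask], dim=-1)
+    # 1 - cosine similarity on the masked tokens; the config below uses a
+    # shifted/scaled variant (CosineSimilarityLoss, shift/scale factor 2.0).
+    return (1 - (pred * target).sum(dim=-1)).mean()
+
+# toy shapes: 2 images, 196 patches, 512-d CLIP targets
+pred = torch.randn(2, 196, 512)
+target = torch.randn(2, 196, 512)
+mask = torch.rand(2, 196) > 0.6
+print(eva_mim_loss(pred, target, mask))
+```
+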
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('vit-base-p16_eva-mae-style-pre_8xb128-coslr-100e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/eva/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221226-f61cf992.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :--------------------------------------------------- | :--------: | :-------: | :-------------------------------------------------------------: | :----------------------------------------------------------------: | +| `eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k` | 111.78 | 17.58 | [config](eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k_20221226-26d90f07.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k_20221226-26d90f07.json) | +| `beit-l-p14_3rdparty-eva_in21k`\* | 303.18 | 81.08 | [config](eva-l-p14_headless.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_3rdparty-mim_in21k_20221213-3a5da50b.pth) | +| `beit-l-p14_eva-pre_3rdparty_in21k`\* | 303.18 | 81.08 | [config](eva-l-p14_headless.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_mim-pre_3rdparty_in21k_20221213-8f194fa2.pth) | +| `beit-g-p16_3rdparty-eva_30m`\* | 1011.32 | 203.52 | [config](eva-g-p16_headless.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p16_3rdparty_30m_20221213-7bed23ee.pth) | +| `beit-g-p14_3rdparty-eva_30m`\* | 1011.60 | 267.17 | [config](eva-g-p14_headless.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_3rdparty_30m_20221213-3b7aca97.pth) | +| `beit-g-p14_eva-30m-pre_3rdparty_in21k`\* | 1011.60 | 267.17 | [config](eva-g-p14_headless.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-pre_3rdparty_in21k_20221213-d72285b7.pth) | + +*Models with * are converted from the [official repo](https://github.com/baaivision/EVA). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------------------------------- | :----------------------------------------: | :--------: | :-------: | :-------: | :-------: | :--------------------------------------: | :----------------------------------------: | +| `vit-base-p16_eva-mae-style-pre_8xb128-coslr-100e_in1k` | [EVA MAE STYLE](https://download.openmmlab.com/mmselfsup/1.x/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k_20221226-26d90f07.pth) | 86.57 | 17.58 | 83.70 | N/A | [config](benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221226-f61cf992.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221226-f61cf992.json) | +| `vit-base-p16_eva-mae-style-pre_8xb2048-linear-coslr-100e_in1k` | [EVA MAE STYLE](https://download.openmmlab.com/mmselfsup/1.x/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k_20221226-26d90f07.pth) | 86.57 | 17.58 | 69.00 | N/A | [config](benchmarks/vit-base-p16_8xb2048-linear-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k/vit-base-p16_linear-8xb2048-coslr-100e_in1k/vit-base-p16_linear-8xb2048-coslr-100e_in1k_20221226-ef51bf09.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k/vit-base-p16_linear-8xb2048-coslr-100e_in1k/vit-base-p16_linear-8xb2048-coslr-100e_in1k_20221226-ef51bf09.json) | +| `beit-l-p14_eva-pre_3rdparty_in1k-196px`\* | [EVA](https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_3rdparty-mim_in21k_20221213-3a5da50b.pth) | 304.14 | 61.57 | 87.94 | 98.5 | [config](eva-l-p14_8xb16_in1k-196px.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_mim-pre_3rdparty_in1k-196px_20221214-2adf4d28.pth) | +| `beit-l-p14_eva-in21k-pre_3rdparty_in1k-196px`\* | EVA ImageNet-21k | 304.14 | 61.57 | 88.58 | 98.65 | [config](eva-l-p14_8xb16_in1k-196px.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_mim-in21k-pre_3rdparty_in1k-196px_20221213-b730c7e7.pth) | +| `beit-l-p14_eva-pre_3rdparty_in1k-336px`\* | [EVA](https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_3rdparty-mim_in21k_20221213-3a5da50b.pth) | 304.53 | 191.10 | 88.66 | 98.75 | [config](eva-l-p14_8xb16_in1k-336px.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_mim-pre_3rdparty_in1k-336px_20221214-07785cfd.pth) | +| `beit-l-p14_eva-in21k-pre_3rdparty_in1k-336px`\* | EVA ImageNet-21k | 304.53 | 191.10 | 89.17 | 98.86 | [config](eva-l-p14_8xb16_in1k-336px.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_mim-in21k-pre_3rdparty_in1k-336px_20221213-f25b7634.pth) | +| `beit-g-p14_eva-30m-in21k-pre_3rdparty_in1k-336px`\* | [EVA 30M ImageNet-21k](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-pre_3rdparty_in21k_20221213-d72285b7.pth) | 1013.01 | 620.64 | 89.61 | 98.93 | [config](eva-g-p14_8xb16_in1k-336px.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-in21k-pre_3rdparty_in1k-336px_20221213-210f9071.pth) | +| `beit-g-p14_eva-30m-in21k-pre_3rdparty_in1k-560px`\* | [EVA 30M ImageNet-21k](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-pre_3rdparty_in21k_20221213-d72285b7.pth) | 1014.45 | 1906.76 | 89.71 | 98.96 | [config](eva-g-p14_8xb16_in1k-560px.py) | [model](https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-in21k-pre_3rdparty_in1k-560px_20221213-fa1c3652.pth) | + +*Models with * are converted from the [official repo](https://github.com/baaivision/EVA). The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{EVA, + title={EVA: Exploring the Limits of Masked Visual Representation Learning at Scale}, + author={Fang, Yuxin and Wang, Wen and Xie, Binhui and Sun, Quan and Wu, Ledell and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue}, + journal={arXiv preprint arXiv:2211.07636}, + year={2022} +} +``` diff --git a/configs/eva/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py b/configs/eva/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py new file mode 100644 index 0000000..e8a3f49 --- /dev/null +++ b/configs/eva/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py @@ -0,0 +1,114 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict(batch_size=128, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=128, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_path_rate=0.1, + out_type='avg_featmap', + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=0.02)]), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# optimizer wrapper +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=4e-4, weight_decay=0.05, betas=(0.9, 0.999)), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.65, + custom_keys={ + '.ln': dict(decay_mult=0.0), + '.bias': 
dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=95, + by_epoch=True, + begin=5, + end=100, + eta_min=1e-6, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=100) +default_hooks = dict( + # save checkpoint per epoch. + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/eva/benchmarks/vit-base-p16_8xb2048-linear-coslr-100e_in1k.py b/configs/eva/benchmarks/vit-base-p16_8xb2048-linear-coslr-100e_in1k.py new file mode 100644 index 0000000..0b7333c --- /dev/null +++ b/configs/eva/benchmarks/vit-base-p16_8xb2048-linear-coslr-100e_in1k.py @@ -0,0 +1,70 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +train_dataloader = dict(batch_size=2048, drop_last=True) +val_dataloader = dict(drop_last=False) +test_dataloader = dict(drop_last=False) + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + frozen_stages=12, + out_type='cls_token', + final_norm=True, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=dict(type='ClsBatchNormNeck', input_features=768), + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=0.01)]), + data_preprocessor=dict( + num_classes=1000, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True, + )) + +# optimizer +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict(type='LARS', lr=3.2, weight_decay=0.0, momentum=0.9), +) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=90, + by_epoch=True, + begin=10, + end=100, + eta_min=0.0, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=100) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3), + logger=dict(type='LoggerHook', interval=10)) + +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/eva/eva-g-p14_8xb16_in1k-336px.py b/configs/eva/eva-g-p14_8xb16_in1k-336px.py new file mode 100644 index 0000000..aa2bd7e --- /dev/null +++ b/configs/eva/eva-g-p14_8xb16_in1k-336px.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/eva/eva-g.py', + '../_base_/datasets/imagenet_bs16_eva_336.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict(backbone=dict(img_size=336)) diff --git a/configs/eva/eva-g-p14_8xb16_in1k-560px.py b/configs/eva/eva-g-p14_8xb16_in1k-560px.py new file mode 100644 index 0000000..ed20866 --- /dev/null +++ b/configs/eva/eva-g-p14_8xb16_in1k-560px.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/eva/eva-g.py', + '../_base_/datasets/imagenet_bs16_eva_560.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings 
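+# Only the backbone input resolution differs from the shared eva-g base model:
+# it is raised to 560px to match the 560px test pipeline pulled in from
+# `imagenet_bs16_eva_560.py` in `_base_` above.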
+model = dict(backbone=dict(img_size=560)) diff --git a/configs/eva/eva-g-p14_headless.py b/configs/eva/eva-g-p14_headless.py new file mode 100644 index 0000000..b278ace --- /dev/null +++ b/configs/eva/eva-g-p14_headless.py @@ -0,0 +1,24 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='BEiTViT', + arch='eva-g', + img_size=224, + patch_size=14, + layer_scale_init_value=0.0, + out_type='avg_featmap', + use_abs_pos_emb=True, + use_rel_pos_bias=False, + use_shared_rel_pos_bias=False, + ), + neck=None, + head=None, +) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/eva/eva-g-p16_headless.py b/configs/eva/eva-g-p16_headless.py new file mode 100644 index 0000000..ca5de18 --- /dev/null +++ b/configs/eva/eva-g-p16_headless.py @@ -0,0 +1,24 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='BEiTViT', + arch='eva-g', + img_size=224, + patch_size=16, + layer_scale_init_value=0.0, + out_type='avg_featmap', + use_abs_pos_emb=True, + use_rel_pos_bias=False, + use_shared_rel_pos_bias=False, + ), + neck=None, + head=None, +) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/eva/eva-l-p14_8xb16_in1k-196px.py b/configs/eva/eva-l-p14_8xb16_in1k-196px.py new file mode 100644 index 0000000..3503ca5 --- /dev/null +++ b/configs/eva/eva-l-p14_8xb16_in1k-196px.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/eva/eva-l.py', + '../_base_/datasets/imagenet_bs16_eva_196.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict(backbone=dict(img_size=196)) diff --git a/configs/eva/eva-l-p14_8xb16_in1k-336px.py b/configs/eva/eva-l-p14_8xb16_in1k-336px.py new file mode 100644 index 0000000..7094df8 --- /dev/null +++ b/configs/eva/eva-l-p14_8xb16_in1k-336px.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/eva/eva-l.py', + '../_base_/datasets/imagenet_bs16_eva_336.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict(backbone=dict(img_size=336)) diff --git a/configs/eva/eva-l-p14_headless.py b/configs/eva/eva-l-p14_headless.py new file mode 100644 index 0000000..89a4ce1 --- /dev/null +++ b/configs/eva/eva-l-p14_headless.py @@ -0,0 +1,25 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='BEiTViT', + arch='l', + img_size=224, + patch_size=14, + layer_scale_init_value=0.0, + out_type='avg_featmap', + use_abs_pos_emb=True, + use_rel_pos_bias=False, + use_shared_rel_pos_bias=False, + layer_cfgs=dict(bias=True), + ), + neck=None, + head=None, +) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k.py b/configs/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k.py new file mode 100644 index 0000000..bbedb07 --- /dev/null +++ b/configs/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k.py @@ -0,0 +1,86 @@ +_base_ = [ + 
'../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_dataloader = dict(batch_size=256) + +# model settings +model = dict( + type='EVA', + backbone=dict(init_cfg=[ + dict(type='Xavier', distribution='uniform', layer='Linear'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]), + neck=dict( + type='MAEPretrainDecoder', + predict_feature_dim=512, + init_cfg=[ + dict(type='Xavier', distribution='uniform', layer='Linear'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]), + head=dict( + _delete_=True, + type='MIMHead', + loss=dict( + type='CosineSimilarityLoss', shift_factor=2.0, scale_factor=2.0), + ), + target_generator=dict( + type='CLIPGenerator', + tokenizer_path= # noqa + 'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/clip_vit_base_16.pth.tar' # noqa + ), + init_cfg=None) + +# optimizer wrapper +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) +find_unused_parameters = True + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=400) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
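+# Illustrative example of the scaling rule: this config assumes 16 GPUs x 256
+# samples per GPU = 4096 images per iteration (hence base_batch_size=4096 and
+# lr = 1.5e-4 * 4096 / 256 above). If the effective batch size is instead,
+# say, 8 x 256 = 2048, enabling automatic LR scaling multiplies the optimizer
+# lr linearly by 2048 / 4096 = 0.5.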
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/eva/metafile.yml b/configs/eva/metafile.yml new file mode 100644 index 0000000..dd8dbbf --- /dev/null +++ b/configs/eva/metafile.yml @@ -0,0 +1,261 @@ +Collections: + - Name: EVA + Metadata: + Architecture: + - Attention Dropout + - Convolution + - Dense Connections + - Dropout + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + - Tanh Activation + Paper: + Title: 'EVA: Exploring the Limits of Masked Visual Representation Learning at + Scale' + URL: https://arxiv.org/abs/2211.07636 + README: configs/eva/README.md + Code: + URL: null + Version: null + +Models: + - Name: eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k + Metadata: + Epochs: 400 + Batch Size: 4096 + FLOPs: 17581972224 + Parameters: 111776512 + Training Data: ImageNet-1k + In Collection: EVA + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k_20221226-26d90f07.pth + Config: configs/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k.py + Downstream: + - vit-base-p16_eva-mae-style-pre_8xb128-coslr-100e_in1k + - vit-base-p16_eva-mae-style-pre_8xb2048-linear-coslr-100e_in1k + - Name: vit-base-p16_eva-mae-style-pre_8xb128-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 1024 + FLOPs: 17581215744 + Parameters: 86566120 + Training Data: ImageNet-1k + In Collection: EVA + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.7 + Weights: https://download.openmmlab.com/mmselfsup/1.x/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20221226-f61cf992.pth + Config: configs/eva/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py + - Name: vit-base-p16_eva-mae-style-pre_8xb2048-linear-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 16384 + FLOPs: 17581972992 + Parameters: 86567656 + Training Data: ImageNet-1k + In Collection: EVA + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 69.0 + Weights: https://download.openmmlab.com/mmselfsup/1.x/eva/eva-mae-style_vit-base-p16_16xb256-coslr-400e_in1k/vit-base-p16_linear-8xb2048-coslr-100e_in1k/vit-base-p16_linear-8xb2048-coslr-100e_in1k_20221226-ef51bf09.pth + Config: configs/eva/benchmarks/vit-base-p16_8xb2048-linear-coslr-100e_in1k.py + - Name: beit-l-p14_eva-pre_3rdparty_in1k-196px + Metadata: + FLOPs: 61565981696 + Parameters: 304142312 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: EVA + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 87.94 + Top 5 Accuracy: 98.5 + Weights: https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_mim-pre_3rdparty_in1k-196px_20221214-2adf4d28.pth + Config: configs/eva/eva-l-p14_8xb16_in1k-196px.py + Converted From: + Weights: https://huggingface.co/BAAI/EVA/blob/main/eva_l_psz14_196px_1k_ft_88p0.pt + Code: https://github.com/baaivision/EVA + - Name: beit-l-p14_eva-in21k-pre_3rdparty_in1k-196px + Metadata: + FLOPs: 61565981696 + Parameters: 304142312 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: EVA + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 88.58 + Top 5 Accuracy: 98.65 + Weights: https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_mim-in21k-pre_3rdparty_in1k-196px_20221213-b730c7e7.pth + 
Config: configs/eva/eva-l-p14_8xb16_in1k-196px.py + Converted From: + Weights: https://huggingface.co/BAAI/EVA/blob/main/eva_l_psz14_196px_21k_to_1k_ft_88p6.pt + Code: https://github.com/baaivision/EVA + - Name: beit-l-p14_3rdparty-eva_in21k + Metadata: + FLOPs: 81075147776 + Parameters: 303178752 + Training Data: + - ImageNet-21k + In Collection: EVA + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_3rdparty-mim_in21k_20221213-3a5da50b.pth + Config: configs/eva/eva-l-p14_headless.py + Converted From: + Weights: https://huggingface.co/BAAI/EVA/blob/main/eva_l_psz14.pt + Code: https://github.com/baaivision/EVA + Downstream: + - beit-l-p14_eva-pre_3rdparty_in21k + - beit-l-p14_eva-pre_3rdparty_in1k-336px + - beit-l-p14_eva-pre_3rdparty_in1k-196px + - Name: beit-l-p14_eva-pre_3rdparty_in21k + Metadata: + FLOPs: 81075147776 + Parameters: 303178752 + Training Data: + - ImageNet-21k + In Collection: EVA + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_mim-pre_3rdparty_in21k_20221213-8f194fa2.pth + Config: configs/eva/eva-l-p14_headless.py + Converted From: + Weights: https://huggingface.co/BAAI/EVA/blob/main/eva_l_psz14_21k_ft.pt + Code: https://github.com/baaivision/EVA + - Name: beit-l-p14_eva-pre_3rdparty_in1k-336px + Metadata: + FLOPs: 191100916736 + Parameters: 304531432 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: EVA + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 88.66 + Top 5 Accuracy: 98.75 + Weights: https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_mim-pre_3rdparty_in1k-336px_20221214-07785cfd.pth + Config: configs/eva/eva-l-p14_8xb16_in1k-336px.py + Converted From: + Weights: https://huggingface.co/BAAI/EVA/blob/main/eva_l_psz14_336px_1k_ft_88p65.pt + Code: https://github.com/baaivision/EVA + Downstream: + - beit-l-p14_eva-in21k-pre_3rdparty_in1k-336px + - beit-l-p14_eva-in21k-pre_3rdparty_in1k-196px + - Name: beit-l-p14_eva-in21k-pre_3rdparty_in1k-336px + Metadata: + FLOPs: 191100916736 + Parameters: 304531432 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: EVA + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 89.17 + Top 5 Accuracy: 98.86 + Weights: https://download.openmmlab.com/mmclassification/v0/eva/eva-l-p14_mim-in21k-pre_3rdparty_in1k-336px_20221213-f25b7634.pth + Config: configs/eva/eva-l-p14_8xb16_in1k-336px.py + Converted From: + Weights: https://huggingface.co/BAAI/EVA/blob/main/eva_l_psz14_336px_21k_to_1k_ft_89p2.pt + Code: https://github.com/baaivision/EVA + - Name: beit-g-p16_3rdparty-eva_30m + Metadata: + FLOPs: 203517463424 + Parameters: 1011315072 + Training Data: + - merged-30M + In Collection: EVA + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p16_3rdparty_30m_20221213-7bed23ee.pth + Config: configs/eva/eva-g-p16_headless.py + Converted From: + Weights: https://huggingface.co/BAAI/EVA/blob/main/eva_psz14to16.pt + Code: https://github.com/baaivision/EVA + - Name: beit-g-p14_3rdparty-eva_30m + Metadata: + FLOPs: 267174833024 + Parameters: 1011596672 + Training Data: + - merged-30M + In Collection: EVA + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_3rdparty_30m_20221213-3b7aca97.pth + Config: configs/eva/eva-g-p14_headless.py + Converted From: + Weights: https://huggingface.co/BAAI/EVA/blob/main/eva_psz14.pt + Code: https://github.com/baaivision/EVA + 
Downstream: + - beit-g-p14_eva-30m-pre_3rdparty_in21k + - Name: beit-g-p14_eva-30m-pre_3rdparty_in21k + Metadata: + FLOPs: 267174833024 + Parameters: 1011596672 + Training Data: + - merged-30M + - ImageNet-21k + In Collection: EVA + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-pre_3rdparty_in21k_20221213-d72285b7.pth + Config: configs/eva/eva-g-p14_headless.py + Converted From: + Weights: https://huggingface.co/BAAI/EVA/blob/main/eva_21k_224px_psz14.pt + Code: https://github.com/baaivision/EVA + Downstream: + - beit-g-p14_eva-30m-in21k-pre_3rdparty_in1k-336px + - beit-g-p14_eva-30m-in21k-pre_3rdparty_in1k-560px + - Name: beit-g-p14_eva-30m-in21k-pre_3rdparty_in1k-336px + Metadata: + FLOPs: 620642757504 + Parameters: 1013005672 + Training Data: + - merged-30M + - ImageNet-21k + - ImageNet-1k + In Collection: EVA + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 89.61 + Top 5 Accuracy: 98.93 + Weights: https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-in21k-pre_3rdparty_in1k-336px_20221213-210f9071.pth + Config: configs/eva/eva-g-p14_8xb16_in1k-336px.py + Converted From: + Weights: https://huggingface.co/BAAI/EVA/blob/main/eva_21k_1k_336px_psz14_ema_89p6.pt + Code: https://github.com/baaivision/EVA + - Name: beit-g-p14_eva-30m-in21k-pre_3rdparty_in1k-560px + Metadata: + FLOPs: 1906761591680 + Parameters: 1014447464 + Training Data: + - merged-30M + - ImageNet-21k + - ImageNet-1k + In Collection: EVA + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 89.71 + Top 5 Accuracy: 98.96 + Weights: https://download.openmmlab.com/mmclassification/v0/eva/eva-g-p14_30m-in21k-pre_3rdparty_in1k-560px_20221213-fa1c3652.pth + Config: configs/eva/eva-g-p14_8xb16_in1k-560px.py + Converted From: + Weights: https://huggingface.co/BAAI/EVA/blob/main/eva_21k_1k_560px_psz14_ema_89p7.pt + Code: https://github.com/baaivision/EVA diff --git a/configs/eva02/README.md b/configs/eva02/README.md new file mode 100644 index 0000000..bc8f64e --- /dev/null +++ b/configs/eva02/README.md @@ -0,0 +1,109 @@ +# EVA-02 + +> [EVA-02: A Visual Representation for Neon Genesis](https://arxiv.org/abs/2303.11331) + + + +## Abstract + +We launch EVA-02, a next-generation Transformer-based visual representation pre-trained to reconstruct strong and robust language-aligned vision features via masked image modeling. With an updated plain Transformer architecture as well as extensive pre-training from an open & accessible giant CLIP vision encoder, EVA-02 demonstrates superior performance compared to prior state-of-the-art approaches across various representative vision tasks, while utilizing significantly fewer parameters and compute budgets. Notably, using exclusively publicly accessible training data, EVA-02 with only 304M parameters achieves a phenomenal 90.0 fine-tuning top-1 accuracy on ImageNet-1K val set. Additionally, our EVA-02-CLIP can reach up to 80.4 zero-shot top-1 on ImageNet-1K, outperforming the previous largest & best open-sourced CLIP with only ~1/6 parameters and ~1/6 image-text training data. We offer four EVA-02 variants in various model sizes, ranging from 6M to 304M parameters, all with impressive performance. To facilitate open accessand open research, we release the complete suite of EVA-02 to the community. + +
+TrV builds upon the original plain ViT architecture and includes several enhancements: SwinGLU FFN, sub-LN, 2D RoPE, and JAX weight initialization. To keep the parameter & FLOPs consistent with the baseline, the FFN hidden dim of SwiGLU is 2/3× of the typical MLP counterpart. +
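+
+As a quick, purely illustrative sanity check of the 2/3× SwiGLU sizing rule above (the numbers below assume a base-size ViT and are not taken from any config in this patch):
+
+```python
+# Parameter-matching check for the SwiGLU FFN sizing rule (illustrative only).
+embed_dims = 768                      # base-size ViT width
+mlp_hidden = 4 * embed_dims           # plain ViT MLP hidden dim: 3072
+swiglu_hidden = 2 * mlp_hidden // 3   # 2/3x of the MLP hidden dim: 2048
+# A plain MLP has 2 projections, SwiGLU has 3, so the parameter counts match:
+assert 3 * embed_dims * swiglu_hidden == 2 * embed_dims * mlp_hidden
+```
+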
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('vit-tiny-p14_eva02-in21k-pre_3rdparty_in1k-336px', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('vit-tiny-p14_eva02-in21k-pre_3rdparty_in1k-336px', pretrained=True) +inputs = torch.rand(1, 3, 336, 336) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/eva02/eva02-tiny-p14_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/eva02/eva02-tiny-p14_in1k.py /path/to/eva02-tiny-p14_in1k.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :-------------------------------- | :--------: | :-------: | :-----------------------------------: | :-----------------------------------------------------------------------------------------------------------: | +| `vit-tiny-p14_eva02-pre_in21k`\* | 5.50 | 1.70 | [config](eva02-tiny-p14_headless.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-tiny-p14_pre_in21k_20230505-d703e7b1.pth) | +| `vit-small-p14_eva02-pre_in21k`\* | 21.62 | 6.14 | [config](eva02-small-p14_headless.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-small-p14_pre_in21k_20230505-3175f463.pth) | +| `vit-base-p14_eva02-pre_in21k`\* | 85.77 | 23.22 | [config](eva02-base-p14_headless.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-base-p14_pre_in21k_20230505-2f2d4d3c.pth) | +| `vit-large-p14_eva02-pre_in21k`\* | 303.29 | 81.15 | [config](eva02-large-p14_headless.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-large-p14_pre_in21k_20230505-9072de5d.pth) | +| `vit-large-p14_eva02-pre_m38m`\* | 303.29 | 81.15 | [config](eva02-large-p14_headless.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-large-p14_pre_m38m_20230505-b8a1a261.pth) | + +- The input size / patch size of MIM pre-trained EVA-02 is `224x224` / `14x14`. 
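+
+The MIM pre-trained checkpoints above are head-less (their configs set `head=None`), so they are intended for feature extraction rather than direct classification. A minimal sketch, using one of the model names from the table and the `224x224` / `14x14` input geometry noted above:
+
+```python
+import torch
+from mmpretrain import get_model
+
+# 224 / 14 = 16 patches per side, i.e. 256 patch tokens per image.
+model = get_model('vit-base-p14_eva02-pre_in21k', pretrained=True)
+feats = model.extract_feat(torch.rand(1, 3, 224, 224))
+print(type(feats))
+```
+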
+ +*Models with * are converted from the [official repo](https://github.com/baaivision/EVA).* + +### Image Classification on ImageNet-1k + +#### (*w/o* IN-21K intermediate fine-tuning) + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------------------------------- | :----------------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------: | :-------------------------------------------------------: | +| `vit-tiny-p14_eva02-in21k-pre_3rdparty_in1k-336px`\* | EVA02 ImageNet-21k | 5.76 | 4.68 | 80.69 | 95.54 | [config](./eva02-tiny-p14_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-tiny-p14_in21k-pre_3rdparty_in1k-336px_20230505-a4e8708a.pth) | +| `vit-small-p14_eva02-in21k-pre_3rdparty_in1k-336px`\* | EVA02 ImageNet-21k | 22.13 | 15.48 | 85.78 | 97.60 | [config](./eva02-small-p14_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-small-p14_in21k-pre_3rdparty_in1k-336px_20230505-9c5b0e85.pth) | +| `vit-base-p14_eva02-in21k-pre_3rdparty_in1k-448px`\* | EVA02 ImageNet-21k | 87.13 | 107.11 | 88.29 | 98.53 | [config](./eva02-base-p14_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-base-p14_in21k-pre_3rdparty_in1k-448px_20230505-8ad211c5.pth) | + +*Models with * are converted from the [official repo](https://github.com/baaivision/EVA/tree/master/EVA-02). The config files of these models are only for inference. We haven't reproduce the training results.* + +#### (*w* IN-21K intermediate fine-tuning) + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------------------------------- | :----------------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------: | :-------------------------------------------------------: | +| `vit-base-p14_eva02-in21k-pre_in21k-medft_3rdparty_in1k-448px`\* | EVA02 ImageNet-21k | 87.13 | 107.11 | 88.47 | 98.62 | [config](./eva02-base-p14_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-base-p14_in21k-pre_in21k-medft_3rdparty_in1k-448px_20230505-5cd4d87f.pth) | +| `vit-large-p14_eva02-in21k-pre_in21k-medft_3rdparty_in1k-448px`\* | EVA02 ImageNet-21k | 305.08 | 362.33 | 89.65 | 98.95 | [config](./eva02-large-p14_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-large-p14_in21k-pre_in21k-medft_3rdparty_in1k-448px_20230505-926d1599.pth) | +| `vit-large-p14_eva02_m38m-pre_in21k-medft_3rdparty_in1k-448px`\* | EVA02 Merged-38M | 305.10 | 362.33 | 89.83 | 99.00 | [config](./eva02-large-p14_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-large-p14_m38m-pre_in21k-medft_3rdparty_in1k-448px_20230505-150dc5ed.pth) | + +*Models with * are converted from the [official repo](https://github.com/baaivision/EVA/tree/master/EVA-02). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{EVA-02, + title={EVA-02: A Visual Representation for Neon Genesis}, + author={Yuxin Fang and Quan Sun and Xinggang Wang and Tiejun Huang and Xinlong Wang and Yue Cao}, + journal={arXiv preprint arXiv:2303.11331}, + year={2023} +} +``` diff --git a/configs/eva02/eva02-base-p14_headless.py b/configs/eva02/eva02-base-p14_headless.py new file mode 100644 index 0000000..27aa8f8 --- /dev/null +++ b/configs/eva02/eva02-base-p14_headless.py @@ -0,0 +1,21 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='ViTEVA02', + arch='b', + img_size=224, + patch_size=14, + sub_ln=True, + final_norm=False, + out_type='avg_featmap'), + neck=None, + head=None, +) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/eva02/eva02-base-p14_in1k.py b/configs/eva02/eva02-base-p14_in1k.py new file mode 100644 index 0000000..c8400d3 --- /dev/null +++ b/configs/eva02/eva02-base-p14_in1k.py @@ -0,0 +1,32 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs16_eva_448.py', + '../_base_/schedules/imagenet_bs2048_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='ViTEVA02', + arch='b', + img_size=448, + patch_size=14, + sub_ln=True, + final_norm=False, + out_type='avg_featmap'), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/eva02/eva02-large-p14_headless.py b/configs/eva02/eva02-large-p14_headless.py new file mode 100644 index 0000000..e101ac9 --- /dev/null +++ b/configs/eva02/eva02-large-p14_headless.py @@ -0,0 +1,21 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='ViTEVA02', + arch='l', + img_size=224, + patch_size=14, + sub_ln=True, + final_norm=False, + out_type='avg_featmap'), + neck=None, + head=None, +) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/eva02/eva02-large-p14_in1k.py b/configs/eva02/eva02-large-p14_in1k.py new file mode 100644 index 0000000..91a4277 --- /dev/null +++ b/configs/eva02/eva02-large-p14_in1k.py @@ -0,0 +1,32 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs16_eva_448.py', + '../_base_/schedules/imagenet_bs2048_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='ViTEVA02', + arch='l', + img_size=448, + patch_size=14, + sub_ln=True, + final_norm=False, + out_type='avg_featmap'), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', 
alpha=1.0) + ])) diff --git a/configs/eva02/eva02-small-p14_headless.py b/configs/eva02/eva02-small-p14_headless.py new file mode 100644 index 0000000..a969819 --- /dev/null +++ b/configs/eva02/eva02-small-p14_headless.py @@ -0,0 +1,20 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='ViTEVA02', + arch='s', + img_size=224, + patch_size=14, + final_norm=False, + out_type='avg_featmap'), + neck=None, + head=None, +) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/eva02/eva02-small-p14_in1k.py b/configs/eva02/eva02-small-p14_in1k.py new file mode 100644 index 0000000..4a16d92 --- /dev/null +++ b/configs/eva02/eva02-small-p14_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs16_eva_336.py', + '../_base_/schedules/imagenet_bs2048_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='ViTEVA02', + arch='s', + img_size=336, + patch_size=14, + final_norm=False, + out_type='avg_featmap'), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/eva02/eva02-tiny-p14_headless.py b/configs/eva02/eva02-tiny-p14_headless.py new file mode 100644 index 0000000..783d0ea --- /dev/null +++ b/configs/eva02/eva02-tiny-p14_headless.py @@ -0,0 +1,20 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='ViTEVA02', + arch='t', + img_size=224, + patch_size=14, + final_norm=False, + out_type='avg_featmap'), + neck=None, + head=None, +) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/eva02/eva02-tiny-p14_in1k.py b/configs/eva02/eva02-tiny-p14_in1k.py new file mode 100644 index 0000000..84e68d7 --- /dev/null +++ b/configs/eva02/eva02-tiny-p14_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs16_eva_336.py', + '../_base_/schedules/imagenet_bs2048_AdamW.py', + '../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='ViTEVA02', + arch='t', + img_size=336, + patch_size=14, + final_norm=False, + out_type='avg_featmap'), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=192, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) diff --git a/configs/eva02/metafile.yml b/configs/eva02/metafile.yml new file mode 100644 index 0000000..80acf90 --- /dev/null +++ b/configs/eva02/metafile.yml @@ -0,0 +1,199 @@ +Collections: + - Name: EVA02 + Metadata: + Architecture: + - Rotary Position Embedding + - Sub Layer Normalization + - SwiGLU + Paper: + Title: 'EVA-02: A Visual 
Representation for Neon Genesis' + URL: https://arxiv.org/abs/2303.11331 + README: configs/eva02/README.md + +Models: + - Name: vit-tiny-p14_eva02-pre_in21k + Metadata: + FLOPs: 1703439360 + Parameters: 5504064 + Training Data: + - ImageNet-21k + In Collection: EVA02 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-tiny-p14_pre_in21k_20230505-d703e7b1.pth + Config: configs/eva02/eva02-tiny-p14_headless.py + Converted From: + Weights: https://huggingface.co/Yuxin-CV/EVA-02/blob/main/eva02/pt/eva02_Ti_pt_in21k_p14.pt + Code: https://github.com/baaivision/EVA/tree/master/EVA-02 + Downstream: + - vit-tiny-p14_eva02-in21k-pre_3rdparty_in1k-336px + - Name: vit-tiny-p14_eva02-in21k-pre_3rdparty_in1k-336px + Metadata: + FLOPs: 4675416000 + Parameters: 5758888 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: EVA02 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 80.69 + Top 5 Accuracy: 95.54 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-tiny-p14_in21k-pre_3rdparty_in1k-336px_20230505-a4e8708a.pth + Config: configs/eva02/eva02-tiny-p14_in1k.py + Converted From: + Weights: https://huggingface.co/Yuxin-CV/EVA-02/blob/main/eva02/cls/in1k/eva02_Ti_pt_in21k_ft_in1k_p14.pt + Code: https://github.com/baaivision/EVA/tree/master/EVA-02 + - Name: vit-small-p14_eva02-pre_in21k + Metadata: + FLOPs: 6135404544 + Parameters: 21624960 + Training Data: + - ImageNet-21k + In Collection: EVA02 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-small-p14_pre_in21k_20230505-3175f463.pth + Config: configs/eva02/eva02-small-p14_headless.py + Converted From: + Weights: https://huggingface.co/Yuxin-CV/EVA-02/blob/main/eva02/pt/eva02_S_pt_in21k_p14.pt + Code: https://github.com/baaivision/EVA/tree/master/EVA-02 + Downstream: + - vit-small-p14_eva02-in21k-pre_3rdparty_in1k-336px + - Name: vit-small-p14_eva02-in21k-pre_3rdparty_in1k-336px + Metadata: + FLOPs: 15476744064 + Parameters: 22133608 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: EVA02 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.78 + Top 5 Accuracy: 97.60 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-small-p14_in21k-pre_3rdparty_in1k-336px_20230505-9c5b0e85.pth + Config: configs/eva02/eva02-small-p14_in1k.py + Converted From: + Weights: https://huggingface.co/Yuxin-CV/EVA-02/blob/main/eva02/cls/in1k/eva02_S_pt_in21k_ft_in1k_p14.pt + Code: https://github.com/baaivision/EVA/tree/master/EVA-02 + - Name: vit-base-p14_eva02-pre_in21k + Metadata: + FLOPs: 23216492544 + Parameters: 85766400 + Training Data: + - ImageNet-21k + In Collection: EVA02 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-base-p14_pre_in21k_20230505-2f2d4d3c.pth + Config: configs/eva02/eva02-base-p14_headless.py + Converted From: + Weights: https://huggingface.co/Yuxin-CV/EVA-02/blob/main/eva02/pt/eva02_B_pt_in21k_p14.pt + Code: https://github.com/baaivision/EVA/tree/master/EVA-02 + Downstream: + - vit-base-p14_eva02-in21k-pre_3rdparty_in1k-448px + - vit-base-p14_eva02-in21k-pre_in21k-medft_3rdparty_in1k-448px + - Name: vit-base-p14_eva02-in21k-pre_3rdparty_in1k-448px + Metadata: + FLOPs: 107105984256 + Parameters: 87126760 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: EVA02 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 88.29 + Top 5 Accuracy: 98.53 + Weights: 
https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-base-p14_in21k-pre_3rdparty_in1k-448px_20230505-8ad211c5.pth + Config: configs/eva02/eva02-base-p14_in1k.py + Converted From: + Weights: https://huggingface.co/Yuxin-CV/EVA-02/blob/main/eva02/cls/in1k/eva02_B_pt_in21k_ft_in1k_p14.pt + Code: https://github.com/baaivision/EVA/tree/master/EVA-02 + - Name: vit-base-p14_eva02-in21k-pre_in21k-medft_3rdparty_in1k-448px + Metadata: + FLOPs: 107105984256 + Parameters: 87126760 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: EVA02 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 88.47 + Top 5 Accuracy: 98.62 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-base-p14_in21k-pre_in21k-medft_3rdparty_in1k-448px_20230505-5cd4d87f.pth + Config: configs/eva02/eva02-base-p14_in1k.py + Converted From: + Weights: https://huggingface.co/Yuxin-CV/EVA-02/blob/main/eva02/cls/in21k/eva02_B_pt_in21k_medft_in21k_p14.pt + Code: https://github.com/baaivision/EVA/tree/master/EVA-02 + - Name: vit-large-p14_eva02-pre_in21k + Metadata: + FLOPs: 81146703792 + Parameters: 303291328 + Training Data: + - ImageNet-21k + In Collection: EVA02 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-large-p14_pre_in21k_20230505-9072de5d.pth + Config: configs/eva02/eva02-large-p14_headless.py + Converted From: + Weights: https://huggingface.co/Yuxin-CV/EVA-02/blob/main/eva02/pt/eva02_L_pt_in21k_p14.pt + Code: https://github.com/baaivision/EVA/tree/master/EVA-02 + Downstream: + - vit-large-p14_eva02-in21k-pre_in21k-medft_3rdparty_in1k-448px + - Name: vit-large-p14_eva02-in21k-pre_in21k-medft_3rdparty_in1k-448px + Metadata: + FLOPs: 362333836208 + Parameters: 305104808 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: EVA02 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 89.65 + Top 5 Accuracy: 98.95 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-large-p14_in21k-pre_in21k-medft_3rdparty_in1k-448px_20230505-926d1599.pth + Config: configs/eva02/eva02-large-p14_in1k.py + Converted From: + Weights: https://huggingface.co/Yuxin-CV/EVA-02/blob/main/eva02/cls/in21k/eva02_L_pt_in21k_medft_in21k_p14.pt + Code: https://github.com/baaivision/EVA/tree/master/EVA-02 + - Name: vit-large-p14_eva02-pre_m38m + Metadata: + FLOPs: 81146703792 + Parameters: 303291328 + Training Data: + - Merged-38M + In Collection: EVA02 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-large-p14_pre_m38m_20230505-b8a1a261.pth + Config: configs/eva02/eva02-large-p14_headless.py + Converted From: + Weights: https://huggingface.co/Yuxin-CV/EVA-02/blob/main/eva02/pt/eva02_L_pt_m38m_p14.pt + Code: https://github.com/baaivision/EVA/tree/master/EVA-02 + Downstream: + - vit-large-p14_eva02_m38m-pre_in21k-medft_3rdparty_in1k-448px + - Name: vit-large-p14_eva02_m38m-pre_in21k-medft_3rdparty_in1k-448px + Metadata: + FLOPs: 362333836208 + Parameters: 305104808 + Training Data: + - Merged-38M + - ImageNet-21k + - ImageNet-1k + In Collection: EVA02 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 89.83 + Top 5 Accuracy: 99.00 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/eva02/eva02-large-p14_m38m-pre_in21k-medft_3rdparty_in1k-448px_20230505-150dc5ed.pth + Config: configs/eva02/eva02-large-p14_in1k.py + Converted From: + Weights: 
https://huggingface.co/Yuxin-CV/EVA-02/blob/main/eva02/cls/in21k/eva02_L_pt_m38m_medft_in21k_p14.pt + Code: https://github.com/baaivision/EVA/tree/master/EVA-02 diff --git a/configs/flamingo/README.md b/configs/flamingo/README.md new file mode 100644 index 0000000..60c6af0 --- /dev/null +++ b/configs/flamingo/README.md @@ -0,0 +1,82 @@ +# Flamingo + +> [Flamingo: a Visual Language Model for Few-Shot Learning](https://arxiv.org/abs/2204.14198) + + + +## Abstract + +Building models that can be rapidly adapted to novel tasks using only a handful of annotated examples is an open challenge for multimodal machine learning research. We introduce Flamingo, a family of Visual Language Models (VLM) with this ability. We propose key architectural innovations to: (i) bridge powerful pretrained vision-only and language-only models, (ii) handle sequences of arbitrarily interleaved visual and textual data, and (iii) seamlessly ingest images or videos as inputs. Thanks to their flexibility, Flamingo models can be trained on large-scale multimodal web corpora containing arbitrarily interleaved text and images, which is key to endow them with in-context few-shot learning capabilities. We perform a thorough evaluation of our models, exploring and measuring their ability to rapidly adapt to a variety of image and video tasks. These include open-ended tasks such as visual question-answering, where the model is prompted with a question which it has to answer; captioning tasks, which evaluate the ability to describe a scene or an event; and close-ended tasks such as multiple-choice visual question-answering. For tasks lying anywhere on this spectrum, a single Flamingo model can achieve a new state of the art with few-shot learning, simply by prompting the model with task-specific examples. On numerous benchmarks, Flamingo outperforms models fine-tuned on thousands of times more task-specific data. + +
+ +
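+The zero-shot and few-shot configs below drive generation purely through
+prompt templates: every support example fills `shot_prompt_tmpl`, the pieces
+are concatenated, and `final_prompt_tmpl` opens the answer for the query
+sample, while the model interleaves the corresponding visual features between
+the text chunks. The snippet below is only a rough sketch of that string
+composition (the handling of image placeholder tokens inside the model is
+omitted); it is not part of the mmpretrain API.
+
+```python
+# Prompt templates as used by the caption configs in this folder.
+shot_prompt_tmpl = 'Output:{caption}<|endofchunk|>'
+final_prompt_tmpl = 'Output:'
+
+# Two in-context examples (the same captions used as the zero-shot text prompt).
+shots = [
+    {'caption': 'A child holding a flowered umbrella and petting a yak.'},
+    {'caption': 'The child is holding a brush close to his mouth.'},
+]
+
+prompt = ''.join(shot_prompt_tmpl.format(**shot) for shot in shots)
+prompt += final_prompt_tmpl
+print(prompt)
+# Output:A child holding a flowered umbrella and petting a yak.<|endofchunk|>...Output:
+```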
+ +## How to use it? + + + +**Use the model** + +```python +from mmpretrain import inference_model + +result = inference_model('flamingo_3rdparty-zeroshot_caption', 'demo/cat-dog.png') +print(result) +# {'pred_caption': 'A dog and a cat are looking at each other. '} +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/flamingo/flamingo_zeroshot_caption.py https://download.openmmlab.com/mmclassification/v1/flamingo/openflamingo-9b-adapter_20230505-554310c8.pth +``` + + + +## Models and results + +### Image Caption on COCO + +| Model | Params (G) | CIDER | Config | Download | +| :------------------------------------- | :--------: | :---: | :------------------------------------: | :-----------------------------------------------------------------------------------------------------------: | +| `flamingo_3rdparty-zeroshot_caption`\* | 8.220 | 65.50 | [config](flamingo_zeroshot_caption.py) | [model](https://download.openmmlab.com/mmclassification/v1/flamingo/openflamingo-9b-adapter_20230505-554310c8.pth) | + +*Models with * are converted from the [openflamingo](https://github.com/mlfoundations/open_flamingo). The config files of these models are only for inference. We haven't reproduce the training results.* + +### Visual Question Answering on VQAv2 + +| Model | Params (G) | Accuracy | Config | Download | +| :--------------------------------- | :--------: | :------: | :--------------------------------: | :----------------------------------------------------------------------------------------------------------------: | +| `flamingo_3rdparty-zeroshot_vqa`\* | 8.22 | 43.50 | [config](flamingo_zeroshot_vqa.py) | [model](https://download.openmmlab.com/mmclassification/v1/flamingo/openflamingo-9b-adapter_20230505-554310c8.pth) | + +*Models with * are converted from the [openflamingo](https://github.com/mlfoundations/open_flamingo). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{Alayrac2022FlamingoAV, + title={Flamingo: a Visual Language Model for Few-Shot Learning}, + author={Jean-Baptiste Alayrac and Jeff Donahue and Pauline Luc and Antoine Miech and Iain Barr and Yana Hasson and Karel Lenc and Arthur Mensch and Katie Millican and Malcolm Reynolds and Roman Ring and Eliza Rutherford and Serkan Cabi and Tengda Han and Zhitao Gong and Sina Samangooei and Marianne Monteiro and Jacob Menick and Sebastian Borgeaud and Andy Brock and Aida Nematzadeh and Sahand Sharifzadeh and Mikolaj Binkowski and Ricardo Barreira and Oriol Vinyals and Andrew Zisserman and Karen Simonyan}, + journal={ArXiv}, + year={2022}, + volume={abs/2204.14198} +} +``` + +```bibtex +@software{anas_awadalla_2023_7733589, + author = {Awadalla, Anas and Gao, Irena and Gardner, Joshua and Hessel, Jack and Hanafy, Yusuf and Zhu, Wanrong and Marathe, Kalyani and Bitton, Yonatan and Gadre, Samir and Jitsev, Jenia and Kornblith, Simon and Koh, Pang Wei and Ilharco, Gabriel and Wortsman, Mitchell and Schmidt, Ludwig}, + title = {OpenFlamingo}, + month = mar, + year = 2023, + publisher = {Zenodo}, + version = {v0.1.1}, + doi = {10.5281/zenodo.7733589}, + url = {https://doi.org/10.5281/zenodo.7733589} +} +``` diff --git a/configs/flamingo/flamingo_fewshot_caption.py b/configs/flamingo/flamingo_fewshot_caption.py new file mode 100644 index 0000000..d6f9c2b --- /dev/null +++ b/configs/flamingo/flamingo_fewshot_caption.py @@ -0,0 +1,95 @@ +_base_ = [ + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='Flamingo', + tokenizer=dict( + type='LlamaTokenizer', name_or_path='decapoda-research/llama-7b-hf'), + vision_encoder=dict( + type='VisionTransformer', + arch='l', + patch_size=14, + pre_norm=True, + norm_cfg=dict(type='LN', eps=1e-5), + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + final_norm=False, + out_type='raw', + pretrained=( + 'https://download.openmmlab.com/mmclassification/v0/clip/' + 'vit-large-p14_clip-openai-pre_3rdparty_20230517-95e2af0b.pth'), + ), + lang_encoder=dict( + base=dict( + type='AutoModelForCausalLM', + name_or_path='decapoda-research/llama-7b-hf', + local_files_only=True), + adapter=dict( + type='FlamingoLMAdapter', + vis_hidden_size=1024, + cross_attn_every_n_layers=4, + use_media_placement_augmentation=False), + ), + task='caption', + shot_prompt_tmpl='Output:{caption}<|endofchunk|>', + final_prompt_tmpl='Output:', + generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0)) + +# data settings +data_preprocessor = dict( + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +test_pipeline = [ + dict( + type='ApplyToList', + # Flamingo requires to load multiple images during few-shot inference. 
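+        # `ApplyToList` repeats the wrapped transforms for every path in
+        # `scatter_key` (the query image plus its support shots) and gathers
+        # the fields listed in `collate_keys` from each result so that they
+        # can be packed together by `PackInputs`.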
+ scatter_key='img_path', + transforms=[ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=224, + interpolation='bicubic', + backend='pillow'), + dict(type='CenterCrop', crop_size=(224, 224)), + ], + collate_keys=['img', 'scale_factor', 'ori_shape'], + ), + dict( + type='PackInputs', + algorithm_keys=['gt_caption', 'shots'], + meta_keys=['image_id']), +] + +val_dataloader = dict( + batch_size=8, + num_workers=8, + dataset=dict( + type='FlamingoEvalCOCOCaption', + data_root='data/coco', + ann_file='annotations/captions_train2014.json', + data_prefix=dict(img_path='train2014'), + pipeline=test_pipeline, + num_shots=2, + num_support_examples=2048, + num_query_examples=5000, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +val_evaluator = dict( + type='COCOCaption', + ann_file='data/coco/annotations/captions_train2014.json') + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator + +# schedule settings +val_cfg = dict() +test_cfg = dict() diff --git a/configs/flamingo/flamingo_fewshot_vqa.py b/configs/flamingo/flamingo_fewshot_vqa.py new file mode 100644 index 0000000..b85a698 --- /dev/null +++ b/configs/flamingo/flamingo_fewshot_vqa.py @@ -0,0 +1,109 @@ +_base_ = [ + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='Flamingo', + tokenizer=dict( + type='LlamaTokenizer', name_or_path='decapoda-research/llama-7b-hf'), + vision_encoder=dict( + type='VisionTransformer', + arch='l', + patch_size=14, + pre_norm=True, + norm_cfg=dict(type='LN', eps=1e-5), + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + final_norm=False, + out_type='raw', + pretrained=( + 'https://download.openmmlab.com/mmclassification/v0/clip/' + 'vit-large-p14_clip-openai-pre_3rdparty_20230517-95e2af0b.pth'), + ), + lang_encoder=dict( + base=dict( + type='AutoModelForCausalLM', + name_or_path='decapoda-research/llama-7b-hf', + local_files_only=True), + adapter=dict( + type='FlamingoLMAdapter', + vis_hidden_size=1024, + cross_attn_every_n_layers=4, + use_media_placement_augmentation=False), + ), + task='vqa', + shot_prompt_tmpl= + 'Question:{question} Short Answer:{answer}<|endofchunk|>', + final_prompt_tmpl='Question:{question} Short Answer:', + generation_cfg=dict(num_beams=3, max_new_tokens=5, length_penalty=-2.0)) + +# data settings +data_preprocessor = dict( + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +test_pipeline = [ + dict( + type='ApplyToList', + # Flamingo requires to load multiple images during few-shot inference. 
+ scatter_key='img_path', + transforms=[ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=224, + interpolation='bicubic', + backend='pillow'), + dict(type='CenterCrop', crop_size=(224, 224)), + ], + collate_keys=['img', 'scale_factor', 'ori_shape'], + ), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight', 'shots'], + meta_keys=['image_id']), +] + +val_dataloader = dict( + batch_size=8, + num_workers=8, + dataset=dict( + type='FlamingoEvalCOCOVQA', + data_root='data/coco', + data_prefix='val2014', + question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json', + ann_file='annotations/v2_mscoco_val2014_annotations.json', + pipeline=test_pipeline, + num_shots=2, + num_support_examples=2048, + num_query_examples=5000, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +val_evaluator = dict(type='VQAAcc') + +test_dataloader = dict( + batch_size=8, + num_workers=8, + dataset=dict( + type='FlamingoEvalCOCOVQA', + data_root='data/coco', + data_prefix='test2015', + question_file= + 'annotations/v2_OpenEnded_mscoco_test-dev2015_questions.json', + pipeline=test_pipeline, + num_shots=0, + num_support_examples=2048, + num_query_examples=5000, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +test_evaluator = dict(type='ReportVQA', file_path='vqa_test-dev.json') + +# schedule settings +val_cfg = dict() +test_cfg = dict() diff --git a/configs/flamingo/flamingo_zeroshot_caption.py b/configs/flamingo/flamingo_zeroshot_caption.py new file mode 100644 index 0000000..deb786e --- /dev/null +++ b/configs/flamingo/flamingo_zeroshot_caption.py @@ -0,0 +1,95 @@ +_base_ = [ + '../_base_/default_runtime.py', +] + +zeroshot_prompt = ( + 'Output:A child holding a flowered umbrella and petting a yak.<|endofchunk|>' # noqa: E501 + 'Output:The child is holding a brush close to his mouth.<|endofchunk|>' # noqa: E501 +) + +# model settings +model = dict( + type='Flamingo', + tokenizer=dict( + type='LlamaTokenizer', name_or_path='decapoda-research/llama-7b-hf'), + vision_encoder=dict( + type='VisionTransformer', + arch='l', + patch_size=14, + pre_norm=True, + norm_cfg=dict(type='LN', eps=1e-5), + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + final_norm=False, + out_type='raw', + pretrained=( + 'https://download.openmmlab.com/mmclassification/v0/clip/' + 'vit-large-p14_clip-openai-pre_3rdparty_20230517-95e2af0b.pth'), + ), + lang_encoder=dict( + base=dict( + type='AutoModelForCausalLM', + name_or_path='decapoda-research/llama-7b-hf', + local_files_only=True), + adapter=dict( + type='FlamingoLMAdapter', + vis_hidden_size=1024, + cross_attn_every_n_layers=4, + use_media_placement_augmentation=False), + ), + task='caption', + zeroshot_prompt=zeroshot_prompt, + final_prompt_tmpl='Output:', + generation_cfg=dict(num_beams=3, max_new_tokens=20, length_penalty=-2.0), +) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=224, + interpolation='bicubic', + backend='pillow'), + dict(type='CenterCrop', crop_size=(224, 224)), + dict( + type='PackInputs', + algorithm_keys=['gt_caption'], + meta_keys=['image_id'], + ), +] + +val_dataloader = dict( + batch_size=8, + num_workers=8, + dataset=dict( + type='FlamingoEvalCOCOCaption', + data_root='data/coco', + 
ann_file='annotations/captions_train2014.json', + data_prefix=dict(img_path='train2014'), + pipeline=test_pipeline, + num_shots=0, + num_support_examples=2048, + num_query_examples=5000, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +val_evaluator = dict( + type='COCOCaption', + ann_file='data/coco/annotations/captions_train2014.json') + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator + +# schedule settings +val_cfg = dict() +test_cfg = dict() diff --git a/configs/flamingo/flamingo_zeroshot_vqa.py b/configs/flamingo/flamingo_zeroshot_vqa.py new file mode 100644 index 0000000..c43c7b8 --- /dev/null +++ b/configs/flamingo/flamingo_zeroshot_vqa.py @@ -0,0 +1,107 @@ +_base_ = [ + '../_base_/default_runtime.py', +] + +zeroshot_prompt = ( + 'Question:What is this photo taken looking through? Short Answer:pitcher<|endofchunk|>' # noqa: E501 + 'Question:How many people are wearing shorts in the forefront of this photo? Short Answer:4<|endofchunk|>' # noqa: E501 +) + +# model settings +model = dict( + type='Flamingo', + tokenizer=dict( + type='LlamaTokenizer', name_or_path='decapoda-research/llama-7b-hf'), + vision_encoder=dict( + type='VisionTransformer', + arch='l', + patch_size=14, + pre_norm=True, + norm_cfg=dict(type='LN', eps=1e-5), + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + final_norm=False, + out_type='raw', + pretrained=( + 'https://download.openmmlab.com/mmclassification/v0/clip/' + 'vit-large-p14_clip-openai-pre_3rdparty_20230517-95e2af0b.pth'), + ), + lang_encoder=dict( + base=dict( + type='AutoModelForCausalLM', + name_or_path='decapoda-research/llama-7b-hf', + local_files_only=True), + adapter=dict( + type='FlamingoLMAdapter', + vis_hidden_size=1024, + cross_attn_every_n_layers=4, + use_media_placement_augmentation=False), + ), + task='vqa', + zeroshot_prompt=zeroshot_prompt, + final_prompt_tmpl='Question:{question} Short Answer:', + generation_cfg=dict(num_beams=3, max_new_tokens=5, length_penalty=-2.0)) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=224, + interpolation='bicubic', + backend='pillow'), + dict(type='CenterCrop', crop_size=(224, 224)), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight', 'shots'], + meta_keys=['image_id'], + ), +] + +val_dataloader = dict( + batch_size=8, + num_workers=8, + dataset=dict( + type='FlamingoEvalCOCOVQA', + data_root='data/coco', + data_prefix='val2014', + question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json', + ann_file='annotations/v2_mscoco_val2014_annotations.json', + pipeline=test_pipeline, + num_shots=0, + num_support_examples=2048, + num_query_examples=5000, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +val_evaluator = dict(type='VQAAcc') + +test_dataloader = dict( + batch_size=8, + num_workers=8, + dataset=dict( + type='FlamingoEvalCOCOVQA', + data_root='data/coco', + data_prefix='test2015', + question_file= + 'annotations/v2_OpenEnded_mscoco_test-dev2015_questions.json', + pipeline=test_pipeline, + num_shots=0, + num_support_examples=2048, + num_query_examples=5000, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) 
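+# VQAv2 test-dev has no publicly released answers, so instead of computing an
+# accuracy, `ReportVQA` below simply dumps the predictions to
+# `vqa_test-dev.json` for submission to the evaluation server.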
+test_evaluator = dict(type='ReportVQA', file_path='vqa_test-dev.json') + +# schedule settings +val_cfg = dict() +test_cfg = dict() diff --git a/configs/flamingo/metafile.yml b/configs/flamingo/metafile.yml new file mode 100644 index 0000000..6ff33e9 --- /dev/null +++ b/configs/flamingo/metafile.yml @@ -0,0 +1,42 @@ +Collections: + - Name: Flamingo + Metadata: + Architecture: + - Transformer + - Gated Cross-Attention Dense + Paper: + Title: 'Flamingo: a Visual Language Model for Few-Shot Learning' + URL: https://arxiv.org/abs/2204.14198 + README: configs/flamingo/README.md + +Models: + - Name: flamingo_3rdparty-zeroshot_caption + Metadata: + FLOPs: null + Parameters: 8220452880 + In Collection: Flamingo + Results: + - Task: Image Caption + Dataset: COCO + Metrics: + CIDER: 65.50 # Report from the official repo + Weights: https://download.openmmlab.com/mmclassification/v1/flamingo/openflamingo-9b-adapter_20230505-554310c8.pth + Config: configs/flamingo/flamingo_zeroshot_caption.py + Converted From: + Weights: https://huggingface.co/openflamingo/OpenFlamingo-9B + Code: https://github.com/mlfoundations/open_flamingo + - Name: flamingo_3rdparty-zeroshot_vqa + Metadata: + FLOPs: null + Parameters: 8220452880 + In Collection: Flamingo + Results: + - Task: Visual Question Answering + Dataset: VQAv2 + Metrics: + Accuracy: 43.50 # Report from the official repo + Weights: https://download.openmmlab.com/mmclassification/v1/flamingo/openflamingo-9b-adapter_20230505-554310c8.pth + Config: configs/flamingo/flamingo_zeroshot_vqa.py + Converted From: + Weights: https://huggingface.co/openflamingo/OpenFlamingo-9B + Code: https://github.com/mlfoundations/open_flamingo diff --git a/configs/glip/README.md b/configs/glip/README.md new file mode 100644 index 0000000..48ee305 --- /dev/null +++ b/configs/glip/README.md @@ -0,0 +1,57 @@ +# GLIP + +> [Grounded Language-Image Pre-training](https://arxiv.org/abs/2112.03857) + + + +## Abstract + +This paper presents a grounded language-image pre-training (GLIP) model for learning object-level, language-aware, and semantic-rich visual representations. GLIP unifies object detection and phrase grounding for pre-training. The unification brings two benefits: 1) it allows GLIP to learn from both detection and grounding data to improve both tasks and bootstrap a good grounding model; 2) GLIP can leverage massive image-text pairs by generating grounding boxes in a self-training fashion, making the learned representation semantic-rich. In our experiments, we pre-train GLIP on 27M grounding data, including 3M human-annotated and 24M web-crawled image-text pairs. The learned representations demonstrate strong zero-shot and few-shot transferability to various object-level recognition tasks. 1) When directly evaluated on COCO and LVIS (without seeing any images in COCO during pre-training), GLIP achieves 49.8 AP and 26.9 AP, respectively, surpassing many supervised baselines. 2) After fine-tuned on COCO, GLIP achieves 60.8 AP on val and 61.5 AP on test-dev, surpassing prior SoTA. 3) When transferred to 13 downstream object detection tasks, a 1-shot GLIP rivals with a fully-supervised Dynamic Head. + +
+ +
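+mmpretrain only ships the pre-trained Swin backbones of GLIP (the "headless"
+configs below), but the following toy sketch may help make the grounding
+formulation in the abstract concrete: detection categories become phrases of a
+text prompt, and the usual classification logits are replaced by region-word
+alignment scores. All shapes and projection layers here are illustrative
+assumptions, not the official GLIP implementation.
+
+```python
+import torch
+import torch.nn as nn
+
+regions = torch.rand(1, 100, 256)   # region features from the image backbone
+tokens = torch.rand(1, 12, 768)     # token features of "person. bicycle. car."
+
+img_proj = nn.Linear(256, 64)       # project both modalities into a shared space
+txt_proj = nn.Linear(768, 64)
+
+# Region-word alignment scores act as detection logits: each region is matched
+# to the phrase whose tokens it aligns with best.
+alignment = img_proj(regions) @ txt_proj(tokens).transpose(1, 2)
+print(alignment.shape)  # torch.Size([1, 100, 12])
+```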
+ +## How to use it? + + + +**Use the model** + +```python +import torch +from mmpretrain import get_model +model = get_model('swin-t_glip-pre_3rdparty', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + + + +## Results and models + +### Pre-trained models + +The pre-trained models are used to fine-tune, and therefore don't have evaluation results. + +| Model | Pretrain | resolution | Download | +| :------------------------------------------ | :------------------------: | :--------: | :-------------------------------------------------------------------------------------------------------------------: | +| GLIP-T (`swin-t_glip-pre_3rdparty`)\* | O365,GoldG,CC3M,SBU | 224x224 | [model](https://download.openmmlab.com/mmclassification/v1/glip/swin-t_glip-pre_3rdparty_20230413-d85813b5.pth) | +| GLIP-L (`swin-l_glip-pre_3rdparty_384px`)\* | FourODs,GoldG,CC3M+12M,SBU | 384x384 | [model](https://download.openmmlab.com/mmclassification/v1/glip/swin-l_glip-pre_3rdparty_384px_20230413-04b198e8.pth) | + +*Models with * are converted from the [official repo](https://github.com/microsoft/GLIP).* + +## Citation + +```bibtex +@inproceedings{li2021grounded, + title={Grounded Language-Image Pre-training}, + author={Liunian Harold Li* and Pengchuan Zhang* and Haotian Zhang* and Jianwei Yang and Chunyuan Li and Yiwu Zhong and Lijuan Wang and Lu Yuan and Lei Zhang and Jenq-Neng Hwang and Kai-Wei Chang and Jianfeng Gao}, + year={2022}, + booktitle={CVPR}, +} +``` diff --git a/configs/glip/glip-l_headless.py b/configs/glip/glip-l_headless.py new file mode 100644 index 0000000..991b6b8 --- /dev/null +++ b/configs/glip/glip-l_headless.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', + arch='large', + img_size=384, + out_indices=(1, 2, 3), # original weight is for detection + stage_cfgs=dict(block_cfgs=dict(window_size=12))), + neck=None, + head=None) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + # convert image from BGR to RGB + to_rgb=False, +) diff --git a/configs/glip/glip-t_headless.py b/configs/glip/glip-t_headless.py new file mode 100644 index 0000000..08b89f8 --- /dev/null +++ b/configs/glip/glip-t_headless.py @@ -0,0 +1,18 @@ +model = dict( + type='ImageClassifier', + backbone=dict( + type='SwinTransformer', + arch='tiny', + img_size=224, + out_indices=(1, 2, 3), # original weight is for detection + ), + neck=None, + head=None) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + # convert image from BGR to RGB + to_rgb=False, +) diff --git a/configs/glip/metafile.yml b/configs/glip/metafile.yml new file mode 100644 index 0000000..0691fd0 --- /dev/null +++ b/configs/glip/metafile.yml @@ -0,0 +1,49 @@ +Collections: + - Name: GLIP + Metadata: + Training Techniques: + - AdamW + - Weight Decay + Architecture: + - Shift Window Multihead Self Attention + Paper: + URL: https://arxiv.org/abs/2112.03857 + Title: "Grounded Language-Image Pre-training" + README: configs/glip/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/vit.py + Version: v1.0.0rc8 + +Models: + - Name: swin-t_glip-pre_3rdparty + In Collection: GLIP + Metadata: + FLOPs: 4508464128 + Parameters: 29056354 + Training Data: + - O365 + - GoldG + - 
CC3M + - SBU + Results: null + Weights: https://download.openmmlab.com/mmclassification/v1/glip/swin-t_glip-pre_3rdparty_20230413-d85813b5.pth + Converted From: + Weights: https://penzhanwu2bbs.blob.core.windows.net/data/GLIPv1_Open/models/glip_tiny_model_o365_goldg_cc_sbu.pth + Code: https://github.com/microsoft/GLIP + Config: configs/glip/glip-t_headless.py + - Name: swin-l_glip-pre_3rdparty_384px + In Collection: GLIP + Metadata: + FLOPs: 104080343040 + Parameters: 196735516 + Training Data: + - FourODs + - GoldG + - CC3M+12M + - SBU + Results: null + Weights: https://download.openmmlab.com/mmclassification/v1/glip/swin-l_glip-pre_3rdparty_384px_20230413-04b198e8.pth + Converted From: + Weights: https://penzhanwu2bbs.blob.core.windows.net/data/GLIPv1_Open/models/glip_large_model.pth + Code: https://github.com/microsoft/GLIP + Config: configs/glip/glip-l_headless.py diff --git a/configs/hivit/README.md b/configs/hivit/README.md new file mode 100644 index 0000000..18ae086 --- /dev/null +++ b/configs/hivit/README.md @@ -0,0 +1,81 @@ +# HiViT + +> [HiViT: A Simple and More Efficient Design of Hierarchical Vision Transformer](https://arxiv.org/abs/2205.14949) + + + +## Abstract + +Recently, masked image modeling (MIM) has offered a new methodology of self-supervised pre-training of vision transformers. A key idea of efficient implementation is to discard the masked image patches (or tokens) throughout the target network (encoder), which requires the encoder to be a plain vision transformer (e.g., ViT), albeit hierarchical vision transformers (e.g., Swin Transformer) have potentially better properties in formulating vision inputs. In this paper, we offer a new design of hierarchical vision transformers named HiViT (short for Hierarchical ViT) that enjoys both high efficiency and good performance in MIM. The key is to remove the unnecessary "local inter-unit operations", deriving structurally simple hierarchical vision transformers in which mask-units can be serialized like plain vision transformers. For this purpose, we start with Swin Transformer and (i) set the masking unit size to be the token size in the main stage of Swin Transformer, (ii) switch off inter-unit self-attentions before the main stage, and (iii) eliminate all operations after the main stage. Empirical studies demonstrate the advantageous performance of HiViT in terms of fully-supervised, self-supervised, and transfer learning. In particular, in running MAE on ImageNet-1K, HiViT-B reports a +0.6% accuracy gain over ViT-B and a 1.9$\times$ speed-up over Swin-B, and the performance gain generalizes to downstream tasks of detection and segmentation. Code will be made publicly available. + +
+ +
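+The efficiency argument in the abstract boils down to the following: once the
+mask units are serialized like plain ViT tokens, the masked tokens can simply
+be dropped before the (expensive) main stage, so the encoder only processes
+visible tokens. Below is a minimal MAE-style random-masking sketch with
+made-up shapes; it is an illustration, not the actual HiViT code.
+
+```python
+import torch
+
+B, N, C = 2, 196, 512            # tokens entering the main stage
+tokens = torch.rand(B, N, C)
+mask_ratio = 0.75
+
+num_keep = int(N * (1 - mask_ratio))
+noise = torch.rand(B, N)
+keep_idx = noise.argsort(dim=1)[:, :num_keep]    # random visible subset
+visible = torch.gather(tokens, 1, keep_idx[:, :, None].expand(-1, -1, C))
+print(visible.shape)             # torch.Size([2, 49, 512]): 4x fewer tokens
+```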
+ +## How to use it? + + + + + + + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/hivit/hivit-tiny-p16_16xb64_in1k.py +``` + + + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------- | :----------: | :--------: | :-------: | :-------: | :--------------------------------------: | :------: | +| `hivit-tiny-p16_16xb64_in1k` | From scratch | 19.18 | 4.60 | 82.10 | [config](hivit-tiny-p16_16xb64_in1k.py) | N/A | +| `hivit-small-p16_16xb64_in1k` | From scratch | 37.53 | 9.07 | N/A | [config](hivit-small-p16_16xb64_in1k.py) | N/A | +| `hivit-base-p16_16xb64_in1k` | From scratch | 79.05 | 18.47 | N/A | [config](hivit-base-p16_16xb64_in1k.py) | N/A | + +## Citation + +```bibtex +@inproceedings{zhanghivit, + title={HiViT: A Simpler and More Efficient Design of Hierarchical Vision Transformer}, + author={Zhang, Xiaosong and Tian, Yunjie and Xie, Lingxi and Huang, Wei and Dai, Qi and Ye, Qixiang and Tian, Qi}, + booktitle={International Conference on Learning Representations}, + year={2023}, +} +``` diff --git a/configs/hivit/hivit-base-p16_16xb64_in1k.py b/configs/hivit/hivit-base-p16_16xb64_in1k.py new file mode 100644 index 0000000..d37dcda --- /dev/null +++ b/configs/hivit/hivit-base-p16_16xb64_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/hivit/base_224.py', + '../_base_/datasets/imagenet_bs64_hivit_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_hivit.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/configs/hivit/hivit-small-p16_16xb64_in1k.py b/configs/hivit/hivit-small-p16_16xb64_in1k.py new file mode 100644 index 0000000..4fa3976 --- /dev/null +++ b/configs/hivit/hivit-small-p16_16xb64_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/hivit/small_224.py', + '../_base_/datasets/imagenet_bs64_hivit_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_hivit.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/configs/hivit/hivit-tiny-p16_16xb64_in1k.py b/configs/hivit/hivit-tiny-p16_16xb64_in1k.py new file mode 100644 index 0000000..4ed3b6a --- /dev/null +++ b/configs/hivit/hivit-tiny-p16_16xb64_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/hivit/tiny_224.py', + '../_base_/datasets/imagenet_bs64_hivit_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_hivit.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/configs/hivit/metafile.yml b/configs/hivit/metafile.yml new file mode 100644 index 0000000..67f3a69 --- /dev/null +++ b/configs/hivit/metafile.yml @@ -0,0 +1,63 @@ +Collections: + - Name: HiViT + Metadata: + Architecture: + - Dense Connections + - Dropout + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + Paper: + Title: 'HiViT: A Simple and More Efficient Design of Hierarchical Vision Transformer' + URL: https://arxiv.org/abs/2205.14949 + README: configs/hivit/README.md + Code: + URL: null + Version: null + +Models: + - Name: hivit-tiny-p16_16xb64_in1k + Metadata: + FLOPs: 4603000000 + Parameters: 19181000 + Training Data: + - ImageNet-1k + In Collection: HiViT + Results: + - Dataset: 
ImageNet-1k + Metrics: + Top 1 Accuracy: 82.1 + Task: Image Classification + Weights: + Config: configs/hivit/hivit-tiny-p16_16xb64_in1k.py + + - Name: hivit-small-p16_16xb64_in1k + Metadata: + FLOPs: 9072000000 + Parameters: 37526000 + Training Data: + - ImageNet-1k + In Collection: HiViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: + Task: Image Classification + Weights: + Config: configs/hivit/hivit-small-p16_16xb64_in1k.py + + - Name: hivit-base-p16_16xb64_in1k + Metadata: + FLOPs: 18474000000 + Parameters: 79051000 + Training Data: + - ImageNet-1k + In Collection: HiViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: + Task: Image Classification + Weights: + Config: configs/hivit/hivit-base-p16_16xb64_in1k.py diff --git a/configs/hornet/README.md b/configs/hornet/README.md new file mode 100644 index 0000000..b4dbf05 --- /dev/null +++ b/configs/hornet/README.md @@ -0,0 +1,80 @@ +# HorNet + +> [HorNet: Efficient High-Order Spatial Interactions with Recursive Gated Convolutions](https://arxiv.org/abs/2207.14284) + + + +## Abstract + +Recent progress in vision Transformers exhibits great success in various tasks driven by the new spatial modeling mechanism based on dot-product self-attention. In this paper, we show that the key ingredients behind the vision Transformers, namely input-adaptive, long-range and high-order spatial interactions, can also be efficiently implemented with a convolution-based framework. We present the Recursive Gated Convolution (g nConv) that performs high-order spatial interactions with gated convolutions and recursive designs. The new operation is highly flexible and customizable, which is compatible with various variants of convolution and extends the two-order interactions in self-attention to arbitrary orders without introducing significant extra computation. g nConv can serve as a plug-and-play module to improve various vision Transformers and convolution-based models. Based on the operation, we construct a new family of generic vision backbones named HorNet. Extensive experiments on ImageNet classification, COCO object detection and ADE20K semantic segmentation show HorNet outperform Swin Transformers and ConvNeXt by a significant margin with similar overall architecture and training configurations. HorNet also shows favorable scalability to more training data and a larger model size. Apart from the effectiveness in visual encoders, we also show g nConv can be applied to task-specific decoders and consistently improve dense prediction performance with less computation. Our results demonstrate that g nConv can be a new basic module for visual modeling that effectively combines the merits of both vision Transformers and CNNs. Code is available at https://github.com/raoyongming/HorNet. + +
+ +
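+The sketch below is a simplified reading of the recursive gated convolution
+(gnConv) described in the abstract: the input is projected, split into groups
+of increasing width, filtered with a depth-wise convolution, and gated
+recursively. It is written from the paper's description only and omits details
+of the official implementation (for example the global-filter variants used by
+the `*-gf` models below).
+
+```python
+import torch
+import torch.nn as nn
+
+
+class GnConv(nn.Module):
+    """Simplified recursive gated convolution (illustrative only)."""
+
+    def __init__(self, dim, order=3):
+        super().__init__()
+        # channel splits: [dim / 2**(order-1), ..., dim / 2, dim]
+        self.dims = [dim // 2**i for i in range(order)][::-1]
+        self.proj_in = nn.Conv2d(dim, 2 * dim, 1)
+        self.dwconv = nn.Conv2d(
+            sum(self.dims), sum(self.dims), 7, padding=3, groups=sum(self.dims))
+        self.proj_out = nn.Conv2d(dim, dim, 1)
+        self.pws = nn.ModuleList(
+            nn.Conv2d(self.dims[i], self.dims[i + 1], 1)
+            for i in range(order - 1))
+
+    def forward(self, x):
+        gate, feats = self.proj_in(x).split([self.dims[0], sum(self.dims)], 1)
+        feats = self.dwconv(feats).split(self.dims, 1)
+        x = gate * feats[0]                      # 1st-order spatial interaction
+        for pw, f in zip(self.pws, feats[1:]):   # recursively raise the order
+            x = pw(x) * f
+        return self.proj_out(x)
+
+
+y = GnConv(64)(torch.rand(1, 64, 56, 56))
+print(y.shape)  # torch.Size([1, 64, 56, 56])
+```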
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('hornet-tiny_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('hornet-tiny_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/hornet/hornet-tiny_8xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny_3rdparty_in1k_20220915-0e8eedff.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :-------------------------------------: | :-----------------------------------------------------------------------------: | +| `hornet-tiny_3rdparty_in1k`\* | From scratch | 22.41 | 3.98 | 82.84 | 96.24 | [config](hornet-tiny_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny_3rdparty_in1k_20220915-0e8eedff.pth) | +| `hornet-tiny-gf_3rdparty_in1k`\* | From scratch | 22.99 | 3.90 | 82.98 | 96.38 | [config](hornet-tiny-gf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny-gf_3rdparty_in1k_20220915-4c35a66b.pth) | +| `hornet-small_3rdparty_in1k`\* | From scratch | 49.53 | 8.83 | 83.79 | 96.75 | [config](hornet-small_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small_3rdparty_in1k_20220915-5935f60f.pth) | +| `hornet-small-gf_3rdparty_in1k`\* | From scratch | 50.40 | 8.71 | 83.98 | 96.77 | [config](hornet-small-gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small-gf_3rdparty_in1k_20220915-649ca492.pth) | +| `hornet-base_3rdparty_in1k`\* | From scratch | 87.26 | 15.58 | 84.24 | 96.94 | [config](hornet-base_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base_3rdparty_in1k_20220915-a06176bb.pth) | +| `hornet-base-gf_3rdparty_in1k`\* | From scratch | 88.42 | 15.42 | 84.32 | 96.95 | [config](hornet-base-gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base-gf_3rdparty_in1k_20220915-82c06fa7.pth) | + +*Models with * are converted from the [official repo](https://github.com/raoyongming/HorNet). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{rao2022hornet, + title={HorNet: Efficient High-Order Spatial Interactions with Recursive Gated Convolutions}, + author={Rao, Yongming and Zhao, Wenliang and Tang, Yansong and Zhou, Jie and Lim, Ser-Lam and Lu, Jiwen}, + journal={arXiv preprint arXiv:2207.14284}, + year={2022} +} +``` diff --git a/configs/hornet/hornet-base-gf_8xb64_in1k.py b/configs/hornet/hornet-base-gf_8xb64_in1k.py new file mode 100644 index 0000000..b27012d --- /dev/null +++ b/configs/hornet/hornet-base-gf_8xb64_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/hornet/hornet-base-gf.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=64) + +optim_wrapper = dict(optimizer=dict(lr=4e-3), clip_grad=dict(max_norm=1.0)) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/hornet/hornet-base_8xb64_in1k.py b/configs/hornet/hornet-base_8xb64_in1k.py new file mode 100644 index 0000000..cb78a7d --- /dev/null +++ b/configs/hornet/hornet-base_8xb64_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/hornet/hornet-base.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=64) + +optim_wrapper = dict(optimizer=dict(lr=4e-3), clip_grad=dict(max_norm=5.0)) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/hornet/hornet-small-gf_8xb64_in1k.py b/configs/hornet/hornet-small-gf_8xb64_in1k.py new file mode 100644 index 0000000..96fcc77 --- /dev/null +++ b/configs/hornet/hornet-small-gf_8xb64_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/hornet/hornet-small-gf.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=64) + +optim_wrapper = dict(optimizer=dict(lr=4e-3), clip_grad=dict(max_norm=1.0)) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/hornet/hornet-small_8xb64_in1k.py b/configs/hornet/hornet-small_8xb64_in1k.py new file mode 100644 index 0000000..f0535ad --- /dev/null +++ b/configs/hornet/hornet-small_8xb64_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/hornet/hornet-small.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=64) + +optim_wrapper = dict(optimizer=dict(lr=4e-3), clip_grad=dict(max_norm=5.0)) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/hornet/hornet-tiny-gf_8xb128_in1k.py b/configs/hornet/hornet-tiny-gf_8xb128_in1k.py new file mode 100644 index 0000000..3556de9 --- /dev/null +++ b/configs/hornet/hornet-tiny-gf_8xb128_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/hornet/hornet-tiny-gf.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=128) + +optim_wrapper = dict(optimizer=dict(lr=4e-3), clip_grad=dict(max_norm=1.0)) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/hornet/hornet-tiny_8xb128_in1k.py 
b/configs/hornet/hornet-tiny_8xb128_in1k.py new file mode 100644 index 0000000..31bd1dd --- /dev/null +++ b/configs/hornet/hornet-tiny_8xb128_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/hornet/hornet-tiny.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +data = dict(samples_per_gpu=128) + +optim_wrapper = dict(optimizer=dict(lr=4e-3), clip_grad=dict(max_norm=100.0)) + +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] diff --git a/configs/hornet/metafile.yml b/configs/hornet/metafile.yml new file mode 100644 index 0000000..eba0ed2 --- /dev/null +++ b/configs/hornet/metafile.yml @@ -0,0 +1,115 @@ +Collections: + - Name: HorNet + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + - Weight Decay + Architecture: + - HorNet + - gnConv + Paper: + URL: https://arxiv.org/abs/2207.14284 + Title: "HorNet: Efficient High-Order Spatial Interactions with Recursive Gated Convolutions" + README: configs/hornet/README.md + Code: + Version: v0.24.0 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.24.0/mmcls/models/backbones/hornet.py + +Models: + - Name: hornet-tiny_3rdparty_in1k + Metadata: + FLOPs: 3976156352 # 3.98G + Parameters: 22409512 # 22.41M + In Collection: HorNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.84 + Top 5 Accuracy: 96.24 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny_3rdparty_in1k_20220915-0e8eedff.pth + Config: configs/hornet/hornet-tiny_8xb128_in1k.py + Converted From: + Code: https://github.com/raoyongming/HorNet + Weights: https://cloud.tsinghua.edu.cn/f/1ca970586c6043709a3f/?dl=1 + - Name: hornet-tiny-gf_3rdparty_in1k + Metadata: + FLOPs: 3896472160 # 3.9G + Parameters: 22991848 # 22.99M + In Collection: HorNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.98 + Top 5 Accuracy: 96.38 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny-gf_3rdparty_in1k_20220915-4c35a66b.pth + Config: configs/hornet/hornet-tiny-gf_8xb128_in1k.py + Converted From: + Code: https://github.com/raoyongming/HorNet + Weights: https://cloud.tsinghua.edu.cn/f/511faad0bde94dfcaa54/?dl=1 + - Name: hornet-small_3rdparty_in1k + Metadata: + FLOPs: 8825621280 # 8.83G + Parameters: 49528264 # 49.53M + In Collection: HorNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.79 + Top 5 Accuracy: 96.75 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small_3rdparty_in1k_20220915-5935f60f.pth + Config: configs/hornet/hornet-small_8xb64_in1k.py + Converted From: + Code: https://github.com/raoyongming/HorNet + Weights: https://cloud.tsinghua.edu.cn/f/46422799db2941f7b684/?dl=1 + - Name: hornet-small-gf_3rdparty_in1k + Metadata: + FLOPs: 8706094992 # 8.71G + Parameters: 50401768 # 50.4M + In Collection: HorNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.98 + Top 5 Accuracy: 96.77 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small-gf_3rdparty_in1k_20220915-649ca492.pth + Config: configs/hornet/hornet-small-gf_8xb64_in1k.py + Converted From: + Code: https://github.com/raoyongming/HorNet + Weights: https://cloud.tsinghua.edu.cn/f/8405c984bf084d2ba85a/?dl=1 + - Name: hornet-base_3rdparty_in1k + Metadata: + FLOPs: 15582677376 # 15.59G + 
Parameters: 87256680 # 87.26M + In Collection: HorNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.24 + Top 5 Accuracy: 96.94 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base_3rdparty_in1k_20220915-a06176bb.pth + Config: configs/hornet/hornet-base_8xb64_in1k.py + Converted From: + Code: https://github.com/raoyongming/HorNet + Weights: https://cloud.tsinghua.edu.cn/f/5c86cb3d655d4c17a959/?dl=1 + - Name: hornet-base-gf_3rdparty_in1k + Metadata: + FLOPs: 15423308992 # 15.42G + Parameters: 88421352 # 88.42M + In Collection: HorNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.32 + Top 5 Accuracy: 96.95 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base-gf_3rdparty_in1k_20220915-82c06fa7.pth + Config: configs/hornet/hornet-base-gf_8xb64_in1k.py + Converted From: + Code: https://github.com/raoyongming/HorNet + Weights: https://cloud.tsinghua.edu.cn/f/6c84935e63b547f383fb/?dl=1 diff --git a/configs/hrnet/README.md b/configs/hrnet/README.md new file mode 100644 index 0000000..31725cf --- /dev/null +++ b/configs/hrnet/README.md @@ -0,0 +1,85 @@ +# HRNet + +> [Deep High-Resolution Representation Learning for Visual Recognition](https://arxiv.org/abs/1908.07919v2) + + + +## Abstract + +High-resolution representations are essential for position-sensitive vision problems, such as human pose estimation, semantic segmentation, and object detection. Existing state-of-the-art frameworks first encode the input image as a low-resolution representation through a subnetwork that is formed by connecting high-to-low resolution convolutions *in series* (e.g., ResNet, VGGNet), and then recover the high-resolution representation from the encoded low-resolution representation. Instead, our proposed network, named as High-Resolution Network (HRNet), maintains high-resolution representations through the whole process. There are two key characteristics: (i) Connect the high-to-low resolution convolution streams *in parallel*; (ii) Repeatedly exchange the information across resolutions. The benefit is that the resulting representation is semantically richer and spatially more precise. We show the superiority of the proposed HRNet in a wide range of applications, including human pose estimation, semantic segmentation, and object detection, suggesting that the HRNet is a stronger backbone for computer vision problems. + +
+ +
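+A toy sketch of the two ideas highlighted in the abstract: parallel streams
+that keep a high-resolution representation throughout, and repeated fusion
+that exchanges information across resolutions. The layer choices and channel
+widths below are illustrative assumptions, not the mmpretrain HRNet code.
+
+```python
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+high = torch.rand(1, 18, 56, 56)   # high-resolution stream (e.g. HRNet-W18)
+low = torch.rand(1, 36, 28, 28)    # low-resolution stream with more channels
+
+down = nn.Conv2d(18, 36, 3, stride=2, padding=1)   # high -> low resolution
+up = nn.Conv2d(36, 18, 1)                          # low -> high (then upsample)
+
+# One fusion ("exchange") step: every stream receives information from the other.
+fused_high = high + F.interpolate(
+    up(low), size=high.shape[-2:], mode='bilinear', align_corners=False)
+fused_low = low + down(high)
+print(fused_high.shape, fused_low.shape)
+```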
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('hrnet-w18_3rdparty_8xb32_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('hrnet-w18_3rdparty_8xb32_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/hrnet/hrnet-w18_4xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32_in1k_20220120-0c10b180.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :-------------------------------: | :------------------------------------------------------------------------------: | +| `hrnet-w18_3rdparty_8xb32_in1k`\* | From scratch | 21.30 | 4.33 | 76.75 | 93.44 | [config](hrnet-w18_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32_in1k_20220120-0c10b180.pth) | +| `hrnet-w30_3rdparty_8xb32_in1k`\* | From scratch | 37.71 | 8.17 | 78.19 | 94.22 | [config](hrnet-w30_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w30_3rdparty_8xb32_in1k_20220120-8aa3832f.pth) | +| `hrnet-w32_3rdparty_8xb32_in1k`\* | From scratch | 41.23 | 8.99 | 78.44 | 94.19 | [config](hrnet-w32_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w32_3rdparty_8xb32_in1k_20220120-c394f1ab.pth) | +| `hrnet-w40_3rdparty_8xb32_in1k`\* | From scratch | 57.55 | 12.77 | 78.94 | 94.47 | [config](hrnet-w40_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w40_3rdparty_8xb32_in1k_20220120-9a2dbfc5.pth) | +| `hrnet-w44_3rdparty_8xb32_in1k`\* | From scratch | 67.06 | 14.96 | 78.88 | 94.37 | [config](hrnet-w44_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w44_3rdparty_8xb32_in1k_20220120-35d07f73.pth) | +| `hrnet-w48_3rdparty_8xb32_in1k`\* | From scratch | 77.47 | 17.36 | 79.32 | 94.52 | [config](hrnet-w48_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32_in1k_20220120-e555ef50.pth) | +| `hrnet-w64_3rdparty_8xb32_in1k`\* | From scratch | 128.06 | 29.00 | 79.46 | 94.65 | [config](hrnet-w64_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w64_3rdparty_8xb32_in1k_20220120-19126642.pth) | +| `hrnet-w18_3rdparty_8xb32-ssld_in1k`\* | From scratch | 21.30 | 4.33 | 81.06 | 95.70 | [config](hrnet-w18_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32-ssld_in1k_20220120-455f69ea.pth) | +| `hrnet-w48_3rdparty_8xb32-ssld_in1k`\* | From scratch | 77.47 | 17.36 | 83.63 | 96.79 | [config](hrnet-w48_4xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32-ssld_in1k_20220120-d0459c38.pth) | + +*Models with * are converted from the [official 
repo](https://github.com/HRNet/HRNet-Image-Classification). The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{WangSCJDZLMTWLX19, + title={Deep High-Resolution Representation Learning for Visual Recognition}, + author={Jingdong Wang and Ke Sun and Tianheng Cheng and + Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and + Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao}, + journal={TPAMI}, + year={2019} +} +``` diff --git a/configs/hrnet/hrnet-w18_4xb32_in1k.py b/configs/hrnet/hrnet-w18_4xb32_in1k.py new file mode 100644 index 0000000..3bc329a --- /dev/null +++ b/configs/hrnet/hrnet-w18_4xb32_in1k.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w18.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (4 GPUs) x (32 samples per GPU) +auto_scale_lr = dict(base_batch_size=128) diff --git a/configs/hrnet/hrnet-w30_4xb32_in1k.py b/configs/hrnet/hrnet-w30_4xb32_in1k.py new file mode 100644 index 0000000..669a66b --- /dev/null +++ b/configs/hrnet/hrnet-w30_4xb32_in1k.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w30.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (4 GPUs) x (32 samples per GPU) +auto_scale_lr = dict(base_batch_size=128) diff --git a/configs/hrnet/hrnet-w32_4xb32_in1k.py b/configs/hrnet/hrnet-w32_4xb32_in1k.py new file mode 100644 index 0000000..1e48740 --- /dev/null +++ b/configs/hrnet/hrnet-w32_4xb32_in1k.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w32.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (4 GPUs) x (32 samples per GPU) +auto_scale_lr = dict(base_batch_size=128) diff --git a/configs/hrnet/hrnet-w40_4xb32_in1k.py b/configs/hrnet/hrnet-w40_4xb32_in1k.py new file mode 100644 index 0000000..1866a2a --- /dev/null +++ b/configs/hrnet/hrnet-w40_4xb32_in1k.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w40.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (4 GPUs) x (32 samples per GPU) +auto_scale_lr = dict(base_batch_size=128) diff --git a/configs/hrnet/hrnet-w44_4xb32_in1k.py b/configs/hrnet/hrnet-w44_4xb32_in1k.py new file mode 100644 index 0000000..4ec913f --- /dev/null +++ b/configs/hrnet/hrnet-w44_4xb32_in1k.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w44.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (4 GPUs) x (32 samples per GPU) +auto_scale_lr = dict(base_batch_size=128) diff --git a/configs/hrnet/hrnet-w48_4xb32_in1k.py b/configs/hrnet/hrnet-w48_4xb32_in1k.py new file mode 100644 index 0000000..0fc3f18 --- /dev/null +++ b/configs/hrnet/hrnet-w48_4xb32_in1k.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w48.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (4 GPUs) x (32 samples per GPU) +auto_scale_lr = dict(base_batch_size=128) diff --git a/configs/hrnet/hrnet-w64_4xb32_in1k.py b/configs/hrnet/hrnet-w64_4xb32_in1k.py new file mode 100644 index 0000000..659b3cd --- /dev/null +++ b/configs/hrnet/hrnet-w64_4xb32_in1k.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/hrnet/hrnet-w64.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (4 GPUs) x (32 samples per GPU) +auto_scale_lr = dict(base_batch_size=128) diff --git a/configs/hrnet/metafile.yml b/configs/hrnet/metafile.yml new file mode 100644 index 0000000..3a17b12 --- /dev/null +++ b/configs/hrnet/metafile.yml @@ -0,0 +1,162 @@ +Collections: + - Name: HRNet + Metadata: + Training Data: ImageNet-1k + Architecture: + - Batch Normalization + - Convolution + - ReLU + - Residual Connection + Paper: + URL: https://arxiv.org/abs/1908.07919v2 + Title: "Deep High-Resolution Representation Learning for Visual Recognition" + README: configs/hrnet/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.20.1/mmcls/models/backbones/hrnet.py + Version: v0.20.1 + +Models: + - Name: hrnet-w18_3rdparty_8xb32_in1k + Metadata: + FLOPs: 4330397932 + Parameters: 21295164 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.75 + Top 5 Accuracy: 93.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32_in1k_20220120-0c10b180.pth + Config: configs/hrnet/hrnet-w18_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33cMkPimlmClRvmpw + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w30_3rdparty_8xb32_in1k + Metadata: + FLOPs: 8168305684 + Parameters: 37708380 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.19 + Top 5 Accuracy: 94.22 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w30_3rdparty_8xb32_in1k_20220120-8aa3832f.pth + Config: configs/hrnet/hrnet-w30_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33cQoACCEfrzcSaVI + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w32_3rdparty_8xb32_in1k + Metadata: + FLOPs: 8986267584 + Parameters: 41228840 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.44 + Top 5 Accuracy: 94.19 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w32_3rdparty_8xb32_in1k_20220120-c394f1ab.pth + Config: configs/hrnet/hrnet-w32_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33dYBMemi9xOUFR0w + Code: 
https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w40_3rdparty_8xb32_in1k + Metadata: + FLOPs: 12767574064 + Parameters: 57553320 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.94 + Top 5 Accuracy: 94.47 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w40_3rdparty_8xb32_in1k_20220120-9a2dbfc5.pth + Config: configs/hrnet/hrnet-w40_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33ck0gvo5jfoWBOPo + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w44_3rdparty_8xb32_in1k + Metadata: + FLOPs: 14963902632 + Parameters: 67061144 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.88 + Top 5 Accuracy: 94.37 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w44_3rdparty_8xb32_in1k_20220120-35d07f73.pth + Config: configs/hrnet/hrnet-w44_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33czZQ0woUb980gRs + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w48_3rdparty_8xb32_in1k + Metadata: + FLOPs: 17364014752 + Parameters: 77466024 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.32 + Top 5 Accuracy: 94.52 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32_in1k_20220120-e555ef50.pth + Config: configs/hrnet/hrnet-w48_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33dKvqI6pBZlifgJk + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w64_3rdparty_8xb32_in1k + Metadata: + FLOPs: 29002298752 + Parameters: 128056104 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.46 + Top 5 Accuracy: 94.65 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w64_3rdparty_8xb32_in1k_20220120-19126642.pth + Config: configs/hrnet/hrnet-w64_4xb32_in1k.py + Converted From: + Weights: https://1drv.ms/u/s!Aus8VCZ_C_33gQbJsUPTIj3rQu99 + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w18_3rdparty_8xb32-ssld_in1k + Metadata: + FLOPs: 4330397932 + Parameters: 21295164 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.06 + Top 5 Accuracy: 95.7 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32-ssld_in1k_20220120-455f69ea.pth + Config: configs/hrnet/hrnet-w18_4xb32_in1k.py + Converted From: + Weights: https://github.com/HRNet/HRNet-Image-Classification/releases/download/PretrainedWeights/HRNet_W18_C_ssld_pretrained.pth + Code: https://github.com/HRNet/HRNet-Image-Classification + - Name: hrnet-w48_3rdparty_8xb32-ssld_in1k + Metadata: + FLOPs: 17364014752 + Parameters: 77466024 + In Collection: HRNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.63 + Top 5 Accuracy: 96.79 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32-ssld_in1k_20220120-d0459c38.pth + Config: configs/hrnet/hrnet-w48_4xb32_in1k.py + Converted From: + Weights: https://github.com/HRNet/HRNet-Image-Classification/releases/download/PretrainedWeights/HRNet_W48_C_ssld_pretrained.pth + Code: https://github.com/HRNet/HRNet-Image-Classification diff --git a/configs/inception_v3/README.md 
b/configs/inception_v3/README.md new file mode 100644 index 0000000..24fde38 --- /dev/null +++ b/configs/inception_v3/README.md @@ -0,0 +1,76 @@ +# Inception V3 + +> [Rethinking the Inception Architecture for Computer Vision](http://arxiv.org/abs/1512.00567) + + + +## Abstract + +Convolutional networks are at the core of most state-of-the-art computer vision solutions for a wide variety of tasks. Since 2014 very deep convolutional networks started to become mainstream, yielding substantial gains in various benchmarks. Although increased model size and computational cost tend to translate to immediate quality gains for most tasks (as long as enough labeled data is provided for training), computational efficiency and low parameter count are still enabling factors for various use cases such as mobile vision and big-data scenarios. Here we explore ways to scale up networks in ways that aim at utilizing the added computation as efficiently as possible by suitably factorized convolutions and aggressive regularization. We benchmark our methods on the ILSVRC 2012 classification challenge validation set demonstrate substantial gains over the state of the art: 21.2% top-1 and 5.6% top-5 error for single frame evaluation using a network with a computational cost of 5 billion multiply-adds per inference and with using less than 25 million parameters. With an ensemble of 4 models and multi-crop evaluation, we report 3.5% top-5 error on the validation set (3.6% error on the test set) and 17.3% top-1 error on the validation set. + +
+ +
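+The "suitably factorized convolutions" mentioned above are easy to make concrete. Below is a minimal PyTorch sketch, for illustration only, of the two tricks the paper relies on: replacing a 5x5 convolution with two stacked 3x3 convolutions, and replacing an n x n convolution with a 1 x n convolution followed by an n x 1 convolution. It is not the backbone implementation used by the configs in this folder.
+
+```python
+import torch
+import torch.nn as nn
+
+# Illustrative only: two ways Inception V3 factorizes large convolutions.
+
+
+class Factorized5x5(nn.Module):
+    """One 5x5 conv replaced by two stacked 3x3 convs (same receptive field)."""
+
+    def __init__(self, in_ch, out_ch):
+        super().__init__()
+        self.conv = nn.Sequential(
+            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+        )
+
+    def forward(self, x):
+        return self.conv(x)
+
+
+class FactorizedNxN(nn.Module):
+    """An n x n conv replaced by a 1 x n conv followed by an n x 1 conv."""
+
+    def __init__(self, in_ch, out_ch, n=7):
+        super().__init__()
+        pad = n // 2
+        self.conv = nn.Sequential(
+            nn.Conv2d(in_ch, out_ch, kernel_size=(1, n), padding=(0, pad)),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(out_ch, out_ch, kernel_size=(n, 1), padding=(pad, 0)),
+            nn.ReLU(inplace=True),
+        )
+
+    def forward(self, x):
+        return self.conv(x)
+
+
+x = torch.rand(1, 64, 35, 35)
+print(Factorized5x5(64, 96)(x).shape)       # torch.Size([1, 96, 35, 35])
+print(FactorizedNxN(64, 96, n=7)(x).shape)  # torch.Size([1, 96, 35, 35])
+```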
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('inception-v3_3rdparty_8xb32_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('inception-v3_3rdparty_8xb32_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/inception_v3/inception-v3_8xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/inception-v3/inception-v3_3rdparty_8xb32_in1k_20220615-dcd4d910.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------: | :-----------------------------------------------------------------------------: | +| `inception-v3_3rdparty_8xb32_in1k`\* | From scratch | 23.83 | 5.75 | 77.57 | 93.58 | [config](inception-v3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/inception-v3/inception-v3_3rdparty_8xb32_in1k_20220615-dcd4d910.pth) | + +*Models with * are converted from the [official repo](https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py#L28). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{szegedy2016rethinking, + title={Rethinking the inception architecture for computer vision}, + author={Szegedy, Christian and Vanhoucke, Vincent and Ioffe, Sergey and Shlens, Jon and Wojna, Zbigniew}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={2818--2826}, + year={2016} +} +``` diff --git a/configs/inception_v3/inception-v3_8xb32_in1k.py b/configs/inception_v3/inception-v3_8xb32_in1k.py new file mode 100644 index 0000000..ac977f4 --- /dev/null +++ b/configs/inception_v3/inception-v3_8xb32_in1k.py @@ -0,0 +1,24 @@ +_base_ = [ + '../_base_/models/inception_v3.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py', +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=299), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=342, edge='short'), + dict(type='CenterCrop', crop_size=299), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/inception_v3/metafile.yml b/configs/inception_v3/metafile.yml new file mode 100644 index 0000000..0b556de --- /dev/null +++ b/configs/inception_v3/metafile.yml @@ -0,0 +1,37 @@ +Collections: + - Name: Inception V3 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 100 + Batch Size: 256 + Architecture: + - Inception + Paper: + URL: http://arxiv.org/abs/1512.00567 + Title: "Rethinking the Inception Architecture for Computer Vision" + README: configs/inception_v3/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc1/configs/inception_v3/metafile.yml + Version: v1.0.0rc1 + +Models: + - Name: inception-v3_3rdparty_8xb32_in1k + Metadata: + FLOPs: 5745177632 + Parameters: 23834568 + In Collection: Inception V3 + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.57 + Top 5 Accuracy: 93.58 + Weights: https://download.openmmlab.com/mmclassification/v0/inception-v3/inception-v3_3rdparty_8xb32_in1k_20220615-dcd4d910.pth + Config: configs/inception_v3/inception-v3_8xb32_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py#L28 diff --git a/configs/itpn/README.md b/configs/itpn/README.md new file mode 100644 index 0000000..93200d0 --- /dev/null +++ b/configs/itpn/README.md @@ -0,0 +1,65 @@ +# iTPN + +> [Integrally Pre-Trained Transformer Pyramid Networks](https://arxiv.org/abs/2211.12735) + + + +## Abstract + +In this paper, we present an integral pre-training framework based on masked image modeling (MIM). We advocate for pre-training the backbone and neck jointly so that the transfer gap between MIM and downstream recognition tasks is minimal. We make two technical contributions. First, we unify the reconstruction and recognition necks by inserting a feature pyramid into the pre-training stage. 
Second, we complement mask image modeling (MIM) with masked feature modeling (MFM) that offers multi-stage supervision to the feature pyramid. The pre-trained models, termed integrally pre-trained transformer pyramid networks (iTPNs), serve as powerful foundation models for visual recognition. In particular, the base/large-level iTPN achieves an 86.2%/87.8% top-1 accuracy on ImageNet-1K, a 53.2%/55.6% box AP on COCO object detection with 1x training schedule using Mask-RCNN, and a 54.7%/57.7% mIoU on ADE20K semantic segmentation using UPerHead -- all these results set new records. Our work inspires the community to work on unifying upstream pre-training and downstream fine-tuning tasks. Code and the pre-trained models will be released at https://github.com/sunsmarterjie/iTPN. + +
+ +
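+To make the "multi-stage supervision to the feature pyramid" idea concrete, here is a toy sketch of the loss structure, assuming each pyramid level predicts features for the masked tokens and is compared against a per-level target (pixel statistics or CLIP features, depending on the config). This only illustrates the idea; it is not the `iTPNPretrainDecoder` or the loss modules used by the configs in this folder.
+
+```python
+import torch
+import torch.nn.functional as F
+
+# Toy illustration: "multi-stage supervision" means every level of the
+# pretraining feature pyramid gets its own reconstruction target, and the
+# per-level losses are averaged.
+
+
+def multi_stage_feature_loss(pyramid_feats, target_feats):
+    """Cosine-similarity loss averaged over pyramid levels.
+
+    Both arguments are lists of tensors, one per level, each of shape
+    (B, N, C) for N masked tokens with C channels.
+    """
+    assert len(pyramid_feats) == len(target_feats)
+    losses = []
+    for pred, target in zip(pyramid_feats, target_feats):
+        pred = F.normalize(pred, dim=-1)
+        target = F.normalize(target, dim=-1)
+        # 1 - cosine similarity, averaged over tokens and batch.
+        losses.append((1 - (pred * target).sum(dim=-1)).mean())
+    return sum(losses) / len(losses)
+
+
+levels = [torch.rand(2, 49, 256) for _ in range(3)]
+targets = [torch.rand(2, 49, 256) for _ in range(3)]
+print(multi_stage_feature_loss(levels, targets))  # scalar tensor
+```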
+ +## How to use it? + + + + + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-800e_in1k.py +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :------------------------------------------------------ | :--------: | :-------: | :----------------------------------------------------------------: | :------: | +| `itpn-clip-b_hivit-base-p16_8xb256-amp-coslr-800e_in1k` | 233.00 | 18.47 | [config](itpn-clip-b_hivit-base-p16_8xb256-amp-coslr-800e_in1k.py) | N/A | +| `itpn-pixel_hivit-base-p16_8xb512-amp-coslr-800e_in1k` | 103.00 | 18.47 | [config](itpn-pixel_hivit-base-p16_8xb512-amp-coslr-800e_in1k.py) | N/A | +| `itpn-pixel_hivit-large-p16_8xb512-amp-coslr-800e_in1k` | 314.00 | 63.98 | [config](itpn-pixel_hivit-large-p16_8xb512-amp-coslr-800e_in1k.py) | N/A | + +## Citation + +```bibtex +@article{tian2022integrally, + title={Integrally Pre-Trained Transformer Pyramid Networks}, + author={Tian, Yunjie and Xie, Lingxi and Wang, Zhaozhi and Wei, Longhui and Zhang, Xiaopeng and Jiao, Jianbin and Wang, Yaowei and Tian, Qi and Ye, Qixiang}, + journal={arXiv preprint arXiv:2211.12735}, + year={2022} +} +``` diff --git a/configs/itpn/itpn-clip-b_hivit-base-p16_8xb256-amp-coslr-300e_in1k.py b/configs/itpn/itpn-clip-b_hivit-base-p16_8xb256-amp-coslr-300e_in1k.py new file mode 100644 index 0000000..40f35d9 --- /dev/null +++ b/configs/itpn/itpn-clip-b_hivit-base-p16_8xb256-amp-coslr-300e_in1k.py @@ -0,0 +1,84 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs256_itpn.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='iTPN', + backbone=dict( + type='iTPNHiViT', + arch='base', + drop_path_rate=0.0, + rpe=True, + layer_scale_init_value=0.1, + reconstruction_type='clip'), + neck=dict( + type='iTPNPretrainDecoder', + patch_size=16, + in_chans=3, + embed_dim=512, + mlp_ratio=4., + reconstruction_type='clip', + # transformer pyramid + fpn_dim=256, + fpn_depth=2, + num_outs=3, + ), + head=dict( + type='iTPNClipHead', + embed_dims=512, + num_embed=512, + loss=dict(type='CosineSimilarityLoss')), + target_generator=dict( + type='CLIPGenerator', + tokenizer_path= # noqa + 'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/clip_vit_base_16.pth.tar' # noqa + ), +) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + # betas: (0.9, 0.98) for 300 epochs and (0.9, 0.999) for 1600 epochs. 
+ optimizer=dict( + type='AdamW', lr=1.5e-3, betas=(0.9, 0.98), weight_decay=0.05), + clip_grad=dict(max_norm=3.0), + paramwise_cfg=dict( + custom_keys={ + '.norm': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.gamma': dict(decay_mult=0.0), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=1e-5, + by_epoch=True, + begin=10, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/itpn/itpn-clip-b_hivit-base-p16_8xb256-amp-coslr-800e_in1k.py b/configs/itpn/itpn-clip-b_hivit-base-p16_8xb256-amp-coslr-800e_in1k.py new file mode 100644 index 0000000..2c624e7 --- /dev/null +++ b/configs/itpn/itpn-clip-b_hivit-base-p16_8xb256-amp-coslr-800e_in1k.py @@ -0,0 +1,84 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs256_itpn.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='iTPN', + backbone=dict( + type='iTPNHiViT', + arch='base', + drop_path_rate=0.1, + rpe=True, + layer_scale_init_value=0.1, + reconstruction_type='clip'), + neck=dict( + type='iTPNPretrainDecoder', + patch_size=16, + in_chans=3, + embed_dim=512, + mlp_ratio=4., + reconstruction_type='clip', + # transformer pyramid + fpn_dim=256, + fpn_depth=2, + num_outs=3, + ), + head=dict( + type='iTPNClipHead', + embed_dims=512, + num_embed=512, + loss=dict(type='CrossEntropyLoss')), + target_generator=dict( + type='CLIPGenerator', + tokenizer_path= # noqa + 'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/clip_vit_base_16.pth.tar' # noqa + ), +) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + # betas: (0.9, 0.98) for 300 epochs and (0.9, 0.999) for 800/1600 epochs. + optimizer=dict( + type='AdamW', lr=1.5e-3, betas=(0.9, 0.999), weight_decay=0.05), + clip_grad=dict(max_norm=3.0), + paramwise_cfg=dict( + custom_keys={ + '.norm': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.gamma': dict(decay_mult=0.0), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + eta_min=1e-5, + by_epoch=True, + begin=10, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-1600e_in1k.py b/configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-1600e_in1k.py new file mode 100644 index 0000000..d324a44 --- /dev/null +++ b/configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-1600e_in1k.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/itpn_hivit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1600) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-400e_in1k.py b/configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-400e_in1k.py new file mode 100644 index 0000000..c489dda --- /dev/null +++ b/configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-400e_in1k.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/itpn_hivit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=400) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-800e_in1k.py b/configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-800e_in1k.py new file mode 100644 index 0000000..ebc5be0 --- /dev/null +++ b/configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-800e_in1k.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/itpn_hivit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=760, + by_epoch=True, + begin=40, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-1600e_in1k.py b/configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-1600e_in1k.py new file mode 100644 index 0000000..359191b --- /dev/null +++ b/configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-1600e_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/itpn_hivit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(type='iTPNHiViT', arch='large'), + neck=dict(type='iTPNPretrainDecoder', embed_dim=768)) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1600) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-400e_in1k.py b/configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-400e_in1k.py new file mode 100644 index 0000000..ca4ba00 --- /dev/null +++ b/configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-400e_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/itpn_hivit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(type='iTPNHiViT', arch='large'), + neck=dict(type='iTPNPretrainDecoder', embed_dim=768)) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=400) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-800e_in1k.py b/configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-800e_in1k.py new file mode 100644 index 0000000..b1e298b --- /dev/null +++ b/configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-800e_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/itpn_hivit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(type='iTPNHiViT', arch='large'), + neck=dict(type='iTPNPretrainDecoder', embed_dim=768)) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=760, + by_epoch=True, + begin=40, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/itpn/metafile.yml b/configs/itpn/metafile.yml new file mode 100644 index 0000000..b8f5844 --- /dev/null +++ b/configs/itpn/metafile.yml @@ -0,0 +1,50 @@ +Collections: + - Name: iTPN + Metadata: + Architecture: + - Dense Connections + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + Paper: + Title: 'Integrally Pre-Trained Transformer Pyramid Networks' + URL: https://arxiv.org/abs/2211.12735 + README: configs/itpn/README.md + Code: + URL: null + Version: null + +Models: + - Name: itpn-clip-b_hivit-base-p16_8xb256-amp-coslr-800e_in1k + Metadata: + FLOPs: 18474000000 + Parameters: 233000000 + Training Data: + - ImageNet-1k + In Collection: iTPN + Results: null + Weights: + Config: configs/itpn/itpn-clip-b_hivit-base-p16_8xb256-amp-coslr-800e_in1k.py + + - Name: itpn-pixel_hivit-base-p16_8xb512-amp-coslr-800e_in1k + Metadata: + FLOPs: 18474000000 + Parameters: 103000000 + Training Data: + - ImageNet-1k + In Collection: iTPN + Results: null + Weights: + Config: configs/itpn/itpn-pixel_hivit-base-p16_8xb512-amp-coslr-800e_in1k.py + + - Name: itpn-pixel_hivit-large-p16_8xb512-amp-coslr-800e_in1k + Metadata: + FLOPs: 63977000000 + Parameters: 314000000 + Training Data: + - ImageNet-1k + In Collection: iTPN + Results: null + Weights: + Config: configs/itpn/itpn-pixel_hivit-large-p16_8xb512-amp-coslr-800e_in1k.py diff --git a/configs/lenet/README.md b/configs/lenet/README.md new file mode 100644 index 0000000..2cd68ea --- /dev/null +++ b/configs/lenet/README.md @@ -0,0 +1,28 @@ +# LeNet + +> [Backpropagation Applied to Handwritten Zip Code Recognition](https://ieeexplore.ieee.org/document/6795724) + + + +## Abstract + +The ability of learning networks to generalize can be greatly enhanced by providing constraints from the task domain. This paper demonstrates how such constraints can be integrated into a backpropagation network through the architecture of the network. This approach has been successfully applied to the recognition of handwritten zip code digits provided by the U.S. Postal Service. A single network learns the entire recognition operation, going from the normalized image of the character to the final classification. + +
+ +
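+For reference, the `LeNet5` backbone used by the config below is a small convolutional network over 32x32 single-channel images. The following is a simplified, self-contained approximation of that architecture; the layer sizes follow the classic LeNet-5 description and may differ from this repository's backbone in detail.
+
+```python
+import torch
+import torch.nn as nn
+
+# A simplified LeNet-5-style network for 32x32 grayscale digits.
+# Illustrative only; the `LeNet5` backbone here may differ in details.
+
+
+class LeNet5Sketch(nn.Module):
+    def __init__(self, num_classes=10):
+        super().__init__()
+        self.features = nn.Sequential(
+            nn.Conv2d(1, 6, kernel_size=5), nn.Tanh(),    # 32x32 -> 28x28
+            nn.AvgPool2d(2),                              # 28x28 -> 14x14
+            nn.Conv2d(6, 16, kernel_size=5), nn.Tanh(),   # 14x14 -> 10x10
+            nn.AvgPool2d(2),                              # 10x10 -> 5x5
+            nn.Conv2d(16, 120, kernel_size=5), nn.Tanh(),  # 5x5 -> 1x1
+        )
+        self.classifier = nn.Sequential(
+            nn.Flatten(),
+            nn.Linear(120, 84), nn.Tanh(),
+            nn.Linear(84, num_classes),
+        )
+
+    def forward(self, x):
+        return self.classifier(self.features(x))
+
+
+logits = LeNet5Sketch()(torch.rand(4, 1, 32, 32))
+print(logits.shape)  # torch.Size([4, 10])
+```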
+ +## Citation + +``` +@ARTICLE{6795724, + author={Y. {LeCun} and B. {Boser} and J. S. {Denker} and D. {Henderson} and R. E. {Howard} and W. {Hubbard} and L. D. {Jackel}}, + journal={Neural Computation}, + title={Backpropagation Applied to Handwritten Zip Code Recognition}, + year={1989}, + volume={1}, + number={4}, + pages={541-551}, + doi={10.1162/neco.1989.1.4.541}} +} +``` diff --git a/configs/lenet/lenet5_mnist.py b/configs/lenet/lenet5_mnist.py new file mode 100644 index 0000000..0ae8192 --- /dev/null +++ b/configs/lenet/lenet5_mnist.py @@ -0,0 +1,89 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict(type='LeNet5', num_classes=10), + neck=None, + head=dict( + type='ClsHead', + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# dataset settings +dataset_type = 'MNIST' +data_preprocessor = dict(mean=[33.46], std=[78.87], num_classes=10) + +pipeline = [dict(type='Resize', scale=32), dict(type='PackInputs')] + +common_data_cfg = dict( + type=dataset_type, data_prefix='data/mnist', pipeline=pipeline) + +train_dataloader = dict( + batch_size=128, + num_workers=2, + dataset=dict(**common_data_cfg, test_mode=False), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=128, + num_workers=2, + dataset=dict(**common_data_cfg, test_mode=True), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, )) + +test_dataloader = val_dataloader +test_evaluator = val_evaluator + +# schedule settings +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) + +param_scheduler = dict( + type='MultiStepLR', # learning policy, decay on several milestones. + by_epoch=True, # update based on epoch. + milestones=[15], # decay at the 15th epochs. + gamma=0.1, # decay to 0.1 times. +) + +train_cfg = dict(by_epoch=True, max_epochs=5, val_interval=1) # train 5 epochs +val_cfg = dict() +test_cfg = dict() + +# runtime settings +default_scope = 'mmpretrain' + +default_hooks = dict( + # record the time of every iteration. + timer=dict(type='IterTimerHook'), + # print log every 150 iterations. + logger=dict(type='LoggerHook', interval=150), + # enable the parameter scheduler. + param_scheduler=dict(type='ParamSchedulerHook'), + # save checkpoint per epoch. + checkpoint=dict(type='CheckpointHook', interval=1), + # set sampler seed in distributed evrionment. + sampler_seed=dict(type='DistSamplerSeedHook'), +) + +env_cfg = dict( + # disable cudnn benchmark + cudnn_benchmark=False, + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume the training of the checkpoint +resume_from = None + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (1 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=128) diff --git a/configs/levit/README.md b/configs/levit/README.md new file mode 100644 index 0000000..234edb6 --- /dev/null +++ b/configs/levit/README.md @@ -0,0 +1,81 @@ +# LeViT + +> [LeViT: a Vision Transformer in ConvNet’s Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) + + + +## Abstract + +We design a family of image classification architectures that optimize the trade-off between accuracy and efficiency in a high-speed regime. 
Our work exploits recent findings in attention-based architectures, which are competitive on highly parallel processing hardware. We revisit principles from the extensive literature on convolutional neural networks to apply them to transformers, in particular activation maps with decreasing resolutions. We also introduce the attention bias, a new way to integrate positional information in vision transformers. As a result, we propose LeVIT: a hybrid neural network for fast inference image classification. We consider different measures of efficiency on different hardware platforms, so as to best reflect a wide range of application scenarios. Our extensive experiments empirically validate our technical choices and show they are suitable to most architectures. Overall, LeViT significantly outperforms existing convnets and vision transformers with respect to the speed/accuracy tradeoff. For example, at 80% ImageNet top-1 accuracy, LeViT is 5 times faster than EfficientNet on CPU. + +
+ +
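+The "attention bias" mentioned in the abstract replaces explicit positional embeddings with a learned per-head bias, indexed by the relative offset between query and key positions and added directly to the attention logits. The following is a toy sketch of that idea under those assumptions; the actual LeViT attention blocks in this repository differ in details (for example, the deploy-mode fusion switched on by the configs under `deploy/`).
+
+```python
+import torch
+import torch.nn as nn
+
+# Toy sketch: a learned bias shared by all (query, key) pairs with the same
+# relative offset, added to the attention logits. Illustrative only.
+
+
+class AttentionBias(nn.Module):
+    def __init__(self, num_heads, resolution):
+        super().__init__()
+        points = [(i, j) for i in range(resolution) for j in range(resolution)]
+        offsets = {}
+        idxs = []
+        for p1 in points:
+            for p2 in points:
+                offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
+                if offset not in offsets:
+                    offsets[offset] = len(offsets)
+                idxs.append(offsets[offset])
+        # One learnable bias per head and per distinct relative offset.
+        self.bias = nn.Parameter(torch.zeros(num_heads, len(offsets)))
+        self.register_buffer(
+            'idxs', torch.tensor(idxs).view(len(points), len(points)))
+
+    def forward(self, attn_logits):
+        # attn_logits: (B, num_heads, N, N) with N == resolution ** 2
+        return attn_logits + self.bias[:, self.idxs]
+
+
+bias = AttentionBias(num_heads=4, resolution=7)
+logits = torch.rand(2, 4, 49, 49)
+print(bias(logits).shape)  # torch.Size([2, 4, 49, 49])
+```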
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('levit-128s_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('levit-128s_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/levit/levit-128s_8xb256_in1k.py https://download.openmmlab.com/mmclassification/v0/levit/levit-128s_3rdparty_in1k_20230117-e9fbd209.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :--------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------: | :--------------------------------------------------------------------------------------: | +| `levit-128s_3rdparty_in1k`\* | From scratch | 7.39 | 0.31 | 76.51 | 92.90 | [config](levit-128s_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/levit/levit-128s_3rdparty_in1k_20230117-e9fbd209.pth) | +| `levit-128_3rdparty_in1k`\* | From scratch | 8.83 | 0.41 | 78.58 | 93.95 | [config](levit-128_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/levit/levit-128_3rdparty_in1k_20230117-3be02a02.pth) | +| `levit-192_3rdparty_in1k`\* | From scratch | 10.56 | 0.67 | 79.86 | 94.75 | [config](levit-192_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/levit/levit-192_3rdparty_in1k_20230117-8217a0f9.pth) | +| `levit-256_3rdparty_in1k`\* | From scratch | 18.38 | 1.14 | 81.59 | 95.46 | [config](levit-256_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/levit/levit-256_3rdparty_in1k_20230117-5ae2ce7d.pth) | +| `levit-384_3rdparty_in1k`\* | From scratch | 38.36 | 2.37 | 82.59 | 95.95 | [config](levit-384_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/levit/levit-384_3rdparty_in1k_20230117-f3539cce.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/LeViT). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@InProceedings{Graham_2021_ICCV, + author = {Graham, Benjamin and El-Nouby, Alaaeldin and Touvron, Hugo and Stock, Pierre and Joulin, Armand and Jegou, Herve and Douze, Matthijs}, + title = {LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference}, + booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2021}, + pages = {12259-12269} +} +``` diff --git a/configs/levit/deploy/levit-128_8xb256_in1k.py b/configs/levit/deploy/levit-128_8xb256_in1k.py new file mode 100644 index 0000000..ab58119 --- /dev/null +++ b/configs/levit/deploy/levit-128_8xb256_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../levit-128_8xb256_in1k.py' + +model = dict(backbone=dict(deploy=True), head=dict(deploy=True)) diff --git a/configs/levit/deploy/levit-128s_8xb256_in1k.py b/configs/levit/deploy/levit-128s_8xb256_in1k.py new file mode 100644 index 0000000..93ebc37 --- /dev/null +++ b/configs/levit/deploy/levit-128s_8xb256_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../levit-128s_8xb256_in1k.py' + +model = dict(backbone=dict(deploy=True), head=dict(deploy=True)) diff --git a/configs/levit/deploy/levit-192_8xb256_in1k.py b/configs/levit/deploy/levit-192_8xb256_in1k.py new file mode 100644 index 0000000..34249fd --- /dev/null +++ b/configs/levit/deploy/levit-192_8xb256_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../levit-192_8xb256_in1k.py' + +model = dict(backbone=dict(deploy=True), head=dict(deploy=True)) diff --git a/configs/levit/deploy/levit-256_8xb256_in1k.py b/configs/levit/deploy/levit-256_8xb256_in1k.py new file mode 100644 index 0000000..687f835 --- /dev/null +++ b/configs/levit/deploy/levit-256_8xb256_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../levit-256_8xb256_in1k.py' + +model = dict(backbone=dict(deploy=True), head=dict(deploy=True)) diff --git a/configs/levit/deploy/levit-384_8xb256_in1k.py b/configs/levit/deploy/levit-384_8xb256_in1k.py new file mode 100644 index 0000000..9a83d47 --- /dev/null +++ b/configs/levit/deploy/levit-384_8xb256_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../levit-384_8xb256_in1k.py' + +model = dict(backbone=dict(deploy=True), head=dict(deploy=True)) diff --git a/configs/levit/levit-128_8xb256_in1k.py b/configs/levit/levit-128_8xb256_in1k.py new file mode 100644 index 0000000..cdec48e --- /dev/null +++ b/configs/levit/levit-128_8xb256_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/levit-256-p16.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs2048_adamw_levit.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict(backbone=dict(arch='128'), head=dict(in_channels=384)) + +# dataset settings +train_dataloader = dict(batch_size=256) diff --git a/configs/levit/levit-128s_8xb256_in1k.py b/configs/levit/levit-128s_8xb256_in1k.py new file mode 100644 index 0000000..0564cac --- /dev/null +++ b/configs/levit/levit-128s_8xb256_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/levit-256-p16.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs2048_adamw_levit.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict(backbone=dict(arch='128s'), head=dict(in_channels=384)) + +# dataset settings +train_dataloader = dict(batch_size=256) diff --git a/configs/levit/levit-192_8xb256_in1k.py b/configs/levit/levit-192_8xb256_in1k.py new file mode 100644 index 0000000..dfbf70e --- /dev/null +++ b/configs/levit/levit-192_8xb256_in1k.py @@ -0,0 +1,12 @@ +_base_ 
= [ + '../_base_/models/levit-256-p16.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs2048_adamw_levit.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict(backbone=dict(arch='192'), head=dict(in_channels=384)) + +# dataset settings +train_dataloader = dict(batch_size=256) diff --git a/configs/levit/levit-256_8xb256_in1k.py b/configs/levit/levit-256_8xb256_in1k.py new file mode 100644 index 0000000..e961e77 --- /dev/null +++ b/configs/levit/levit-256_8xb256_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/levit-256-p16.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs2048_adamw_levit.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_dataloader = dict(batch_size=256) diff --git a/configs/levit/levit-384_8xb256_in1k.py b/configs/levit/levit-384_8xb256_in1k.py new file mode 100644 index 0000000..10ceac4 --- /dev/null +++ b/configs/levit/levit-384_8xb256_in1k.py @@ -0,0 +1,15 @@ +_base_ = [ + '../_base_/models/levit-256-p16.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs2048_adamw_levit.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(arch='384', drop_path_rate=0.1), + head=dict(in_channels=768), +) + +# dataset settings +train_dataloader = dict(batch_size=256) diff --git a/configs/levit/metafile.yml b/configs/levit/metafile.yml new file mode 100644 index 0000000..78b62c5 --- /dev/null +++ b/configs/levit/metafile.yml @@ -0,0 +1,101 @@ +Collections: + - Name: LeViT + Metadata: + Training Data: ImageNet-1k + Architecture: + - 1x1 Convolution + - LeViT Attention Block + Paper: + Title: "LeViT: a Vision Transformer in ConvNet\u2019s Clothing for Faster Inference" + URL: https://arxiv.org/abs/2104.01136 + README: configs/levit/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/levit.py + Version: v1.0.0rc5 + +Models: + - Name: levit-128s_3rdparty_in1k + Metadata: + FLOPs: 310342496 + Parameters: 7391290 + Training Data: ImageNet-1k + In Collection: LeViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.51 + Top 5 Accuracy: 92.90 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/levit/levit-128s_3rdparty_in1k_20230117-e9fbd209.pth + Config: configs/levit/levit-128s_8xb256_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/LeViT/LeViT-128S-96703c44.pth + Code: https://github.com/facebookresearch/LeViT + - Name: levit-128_3rdparty_in1k + Metadata: + FLOPs: 413060992 + Parameters: 8828168 + Training Data: ImageNet-1k + In Collection: LeViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.58 + Top 5 Accuracy: 93.95 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/levit/levit-128_3rdparty_in1k_20230117-3be02a02.pth + Config: configs/levit/levit-128_8xb256_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/LeViT/LeViT-128-b88c2750.pth + Code: https://github.com/facebookresearch/LeViT + - Name: levit-192_3rdparty_in1k + Metadata: + FLOPs: 667860704 + Parameters: 10561301 + Training Data: ImageNet-1k + In Collection: LeViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.86 + Top 5 Accuracy: 94.75 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/levit/levit-192_3rdparty_in1k_20230117-8217a0f9.pth + Config: 
configs/levit/levit-192_8xb256_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/LeViT/LeViT-192-92712e41.pth + Code: https://github.com/facebookresearch/LeViT + - Name: levit-256_3rdparty_in1k + Metadata: + FLOPs: 1141625216 + Parameters: 18379852 + Training Data: ImageNet-1k + In Collection: LeViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.59 + Top 5 Accuracy: 95.46 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/levit/levit-256_3rdparty_in1k_20230117-5ae2ce7d.pth + Config: configs/levit/levit-256_8xb256_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/LeViT/LeViT-256-13b5763e.pth + Code: https://github.com/facebookresearch/LeViT + - Name: levit-384_3rdparty_in1k + Metadata: + FLOPs: 2372941568 + Parameters: 38358300 + Training Data: ImageNet-1k + In Collection: LeViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.59 + Top 5 Accuracy: 95.95 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/levit/levit-384_3rdparty_in1k_20230117-f3539cce.pth + Config: configs/levit/levit-384_8xb256_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/LeViT/LeViT-384-9bdaf2e2.pth + Code: https://github.com/facebookresearch/LeViT diff --git a/configs/llava/README.md b/configs/llava/README.md new file mode 100644 index 0000000..581abfe --- /dev/null +++ b/configs/llava/README.md @@ -0,0 +1,51 @@ +# LLaVA + +> [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) + + + +## Abstract + +Instruction tuning large language models (LLMs) using machine-generated instruction-following data has improved zero-shot capabilities on new tasks, but the idea is less explored in the multimodal field. In this paper, we present the first attempt to use language-only GPT-4 to generate multimodal language-image instruction-following data. By instruction tuning on such generated data, we introduce LLaVA: Large Language and Vision Assistant, an end-to-end trained large multimodal model that connects a vision encoder and LLM for general-purpose visual and language understanding.Our early experiments show that LLaVA demonstrates impressive multimodel chat abilities, sometimes exhibiting the behaviors of multimodal GPT-4 on unseen images/instructions, and yields a 85.1% relative score compared with GPT-4 on a synthetic multimodal instruction-following dataset. When fine-tuned on Science QA, the synergy of LLaVA and GPT-4 achieves a new state-of-the-art accuracy of 92.53%. We make GPT-4 generated visual instruction tuning data, our model and code base publicly available. + +
+ +
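+The piece that "connects a vision encoder and LLM" is a small projection module: patch features from the CLIP vision tower are mapped into the language model's embedding space and spliced into the prompt as visual tokens. The sketch below illustrates only that projector, assuming the 1024-dim vision features from the configs below and a 4096-dim LLaMA-7B hidden size (an assumption made for this example). Reading the configs, `mm_proj_depth=1` appears to correspond to a single linear layer and `mm_proj_depth=2` to a two-layer MLP; the real `Llava` model also handles prompt formatting and generation.
+
+```python
+import torch
+import torch.nn as nn
+
+# Toy sketch of the vision-to-language projector. Dimensions are assumptions
+# aligned with the configs below, not values read from the checkpoint.
+
+
+def build_projector(vision_dim, llm_dim, depth):
+    """depth=1 -> single linear layer; depth>=2 -> MLP with GELU in between."""
+    layers = [nn.Linear(vision_dim, llm_dim)]
+    for _ in range(depth - 1):
+        layers += [nn.GELU(), nn.Linear(llm_dim, llm_dim)]
+    return nn.Sequential(*layers)
+
+
+projector = build_projector(vision_dim=1024, llm_dim=4096, depth=2)
+patch_feats = torch.rand(1, 576, 1024)   # 24x24 patches from ViT-L/14 at 336px
+visual_tokens = projector(patch_feats)
+print(visual_tokens.shape)  # torch.Size([1, 576, 4096])
+```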
+ +## How to use it? + + + +**Use the model** + +```python +import torch +from mmpretrain import get_model, inference_model + +out = inference_model('llava-7b-v1_caption', 'demo/cat-dog.png', device='cuda') +print(out) +# {'pred_caption': 'In the image, there are two cats sitting on a blanket.'} +``` + + + +## Models and results + +### Image Caption on COCO + +| Model | Params (M) | Config | Download | +| :---------------------- | :--------: | :--------------------------------: | :-------------------------------------------------------------------------------------------------------------: | +| `llava-7b-v1_caption` | 7045.82 | [config](llava-7b-v1_caption.py) | [ckpt](https://download.openmmlab.com/mmclassification/v1/llava/llava-7b-v1_liuhaotian_20231025-c9e119b6.pth) | +| `llava-7b-v1.5_caption` | 7062.90 | [config](llava-7b-v1.5_caption.py) | [ckpt](https://download.openmmlab.com/mmclassification/v1/llava/llava-7b-v1.5_liuhaotian_20231025-5828aa5a.pth) | +| `llava-7b-v1.5_vqa` | 7062.90 | [config](llava-7b-v1.5_vqa.py) | [ckpt](https://download.openmmlab.com/mmclassification/v1/llava/llava-7b-v1.5_liuhaotian_20231025-5828aa5a.pth) | + +## Citation + +```bibtex +@misc{liu2023llava, + title={Visual Instruction Tuning}, + author={Liu, Haotian and Li, Chunyuan and Wu, Qingyang and Lee, Yong Jae}, + publisher={arXiv:2304.08485}, + year={2023}, +} +``` diff --git a/configs/llava/llava-7b-v1.5_caption.py b/configs/llava/llava-7b-v1.5_caption.py new file mode 100644 index 0000000..371c9b5 --- /dev/null +++ b/configs/llava/llava-7b-v1.5_caption.py @@ -0,0 +1,76 @@ +_base_ = '../_base_/default_runtime.py' + +meta_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions." # noqa: E501 +image_size = 336 +prompt_tmpl = f'''{meta_prompt} User: +Describe the image in detail. 
ASSISTANT:''' + +# model settings +model = dict( + type='Llava', + tokenizer=dict( + type='AutoTokenizer', name_or_path='liuhaotian/llava-v1.5-7b'), + vision_encoder=dict( + type='VisionTransformer', + arch='l', + patch_size=14, + img_size=image_size, + pre_norm=True, + norm_cfg=dict(type='LN', eps=1e-5), + layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), + final_norm=False, + out_type='raw', + pretrained='https://download.openmmlab.com/mmclassification/v0/clip/' + 'vit-large-p14_clip-openai-pre_336px_20231025-fb1315ed.pth', + ), + mm_hidden_size=1024, + use_im_patch=False, + use_im_start_end=False, + mm_proj_depth=2, + lang_encoder=dict( + type='AutoModelForCausalLM', + name_or_path='huggyllama/llama-7b', + ), + task='caption', + prompt_tmpl=prompt_tmpl, + generation_cfg=dict(num_beams=3, max_new_tokens=50, length_penalty=-1.0), +) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(image_size, image_size), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id']), +] + +test_dataloader = dict( + batch_size=8, + num_workers=5, + dataset=dict( + type='COCOCaption', + data_root='data/coco', + ann_file='annotations/coco_karpathy_val.json', + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +test_evaluator = dict( + type='COCOCaption', + ann_file='data/coco/annotations/coco_karpathy_val_gt.json', +) + +# schedule settings +test_cfg = dict() diff --git a/configs/llava/llava-7b-v1.5_vqa.py b/configs/llava/llava-7b-v1.5_vqa.py new file mode 100644 index 0000000..5cb9812 --- /dev/null +++ b/configs/llava/llava-7b-v1.5_vqa.py @@ -0,0 +1,76 @@ +_base_ = '../_base_/default_runtime.py' + +meta_prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions." 
# noqa: E501 +image_size = 336 +prompt_tmpl = f'''{meta_prompt} User: +{{question}} ASSISTANT:''' + +# model settings +model = dict( + type='Llava', + tokenizer=dict( + type='AutoTokenizer', name_or_path='liuhaotian/llava-v1.5-7b'), + vision_encoder=dict( + type='VisionTransformer', + arch='l', + patch_size=14, + img_size=image_size, + pre_norm=True, + norm_cfg=dict(type='LN', eps=1e-5), + layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), + final_norm=False, + out_type='raw', + pretrained='https://download.openmmlab.com/mmclassification/v0/clip/' + 'vit-large-p14_clip-openai-pre_336px_20231025-fb1315ed.pth', + ), + mm_hidden_size=1024, + use_im_patch=False, + use_im_start_end=False, + mm_proj_depth=2, + lang_encoder=dict( + type='AutoModelForCausalLM', + name_or_path='huggyllama/llama-7b', + ), + task='vqa', + prompt_tmpl=prompt_tmpl, + generation_cfg=dict(max_new_tokens=100), +) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(image_size, image_size), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id', 'question']), +] + +test_dataloader = dict( + batch_size=8, + num_workers=5, + dataset=dict( + type='COCOCaption', + data_root='data/coco', + ann_file='annotations/coco_karpathy_val.json', + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +test_evaluator = dict( + type='COCOCaption', + ann_file='data/coco/annotations/coco_karpathy_val_gt.json', +) + +# schedule settings +test_cfg = dict() diff --git a/configs/llava/llava-7b-v1_caption.py b/configs/llava/llava-7b-v1_caption.py new file mode 100644 index 0000000..92e2d1f --- /dev/null +++ b/configs/llava/llava-7b-v1_caption.py @@ -0,0 +1,78 @@ +_base_ = '../_base_/default_runtime.py' + +meta_prompt = 'You are LLaVA, a large language and vision assistant trained by UW Madison WAIV Lab.You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.Follow the instructions carefully and explain your answers in detail.' # noqa: E501 +image_size = 224 +prompt_tmpl = f'''{meta_prompt} User: +Describe the image in detail. 
ASSISTANT:''' + +# model settings +model = dict( + type='Llava', + tokenizer=dict( + type='AutoTokenizer', + name_or_path='liuhaotian/LLaVA-Lightning-7B-delta-v1-1'), + vision_encoder=dict( + type='VisionTransformer', + arch='l', + patch_size=14, + img_size=image_size, + pre_norm=True, + norm_cfg=dict(type='LN', eps=1e-5), + layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), + final_norm=False, + out_type='raw', + pretrained=( + 'https://download.openmmlab.com/mmclassification/v0/clip/' + 'vit-large-p14_clip-openai-pre_3rdparty_20230517-95e2af0b.pth'), + ), + mm_hidden_size=1024, + use_im_patch=False, + use_im_start_end=True, + mm_proj_depth=1, + lang_encoder=dict( + type='AutoModelForCausalLM', + name_or_path='huggyllama/llama-7b', + ), + task='caption', + prompt_tmpl=prompt_tmpl, + generation_cfg=dict(max_new_tokens=50), +) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(image_size, image_size), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id']), +] + +test_dataloader = dict( + batch_size=8, + num_workers=5, + dataset=dict( + type='COCOCaption', + data_root='data/coco', + ann_file='annotations/coco_karpathy_val.json', + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +test_evaluator = dict( + type='COCOCaption', + ann_file='data/coco/annotations/coco_karpathy_val_gt.json', +) + +# schedule settings +test_cfg = dict() diff --git a/configs/llava/metafile.yml b/configs/llava/metafile.yml new file mode 100644 index 0000000..406a214 --- /dev/null +++ b/configs/llava/metafile.yml @@ -0,0 +1,51 @@ +Collections: + - Name: LLaVA + Metadata: + Architecture: + - LLaMA + - CLIP + Paper: + Title: Visual Instruction Tuning + URL: https://arxiv.org/abs/2304.08485 + README: configs/llava/README.md + +Models: + - Name: llava-7b-v1_caption + Metadata: + FLOPs: null + Parameters: 7045816320 + In Collection: LLaVA + Results: + - Task: Image Caption + Dataset: COCO + Metrics: + BLEU-4: null + CIDER: null + Weights: https://download.openmmlab.com/mmclassification/v1/llava/llava-7b-v1_liuhaotian_20231025-c9e119b6.pth + Config: configs/llava/llava-7b-v1_caption.py + - Name: llava-7b-v1.5_caption + Metadata: + FLOPs: null + Parameters: 7062900736 + In Collection: LLaVA + Results: + - Task: Image Caption + Dataset: COCO + Metrics: + BLEU-4: null + CIDER: null + Weights: https://download.openmmlab.com/mmclassification/v1/llava/llava-7b-v1.5_liuhaotian_20231025-5828aa5a.pth + Config: configs/llava/llava-7b-v1.5_caption.py + - Name: llava-7b-v1.5_vqa + Metadata: + FLOPs: null + Parameters: 7062900736 + In Collection: LLaVA + Results: + - Task: Visual Question Answering + Dataset: COCO + Metrics: + BLEU-4: null + CIDER: null + Weights: https://download.openmmlab.com/mmclassification/v1/llava/llava-7b-v1.5_liuhaotian_20231025-5828aa5a.pth + Config: configs/llava/llava-7b-v1.5_vqa.py diff --git a/configs/mae/README.md b/configs/mae/README.md new file mode 100644 index 0000000..69f5f9b --- /dev/null +++ b/configs/mae/README.md @@ -0,0 +1,123 @@ +# MAE + +> [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) + + + +## Abstract + +This paper shows that masked autoencoders (MAE) are +scalable self-supervised learners for computer vision. 
Our +MAE approach is simple: we mask random patches of the +input image and reconstruct the missing pixels. It is based +on two core designs. First, we develop an asymmetric +encoder-decoder architecture, with an encoder that operates only on the +visible subset of patches (without mask tokens), along with a lightweight +decoder that reconstructs the original image from the latent representation +and mask tokens. Second, we find that masking a high proportion +of the input image, e.g., 75%, yields a nontrivial and +meaningful self-supervisory task. Coupling these two designs enables us to +train large models efficiently and effectively: we accelerate +training (by 3× or more) and improve accuracy. Our scalable approach allows +for learning high-capacity models that generalize well: e.g., a vanilla +ViT-Huge model achieves the best accuracy (87.8%) among +methods that use only ImageNet-1K data. Transfer performance in downstream tasks outperforms supervised pretraining and shows promising scaling behavior. + +
+ +
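+The core of the recipe above is the random masking step: drop a high proportion (75% by default) of patch tokens, feed only the visible ones to the encoder, and remember the permutation so the decoder can reinsert mask tokens at the right positions. A minimal self-contained sketch of that step is shown below; it illustrates the algorithm described in the paper and is not the exact implementation shipped with these configs.
+
+```python
+import torch
+
+# Toy sketch of MAE-style random masking: keep only 25% of patch tokens for
+# the encoder and record which positions were dropped so a decoder can put
+# mask tokens back in their place.
+
+
+def random_masking(tokens, mask_ratio=0.75):
+    """tokens: (B, N, C) patch embeddings. Returns visible tokens, mask, ids_restore."""
+    B, N, C = tokens.shape
+    num_keep = int(N * (1 - mask_ratio))
+
+    noise = torch.rand(B, N)                  # per-patch random scores
+    ids_shuffle = noise.argsort(dim=1)        # ascending: low noise == kept
+    ids_restore = ids_shuffle.argsort(dim=1)  # inverse permutation
+
+    ids_keep = ids_shuffle[:, :num_keep]
+    visible = torch.gather(
+        tokens, 1, ids_keep.unsqueeze(-1).expand(-1, -1, C))
+
+    # Binary mask in the original patch order: 0 = kept, 1 = masked.
+    mask = torch.ones(B, N)
+    mask[:, :num_keep] = 0
+    mask = torch.gather(mask, 1, ids_restore)
+    return visible, mask, ids_restore
+
+
+patches = torch.rand(2, 196, 768)             # 14x14 patches for a 224px ViT-B
+visible, mask, ids_restore = random_masking(patches)
+print(visible.shape, mask.sum(dim=1))         # (2, 49, 768), 147 masked per image
+```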
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('vit-base-p16_mae-300e-pre_8xb128-coslr-100e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('mae_vit-base-p16_8xb512-amp-coslr-300e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/mae/mae_vit-base-p16_8xb512-amp-coslr-300e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py None +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :---------------------------------------------- | :--------: | :-------: | :--------------------------------------------------------: | :--------------------------------------------------------------------------: | +| `mae_vit-base-p16_8xb512-amp-coslr-300e_in1k` | 111.91 | 17.58 | [config](mae_vit-base-p16_8xb512-amp-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-300e_in1k/mae_vit-base-p16_8xb512-coslr-300e-fp16_in1k_20220829-c2cf66ba.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-300e_in1k/mae_vit-base-p16_8xb512-coslr-300e-fp16_in1k_20220829-c2cf66ba.json) | +| `mae_vit-base-p16_8xb512-amp-coslr-400e_in1k` | 111.91 | 17.58 | [config](mae_vit-base-p16_8xb512-amp-coslr-400e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-400e_in1k/mae_vit-base-p16_8xb512-coslr-400e-fp16_in1k_20220825-bc79e40b.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-400e_in1k/mae_vit-base-p16_8xb512-coslr-400e-fp16_in1k_20220825-bc79e40b.json) | +| `mae_vit-base-p16_8xb512-amp-coslr-800e_in1k` | 111.91 | 17.58 | [config](mae_vit-base-p16_8xb512-amp-coslr-800e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-800e_in1k/mae_vit-base-p16_8xb512-coslr-800e-fp16_in1k_20220825-5d81fbc4.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-800e_in1k/mae_vit-base-p16_8xb512-coslr-800e-fp16_in1k_20220825-5d81fbc4.json) | +| `mae_vit-base-p16_8xb512-amp-coslr-1600e_in1k` | 111.91 | 17.58 | [config](mae_vit-base-p16_8xb512-amp-coslr-1600e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k_20220825-f7569ca2.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k_20220825-f7569ca2.json) | +| `mae_vit-large-p16_8xb512-amp-coslr-400e_in1k` | 329.54 | 61.60 | [config](mae_vit-large-p16_8xb512-amp-coslr-400e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-400e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-400e_in1k_20220825-b11d0425.pth) \| 
[log](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-400e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-400e_in1k_20220825-b11d0425.json) | +| `mae_vit-large-p16_8xb512-amp-coslr-800e_in1k` | 329.54 | 61.60 | [config](mae_vit-large-p16_8xb512-amp-coslr-800e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-800e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-800e_in1k_20220825-df72726a.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-800e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-800e_in1k_20220825-df72726a.json) | +| `mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k` | 329.54 | 61.60 | [config](mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-1600e_in1k_20220825-cc7e98c9.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-1600e_in1k_20220825-cc7e98c9.json) | +| `mae_vit-huge-p16_8xb512-amp-coslr-1600e_in1k` | 657.07 | 167.40 | [config](mae_vit-huge-p14_8xb512-amp-coslr-1600e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k_20220916-ff848775.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k_20220916-ff848775.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `vit-base-p16_mae-300e-pre_8xb128-coslr-100e_in1k` | [MAE 300-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-300e_in1k/mae_vit-base-p16_8xb512-coslr-300e-fp16_in1k_20220829-c2cf66ba.pth) | 86.57 | 17.58 | 83.10 | [config](benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py) | N/A | +| `vit-base-p16_mae-400e-pre_8xb128-coslr-100e_in1k` | [MAE 400-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-400e_in1k/mae_vit-base-p16_8xb512-coslr-400e-fp16_in1k_20220825-bc79e40b.pth) | 86.57 | 17.58 | 83.30 | [config](benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py) | N/A | +| `vit-base-p16_mae-800e-pre_8xb128-coslr-100e_in1k` | [MAE 800-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-800e_in1k/mae_vit-base-p16_8xb512-coslr-800e-fp16_in1k_20220825-5d81fbc4.pth) | 86.57 | 17.58 | 83.30 | [config](benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py) | N/A | +| `vit-base-p16_mae-1600e-pre_8xb128-coslr-100e_in1k` | [MAE 1600-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k_20220825-f7569ca2.pth) | 86.57 | 17.58 | 83.50 | [config](benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20220825-cf70aa21.pth) \| 
[log](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20220825-cf70aa21.json) | +| `vit-base-p16_mae-300e-pre_8xb2048-linear-coslr-90e_in1k` | [MAE 300-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-300e_in1k/mae_vit-base-p16_8xb512-coslr-300e-fp16_in1k_20220829-c2cf66ba.pth) | 86.57 | 17.58 | 60.80 | [config](benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py) | N/A | +| `vit-base-p16_mae-400e-pre_8xb2048-linear-coslr-90e_in1k` | [MAE 400-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-400e_in1k/mae_vit-base-p16_8xb512-coslr-400e-fp16_in1k_20220825-bc79e40b.pth) | 86.57 | 17.58 | 62.50 | [config](benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py) | N/A | +| `vit-base-p16_mae-800e-pre_8xb2048-linear-coslr-90e_in1k` | [MAE 800-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-800e_in1k/mae_vit-base-p16_8xb512-coslr-800e-fp16_in1k_20220825-5d81fbc4.pth) | 86.57 | 17.58 | 65.10 | [config](benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py) | N/A | +| `vit-base-p16_mae-1600e-pre_8xb2048-linear-coslr-90e_in1k` | [MAE 1600-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k_20220825-f7569ca2.pth) | 86.57 | 17.58 | 67.10 | [config](benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py) | N/A | +| `vit-large-p16_mae-400e-pre_8xb128-coslr-50e_in1k` | [MAE 400-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-400e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-400e_in1k_20220825-b11d0425.pth) | 304.32 | 61.60 | 85.20 | [config](benchmarks/vit-large-p16_8xb128-coslr-50e_in1k.py) | N/A | +| `vit-large-p16_mae-800e-pre_8xb128-coslr-50e_in1k` | [MAE 800-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-800e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-800e_in1k_20220825-df72726a.pth) | 304.32 | 61.60 | 85.40 | [config](benchmarks/vit-large-p16_8xb128-coslr-50e_in1k.py) | N/A | +| `vit-large-p16_mae-1600e-pre_8xb128-coslr-50e_in1k` | [MAE 1600-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-1600e_in1k_20220825-cc7e98c9.pth) | 304.32 | 61.60 | 85.70 | [config](benchmarks/vit-large-p16_8xb128-coslr-50e_in1k.py) | N/A | +| `vit-large-p16_mae-400e-pre_8xb2048-linear-coslr-90e_in1k` | [MAE 400-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-400e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-400e_in1k_20220825-b11d0425.pth) | 304.33 | 61.60 | 70.70 | [config](benchmarks/vit-large-p16_8xb2048-linear-coslr-90e_in1k.py) | N/A | +| `vit-large-p16_mae-800e-pre_8xb2048-linear-coslr-90e_in1k` | [MAE 800-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-800e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-800e_in1k_20220825-df72726a.pth) | 304.33 | 61.60 | 73.70 | [config](benchmarks/vit-large-p16_8xb2048-linear-coslr-90e_in1k.py) | N/A | +| `vit-large-p16_mae-1600e-pre_8xb2048-linear-coslr-90e_in1k` | [MAE 1600-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-1600e_in1k_20220825-cc7e98c9.pth) | 304.33 | 61.60 | 75.50 | 
[config](benchmarks/vit-large-p16_8xb2048-linear-coslr-90e_in1k.py) | N/A | +| `vit-huge-p14_mae-1600e-pre_8xb128-coslr-50e_in1k` | [MAE 1600-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k_20220916-ff848775.pth) | 632.04 | 167.40 | 86.90 | [config](benchmarks/vit-huge-p14_8xb128-coslr-50e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k/vit-huge-p16_ft-8xb128-coslr-50e_in1k/vit-huge-p16_ft-8xb128-coslr-50e_in1k_20220916-0bfc9bfd.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k/vit-huge-p16_ft-8xb128-coslr-50e_in1k/vit-huge-p16_ft-8xb128-coslr-50e_in1k_20220916-0bfc9bfd.json) | +| `vit-huge-p14_mae-1600e-pre_32xb8-coslr-50e_in1k-448px` | [MAE 1600-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k_20220916-ff848775.pth) | 633.03 | 732.13 | 87.30 | [config](benchmarks/vit-huge-p14_32xb8-coslr-50e_in1k-448px.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k/vit-huge-p16_ft-32xb8-coslr-50e_in1k-448/vit-huge-p16_ft-32xb8-coslr-50e_in1k-448_20220916-95b6a0ce.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k/vit-huge-p16_ft-32xb8-coslr-50e_in1k-448/vit-huge-p16_ft-32xb8-coslr-50e_in1k-448_20220916-95b6a0ce.json) | + +## Citation + +```bibtex +@article{He2021MaskedAA, + title={Masked Autoencoders Are Scalable Vision Learners}, + author={Kaiming He and Xinlei Chen and Saining Xie and Yanghao Li and + Piotr Doll'ar and Ross B. 
Girshick}, + journal={arXiv}, + year={2021} +} +``` diff --git a/configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py b/configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py new file mode 100644 index 0000000..4cf9ca1 --- /dev/null +++ b/configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py @@ -0,0 +1,114 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict(batch_size=128, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=128, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_path_rate=0.1, + out_type='avg_featmap', + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=2e-5)]), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# optimizer wrapper +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=2e-3, weight_decay=0.05, betas=(0.9, 0.999)), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.65, + custom_keys={ + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=95, + by_epoch=True, + begin=5, + end=100, + eta_min=1e-6, + convert_to_iter_based=True) +] + +# runtime settings +default_hooks = dict( + # save checkpoint per epoch. 
+ checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +train_cfg = dict(by_epoch=True, max_epochs=100) + +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/mae/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py b/configs/mae/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py new file mode 100644 index 0000000..b0545c9 --- /dev/null +++ b/configs/mae/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py @@ -0,0 +1,64 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +train_dataloader = dict(batch_size=2048, drop_last=True) +val_dataloader = dict(drop_last=False) +test_dataloader = dict(drop_last=False) + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + frozen_stages=12, + out_type='cls_token', + final_norm=True, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=dict(type='ClsBatchNormNeck', input_features=768), + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=0.01)])) + +# optimizer +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict(type='LARS', lr=6.4, weight_decay=0.0, momentum=0.9)) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=80, + by_epoch=True, + begin=10, + end=90, + eta_min=0.0, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=90) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3), + logger=dict(type='LoggerHook', interval=10)) + +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/mae/benchmarks/vit-huge-p14_32xb8-coslr-50e_in1k-448px.py b/configs/mae/benchmarks/vit-huge-p14_32xb8-coslr-50e_in1k-448px.py new file mode 100644 index 0000000..60046b4 --- /dev/null +++ b/configs/mae/benchmarks/vit-huge-p14_32xb8-coslr-50e_in1k-448px.py @@ -0,0 +1,116 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=448, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=512, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=448), + dict(type='PackInputs') +] + +train_dataloader = dict(batch_size=128, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=128, 
dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='huge', + img_size=448, + patch_size=14, + drop_path_rate=0.3, # set to 0.3 + out_type='avg_featmap', + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=2e-5)]), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# optimizer wrapper +# learning rate and layer decay rate are set to 0.004 and 0.75 respectively +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=4e-3, weight_decay=0.05, betas=(0.9, 0.999)), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.75, + custom_keys={ + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=45, + by_epoch=True, + begin=5, + end=50, + eta_min=1e-6, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=50) +default_hooks = dict( + # save checkpoint per epoch. + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/mae/benchmarks/vit-huge-p14_8xb128-coslr-50e_in1k.py b/configs/mae/benchmarks/vit-huge-p14_8xb128-coslr-50e_in1k.py new file mode 100644 index 0000000..2a9ff51 --- /dev/null +++ b/configs/mae/benchmarks/vit-huge-p14_8xb128-coslr-50e_in1k.py @@ -0,0 +1,115 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict(batch_size=128, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=128, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='huge', + img_size=224, + patch_size=14, + drop_path_rate=0.3, # set to 0.3 + out_type='avg_featmap', + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint='', 
prefix='backbone.')), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1280, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=2e-5)]), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# optimizer wrapper +# learning rate and layer decay rate are set to 0.004 and 0.75 respectively +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=4e-3, weight_decay=0.05, betas=(0.9, 0.999)), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.75, + custom_keys={ + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=45, + by_epoch=True, + begin=5, + end=50, + eta_min=1e-6, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=50) +default_hooks = dict( + # save checkpoint per epoch. + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/mae/benchmarks/vit-huge-p14_8xb128-ds-coslr-50e_in1k.py b/configs/mae/benchmarks/vit-huge-p14_8xb128-ds-coslr-50e_in1k.py new file mode 100644 index 0000000..813f7c0 --- /dev/null +++ b/configs/mae/benchmarks/vit-huge-p14_8xb128-ds-coslr-50e_in1k.py @@ -0,0 +1,31 @@ +_base_ = ['./vit-huge-p14_8xb128-coslr-50e_in1k.py'] + +# optimizer wrapper +optim_wrapper = dict(type='DeepSpeedOptimWrapper') + +# training strategy +strategy = dict( + type='DeepSpeedStrategy', + fp16=dict( + enabled=True, + fp16_master_weights_and_grads=False, + loss_scale=0, + loss_scale_window=500, + hysteresis=2, + min_loss_scale=1, + initial_scale_power=15, + ), + inputs_to_half=['inputs'], + zero_optimization=dict( + stage=1, + allgather_partitions=True, + reduce_scatter=True, + allgather_bucket_size=50000000, + reduce_bucket_size=50000000, + overlap_comm=True, + contiguous_gradients=True, + cpu_offload=False, + )) + +# runner which supports strategies +runner_type = 'FlexibleRunner' diff --git a/configs/mae/benchmarks/vit-huge-p14_8xb128-fsdp-coslr-50e_in1k.py b/configs/mae/benchmarks/vit-huge-p14_8xb128-fsdp-coslr-50e_in1k.py new file mode 100644 index 0000000..5f8dfb7 --- /dev/null +++ b/configs/mae/benchmarks/vit-huge-p14_8xb128-fsdp-coslr-50e_in1k.py @@ -0,0 +1,13 @@ +_base_ = ['./vit-huge-p14_8xb128-coslr-50e_in1k.py'] + +strategy = dict( + type='FSDPStrategy', + model_wrapper=dict( + auto_wrap_policy=dict( + type='torch.distributed.fsdp.wrap.size_based_auto_wrap_policy', + min_num_params=1e7))) + +optim_wrapper = dict(type='AmpOptimWrapper') + +# runner which supports strategies +runner_type = 'FlexibleRunner' diff --git a/configs/mae/benchmarks/vit-large-p16_8xb128-coslr-50e_in1k.py b/configs/mae/benchmarks/vit-large-p16_8xb128-coslr-50e_in1k.py new file mode 100644 index 0000000..ae86b40 --- /dev/null +++ b/configs/mae/benchmarks/vit-large-p16_8xb128-coslr-50e_in1k.py @@ -0,0 +1,115 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + 
type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict(batch_size=128, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=128, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='large', + img_size=224, + patch_size=16, + drop_path_rate=0.2, # set to 0.2 + out_type='avg_featmap', + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=1024, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=2e-5)]), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# optimizer wrapper +# learning rate and layer decay rate are set to 0.004 and 0.75 respectively +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=4e-3, weight_decay=0.05, betas=(0.9, 0.999)), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.75, + custom_keys={ + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=45, + by_epoch=True, + begin=5, + end=50, + eta_min=1e-6, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=50) +default_hooks = dict( + # save checkpoint per epoch. 
+ checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/mae/benchmarks/vit-large-p16_8xb128-ds-coslr-50e_in1k.py b/configs/mae/benchmarks/vit-large-p16_8xb128-ds-coslr-50e_in1k.py new file mode 100644 index 0000000..9aedb43 --- /dev/null +++ b/configs/mae/benchmarks/vit-large-p16_8xb128-ds-coslr-50e_in1k.py @@ -0,0 +1,31 @@ +_base_ = ['./vit-large-p16_8xb128-coslr-50e_in1k.py'] + +# optimizer wrapper +optim_wrapper = dict(type='DeepSpeedOptimWrapper') + +# training strategy +strategy = dict( + type='DeepSpeedStrategy', + fp16=dict( + enabled=True, + fp16_master_weights_and_grads=False, + loss_scale=0, + loss_scale_window=500, + hysteresis=2, + min_loss_scale=1, + initial_scale_power=15, + ), + inputs_to_half=['inputs'], + zero_optimization=dict( + stage=1, + allgather_partitions=True, + reduce_scatter=True, + allgather_bucket_size=50000000, + reduce_bucket_size=50000000, + overlap_comm=True, + contiguous_gradients=True, + cpu_offload=False, + )) + +# runner which supports strategies +runner_type = 'FlexibleRunner' diff --git a/configs/mae/benchmarks/vit-large-p16_8xb128-fsdp-coslr-50e_in1k.py b/configs/mae/benchmarks/vit-large-p16_8xb128-fsdp-coslr-50e_in1k.py new file mode 100644 index 0000000..3a8a674 --- /dev/null +++ b/configs/mae/benchmarks/vit-large-p16_8xb128-fsdp-coslr-50e_in1k.py @@ -0,0 +1,13 @@ +_base_ = ['./vit-large-p16_8xb128-coslr-50e_in1k.py'] + +strategy = dict( + type='FSDPStrategy', + model_wrapper=dict( + auto_wrap_policy=dict( + type='torch.distributed.fsdp.wrap.size_based_auto_wrap_policy', + min_num_params=1e7))) + +optim_wrapper = dict(type='AmpOptimWrapper') + +# runner which supports strategies +runner_type = 'FlexibleRunner' diff --git a/configs/mae/benchmarks/vit-large-p16_8xb2048-linear-coslr-90e_in1k.py b/configs/mae/benchmarks/vit-large-p16_8xb2048-linear-coslr-90e_in1k.py new file mode 100644 index 0000000..c895181 --- /dev/null +++ b/configs/mae/benchmarks/vit-large-p16_8xb2048-linear-coslr-90e_in1k.py @@ -0,0 +1,64 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +train_dataloader = dict(batch_size=2048, drop_last=True) +val_dataloader = dict(drop_last=False) +test_dataloader = dict(drop_last=False) + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='large', + img_size=224, + patch_size=16, + frozen_stages=24, + out_type='cls_token', + final_norm=True, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=dict(type='ClsBatchNormNeck', input_features=1024), + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=1024, + loss=dict(type='CrossEntropyLoss'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=0.01)])) + +# optimizer +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict(type='LARS', lr=6.4, weight_decay=0.0, momentum=0.9)) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=80, + by_epoch=True, + begin=10, + end=90, + eta_min=0.0, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=90) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=1, 
max_keep_ckpts=3), + logger=dict(type='LoggerHook', interval=10)) + +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/mae/mae_hivit-base-p16_8xb512-amp-coslr-1600e_in1k.py b/configs/mae/mae_hivit-base-p16_8xb512-amp-coslr-1600e_in1k.py new file mode 100644 index 0000000..76c0df2 --- /dev/null +++ b/configs/mae/mae_hivit-base-p16_8xb512-amp-coslr-1600e_in1k.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/mae_hivit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1600) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_hivit-base-p16_8xb512-amp-coslr-400e_in1k.py b/configs/mae/mae_hivit-base-p16_8xb512-amp-coslr-400e_in1k.py new file mode 100644 index 0000000..8107fcc --- /dev/null +++ b/configs/mae/mae_hivit-base-p16_8xb512-amp-coslr-400e_in1k.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/mae_hivit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=400) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_hivit-base-p16_8xb512-amp-coslr-800e_in1k.py b/configs/mae/mae_hivit-base-p16_8xb512-amp-coslr-800e_in1k.py new file mode 100644 index 0000000..c150e04 --- /dev/null +++ b/configs/mae/mae_hivit-base-p16_8xb512-amp-coslr-800e_in1k.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/mae_hivit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=760, + by_epoch=True, + begin=40, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_hivit-large-p16_8xb512-amp-coslr-1600e_in1k.py b/configs/mae/mae_hivit-large-p16_8xb512-amp-coslr-1600e_in1k.py new file mode 100644 index 0000000..5d5e40d --- /dev/null +++ b/configs/mae/mae_hivit-large-p16_8xb512-amp-coslr-1600e_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/mae_hivit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(type='MAEHiViT', arch='large'), + neck=dict(type='MAEPretrainDecoder', embed_dim=768)) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1600) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_hivit-large-p16_8xb512-amp-coslr-400e_in1k.py b/configs/mae/mae_hivit-large-p16_8xb512-amp-coslr-400e_in1k.py new file mode 100644 index 0000000..2c6c47d --- /dev/null +++ b/configs/mae/mae_hivit-large-p16_8xb512-amp-coslr-400e_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/mae_hivit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(type='MAEHiViT', arch='large'), + neck=dict(type='MAEPretrainDecoder', embed_dim=768)) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=400) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_hivit-large-p16_8xb512-amp-coslr-800e_in1k.py b/configs/mae/mae_hivit-large-p16_8xb512-amp-coslr-800e_in1k.py new file mode 100644 index 0000000..4ed7d20 --- /dev/null +++ b/configs/mae/mae_hivit-large-p16_8xb512-amp-coslr-800e_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/mae_hivit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(type='MAEHiViT', arch='large'), + neck=dict(type='MAEPretrainDecoder', embed_dim=768)) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=760, + by_epoch=True, + begin=40, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_vit-base-p16_8xb512-amp-coslr-1600e_in1k.py b/configs/mae/mae_vit-base-p16_8xb512-amp-coslr-1600e_in1k.py new file mode 100644 index 0000000..bbad841 --- /dev/null +++ b/configs/mae/mae_vit-base-p16_8xb512-amp-coslr-1600e_in1k.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1600) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_vit-base-p16_8xb512-amp-coslr-300e_in1k.py b/configs/mae/mae_vit-base-p16_8xb512-amp-coslr-300e_in1k.py new file mode 100644 index 0000000..f11fb2f --- /dev/null +++ b/configs/mae/mae_vit-base-p16_8xb512-amp-coslr-300e_in1k.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_vit-base-p16_8xb512-amp-coslr-400e_in1k.py b/configs/mae/mae_vit-base-p16_8xb512-amp-coslr-400e_in1k.py new file mode 100644 index 0000000..d8f0398 --- /dev/null +++ b/configs/mae/mae_vit-base-p16_8xb512-amp-coslr-400e_in1k.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=400) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_vit-base-p16_8xb512-amp-coslr-800e_in1k.py b/configs/mae/mae_vit-base-p16_8xb512-amp-coslr-800e_in1k.py new file mode 100644 index 0000000..01e0fb4 --- /dev/null +++ b/configs/mae/mae_vit-base-p16_8xb512-amp-coslr-800e_in1k.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.000000001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=760, + by_epoch=True, + begin=40, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_vit-huge-p14_8xb512-amp-coslr-1600e_in1k.py b/configs/mae/mae_vit-huge-p14_8xb512-amp-coslr-1600e_in1k.py new file mode 100644 index 0000000..5eb7a42 --- /dev/null +++ b/configs/mae/mae_vit-huge-p14_8xb512-amp-coslr-1600e_in1k.py @@ -0,0 +1,66 @@ +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(type='MAEViT', arch='h', patch_size=14), + neck=dict( + type='MAEPretrainDecoder', + embed_dim=1280, + patch_size=14, + num_patches=256), + head=dict(patch_size=14)) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1600) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py b/configs/mae/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py new file mode 100644 index 0000000..683790c --- /dev/null +++ b/configs/mae/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(type='MAEViT', arch='l'), + neck=dict(type='MAEPretrainDecoder', embed_dim=1024)) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1600) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_vit-large-p16_8xb512-amp-coslr-300e_in1k.py b/configs/mae/mae_vit-large-p16_8xb512-amp-coslr-300e_in1k.py new file mode 100644 index 0000000..5392074 --- /dev/null +++ b/configs/mae/mae_vit-large-p16_8xb512-amp-coslr-300e_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(type='MAEViT', arch='l'), + neck=dict(type='MAEPretrainDecoder', embed_dim=1024)) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_vit-large-p16_8xb512-amp-coslr-400e_in1k.py b/configs/mae/mae_vit-large-p16_8xb512-amp-coslr-400e_in1k.py new file mode 100644 index 0000000..f050522 --- /dev/null +++ b/configs/mae/mae_vit-large-p16_8xb512-amp-coslr-400e_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(type='MAEViT', arch='l'), + neck=dict(type='MAEPretrainDecoder', embed_dim=1024)) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=400) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/mae_vit-large-p16_8xb512-amp-coslr-800e_in1k.py b/configs/mae/mae_vit-large-p16_8xb512-amp-coslr-800e_in1k.py new file mode 100644 index 0000000..5a4294d --- /dev/null +++ b/configs/mae/mae_vit-large-p16_8xb512-amp-coslr-800e_in1k.py @@ -0,0 +1,61 @@ +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(type='MAEViT', arch='l'), + neck=dict(type='MAEPretrainDecoder', embed_dim=1024)) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.000000001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=760, + by_epoch=True, + begin=40, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mae/metafile.yml b/configs/mae/metafile.yml new file mode 100644 index 0000000..8192672 --- /dev/null +++ b/configs/mae/metafile.yml @@ -0,0 +1,367 @@ +Collections: + - Name: MAE + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + Training Resources: 8x A100-80G GPUs + Architecture: + - ViT + Paper: + Title: Masked Autoencoders Are Scalable Vision Learners + URL: https://arxiv.org/abs/2111.06377 + README: configs/mae/README.md + +Models: + - Name: mae_vit-base-p16_8xb512-amp-coslr-300e_in1k + Metadata: + Epochs: 300 + Batch Size: 4096 + FLOPs: 17581972224 + Parameters: 111907840 + Training Data: ImageNet-1k + In Collection: MAE + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-300e_in1k/mae_vit-base-p16_8xb512-coslr-300e-fp16_in1k_20220829-c2cf66ba.pth + Config: configs/mae/mae_vit-base-p16_8xb512-amp-coslr-300e_in1k.py + Downstream: + - vit-base-p16_mae-300e-pre_8xb2048-linear-coslr-90e_in1k + - vit-base-p16_mae-300e-pre_8xb128-coslr-100e_in1k + - Name: mae_vit-base-p16_8xb512-amp-coslr-400e_in1k + Metadata: + Epochs: 400 + Batch Size: 4096 + FLOPs: 17581972224 + Parameters: 111907840 + Training Data: ImageNet-1k + In Collection: MAE + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-400e_in1k/mae_vit-base-p16_8xb512-coslr-400e-fp16_in1k_20220825-bc79e40b.pth + Config: configs/mae/mae_vit-base-p16_8xb512-amp-coslr-400e_in1k.py + Downstream: + - vit-base-p16_mae-400e-pre_8xb2048-linear-coslr-90e_in1k + - vit-base-p16_mae-400e-pre_8xb128-coslr-100e_in1k + - Name: mae_vit-base-p16_8xb512-amp-coslr-800e_in1k + Metadata: + Epochs: 800 + Batch Size: 4096 + FLOPs: 17581972224 + Parameters: 111907840 + Training Data: 
ImageNet-1k + In Collection: MAE + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-800e_in1k/mae_vit-base-p16_8xb512-coslr-800e-fp16_in1k_20220825-5d81fbc4.pth + Config: configs/mae/mae_vit-base-p16_8xb512-amp-coslr-800e_in1k.py + Downstream: + - vit-base-p16_mae-800e-pre_8xb2048-linear-coslr-90e_in1k + - vit-base-p16_mae-800e-pre_8xb128-coslr-100e_in1k + - Name: mae_vit-base-p16_8xb512-amp-coslr-1600e_in1k + Metadata: + Epochs: 1600 + Batch Size: 4096 + FLOPs: 17581972224 + Parameters: 111907840 + Training Data: ImageNet-1k + In Collection: MAE + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k_20220825-f7569ca2.pth + Config: configs/mae/mae_vit-base-p16_8xb512-amp-coslr-1600e_in1k.py + Downstream: + - vit-base-p16_mae-1600e-pre_8xb2048-linear-coslr-90e_in1k + - vit-base-p16_mae-1600e-pre_8xb128-coslr-100e_in1k + - Name: mae_vit-large-p16_8xb512-amp-coslr-400e_in1k + Metadata: + Epochs: 400 + Batch Size: 4096 + FLOPs: 61603111936 + Parameters: 329541888 + Training Data: ImageNet-1k + In Collection: MAE + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-400e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-400e_in1k_20220825-b11d0425.pth + Config: configs/mae/mae_vit-large-p16_8xb512-amp-coslr-400e_in1k.py + Downstream: + - vit-large-p16_mae-400e-pre_8xb2048-linear-coslr-90e_in1k + - vit-large-p16_mae-400e-pre_8xb128-coslr-50e_in1k + - Name: mae_vit-large-p16_8xb512-amp-coslr-800e_in1k + Metadata: + Epochs: 800 + Batch Size: 4096 + FLOPs: 61603111936 + Parameters: 329541888 + Training Data: ImageNet-1k + In Collection: MAE + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-800e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-800e_in1k_20220825-df72726a.pth + Config: configs/mae/mae_vit-large-p16_8xb512-amp-coslr-800e_in1k.py + Downstream: + - vit-large-p16_mae-800e-pre_8xb2048-linear-coslr-90e_in1k + - vit-large-p16_mae-800e-pre_8xb128-coslr-50e_in1k + - Name: mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k + Metadata: + Epochs: 1600 + Batch Size: 4096 + FLOPs: 61603111936 + Parameters: 329541888 + Training Data: ImageNet-1k + In Collection: MAE + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-1600e_in1k_20220825-cc7e98c9.pth + Config: configs/mae/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py + Downstream: + - vit-large-p16_mae-1600e-pre_8xb2048-linear-coslr-90e_in1k + - vit-large-p16_mae-1600e-pre_8xb128-coslr-50e_in1k + - Name: mae_vit-huge-p16_8xb512-amp-coslr-1600e_in1k + Metadata: + Epochs: 1600 + Batch Size: 4096 + FLOPs: 167400741120 + Parameters: 657074508 + Training Data: ImageNet-1k + In Collection: MAE + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k_20220916-ff848775.pth + Config: configs/mae/mae_vit-huge-p14_8xb512-amp-coslr-1600e_in1k.py + Downstream: + - vit-huge-p14_mae-1600e-pre_8xb128-coslr-50e_in1k + - vit-huge-p14_mae-1600e-pre_32xb8-coslr-50e_in1k-448px + - Name: vit-base-p16_mae-300e-pre_8xb128-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 1024 + FLOPs: 17581215744 + Parameters: 86566120 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - 
Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.1 + Weights: null + Config: configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py + - Name: vit-base-p16_mae-400e-pre_8xb128-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 1024 + FLOPs: 17581215744 + Parameters: 86566120 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.3 + Weights: null + Config: configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py + - Name: vit-base-p16_mae-800e-pre_8xb128-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 1024 + FLOPs: 17581215744 + Parameters: 86566120 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.3 + Weights: null + Config: configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py + - Name: vit-base-p16_mae-1600e-pre_8xb128-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 1024 + FLOPs: 17581215744 + Parameters: 86566120 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.5 + Weights: https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20220825-cf70aa21.pth + Config: configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py + - Name: vit-base-p16_mae-300e-pre_8xb2048-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 16384 + FLOPs: 17581972992 + Parameters: 86567656 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 60.8 + Weights: null + Config: configs/mae/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py + - Name: vit-base-p16_mae-400e-pre_8xb2048-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 16384 + FLOPs: 17581972992 + Parameters: 86567656 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 62.5 + Weights: null + Config: configs/mae/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py + - Name: vit-base-p16_mae-800e-pre_8xb2048-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 16384 + FLOPs: 17581972992 + Parameters: 86567656 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 65.1 + Weights: null + Config: configs/mae/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py + - Name: vit-base-p16_mae-1600e-pre_8xb2048-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 16384 + FLOPs: 17581972992 + Parameters: 86567656 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 67.1 + Weights: null + Config: configs/mae/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py + - Name: vit-large-p16_mae-400e-pre_8xb128-coslr-50e_in1k + Metadata: + Epochs: 50 + Batch Size: 1024 + FLOPs: 61602103296 + Parameters: 304324584 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.2 + Weights: null + Config: configs/mae/benchmarks/vit-large-p16_8xb128-coslr-50e_in1k.py + - Name: 
vit-large-p16_mae-800e-pre_8xb128-coslr-50e_in1k + Metadata: + Epochs: 50 + Batch Size: 1024 + FLOPs: 61602103296 + Parameters: 304324584 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.4 + Weights: null + Config: configs/mae/benchmarks/vit-large-p16_8xb128-coslr-50e_in1k.py + - Name: vit-large-p16_mae-1600e-pre_8xb128-coslr-50e_in1k + Metadata: + Epochs: 50 + Batch Size: 1024 + FLOPs: 61602103296 + Parameters: 304324584 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.7 + Weights: null + Config: configs/mae/benchmarks/vit-large-p16_8xb128-coslr-50e_in1k.py + - Name: vit-large-p16_mae-400e-pre_8xb2048-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 16384 + FLOPs: 61603112960 + Parameters: 304326632 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 70.7 + Weights: null + Config: configs/mae/benchmarks/vit-large-p16_8xb2048-linear-coslr-90e_in1k.py + - Name: vit-large-p16_mae-800e-pre_8xb2048-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 16384 + FLOPs: 61603112960 + Parameters: 304326632 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 73.7 + Weights: null + Config: configs/mae/benchmarks/vit-large-p16_8xb2048-linear-coslr-90e_in1k.py + - Name: vit-large-p16_mae-1600e-pre_8xb2048-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 16384 + FLOPs: 61603112960 + Parameters: 304326632 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 75.5 + Weights: null + Config: configs/mae/benchmarks/vit-large-p16_8xb2048-linear-coslr-90e_in1k.py + - Name: vit-huge-p14_mae-1600e-pre_8xb128-coslr-50e_in1k + Metadata: + Epochs: 50 + Batch Size: 1024 + FLOPs: 167399096320 + Parameters: 632043240 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.9 + Weights: https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k/vit-huge-p16_ft-8xb128-coslr-50e_in1k/vit-huge-p16_ft-8xb128-coslr-50e_in1k_20220916-0bfc9bfd.pth + Config: configs/mae/benchmarks/vit-huge-p14_8xb128-coslr-50e_in1k.py + - Name: vit-huge-p14_mae-1600e-pre_32xb8-coslr-50e_in1k-448px + Metadata: + Epochs: 50 + Batch Size: 256 + FLOPs: 732131983360 + Parameters: 633026280 + Training Data: ImageNet-1k + In Collection: MAE + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.3 + Weights: https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-huge-p16_8xb512-fp16-coslr-1600e_in1k/vit-huge-p16_ft-32xb8-coslr-50e_in1k-448/vit-huge-p16_ft-32xb8-coslr-50e_in1k-448_20220916-95b6a0ce.pth + Config: configs/mae/benchmarks/vit-huge-p14_32xb8-coslr-50e_in1k-448px.py diff --git a/configs/maskfeat/README.md b/configs/maskfeat/README.md new file mode 100644 index 0000000..d25b32b --- /dev/null +++ b/configs/maskfeat/README.md @@ -0,0 +1,85 @@ +# MaskFeat + +> [Masked Feature Prediction for Self-Supervised Visual Pre-Training](https://arxiv.org/abs/2112.09133v1) + + + +## Abstract + +We present Masked Feature Prediction (MaskFeat) for self-supervised pre-training of video models. 
Our approach first randomly masks out a portion of the input sequence and then predicts the feature of the masked regions. We study five different types of features and find Histograms of Oriented Gradients (HOG), a hand-crafted feature descriptor, works particularly well in terms of both performance and efficiency. We observe that the local contrast normalization in HOG is essential for good results, which is in line with earlier work using HOG for visual recognition. Our approach can learn abundant visual knowledge and drive large-scale Transformer-based models. Without using extra model weights or supervision, MaskFeat pre-trained on unlabeled videos achieves unprecedented results of 86.7% with MViT-L on Kinetics-400, 88.3% on Kinetics-600, 80.4% on Kinetics-700, 38.8 mAP on AVA, and 75.0% on SSv2. MaskFeat further generalizes to image input, which can be interpreted as a video with a single frame and obtains competitive results on ImageNet. + +
+ +
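+
+The regression target described above can be illustrated with a plain HOG computation. The sketch below is a minimal, conceptual example that uses `skimage.feature.hog` as a stand-in for the `HOGGenerator(nbins=9, pool=8, ...)` target generator configured in this folder; the toy image, mask and tensor shapes are illustrative only.
+
+```python
+import numpy as np
+from skimage.feature import hog
+
+# Toy 224x224 grayscale image and a random mask over the 14x14 grid of 16x16 patches.
+image = np.random.rand(224, 224)
+patch_mask = np.random.rand(14, 14) > 0.4  # True = masked patch whose features are predicted
+
+# One 9-bin orientation histogram per 8x8 cell over the whole image.
+hog_cells = hog(image, orientations=9, pixels_per_cell=(8, 8),
+                cells_per_block=(1, 1), feature_vector=False)
+print(hog_cells.shape)  # (28, 28, 1, 1, 9): 28x28 cells, 9 bins each
+
+# Gather the histograms belonging to masked patches (each 16x16 patch covers 2x2 cells).
+targets = np.stack([hog_cells[2 * i:2 * i + 2, 2 * j:2 * j + 2].reshape(-1)
+                    for i, j in zip(*np.nonzero(patch_mask))])
+print(targets.shape)  # (#masked patches, 36): 2*2 cells x 9 bins per grayscale patch
+```
+
+With three color channels this gives 3 x 36 = 108 values per patch, which is consistent with the 108-dimensional per-patch target (`out_channels=108`) regressed by the neck in the pretraining config below.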
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('vit-base-p16_maskfeat-pre_8xb256-coslr-100e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/maskfeat/benchmarks/vit-base-p16_8xb256-coslr-100e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb256-coslr-100e_in1k/vit-base-p16_ft-8xb256-coslr-100e_in1k_20221028-5134431c.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :------------------------------------------------- | :--------: | :-------: | :-----------------------------------------------------------: | :--------------------------------------------------------------------: | +| `maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k` | 85.88 | 17.58 | [config](maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221101-6dfc8bf3.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221101-6dfc8bf3.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `vit-base-p16_maskfeat-pre_8xb256-coslr-100e_in1k` | [MASKFEAT](https://download.openmmlab.com/mmselfsup/1.x/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221101-6dfc8bf3.pth) | 86.57 | 17.58 | 83.40 | [config](benchmarks/vit-base-p16_8xb256-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb256-coslr-100e_in1k/vit-base-p16_ft-8xb256-coslr-100e_in1k_20221028-5134431c.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb256-coslr-100e_in1k/vit-base-p16_ft-8xb256-coslr-100e_in1k_20221028-5134431c.json) | + +## Citation + +```bibtex +@InProceedings{wei2022masked, + author = {Wei, Chen and Fan, Haoqi and Xie, Saining and Wu, Chao-Yuan and Yuille, Alan and Feichtenhofer, Christoph}, + title = {Masked Feature Prediction for Self-Supervised Visual Pre-Training}, + booktitle = {CVPR}, + year = {2022}, +} +``` diff --git a/configs/maskfeat/benchmarks/vit-base-p16_8xb256-coslr-100e_in1k.py 
b/configs/maskfeat/benchmarks/vit-base-p16_8xb256-coslr-100e_in1k.py new file mode 100644 index 0000000..5a7620b --- /dev/null +++ b/configs/maskfeat/benchmarks/vit-base-p16_8xb256-coslr-100e_in1k.py @@ -0,0 +1,114 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +# dataset +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs'), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(batch_size=256, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=256, dataset=dict(pipeline=test_pipeline)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_path_rate=0.1, + out_type='avg_featmap', + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=2e-5, bias=2e-5) + ]), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# optimizer wrapper +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=8e-3, weight_decay=0.05, betas=(0.9, 0.999)), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.65, + custom_keys={ + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=20, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=80, + by_epoch=True, + begin=20, + end=100, + eta_min=1e-6, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100) +default_hooks = dict( + # save checkpoint per epoch. 
+ checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0) diff --git a/configs/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k.py b/configs/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k.py new file mode 100644 index 0000000..465ff5c --- /dev/null +++ b/configs/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k.py @@ -0,0 +1,103 @@ +_base_ = '../_base_/default_runtime.py' + +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='SelfSupDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.5, 1.0), + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='BEiTMaskGenerator', + input_size=14, + num_masking_patches=78, + min_num_patches=15, + ), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=256, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='meta/train.txt', + data_prefix=dict(img_path='train/'), + pipeline=train_pipeline)) + +# model settings +model = dict( + type='MaskFeat', + backbone=dict(type='MaskFeatViT', arch='b', patch_size=16), + neck=dict( + type='LinearNeck', + in_channels=768, + out_channels=108, + norm_cfg=None, + init_cfg=dict(type='TruncNormal', layer='Linear', std=0.02, bias=0)), + head=dict( + type='MIMHead', + loss=dict(type='PixelReconstructionLoss', criterion='L2')), + target_generator=dict( + type='HOGGenerator', nbins=9, pool=8, gaussian_window=16)) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', lr=2e-4 * 8, betas=(0.9, 0.999), weight_decay=0.05), + clip_grad=dict(max_norm=0.02), + paramwise_cfg=dict( + bias_decay_mult=0.0, + norm_decay_mult=0.0, + flat_decay_mult=0.0, + custom_keys={ + # 'pos_embed': dict(decay_mult=0.), + # 'cls_token': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-6, + by_epoch=True, + begin=0, + end=30, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=270, + eta_min=1e-6, + by_epoch=True, + begin=30, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/maskfeat/metafile.yml b/configs/maskfeat/metafile.yml new file mode 100644 index 0000000..1e1e1b4 --- /dev/null +++ b/configs/maskfeat/metafile.yml @@ -0,0 +1,43 @@ +Collections: + - Name: MaskFeat + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + Training Resources: 8x A100-80G GPUs + Architecture: + - ViT + Paper: + Title: Masked Feature Prediction for Self-Supervised Visual Pre-Training + URL: https://arxiv.org/abs/2112.09133v1 + README: configs/maskfeat/README.md + +Models: + - Name: maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k + Metadata: + Epochs: 300 + Batch Size: 2048 + FLOPs: 17581972224 + Parameters: 85882692 + Training Data: ImageNet-1k + In Collection: MaskFeat + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k_20221101-6dfc8bf3.pth + Config: configs/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k.py + Downstream: + - vit-base-p16_maskfeat-pre_8xb256-coslr-100e_in1k + - Name: vit-base-p16_maskfeat-pre_8xb256-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 2048 + FLOPs: 17581215744 + Parameters: 86566120 + Training Data: ImageNet-1k + In Collection: MaskFeat + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.4 + Weights: https://download.openmmlab.com/mmselfsup/1.x/maskfeat/maskfeat_vit-base-p16_8xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb256-coslr-100e_in1k/vit-base-p16_ft-8xb256-coslr-100e_in1k_20221028-5134431c.pth + Config: configs/maskfeat/benchmarks/vit-base-p16_8xb256-coslr-100e_in1k.py diff --git a/configs/mff/README.md b/configs/mff/README.md new file mode 100644 index 0000000..7001c74 --- /dev/null +++ b/configs/mff/README.md @@ -0,0 +1,60 @@ +# MFF + +> [Improving Pixel-based MIM by Reducing Wasted Modeling Capability](https://arxiv.org/abs/2308.00261) + + + +## Abstract + +There has been significant progress in Masked Image Modeling (MIM). Existing MIM methods can be broadly categorized into two groups based on the reconstruction target: pixel-based and tokenizer-based approaches. The former offers a simpler pipeline and lower computational cost, but it is known to be biased toward high-frequency details. In this paper, we provide a set of empirical studies to confirm this limitation of pixel-based MIM and propose a new method that explicitly utilizes low-level features from shallow layers to aid pixel reconstruction. By incorporating this design into our base method, MAE, we reduce the wasted modeling capability of pixel-based MIM, improving its convergence and achieving non-trivial improvements across various downstream tasks. To the best of our knowledge, we are the first to systematically investigate multi-level feature fusion for isotropic architectures like the standard Vision Transformer (ViT). Notably, when applied to a smaller model (e.g., ViT-S), our method yields significant performance gains, such as 1.2% on fine-tuning, 2.8% on linear probing, and 2.6% on semantic segmentation. + +
+ +
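+
+The core idea above, feeding low-level features from shallow layers into the reconstruction branch, can be sketched as a learnable weighted average over features taken from several ViT layers (the configs in this folder use `out_indices=[0, 2, 4, 6, 8, 11]`). This is only a conceptual sketch with illustrative tensor shapes; the actual `MFFViT` implementation may differ in details such as per-layer projections.
+
+```python
+import torch
+import torch.nn as nn
+
+# Features from six ViT layers (illustrative: batch 2, 49 visible tokens, dim 768).
+layer_feats = [torch.rand(2, 49, 768) for _ in range(6)]
+
+# Learnable fusion weights, softmax-normalized so the fused feature map is a
+# convex combination of the selected layers.
+fusion_weights = nn.Parameter(torch.zeros(6))
+norm_weights = torch.softmax(fusion_weights, dim=0)
+fused = sum(w * f for w, f in zip(norm_weights, layer_feats))
+print(fused.shape)  # torch.Size([2, 49, 768])
+```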
+ +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/mff/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py None +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :-------------------------------------------- | :--------: | :-------: | :------------------------------------------------------: | :------------------------------------------------------------------------------: | +| `mff_vit-base-p16_8xb512-amp-coslr-300e_in1k` | - | - | [config](mff_vit-base-p16_8xb512-amp-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k_20230801-3c1bcce4.pth) \| [log](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k_20230801-3c1bcce4.json) | +| `mff_vit-base-p16_8xb512-amp-coslr-800e_in1k` | - | - | [config](mff_vit-base-p16_8xb512-amp-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k_20230801-3af7cd9d.pth) \| [log](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k_20230801-3af7cd9d.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `vit-base-p16_mff-300e-pre_8xb128-coslr-100e_in1k` | [MFF 300-Epochs](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k_20230801-3c1bcce4.pth) | 86.57 | 17.58 | 83.00 | [config](benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k/vit-base-p16_8xb128-coslr-100e_in1k/vit-base-p16_8xb128-coslr-100e_in1k_20230802-d746fdb7.pth) / [log](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k/vit-base-p16_8xb128-coslr-100e_in1k/vit-base-p16_8xb128-coslr-100e_in1k_20230802-d746fdb7.json) | +| `vit-base-p16_mff-800e-pre_8xb128-coslr-100e_in1k` | [MFF 800-Epochs](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k_20230801-3af7cd9d.pth) | 86.57 | 17.58 | 83.70 | [config](benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k/vit-base-p16_8xb128-coslr-100e/vit-base-p16_8xb128-coslr-100e_20230802-6780e47d.pth) / [log](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k/vit-base-p16_8xb128-coslr-100e/vit-base-p16_8xb128-coslr-100e_20230802-6780e47d.json) | +| `vit-base-p16_mff-300e-pre_8xb2048-linear-coslr-90e_in1k` | [MFF 
300-Epochs](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k_20230801-3c1bcce4.pth) | 304.33 | 61.60 | 64.20 | [config](benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py) | [log](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k/vit-base-p16_8xb2048-linear-coslr-90e_in1k/vit-base-p16_8xb2048-linear-coslr-90e_in1k.json) | +| `vit-base-p16_mff-800e-pre_8xb2048-linear-coslr-90e_in1k` | [MFF 800-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k_20220825-f7569ca2.pth) | 304.33 | 61.60 | 68.30 | [config](benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k/vit-base-p16_8xb2048-linear-coslr-90e/vit-base-p16_8xb2048-linear-coslr-90e_20230802-6b1f7bc8.pth) / [log](https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k/vit-base-p16_8xb2048-linear-coslr-90e/vit-base-p16_8xb2048-linear-coslr-90e_20230802-6b1f7bc8.json) | + +## Citation + +```bibtex +@article{MFF, + title={Improving Pixel-based MIM by Reducing Wasted Modeling Capability}, + author={Yuan Liu, Songyang Zhang, Jiacheng Chen, Zhaohui Yu, Kai Chen, Dahua Lin}, + journal={arXiv}, + year={2023} +} +``` diff --git a/configs/mff/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py b/configs/mff/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py new file mode 100644 index 0000000..4cf9ca1 --- /dev/null +++ b/configs/mff/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py @@ -0,0 +1,114 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict(batch_size=128, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=128, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_path_rate=0.1, + out_type='avg_featmap', + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=2e-5)]), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + 
dict(type='CutMix', alpha=1.0) + ])) + +# optimizer wrapper +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=2e-3, weight_decay=0.05, betas=(0.9, 0.999)), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.65, + custom_keys={ + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=95, + by_epoch=True, + begin=5, + end=100, + eta_min=1e-6, + convert_to_iter_based=True) +] + +# runtime settings +default_hooks = dict( + # save checkpoint per epoch. + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +train_cfg = dict(by_epoch=True, max_epochs=100) + +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/mff/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py b/configs/mff/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py new file mode 100644 index 0000000..dc5f230 --- /dev/null +++ b/configs/mff/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py @@ -0,0 +1,74 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ToPIL', to_rgb=True), + dict(type='MAERandomResizedCrop', size=224, interpolation=3), + dict(type='torchvision/RandomHorizontalFlip', p=0.5), + dict(type='ToNumpy', to_bgr=True), + dict(type='PackInputs'), +] + +# dataset settings +train_dataloader = dict( + batch_size=2048, drop_last=True, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(drop_last=False) +test_dataloader = dict(drop_last=False) + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + frozen_stages=12, + out_type='cls_token', + final_norm=True, + init_cfg=dict(type='Pretrained', prefix='backbone.')), + neck=dict(type='ClsBatchNormNeck', input_features=768), + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=0.01)])) + +# optimizer +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict(type='LARS', lr=6.4, weight_decay=0.0, momentum=0.9)) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=80, + by_epoch=True, + begin=10, + end=90, + eta_min=0.0, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=90) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=1), + logger=dict(type='LoggerHook', interval=10)) + +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/mff/metafile.yml b/configs/mff/metafile.yml new file mode 100644 index 0000000..f1da4cc --- /dev/null +++ b/configs/mff/metafile.yml @@ -0,0 +1,103 @@ +Collections: + - Name: MFF + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + Training Resources: 8x A100-80G GPUs + Architecture: + - ViT + Paper: + Title: Improving Pixel-based MIM 
by Reducing Wasted Modeling Capability + URL: https://arxiv.org/pdf/2308.00261.pdf + README: configs/mff/README.md + +Models: + - Name: mff_vit-base-p16_8xb512-amp-coslr-300e_in1k + Metadata: + Epochs: 300 + Batch Size: 2048 + FLOPs: 17581972224 + Parameters: 85882692 + Training Data: ImageNet-1k + In Collection: MaskFeat + Results: null + Weights: https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k_20230801-3c1bcce4.pth + Config: configs/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k.py + Downstream: + - vit-base-p16_mff-300e-pre_8xb128-coslr-100e_in1k + - vit-base-p16_mff-300e-pre_8xb2048-linear-coslr-90e_in1k + - Name: mff_vit-base-p16_8xb512-amp-coslr-800e_in1k + Metadata: + Epochs: 800 + Batch Size: 2048 + FLOPs: 17581972224 + Parameters: 85882692 + Training Data: ImageNet-1k + In Collection: MaskFeat + Results: null + Weights: https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k_20230801-3af7cd9d.pth + Config: configs/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k.py + Downstream: + - vit-base-p16_mff-800e-pre_8xb128-coslr-100e_in1k + - vit-base-p16_mff-800e-pre_8xb2048-linear-coslr-90e_in1k + - Name: vit-base-p16_mff-300e-pre_8xb128-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 1024 + FLOPs: 17581215744 + Parameters: 86566120 + Training Data: ImageNet-1k + In Collection: MaskFeat + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.0 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k/vit-base-p16_8xb128-coslr-100e_in1k/vit-base-p16_8xb128-coslr-100e_in1k_20230802-d746fdb7.pth + Config: configs/mff/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py + - Name: vit-base-p16_mff-800e-pre_8xb128-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 1024 + FLOPs: 17581215744 + Parameters: 86566120 + Training Data: ImageNet-1k + In Collection: MFF + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.7 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k/vit-base-p16_8xb128-coslr-100e/vit-base-p16_8xb128-coslr-100e_20230802-6780e47d.pth + Config: configs/mff/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py + - Name: vit-base-p16_mff-300e-pre_8xb2048-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 16384 + FLOPs: 17581215744 + Parameters: 86566120 + Training Data: ImageNet-1k + In Collection: MFF + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 64.2 + Weights: + Config: configs/mff/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py + - Name: vit-base-p16_mff-800e-pre_8xb2048-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 16384 + FLOPs: 17581215744 + Parameters: 86566120 + Training Data: ImageNet-1k + In Collection: MFF + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 68.3 + Weights: https://download.openmmlab.com/mmpretrain/v1.0/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k/vit-base-p16_8xb128-coslr-100e_in1k/vit-base-p16_8xb128-coslr-100e_in1k_20230802-d746fdb7.pth + Config: configs/mff/benchmarks/vit-base-p16_8xb2048-linear-coslr-90e_in1k.py diff --git a/configs/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k.py b/configs/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k.py new file mode 100644 
index 0000000..f9fc521 --- /dev/null +++ b/configs/mff/mff_vit-base-p16_8xb512-amp-coslr-300e_in1k.py @@ -0,0 +1,24 @@ +_base_ = '../mae/mae_vit-base-p16_8xb512-amp-coslr-300e_in1k.py' + +randomness = dict(seed=2, diff_rank_seed=True) + +# dataset config +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ToPIL', to_rgb=True), + dict(type='torchvision/Resize', size=224), + dict( + type='torchvision/RandomCrop', + size=224, + padding=4, + padding_mode='reflect'), + dict(type='torchvision/RandomHorizontalFlip', p=0.5), + dict(type='ToNumpy', to_bgr=True), + dict(type='PackInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# model config +model = dict( + type='MFF', backbone=dict(type='MFFViT', out_indices=[0, 2, 4, 6, 8, 11])) diff --git a/configs/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k.py b/configs/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k.py new file mode 100644 index 0000000..d8976b2 --- /dev/null +++ b/configs/mff/mff_vit-base-p16_8xb512-amp-coslr-800e_in1k.py @@ -0,0 +1,24 @@ +_base_ = '../mae/mae_vit-base-p16_8xb512-amp-coslr-800e_in1k.py' + +randomness = dict(seed=2, diff_rank_seed=True) + +# dataset config +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ToPIL', to_rgb=True), + dict(type='torchvision/Resize', size=224), + dict( + type='torchvision/RandomCrop', + size=224, + padding=4, + padding_mode='reflect'), + dict(type='torchvision/RandomHorizontalFlip', p=0.5), + dict(type='ToNumpy', to_bgr=True), + dict(type='PackInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# model config +model = dict( + type='MFF', backbone=dict(type='MFFViT', out_indices=[0, 2, 4, 6, 8, 11])) diff --git a/configs/milan/README.md b/configs/milan/README.md new file mode 100644 index 0000000..e1fe228 --- /dev/null +++ b/configs/milan/README.md @@ -0,0 +1,104 @@ +# MILAN + +> [MILAN: Masked Image Pretraining on Language Assisted Representation](https://arxiv.org/pdf/2208.06049) + + + +## Abstract + +Self-attention based transformer models have been dominating many computer +vision tasks in the past few years. Their superb model qualities heavily depend +on the excessively large labeled image datasets. In order to reduce the reliance +on large labeled datasets, reconstruction based masked autoencoders are gaining +popularity, which learn high quality transferable representations from unlabeled +images. For the same purpose, recent weakly supervised image pretraining methods +explore language supervision from text captions accompanying the images. In this +work, we propose masked image pretraining on language assisted representation, +dubbed as MILAN. Instead of predicting raw pixels or low level features, our +pretraining objective is to reconstruct the image features with substantial semantic +signals that are obtained using caption supervision. Moreover, to accommodate our +reconstruction target, we propose a more efficient prompting decoder architecture +and a semantic aware mask sampling mechanism, which further advance the +transfer performance of the pretrained model. Experimental results demonstrate +that MILAN delivers higher accuracy than the previous works. When the masked +autoencoder is pretrained and finetuned on ImageNet-1K dataset with an input +resolution of 224×224, MILAN achieves a top-1 accuracy of 85.4% on ViTB/16, surpassing previous state-of-the-arts by 1%. 
In the downstream semantic +segmentation task, MILAN achieves 52.7 mIoU using ViT-B/16 backbone on +ADE20K dataset, outperforming previous masked pretraining results by 4 points. + +
+ +
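+
+The pretraining objective described above amounts to regressing CLIP-derived features for the masked tokens under a cosine-similarity loss, which is what the `CosineSimilarityLoss` head and `CLIPGenerator` target generator in the config below configure. The snippet is a conceptual sketch with illustrative shapes, not the actual mmpretrain loss (which additionally applies the shift and scale factors shown in the config).
+
+```python
+import torch
+import torch.nn.functional as F
+
+# Decoder predictions and CLIP target features for the masked tokens
+# (illustrative: batch 2, 147 masked tokens, feature dim 512).
+pred = torch.rand(2, 147, 512)
+target = torch.rand(2, 147, 512)
+
+# Maximize cosine similarity between prediction and target, i.e. minimize
+# 1 - cos(pred, target) averaged over all masked tokens.
+loss = (1 - F.cosine_similarity(pred, target, dim=-1)).mean()
+print(loss)
+```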
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('vit-base-p16_milan-pre_8xb128-coslr-100e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('milan_vit-base-p16_16xb256-amp-coslr-400e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/milan/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k-milan_20221129-74ac94fa.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :----------------------------------------------- | :--------: | :-------: | :---------------------------------------------------------: | :------------------------------------------------------------------------: | +| `milan_vit-base-p16_16xb256-amp-coslr-400e_in1k` | 111.91 | 17.58 | [config](milan_vit-base-p16_16xb256-amp-coslr-400e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k_20221129-180922e8.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k_20221129-180922e8.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `vit-base-p16_milan-pre_8xb128-coslr-100e_in1k` | [MILAN](https://download.openmmlab.com/mmselfsup/1.x/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k_20221129-180922e8.pth) | 86.57 | 17.58 | 85.30 | [config](benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k-milan_20221129-74ac94fa.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k-milan_20221129-74ac94fa.json) | +| `vit-base-p16_milan-pre_8xb2048-linear-coslr-100e_in1k` | [MILAN](https://download.openmmlab.com/mmselfsup/1.x/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k_20221129-180922e8.pth) | 86.57 | 17.58 | 78.90 | [config](benchmarks/vit-base-p16_8xb2048-linear-coslr-100e_in1k.py) | 
[model](https://download.openmmlab.com/mmselfsup/1.x/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k/vit-base-p16_linear-8xb2048-coslr-100e_in1k/vit-base-p16_linear-8xb2048-coslr-100e_in1k_20221129-03f26f85.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k/vit-base-p16_linear-8xb2048-coslr-100e_in1k/vit-base-p16_linear-8xb2048-coslr-100e_in1k_20221129-03f26f85.json) | + +## Citation + +```bibtex +@article{Hou2022MILANMI, + title={MILAN: Masked Image Pretraining on Language Assisted Representation}, + author={Zejiang Hou and Fei Sun and Yen-Kuang Chen and Yuan Xie and S. Y. Kung}, + journal={ArXiv}, + year={2022} +} +``` diff --git a/configs/milan/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py b/configs/milan/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py new file mode 100644 index 0000000..e8a3f49 --- /dev/null +++ b/configs/milan/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py @@ -0,0 +1,114 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict(batch_size=128, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=128, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_path_rate=0.1, + out_type='avg_featmap', + final_norm=False, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=0.02)]), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# optimizer wrapper +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=4e-4, weight_decay=0.05, betas=(0.9, 0.999)), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.65, + custom_keys={ + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=95, + by_epoch=True, + begin=5, + end=100, + eta_min=1e-6, + 
convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=100) +default_hooks = dict( + # save checkpoint per epoch. + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/milan/benchmarks/vit-base-p16_8xb2048-linear-coslr-100e_in1k.py b/configs/milan/benchmarks/vit-base-p16_8xb2048-linear-coslr-100e_in1k.py new file mode 100644 index 0000000..0b7333c --- /dev/null +++ b/configs/milan/benchmarks/vit-base-p16_8xb2048-linear-coslr-100e_in1k.py @@ -0,0 +1,70 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../../_base_/default_runtime.py' +] + +train_dataloader = dict(batch_size=2048, drop_last=True) +val_dataloader = dict(drop_last=False) +test_dataloader = dict(drop_last=False) + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + frozen_stages=12, + out_type='cls_token', + final_norm=True, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=dict(type='ClsBatchNormNeck', input_features=768), + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=0.01)]), + data_preprocessor=dict( + num_classes=1000, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True, + )) + +# optimizer +optim_wrapper = dict( + _delete_=True, + type='AmpOptimWrapper', + optimizer=dict(type='LARS', lr=3.2, weight_decay=0.0, momentum=0.9), +) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=90, + by_epoch=True, + begin=10, + end=100, + eta_min=0.0, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=100) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3), + logger=dict(type='LoggerHook', interval=10)) + +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/milan/metafile.yml b/configs/milan/metafile.yml new file mode 100644 index 0000000..a790815 --- /dev/null +++ b/configs/milan/metafile.yml @@ -0,0 +1,59 @@ +Collections: + - Name: MILAN + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + Training Resources: 16x A100-80G GPUs + Architecture: + - ViT + Paper: + Title: 'MILAN: Masked Image Pretraining on Language Assisted Representation' + URL: https://arxiv.org/pdf/2208.06049 + README: configs/milan/README.md + +Models: + - Name: milan_vit-base-p16_16xb256-amp-coslr-400e_in1k + Metadata: + Epochs: 400 + Batch Size: 4096 + FLOPs: 17581972224 + Parameters: 111907584 + Training Data: ImageNet-1k + In Collection: MILAN + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k_20221129-180922e8.pth + Config: configs/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k.py + Downstream: + - vit-base-p16_milan-pre_8xb128-coslr-100e_in1k + - vit-base-p16_milan-pre_8xb2048-linear-coslr-100e_in1k + - Name: vit-base-p16_milan-pre_8xb128-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 1024 + FLOPs: 17581215744 + Parameters: 86566120 + Training 
Data: ImageNet-1k + In Collection: MILAN + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.3 + Weights: https://download.openmmlab.com/mmselfsup/1.x/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k-milan_20221129-74ac94fa.pth + Config: configs/milan/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py + - Name: vit-base-p16_milan-pre_8xb2048-linear-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 16384 + FLOPs: 17581972992 + Parameters: 86567656 + Training Data: ImageNet-1k + In Collection: MILAN + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.9 + Weights: https://download.openmmlab.com/mmselfsup/1.x/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k/vit-base-p16_linear-8xb2048-coslr-100e_in1k/vit-base-p16_linear-8xb2048-coslr-100e_in1k_20221129-03f26f85.pth + Config: configs/milan/benchmarks/vit-base-p16_8xb2048-linear-coslr-100e_in1k.py diff --git a/configs/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k.py b/configs/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k.py new file mode 100644 index 0000000..ac80ab7 --- /dev/null +++ b/configs/milan/milan_vit-base-p16_16xb256-amp-coslr-400e_in1k.py @@ -0,0 +1,88 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_dataloader = dict(batch_size=256) + +# model settings +model = dict( + type='MILAN', + backbone=dict( + type='MILANViT', + arch='b', + patch_size=16, + mask_ratio=0.75, + init_cfg=[ + dict(type='Xavier', distribution='uniform', layer='Linear'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]), + neck=dict( + type='MILANPretrainDecoder', + init_cfg=[ + dict(type='Xavier', distribution='uniform', layer='Linear'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]), + head=dict( + type='MIMHead', + loss=dict( + type='CosineSimilarityLoss', shift_factor=2.0, scale_factor=2.0), + ), + target_generator=dict( + type='CLIPGenerator', + tokenizer_path= # noqa + 'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/clip_vit_base_16.pth.tar' # noqa + ), + init_cfg=None) + +# optimizer wrapper +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) +find_unused_parameters = True + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=400) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/minigpt4/README.md b/configs/minigpt4/README.md new file mode 100644 index 0000000..23666fc --- /dev/null +++ b/configs/minigpt4/README.md @@ -0,0 +1,53 @@ +# MiniGPT4 + +> [MiniGPT-4: Enhancing Vision-language Understanding with Advanced Large Language Models](https://arxiv.org/abs/2304.10592) + + + +## Abstract + +The recent GPT-4 has demonstrated extraordinary multi-modal abilities, such as directly generating websites from handwritten text and identifying humorous elements within images. These features are rarely observed in previous vision-language models. We believe the primary reason for GPT-4's advanced multi-modal generation capabilities lies in the utilization of a more advanced large language model (LLM). To examine this phenomenon, we present MiniGPT-4, which aligns a frozen visual encoder with a frozen LLM, Vicuna, using just one projection layer. Our findings reveal that MiniGPT-4 possesses many capabilities similar to those exhibited by GPT-4 like detailed image description generation and website creation from hand-written drafts. Furthermore, we also observe other emerging capabilities in MiniGPT-4, including writing stories and poems inspired by given images, providing solutions to problems shown in images, teaching users how to cook based on food photos, etc. In our experiment, we found that only performing the pretraining on raw image-text pairs could produce unnatural language outputs that lack coherency including repetition and fragmented sentences. To address this problem, we curate a high-quality, well-aligned dataset in the second stage to finetune our model using a conversational template. This step proved crucial for augmenting the model's generation reliability and overall usability. Notably, our model is highly computationally efficient, as we only train a projection layer utilizing approximately 5 million aligned image-text pairs. Our code, pre-trained model, and collected dataset are available at https://minigpt-4.github.io/. + +
+ +
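+
+Architecturally, the only trainable part described above is a single linear projection that maps the frozen Q-Former's query outputs into the frozen LLM's embedding space, where they are concatenated with the prompt embeddings. The sketch below illustrates this with plain tensors; the 32 query tokens match `num_query_token=32` in the configs, while the 768 and 4096 widths are illustrative assumptions for the Q-Former output and Vicuna-7B hidden sizes.
+
+```python
+import torch
+import torch.nn as nn
+
+q_former_out = torch.rand(1, 32, 768)    # frozen vision side: 32 query tokens
+prompt_embeds = torch.rand(1, 12, 4096)  # frozen LLM embeddings of the text prompt
+
+proj = nn.Linear(768, 4096)              # the single trainable projection layer
+visual_tokens = proj(q_former_out)
+
+llm_inputs = torch.cat([prompt_embeds, visual_tokens], dim=1)
+print(llm_inputs.shape)  # torch.Size([1, 44, 4096]) -> fed to the frozen LLM
+```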
+ +## How to use it? + + + +**Use the model** + +```python +from mmpretrain import inference_model + +result = inference_model('minigpt-4_vicuna-7b_caption', 'demo/cat-dog.png') +print(result) +# {'pred_caption': 'This image shows a small dog and a kitten sitting on a blanket in a field of flowers. The dog is looking up at the kitten with a playful expression on its face. The background is a colorful striped blanket, and there are flowers all around them. The image is well composed with the two animals sitting in the center of the frame, surrounded by the flowers and blanket.'} +``` + + + +## Models and results + +For Vicuna model, please refer to [MiniGPT-4 page](https://github.com/Vision-CAIR/MiniGPT-4) for preparation guidelines. + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :------------------------------ | :--------: | :-------: | :----------------------------------------: | :----------------------------------------------------------------------------------------------------------: | +| `minigpt-4_baichuan-7b_caption` | 8094.77 | N/A | [config](minigpt-4_baichuan-7b_caption.py) | [model](https://download.openmmlab.com/mmclassification/v1/minigpt4/minigpt-4_linear_baichuan7b_20231011-5dca7ed6.pth) | +| `minigpt-4_vicuna-7b_caption`\* | 8121.32 | N/A | [config](minigpt-4_vicuna-7b_caption.py) | [model](https://download.openmmlab.com/mmclassification/v1/minigpt4/minigpt-4_linear_vicuna7b_20230615-714b5f52.pth) | + +*Models with * are converted from the [official repo](https://github.com/Vision-CAIR/MiniGPT-4/tree/main). The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{zhu2023minigpt, + title={MiniGPT-4: Enhancing Vision-Language Understanding with Advanced Large Language Models}, + author={Zhu, Deyao and Chen, Jun and Shen, Xiaoqian and Li, Xiang and Elhoseiny, Mohamed}, + journal={arXiv preprint arXiv:2304.10592}, + year={2023} +} +``` diff --git a/configs/minigpt4/metafile.yml b/configs/minigpt4/metafile.yml new file mode 100644 index 0000000..f70cc9b --- /dev/null +++ b/configs/minigpt4/metafile.yml @@ -0,0 +1,37 @@ +Collections: + - Name: MiniGPT4 + Metadata: + Architecture: + - Transformer + - Gated Cross-Attention Dense + Paper: + Title: 'MiniGPT-4: Enhancing Vision-language Understanding with Advanced Large Language Models' + URL: https://arxiv.org/abs/2304.10592 + README: configs/minigpt4/README.md + +Models: + - Name: minigpt-4_vicuna-7b_caption + Metadata: + FLOPs: null + Parameters: 8121315072 + In Collection: MiniGPT4 + Results: + - Task: Image Caption + Dataset: COCO + Metrics: null + Weights: https://download.openmmlab.com/mmclassification/v1/minigpt4/minigpt-4_linear_vicuna7b_20230615-714b5f52.pth + Config: configs/minigpt4/minigpt-4_vicuna-7b_caption.py + Converted From: + Weights: https://github.com/Vision-CAIR/MiniGPT-4/tree/main + Code: https://github.com/Vision-CAIR/MiniGPT-4/tree/main + - Name: minigpt-4_baichuan-7b_caption + Metadata: + FLOPs: null + Parameters: 8094769024 + In Collection: MiniGPT4 + Results: + - Task: Image Caption + Dataset: COCO + Metrics: null + Weights: https://download.openmmlab.com/mmclassification/v1/minigpt4/minigpt-4_linear_baichuan7b_20231011-5dca7ed6.pth + Config: configs/minigpt4/minigpt-4_baichuan-7b_caption.py diff --git a/configs/minigpt4/minigpt-4_baichuan-7b_caption.py b/configs/minigpt4/minigpt-4_baichuan-7b_caption.py new file mode 100644 index 0000000..7e610a0 --- /dev/null +++ 
b/configs/minigpt4/minigpt-4_baichuan-7b_caption.py @@ -0,0 +1,190 @@ +_base_ = [ + '../_base_/default_runtime.py', +] + +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(224, 224), + interpolation='bicubic', + backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='CleanCaption', + keys='chat_content', + remove_chars='', + lowercase=False), + dict( + type='PackInputs', + algorithm_keys=['chat_content', 'lang'], + meta_keys=['image_id']), +] + +train_dataloader = dict( + batch_size=2, + num_workers=4, + dataset=dict( + type='MiniGPT4Dataset', + data_root='YOUR_DATA_DIRECTORY', + ann_file='YOUR_DATA_FILE', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + drop_last=False, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(224, 224), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id']), +] + +test_evaluator = dict( + type='COCOCaption', + ann_file='data/coco/annotations/coco_karpathy_val_gt.json', +) + +test_dataloader = dict( + batch_size=1, + dataset=dict( + type='COCOCaption', + data_root='data/coco', + ann_file='annotations/coco_karpathy_val.json', + pipeline=test_pipeline)) + +# model settings +model = dict( + type='MiniGPT4', + vision_encoder=dict( + type='BEiTViT', + # eva-g without the final layer + arch=dict( + embed_dims=1408, + num_layers=39, + num_heads=16, + feedforward_channels=6144, + ), + img_size=224, + patch_size=14, + layer_scale_init_value=0.0, + frozen_stages=39, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + final_norm=False, + use_shared_rel_pos_bias=False, + out_type='raw', + pretrained= # noqa + 'https://download.openmmlab.com/mmpretrain/v1.0/minigpt4/minigpt-4_eva-g-p14_20230615-e908c021.pth' # noqa + ), + q_former_model=dict( + type='Qformer', + model_style='bert-base-uncased', + vision_model_width=1408, + add_cross_attention=True, + cross_attention_freq=2, + num_query_token=32, + pretrained= # noqa + 'https://download.openmmlab.com/mmpretrain/v1.0/minigpt4/minigpt-4_qformer_20230615-1dfa889c.pth' # noqa + ), + lang_encoder=dict( + type='AutoModelForCausalLM', + name_or_path='baichuan-inc/baichuan-7B', + trust_remote_code=True), + tokenizer=dict( + type='AutoTokenizer', + name_or_path='baichuan-inc/baichuan-7B', + trust_remote_code=True), + task='caption', + prompt_template=dict([('en', '###Ask: {} ###Answer: '), + ('zh', '###问:{} ###答:')]), + raw_prompts=dict([ + ('en', [(' ' + 'Describe this image in detail.'), + (' ' + 'Take a look at this image and describe what you notice.'), + (' ' + 'Please provide a detailed description of the picture.'), + (' ' + 'Could you describe the contents of this image for me?')]), + ('zh', [(' ' + '详细描述这张图片。'), (' ' + '浏览这张图片并描述你注意到什么。'), + (' ' + '请对这张图片进行详细的描述。'), + (' ' + '你能为我描述这张图片的内容吗?')]) + ]), + max_txt_len=160, + end_sym='###') + +strategy = dict( + type='DeepSpeedStrategy', + fp16=dict( + enabled=True, + auto_cast=False, + fp16_master_weights_and_grads=False, + loss_scale=0, + loss_scale_window=1000, + hysteresis=1, + min_loss_scale=1, + initial_scale_power=16, + ), + inputs_to_half=[0], + zero_optimization=dict( + stage=2, + allgather_partitions=True, + allgather_bucket_size=2e8, + 
reduce_scatter=True, + reduce_bucket_size='auto', + overlap_comm=True, + contiguous_gradients=True, + ), +) + +# schedule settings +optim_wrapper = dict( + type='DeepSpeedOptimWrapper', + optimizer=dict(type='AdamW', lr=1e-3, weight_decay=0.05)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-3 / 500, + by_epoch=False, + begin=0, + end=500, + ), + dict( + type='CosineAnnealingLR', + eta_min=2e-4, + by_epoch=False, + begin=500, + ), +] + +train_cfg = dict(by_epoch=True, max_epochs=6) +test_cfg = dict() + +runner_type = 'FlexibleRunner' + +default_hooks = dict( + checkpoint=dict( + type='CheckpointHook', + interval=1, + by_epoch=True, + save_last=True, + max_keep_ckpts=1, + )) diff --git a/configs/minigpt4/minigpt-4_vicuna-7b_caption.py b/configs/minigpt4/minigpt-4_vicuna-7b_caption.py new file mode 100644 index 0000000..f468e2d --- /dev/null +++ b/configs/minigpt4/minigpt-4_vicuna-7b_caption.py @@ -0,0 +1,94 @@ +_base_ = [ + '../_base_/datasets/coco_caption.py', + '../_base_/default_runtime.py', +] + +# dataset settings +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(224, 224), + interpolation='bicubic', + backend='pillow'), + dict(type='PackInputs', meta_keys=['image_id']), +] + +val_dataloader = dict(batch_size=1, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# model settings +model = dict( + type='MiniGPT4', + vision_encoder=dict( + type='BEiTViT', + # eva-g without the final layer + arch=dict( + embed_dims=1408, + num_layers=39, + num_heads=16, + feedforward_channels=6144, + ), + img_size=224, + patch_size=14, + layer_scale_init_value=0.0, + frozen_stages=39, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + final_norm=False, + use_shared_rel_pos_bias=False, + out_type='raw', + pretrained= # noqa + 'https://download.openmmlab.com/mmpretrain/v1.0/minigpt4/minigpt-4_eva-g-p14_20230615-e908c021.pth' # noqa + ), + q_former_model=dict( + type='Qformer', + model_style='bert-base-uncased', + vision_model_width=1408, + add_cross_attention=True, + cross_attention_freq=2, + num_query_token=32, + pretrained= # noqa + 'https://download.openmmlab.com/mmpretrain/v1.0/minigpt4/minigpt-4_qformer_20230615-1dfa889c.pth' # noqa + ), + lang_encoder=dict( + type='AutoModelForCausalLM', name_or_path='YOUR_PATH_TO_VICUNA'), + tokenizer=dict(type='LlamaTokenizer', name_or_path='YOUR_PATH_TO_VICUNA'), + task='caption', + prompt_template=dict([('en', '###Ask: {} ###Answer: '), + ('zh', '###问:{} ###答:')]), + raw_prompts=dict([ + ('en', [(' ' + 'Describe this image in detail.'), + (' ' + 'Take a look at this image and describe what you notice.'), + (' ' + 'Please provide a detailed description of the picture.'), + (' ' + 'Could you describe the contents of this image for me?')]), + ('zh', [(' ' + '详细描述这张图片。'), (' ' + '浏览这张图片并描述你注意到什么。'), + (' ' + '请对这张图片进行详细的描述。'), + (' ' + '你能为我描述这张图片的内容吗?')]) + ]), + max_txt_len=160, + end_sym='###') + +# schedule settings +optim_wrapper = dict(optimizer=dict(type='AdamW', lr=1e-5, weight_decay=0.05)) + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + by_epoch=True, + begin=0, + end=5, + ) +] + +train_cfg = dict(by_epoch=True, max_epochs=5) +val_cfg = dict() +test_cfg = dict() diff --git a/configs/mixmim/README.md b/configs/mixmim/README.md new file mode 100644 index 0000000..e07f501 --- /dev/null +++ b/configs/mixmim/README.md @@ -0,0 +1,102 @@ +# MixMIM + +> [MixMIM: Mixed and Masked Image Modeling for Efficient Visual Representation Learning](https://arxiv.org/abs/2205.13137) + 
+ + +## Abstract + +In this study, we propose Mixed and Masked Image Modeling (MixMIM), a +simple but efficient MIM method that is applicable to various hierarchical Vision +Transformers. Existing MIM methods replace a random subset of input tokens with +a special [MASK] symbol and aim at reconstructing original image tokens from +the corrupted image. However, we find that using the [MASK] symbol greatly +slows down the training and causes training-finetuning inconsistency, due to the +large masking ratio (e.g., 40% in BEiT). In contrast, we replace the masked tokens +of one image with visible tokens of another image, i.e., creating a mixed image. +We then conduct dual reconstruction to reconstruct the original two images from +the mixed input, which significantly improves efficiency. While MixMIM can +be applied to various architectures, this paper explores a simpler but stronger +hierarchical Transformer, and scales with MixMIM-B, -L, and -H. Empirical +results demonstrate that MixMIM can learn high-quality visual representations +efficiently. Notably, MixMIM-B with 88M parameters achieves 85.1% top-1 +accuracy on ImageNet-1K by pretraining for 600 epochs, setting a new record for +neural networks with comparable model sizes (e.g., ViT-B) among MIM methods. +Besides, its transferring performances on the other 6 datasets show MixMIM has +better FLOPs / performance tradeoff than previous MIM methods + +
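The dual reconstruction above rests on a simple mixing step: a random subset of token positions in one image is replaced by the visible tokens of another image, and both originals are then reconstructed from the mixed sequence. A rough sketch of the mixing step (illustrative only, not the actual MixMIM implementation):

```python
import torch


def mix_tokens(tokens_a, tokens_b, mask_ratio=0.5):
    """Sketch of MixMIM-style token mixing.

    tokens_a, tokens_b: (batch, num_tokens, dim) patch embeddings of two images.
    Returns the mixed sequence and a boolean mask (True where tokens come from
    image b), which the decoder needs for the dual reconstruction loss.
    """
    batch, num_tokens, _ = tokens_a.shape
    num_masked = int(num_tokens * mask_ratio)
    noise = torch.rand(batch, num_tokens)          # random per-sample ordering
    ids = noise.argsort(dim=1)
    mask = torch.zeros(batch, num_tokens, dtype=torch.bool)
    mask.scatter_(1, ids[:, :num_masked], True)
    mixed = torch.where(mask.unsqueeze(-1), tokens_b, tokens_a)
    return mixed, mask


a = torch.randn(2, 196, 768)  # 14x14 patches, illustrative sizes
b = torch.randn(2, 196, 768)
mixed, mask = mix_tokens(a, b)
print(mixed.shape, mask.float().mean().item())  # torch.Size([2, 196, 768]) 0.5
```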
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('mixmim-base_mixmim-pre_8xb128-coslr-100e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('mixmim_mixmim-base_16xb128-coslr-300e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/mixmim/mixmim_mixmim-base_16xb128-coslr-300e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/mixmim/benchmarks/mixmim-base_8xb128-coslr-100e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/mixmim/mixmim-base-p16_16xb128-coslr-300e_in1k/mixmim-base-p16_ft-8xb128-coslr-100e_in1k/mixmim-base-p16_ft-8xb128-coslr-100e_in1k_20221208-41ecada9.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :------------------------------------------- | :--------: | :-------: | :-----------------------------------------------------: | :--------------------------------------------------------------------------------: | +| `mixmim_mixmim-base_16xb128-coslr-300e_in1k` | 114.67 | 16.35 | [config](mixmim_mixmim-base_16xb128-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mixmim/mixmim-base-p16_16xb128-coslr-300e_in1k/mixmim-base-p16_16xb128-coslr-300e_in1k_20221208-44fe8d2c.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mixmim/mixmim-base-p16_16xb128-coslr-300e_in1k/mixmim-base-p16_16xb128-coslr-300e_in1k_20221208-44fe8d2c.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `mixmim-base_mixmim-pre_8xb128-coslr-100e_in1k` | [MIXMIM](https://download.openmmlab.com/mmselfsup/1.x/mixmim/mixmim-base-p16_16xb128-coslr-300e_in1k/mixmim-base-p16_16xb128-coslr-300e_in1k_20221208-44fe8d2c.pth) | 88.34 | 16.35 | 84.63 | [config](benchmarks/mixmim-base_8xb128-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mixmim/mixmim-base-p16_16xb128-coslr-300e_in1k/mixmim-base-p16_ft-8xb128-coslr-100e_in1k/mixmim-base-p16_ft-8xb128-coslr-100e_in1k_20221208-41ecada9.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mixmim/mixmim-base-p16_16xb128-coslr-300e_in1k/mixmim-base-p16_ft-8xb128-coslr-100e_in1k/mixmim-base-p16_ft-8xb128-coslr-100e_in1k_20221208-41ecada9.json) | + +## Citation + +```bibtex +@article{MixMIM2022, + author = {Jihao Liu, Xin Huang, Yu Liu, Hongsheng Li}, + journal = {arXiv:2205.13137}, + title = {MixMIM: Mixed and Masked Image Modeling for Efficient Visual Representation Learning}, + year = {2022}, +} +``` diff --git a/configs/mixmim/benchmarks/mixmim-base_8xb128-coslr-100e_in1k.py b/configs/mixmim/benchmarks/mixmim-base_8xb128-coslr-100e_in1k.py new file mode 100644 index 0000000..c48ee3b --- /dev/null +++ 
b/configs/mixmim/benchmarks/mixmim-base_8xb128-coslr-100e_in1k.py @@ -0,0 +1,133 @@ +_base_ = [ + '../../_base_/models/mixmim/mixmim_base.py', + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' + +data_preprocessor = dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=128, + num_workers=16, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='meta/train.txt', + data_prefix='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +val_dataloader = dict( + batch_size=64, + num_workers=8, + pin_memory=True, + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='meta/val.txt', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +test_dataloader = val_dataloader + +model = dict( + backbone=dict( + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.'))) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='AdamW', + lr=5e-4 * (8 * 128 / 256), + betas=(0.9, 0.999), + weight_decay=0.05), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.7, + custom_keys={ + '.ln': dict(decay_mult=0.0), # do not decay on ln and bias + '.bias': dict(decay_mult=0.0) + })) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-6, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=95, + eta_min=1e-6, + by_epoch=True, + begin=5, + end=100, + convert_to_iter_based=True) +] + +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=10) +val_cfg = dict() +test_cfg = dict() + +default_hooks = dict( + # save checkpoint per epoch. 
+ checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=1)) diff --git a/configs/mixmim/benchmarks/mixmim-base_8xb64_in1k.py b/configs/mixmim/benchmarks/mixmim-base_8xb64_in1k.py new file mode 100644 index 0000000..86ada85 --- /dev/null +++ b/configs/mixmim/benchmarks/mixmim-base_8xb64_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../../_base_/models/mixmim/mixmim_base.py', + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/schedules/imagenet_bs256.py', + '../../_base_/default_runtime.py' +] diff --git a/configs/mixmim/metafile.yml b/configs/mixmim/metafile.yml new file mode 100644 index 0000000..5bf87bd --- /dev/null +++ b/configs/mixmim/metafile.yml @@ -0,0 +1,51 @@ +Collections: + - Name: MixMIM + Metadata: + Architecture: + - Attention Dropout + - Convolution + - Dense Connections + - Dropout + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + - Tanh Activation + Paper: + Title: 'MixMIM: Mixed and Masked Image Modeling for Efficient Visual Representation + Learning' + URL: https://arxiv.org/abs/2205.13137 + README: configs/mixmim/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/models/backbones/mixmim.py + Version: v1.0.0rc4 + +Models: + - Name: mixmim_mixmim-base_16xb128-coslr-300e_in1k + Metadata: + Epochs: 300 + Batch Size: 2048 + FLOPs: 16351906816 + Parameters: 114665784 + Training Data: ImageNet-1k + In Collection: MixMIM + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mixmim/mixmim-base-p16_16xb128-coslr-300e_in1k/mixmim-base-p16_16xb128-coslr-300e_in1k_20221208-44fe8d2c.pth + Config: configs/mixmim/mixmim_mixmim-base_16xb128-coslr-300e_in1k.py + Downstream: + - mixmim-base_mixmim-pre_8xb128-coslr-100e_in1k + - Name: mixmim-base_mixmim-pre_8xb128-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 1024 + FLOPs: 16351906816 + Parameters: 88344352 + Training Data: ImageNet-1k + In Collection: MixMIM + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.63 + Weights: https://download.openmmlab.com/mmselfsup/1.x/mixmim/mixmim-base-p16_16xb128-coslr-300e_in1k/mixmim-base-p16_ft-8xb128-coslr-100e_in1k/mixmim-base-p16_ft-8xb128-coslr-100e_in1k_20221208-41ecada9.pth + Config: configs/mixmim/benchmarks/mixmim-base_8xb128-coslr-100e_in1k.py diff --git a/configs/mixmim/mixmim_mixmim-base_16xb128-coslr-300e_in1k.py b/configs/mixmim/mixmim_mixmim-base_16xb128-coslr-300e_in1k.py new file mode 100644 index 0000000..29b94ea --- /dev/null +++ b/configs/mixmim/mixmim_mixmim-base_16xb128-coslr-300e_in1k.py @@ -0,0 +1,98 @@ +_base_ = '../_base_/default_runtime.py' + +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='SelfSupDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.2, 1.0), + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5), + dict(type='PackInputs') +] + +train_dataloader = dict( + batch_size=128, + num_workers=8, + persistent_workers=True, + pin_memory=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='meta/train.txt', + data_prefix=dict(img_path='train/'), + pipeline=train_pipeline)) + +# model settings +model = dict( + 
type='MixMIM', + backbone=dict( + type='MixMIMPretrainTransformer', + arch='B', + drop_rate=0.0, + drop_path_rate=0.0, # drop_path_rate=0.0 during pretraining + mask_ratio=0.5), + neck=dict( + type='MixMIMPretrainDecoder', + num_patches=49, + encoder_stride=32, + embed_dim=1024, + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=16), + head=dict( + type='MixMIMPretrainHead', + norm_pix=True, + loss=dict(type='PixelReconstructionLoss', criterion='L2'))) + +# optimizer wrapper +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * (2048 / 256), + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict(custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0) + })) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=1)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/mlp_mixer/README.md b/configs/mlp_mixer/README.md new file mode 100644 index 0000000..f0bb4ce --- /dev/null +++ b/configs/mlp_mixer/README.md @@ -0,0 +1,78 @@ +# MLP-Mixer + +> [MLP-Mixer: An all-MLP Architecture for Vision](https://arxiv.org/abs/2105.01601) + + + +## Abstract + +Convolutional Neural Networks (CNNs) are the go-to model for computer vision. Recently, attention-based networks, such as the Vision Transformer, have also become popular. In this paper we show that while convolutions and attention are both sufficient for good performance, neither of them are necessary. We present MLP-Mixer, an architecture based exclusively on multi-layer perceptrons (MLPs). MLP-Mixer contains two types of layers: one with MLPs applied independently to image patches (i.e. "mixing" the per-location features), and one with MLPs applied across patches (i.e. "mixing" spatial information). When trained on large datasets, or with modern regularization schemes, MLP-Mixer attains competitive scores on image classification benchmarks, with pre-training and inference cost comparable to state-of-the-art models. We hope that these results spark further research beyond the realms of well established CNNs and Transformers. + +
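The two layer types in the abstract correspond to two MLPs per block: a token-mixing MLP applied across patches and a channel-mixing MLP applied across features. A compact sketch of one Mixer block (sizes roughly follow a base model at 224x224 and are illustrative; this is not the backbone implementation used by these configs):

```python
import torch
from torch import nn


class MixerBlock(nn.Module):
    """Sketch of one MLP-Mixer block: token-mixing MLP + channel-mixing MLP."""

    def __init__(self, num_tokens=196, dim=768, token_hidden=384, channel_hidden=3072):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.token_mlp = nn.Sequential(
            nn.Linear(num_tokens, token_hidden), nn.GELU(),
            nn.Linear(token_hidden, num_tokens))
        self.norm2 = nn.LayerNorm(dim)
        self.channel_mlp = nn.Sequential(
            nn.Linear(dim, channel_hidden), nn.GELU(),
            nn.Linear(channel_hidden, dim))

    def forward(self, x):                        # x: (batch, num_tokens, dim)
        y = self.norm1(x).transpose(1, 2)        # mix information across tokens
        x = x + self.token_mlp(y).transpose(1, 2)
        x = x + self.channel_mlp(self.norm2(x))  # mix information across channels
        return x


x = torch.randn(1, 196, 768)
print(MixerBlock()(x).shape)  # torch.Size([1, 196, 768])
```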
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('mlp-mixer-base-p16_3rdparty_64xb64_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('mlp-mixer-base-p16_3rdparty_64xb64_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-base-p16_3rdparty_64xb64_in1k_20211124-1377e3e0.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :------------------------------------------: | :-------------------------------------------------------------: | +| `mlp-mixer-base-p16_3rdparty_64xb64_in1k`\* | From scratch | 59.88 | 12.61 | 76.68 | 92.25 | [config](mlp-mixer-base-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-base-p16_3rdparty_64xb64_in1k_20211124-1377e3e0.pth) | +| `mlp-mixer-large-p16_3rdparty_64xb64_in1k`\* | From scratch | 208.20 | 44.57 | 72.34 | 88.02 | [config](mlp-mixer-large-p16_64xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-large-p16_3rdparty_64xb64_in1k_20211124-5a2519d2.pth) | + +*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@misc{tolstikhin2021mlpmixer, + title={MLP-Mixer: An all-MLP Architecture for Vision}, + author={Ilya Tolstikhin and Neil Houlsby and Alexander Kolesnikov and Lucas Beyer and Xiaohua Zhai and Thomas Unterthiner and Jessica Yung and Andreas Steiner and Daniel Keysers and Jakob Uszkoreit and Mario Lucic and Alexey Dosovitskiy}, + year={2021}, + eprint={2105.01601}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/mlp_mixer/metafile.yml b/configs/mlp_mixer/metafile.yml new file mode 100644 index 0000000..8b632db --- /dev/null +++ b/configs/mlp_mixer/metafile.yml @@ -0,0 +1,50 @@ +Collections: + - Name: MLP-Mixer + Metadata: + Training Data: ImageNet-1k + Architecture: + - MLP + - Layer Normalization + - Dropout + Paper: + URL: https://arxiv.org/abs/2105.01601 + Title: "MLP-Mixer: An all-MLP Architecture for Vision" + README: configs/mlp_mixer/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.18.0/mmcls/models/backbones/mlp_mixer.py + Version: v0.18.0 + +Models: + - Name: mlp-mixer-base-p16_3rdparty_64xb64_in1k + In Collection: MLP-Mixer + Config: configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py + Metadata: + FLOPs: 12610000000 # 12.61 G + Parameters: 59880000 # 59.88 M + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.68 + Top 5 Accuracy: 92.25 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-base-p16_3rdparty_64xb64_in1k_20211124-1377e3e0.pth + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224-76587d61.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py#L70 + + - Name: mlp-mixer-large-p16_3rdparty_64xb64_in1k + In Collection: MLP-Mixer + Config: configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py + Metadata: + FLOPs: 44570000000 # 44.57 G + Parameters: 208200000 # 208.2 M + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 72.34 + Top 5 Accuracy: 88.02 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-large-p16_3rdparty_64xb64_in1k_20211124-5a2519d2.pth + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_mixer_b16_224_in21k-617b3de2.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/mlp_mixer.py#L73 diff --git a/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py b/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py new file mode 100644 index 0000000..bbf4268 --- /dev/null +++ b/configs/mlp_mixer/mlp-mixer-base-p16_64xb64_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/mlp_mixer_base_patch16.py', + '../_base_/datasets/imagenet_bs64_mixer_224.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py', +] + +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py b/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py new file mode 100644 index 0000000..4fbe9c5 --- /dev/null +++ b/configs/mlp_mixer/mlp-mixer-large-p16_64xb64_in1k.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/mlp_mixer_large_patch16.py', + '../_base_/datasets/imagenet_bs64_mixer_224.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py', +] + +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git 
a/configs/mobilenet_v2/README.md b/configs/mobilenet_v2/README.md new file mode 100644 index 0000000..74548e1 --- /dev/null +++ b/configs/mobilenet_v2/README.md @@ -0,0 +1,97 @@ +# MobileNet V2 + +> [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) + + + +## Introduction + +**MobileNet V2** is initially described in [the paper](https://arxiv.org/pdf/1801.04381.pdf), which improves the state-of-the-art performance of mobile models on multiple tasks. MobileNetV2 improves on V1 with two new ideas, the linear bottleneck and the inverted residual: each block is an inverted residual structure whose input and output are thin bottleneck layers, while the intermediate expansion layer uses lightweight depthwise convolutions to filter features as a source of non-linearity. The authors of MobileNet V2 measure its performance on ImageNet classification, COCO object detection, and VOC image segmentation. +
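The linear bottleneck and inverted residual can be summarized in a short PyTorch sketch (channel sizes and the expansion ratio are illustrative; this is not the backbone implementation used by these configs):

```python
import torch
from torch import nn


class InvertedResidual(nn.Module):
    """Sketch of a MobileNetV2-style inverted residual block.

    Expand (1x1) -> depthwise (3x3) -> linear projection (1x1, no activation),
    with a residual connection when the stride is 1 and channels match.
    """

    def __init__(self, in_ch, out_ch, stride=1, expand_ratio=6):
        super().__init__()
        hidden = in_ch * expand_ratio
        self.use_res = stride == 1 and in_ch == out_ch
        self.block = nn.Sequential(
            nn.Conv2d(in_ch, hidden, 1, bias=False),
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, hidden, 3, stride, 1, groups=hidden, bias=False),
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, out_ch, 1, bias=False),  # linear bottleneck
            nn.BatchNorm2d(out_ch),
        )

    def forward(self, x):
        out = self.block(x)
        return x + out if self.use_res else out


x = torch.randn(1, 32, 56, 56)
print(InvertedResidual(32, 32)(x).shape)  # torch.Size([1, 32, 56, 56])
```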
+ +
+ +## Abstract + +
+ +Show the paper's abstract + +
+In this paper we describe a new mobile architecture, MobileNetV2, that improves the state of the art performance of mobile models on multiple tasks and benchmarks as well as across a spectrum of different model sizes. We also describe efficient ways of applying these mobile models to object detection in a novel framework we call SSDLite. Additionally, we demonstrate how to build mobile semantic segmentation models through a reduced form of DeepLabv3 which we call Mobile DeepLabv3. + +The MobileNetV2 architecture is based on an inverted residual structure where the input and output of the residual block are thin bottleneck layers opposite to traditional residual models which use expanded representations in the input an MobileNetV2 uses lightweight depthwise convolutions to filter features in the intermediate expansion layer. Additionally, we find that it is important to remove non-linearities in the narrow layers in order to maintain representational power. We demonstrate that this improves performance and provide an intuition that led to this design. Finally, our approach allows decoupling of the input/output domains from the expressiveness of the transformation, which provides a convenient framework for further analysis. We measure our performance on Imagenet classification, COCO object detection, VOC image segmentation. We evaluate the trade-offs between accuracy, and number of operations measured by multiply-adds (MAdd), as well as the number of parameters. +
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('mobilenet-v2_8xb32_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('mobilenet-v2_8xb32_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------: | :----------------------------------------------------------------------------------------: | +| `mobilenet-v2_8xb32_in1k` | From scratch | 3.50 | 0.32 | 71.86 | 90.42 | [config](mobilenet-v2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.json) | + +## Citation + +```bibtex +@INPROCEEDINGS{8578572, + author={M. {Sandler} and A. {Howard} and M. {Zhu} and A. {Zhmoginov} and L. 
{Chen}}, + booktitle={2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + title={MobileNetV2: Inverted Residuals and Linear Bottlenecks}, + year={2018}, + volume={}, + number={}, + pages={4510-4520}, + doi={10.1109/CVPR.2018.00474} +} +``` diff --git a/configs/mobilenet_v2/metafile.yml b/configs/mobilenet_v2/metafile.yml new file mode 100644 index 0000000..aaa490a --- /dev/null +++ b/configs/mobilenet_v2/metafile.yml @@ -0,0 +1,34 @@ +Collections: + - Name: MobileNet V2 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 300 + Batch Size: 256 + Architecture: + - MobileNet V2 + Paper: + URL: https://arxiv.org/abs/1801.04381 + Title: "MobileNetV2: Inverted Residuals and Linear Bottlenecks" + README: configs/mobilenet_v2/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/mobilenet_v2.py#L101 + Version: v0.15.0 + +Models: + - Name: mobilenet-v2_8xb32_in1k + Metadata: + FLOPs: 319000000 + Parameters: 3500000 + In Collection: MobileNet V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 71.86 + Top 5 Accuracy: 90.42 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth + Config: configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py diff --git a/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py b/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py new file mode 100644 index 0000000..afd2d97 --- /dev/null +++ b/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/mobilenet_v2_1x.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_epochstep.py', + '../_base_/default_runtime.py' +] diff --git a/configs/mobilenet_v3/README.md b/configs/mobilenet_v3/README.md new file mode 100644 index 0000000..833de5b --- /dev/null +++ b/configs/mobilenet_v3/README.md @@ -0,0 +1,99 @@ +# MobileNet V3 + +> [Searching for MobileNetV3](https://arxiv.org/abs/1905.02244) + + + +## Introduction + +**MobileNet V3** is initially described in [the paper](https://arxiv.org/pdf/1905.02244.pdf). Its architecture is found with NAS (network architecture search), it inherits the practical building blocks of V1 and V2, and it adds squeeze-and-excitation (SE) channel attention. The authors create two new MobileNet models for release, MobileNetV3-Large and MobileNetV3-Small, which are targeted at high- and low-resource use cases. These models are then adapted and applied to the tasks of object detection and semantic segmentation. The authors of MobileNet V3 measure its performance on ImageNet classification, COCO object detection, and Cityscapes segmentation. +
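The SE channel attention mentioned above can be illustrated with a minimal sketch (the reduction ratio and activations are illustrative; this is not the backbone implementation used by these configs):

```python
import torch
from torch import nn


class SqueezeExcite(nn.Module):
    """Sketch of the SE channel-attention block used inside MobileNetV3 units."""

    def __init__(self, channels, reduction=4):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction), nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels), nn.Hardsigmoid())

    def forward(self, x):                    # x: (batch, channels, h, w)
        scale = self.fc(x.mean(dim=(2, 3)))  # squeeze: global average pooling
        return x * scale[:, :, None, None]   # excite: reweight the channels


hswish = nn.Hardswish()  # the hard-swish activation used by MobileNetV3
x = torch.randn(1, 40, 28, 28)
print(SqueezeExcite(40)(hswish(x)).shape)  # torch.Size([1, 40, 28, 28])
```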
+ +
+ +## Abstract + +
+ +Show the paper's abstract + +
+We present the next generation of MobileNets based on a combination of complementary search techniques as well as a novel architecture design. MobileNetV3 is tuned to mobile phone CPUs through a combination of hardware-aware network architecture search (NAS) complemented by the NetAdapt algorithm and then subsequently improved through novel architecture advances. This paper starts the exploration of how automated search algorithms and network design can work together to harness complementary approaches improving the overall state of the art. Through this process we create two new MobileNet models for release: MobileNetV3-Large and MobileNetV3-Small which are targeted for high and low resource use cases. These models are then adapted and applied to the tasks of object detection and semantic segmentation. For the task of semantic segmentation (or any dense pixel prediction), we propose a new efficient segmentation decoder Lite Reduced Atrous Spatial Pyramid Pooling (LR-ASPP). We achieve new state of the art results for mobile classification, detection and segmentation. MobileNetV3-Large is 3.2% more accurate on ImageNet classification while reducing latency by 15% compared to MobileNetV2. MobileNetV3-Small is 4.6% more accurate while reducing latency by 5% compared to MobileNetV2. MobileNetV3-Large detection is 25% faster at roughly the same accuracy as MobileNetV2 on COCO detection. MobileNetV3-Large LR-ASPP is 30% faster than MobileNetV2 R-ASPP at similar accuracy for Cityscapes segmentation. +
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('mobilenet-v3-small-050_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('mobilenet-v3-small-050_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/mobilenet_v3/mobilenet-v3-small_8xb128_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/mobilenet_v3/mobilenet-v3-small-050_8xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-small-050_3rdparty_in1k_20221114-e0b86be1.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :--------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------------------: | :--------------------------------------------------------------: | +| `mobilenet-v3-small-050_3rdparty_in1k`\* | From scratch | 1.59 | 0.02 | 57.91 | 80.19 | [config](mobilenet-v3-small-050_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-small-050_3rdparty_in1k_20221114-e0b86be1.pth) | +| `mobilenet-v3-small-075_3rdparty_in1k`\* | From scratch | 2.04 | 0.04 | 65.23 | 85.44 | [config](mobilenet-v3-small-075_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-small-075_3rdparty_in1k_20221114-2011fa76.pth) | +| `mobilenet-v3-small_8xb128_in1k` | From scratch | 2.54 | 0.06 | 66.68 | 86.74 | [config](mobilenet-v3-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-small_8xb128_in1k_20221114-bd1bfcde.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-small_8xb128_in1k_20221114-bd1bfcde.json) | +| `mobilenet-v3-small_3rdparty_in1k`\* | From scratch | 2.54 | 0.06 | 67.66 | 87.41 | [config](mobilenet-v3-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth) | +| `mobilenet-v3-large_8xb128_in1k` | From scratch | 5.48 | 0.23 | 73.49 | 91.31 | [config](mobilenet-v3-large_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-large_8xb128_in1k_20221114-0ed9ed9a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-large_8xb128_in1k_20221114-0ed9ed9a.json) | +| `mobilenet-v3-large_3rdparty_in1k`\* | From scratch | 5.48 | 0.23 | 74.04 | 91.34 | [config](mobilenet-v3-large_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth) | + +*Models with * are converted from the [official repo](https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{Howard_2019_ICCV, + author = {Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and Le, Quoc V. and Adam, Hartwig}, + title = {Searching for MobileNetV3}, + booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, + month = {October}, + year = {2019} +} +``` diff --git a/configs/mobilenet_v3/metafile.yml b/configs/mobilenet_v3/metafile.yml new file mode 100644 index 0000000..53f1653 --- /dev/null +++ b/configs/mobilenet_v3/metafile.yml @@ -0,0 +1,111 @@ +Collections: + - Name: MobileNet V3 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - RMSprop with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 600 + Batch Size: 1024 + Architecture: + - MobileNet V3 + Paper: + URL: https://arxiv.org/abs/1905.02244 + Title: Searching for MobileNetV3 + README: configs/mobilenet_v3/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/mobilenet_v3.py + Version: v0.15.0 + +Models: + - Name: mobilenet-v3-small-050_3rdparty_in1k + Metadata: + FLOPs: 24895000 + Parameters: 1590000 + In Collection: MobileNet V3 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 57.91 + Top 5 Accuracy: 80.19 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-small-050_3rdparty_in1k_20221114-e0b86be1.pth + Config: configs/mobilenet_v3/mobilenet-v3-small-050_8xb128_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_050_lambc-4b7bbe87.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/mobilenetv3.py + - Name: mobilenet-v3-small-075_3rdparty_in1k + Metadata: + FLOPs: 44791000 + Parameters: 2040000 + In Collection: MobileNet V3 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 65.23 + Top 5 Accuracy: 85.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-small-075_3rdparty_in1k_20221114-2011fa76.pth + Config: configs/mobilenet_v3/mobilenet-v3-small-075_8xb128_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_small_075_lambc-384766db.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/mobilenetv3.py + - Name: mobilenet-v3-small_8xb128_in1k + Metadata: + FLOPs: 60000000 + Parameters: 2540000 + In Collection: MobileNet V3 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 66.68 + Top 5 Accuracy: 86.74 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-small_8xb128_in1k_20221114-bd1bfcde.pth + Config: configs/mobilenet_v3/mobilenet-v3-small_8xb128_in1k.py + - Name: mobilenet-v3-small_3rdparty_in1k + Metadata: + FLOPs: 60000000 + Parameters: 2540000 + In Collection: MobileNet V3 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 67.66 + Top 5 Accuracy: 87.41 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth + Config: configs/mobilenet_v3/mobilenet-v3-small_8xb128_in1k.py + Converted From: + Weights: 
https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py + - Name: mobilenet-v3-large_8xb128_in1k + Metadata: + FLOPs: 230000000 + Parameters: 5480000 + In Collection: MobileNet V3 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 73.49 + Top 5 Accuracy: 91.31 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-large_8xb128_in1k_20221114-0ed9ed9a.pth + Config: configs/mobilenet_v3/mobilenet-v3-large_8xb128_in1k.py + - Name: mobilenet-v3-large_3rdparty_in1k + Metadata: + FLOPs: 230000000 + Parameters: 5480000 + In Collection: MobileNet V3 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.04 + Top 5 Accuracy: 91.34 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth + Config: configs/mobilenet_v3/mobilenet-v3-large_8xb128_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv3.py diff --git a/configs/mobilenet_v3/mobilenet-v3-large_8xb128_in1k.py b/configs/mobilenet_v3/mobilenet-v3-large_8xb128_in1k.py new file mode 100644 index 0000000..f5c05ba --- /dev/null +++ b/configs/mobilenet_v3/mobilenet-v3-large_8xb128_in1k.py @@ -0,0 +1,28 @@ +# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification + +_base_ = [ + '../_base_/models/mobilenet_v3/mobilenet_v3_large_imagenet.py', + '../_base_/datasets/imagenet_bs128_mbv3.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type='RMSprop', + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5)) + +param_scheduler = dict(type='StepLR', by_epoch=True, step_size=2, gamma=0.973) + +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (8 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/mobilenet_v3/mobilenet-v3-small-050_8xb128_in1k.py b/configs/mobilenet_v3/mobilenet-v3-small-050_8xb128_in1k.py new file mode 100644 index 0000000..fc14562 --- /dev/null +++ b/configs/mobilenet_v3/mobilenet-v3-small-050_8xb128_in1k.py @@ -0,0 +1,70 @@ +_base_ = [ + '../_base_/models/mobilenet_v3/mobilenet_v3_small_050_imagenet.py', + '../_base_/datasets/imagenet_bs128_mbv3.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict(backbone=dict(norm_cfg=dict(type='BN', eps=1e-5, momentum=0.1))) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='AutoAugment', + policies='imagenet', + hparams=dict(pad_val=[round(x) for x in [103.53, 116.28, 123.675]])), + dict( + type='RandomErasing', + erase_prob=0.2, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type='RMSprop', + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5)) + +param_scheduler = dict(type='StepLR', by_epoch=True, step_size=2, gamma=0.973) + +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=10) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (8 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/mobilenet_v3/mobilenet-v3-small-075_8xb128_in1k.py b/configs/mobilenet_v3/mobilenet-v3-small-075_8xb128_in1k.py new file mode 100644 index 0000000..464b7cb --- /dev/null +++ b/configs/mobilenet_v3/mobilenet-v3-small-075_8xb128_in1k.py @@ -0,0 +1,68 @@ +_base_ = [ + '../_base_/models/mobilenet_v3/mobilenet_v3_small_075_imagenet.py', + '../_base_/datasets/imagenet_bs128_mbv3.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict(backbone=dict(norm_cfg=dict(type='BN', eps=1e-5, momentum=0.1))) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='AutoAugment', + policies='imagenet', + hparams=dict(pad_val=[round(x) for x in [103.53, 116.28, 123.675]])), + dict( + type='RandomErasing', + erase_prob=0.2, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type='RMSprop', + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5)) + +param_scheduler = dict(type='StepLR', by_epoch=True, step_size=2, gamma=0.973) + +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=10) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/mobilenet_v3/mobilenet-v3-small_8xb128_in1k.py b/configs/mobilenet_v3/mobilenet-v3-small_8xb128_in1k.py new file mode 100644 index 0000000..06b0a32 --- /dev/null +++ b/configs/mobilenet_v3/mobilenet-v3-small_8xb128_in1k.py @@ -0,0 +1,28 @@ +# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification + +_base_ = [ + '../_base_/models/mobilenet_v3/mobilenet_v3_small_imagenet.py', + '../_base_/datasets/imagenet_bs128_mbv3.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type='RMSprop', + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5)) + +param_scheduler = dict(type='StepLR', by_epoch=True, step_size=2, gamma=0.973) + +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (8 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/mobilenet_v3/mobilenet-v3-small_8xb16_cifar10.py b/configs/mobilenet_v3/mobilenet-v3-small_8xb16_cifar10.py new file mode 100644 index 0000000..4cfaa2f --- /dev/null +++ b/configs/mobilenet_v3/mobilenet-v3-small_8xb16_cifar10.py @@ -0,0 +1,15 @@ +_base_ = [ + '../_base_/models/mobilenet_v3/mobilenet_v3_small_cifar.py', + '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] + +# schedule settings +param_scheduler = dict( + type='MultiStepLR', + by_epoch=True, + milestones=[120, 170], + gamma=0.1, +) + +train_cfg = dict(by_epoch=True, max_epochs=200) diff --git a/configs/mobileone/README.md b/configs/mobileone/README.md new file mode 100644 index 0000000..e753aff --- /dev/null +++ b/configs/mobileone/README.md @@ -0,0 +1,98 @@ +# MobileOne + +> [An Improved One millisecond Mobile Backbone](https://arxiv.org/abs/2206.04040) + + + +## Introduction + +MobileOne is proposed by Apple and is based on reparameterization. On Apple chips, the model reaches a top-1 accuracy close to 0.76 on ImageNet while keeping latency under 1 ms. Its main improvements over [RepVGG](../repvgg) are the following: + +- Reparameterization uses depthwise and pointwise convolutions instead of normal convolutions (see the folding sketch after this list). +- The residual structure, which is unfriendly to memory access, is removed. +
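The reparameterization in the first bullet boils down to folding each convolution + BatchNorm branch into a single convolution at inference time; the parallel branches are then summed into one kernel. A minimal sketch of the conv-BN folding step, not the MobileOne implementation used by these configs:

```python
import torch
from torch import nn


@torch.no_grad()
def fuse_conv_bn(conv: nn.Conv2d, bn: nn.BatchNorm2d) -> nn.Conv2d:
    """Fold a conv + BatchNorm pair into one conv for inference (sketch)."""
    fused = nn.Conv2d(conv.in_channels, conv.out_channels, conv.kernel_size,
                      conv.stride, conv.padding, groups=conv.groups, bias=True)
    std = (bn.running_var + bn.eps).sqrt()
    fused.weight.copy_(conv.weight * (bn.weight / std)[:, None, None, None])
    bias = conv.bias if conv.bias is not None else torch.zeros(conv.out_channels)
    fused.bias.copy_(bn.bias + (bias - bn.running_mean) * bn.weight / std)
    return fused


conv = nn.Conv2d(8, 8, 3, padding=1, groups=8, bias=False)  # a depthwise branch
bn = nn.BatchNorm2d(8)
with torch.no_grad():  # give BN non-trivial statistics so the check is meaningful
    bn.running_mean.normal_()
    bn.running_var.uniform_(0.5, 1.5)
    bn.weight.normal_()
    bn.bias.normal_()
conv.eval(), bn.eval()
x = torch.randn(1, 8, 16, 16)
with torch.no_grad():
    print(torch.allclose(fuse_conv_bn(conv, bn)(x), bn(conv(x)), atol=1e-5))  # True
```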
+ +
+ +## Abstract + +
+ +Show the paper's abstract + +
+Efficient neural network backbones for mobile devices are often optimized for metrics such as FLOPs or parameter count. However, these metrics may not correlate well with latency of the network when deployed on a mobile device. Therefore, we perform extensive analysis of different metrics by deploying several mobile-friendly networks on a mobile device. We identify and analyze architectural and optimization bottlenecks in recent efficient neural networks and provide ways to mitigate these bottlenecks. To this end, we design an efficient backbone MobileOne, with variants achieving an inference time under 1 ms on an iPhone12 with 75.9% top-1 accuracy on ImageNet. We show that MobileOne achieves state-of-the-art performance within the efficient architectures while being many times faster on mobile. Our best model obtains similar performance on ImageNet as MobileFormer while being 38x faster. Our model obtains 2.3% better top-1 accuracy on ImageNet than EfficientNet at similar latency. Furthermore, we show that our model generalizes to multiple tasks - image classification, object detection, and semantic segmentation with significant improvements in latency and accuracy as compared to existing efficient architectures when deployed on a mobile device. +
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('mobileone-s0_8xb32_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('mobileone-s0_8xb32_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/mobileone/mobileone-s0_8xb32_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/mobileone/mobileone-s0_8xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s0_8xb32_in1k_20221110-0bc94952.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------: | :----------------------------------------------------------------------------------------: | +| `mobileone-s0_8xb32_in1k` | From scratch | 2.08 | 0.27 | 71.34 | 89.87 | [config](mobileone-s0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s0_8xb32_in1k_20221110-0bc94952.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s0_8xb32_in1k_20221110-0bc94952.json) | +| `mobileone-s1_8xb32_in1k` | From scratch | 4.76 | 0.82 | 75.72 | 92.54 | [config](mobileone-s1_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s1_8xb32_in1k_20221110-ceeef467.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s1_8xb32_in1k_20221110-ceeef467.json) | +| `mobileone-s2_8xb32_in1k` | From scratch | 7.81 | 1.30 | 77.37 | 93.34 | [config](mobileone-s2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s2_8xb32_in1k_20221110-9c7ecb97.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s2_8xb32_in1k_20221110-9c7ecb97.json) | +| `mobileone-s3_8xb32_in1k` | From scratch | 10.08 | 1.89 | 78.06 | 93.83 | [config](mobileone-s3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s3_8xb32_in1k_20221110-c95eb3bf.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s3_8xb32_in1k_20221110-c95eb3bf.json) | +| `mobileone-s4_8xb32_in1k` | From scratch | 14.84 | 2.98 | 79.69 | 94.46 | [config](mobileone-s4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s4_8xb32_in1k_20221110-28d888cb.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s4_8xb32_in1k_20221110-28d888cb.json) | + +## Citation + +```bibtex +@article{mobileone2022, + title={An Improved One millisecond Mobile Backbone}, + author={Vasu, Pavan Kumar Anasosalu and Gabriel, James and Zhu, Jeff and Tuzel, Oncel and Ranjan, Anurag}, + journal={arXiv preprint arXiv:2206.04040}, + year={2022} +} +``` diff --git a/configs/mobileone/deploy/mobileone-s0_deploy_8xb32_in1k.py 
b/configs/mobileone/deploy/mobileone-s0_deploy_8xb32_in1k.py new file mode 100644 index 0000000..145f3f4 --- /dev/null +++ b/configs/mobileone/deploy/mobileone-s0_deploy_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = ['../mobileone-s0_8xb32_in1k.py'] + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/mobileone/deploy/mobileone-s1_deploy_8xb32_in1k.py b/configs/mobileone/deploy/mobileone-s1_deploy_8xb32_in1k.py new file mode 100644 index 0000000..8602c31 --- /dev/null +++ b/configs/mobileone/deploy/mobileone-s1_deploy_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = ['../mobileone-s1_8xb32_in1k.py'] + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/mobileone/deploy/mobileone-s2_deploy_8xb32_in1k.py b/configs/mobileone/deploy/mobileone-s2_deploy_8xb32_in1k.py new file mode 100644 index 0000000..97aaddd --- /dev/null +++ b/configs/mobileone/deploy/mobileone-s2_deploy_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = ['../mobileone-s2_8xb32_in1k.py'] + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/mobileone/deploy/mobileone-s3_deploy_8xb32_in1k.py b/configs/mobileone/deploy/mobileone-s3_deploy_8xb32_in1k.py new file mode 100644 index 0000000..0d335a7 --- /dev/null +++ b/configs/mobileone/deploy/mobileone-s3_deploy_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = ['../mobileone-s3_8xb32_in1k.py'] + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/mobileone/deploy/mobileone-s4_deploy_8xb32_in1k.py b/configs/mobileone/deploy/mobileone-s4_deploy_8xb32_in1k.py new file mode 100644 index 0000000..b82f5a9 --- /dev/null +++ b/configs/mobileone/deploy/mobileone-s4_deploy_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = ['../mobileone-s4_8xb32_in1k.py'] + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/mobileone/metafile.yml b/configs/mobileone/metafile.yml new file mode 100644 index 0000000..70370da --- /dev/null +++ b/configs/mobileone/metafile.yml @@ -0,0 +1,83 @@ +Collections: + - Name: MobileOne + Metadata: + Training Data: ImageNet-1k + Architecture: + - re-parameterization Convolution + - VGG-style Neural Network + - Depthwise Convolution + - Pointwise Convolution + Paper: + URL: https://arxiv.org/abs/2206.04040 + Title: 'An Improved One millisecond Mobile Backbone' + README: configs/mobileone/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc1/configs/mobileone/metafile.yml + Version: v1.0.0rc1 + +Models: + - Name: mobileone-s0_8xb32_in1k + In Collection: MobileOne + Config: configs/mobileone/mobileone-s0_8xb32_in1k.py + Metadata: + FLOPs: 274136576 # 0.27G + Parameters: 2078504 # 2.08M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 71.34 + Top 5 Accuracy: 89.87 + Weights: https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s0_8xb32_in1k_20221110-0bc94952.pth + - Name: mobileone-s1_8xb32_in1k + In Collection: MobileOne + Config: configs/mobileone/mobileone-s1_8xb32_in1k.py + Metadata: + FLOPs: 823839744 # 8.6G + Parameters: 4764840 # 4.82M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 75.72 + Top 5 Accuracy: 92.54 + Weights: https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s1_8xb32_in1k_20221110-ceeef467.pth + - Name: mobileone-s2_8xb32_in1k + In Collection: MobileOne + Config: configs/mobileone/mobileone-s2_8xb32_in1k.py + Metadata: + FLOPs: 1296478848 + Parameters: 7808168 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 77.37 + Top 5 Accuracy: 93.34 + 
Weights: https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s2_8xb32_in1k_20221110-9c7ecb97.pth + - Name: mobileone-s3_8xb32_in1k + In Collection: MobileOne + Config: configs/mobileone/mobileone-s3_8xb32_in1k.py + Metadata: + FLOPs: 1893842944 + Parameters: 10078312 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 78.06 + Top 5 Accuracy: 93.83 + Weights: https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s3_8xb32_in1k_20221110-c95eb3bf.pth + - Name: mobileone-s4_8xb32_in1k + In Collection: MobileOne + Config: configs/mobileone/mobileone-s4_8xb32_in1k.py + Metadata: + FLOPs: 2979222528 + Parameters: 14838352 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 79.69 + Top 5 Accuracy: 94.46 + Weights: https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s4_8xb32_in1k_20221110-28d888cb.pth diff --git a/configs/mobileone/mobileone-s0_8xb32_in1k.py b/configs/mobileone/mobileone-s0_8xb32_in1k.py new file mode 100644 index 0000000..be56b86 --- /dev/null +++ b/configs/mobileone/mobileone-s0_8xb32_in1k.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/mobileone/mobileone_s0.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr_coswd_300e.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.)) + +val_dataloader = dict(batch_size=256) +test_dataloader = dict(batch_size=256) + +custom_hooks = [ + dict( + type='EMAHook', + momentum=5e-4, + priority='ABOVE_NORMAL', + update_buffers=True) +] diff --git a/configs/mobileone/mobileone-s1_8xb32_in1k.py b/configs/mobileone/mobileone-s1_8xb32_in1k.py new file mode 100644 index 0000000..0bc3fb0 --- /dev/null +++ b/configs/mobileone/mobileone-s1_8xb32_in1k.py @@ -0,0 +1,60 @@ +_base_ = [ + '../_base_/models/mobileone/mobileone_s1.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr_coswd_300e.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.)) + +val_dataloader = dict(batch_size=256) +test_dataloader = dict(batch_size=256) + +bgr_mean = _base_.data_preprocessor['mean'][::-1] +base_train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=7, + magnitude_std=0.5, + hparams=dict(pad_val=[round(x) for x in bgr_mean])), + dict(type='PackInputs') +] + +import copy # noqa: E402 + +# modify start epoch's RandomResizedCrop.scale to 160 +train_pipeline_1e = copy.deepcopy(base_train_pipeline) +train_pipeline_1e[1]['scale'] = 160 +train_pipeline_1e[3]['magnitude_level'] *= 0.1 +_base_.train_dataloader.dataset.pipeline = train_pipeline_1e + +# modify 37 epoch's RandomResizedCrop.scale to 192 +train_pipeline_37e = copy.deepcopy(base_train_pipeline) +train_pipeline_37e[1]['scale'] = 192 +train_pipeline_37e[3]['magnitude_level'] *= 0.2 + +# modify 112 epoch's RandomResizedCrop.scale to 224 +train_pipeline_112e = copy.deepcopy(base_train_pipeline) +train_pipeline_112e[1]['scale'] = 224 +train_pipeline_112e[3]['magnitude_level'] *= 0.3 + +custom_hooks = [ + dict( + type='SwitchRecipeHook', + schedule=[ + dict(action_epoch=37, pipeline=train_pipeline_37e), +
dict(action_epoch=112, pipeline=train_pipeline_112e), + ]), + dict( + type='EMAHook', + momentum=5e-4, + priority='ABOVE_NORMAL', + update_buffers=True) +] diff --git a/configs/mobileone/mobileone-s2_8xb32_in1k.py b/configs/mobileone/mobileone-s2_8xb32_in1k.py new file mode 100644 index 0000000..a7d4aae --- /dev/null +++ b/configs/mobileone/mobileone-s2_8xb32_in1k.py @@ -0,0 +1,65 @@ +_base_ = [ + '../_base_/models/mobileone/mobileone_s2.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr_coswd_300e.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.)) + +val_dataloader = dict(batch_size=256) +test_dataloader = dict(batch_size=256) + +import copy # noqa: E402 + +bgr_mean = _base_.data_preprocessor['mean'][::-1] +base_train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=7, + magnitude_std=0.5, + hparams=dict(pad_val=[round(x) for x in bgr_mean])), + dict(type='PackInputs') +] + +# modify start epoch RandomResizedCrop.scale to 160 +# and RA.magnitude_level * 0.3 +train_pipeline_1e = copy.deepcopy(base_train_pipeline) +train_pipeline_1e[1]['scale'] = 160 +train_pipeline_1e[3]['magnitude_level'] *= 0.3 +_base_.train_dataloader.dataset.pipeline = train_pipeline_1e + +import copy # noqa: E402 + +# modify 137 epoch's RandomResizedCrop.scale to 192 +# and RA.magnitude_level * 0.7 +train_pipeline_37e = copy.deepcopy(base_train_pipeline) +train_pipeline_37e[1]['scale'] = 192 +train_pipeline_37e[3]['magnitude_level'] *= 0.7 + +# modify 112 epoch's RandomResizedCrop.scale to 224 +# and RA.magnitude_level * 1.0 +train_pipeline_112e = copy.deepcopy(base_train_pipeline) +train_pipeline_112e[1]['scale'] = 224 +train_pipeline_112e[3]['magnitude_level'] *= 1.0 + +custom_hooks = [ + dict( + type='SwitchRecipeHook', + schedule=[ + dict(action_epoch=37, pipeline=train_pipeline_37e), + dict(action_epoch=112, pipeline=train_pipeline_112e), + ]), + dict( + type='EMAHook', + momentum=5e-4, + priority='ABOVE_NORMAL', + update_buffers=True) +] diff --git a/configs/mobileone/mobileone-s3_8xb32_in1k.py b/configs/mobileone/mobileone-s3_8xb32_in1k.py new file mode 100644 index 0000000..2be0dc7 --- /dev/null +++ b/configs/mobileone/mobileone-s3_8xb32_in1k.py @@ -0,0 +1,65 @@ +_base_ = [ + '../_base_/models/mobileone/mobileone_s3.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr_coswd_300e.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.)) + +val_dataloader = dict(batch_size=256) +test_dataloader = dict(batch_size=256) + +import copy # noqa: E402 + +bgr_mean = _base_.data_preprocessor['mean'][::-1] +base_train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=7, + magnitude_std=0.5, + hparams=dict(pad_val=[round(x) for x in bgr_mean])), + dict(type='PackInputs') +] + +# modify start epoch RandomResizedCrop.scale to 160 +# and RA.magnitude_level * 0.3 +train_pipeline_1e = copy.deepcopy(base_train_pipeline) 
+train_pipeline_1e[1]['scale'] = 160 +train_pipeline_1e[3]['magnitude_level'] *= 0.3 +_base_.train_dataloader.dataset.pipeline = train_pipeline_1e + +import copy # noqa: E402 + +# modify 137 epoch's RandomResizedCrop.scale to 192 +# and RA.magnitude_level * 0.7 +train_pipeline_37e = copy.deepcopy(base_train_pipeline) +train_pipeline_37e[1]['scale'] = 192 +train_pipeline_37e[3]['magnitude_level'] *= 0.7 + +# modify 112 epoch's RandomResizedCrop.scale to 224 +# and RA.magnitude_level * 1.0 +train_pipeline_112e = copy.deepcopy(base_train_pipeline) +train_pipeline_112e[1]['scale'] = 224 +train_pipeline_112e[3]['magnitude_level'] *= 1.0 + +custom_hooks = [ + dict( + type='SwitchRecipeHook', + schedule=[ + dict(action_epoch=37, pipeline=train_pipeline_37e), + dict(action_epoch=112, pipeline=train_pipeline_112e), + ]), + dict( + type='EMAHook', + momentum=5e-4, + priority='ABOVE_NORMAL', + update_buffers=True) +] diff --git a/configs/mobileone/mobileone-s4_8xb32_in1k.py b/configs/mobileone/mobileone-s4_8xb32_in1k.py new file mode 100644 index 0000000..49356f0 --- /dev/null +++ b/configs/mobileone/mobileone-s4_8xb32_in1k.py @@ -0,0 +1,63 @@ +_base_ = [ + '../_base_/models/mobileone/mobileone_s4.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr_coswd_300e.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(paramwise_cfg=dict(norm_decay_mult=0.)) + +val_dataloader = dict(batch_size=256) +test_dataloader = dict(batch_size=256) + +bgr_mean = _base_.data_preprocessor['mean'][::-1] +base_train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=7, + magnitude_std=0.5, + hparams=dict(pad_val=[round(x) for x in bgr_mean])), + dict(type='PackInputs') +] + +import copy # noqa: E402 + +# modify start epoch RandomResizedCrop.scale to 160 +# and RA.magnitude_level * 0.3 +train_pipeline_1e = copy.deepcopy(base_train_pipeline) +train_pipeline_1e[1]['scale'] = 160 +train_pipeline_1e[3]['magnitude_level'] *= 0.3 +_base_.train_dataloader.dataset.pipeline = train_pipeline_1e + +# modify 137 epoch's RandomResizedCrop.scale to 192 +# and RA.magnitude_level * 0.7 +train_pipeline_37e = copy.deepcopy(base_train_pipeline) +train_pipeline_37e[1]['scale'] = 192 +train_pipeline_37e[3]['magnitude_level'] *= 0.7 + +# modify 112 epoch's RandomResizedCrop.scale to 224 +# and RA.magnitude_level * 1.0 +train_pipeline_112e = copy.deepcopy(base_train_pipeline) +train_pipeline_112e[1]['scale'] = 224 +train_pipeline_112e[3]['magnitude_level'] *= 1.0 + +custom_hooks = [ + dict( + type='SwitchRecipeHook', + schedule=[ + dict(action_epoch=37, pipeline=train_pipeline_37e), + dict(action_epoch=112, pipeline=train_pipeline_112e), + ]), + dict( + type='EMAHook', + momentum=5e-4, + priority='ABOVE_NORMAL', + update_buffers=True) +] diff --git a/configs/mobilevit/README.md b/configs/mobilevit/README.md new file mode 100644 index 0000000..fa0960d --- /dev/null +++ b/configs/mobilevit/README.md @@ -0,0 +1,96 @@ +# MobileViT + +> [MobileViT Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) + + + +## Introduction + +**MobileViT** aims at introducing a light-weight network, which takes the advantages of both ViTs and CNNs, uses the `InvertedResidual` blocks in 
[MobileNetV2](../mobilenet_v2/README.md) and the `MobileViTBlock`, which adapts [ViT](../vision_transformer/README.md) transformer blocks, to build a standard 5-stage model structure. + +The MobileViTBlock treats transformers as convolutions to compute a global representation and combines them with ordinary convolution layers for local representation, yielding a block with a global receptive field. Unlike ViT, it needs no extra class token or position embeddings to learn relative positions. Without any position embeddings, MobileViT can benefit from multi-scale inputs during training. + +The paper also puts forward a multi-scale training strategy that dynamically adjusts the batch size according to the image size, improving both training efficiency and final performance. + +MobileViT has also proven effective in downstream tasks such as object detection and segmentation. + +
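Because MobileViT drops position embeddings entirely, the same weights can be fed inputs of several resolutions. The snippet below is a minimal sketch of that property (our illustration, not part of the original configs); it assumes the converted `mobilevit-small_3rdparty_in1k` model listed further below and input sizes whose feature maps divide evenly for the 2x2 patch unfolding.

```python
import torch
from mmpretrain import get_model

# Build MobileViT-S; set pretrained=True to load the converted checkpoint.
model = get_model('mobilevit-small_3rdparty_in1k', pretrained=False)
model.eval()

# No position embeddings, so different input resolutions work directly.
with torch.no_grad():
    for size in (256, 320, 384):
        feats = model.extract_feat(torch.rand(1, 3, size, size))
        print(size, [tuple(f.shape) for f in feats])
```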
+ +
+ +## Abstract + +
+ +Light-weight convolutional neural networks (CNNs) are the de-facto for mobile vision tasks. Their spatial inductive biases allow them to learn representations with fewer parameters across different vision tasks. However, these networks are spatially local. To learn global representations, self-attention-based vision transformers (ViTs) have been adopted. Unlike CNNs, ViTs are heavy-weight. In this paper, we ask the following question: is it possible to combine the strengths of CNNs and ViTs to build a light-weight and low latency network for mobile vision tasks? Towards this end, we introduce MobileViT, a light-weight and general-purpose vision transformer for mobile devices. MobileViT presents a different perspective for the global processing of information with transformers, i.e., transformers as convolutions. Our results show that MobileViT significantly outperforms CNN- and ViT-based networks across different tasks and datasets. On the ImageNet-1k dataset, MobileViT achieves top-1 accuracy of 78.4% with about 6 million parameters, which is 3.2% and 6.2% more accurate than MobileNetv3 (CNN-based) and DeIT (ViT-based) for a similar number of parameters. On the MS-COCO object detection task, MobileViT is 5.7% more accurate than MobileNetv3 for a similar number of parameters. +
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('mobilevit-small_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('mobilevit-small_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/mobilevit/mobilevit-small_8xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/mobilevit/mobilevit-small_3rdparty_in1k_20221018-cb4f741c.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------------: | :------------------------------------------------------------------------: | +| `mobilevit-small_3rdparty_in1k`\* | From scratch | 5.58 | 2.03 | 78.25 | 94.09 | [config](mobilevit-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilevit/mobilevit-small_3rdparty_in1k_20221018-cb4f741c.pth) | +| `mobilevit-xsmall_3rdparty_in1k`\* | From scratch | 2.32 | 1.05 | 74.75 | 92.32 | [config](mobilevit-xsmall_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilevit/mobilevit-xsmall_3rdparty_in1k_20221018-be39a6e7.pth) | +| `mobilevit-xxsmall_3rdparty_in1k`\* | From scratch | 1.27 | 0.42 | 69.02 | 88.91 | [config](mobilevit-xxsmall_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mobilevit/mobilevit-xxsmall_3rdparty_in1k_20221018-77835605.pth) | + +*Models with * are converted from the [official repo](https://github.com/apple/ml-cvnets). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{mehta2021mobilevit, + title={MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer}, + author={Mehta, Sachin and Rastegari, Mohammad}, + journal={arXiv preprint arXiv:2110.02178}, + year={2021} +} +``` diff --git a/configs/mobilevit/metafile.yml b/configs/mobilevit/metafile.yml new file mode 100644 index 0000000..15fd84a --- /dev/null +++ b/configs/mobilevit/metafile.yml @@ -0,0 +1,60 @@ +Collections: + - Name: MobileViT + Metadata: + Training Data: ImageNet-1k + Architecture: + - MobileViT Block + Paper: + URL: https://arxiv.org/abs/2110.02178 + Title: MobileViT Light-weight, General-purpose, and Mobile-friendly Vision Transformer + README: configs/mobilevit/README.md + +Models: + - Name: mobilevit-small_3rdparty_in1k + Metadata: + FLOPs: 2030000000 + Parameters: 5580000 + In Collection: MobileViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.25 + Top 5 Accuracy: 94.09 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilevit/mobilevit-small_3rdparty_in1k_20221018-cb4f741c.pth + Config: configs/mobilevit/mobilevit-small_8xb128_in1k.py + Converted From: + Weights: https://docs-assets.developer.apple.com/ml-research/models/cvnets/classification/mobilevit_s.pt + Code: https://github.com/apple/ml-cvnets + - Name: mobilevit-xsmall_3rdparty_in1k + Metadata: + FLOPs: 1050000000 + Parameters: 2320000 + In Collection: MobileViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.75 + Top 5 Accuracy: 92.32 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilevit/mobilevit-xsmall_3rdparty_in1k_20221018-be39a6e7.pth + Config: configs/mobilevit/mobilevit-xsmall_8xb128_in1k.py + Converted From: + Weights: https://docs-assets.developer.apple.com/ml-research/models/cvnets/classification/mobilevit_xs.pt + Code: https://github.com/apple/ml-cvnets + - Name: mobilevit-xxsmall_3rdparty_in1k + Metadata: + FLOPs: 420000000 + Parameters: 1270000 + In Collection: MobileViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 69.02 + Top 5 Accuracy: 88.91 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/mobilevit/mobilevit-xxsmall_3rdparty_in1k_20221018-77835605.pth + Config: configs/mobilevit/mobilevit-xxsmall_8xb128_in1k.py + Converted From: + Weights: https://docs-assets.developer.apple.com/ml-research/models/cvnets/classification/mobilevit_xxs.pt + Code: https://github.com/apple/ml-cvnets diff --git a/configs/mobilevit/mobilevit-small_8xb128_in1k.py b/configs/mobilevit/mobilevit-small_8xb128_in1k.py new file mode 100644 index 0000000..5969396 --- /dev/null +++ b/configs/mobilevit/mobilevit-small_8xb128_in1k.py @@ -0,0 +1,30 @@ +_base_ = [ + '../_base_/models/mobilevit/mobilevit_s.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/default_runtime.py', + '../_base_/schedules/imagenet_bs256.py', +] + +# no normalize for original implements +data_preprocessor = dict( + # RGB format normalization parameters + mean=[0, 0, 0], + std=[255, 255, 255], + # use bgr directly + to_rgb=False, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=288, edge='short'), + dict(type='CenterCrop', crop_size=256), + dict(type='PackInputs'), +] + +train_dataloader = dict(batch_size=128) + +val_dataloader = dict( + batch_size=128, + dataset=dict(pipeline=test_pipeline), +) +test_dataloader = val_dataloader 
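As a side note on the `data_preprocessor` in the config above: with `mean=[0, 0, 0]`, `std=[255, 255, 255]` and `to_rgb=False`, preprocessing reduces to scaling BGR pixels into `[0, 1]` with no per-channel normalization, matching the original implementation. A rough NumPy sketch of the equivalent transform (for illustration only; the helper name is ours and not part of the configs):

```python
import numpy as np

def mobilevit_preprocess(img_bgr: np.ndarray) -> np.ndarray:
    """Approximate the data_preprocessor above: scale to [0, 1], keep BGR order."""
    x = img_bgr.astype(np.float32)
    x = (x - 0.0) / 255.0            # mean=[0, 0, 0], std=[255, 255, 255]
    return x.transpose(2, 0, 1)      # HWC -> CHW, channel order unchanged
```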
diff --git a/configs/mobilevit/mobilevit-xsmall_8xb128_in1k.py b/configs/mobilevit/mobilevit-xsmall_8xb128_in1k.py new file mode 100644 index 0000000..557892b --- /dev/null +++ b/configs/mobilevit/mobilevit-xsmall_8xb128_in1k.py @@ -0,0 +1,30 @@ +_base_ = [ + '../_base_/models/mobilevit/mobilevit_xs.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/default_runtime.py', + '../_base_/schedules/imagenet_bs256.py', +] + +# no normalize for original implements +data_preprocessor = dict( + # RGB format normalization parameters + mean=[0, 0, 0], + std=[255, 255, 255], + # use bgr directly + to_rgb=False, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=288, edge='short'), + dict(type='CenterCrop', crop_size=256), + dict(type='PackInputs'), +] + +train_dataloader = dict(batch_size=128) + +val_dataloader = dict( + batch_size=128, + dataset=dict(pipeline=test_pipeline), +) +test_dataloader = val_dataloader diff --git a/configs/mobilevit/mobilevit-xxsmall_8xb128_in1k.py b/configs/mobilevit/mobilevit-xxsmall_8xb128_in1k.py new file mode 100644 index 0000000..74aea82 --- /dev/null +++ b/configs/mobilevit/mobilevit-xxsmall_8xb128_in1k.py @@ -0,0 +1,30 @@ +_base_ = [ + '../_base_/models/mobilevit/mobilevit_xxs.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/default_runtime.py', + '../_base_/schedules/imagenet_bs256.py', +] + +# no normalize for original implements +data_preprocessor = dict( + # RGB format normalization parameters + mean=[0, 0, 0], + std=[255, 255, 255], + # use bgr directly + to_rgb=False, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=288, edge='short'), + dict(type='CenterCrop', crop_size=256), + dict(type='PackInputs'), +] + +train_dataloader = dict(batch_size=128) + +val_dataloader = dict( + batch_size=128, + dataset=dict(pipeline=test_pipeline), +) +test_dataloader = val_dataloader diff --git a/configs/mocov2/README.md b/configs/mocov2/README.md new file mode 100644 index 0000000..cb0ae4e --- /dev/null +++ b/configs/mocov2/README.md @@ -0,0 +1,85 @@ +# MoCoV2 + +> [Improved Baselines with Momentum Contrastive Learning](https://arxiv.org/abs/2003.04297) + + + +## Abstract + +Contrastive unsupervised learning has recently shown encouraging progress, e.g., in Momentum Contrast (MoCo) and SimCLR. In this note, we verify the effectiveness of two of SimCLR’s design improvements by implementing them in the MoCo framework. With simple modifications to MoCo—namely, using an MLP projection head and more data augmentation—we establish stronger baselines that outperform SimCLR and do not require large training batches. We hope this will make state-of-the-art unsupervised learning research more accessible. + +
+ +
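The two SimCLR-style modifications verified in the note, an MLP projection head and stronger augmentation, correspond to the `MoCoV2Neck` and the `imagenet_bs32_mocov2` data pipeline used in the pre-training config below. As a rough sketch (ours, reusing the 2048 -> 2048 -> 128 sizes from that config), the projection head amounts to:

```python
import torch.nn as nn

# 2-layer MLP projection head in the spirit of MoCo v2, replacing the single
# linear projection of MoCo v1; sizes mirror
# MoCoV2Neck(in_channels=2048, hid_channels=2048, out_channels=128).
projection_head = nn.Sequential(
    nn.AdaptiveAvgPool2d(1),   # global average pooling (with_avg_pool=True)
    nn.Flatten(1),
    nn.Linear(2048, 2048),
    nn.ReLU(inplace=True),
    nn.Linear(2048, 128),
)
```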
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('resnet50_mocov2-pre_8xb32-linear-steplr-100e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('mocov2_resnet50_8xb32-coslr-200e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/mocov2/benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb32-steplr-100e_in1k/resnet50_linear-8xb32-steplr-100e_in1k_20220825-994c4128.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :-------------------------------------- | :--------: | :-------: | :------------------------------------------------: | :------------------------------------------------------------------------------------------: | +| `mocov2_resnet50_8xb32-coslr-200e_in1k` | 55.93 | 4.11 | [config](mocov2_resnet50_8xb32-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k/mocov2_resnet50_8xb32-coslr-200e_in1k_20220825-b6d23c86.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k/mocov2_resnet50_8xb32-coslr-200e_in1k_20220825-b6d23c86.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `resnet50_mocov2-pre_8xb32-linear-steplr-100e_in1k` | [MOCOV2](https://download.openmmlab.com/mmselfsup/1.x/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k/mocov2_resnet50_8xb32-coslr-200e_in1k_20220825-b6d23c86.pth) | 25.56 | 4.11 | 67.50 | [config](benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb32-steplr-100e_in1k/resnet50_linear-8xb32-steplr-100e_in1k_20220825-994c4128.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb32-steplr-100e_in1k/resnet50_linear-8xb32-steplr-100e_in1k_20220825-994c4128.json) | + +## Citation + +```bibtex +@article{chen2020improved, + title={Improved baselines with momentum contrastive learning}, + author={Chen, Xinlei and Fan, Haoqi and Girshick, Ross and He, Kaiming}, + journal={arXiv preprint arXiv:2003.04297}, + year={2020} +} +``` diff --git a/configs/mocov2/benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py b/configs/mocov2/benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py new file mode 100644 index 0000000..37795d9 --- /dev/null +++ b/configs/mocov2/benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py @@ -0,0 +1,20 @@ 
+_base_ = [ + '../../_base_/models/resnet50.py', + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_sgd_steplr_100e.py', + '../../_base_/default_runtime.py', +] + +model = dict( + backbone=dict( + frozen_stages=4, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.'))) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=30., momentum=0.9, weight_decay=0.)) + +# runtime settings +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) diff --git a/configs/mocov2/metafile.yml b/configs/mocov2/metafile.yml new file mode 100644 index 0000000..4440db4 --- /dev/null +++ b/configs/mocov2/metafile.yml @@ -0,0 +1,45 @@ +Collections: + - Name: MoCoV2 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + - MoCo + Paper: + Title: Improved Baselines with Momentum Contrastive Learning + URL: https://arxiv.org/abs/2003.04297 + README: configs/mocov2/README.md + +Models: + - Name: mocov2_resnet50_8xb32-coslr-200e_in1k + Metadata: + Epochs: 200 + Batch Size: 256 + FLOPs: 4109364224 + Parameters: 55933312 + Training Data: ImageNet-1k + In Collection: MoCoV2 + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k/mocov2_resnet50_8xb32-coslr-200e_in1k_20220825-b6d23c86.pth + Config: configs/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k.py + Downstream: + - resnet50_mocov2-pre_8xb32-linear-steplr-100e_in1k + - Name: resnet50_mocov2-pre_8xb32-linear-steplr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 256 + FLOPs: 4109464576 + Parameters: 25557032 + Training Data: ImageNet-1k + In Collection: MoCoV2 + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 67.5 + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb32-steplr-100e_in1k/resnet50_linear-8xb32-steplr-100e_in1k_20220825-994c4128.pth + Config: configs/mocov2/benchmarks/resnet50_8xb32-linear-steplr-100e_in1k.py diff --git a/configs/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k.py b/configs/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k.py new file mode 100644 index 0000000..8037d07 --- /dev/null +++ b/configs/mocov2/mocov2_resnet50_8xb32-coslr-200e_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_mocov2.py', + '../_base_/schedules/imagenet_sgd_coslr_200e.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='MoCo', + queue_len=65536, + feat_dim=128, + momentum=0.001, + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='BN'), + zero_init_residual=False), + neck=dict( + type='MoCoV2Neck', + in_channels=2048, + hid_channels=2048, + out_channels=128, + with_avg_pool=True), + head=dict( + type='ContrastiveHead', + loss=dict(type='CrossEntropyLoss'), + temperature=0.2)) + +# only keeps the latest 3 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=256) diff --git a/configs/mocov3/README.md b/configs/mocov3/README.md new file mode 100644 index 0000000..a9477e8 --- /dev/null +++ b/configs/mocov3/README.md @@ -0,0 +1,96 @@ +# MoCoV3 + +> [An Empirical Study of Training Self-Supervised Vision Transformers](https://arxiv.org/abs/2104.02057) + + + +## Abstract + +This paper does not describe a novel method. Instead, it studies a straightforward, incremental, yet must-know baseline given the recent progress in computer vision: self-supervised learning for Vision Transformers (ViT). While the training recipes for standard convolutional networks have been highly mature and robust, the recipes for ViT are yet to be built, especially in the self-supervised scenarios where training becomes more challenging. In this work, we go back to basics and investigate the effects of several fundamental components for training self-supervised ViT. We observe that instability is a major issue that degrades accuracy, and it can be hidden by apparently good results. We reveal that these results are indeed partial failure, and they can be improved when training is made more stable. We benchmark ViT results in MoCo v3 and several other self-supervised frameworks, with ablations in various aspects. We discuss the currently positive evidence as well as challenges and open questions. We hope that this work will provide useful data points and experience for future research. + +
+ +
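One of the stabilization measures reflected in the benchmark configs below is `stop_grad_conv1=True` on `MoCoV3ViT`, which keeps the patch-projection layer frozen during training. A minimal PyTorch sketch of that idea (ours; it assumes the ViT exposes a `patch_embed` submodule):

```python
import torch.nn as nn

def freeze_patch_projection(vit: nn.Module) -> None:
    """Freeze the patch-embedding projection, mirroring stop_grad_conv1=True."""
    for param in vit.patch_embed.parameters():  # assumes a `patch_embed` module
        param.requires_grad = False
```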
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('resnet50_mocov3-100e-pre_8xb128-linear-coslr-90e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('mocov3_resnet50_8xb512-amp-coslr-100e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/mocov3/benchmarks/resnet50_8xb128-linear-coslr-90e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k/resnet50_linear-8xb128-coslr-90e_in1k/resnet50_linear-8xb128-coslr-90e_in1k_20220927-8f7d937e.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :------------------------------------------------- | :--------: | :-------: | :-----------------------------------------------------------: | :--------------------------------------------------------------------: | +| `mocov3_resnet50_8xb512-amp-coslr-100e_in1k` | 68.01 | 4.11 | [config](mocov3_resnet50_8xb512-amp-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k/mocov3_resnet50_8xb512-amp-coslr-100e_in1k_20220927-f1144efa.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k/mocov3_resnet50_8xb512-amp-coslr-100e_in1k_20220927-f1144efa.json) | +| `mocov3_resnet50_8xb512-amp-coslr-300e_in1k` | 68.01 | 4.11 | [config](mocov3_resnet50_8xb512-amp-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-300e_in1k/mocov3_resnet50_8xb512-amp-coslr-300e_in1k_20220927-1e4f3304.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-300e_in1k/mocov3_resnet50_8xb512-amp-coslr-300e_in1k_20220927-1e4f3304.json) | +| `mocov3_resnet50_8xb512-amp-coslr-800e_in1k` | 68.01 | 4.11 | [config](mocov3_resnet50_8xb512-amp-coslr-800e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-800e_in1k/mocov3_resnet50_8xb512-amp-coslr-800e_in1k_20220927-e043f51a.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-800e_in1k/mocov3_resnet50_8xb512-amp-coslr-800e_in1k_20220927-e043f51a.json) | +| `mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k` | 84.27 | 4.61 | [config](mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k-224_20220826-08bc52f7.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k-224_20220826-08bc52f7.json) | +| `mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k` | 215.68 | 17.58 | [config](mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k.py) | 
[model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k-224_20220826-25213343.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k-224_20220826-25213343.json) | +| `mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k` | 652.78 | 61.60 | [config](mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k-224_20220829-9b88a442.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k-224_20220829-9b88a442.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `resnet50_mocov3-100e-pre_8xb128-linear-coslr-90e_in1k` | [MOCOV3 100-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k/mocov3_resnet50_8xb512-amp-coslr-100e_in1k_20220927-f1144efa.pth) | 25.56 | 4.11 | 69.60 | [config](benchmarks/resnet50_8xb128-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k/resnet50_linear-8xb128-coslr-90e_in1k/resnet50_linear-8xb128-coslr-90e_in1k_20220927-8f7d937e.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k/resnet50_linear-8xb128-coslr-90e_in1k/resnet50_linear-8xb128-coslr-90e_in1k_20220927-8f7d937e.json) | +| `resnet50_mocov3-300e-pre_8xb128-linear-coslr-90e_in1k` | [MOCOV3 300-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-300e_in1k/mocov3_resnet50_8xb512-amp-coslr-300e_in1k_20220927-1e4f3304.pth) | 25.56 | 4.11 | 72.80 | [config](benchmarks/resnet50_8xb128-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-300e_in1k/resnet50_linear-8xb128-coslr-90e_in1k/resnet50_linear-8xb128-coslr-90e_in1k_20220927-d21ddac2.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-300e_in1k/resnet50_linear-8xb128-coslr-90e_in1k/resnet50_linear-8xb128-coslr-90e_in1k_20220927-d21ddac2.json) | +| `resnet50_mocov3-800e-pre_8xb128-linear-coslr-90e_in1k` | [MOCOV3 800-Epochs](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-800e_in1k/mocov3_resnet50_8xb512-amp-coslr-800e_in1k_20220927-e043f51a.pth) | 25.56 | 4.11 | 74.40 | [config](benchmarks/resnet50_8xb128-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-800e_in1k/resnet50_linear-8xb128-coslr-90e_in1k/resnet50_linear-8xb128-coslr-90e_in1k_20220927-0e97a483.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-800e_in1k/resnet50_linear-8xb128-coslr-90e_in1k/resnet50_linear-8xb128-coslr-90e_in1k_20220927-0e97a483.json) | +| `vit-small-p16_mocov3-pre_8xb128-linear-coslr-90e_in1k` | 
[MOCOV3](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k-224_20220826-08bc52f7.pth) | 22.05 | 4.61 | 73.60 | [config](benchmarks/vit-small-p16_8xb128-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k/vit-small-p16_linear-8xb128-coslr-90e_in1k/vit-small-p16_linear-8xb128-coslr-90e_in1k_20220826-376674ef.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k/vit-small-p16_linear-8xb128-coslr-90e_in1k/vit-small-p16_linear-8xb128-coslr-90e_in1k_20220826-376674ef.json) | +| `vit-base-p16_mocov3-pre_8xb64-coslr-150e_in1k` | [MOCOV3](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k-224_20220826-25213343.pth) | 86.57 | 17.58 | 83.00 | [config](benchmarks/vit-base-p16_8xb64-coslr-150e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb64-coslr-150e_in1k/vit-base-p16_ft-8xb64-coslr-150e_in1k_20220826-f1e6c442.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb64-coslr-150e_in1k/vit-base-p16_ft-8xb64-coslr-150e_in1k_20220826-f1e6c442.json) | +| `vit-base-p16_mocov3-pre_8xb128-linear-coslr-90e_in1k` | [MOCOV3](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k-224_20220826-25213343.pth) | 86.57 | 17.58 | 76.90 | [config](benchmarks/vit-base-p16_8xb128-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k/vit-base-p16_linear-8xb128-coslr-90e_in1k/vit-base-p16_linear-8xb128-coslr-90e_in1k_20220826-83be7758.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k/vit-base-p16_linear-8xb128-coslr-90e_in1k/vit-base-p16_linear-8xb128-coslr-90e_in1k_20220826-83be7758.json) | +| `vit-large-p16_mocov3-pre_8xb64-coslr-100e_in1k` | [MOCOV3](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k-224_20220829-9b88a442.pth) | 304.33 | 61.60 | 83.70 | [config](benchmarks/vit-large-p16_8xb64-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k/vit-large-p16_ft-8xb64-coslr-100e_in1k/vit-large-p16_ft-8xb64-coslr-100e_in1k_20220829-878a2f7f.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k/vit-large-p16_ft-8xb64-coslr-100e_in1k/vit-large-p16_ft-8xb64-coslr-100e_in1k_20220829-878a2f7f.json) | + +## Citation + +```bibtex +@InProceedings{Chen_2021_ICCV, + title = {An Empirical Study of Training Self-Supervised Vision Transformers}, + author = {Chen, Xinlei and Xie, Saining and He, Kaiming}, + booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, + year = {2021} +} +``` diff --git a/configs/mocov3/benchmarks/resnet50_8xb128-linear-coslr-90e_in1k.py b/configs/mocov3/benchmarks/resnet50_8xb128-linear-coslr-90e_in1k.py new file mode 100644 index 0000000..4d0b202 --- /dev/null +++ 
b/configs/mocov3/benchmarks/resnet50_8xb128-linear-coslr-90e_in1k.py @@ -0,0 +1,31 @@ +_base_ = [ + '../../_base_/models/resnet50.py', + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_sgd_coslr_100e.py', + '../../_base_/default_runtime.py', +] + +# dataset settings +train_dataloader = dict(batch_size=128) + +model = dict( + backbone=dict( + frozen_stages=4, + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.'))) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.4, momentum=0.9, weight_decay=0.)) + +# learning rate scheduler +param_scheduler = [ + dict(type='CosineAnnealingLR', T_max=90, by_epoch=True, begin=0, end=90) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=90) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) diff --git a/configs/mocov3/benchmarks/vit-base-p16_8xb128-linear-coslr-90e_in1k.py b/configs/mocov3/benchmarks/vit-base-p16_8xb128-linear-coslr-90e_in1k.py new file mode 100644 index 0000000..91509fc --- /dev/null +++ b/configs/mocov3/benchmarks/vit-base-p16_8xb128-linear-coslr-90e_in1k.py @@ -0,0 +1,45 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/default_runtime.py', +] + +# dataset settings +train_dataloader = dict(batch_size=128) + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='MoCoV3ViT', + arch='base', # embed_dim = 768 + img_size=224, + patch_size=16, + stop_grad_conv1=True, + frozen_stages=12, + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + init_cfg=dict(type='Normal', std=0.01, layer='Linear'), + )) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=12, momentum=0.9, weight_decay=0.)) + +# learning rate scheduler +param_scheduler = [ + dict(type='CosineAnnealingLR', T_max=90, by_epoch=True, begin=0, end=90) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=90) +val_cfg = dict() +test_cfg = dict() + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) diff --git a/configs/mocov3/benchmarks/vit-base-p16_8xb64-coslr-150e_in1k.py b/configs/mocov3/benchmarks/vit-base-p16_8xb64-coslr-150e_in1k.py new file mode 100644 index 0000000..f3d074f --- /dev/null +++ b/configs/mocov3/benchmarks/vit-base-p16_8xb64-coslr-150e_in1k.py @@ -0,0 +1,74 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_path_rate=0.1, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ]), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=5e-4, 
eps=1e-8, betas=(0.9, 0.999), + weight_decay=0.05), + clip_grad=dict(max_norm=5.0), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-3, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=145, + eta_min=1e-5, + by_epoch=True, + begin=5, + end=150, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=150) +val_cfg = dict() +test_cfg = dict() + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +randomness = dict(seed=0) diff --git a/configs/mocov3/benchmarks/vit-large-p16_8xb64-coslr-100e_in1k.py b/configs/mocov3/benchmarks/vit-large-p16_8xb64-coslr-100e_in1k.py new file mode 100644 index 0000000..46d7f48 --- /dev/null +++ b/configs/mocov3/benchmarks/vit-large-p16_8xb64-coslr-100e_in1k.py @@ -0,0 +1,74 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='large', + img_size=224, + patch_size=16, + drop_path_rate=0.5, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=1024, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ]), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict( + type='AdamW', lr=5e-4, eps=1e-8, betas=(0.9, 0.999), + weight_decay=0.05), + clip_grad=dict(max_norm=5.0), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-3, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=95, + eta_min=1e-5, + by_epoch=True, + begin=5, + end=100, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100) +val_cfg = dict() +test_cfg = dict() + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +randomness = dict(seed=0) diff --git a/configs/mocov3/benchmarks/vit-small-p16_8xb128-linear-coslr-90e_in1k.py b/configs/mocov3/benchmarks/vit-small-p16_8xb128-linear-coslr-90e_in1k.py new file mode 100644 index 0000000..0c1ffa1 --- /dev/null +++ b/configs/mocov3/benchmarks/vit-small-p16_8xb128-linear-coslr-90e_in1k.py @@ -0,0 +1,45 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/default_runtime.py', +] + +# dataset settings +train_dataloader = dict(batch_size=128) + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='MoCoV3ViT', + arch='mocov3-small', # embed_dim = 384 + img_size=224, + patch_size=16, + 
stop_grad_conv1=True, + frozen_stages=12, + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + init_cfg=dict(type='Normal', std=0.01, layer='Linear'), + )) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=12, momentum=0.9, weight_decay=0.)) + +# learning rate scheduler +param_scheduler = [ + dict(type='CosineAnnealingLR', T_max=90, by_epoch=True, begin=0, end=90) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=90) +val_cfg = dict() +test_cfg = dict() + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) diff --git a/configs/mocov3/metafile.yml b/configs/mocov3/metafile.yml new file mode 100644 index 0000000..649d9f4 --- /dev/null +++ b/configs/mocov3/metafile.yml @@ -0,0 +1,201 @@ +Collections: + - Name: MoCoV3 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - LARS + Training Resources: 32x V100 GPUs + Architecture: + - ResNet + - ViT + - MoCo + Paper: + Title: An Empirical Study of Training Self-Supervised Vision Transformers + URL: https://arxiv.org/abs/2104.02057 + README: configs/mocov3/README.md + +Models: + - Name: mocov3_resnet50_8xb512-amp-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 4096 + FLOPs: 4109364224 + Parameters: 68012160 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k/mocov3_resnet50_8xb512-amp-coslr-100e_in1k_20220927-f1144efa.pth + Config: configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k.py + Downstream: + - resnet50_mocov3-100e-pre_8xb128-linear-coslr-90e_in1k + - Name: mocov3_resnet50_8xb512-amp-coslr-300e_in1k + Metadata: + Epochs: 300 + Batch Size: 4096 + FLOPs: 4109364224 + Parameters: 68012160 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-300e_in1k/mocov3_resnet50_8xb512-amp-coslr-300e_in1k_20220927-1e4f3304.pth + Config: configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-300e_in1k.py + Downstream: + - resnet50_mocov3-300e-pre_8xb128-linear-coslr-90e_in1k + - Name: mocov3_resnet50_8xb512-amp-coslr-800e_in1k + Metadata: + Epochs: 800 + Batch Size: 4096 + FLOPs: 4109364224 + Parameters: 68012160 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-800e_in1k/mocov3_resnet50_8xb512-amp-coslr-800e_in1k_20220927-e043f51a.pth + Config: configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-800e_in1k.py + Downstream: + - resnet50_mocov3-800e-pre_8xb128-linear-coslr-90e_in1k + - Name: resnet50_mocov3-100e-pre_8xb128-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 1024 + FLOPs: 4109464576 + Parameters: 25557032 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 69.6 + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k/resnet50_linear-8xb128-coslr-90e_in1k/resnet50_linear-8xb128-coslr-90e_in1k_20220927-8f7d937e.pth + Config: configs/mocov3/benchmarks/resnet50_8xb128-linear-coslr-90e_in1k.py + - Name: 
resnet50_mocov3-300e-pre_8xb128-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 1024 + FLOPs: 4109464576 + Parameters: 25557032 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 72.8 + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-300e_in1k/resnet50_linear-8xb128-coslr-90e_in1k/resnet50_linear-8xb128-coslr-90e_in1k_20220927-d21ddac2.pth + Config: configs/mocov3/benchmarks/resnet50_8xb128-linear-coslr-90e_in1k.py + - Name: resnet50_mocov3-800e-pre_8xb128-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 1024 + FLOPs: 4109464576 + Parameters: 25557032 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.4 + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-800e_in1k/resnet50_linear-8xb128-coslr-90e_in1k/resnet50_linear-8xb128-coslr-90e_in1k_20220927-0e97a483.pth + Config: configs/mocov3/benchmarks/resnet50_8xb128-linear-coslr-90e_in1k.py + - Name: mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k + Metadata: + Epochs: 300 + Batch Size: 4096 + FLOPs: 4607954304 + Parameters: 84266752 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k-224_20220826-08bc52f7.pth + Config: configs/mocov3/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k.py + Downstream: + - vit-small-p16_mocov3-pre_8xb128-linear-coslr-90e_in1k + - Name: vit-small-p16_mocov3-pre_8xb128-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 1024 + FLOPs: 4607954304 + Parameters: 22050664 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 73.6 + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k/vit-small-p16_linear-8xb128-coslr-90e_in1k/vit-small-p16_linear-8xb128-coslr-90e_in1k_20220826-376674ef.pth + Config: configs/mocov3/benchmarks/vit-small-p16_8xb128-linear-coslr-90e_in1k.py + - Name: mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k + Metadata: + Epochs: 300 + Batch Size: 4096 + FLOPs: 17581972224 + Parameters: 215678464 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k-224_20220826-25213343.pth + Config: configs/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k.py + Downstream: + - vit-base-p16_mocov3-pre_8xb128-linear-coslr-90e_in1k + - vit-base-p16_mocov3-pre_8xb64-coslr-150e_in1k + - Name: vit-base-p16_mocov3-pre_8xb64-coslr-150e_in1k + Metadata: + Epochs: 150 + Batch Size: 512 + FLOPs: 17581972224 + Parameters: 86567656 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.0 + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k/vit-base-p16_ft-8xb64-coslr-150e_in1k/vit-base-p16_ft-8xb64-coslr-150e_in1k_20220826-f1e6c442.pth + Config: configs/mocov3/benchmarks/vit-base-p16_8xb64-coslr-150e_in1k.py + - Name: 
vit-base-p16_mocov3-pre_8xb128-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 1024 + FLOPs: 17581972224 + Parameters: 86567656 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.9 + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k/vit-base-p16_linear-8xb128-coslr-90e_in1k/vit-base-p16_linear-8xb128-coslr-90e_in1k_20220826-83be7758.pth + Config: configs/mocov3/benchmarks/vit-base-p16_8xb128-linear-coslr-90e_in1k.py + - Name: mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k + Metadata: + Epochs: 300 + Batch Size: 4096 + FLOPs: 61603111936 + Parameters: 652781568 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k-224_20220829-9b88a442.pth + Config: configs/mocov3/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k.py + Downstream: + - vit-large-p16_mocov3-pre_8xb64-coslr-100e_in1k + - Name: vit-large-p16_mocov3-pre_8xb64-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 512 + FLOPs: 61603111936 + Parameters: 304326632 + Training Data: ImageNet-1k + In Collection: MoCoV3 + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.7 + Weights: https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k/vit-large-p16_ft-8xb64-coslr-100e_in1k/vit-large-p16_ft-8xb64-coslr-100e_in1k_20220829-878a2f7f.pth + Config: configs/mocov3/benchmarks/vit-large-p16_8xb64-coslr-100e_in1k.py diff --git a/configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k.py b/configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k.py new file mode 100644 index 0000000..e4eabcc --- /dev/null +++ b/configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k.py @@ -0,0 +1,82 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs512_mocov3.py', + '../_base_/default_runtime.py', +] + +# model settings +temperature = 1.0 +model = dict( + type='MoCoV3', + base_momentum=0.01, # 0.01 for 100e and 300e, 0.004 for 1000e + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=False), + neck=dict( + type='NonLinearNeck', + in_channels=2048, + hid_channels=4096, + out_channels=256, + num_layers=2, + with_bias=False, + with_last_bn=True, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=True), + head=dict( + type='MoCoV3Head', + predictor=dict( + type='NonLinearNeck', + in_channels=256, + hid_channels=4096, + out_channels=256, + num_layers=2, + with_bias=False, + with_last_bn=False, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=False), + loss=dict(type='CrossEntropyLoss', loss_weight=2 * temperature), + temperature=temperature)) + +# optimizer +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict(type='LARS', lr=9.6, weight_decay=1e-6, momentum=0.9), + paramwise_cfg=dict( + custom_keys={ + 'bn': dict(decay_mult=0, lars_exclude=True), + 'bias': dict(decay_mult=0, lars_exclude=True), + # bn layer in ResNet block downsample module + 'downsample.1': dict(decay_mult=0, lars_exclude=True), + }), +) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=90, 
+ by_epoch=True, + begin=10, + end=100, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100) +# only keeps the latest 3 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-300e_in1k.py b/configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-300e_in1k.py new file mode 100644 index 0000000..cc0e414 --- /dev/null +++ b/configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-300e_in1k.py @@ -0,0 +1,82 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs512_mocov3.py', + '../_base_/default_runtime.py', +] + +# model settings +temperature = 1.0 +model = dict( + type='MoCoV3', + base_momentum=0.01, # 0.01 for 100e and 300e, 0.004 for 1000e + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=False), + neck=dict( + type='NonLinearNeck', + in_channels=2048, + hid_channels=4096, + out_channels=256, + num_layers=2, + with_bias=False, + with_last_bn=True, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=True), + head=dict( + type='MoCoV3Head', + predictor=dict( + type='NonLinearNeck', + in_channels=256, + hid_channels=4096, + out_channels=256, + num_layers=2, + with_bias=False, + with_last_bn=False, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=False), + loss=dict(type='CrossEntropyLoss', loss_weight=2 * temperature), + temperature=temperature)) + +# optimizer +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict(type='LARS', lr=4.8, weight_decay=1e-6, momentum=0.9), + paramwise_cfg=dict( + custom_keys={ + 'bn': dict(decay_mult=0, lars_exclude=True), + 'bias': dict(decay_mult=0, lars_exclude=True), + # bn layer in ResNet block downsample module + 'downsample.1': dict(decay_mult=0, lars_exclude=True), + }), +) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=290, + by_epoch=True, + begin=10, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +# only keeps the latest 3 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
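The `auto_scale_lr` setting that closes each of these configs implements MMEngine's linear scaling rule: when LR auto-scaling is enabled at launch time, the optimizer's learning rate is multiplied by the ratio of the actual total batch size to `base_batch_size`. A minimal sketch of the rule, using the numbers from the 100-epoch ResNet-50 config above (`lr=9.6` at 8 GPUs x 512 images per GPU); the helper name is illustrative only.

```python
def scale_lr(base_lr: float, actual_batch_size: int, base_batch_size: int = 4096) -> float:
    """Linear LR scaling rule applied when auto-scale-lr is enabled."""
    return base_lr * actual_batch_size / base_batch_size


# The 100e ResNet-50 config sets lr=9.6 for 8 x 512 = 4096 images per step.
# Training on 4 GPUs with 512 images each would halve the learning rate.
print(scale_lr(9.6, actual_batch_size=4 * 512))  # -> 4.8
```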
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-800e_in1k.py b/configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-800e_in1k.py new file mode 100644 index 0000000..87f18e3 --- /dev/null +++ b/configs/mocov3/mocov3_resnet50_8xb512-amp-coslr-800e_in1k.py @@ -0,0 +1,82 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs512_mocov3.py', + '../_base_/default_runtime.py', +] + +# model settings +temperature = 1.0 +model = dict( + type='MoCoV3', + base_momentum=0.004, # 0.01 for 100e and 300e, 0.004 for 800 and 1000e + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=False), + neck=dict( + type='NonLinearNeck', + in_channels=2048, + hid_channels=4096, + out_channels=256, + num_layers=2, + with_bias=False, + with_last_bn=True, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=True), + head=dict( + type='MoCoV3Head', + predictor=dict( + type='NonLinearNeck', + in_channels=256, + hid_channels=4096, + out_channels=256, + num_layers=2, + with_bias=False, + with_last_bn=False, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=False), + loss=dict(type='CrossEntropyLoss', loss_weight=2 * temperature), + temperature=temperature)) + +# optimizer +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict(type='LARS', lr=4.8, weight_decay=1.5e-6, momentum=0.9), + paramwise_cfg=dict( + custom_keys={ + 'bn': dict(decay_mult=0, lars_exclude=True), + 'bias': dict(decay_mult=0, lars_exclude=True), + # bn layer in ResNet block downsample module + 'downsample.1': dict(decay_mult=0, lars_exclude=True), + }), +) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=790, + by_epoch=True, + begin=10, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800) +# only keeps the latest 3 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
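These ResNet-50 runs optimize with LARS, which rescales every layer's step by a trust ratio derived from the ratio of weight norm to gradient norm; the `lars_exclude=True` entries in `paramwise_cfg` keep BatchNorm and bias parameters (including the BN inside the downsample branch) on plain momentum SGD and free of weight decay, the usual practice for large-batch LARS training. A rough single-tensor sketch of the update, with the trust coefficient `eta` chosen as a typical value rather than taken from the repository:

```python
import torch


def lars_step(param, grad, lr, weight_decay=1.5e-6, momentum=0.9,
              eta=0.001, lars_exclude=False, buf=None):
    """One conceptual LARS update for a single parameter tensor."""
    if lars_exclude:
        d_p, local_lr = grad, 1.0            # BN/bias: plain momentum SGD, no decay
    else:
        w_norm, g_norm = param.norm(), grad.norm()
        # Trust ratio: eta * ||w|| / (||g|| + wd * ||w||)
        local_lr = eta * w_norm / (g_norm + weight_decay * w_norm + 1e-12)
        d_p = grad + weight_decay * param
    buf = torch.zeros_like(param) if buf is None else buf
    buf.mul_(momentum).add_(d_p, alpha=float(local_lr))
    return param - lr * buf, buf


w, g = torch.randn(64, 64), torch.randn(64, 64)
w, momentum_buf = lars_step(w, g, lr=4.8)
```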
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k.py b/configs/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k.py new file mode 100644 index 0000000..6b18fda --- /dev/null +++ b/configs/mocov3/mocov3_vit-base-p16_16xb256-amp-coslr-300e_in1k.py @@ -0,0 +1,151 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs512_mocov3.py', + '../_base_/default_runtime.py', +] + +# dataset settings +# the difference between ResNet50 and ViT pipeline is the `scale` in +# `RandomResizedCrop`, `scale=(0.08, 1.)` in ViT pipeline +view_pipeline1 = [ + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.08, 1.), + backend='pillow'), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.2, + hue=0.1) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=1.), + dict(type='Solarize', thr=128, prob=0.), + dict(type='RandomFlip', prob=0.5), +] +view_pipeline2 = [ + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.08, 1.), + backend='pillow'), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.2, + hue=0.1) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=0.1), + dict(type='Solarize', thr=128, prob=0.2), + dict(type='RandomFlip', prob=0.5), +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiView', + num_views=[1, 1], + transforms=[view_pipeline1, view_pipeline2]), + dict(type='PackInputs') +] + +train_dataloader = dict(batch_size=256, dataset=dict(pipeline=train_pipeline)) + +# model settings +temperature = 0.2 +model = dict( + type='MoCoV3', + base_momentum=0.01, + backbone=dict( + type='MoCoV3ViT', + arch='base', # embed_dim = 768 + img_size=224, + patch_size=16, + stop_grad_conv1=True), + neck=dict( + type='NonLinearNeck', + in_channels=768, + hid_channels=4096, + out_channels=256, + num_layers=3, + with_bias=False, + with_last_bn=True, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=False), + head=dict( + type='MoCoV3Head', + predictor=dict( + type='NonLinearNeck', + in_channels=256, + hid_channels=4096, + out_channels=256, + num_layers=2, + with_bias=False, + with_last_bn=True, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=False), + loss=dict(type='CrossEntropyLoss', loss_weight=2 * temperature), + temperature=temperature)) + +# optimizer +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict(type='AdamW', lr=2.4e-3, weight_decay=0.1)) +find_unused_parameters = True + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +# only keeps the latest 3 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
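In every MoCo v3 config here the head combines a `CrossEntropyLoss` with `loss_weight=2 * temperature`, which matches the InfoNCE term in the MoCo v3 pseudocode: similarities between the predictor output of one view and the momentum-encoder output of the other are divided by the temperature, the matching sample is the positive on the diagonal, and the cross-entropy is scaled by 2*tau. A self-contained sketch of that term and its symmetrized use (the in-repo loss additionally gathers keys across GPUs):

```python
import torch
import torch.nn.functional as F


def mocov3_ctr(q, k, temperature=0.2):
    """InfoNCE term from the MoCo v3 pseudocode: 2 * tau * CE(q @ k.T / tau)."""
    q, k = F.normalize(q, dim=1), F.normalize(k, dim=1)
    logits = q @ k.t() / temperature          # (N, N) cosine similarities / tau
    labels = torch.arange(q.size(0))          # positive pair shares the index
    return 2 * temperature * F.cross_entropy(logits, labels)


q1, q2 = torch.randn(8, 256), torch.randn(8, 256)    # predictor outputs per view
k1, k2 = torch.randn(8, 256), torch.randn(8, 256)    # momentum-encoder outputs
loss = mocov3_ctr(q1, k2) + mocov3_ctr(q2, k1)        # symmetrized over both views
```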
+auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mocov3/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k.py b/configs/mocov3/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k.py new file mode 100644 index 0000000..ae31c6d --- /dev/null +++ b/configs/mocov3/mocov3_vit-large-p16_64xb64-amp-coslr-300e_in1k.py @@ -0,0 +1,154 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs512_mocov3.py', + '../_base_/default_runtime.py', +] + +# dataset settings +# the difference between ResNet50 and ViT pipeline is the `scale` in +# `RandomResizedCrop`, `scale=(0.08, 1.)` in ViT pipeline +view_pipeline1 = [ + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.08, 1.), + backend='pillow'), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.2, + hue=0.1) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=1.), + dict(type='Solarize', thr=128, prob=0.), + dict(type='RandomFlip', prob=0.5), +] +view_pipeline2 = [ + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.08, 1.), + backend='pillow'), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.2, + hue=0.1) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=0.1), + dict(type='Solarize', thr=128, prob=0.2), + dict(type='RandomFlip', prob=0.5), +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiView', + num_views=[1, 1], + transforms=[view_pipeline1, view_pipeline2]), + dict(type='PackInputs') +] + +train_dataloader = dict(batch_size=64, dataset=dict(pipeline=train_pipeline)) + +# model settings +temperature = 0.2 +model = dict( + type='MoCoV3', + base_momentum=0.01, + backbone=dict( + type='MoCoV3ViT', + arch='large', # embed_dim = 1024 + img_size=224, + patch_size=16, + stop_grad_conv1=True), + neck=dict( + type='NonLinearNeck', + in_channels=1024, + hid_channels=4096, + out_channels=256, + num_layers=3, + with_bias=False, + with_last_bn=True, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=False), + head=dict( + type='MoCoV3Head', + predictor=dict( + type='NonLinearNeck', + in_channels=256, + hid_channels=4096, + out_channels=256, + num_layers=2, + with_bias=False, + with_last_bn=True, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=False), + loss=dict(type='CrossEntropyLoss', loss_weight=2 * temperature), + temperature=temperature)) + +# optimizer +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + clip_grad=dict(max_norm=5.0, error_if_nonfinite=False), + optimizer=dict(type='AdamW', lr=2.4e-3, weight_decay=0.1)) +find_unused_parameters = True + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +# only keeps the latest 3 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +randomness = dict(seed=0) + +# NOTE: 
`auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mocov3/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k.py b/configs/mocov3/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k.py new file mode 100644 index 0000000..0d26eec --- /dev/null +++ b/configs/mocov3/mocov3_vit-small-p16_16xb256-amp-coslr-300e_in1k.py @@ -0,0 +1,151 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs512_mocov3.py', + '../_base_/default_runtime.py', +] + +# dataset settings +# the difference between ResNet50 and ViT pipeline is the `scale` in +# `RandomResizedCrop`, `scale=(0.08, 1.)` in ViT pipeline +view_pipeline1 = [ + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.08, 1.), + backend='pillow'), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.2, + hue=0.1) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=1.), + dict(type='Solarize', thr=128, prob=0.), + dict(type='RandomFlip', prob=0.5), +] +view_pipeline2 = [ + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.08, 1.), + backend='pillow'), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.4, + contrast=0.4, + saturation=0.2, + hue=0.1) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=0.1), + dict(type='Solarize', thr=128, prob=0.2), + dict(type='RandomFlip', prob=0.5), +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiView', + num_views=[1, 1], + transforms=[view_pipeline1, view_pipeline2]), + dict(type='PackInputs') +] + +train_dataloader = dict(batch_size=256, dataset=dict(pipeline=train_pipeline)) + +# model settings +temperature = 0.2 +model = dict( + type='MoCoV3', + base_momentum=0.01, + backbone=dict( + type='MoCoV3ViT', + arch='mocov3-small', # embed_dim = 384 + img_size=224, + patch_size=16, + stop_grad_conv1=True), + neck=dict( + type='NonLinearNeck', + in_channels=384, + hid_channels=4096, + out_channels=256, + num_layers=3, + with_bias=False, + with_last_bn=True, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=False), + head=dict( + type='MoCoV3Head', + predictor=dict( + type='NonLinearNeck', + in_channels=256, + hid_channels=4096, + out_channels=256, + num_layers=2, + with_bias=False, + with_last_bn=True, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=False), + loss=dict(type='CrossEntropyLoss', loss_weight=2 * temperature), + temperature=temperature)) + +# optimizer +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict(type='AdamW', lr=2.4e-3, weight_decay=0.1)) +find_unused_parameters = True + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +# only keeps the latest 3 checkpoints +default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) 
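`base_momentum` in these models controls the exponential moving average that builds the momentum (key) encoder: each training step moves the target parameters a small fraction toward the online ones, and that fraction is annealed over training so the target becomes increasingly stable, which is why the longer 800/1000-epoch ResNet runs start from 0.004 instead of 0.01. The sketch below assumes the MMEngine convention where `momentum` is the weight given to the online weights; the exact cosine schedule lives in the project's EMA hook, and the one shown here is only indicative.

```python
import math
import torch


@torch.no_grad()
def ema_update(target_params, online_params, momentum):
    """target <- (1 - m) * target + m * online, with a small m such as 0.01."""
    for t, o in zip(target_params, online_params):
        t.mul_(1.0 - momentum).add_(o, alpha=momentum)


def cosine_momentum(base_momentum, step, total_steps):
    """Indicative cosine decay of the update fraction toward zero."""
    return base_momentum * (math.cos(math.pi * step / total_steps) + 1) / 2


online = [torch.randn(4, 4)]
target = [p.clone() for p in online]
ema_update(target, online, cosine_momentum(0.01, step=100, total_steps=1000))
```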
+ +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/mvit/README.md b/configs/mvit/README.md new file mode 100644 index 0000000..1bf72e5 --- /dev/null +++ b/configs/mvit/README.md @@ -0,0 +1,85 @@ +# MViT V2 + +> [MViTv2: Improved Multiscale Vision Transformers for Classification and Detection](http://openaccess.thecvf.com//content/CVPR2022/papers/Li_MViTv2_Improved_Multiscale_Vision_Transformers_for_Classification_and_Detection_CVPR_2022_paper.pdf) + + + +## Abstract + +In this paper, we study Multiscale Vision Transformers (MViTv2) as a unified architecture for image and video +classification, as well as object detection. We present an improved version of MViT that incorporates +decomposed relative positional embeddings and residual pooling connections. We instantiate this architecture +in five sizes and evaluate it for ImageNet classification, COCO detection and Kinetics video recognition where +it outperforms prior work. We further compare MViTv2s' pooling attention to window attention mechanisms where +it outperforms the latter in accuracy/compute. Without bells-and-whistles, MViTv2 has state-of-the-art +performance in 3 domains: 88.8% accuracy on ImageNet classification, 58.7 boxAP on COCO object detection as +well as 86.1% on Kinetics-400 video classification. + +
+ +
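Two of the ingredients named in the abstract can be pictured with a few lines of PyTorch: pooling attention downsamples the key/value (and optionally query) token grid before computing attention, and the residual pooling connection adds the pooled query back onto the attention output. The block below is a deliberately simplified, single-head illustration; it omits the decomposed relative position embeddings, the strided convolutional pooling and the multi-scale stage layout of the real backbone.

```python
import torch
import torch.nn as nn


class PooledAttention(nn.Module):
    """Simplified single-head pooling attention with a residual pooling connection."""

    def __init__(self, dim, q_stride=1, kv_stride=2):
        super().__init__()
        self.qkv = nn.Linear(dim, dim * 3)
        self.proj = nn.Linear(dim, dim)
        self.pool_q = nn.AvgPool2d(q_stride) if q_stride > 1 else nn.Identity()
        self.pool_kv = nn.AvgPool2d(kv_stride)

    def forward(self, x, hw):
        B, N, C = x.shape                      # x: (B, H*W, C) tokens on an H x W grid
        H, W = hw
        q, k, v = self.qkv(x).chunk(3, dim=-1)

        def pool(t, op):                       # (B, N, C) -> pooled (B, N', C)
            t = op(t.transpose(1, 2).reshape(B, C, H, W))
            return t.flatten(2).transpose(1, 2)

        q, k, v = pool(q, self.pool_q), pool(k, self.pool_kv), pool(v, self.pool_kv)
        attn = (q @ k.transpose(-2, -1)) / C ** 0.5
        out = attn.softmax(dim=-1) @ v + q     # residual pooling connection
        return self.proj(out)


blk = PooledAttention(dim=96)
print(blk(torch.randn(2, 14 * 14, 96), hw=(14, 14)).shape)  # keys/values pooled 2x
```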
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('mvitv2-tiny_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('mvitv2-tiny_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/mvit/mvitv2-tiny_8xb256_in1k.py https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-tiny_3rdparty_in1k_20220722-db7beeef.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :-----------------------------------: | :----------------------------------------------------------------------------------: | +| `mvitv2-tiny_3rdparty_in1k`\* | From scratch | 24.17 | 4.70 | 82.33 | 96.15 | [config](mvitv2-tiny_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-tiny_3rdparty_in1k_20220722-db7beeef.pth) | +| `mvitv2-small_3rdparty_in1k`\* | From scratch | 34.87 | 7.00 | 83.63 | 96.51 | [config](mvitv2-small_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-small_3rdparty_in1k_20220722-986bd741.pth) | +| `mvitv2-base_3rdparty_in1k`\* | From scratch | 51.47 | 10.16 | 84.34 | 96.86 | [config](mvitv2-base_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-base_3rdparty_in1k_20220722-9c4f0a17.pth) | +| `mvitv2-large_3rdparty_in1k`\* | From scratch | 217.99 | 43.87 | 85.25 | 97.14 | [config](mvitv2-large_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-large_3rdparty_in1k_20220722-2b57b983.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/mvit). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{li2021improved, + title={MViTv2: Improved multiscale vision transformers for classification and detection}, + author={Li, Yanghao and Wu, Chao-Yuan and Fan, Haoqi and Mangalam, Karttikeya and Xiong, Bo and Malik, Jitendra and Feichtenhofer, Christoph}, + booktitle={CVPR}, + year={2022} +} +``` diff --git a/configs/mvit/metafile.yml b/configs/mvit/metafile.yml new file mode 100644 index 0000000..c16f4f8 --- /dev/null +++ b/configs/mvit/metafile.yml @@ -0,0 +1,95 @@ +Collections: + - Name: MViT V2 + Metadata: + Architecture: + - Attention Dropout + - Convolution + - Dense Connections + - GELU + - Layer Normalization + - Scaled Dot-Product Attention + - Attention Pooling + Paper: + URL: http://openaccess.thecvf.com//content/CVPR2022/papers/Li_MViTv2_Improved_Multiscale_Vision_Transformers_for_Classification_and_Detection_CVPR_2022_paper.pdf + Title: 'MViTv2: Improved Multiscale Vision Transformers for Classification and Detection' + README: configs/mvit/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.24.0/mmcls/models/backbones/mvit.py + Version: v0.24.0 + +Models: + - Name: mvitv2-tiny_3rdparty_in1k + In Collection: MViT V2 + Metadata: + FLOPs: 4703510768 + Parameters: 24173320 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 82.33 + Top 5 Accuracy: 96.15 + Weights: https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-tiny_3rdparty_in1k_20220722-db7beeef.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_T_in1k.pyth + Code: https://github.com/facebookresearch/mvit + Config: configs/mvit/mvitv2-tiny_8xb256_in1k.py + + - Name: mvitv2-small_3rdparty_in1k + In Collection: MViT V2 + Metadata: + FLOPs: 6997555136 + Parameters: 34870216 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 83.63 + Top 5 Accuracy: 96.51 + Weights: https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-small_3rdparty_in1k_20220722-986bd741.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_S_in1k.pyth + Code: https://github.com/facebookresearch/mvit + Config: configs/mvit/mvitv2-small_8xb256_in1k.py + + - Name: mvitv2-base_3rdparty_in1k + In Collection: MViT V2 + Metadata: + FLOPs: 10157964400 + Parameters: 51472744 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 84.34 + Top 5 Accuracy: 96.86 + Weights: https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-base_3rdparty_in1k_20220722-9c4f0a17.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_B_in1k.pyth + Code: https://github.com/facebookresearch/mvit + Config: configs/mvit/mvitv2-base_8xb256_in1k.py + + - Name: mvitv2-large_3rdparty_in1k + In Collection: MViT V2 + Metadata: + FLOPs: 43868151412 + Parameters: 217992952 + Training Data: + - ImageNet-1k + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.25 + Top 5 Accuracy: 97.14 + Weights: https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-large_3rdparty_in1k_20220722-2b57b983.pth + Converted From: + Weights: https://dl.fbaipublicfiles.com/mvit/mvitv2_models/MViTv2_L_in1k.pyth + Code: https://github.com/facebookresearch/mvit + Config: configs/mvit/mvitv2-large_8xb256_in1k.py diff 
--git a/configs/mvit/mvitv2-base_8xb256_in1k.py b/configs/mvit/mvitv2-base_8xb256_in1k.py new file mode 100644 index 0000000..ee3ec11 --- /dev/null +++ b/configs/mvit/mvitv2-base_8xb256_in1k.py @@ -0,0 +1,43 @@ +_base_ = [ + '../_base_/models/mvit/mvitv2-base.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# dataset settings +train_dataloader = dict(batch_size=256) +val_dataloader = dict(batch_size=256) +test_dataloader = dict(batch_size=256) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=2.5e-4), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.pos_embed': dict(decay_mult=0.0), + '.rel_pos_h': dict(decay_mult=0.0), + '.rel_pos_w': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=1.0), +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=70, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=70) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/mvit/mvitv2-large_8xb256_in1k.py b/configs/mvit/mvitv2-large_8xb256_in1k.py new file mode 100644 index 0000000..eacddf9 --- /dev/null +++ b/configs/mvit/mvitv2-large_8xb256_in1k.py @@ -0,0 +1,43 @@ +_base_ = [ + '../_base_/models/mvit/mvitv2-large.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs2048_AdamW.py', + '../_base_/default_runtime.py' +] + +# dataset settings +train_dataloader = dict(batch_size=256) +val_dataloader = dict(batch_size=256) +test_dataloader = dict(batch_size=256) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=2.5e-4), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.pos_embed': dict(decay_mult=0.0), + '.rel_pos_h': dict(decay_mult=0.0), + '.rel_pos_w': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=1.0), +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=70, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=70) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. 
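The `paramwise_cfg` shared by the MViTv2 configs removes weight decay from normalization weights, biases and the (relative) position embedding tables, a common recipe for AdamW-trained vision transformers, while `clip_grad=dict(max_norm=1.0)` caps the global gradient norm each step. Conceptually the substring rules translate into two optimizer parameter groups, roughly as in this sketch (the real grouping is done by MMEngine's optimizer constructor, and the 0.05 weight decay is an illustrative value, not taken from these files):

```python
import torch.nn as nn


def split_param_groups(model, no_decay_keys=('.pos_embed', '.rel_pos_h', '.rel_pos_w'),
                       base_wd=0.05):
    """Route matched names and 1-D parameters (biases, norms) to weight_decay=0."""
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        skip_decay = param.ndim <= 1 or any(key in name for key in no_decay_keys)
        (no_decay if skip_decay else decay).append(param)
    return [dict(params=decay, weight_decay=base_wd),
            dict(params=no_decay, weight_decay=0.0)]


model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
print([len(g['params']) for g in split_param_groups(model)])  # -> [1, 3]
```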
+auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/mvit/mvitv2-small_8xb256_in1k.py b/configs/mvit/mvitv2-small_8xb256_in1k.py new file mode 100644 index 0000000..74cfd0a --- /dev/null +++ b/configs/mvit/mvitv2-small_8xb256_in1k.py @@ -0,0 +1,43 @@ +_base_ = [ + '../_base_/models/mvit/mvitv2-small.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs2048_AdamW.py', + '../_base_/default_runtime.py' +] + +# dataset settings +train_dataloader = dict(batch_size=256) +val_dataloader = dict(batch_size=256) +test_dataloader = dict(batch_size=256) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=2.5e-4), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.pos_embed': dict(decay_mult=0.0), + '.rel_pos_h': dict(decay_mult=0.0), + '.rel_pos_w': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=1.0), +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=70, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=70) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/mvit/mvitv2-tiny_8xb256_in1k.py b/configs/mvit/mvitv2-tiny_8xb256_in1k.py new file mode 100644 index 0000000..4e563a2 --- /dev/null +++ b/configs/mvit/mvitv2-tiny_8xb256_in1k.py @@ -0,0 +1,43 @@ +_base_ = [ + '../_base_/models/mvit/mvitv2-tiny.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs2048_AdamW.py', + '../_base_/default_runtime.py' +] + +# dataset settings +train_dataloader = dict(batch_size=256) +val_dataloader = dict(batch_size=256) +test_dataloader = dict(batch_size=256) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=2.5e-4), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.pos_embed': dict(decay_mult=0.0), + '.rel_pos_h': dict(decay_mult=0.0), + '.rel_pos_w': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=1.0), +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + end=70, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', eta_min=1e-5, by_epoch=True, begin=70) +] + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/ofa/README.md b/configs/ofa/README.md new file mode 100644 index 0000000..22e20f8 --- /dev/null +++ b/configs/ofa/README.md @@ -0,0 +1,88 @@ +# OFA + +> [OFA: Unifying Architectures, Tasks, and Modalities Through a Simple Sequence-to-Sequence Learning Framework](https://arxiv.org/abs/2202.03052) + + + +## Abstract + +In this work, we pursue a unified paradigm for multimodal pretraining to break the scaffolds of complex task/modality-specific customization. We propose OFA, a Task-Agnostic and Modality-Agnostic framework that supports Task Comprehensiveness. OFA unifies a diverse set of cross-modal and unimodal tasks, including image generation, visual grounding, image captioning, image classification, language modeling, etc., in a simple sequence-to-sequence learning framework. 
OFA follows the instruction-based learning in both pretraining and finetuning stages, requiring no extra task-specific layers for downstream tasks. In comparison with the recent state-of-the-art vision & language models that rely on extremely large cross-modal datasets, OFA is pretrained on only 20M publicly available image-text pairs. Despite its simplicity and relatively small-scale training data, OFA achieves new SOTAs in a series of cross-modal tasks while attaining highly competitive performances on uni-modal tasks. Our further analysis indicates that OFA can also effectively transfer to unseen tasks and unseen domains. + +
+ +
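The unification described above is what lets one `OFA` model type drive all of the configs in this directory: captioning, VQA and referring-expression grounding are each phrased as an instruction plus an image on the encoder side and free-form tokens (a caption, an answer, or quantized box-location tokens) on the decoder side, so only the `task` field and the prompt change. A purely illustrative sketch of that shared sample format; the instruction strings and bin tokens below are paraphrases, not the exact templates used by the tokenizer.

```python
from dataclasses import dataclass


@dataclass
class Seq2SeqSample:
    """One sample in a task-agnostic sequence-to-sequence format."""
    image: str          # image path (a tensor in a real pipeline)
    instruction: str    # the task is carried entirely by the text prompt
    target: str         # caption, answer, or serialized box tokens


samples = [
    Seq2SeqSample('demo/cat-dog.png', 'what does the image describe?',
                  'a dog and a kitten sitting next to each other'),
    Seq2SeqSample('demo/cat-dog.png', 'how many animals are in the picture?', 'two'),
    Seq2SeqSample('demo/cat-dog.png', 'which region does the text "kitten" describe?',
                  '<bin_120> <bin_48> <bin_400> <bin_310>'),  # illustrative location tokens
]
for s in samples:
    print(f'{s.instruction!r} -> {s.target!r}')
```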
+ +## How to use it? + + + +**Use the model** + +```python +from mmpretrain import inference_model + +result = inference_model('ofa-base_3rdparty-finetuned_caption', 'demo/cat-dog.png') +print(result) +# {'pred_caption': 'a dog and a kitten sitting next to each other'} +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/ofa/ofa-base_finetuned_refcoco.py https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_refcoco_20230418-2797d3ab.pth +``` + + + +## Models and results + +### Image Caption on COCO + +| Model | Params (M) | BLEU-4 | CIDER | Config | Download | +| :-------------------------------------- | :--------: | :----: | :----: | :-------------------------------------: | :--------------------------------------------------------------------------------------------------: | +| `ofa-base_3rdparty-finetuned_caption`\* | 182.24 | 42.64 | 144.50 | [config](ofa-base_finetuned_caption.py) | [model](https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_coco-caption_20230418-de18914e.pth) | + +*Models with * are converted from the [official repo](https://github.com/OFA-Sys/OFA). The config files of these models are only for inference. We haven't reproduce the training results.* + +### Visual Grounding on RefCOCO + +| Model | Params (M) | Accuracy (testA) | Accuracy (testB) | Config | Download | +| :-------------------------------------- | :--------: | :--------------: | :--------------: | :-------------------------------------: | :------------------------------------------------------------------------------: | +| `ofa-base_3rdparty-finetuned_refcoco`\* | 182.24 | 90.49 | 83.63 | [config](ofa-base_finetuned_refcoco.py) | [model](https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_refcoco_20230418-2797d3ab.pth) | + +*Models with * are converted from the [official repo](https://github.com/OFA-Sys/OFA). The config files of these models are only for inference. We haven't reproduce the training results.* + +### Visual Question Answering on VQAv2 + +| Model | Params (M) | Accuracy | Config | Download | +| :---------------------------------- | :--------: | :------: | :---------------------------------: | :--------------------------------------------------------------------------------------------------------------: | +| `ofa-base_3rdparty-finetuned_vqa`\* | 182.24 | 78.00 | [config](ofa-base_finetuned_vqa.py) | [model](https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_coco-vqa_20230418-f38539a5.pth) | +| `ofa-base_3rdparty-zeroshot_vqa`\* | 182.24 | 58.32 | [config](ofa-base_zeroshot_vqa.py) | [model](https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_pretrain_20230418-dccfc07f.pth) | + +*Models with * are converted from the [official repo](https://github.com/OFA-Sys/OFA). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{wang2022ofa, + author = {Peng Wang and + An Yang and + Rui Men and + Junyang Lin and + Shuai Bai and + Zhikang Li and + Jianxin Ma and + Chang Zhou and + Jingren Zhou and + Hongxia Yang}, + title = {OFA: Unifying Architectures, Tasks, and Modalities Through a Simple Sequence-to-Sequence + Learning Framework}, + journal = {CoRR}, + volume = {abs/2202.03052}, + year = {2022} +} +``` diff --git a/configs/ofa/metafile.yml b/configs/ofa/metafile.yml new file mode 100644 index 0000000..9c4b3eb --- /dev/null +++ b/configs/ofa/metafile.yml @@ -0,0 +1,89 @@ +Collections: + - Name: OFA + Metadata: + Architecture: + - ResNet + - Transformer + Training Data: + - CC12M + - CC3M + - SBU + - COCO + - VG + - VQAv2 + - GQA + - RefCOCO + - OpenImages + - Object365 + - YFCC100M + - ImageNet-21K + - Pile + Paper: + Title: 'OFA: Unifying Architectures, Tasks, and Modalities Through a Simple + Sequence-to-Sequence Learning Framework' + URL: https://arxiv.org/abs/2202.03052 + README: configs/ofa/README.md + +Models: + - Name: ofa-base_3rdparty-finetuned_refcoco + Metadata: + FLOPs: null + Parameters: 182238536 + In Collection: OFA + Results: + - Task: Visual Grounding + Dataset: RefCOCO + Metrics: + Accuracy (testA): 90.49 + Accuracy (testB): 83.63 + Weights: https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_refcoco_20230418-2797d3ab.pth + Config: configs/ofa/ofa-base_finetuned_refcoco.py + Converted From: + Weights: https://ofa-beijing.oss-cn-beijing.aliyuncs.com/checkpoints/refcoco_base_best.pt + Code: https://github.com/OFA-Sys/OFA + - Name: ofa-base_3rdparty-finetuned_vqa + Metadata: + FLOPs: null + Parameters: 182238536 + In Collection: OFA + Results: + - Task: Visual Question Answering + Dataset: VQAv2 + Metrics: + Accuracy: 78.00 # Report from the official repo + Weights: https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_coco-vqa_20230418-f38539a5.pth + Config: configs/ofa/ofa-base_finetuned_vqa.py + Converted From: + Weights: https://ofa-beijing.oss-cn-beijing.aliyuncs.com/checkpoints/vqa_large_best.pt + Code: https://github.com/OFA-Sys/OFA + - Name: ofa-base_3rdparty-finetuned_caption + Metadata: + FLOPs: null + Parameters: 182238536 + In Collection: OFA + Results: + - Task: Image Caption + Dataset: COCO + Metrics: + BLEU-4: 42.64 + CIDER: 144.50 + Weights: https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_coco-caption_20230418-de18914e.pth + Config: configs/ofa/ofa-base_finetuned_caption.py + Converted From: + Weights: https://ofa-beijing.oss-cn-beijing.aliyuncs.com/checkpoints/caption_base_best.pt + Code: https://github.com/OFA-Sys/OFA + - Name: ofa-base_3rdparty-zeroshot_vqa + Metadata: + FLOPs: null + Parameters: 182238536 + In Collection: OFA + Results: + - Task: Visual Question Answering + Dataset: VQAv2 + Metrics: + Accuracy: 58.32 + Weights: https://download.openmmlab.com/mmclassification/v1/ofa/ofa-base_3rdparty_pretrain_20230418-dccfc07f.pth + Config: configs/ofa/ofa-base_zeroshot_vqa.py + Converted From: + Weights: https://ofa-beijing.oss-cn-beijing.aliyuncs.com/checkpoints/ofa_base.pt + Code: https://github.com/OFA-Sys/OFA diff --git a/configs/ofa/ofa-base_finetuned_caption.py b/configs/ofa/ofa-base_finetuned_caption.py new file mode 100644 index 0000000..45efff0 --- /dev/null +++ b/configs/ofa/ofa-base_finetuned_caption.py @@ -0,0 +1,41 @@ +_base_ = [ + '../_base_/datasets/coco_caption.py', + '../_base_/default_runtime.py', +] + +# model 
settings +model = dict( + type='OFA', + task='caption', + vocab_size=59457, + embedding_dim=768, + encoder_cfg=dict( + embed_images=dict(type='OFAResNet', depth=101), + num_layers=6, + ), + decoder_cfg=dict(num_layers=6), + generation_cfg=dict(use_cache=True), + tokenizer=dict(type='OFATokenizer', name_or_path='OFA-Sys/OFA-base'), +) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(480, 480)), + dict(type='PackInputs', meta_keys=('image_id', )), +] + +train_dataloader = None +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +train_cfg = None +val_cfg = dict() +test_cfg = dict() diff --git a/configs/ofa/ofa-base_finetuned_refcoco.py b/configs/ofa/ofa-base_finetuned_refcoco.py new file mode 100644 index 0000000..5a7435d --- /dev/null +++ b/configs/ofa/ofa-base_finetuned_refcoco.py @@ -0,0 +1,45 @@ +_base_ = [ + '../_base_/datasets/refcoco.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='OFA', + task='refcoco', + vocab_size=59457, + embedding_dim=768, + encoder_cfg=dict( + embed_images=dict(type='OFAResNet', depth=101), + num_layers=6, + ), + decoder_cfg=dict(num_layers=6), + generation_cfg=dict(use_cache=True), + tokenizer=dict(type='OFATokenizer', name_or_path='OFA-Sys/OFA-base'), +) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(512, 512)), + dict( + type='PackInputs', + algorithm_keys=['text', 'gt_bboxes'], + meta_keys=['image_id', 'scale_factor'], + ), +] + +train_dataloader = None +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +train_cfg = None +val_cfg = dict() +test_cfg = dict() diff --git a/configs/ofa/ofa-base_finetuned_vqa.py b/configs/ofa/ofa-base_finetuned_vqa.py new file mode 100644 index 0000000..b120d09 --- /dev/null +++ b/configs/ofa/ofa-base_finetuned_vqa.py @@ -0,0 +1,64 @@ +_base_ = [ + '../_base_/datasets/coco_vqa.py', + '../_base_/default_runtime.py', +] + +ANS2LABEL = 'https://ofa-beijing.oss-cn-beijing.aliyuncs.com/datasets/vqa_data/trainval_ans2label.pkl' # noqa: E501 + +# model settings +model = dict( + type='OFA', + task='vqa', + vocab_size=59457, + embedding_dim=768, + ans2label=ANS2LABEL, + encoder_cfg=dict( + embed_images=dict(type='OFAResNet', depth=101), + num_layers=6, + num_heads=12, + ), + decoder_cfg=dict( + num_layers=6, + num_heads=12, + ), + generation_cfg=dict( + num_beams=5, + max_new_tokens=200, + length_penalty=0., # VQA doesn't require longer answer. 
+ use_cache=True, + ), + tokenizer=dict(type='OFATokenizer', name_or_path='OFA-Sys/OFA-base'), +) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(480, 480), + interpolation='bicubic', + backend='pillow'), + dict(type='OFAAddObjects'), + dict( + type='PackInputs', + algorithm_keys=[ + 'question', 'gt_answer', 'gt_answer_weight', 'decoder_prompt' + ], + meta_keys=['question_id', 'image_id'], + ), +] + +train_dataloader = None # Eval only +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +train_cfg = None +val_cfg = dict() +test_cfg = dict() diff --git a/configs/ofa/ofa-base_zeroshot_vqa.py b/configs/ofa/ofa-base_zeroshot_vqa.py new file mode 100644 index 0000000..9890cdd --- /dev/null +++ b/configs/ofa/ofa-base_zeroshot_vqa.py @@ -0,0 +1,42 @@ +_base_ = [ + '../_base_/datasets/coco_vqa.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='OFA', + task='vqa', + vocab_size=59457, + embedding_dim=768, + encoder_cfg=dict( + embed_images=dict(type='OFAResNet', depth=101), + num_layers=6, + num_heads=12, + ), + decoder_cfg=dict( + num_layers=6, + num_heads=12, + ), + generation_cfg=dict( + num_beams=20, + max_new_tokens=200, + length_penalty=0., # VQA doesn't require longer answer. + use_cache=True, + ), + tokenizer=dict(type='OFATokenizer', name_or_path='OFA-Sys/OFA-base'), +) + +# data settings +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + to_rgb=True, +) + +train_dataloader = None # Eval only + +# schedule settings +train_cfg = None +val_cfg = dict() +test_cfg = dict() diff --git a/configs/ofa/ofa-large_zeroshot_vqa.py b/configs/ofa/ofa-large_zeroshot_vqa.py new file mode 100644 index 0000000..8b47121 --- /dev/null +++ b/configs/ofa/ofa-large_zeroshot_vqa.py @@ -0,0 +1,43 @@ +_base_ = [ + '../_base_/datasets/coco_vqa.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='OFA', + task='vqa', + vocab_size=59457, + embedding_dim=1024, + encoder_cfg=dict( + embed_images=dict(type='OFAResNet', depth=152), + num_layers=12, + num_heads=16, + ), + decoder_cfg=dict( + num_layers=12, + num_heads=16, + ), + generation_cfg=dict( + num_beams=20, + max_new_tokens=200, + length_penalty=0., # VQA doesn't require longer answer. + use_cache=True, + ), + tokenizer=dict(type='OFATokenizer', name_or_path='OFA-Sys/OFA-large'), +) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + to_rgb=True, +) + +train_dataloader = None # Eval only + +# schedule settings +train_cfg = None +val_cfg = dict() +test_cfg = dict() diff --git a/configs/otter/README.md b/configs/otter/README.md new file mode 100644 index 0000000..18a8468 --- /dev/null +++ b/configs/otter/README.md @@ -0,0 +1,79 @@ +# Otter + +> [Otter: A Multi-Modal Model with In-Context Instruction Tuning](https://arxiv.org/abs/2305.03726) + + + +## Abstract + +Large language models (LLMs) have demonstrated significant universal capabilities as few/zero-shot learners in various tasks due to their pre-training on vast amounts of text data, as exemplified by GPT-3, which boosted to InstrctGPT and ChatGPT, effectively following natural language instructions to accomplish real-world tasks. 
In this paper, we propose to introduce instruction tuning into multi-modal models, motivated by the Flamingo model's upstream interleaved format pretraining dataset. We adopt a similar approach to construct our MultI-Modal In-Context Instruction Tuning (MIMIC-IT) dataset. We then introduce Otter, a multi-modal model based on OpenFlamingo (open-sourced version of DeepMind's Flamingo), trained on MIMIC-IT and showcasing improved instruction-following ability and in-context learning. We also optimize OpenFlamingo's implementation for researchers, democratizing the required training resources from 1$\times$ A100 GPU to 4$\times$ RTX-3090 GPUs, and integrate both OpenFlamingo and Otter into Huggingface Transformers for more researchers to incorporate the models into their customized training and inference pipelines. + +
+ +
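The configs below keep the Flamingo recipe visible: a CLIP ViT-L/14 vision encoder feeds a LLaMA-7B language model through adapter layers inserted every fourth decoder block (`cross_attn_every_n_layers=4` in `FlamingoLMAdapter`), and each adapter cross-attends from text tokens onto visual features behind a tanh gate that starts at zero, so training begins from the unmodified language model. The block below sketches only that gating idea and is not the repository's adapter implementation.

```python
import torch
import torch.nn as nn


class GatedCrossAttention(nn.Module):
    """Tanh-gated cross-attention from text tokens onto visual features (sketch)."""

    def __init__(self, dim, num_heads=8):
        super().__init__()
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)
        self.gate = nn.Parameter(torch.zeros(1))   # tanh(0) = 0 -> starts as identity

    def forward(self, text_tokens, visual_tokens):
        attended, _ = self.attn(text_tokens, visual_tokens, visual_tokens)
        return text_tokens + torch.tanh(self.gate) * attended


layer = GatedCrossAttention(dim=512)
text = torch.randn(1, 16, 512)       # language-model hidden states
vision = torch.randn(1, 64, 512)     # projected vision-encoder features
print(layer(text, vision).shape)     # torch.Size([1, 16, 512])
```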
+ +## How to use it? + + + +**Use the model** + +```python +import torch +from mmpretrain import get_model, inference_model + +model = get_model('otter-9b_3rdparty_caption', pretrained=True, device='cuda', generation_cfg=dict(max_new_tokens=50)) +out = inference_model(model, 'demo/cat-dog.png') +print(out) +# {'pred_caption': 'The image features two adorable small puppies sitting next to each other on the grass. One puppy is brown and white, while the other is tan and white. They appear to be relaxing outdoors, enjoying each other'} +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/otter/otter-9b_caption.py https://download.openmmlab.com/mmclassification/v1/otter/otter-9b-adapter_20230613-51c5be8d.pth +``` + + + +## Models and results + +### Image Caption on COCO + +| Model | Params (M) | BLEU-4 | CIDER | Config | Download | +| :---------------------------- | :--------: | :------: | :------: | :---------------------------: | :------------------------------------------------------------------------------------------------------: | +| `otter-9b_3rdparty_caption`\* | 8220.45 | Upcoming | Upcoming | [config](otter-9b_caption.py) | [model](https://download.openmmlab.com/mmclassification/v1/otter/otter-9b-adapter_20230613-51c5be8d.pth) | + +*Models with * are converted from the [official repo](https://github.com/Luodian/Otter/tree/main). The config files of these models are only for inference. We haven't reproduce the training results.* + +### Visual Question Answering on VQAv2 + +| Model | Params (M) | Accuracy | Config | Download | +| :------------------------ | :--------: | :------: | :-----------------------: | :------------------------------------------------------------------------------------------------------: | +| `otter-9b_3rdparty_vqa`\* | 8220.45 | Upcoming | [config](otter-9b_vqa.py) | [model](https://download.openmmlab.com/mmclassification/v1/otter/otter-9b-adapter_20230613-51c5be8d.pth) | + +*Models with * are converted from the [official repo](https://github.com/Luodian/Otter/tree/main). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{li2023otter, + title={Otter: A Multi-Modal Model with In-Context Instruction Tuning}, + author={Li, Bo and Zhang, Yuanhan and Chen, Liangyu and Wang, Jinghao and Yang, Jingkang and Liu, Ziwei}, + journal={arXiv preprint arXiv:2305.03726}, + year={2023} +} + +@article{li2023mimicit, + title={MIMIC-IT: Multi-Modal In-Context Instruction Tuning}, + author={Bo Li and Yuanhan Zhang and Liangyu Chen and Jinghao Wang and Fanyi Pu and Jingkang Yang and Chunyuan Li and Ziwei Liu}, + year={2023}, + eprint={2306.05425}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/otter/metafile.yml b/configs/otter/metafile.yml new file mode 100644 index 0000000..6ee89c6 --- /dev/null +++ b/configs/otter/metafile.yml @@ -0,0 +1,43 @@ +Collections: + - Name: Otter + Metadata: + Architecture: + - Transformer + - Gated Cross-Attention Dense + Paper: + Title: 'Otter: A Multi-Modal Model with In-Context Instruction Tuning' + URL: https://arxiv.org/abs/2305.03726 + README: configs/otter/README.md + +Models: + - Name: otter-9b_3rdparty_caption + Metadata: + FLOPs: null + Parameters: 8220452880 + In Collection: Otter + Results: + - Task: Image Caption + Dataset: COCO + Metrics: + BLEU-4: null + CIDER: null + Weights: https://download.openmmlab.com/mmclassification/v1/otter/otter-9b-adapter_20230613-51c5be8d.pth + Config: configs/otter/otter-9b_caption.py + Converted From: + Weights: https://huggingface.co/luodian/otter-9b-hf + Code: https://github.com/Luodian/Otter/tree/main + - Name: otter-9b_3rdparty_vqa + Metadata: + FLOPs: null + Parameters: 8220452880 + In Collection: Otter + Results: + - Task: Visual Question Answering + Dataset: VQAv2 + Metrics: + Accuracy: null + Weights: https://download.openmmlab.com/mmclassification/v1/otter/otter-9b-adapter_20230613-51c5be8d.pth + Config: configs/otter/otter-9b_vqa.py + Converted From: + Weights: https://huggingface.co/luodian/otter-9b-hf + Code: https://github.com/Luodian/Otter/tree/main diff --git a/configs/otter/otter-9b_caption.py b/configs/otter/otter-9b_caption.py new file mode 100644 index 0000000..e35e92e --- /dev/null +++ b/configs/otter/otter-9b_caption.py @@ -0,0 +1,87 @@ +_base_ = [ + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='Otter', + tokenizer=dict(type='LlamaTokenizer', name_or_path='huggyllama/llama-7b'), + vision_encoder=dict( + type='VisionTransformer', + arch='l', + patch_size=14, + pre_norm=True, + norm_cfg=dict(type='LN', eps=1e-5), + layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), + final_norm=False, + out_type='raw', + pretrained=( + 'https://download.openmmlab.com/mmclassification/v0/clip/' + 'vit-large-p14_clip-openai-pre_3rdparty_20230517-95e2af0b.pth'), + ), + lang_encoder=dict( + base=dict( + type='AutoModelForCausalLM', + name_or_path='huggyllama/llama-7b', + local_files_only=True), + adapter=dict( + type='FlamingoLMAdapter', + vis_hidden_size=1024, + cross_attn_every_n_layers=4, + use_media_placement_augmentation=False, + only_attend_previous=True, + ), + ), + task='caption', + final_prompt_tmpl='User:Please describe the image. 
GPT:', + generation_cfg=dict( + num_beams=3, max_new_tokens=24, no_repeat_ngram_size=3), +) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=224, + interpolation='bicubic', + backend='pillow'), + dict(type='CenterCrop', crop_size=(224, 224)), + dict( + type='PackInputs', + algorithm_keys=['gt_caption'], + meta_keys=['image_id'], + ), +] + +val_dataloader = dict( + batch_size=8, + num_workers=8, + dataset=dict( + type='COCOCaption', + data_root='data/coco', + ann_file='annotations/coco_karpathy_val.json', + pipeline=test_pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) + +val_evaluator = dict( + type='COCOCaption', + ann_file='data/coco/annotations/coco_karpathy_val_gt.json') + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator + +# schedule settings +val_cfg = dict() +test_cfg = dict() diff --git a/configs/otter/otter-9b_vqa.py b/configs/otter/otter-9b_vqa.py new file mode 100644 index 0000000..72f2b64 --- /dev/null +++ b/configs/otter/otter-9b_vqa.py @@ -0,0 +1,104 @@ +_base_ = [ + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='Otter', + tokenizer=dict(type='LlamaTokenizer', name_or_path='huggyllama/llama-7b'), + vision_encoder=dict( + type='VisionTransformer', + arch='l', + patch_size=14, + pre_norm=True, + norm_cfg=dict(type='LN', eps=1e-5), + layer_cfgs=dict(act_cfg=dict(type='QuickGELU')), + final_norm=False, + out_type='raw', + pretrained=( + 'https://download.openmmlab.com/mmclassification/v0/clip/' + 'vit-large-p14_clip-openai-pre_3rdparty_20230517-95e2af0b.pth'), + ), + lang_encoder=dict( + base=dict( + type='AutoModelForCausalLM', + name_or_path='huggyllama/llama-7b', + local_files_only=True), + adapter=dict( + type='FlamingoLMAdapter', + vis_hidden_size=1024, + cross_attn_every_n_layers=4, + use_media_placement_augmentation=False, + only_attend_previous=True, + ), + ), + task='vqa', + final_prompt_tmpl='User:{question} GPT:', + generation_cfg=dict( + num_beams=3, max_new_tokens=24, no_repeat_ngram_size=3), +) + +# data settings +data_preprocessor = dict( + type='MultiModalDataPreprocessor', + mean=[122.770938, 116.7460125, 104.09373615], + std=[68.5005327, 66.6321579, 70.32316305], + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=224, + interpolation='bicubic', + backend='pillow'), + dict(type='CenterCrop', crop_size=(224, 224)), + dict( + type='PackInputs', + algorithm_keys=['question', 'gt_answer', 'gt_answer_weight', 'shots'], + meta_keys=['image_id'], + ), +] + +val_dataloader = dict( + batch_size=8, + num_workers=8, + dataset=dict( + type='FlamingoEvalCOCOVQA', + data_root='data/coco', + data_prefix='val2014', + question_file='annotations/v2_OpenEnded_mscoco_val2014_questions.json', + ann_file='annotations/v2_mscoco_val2014_annotations.json', + pipeline=test_pipeline, + num_shots=0, + num_support_examples=2048, + num_query_examples=5000, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +val_evaluator = dict(type='VQAAcc') + +test_dataloader = dict( + batch_size=8, + num_workers=8, + dataset=dict( + type='FlamingoEvalCOCOVQA', + data_root='data/coco', + data_prefix='test2015', 
+ question_file= + 'annotations/v2_OpenEnded_mscoco_test-dev2015_questions.json', + pipeline=test_pipeline, + num_shots=0, + num_support_examples=2048, + num_query_examples=5000, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +test_evaluator = dict(type='ReportVQA', file_path='vqa_test-dev.json') + +# schedule settings +val_cfg = dict() +test_cfg = dict() diff --git a/configs/poolformer/README.md b/configs/poolformer/README.md new file mode 100644 index 0000000..2c4b249 --- /dev/null +++ b/configs/poolformer/README.md @@ -0,0 +1,80 @@ +# PoolFormer + +> [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) + + + +## Abstract + +Transformers have shown great potential in computer vision tasks. A common belief is their attention-based token mixer module contributes most to their competence. However, recent works show the attention-based module in transformers can be replaced by spatial MLPs and the resulted models still perform quite well. Based on this observation, we hypothesize that the general architecture of the transformers, instead of the specific token mixer module, is more essential to the model's performance. To verify this, we deliberately replace the attention module in transformers with an embarrassingly simple spatial pooling operator to conduct only basic token mixing. Surprisingly, we observe that the derived model, termed as PoolFormer, achieves competitive performance on multiple computer vision tasks. For example, on ImageNet-1K, PoolFormer achieves 82.1% top-1 accuracy, surpassing well-tuned vision transformer/MLP-like baselines DeiT-B/ResMLP-B24 by 0.3%/1.1% accuracy with 35%/52% fewer parameters and 49%/61% fewer MACs. The effectiveness of PoolFormer verifies our hypothesis and urges us to initiate the concept of "MetaFormer", a general architecture abstracted from transformers without specifying the token mixer. Based on the extensive experiments, we argue that MetaFormer is the key player in achieving superior results for recent transformer and MLP-like models on vision tasks. This work calls for more future research dedicated to improving MetaFormer instead of focusing on the token mixer modules. Additionally, our proposed PoolFormer could serve as a starting baseline for future MetaFormer architecture design. + +
+ +
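The "embarrassingly simple spatial pooling operator" from the abstract is, in the paper, nothing more than an average pool with the input subtracted from its output, since the surrounding MetaFormer block adds the input back through its residual connection. A minimal PyTorch rendition of that token mixer, for intuition rather than as the repository's implementation:

```python
import torch
import torch.nn as nn


class PoolingTokenMixer(nn.Module):
    """PoolFormer-style token mixer: average pooling in place of self-attention."""

    def __init__(self, pool_size=3):
        super().__init__()
        self.pool = nn.AvgPool2d(
            pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, x):
        # x: (B, C, H, W); subtract x because the block's residual adds it back.
        return self.pool(x) - x


mixer = PoolingTokenMixer()
print(mixer(torch.randn(2, 64, 56, 56)).shape)   # torch.Size([2, 64, 56, 56])
```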
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('poolformer-s12_3rdparty_32xb128_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('poolformer-s12_3rdparty_32xb128_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/poolformer/poolformer-s12_32xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s12_3rdparty_32xb128_in1k_20220414-f8d83051.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :--------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :--------------------------------------: | :---------------------------------------------------------------------: | +| `poolformer-s12_3rdparty_32xb128_in1k`\* | From scratch | 11.92 | 1.87 | 77.24 | 93.51 | [config](poolformer-s12_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s12_3rdparty_32xb128_in1k_20220414-f8d83051.pth) | +| `poolformer-s24_3rdparty_32xb128_in1k`\* | From scratch | 21.39 | 3.51 | 80.33 | 95.05 | [config](poolformer-s24_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s24_3rdparty_32xb128_in1k_20220414-d7055904.pth) | +| `poolformer-s36_3rdparty_32xb128_in1k`\* | From scratch | 30.86 | 5.15 | 81.43 | 95.45 | [config](poolformer-s36_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s36_3rdparty_32xb128_in1k_20220414-d78ff3e8.pth) | +| `poolformer-m36_3rdparty_32xb128_in1k`\* | From scratch | 56.17 | 8.96 | 82.14 | 95.71 | [config](poolformer-m36_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m36_3rdparty_32xb128_in1k_20220414-c55e0949.pth) | +| `poolformer-m48_3rdparty_32xb128_in1k`\* | From scratch | 73.47 | 11.80 | 82.51 | 95.95 | [config](poolformer-m48_32xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m48_3rdparty_32xb128_in1k_20220414-9378f3eb.pth) | + +*Models with * are converted from the [official repo](https://github.com/sail-sg/poolformer). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{yu2022metaformer, + title={Metaformer is actually what you need for vision}, + author={Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng}, + booktitle={Proceedings of the IEEE/CVF conference on computer vision and pattern recognition}, + pages={10819--10829}, + year={2022} +} +``` diff --git a/configs/poolformer/metafile.yml b/configs/poolformer/metafile.yml new file mode 100644 index 0000000..55285dd --- /dev/null +++ b/configs/poolformer/metafile.yml @@ -0,0 +1,99 @@ +Collections: + - Name: PoolFormer + Metadata: + Training Data: ImageNet-1k + Architecture: + - Pooling + - 1x1 Convolution + - LayerScale + Paper: + URL: https://arxiv.org/abs/2111.11418 + Title: MetaFormer is Actually What You Need for Vision + README: configs/poolformer/README.md + Code: + Version: v0.22.1 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.22.1/mmcls/models/backbones/poolformer.py + +Models: + - Name: poolformer-s12_3rdparty_32xb128_in1k + Metadata: + FLOPs: 1871399424 + Parameters: 11915176 + In Collection: PoolFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.24 + Top 5 Accuracy: 93.51 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s12_3rdparty_32xb128_in1k_20220414-f8d83051.pth + Config: configs/poolformer/poolformer-s12_32xb128_in1k.py + Converted From: + Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_s12.pth.tar + Code: https://github.com/sail-sg/poolformer + - Name: poolformer-s24_3rdparty_32xb128_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 3510411008 + Parameters: 21388968 + In Collection: PoolFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.33 + Top 5 Accuracy: 95.05 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s24_3rdparty_32xb128_in1k_20220414-d7055904.pth + Config: configs/poolformer/poolformer-s24_32xb128_in1k.py + Converted From: + Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_s24.pth.tar + Code: https://github.com/sail-sg/poolformer + - Name: poolformer-s36_3rdparty_32xb128_in1k + Metadata: + FLOPs: 5149422592 + Parameters: 30862760 + In Collection: PoolFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.43 + Top 5 Accuracy: 95.45 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-s36_3rdparty_32xb128_in1k_20220414-d78ff3e8.pth + Config: configs/poolformer/poolformer-s36_32xb128_in1k.py + Converted From: + Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_s36.pth.tar + Code: https://github.com/sail-sg/poolformer + - Name: poolformer-m36_3rdparty_32xb128_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 8960175744 + Parameters: 56172520 + In Collection: PoolFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.14 + Top 5 Accuracy: 95.71 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m36_3rdparty_32xb128_in1k_20220414-c55e0949.pth + Config: configs/poolformer/poolformer-m36_32xb128_in1k.py + Converted From: + Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_m36.pth.tar + Code: https://github.com/sail-sg/poolformer + - Name: 
poolformer-m48_3rdparty_32xb128_in1k + Metadata: + FLOPs: 11801805696 + Parameters: 73473448 + In Collection: PoolFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.51 + Top 5 Accuracy: 95.95 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-m48_3rdparty_32xb128_in1k_20220414-9378f3eb.pth + Config: configs/poolformer/poolformer-m48_32xb128_in1k.py + Converted From: + Weights: https://github.com/sail-sg/poolformer/releases/download/v1.0/poolformer_m48.pth.tar + Code: https://github.com/sail-sg/poolformer diff --git a/configs/poolformer/poolformer-m36_32xb128_in1k.py b/configs/poolformer/poolformer-m36_32xb128_in1k.py new file mode 100644 index 0000000..13065b8 --- /dev/null +++ b/configs/poolformer/poolformer-m36_32xb128_in1k.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/poolformer/poolformer_m36.py', + '../_base_/datasets/imagenet_bs128_poolformer_medium_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/poolformer/poolformer-m48_32xb128_in1k.py b/configs/poolformer/poolformer-m48_32xb128_in1k.py new file mode 100644 index 0000000..2078df3 --- /dev/null +++ b/configs/poolformer/poolformer-m48_32xb128_in1k.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/poolformer/poolformer_m48.py', + '../_base_/datasets/imagenet_bs128_poolformer_medium_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/poolformer/poolformer-s12_32xb128_in1k.py b/configs/poolformer/poolformer-s12_32xb128_in1k.py new file mode 100644 index 0000000..7cf4a63 --- /dev/null +++ b/configs/poolformer/poolformer-s12_32xb128_in1k.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/poolformer/poolformer_s12.py', + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/poolformer/poolformer-s24_32xb128_in1k.py b/configs/poolformer/poolformer-s24_32xb128_in1k.py new file mode 100644 index 0000000..ffb2482 --- /dev/null +++ b/configs/poolformer/poolformer-s24_32xb128_in1k.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/poolformer/poolformer_s24.py', + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/poolformer/poolformer-s36_32xb128_in1k.py b/configs/poolformer/poolformer-s36_32xb128_in1k.py new file mode 100644 index 0000000..842dab3 --- /dev/null +++ b/configs/poolformer/poolformer-s36_32xb128_in1k.py @@ -0,0 +1,17 @@ +_base_ = [ + '../_base_/models/poolformer/poolformer_s36.py', + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/regnet/README.md b/configs/regnet/README.md new file mode 100644 index 0000000..63031f4 --- /dev/null +++ b/configs/regnet/README.md @@ -0,0 +1,88 @@ +# RegNet + +> [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678) + + + +## Abstract + +In this work, we present a new network design paradigm. Our goal is to help advance the understanding of network design and discover design principles that generalize across settings. Instead of focusing on designing individual network instances, we design network design spaces that parametrize populations of networks. The overall process is analogous to classic manual design of networks, but elevated to the design space level. Using our methodology we explore the structure aspect of network design and arrive at a low-dimensional design space consisting of simple, regular networks that we call RegNet. The core insight of the RegNet parametrization is surprisingly simple: widths and depths of good networks can be explained by a quantized linear function. We analyze the RegNet design space and arrive at interesting findings that do not match the current practice of network design. The RegNet design space provides simple and fast networks that work well across a wide range of flop regimes. Under comparable training settings and flops, the RegNet models outperform the popular EfficientNet models while being up to 5x faster on GPUs. + +
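+The "quantized linear function" mentioned above is concrete enough to compute by hand. The sketch
+below is our paraphrase of the width rule from the paper, not code from this repository; the
+`regnet_widths` helper and the example parameters (roughly the 400MF design point) are illustrative,
+and the exact per-architecture values live in the RegNet backbone referenced by the configs below.
+
+```python
+import numpy as np
+
+
+def regnet_widths(w_0, w_a, w_m, depth, q=8):
+    """Quantized linear width rule: u_j = w_0 + w_a * j, snapped to powers of w_m."""
+    u = w_0 + w_a * np.arange(depth)             # continuous per-block widths
+    s = np.round(np.log(u / w_0) / np.log(w_m))  # quantization exponents
+    w = w_0 * np.power(w_m, s)                   # quantized widths
+    return (np.round(w / q) * q).astype(int)     # round to a multiple of q channels
+
+
+# Parameters roughly matching the 400MF setting (illustrative values only).
+print(regnet_widths(w_0=24, w_a=24.48, w_m=2.54, depth=22))
+```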
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('regnetx-400mf_8xb128_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('regnetx-400mf_8xb128_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/regnet/regnetx-400mf_8xb128_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/regnet/regnetx-400mf_8xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-400mf_8xb128_in1k_20211213-89bfc226.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :------------------------------------: | :------------------------------------------------------------------------------------: | +| `regnetx-400mf_8xb128_in1k` | From scratch | 5.16 | 0.41 | 72.56 | 90.78 | [config](regnetx-400mf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-400mf_8xb128_in1k_20211213-89bfc226.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-400mf_8xb128_in1k_20211208_143316.json) | +| `regnetx-800mf_8xb128_in1k` | From scratch | 7.26 | 0.81 | 74.76 | 92.32 | [config](regnetx-800mf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-800mf_8xb128_in1k_20211213-222b0f11.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-800mf_8xb128_in1k_20211207_143037.log.json) | +| `regnetx-1.6gf_8xb128_in1k` | From scratch | 9.19 | 1.63 | 76.84 | 93.31 | [config](regnetx-1.6gf_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-1.6gf_8xb128_in1k_20211213-d1b89758.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-1.6gf_8xb128_in1k_20211208_143018.log.json) | +| `regnetx-3.2gf_8xb64_in1k` | From scratch | 3.21 | 1.53 | 78.09 | 94.08 | [config](regnetx-3.2gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-3.2gf_8xb64_in1k_20211213-1fdd82ae.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-3.2gf_8xb64_in1k_20211208_142720.log.json) | +| `regnetx-4.0gf_8xb64_in1k` | From scratch | 22.12 | 4.00 | 78.60 | 94.17 | [config](regnetx-4.0gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-4.0gf_8xb64_in1k_20211213-efed675c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-4.0gf_8xb64_in1k_20211207_150431.log.json) | +| `regnetx-6.4gf_8xb64_in1k` | From scratch | 26.21 | 6.51 | 79.38 | 94.65 | [config](regnetx-6.4gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-6.4gf_8xb64_in1k_20211215-5c6089da.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-6.4gf_8xb64_in1k_20211213_172748.log.json) | +| `regnetx-8.0gf_8xb64_in1k` | 
From scratch | 39.57 | 8.03 | 79.12 | 94.51 | [config](regnetx-8.0gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-8.0gf_8xb64_in1k_20211213-9a9fcc76.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-8.0gf_8xb64_in1k_20211208_103250.log.json) | +| `regnetx-12gf_8xb64_in1k` | From scratch | 46.11 | 12.15 | 79.67 | 95.03 | [config](regnetx-12gf_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-12gf_8xb64_in1k_20211213-5df8c2f8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-12gf_8xb64_in1k_20211208_143713.log.json) | + +## Citation + +```bibtex +@article{radosavovic2020designing, + title={Designing Network Design Spaces}, + author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr Dollár}, + year={2020}, + eprint={2003.13678}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/regnet/metafile.yml b/configs/regnet/metafile.yml new file mode 100644 index 0000000..4796a9f --- /dev/null +++ b/configs/regnet/metafile.yml @@ -0,0 +1,122 @@ +Collections: + - Name: RegNet + Metadata: + Training Data: ImageNet-1k + Architecture: + - Neural Architecture Search + - Design Space Design + - Precise BN + - SGD with nesterov + Paper: + URL: https://arxiv.org/abs/2003.13678 + Title: Designing Network Design Spaces + README: configs/regnet/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.18.0/mmcls/models/backbones/regnet.py + Version: v0.18.0 + +Models: + - Name: regnetx-400mf_8xb128_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-400mf_8xb128_in1k.py + Metadata: + FLOPs: 410000000 # 0.41G + Parameters: 5160000 # 5.16M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 72.56 + Top 5 Accuracy: 90.78 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-400mf_8xb128_in1k_20211213-89bfc226.pth + - Name: regnetx-800mf_8xb128_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-800mf_8xb128_in1k.py + Metadata: + FLOPs: 810000000 # 0.81G + Parameters: 7260000 # 7.26M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 74.76 + Top 5 Accuracy: 92.32 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-800mf_8xb128_in1k_20211213-222b0f11.pth + - Name: regnetx-1.6gf_8xb128_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-1.6gf_8xb128_in1k.py + Metadata: + FLOPs: 1630000000 # 1.63G + Parameters: 9190000 # 9.19M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 76.84 + Top 5 Accuracy: 93.31 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-1.6gf_8xb128_in1k_20211213-d1b89758.pth + - Name: regnetx-3.2gf_8xb64_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-3.2gf_8xb64_in1k.py + Metadata: + FLOPs: 1530000000 # 1.53G + Parameters: 3210000 # 32.1M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 78.09 + Top 5 Accuracy: 94.08 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-3.2gf_8xb64_in1k_20211213-1fdd82ae.pth + - Name: regnetx-4.0gf_8xb64_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-4.0gf_8xb64_in1k.py + Metadata: + FLOPs: 4000000000 # 4G + Parameters: 22120000 # 22.12M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 
Accuracy: 78.60 + Top 5 Accuracy: 94.17 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-4.0gf_8xb64_in1k_20211213-efed675c.pth + - Name: regnetx-6.4gf_8xb64_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-6.4gf_8xb64_in1k.py + Metadata: + FLOPs: 6510000000 # 6.51G + Parameters: 26210000 # 26.21M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 79.38 + Top 5 Accuracy: 94.65 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-6.4gf_8xb64_in1k_20211215-5c6089da.pth + - Name: regnetx-8.0gf_8xb64_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-8.0gf_8xb64_in1k.py + Metadata: + FLOPs: 8030000000 # 8.03G + Parameters: 39570000 # 39.57M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 79.12 + Top 5 Accuracy: 94.51 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-8.0gf_8xb64_in1k_20211213-9a9fcc76.pth + - Name: regnetx-12gf_8xb64_in1k + In Collection: RegNet + Config: configs/regnet/regnetx-12gf_8xb64_in1k.py + Metadata: + FLOPs: 12150000000 # 12.15G + Parameters: 46110000 # 46.11M + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 79.67 + Top 5 Accuracy: 95.03 + Weights: https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-12gf_8xb64_in1k_20211213-5df8c2f8.pth diff --git a/configs/regnet/regnetx-1.6gf_8xb128_in1k.py b/configs/regnet/regnetx-1.6gf_8xb128_in1k.py new file mode 100644 index 0000000..d3e9e93 --- /dev/null +++ b/configs/regnet/regnetx-1.6gf_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_1.6gf'), + head=dict(in_channels=912, )) diff --git a/configs/regnet/regnetx-12gf_8xb64_in1k.py b/configs/regnet/regnetx-12gf_8xb64_in1k.py new file mode 100644 index 0000000..7a2c0b5 --- /dev/null +++ b/configs/regnet/regnetx-12gf_8xb64_in1k.py @@ -0,0 +1,18 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_12gf'), + head=dict(in_channels=2240, )) + +# dataset settings +train_dataloader = dict(batch_size=64) + +# schedule settings +# for batch_size 512, use lr = 0.4 +optim_wrapper = dict(optimizer=dict(lr=0.4)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=512) diff --git a/configs/regnet/regnetx-3.2gf_8xb64_in1k.py b/configs/regnet/regnetx-3.2gf_8xb64_in1k.py new file mode 100644 index 0000000..a78478d --- /dev/null +++ b/configs/regnet/regnetx-3.2gf_8xb64_in1k.py @@ -0,0 +1,18 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_3.2gf'), + head=dict(in_channels=1008, )) + +# dataset settings +train_dataloader = dict(batch_size=64) + +# schedule settings +# for batch_size 512, use lr = 0.4 +optim_wrapper = dict(optimizer=dict(lr=0.4)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (8 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=512) diff --git a/configs/regnet/regnetx-4.0gf_8xb64_in1k.py b/configs/regnet/regnetx-4.0gf_8xb64_in1k.py new file mode 100644 index 0000000..dfc241f --- /dev/null +++ b/configs/regnet/regnetx-4.0gf_8xb64_in1k.py @@ -0,0 +1,18 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_4.0gf'), + head=dict(in_channels=1360, )) + +# dataset settings +train_dataloader = dict(batch_size=64) + +# schedule settings +# for batch_size 512, use lr = 0.4 +optim_wrapper = dict(optimizer=dict(lr=0.4)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=512) diff --git a/configs/regnet/regnetx-400mf_8xb128_in1k.py b/configs/regnet/regnetx-400mf_8xb128_in1k.py new file mode 100644 index 0000000..bad1678 --- /dev/null +++ b/configs/regnet/regnetx-400mf_8xb128_in1k.py @@ -0,0 +1,58 @@ +_base_ = [ + '../_base_/models/regnet/regnetx_400mf.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs1024_coslr.py', + '../_base_/default_runtime.py' +] + +# dataset settings +data_preprocessor = dict( + # BGR format normalization parameters + mean=[103.53, 116.28, 123.675], + std=[57.375, 57.12, 58.395], + to_rgb=False, # The checkpoints from PyCls requires BGR format inputs. +) + +# lighting params, in order of BGR, from repo. pycls +EIGVAL = [0.2175, 0.0188, 0.0045] +EIGVEC = [ + [-0.5836, -0.6948, 0.4203], + [-0.5808, -0.0045, -0.814], + [-0.5675, 0.7192, 0.4009], +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='Lighting', + eigval=EIGVAL, + eigvec=EIGVEC, + alphastd=25.5, # because the value range of images is [0,255] + to_rgb=False), + dict(type='PackInputs'), +] + +train_dataloader = dict(batch_size=128, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=128) +test_dataloader = dict(batch_size=128) + +# schedule settings + +# sgd with nesterov, base ls is 0.8 for batch_size 1024, +optim_wrapper = dict(optimizer=dict(lr=0.8, nesterov=True)) + +# runtime settings + +# Precise BN hook will update the bn stats, so this hook should be executed +# before CheckpointHook(priority of 'VERY_LOW') and +# EMAHook(priority of 'NORMAL') So set the priority of PreciseBNHook to +# 'ABOVENORMAL' here. +custom_hooks = [ + dict( + type='PreciseBNHook', + num_samples=8192, + interval=1, + priority='ABOVE_NORMAL') +] diff --git a/configs/regnet/regnetx-6.4gf_8xb64_in1k.py b/configs/regnet/regnetx-6.4gf_8xb64_in1k.py new file mode 100644 index 0000000..edb1fb8 --- /dev/null +++ b/configs/regnet/regnetx-6.4gf_8xb64_in1k.py @@ -0,0 +1,18 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_6.4gf'), + head=dict(in_channels=1624, )) + +# dataset settings +train_dataloader = dict(batch_size=64) + +# schedule settings +# for batch_size 512, use lr = 0.4 +optim_wrapper = dict(optimizer=dict(lr=0.4)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (8 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=512) diff --git a/configs/regnet/regnetx-8.0gf_8xb64_in1k.py b/configs/regnet/regnetx-8.0gf_8xb64_in1k.py new file mode 100644 index 0000000..04b75bb --- /dev/null +++ b/configs/regnet/regnetx-8.0gf_8xb64_in1k.py @@ -0,0 +1,18 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_8.0gf'), + head=dict(in_channels=1920, )) + +# dataset settings +train_dataloader = dict(batch_size=64) + +# schedule settings +# for batch_size 512, use lr = 0.4 +optim_wrapper = dict(optimizer=dict(lr=0.4)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=512) diff --git a/configs/regnet/regnetx-800mf_8xb128_in1k.py b/configs/regnet/regnetx-800mf_8xb128_in1k.py new file mode 100644 index 0000000..9cd7137 --- /dev/null +++ b/configs/regnet/regnetx-800mf_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = ['./regnetx-400mf_8xb128_in1k.py'] + +# model settings +model = dict( + backbone=dict(type='RegNet', arch='regnetx_800mf'), + head=dict(in_channels=672, )) diff --git a/configs/replknet/README.md b/configs/replknet/README.md new file mode 100644 index 0000000..3d312f2 --- /dev/null +++ b/configs/replknet/README.md @@ -0,0 +1,108 @@ +# RepLKNet + +> [Scaling Up Your Kernels to 31x31: Revisiting Large Kernel Design in CNNs](https://arxiv.org/abs/2203.06717) + + + +## Abstract + +We revisit large kernel design in modern convolutional neural networks (CNNs). Inspired by recent advances in vision transformers (ViTs), in this paper, we demonstrate that using a few large convolutional kernels instead of a stack of small kernels could be a more powerful paradigm. We suggested five guidelines, e.g., applying re-parameterized large depth-wise convolutions, to design efficient highperformance large-kernel CNNs. Following the guidelines, we propose RepLKNet, a pure CNN architecture whose kernel size is as large as 31×31, in contrast to commonly used 3×3. RepLKNet greatly closes the performance gap between CNNs and ViTs, e.g., achieving comparable or superior results than Swin Transformer on ImageNet and a few typical downstream tasks, with lower latency. RepLKNet also shows nice scalability to big data and large models, obtaining 87.8% top-1 accuracy on ImageNet and 56.0% mIoU on ADE20K, which is very competitive among the state-of-the-arts with similar model sizes. Our study further reveals that, in contrast to small-kernel CNNs, large kernel CNNs have much larger effective receptive fields and higher shape bias rather than texture bias. + +
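+The "re-parameterized large depth-wise convolutions" from the abstract boil down to a simple
+identity: a small parallel kernel trained alongside the large one can be zero-padded and added into
+it after training, which is what the `small_kernel_merged=True` deploy configs below rely on. The
+sketch is illustrative only; the `merge_kernels` helper is ours and BatchNorm fusion is omitted.
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+def merge_kernels(large_k, small_k):
+    """Zero-pad the small depth-wise kernel to the large size and add the weights."""
+    pad = (large_k.shape[-1] - small_k.shape[-1]) // 2
+    return large_k + F.pad(small_k, [pad] * 4)
+
+
+channels, large, small = 8, 31, 5
+w_large = torch.randn(channels, 1, large, large)  # depth-wise: one filter per channel
+w_small = torch.randn(channels, 1, small, small)
+x = torch.randn(1, channels, 56, 56)
+
+two_branch = (F.conv2d(x, w_large, padding=large // 2, groups=channels)
+              + F.conv2d(x, w_small, padding=small // 2, groups=channels))
+one_branch = F.conv2d(x, merge_kernels(w_large, w_small),
+                      padding=large // 2, groups=channels)
+print(torch.allclose(two_branch, one_branch, atol=1e-3))  # True: one kernel, same function
+```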
+ +
+
+## How to use it?
+
+
+
+**Predict image**
+
+```python
+from mmpretrain import inference_model, get_model
+
+model = get_model('replknet-31B_3rdparty_in1k', pretrained=True)
+model.backbone.switch_to_deploy()
+predict = inference_model(model, 'demo/bird.JPEG')
+print(predict['pred_class'])
+print(predict['pred_score'])
+```
+
+**Use the model**
+
+```python
+import torch
+from mmpretrain import get_model
+
+model = get_model('replknet-31B_3rdparty_in1k', pretrained=True)
+inputs = torch.rand(1, 3, 224, 224)
+out = model(inputs)
+print(type(out))
+# To extract features.
+feats = model.extract_feat(inputs)
+print(type(feats))
+```
+
+**Test Command**
+
+Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset).
+
+Test:
+
+```shell
+python tools/test.py configs/replknet/replknet-31B_32xb64_in1k.py https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31B_3rdparty_in1k_20221118-fd08e268.pth
+```
+
+**Reparameterization**
+
+The checkpoints provided are all `training-time` models. Use the reparameterization tool to convert them to the more efficient `inference-time` architecture, which not only has fewer parameters but also requires less computation.
+
+```bash
+python tools/convert_models/reparameterize_model.py ${CFG_PATH} ${SRC_CKPT_PATH} ${TARGET_CKPT_PATH}
+```
+
+`${CFG_PATH}` is the config file, `${SRC_CKPT_PATH}` is the source checkpoint file, and `${TARGET_CKPT_PATH}` is the path to save the converted deploy weights.
+
+To use the reparameterized weights, switch to the corresponding deploy config file.
+
+```bash
+python tools/test.py ${deploy_cfg} ${deploy_checkpoint} --metrics accuracy
+```
+
+You can also use `backbone.switch_to_deploy()` to switch to the deploy mode in Python code.
For example: + +```python +from mmpretrain.models import RepLKNet + +backbone = RepLKNet(arch='31B') +backbone.switch_to_deploy() +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :--------------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :-----------------------------------------: | :------------------------------------------------------------: | +| `replknet-31B_3rdparty_in1k`\* | From scratch | 79.86 | 15.64 | 83.48 | 96.57 | [config](replknet-31B_32xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31B_3rdparty_in1k_20221118-fd08e268.pth) | +| `replknet-31B_3rdparty_in1k-384px`\* | From scratch | 79.86 | 45.95 | 84.84 | 97.34 | [config](replknet-31B_32xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31B_3rdparty_in1k-384px_20221118-03a170ce.pth) | +| `replknet-31B_in21k-pre_3rdparty_in1k`\* | ImageNet-21k | 79.86 | 15.64 | 85.20 | 97.56 | [config](replknet-31B_32xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31B_in21k-pre_3rdparty_in1k_20221118-54ed5c46.pth) | +| `replknet-31B_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 79.86 | 45.95 | 85.99 | 97.75 | [config](replknet-31B_32xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31B_in21k-pre_3rdparty_in1k-384px_20221118-76c92b24.pth) | +| `replknet-31L_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 172.67 | 97.24 | 86.63 | 98.00 | [config](replknet-31L_32xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31L_in21k-pre_3rdparty_in1k-384px_20221118-dc3fc07c.pth) | +| `replknet-XL_meg73m-pre_3rdparty_in1k-320px`\* | MEG73M | 335.44 | 129.57 | 87.57 | 98.39 | [config](replknet-XL_32xb64_in1k-320px.py) | [model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-XL_meg73m-pre_3rdparty_in1k-320px_20221118-88259b1d.pth) | + +*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepLKNet-pytorch/blob/main/replknet.py). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{ding2022scaling, + title={Scaling up your kernels to 31x31: Revisiting large kernel design in cnns}, + author={Ding, Xiaohan and Zhang, Xiangyu and Han, Jungong and Ding, Guiguang}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={11963--11975}, + year={2022} +} +``` diff --git a/configs/replknet/deploy/replknet-31B-deploy_32xb64_in1k-384px.py b/configs/replknet/deploy/replknet-31B-deploy_32xb64_in1k-384px.py new file mode 100644 index 0000000..a14fe63 --- /dev/null +++ b/configs/replknet/deploy/replknet-31B-deploy_32xb64_in1k-384px.py @@ -0,0 +1,3 @@ +_base_ = '../replknet-31B_32xb64_in1k-384px.py' + +model = dict(backbone=dict(small_kernel_merged=True)) diff --git a/configs/replknet/deploy/replknet-31B-deploy_32xb64_in1k.py b/configs/replknet/deploy/replknet-31B-deploy_32xb64_in1k.py new file mode 100644 index 0000000..4f92c49 --- /dev/null +++ b/configs/replknet/deploy/replknet-31B-deploy_32xb64_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../replknet-31B_32xb64_in1k.py' + +model = dict(backbone=dict(small_kernel_merged=True)) diff --git a/configs/replknet/deploy/replknet-31L-deploy_32xb64_in1k-384px.py b/configs/replknet/deploy/replknet-31L-deploy_32xb64_in1k-384px.py new file mode 100644 index 0000000..63e590f --- /dev/null +++ b/configs/replknet/deploy/replknet-31L-deploy_32xb64_in1k-384px.py @@ -0,0 +1,3 @@ +_base_ = '../replknet-31L_32xb64_in1k-384px.py' + +model = dict(backbone=dict(small_kernel_merged=True)) diff --git a/configs/replknet/deploy/replknet-XL-deploy_32xb64_in1k-320px.py b/configs/replknet/deploy/replknet-XL-deploy_32xb64_in1k-320px.py new file mode 100644 index 0000000..a0a8ed5 --- /dev/null +++ b/configs/replknet/deploy/replknet-XL-deploy_32xb64_in1k-320px.py @@ -0,0 +1,3 @@ +_base_ = '../replknet-XL_32xb64_in1k-320px.py' + +model = dict(backbone=dict(small_kernel_merged=True)) diff --git a/configs/replknet/metafile.yml b/configs/replknet/metafile.yml new file mode 100644 index 0000000..f9f3744 --- /dev/null +++ b/configs/replknet/metafile.yml @@ -0,0 +1,129 @@ +Collections: + - Name: RepLKNet + Metadata: + Training Data: ImageNet-1k + Architecture: + - Large-Kernel Convolution + - VGG-style Neural Network + Paper: + URL: https://arxiv.org/abs/2203.06717 + Title: 'Scaling Up Your Kernels to 31x31: Revisiting Large Kernel Design in CNNs' + README: configs/replknet/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v1.0.0rc3/mmcls/models/backbones/replknet.py + Version: v1.0.0rc3 + +Models: + - Name: replknet-31B_3rdparty_in1k + In Collection: RepLKNet + Config: configs/replknet/replknet-31B_32xb64_in1k.py + Metadata: + FLOPs: 15636547584 + Parameters: 79864168 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 83.48 + Top 5 Accuracy: 96.57 + Weights: https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31B_3rdparty_in1k_20221118-fd08e268.pth + Converted From: + Weights: https://drive.google.com/u/0/uc?id=1azQUiCxK9feYVkkrPqwVPBtNsTzDrX7S&export=download + Code: https://github.com/DingXiaoH/RepLKNet-pytorch/blob/main/replknet.py + + - Name: replknet-31B_3rdparty_in1k-384px + In Collection: RepLKNet + Config: configs/replknet/replknet-31B_32xb64_in1k-384px.py + Metadata: + FLOPs: 45952303104 + Parameters: 79864168 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 84.84 + Top 5 Accuracy: 97.34 + Weights: 
https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31B_3rdparty_in1k-384px_20221118-03a170ce.pth + Converted From: + Weights: https://drive.google.com/u/0/uc?id=1vo-P3XB6mRLUeDzmgv90dOu73uCeLfZN&export=download + Code: https://github.com/DingXiaoH/RepLKNet-pytorch/blob/main/replknet.py + + - Name: replknet-31B_in21k-pre_3rdparty_in1k + In Collection: RepLKNet + Config: configs/replknet/replknet-31B_32xb64_in1k.py + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 15636547584 + Parameters: 79864168 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.20 + Top 5 Accuracy: 97.56 + Weights: https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31B_in21k-pre_3rdparty_in1k_20221118-54ed5c46.pth + Converted From: + Weights: https://drive.google.com/u/0/uc?id=1DslZ2voXZQR1QoFY9KnbsHAeF84hzS0s&export=download + Code: https://github.com/DingXiaoH/RepLKNet-pytorch/blob/main/replknet.py + + - Name: replknet-31B_in21k-pre_3rdparty_in1k-384px + In Collection: RepLKNet + Config: configs/replknet/replknet-31B_32xb64_in1k-384px.py + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 45952303104 + Parameters: 79864168 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.99 + Top 5 Accuracy: 97.75 + Weights: https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31B_in21k-pre_3rdparty_in1k-384px_20221118-76c92b24.pth + Converted From: + Weights: https://drive.google.com/u/0/uc?id=1Sc46BWdXXm2fVP-K_hKKU_W8vAB-0duX&export=download + Code: https://github.com/DingXiaoH/RepLKNet-pytorch/blob/main/replknet.py + + - Name: replknet-31L_in21k-pre_3rdparty_in1k-384px + In Collection: RepLKNet + Config: configs/replknet/replknet-31L_32xb64_in1k-384px.py + Metadata: + Training Data: + - ImageNet-21k + - ImageNet-1k + FLOPs: 97240006656 + Parameters: 172671016 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 86.63 + Top 5 Accuracy: 98.00 + Weights: https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31L_in21k-pre_3rdparty_in1k-384px_20221118-dc3fc07c.pth + Converted From: + Weights: https://drive.google.com/u/0/uc?id=1JYXoNHuRvC33QV1pmpzMTKEni1hpWfBl&export=download + Code: https://github.com/DingXiaoH/RepLKNet-pytorch/blob/main/replknet.py + + - Name: replknet-XL_meg73m-pre_3rdparty_in1k-320px + In Collection: RepLKNet + Config: configs/replknet/replknet-XL_32xb64_in1k-320px.py + Metadata: + Training Data: + - MegData-73M + - ImageNet-1k + FLOPs: 129570201600 + Parameters: 335435752 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 87.57 + Top 5 Accuracy: 98.39 + Weights: https://download.openmmlab.com/mmclassification/v0/replknet/replknet-XL_meg73m-pre_3rdparty_in1k-320px_20221118-88259b1d.pth + Converted From: + Weights: https://drive.google.com/u/0/uc?id=1tPC60El34GntXByIRHb-z-Apm4Y5LX1T&export=download + Code: https://github.com/DingXiaoH/RepLKNet-pytorch/blob/main/replknet.py diff --git a/configs/replknet/replknet-31B_32xb64_in1k-384px.py b/configs/replknet/replknet-31B_32xb64_in1k-384px.py new file mode 100644 index 0000000..4e714f3 --- /dev/null +++ b/configs/replknet/replknet-31B_32xb64_in1k-384px.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/replknet-31B_in1k.py', + '../_base_/datasets/imagenet_bs16_pil_bicubic_384.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +# schedule settings 
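+# Cosine learning-rate decay over the whole run; T_max below matches the
+# 300 training epochs configured in train_cfg.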
+param_scheduler = dict( + type='CosineAnnealingLR', T_max=300, by_epoch=True, begin=0, end=300) + +train_cfg = dict(by_epoch=True, max_epochs=300) diff --git a/configs/replknet/replknet-31B_32xb64_in1k.py b/configs/replknet/replknet-31B_32xb64_in1k.py new file mode 100644 index 0000000..cf06f2d --- /dev/null +++ b/configs/replknet/replknet-31B_32xb64_in1k.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/replknet-31B_in1k.py', + '../_base_/datasets/imagenet_bs32_pil_bicubic.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +# schedule settings +param_scheduler = dict( + type='CosineAnnealingLR', T_max=300, by_epoch=True, begin=0, end=300) + +train_cfg = dict(by_epoch=True, max_epochs=300) diff --git a/configs/replknet/replknet-31L_32xb64_in1k-384px.py b/configs/replknet/replknet-31L_32xb64_in1k-384px.py new file mode 100644 index 0000000..8cdab24 --- /dev/null +++ b/configs/replknet/replknet-31L_32xb64_in1k-384px.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/replknet-31L_in1k.py', + '../_base_/datasets/imagenet_bs16_pil_bicubic_384.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +# schedule settings +param_scheduler = dict( + type='CosineAnnealingLR', T_max=300, by_epoch=True, begin=0, end=300) + +train_cfg = dict(by_epoch=True, max_epochs=300) diff --git a/configs/replknet/replknet-XL_32xb64_in1k-320px.py b/configs/replknet/replknet-XL_32xb64_in1k-320px.py new file mode 100644 index 0000000..9b0aab1 --- /dev/null +++ b/configs/replknet/replknet-XL_32xb64_in1k-320px.py @@ -0,0 +1,12 @@ +_base_ = [ + '../_base_/models/replknet-XL_in1k.py', + '../_base_/datasets/imagenet_bs8_pil_bicubic_320.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +# schedule settings +param_scheduler = dict( + type='CosineAnnealingLR', T_max=300, by_epoch=True, begin=0, end=300) + +train_cfg = dict(by_epoch=True, max_epochs=300) diff --git a/configs/repmlp/README.md b/configs/repmlp/README.md new file mode 100644 index 0000000..41dfa23 --- /dev/null +++ b/configs/repmlp/README.md @@ -0,0 +1,103 @@ +# RepMLP + +> [RepMLP: Re-parameterizing Convolutions into Fully-connected Layers for Image Recognition](https://arxiv.org/abs/2105.01883) + + + +## Abstract + +We propose RepMLP, a multi-layer-perceptron-style neural network building block for image recognition, which is composed of a series of fully-connected (FC) layers. Compared to convolutional layers, FC layers are more efficient, better at modeling the long-range dependencies and positional patterns, but worse at capturing the local structures, hence usually less favored for image recognition. We propose a structural re-parameterization technique that adds local prior into an FC to make it powerful for image recognition. Specifically, we construct convolutional layers inside a RepMLP during training and merge them into the FC for inference. On CIFAR, a simple pure-MLP model shows performance very close to CNN. By inserting RepMLP in traditional CNN, we improve ResNets by 1.8% accuracy on ImageNet, 2.9% for face recognition, and 2.3% mIoU on Cityscapes with lower FLOPs. 
Our intriguing findings highlight that combining the global representational capacity and positional perception of FC with the local prior of convolution can improve the performance of neural network with faster speed on both the tasks with translation invariance (e.g., semantic segmentation) and those with aligned images and positional patterns (e.g., face recognition). + +
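+The "structural re-parameterization" above rests on the fact that a convolution acting on a
+flattened feature map is itself a linear map, so its weights can be absorbed into an FC layer after
+training. The toy sketch below illustrates just that identity; the `conv_to_fc_weight` helper is
+ours and ignores the per-partition design and BN fusion of the actual RepMLP block.
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+def conv_to_fc_weight(conv_w, in_shape):
+    """Recover the FC weight equivalent to a conv by pushing one-hot inputs through it."""
+    c, h, w = in_shape
+    eye = torch.eye(c * h * w).reshape(c * h * w, c, h, w)
+    out = F.conv2d(eye, conv_w, padding=conv_w.shape[-1] // 2)
+    return out.reshape(c * h * w, -1).t()  # (out_features, in_features)
+
+
+c, h, w = 4, 8, 8
+conv_w = torch.randn(4, c, 3, 3)
+fc_weight = conv_to_fc_weight(conv_w, (c, h, w))
+
+x = torch.randn(1, c, h, w)
+y_conv = F.conv2d(x, conv_w, padding=1).flatten(1)
+y_fc = x.flatten(1) @ fc_weight.t()
+print(torch.allclose(y_conv, y_fc, atol=1e-4))  # True: the conv folds into an FC weight
+```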
+ +
+
+## How to use it?
+
+
+
+**Predict image**
+
+```python
+from mmpretrain import inference_model, get_model
+
+model = get_model('repmlp-base_3rdparty_8xb64_in1k', pretrained=True)
+model.backbone.switch_to_deploy()
+predict = inference_model(model, 'demo/bird.JPEG')
+print(predict['pred_class'])
+print(predict['pred_score'])
+```
+
+**Use the model**
+
+```python
+import torch
+from mmpretrain import get_model
+
+model = get_model('repmlp-base_3rdparty_8xb64_in1k', pretrained=True)
+inputs = torch.rand(1, 3, 224, 224)
+out = model(inputs)
+print(type(out))
+# To extract features.
+feats = model.extract_feat(inputs)
+print(type(feats))
+```
+
+**Test Command**
+
+Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset).
+
+Test:
+
+```shell
+python tools/test.py configs/repmlp/repmlp-base_8xb64_in1k.py https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k_20220330-1cb1f11b.pth
+```
+
+**Reparameterization**
+
+The checkpoints provided are all `training-time` models. Use the reparameterization tool to convert them to the more efficient `inference-time` architecture, which not only has fewer parameters but also requires less computation.
+
+```bash
+python tools/convert_models/reparameterize_model.py ${CFG_PATH} ${SRC_CKPT_PATH} ${TARGET_CKPT_PATH}
+```
+
+`${CFG_PATH}` is the config file, `${SRC_CKPT_PATH}` is the source checkpoint file, and `${TARGET_CKPT_PATH}` is the path to save the converted deploy weights.
+
+To use the reparameterized weights, switch to the corresponding deploy config file.
+
+```bash
+python tools/test.py ${deploy_cfg} ${deploy_checkpoint} --metrics accuracy
+```
+
+You can also use `backbone.switch_to_deploy()` to switch to the deploy mode in Python code. For example:
+
+```python
+from mmpretrain.models import RepMLPNet
+
+backbone = RepMLPNet(arch='B', img_size=224, reparam_conv_kernels=(1, 3))
+backbone.switch_to_deploy()
+```
+
+
+
+## Models and results
+
+### Image Classification on ImageNet-1k
+
+| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download |
+| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
+| `repmlp-base_3rdparty_8xb64_in1k`\* | From scratch | 68.24 | 6.71 | 80.41 | 95.14 | [config](repmlp-base_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k_20220330-1cb1f11b.pth) |
+| `repmlp-base_3rdparty_8xb64_in1k-256px`\* | From scratch | 96.45 | 9.69 | 81.11 | 95.50 | [config](repmlp-base_8xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k-256px_20220330-7c5a91ce.pth) |
+
+*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepMLP/blob/072d8516beba83d75dfe6ebb12f625abad4b53d5/repmlpnet.py#L278). The config files of these models are only for inference.
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{ding2021repmlp, + title={Repmlp: Re-parameterizing convolutions into fully-connected layers for image recognition}, + author={Ding, Xiaohan and Xia, Chunlong and Zhang, Xiangyu and Chu, Xiaojie and Han, Jungong and Ding, Guiguang}, + journal={arXiv preprint arXiv:2105.01883}, + year={2021} +} +``` diff --git a/configs/repmlp/metafile.yml b/configs/repmlp/metafile.yml new file mode 100644 index 0000000..7f391e0 --- /dev/null +++ b/configs/repmlp/metafile.yml @@ -0,0 +1,48 @@ +Collections: + - Name: RepMLP + Metadata: + Training Data: ImageNet-1k + Architecture: + - Multi-layer Perceptron + - Re-parameterization Convolution + Paper: + URL: https://arxiv.org/abs/2105.01883 + Title: 'RepMLP: Re-parameterizing Convolutions into Fully-connected Layers for Image Recognition' + README: configs/repmlp/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.21.0/mmcls/models/backbones/repmlp.py + Version: v0.21.0 + +Models: + - Name: repmlp-base_3rdparty_8xb64_in1k + In Collection: RepMLP + Config: configs/repmlp/repmlp-base_8xb64_in1k.py + Metadata: + FLOPs: 6710000000 # 6.71 G + Parameters: 68240000 # 68.24 M + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.41 + Top 5 Accuracy: 95.14 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k_20220330-1cb1f11b.pth + Converted From: + Weights: https://github.com/DingXiaoH/RepMLP + Code: https://github.com/DingXiaoH/RepMLP/blob/072d8516beba83d75dfe6ebb12f625abad4b53d5/repmlpnet.py#L274 + - Name: repmlp-base_3rdparty_8xb64_in1k-256px + In Collection: RepMLP + Config: configs/repmlp/repmlp-base_8xb64_in1k-256px.py + Metadata: + FLOPs: 9690000000 # 9.69 G + Parameters: 96450000 # 96.45M + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.11 + Top 5 Accuracy: 95.50 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-base_3rdparty_8xb64_in1k-256px_20220330-7c5a91ce.pth + Converted From: + Weights: https://github.com/DingXiaoH/RepMLP + Code: https://github.com/DingXiaoH/RepMLP/blob/072d8516beba83d75dfe6ebb12f625abad4b53d5/repmlpnet.py#L278 diff --git a/configs/repmlp/repmlp-base_8xb64_in1k-256px.py b/configs/repmlp/repmlp-base_8xb64_in1k-256px.py new file mode 100644 index 0000000..81dc55a --- /dev/null +++ b/configs/repmlp/repmlp-base_8xb64_in1k-256px.py @@ -0,0 +1,36 @@ +_base_ = [ + '../_base_/models/repmlp-base_224.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict(backbone=dict(img_size=256)) + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=256), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=292, edge='short', backend='pillow'), + dict(type='CenterCrop', crop_size=256), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (8 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=512) diff --git a/configs/repmlp/repmlp-base_8xb64_in1k.py b/configs/repmlp/repmlp-base_8xb64_in1k.py new file mode 100644 index 0000000..666ce40 --- /dev/null +++ b/configs/repmlp/repmlp-base_8xb64_in1k.py @@ -0,0 +1,26 @@ +_base_ = [ + '../_base_/models/repmlp-base_224.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# dataset settings +test_pipeline = [ + dict(type='LoadImageFromFile'), + # resizing to (256, 256) here, different from resizing shorter edge to 256 + dict(type='Resize', scale=(256, 256), backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=512) diff --git a/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py b/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py new file mode 100644 index 0000000..b5b2c88 --- /dev/null +++ b/configs/repmlp/repmlp-base_delopy_8xb64_in1k.py @@ -0,0 +1,3 @@ +_base_ = ['./repmlp-base_8xb64_in1k.py'] + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py b/configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py new file mode 100644 index 0000000..27ff50a --- /dev/null +++ b/configs/repmlp/repmlp-base_deploy_8xb64_in1k-256px.py @@ -0,0 +1,3 @@ +_base_ = ['./repmlp-base_8xb64_in1k-256px.py'] + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/README.md b/configs/repvgg/README.md new file mode 100644 index 0000000..9a47f9d --- /dev/null +++ b/configs/repvgg/README.md @@ -0,0 +1,142 @@ +# RepVGG + +> [RepVGG: Making VGG-style ConvNets Great Again](https://arxiv.org/abs/2101.03697) + + + +## Introduction + +RepVGG is a VGG-style convolutional architecture. It has the following advantages: + +1. The model has a VGG-like plain (a.k.a. feed-forward) topology 1 without any branches. I.e., every layer takes the output of its only preceding layer as input and feeds the output into its only following layer. +2. The model’s body uses only 3 × 3 conv and ReLU. +3. The concrete architecture (including the specific depth and layer widths) is instantiated with no automatic search, manual refinement, compound scaling, nor other heavy designs. + +
+ +
+ +## Abstract + +
+We present a simple but powerful architecture of convolutional neural network, which has a VGG-like inference-time body composed of nothing but a stack of 3x3 convolution and ReLU, while the training-time model has a multi-branch topology. Such decoupling of the training-time and inference-time architecture is realized by a structural re-parameterization technique so that the model is named RepVGG. On ImageNet, RepVGG reaches over 80% top-1 accuracy, which is the first time for a plain model, to the best of our knowledge. On NVIDIA 1080Ti GPU, RepVGG models run 83% faster than ResNet-50 or 101% faster than ResNet-101 with higher accuracy and show favorable accuracy-speed trade-off compared to the state-of-the-art models like EfficientNet and RegNet. +
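+Concretely, the structural re-parameterization merges the training-time 3x3, 1x1 and identity
+branches of a block into a single 3x3 kernel for inference. The sketch below shows only that
+folding step under simplifying assumptions (the `fuse_repvgg_branches` helper is ours and BatchNorm
+fusion is left out); the repository's `backbone.switch_to_deploy()` described later performs the
+full conversion.
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+def fuse_repvgg_branches(w3, w1, channels):
+    """Fold parallel 3x3, 1x1 and identity branches into one 3x3 kernel."""
+    w1_as_3 = F.pad(w1, [1, 1, 1, 1])  # place the 1x1 kernel at the centre of a 3x3 kernel
+    w_id = torch.zeros(channels, channels, 3, 3)
+    w_id[torch.arange(channels), torch.arange(channels), 1, 1] = 1.0  # identity as a 3x3 conv
+    return w3 + w1_as_3 + w_id
+
+
+c = 8
+w3 = torch.randn(c, c, 3, 3)
+w1 = torch.randn(c, c, 1, 1)
+x = torch.randn(1, c, 32, 32)
+
+multi_branch = F.conv2d(x, w3, padding=1) + F.conv2d(x, w1) + x
+single_branch = F.conv2d(x, fuse_repvgg_branches(w3, w1, c), padding=1)
+print(torch.allclose(multi_branch, single_branch, atol=1e-3))  # True once the branches are folded
+```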
+ +
+
+## How to use it?
+
+
+
+**Predict image**
+
+```python
+from mmpretrain import inference_model, get_model
+
+model = get_model('repvgg-A0_8xb32_in1k', pretrained=True)
+model.backbone.switch_to_deploy()
+predict = inference_model(model, 'demo/bird.JPEG')
+print(predict['pred_class'])
+print(predict['pred_score'])
+```
+
+**Use the model**
+
+```python
+import torch
+from mmpretrain import get_model
+
+model = get_model('repvgg-A0_8xb32_in1k', pretrained=True)
+inputs = torch.rand(1, 3, 224, 224)
+out = model(inputs)
+print(type(out))
+# To extract features.
+feats = model.extract_feat(inputs)
+print(type(feats))
+```
+
+**Train/Test Command**
+
+Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset).
+
+Train:
+
+```shell
+python tools/train.py configs/repvgg/repvgg-A0_8xb32_in1k.py
+```
+
+Test:
+
+```shell
+python tools/test.py configs/repvgg/repvgg-A0_8xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_8xb32_in1k_20221213-60ae8e23.pth
+```
+
+Test with the reparameterized model:
+
+```shell
+python tools/test.py configs/repvgg/repvgg-A0_8xb32_in1k.py repvgg_A0_deploy.pth --cfg-options model.backbone.deploy=True
+```
+
+**Reparameterization**
+
+The checkpoints provided are all `training-time` models. Use the reparameterization tool to convert them to the more efficient `inference-time` architecture, which not only has fewer parameters but also requires less computation.
+
+```bash
+python tools/convert_models/reparameterize_model.py ${CFG_PATH} ${SRC_CKPT_PATH} ${TARGET_CKPT_PATH}
+```
+
+`${CFG_PATH}` is the config file, `${SRC_CKPT_PATH}` is the source checkpoint file, and `${TARGET_CKPT_PATH}` is the path to save the converted deploy weights.
+
+To use the reparameterized weights, switch to the corresponding deploy config file.
+
+```bash
+python tools/test.py ${deploy_cfg} ${deploy_checkpoint} --metrics accuracy
+```
+
+You can also use `backbone.switch_to_deploy()` to switch to the deploy mode in Python code.
For example: + +```python +from mmpretrain.models import RepVGG + +backbone = RepVGG(arch='A0') +backbone.switch_to_deploy() +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------: | :-------------------------------------------------------------------------------------: | +| `repvgg-A0_8xb32_in1k` | From scratch | 8.31 | 1.36 | 72.37 | 90.56 | [config](repvgg-A0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_8xb32_in1k_20221213-60ae8e23.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_8xb32_in1k_20221213-60ae8e23.log) | +| `repvgg-A1_8xb32_in1k` | From scratch | 12.79 | 2.36 | 74.23 | 91.80 | [config](repvgg-A1_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_8xb32_in1k_20221213-f81bf3df.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_8xb32_in1k_20221213-f81bf3df.log) | +| `repvgg-A2_8xb32_in1k` | From scratch | 25.50 | 5.12 | 76.49 | 93.09 | [config](repvgg-A2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_8xb32_in1k_20221213-a8767caf.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_8xb32_in1k_20221213-a8767caf.log) | +| `repvgg-B0_8xb32_in1k` | From scratch | 3.42 | 15.82 | 75.27 | 92.21 | [config](repvgg-B0_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_8xb32_in1k_20221213-5091ecc7.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_8xb32_in1k_20221213-5091ecc7.log) | +| `repvgg-B1_8xb32_in1k` | From scratch | 51.83 | 11.81 | 78.19 | 94.04 | [config](repvgg-B1_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_8xb32_in1k_20221213-d17c45e7.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_8xb32_in1k_20221213-d17c45e7.log) | +| `repvgg-B1g2_8xb32_in1k` | From scratch | 41.36 | 8.81 | 77.87 | 93.99 | [config](repvgg-B1g2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_8xb32_in1k_20221213-ae6428fd.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_8xb32_in1k_20221213-ae6428fd.log) | +| `repvgg-B1g4_8xb32_in1k` | From scratch | 36.13 | 7.30 | 77.81 | 93.77 | [config](repvgg-B1g4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_8xb32_in1k_20221213-a7a4aaea.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_8xb32_in1k_20221213-a7a4aaea.log) | +| `repvgg-B2_8xb32_in1k` | From scratch | 80.32 | 18.37 | 78.58 | 94.23 | [config](repvgg-B2_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_8xb32_in1k_20221213-d8b420ef.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_8xb32_in1k_20221213-d8b420ef.log) | +| `repvgg-B2g4_8xb32_in1k` | From scratch | 55.78 | 11.33 | 79.44 | 94.72 | [config](repvgg-B2g4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_8xb32_in1k_20221213-0c1990eb.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_8xb32_in1k_20221213-0c1990eb.log) | +| 
`repvgg-B3_8xb32_in1k` | From scratch | 110.96 | 26.21 | 80.58 | 95.33 | [config](repvgg-B3_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_8xb32_in1k_20221213-927a329a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_8xb32_in1k_20221213-927a329a.log) | +| `repvgg-B3g4_8xb32_in1k` | From scratch | 75.63 | 16.06 | 80.26 | 95.15 | [config](repvgg-B3g4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_8xb32_in1k_20221213-e01cb280.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_8xb32_in1k_20221213-e01cb280.log) | +| `repvgg-D2se_3rdparty_in1k`\* | From scratch | 120.39 | 32.84 | 81.81 | 95.94 | [config](repvgg-D2se_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth) | + +*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L250). The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{ding2021repvgg, + title={Repvgg: Making vgg-style convnets great again}, + author={Ding, Xiaohan and Zhang, Xiangyu and Ma, Ningning and Han, Jungong and Ding, Guiguang and Sun, Jian}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={13733--13742}, + year={2021} +} +``` diff --git a/configs/repvgg/metafile.yml b/configs/repvgg/metafile.yml new file mode 100644 index 0000000..e93250a --- /dev/null +++ b/configs/repvgg/metafile.yml @@ -0,0 +1,175 @@ +Collections: + - Name: RepVGG + Metadata: + Training Data: ImageNet-1k + Architecture: + - re-parameterization Convolution + - VGG-style Neural Network + Paper: + URL: https://arxiv.org/abs/2101.03697 + Title: 'RepVGG: Making VGG-style ConvNets Great Again' + README: configs/repvgg/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.16.0/mmcls/models/backbones/repvgg.py#L257 + Version: v0.16.0 + +Models: + - Name: repvgg-A0_8xb32_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-A0_8xb32_in1k.py + Metadata: + FLOPs: 1360233728 + Parameters: 8309384 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 72.37 + Top 5 Accuracy: 90.56 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_8xb32_in1k_20221213-60ae8e23.pth + - Name: repvgg-A1_8xb32_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-A1_8xb32_in1k.py + Metadata: + FLOPs: 2362750208 + Parameters: 12789864 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 74.23 + Top 5 Accuracy: 91.80 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_8xb32_in1k_20221213-f81bf3df.pth + - Name: repvgg-A2_8xb32_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-A2_8xb32_in1k.py + Metadata: + FLOPs: 5115612544 + Parameters: 25499944 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 76.49 + Top 5 Accuracy: 93.09 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_8xb32_in1k_20221213-a8767caf.pth + - Name: repvgg-B0_8xb32_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B0_8xb32_in1k.py + Metadata: + FLOPs: 15820000000 + Parameters: 3420000 + Results: + - Dataset: 
ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 75.27 + Top 5 Accuracy: 92.21 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_8xb32_in1k_20221213-5091ecc7.pth + - Name: repvgg-B1_8xb32_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B1_8xb32_in1k.py + Metadata: + FLOPs: 11813537792 + Parameters: 51829480 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 78.19 + Top 5 Accuracy: 94.04 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_8xb32_in1k_20221213-d17c45e7.pth + - Name: repvgg-B1g2_8xb32_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B1g2_8xb32_in1k.py + Metadata: + FLOPs: 8807794688 + Parameters: 41360104 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 77.87 + Top 5 Accuracy: 93.99 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_8xb32_in1k_20221213-ae6428fd.pth + - Name: repvgg-B1g4_8xb32_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B1g4_8xb32_in1k.py + Metadata: + FLOPs: 7304923136 + Parameters: 36125416 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 77.81 + Top 5 Accuracy: 93.77 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_8xb32_in1k_20221213-a7a4aaea.pth + - Name: repvgg-B2_8xb32_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B2_8xb32_in1k.py + Metadata: + FLOPs: 18374175232 + Parameters: 80315112 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 78.58 + Top 5 Accuracy: 94.23 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_8xb32_in1k_20221213-d8b420ef.pth + - Name: repvgg-B2g4_8xb32_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B2g4_8xb32_in1k.py + Metadata: + FLOPs: 11329464832 + Parameters: 55777512 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 79.44 + Top 5 Accuracy: 94.72 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_8xb32_in1k_20221213-0c1990eb.pth + - Name: repvgg-B3_8xb32_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B3_8xb32_in1k.py + Metadata: + FLOPs: 26206448128 + Parameters: 110960872 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 80.58 + Top 5 Accuracy: 95.33 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_8xb32_in1k_20221213-927a329a.pth + - Name: repvgg-B3g4_8xb32_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-B3g4_8xb32_in1k.py + Metadata: + FLOPs: 16062065152 + Parameters: 75626728 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 80.26 + Top 5 Accuracy: 95.15 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_8xb32_in1k_20221213-e01cb280.pth + - Name: repvgg-D2se_3rdparty_in1k + In Collection: RepVGG + Config: configs/repvgg/repvgg-D2se_8xb32_in1k.py + Metadata: + FLOPs: 32838581760 + Parameters: 120387572 + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 81.81 + Top 5 Accuracy: 95.94 + Weights: https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-mixup-coslr-200e_in1k_20210909-cf3139b7.pth + Converted From: + Weights: https://drive.google.com/drive/folders/1Avome4KvNp0Lqh2QwhXO6L5URQjzCjUq + 
Code: https://github.com/DingXiaoH/RepVGG/blob/9f272318abfc47a2b702cd0e916fca8d25d683e7/repvgg.py#L250 diff --git a/configs/repvgg/repvgg-A0_8xb32_in1k.py b/configs/repvgg/repvgg-A0_8xb32_in1k.py new file mode 100644 index 0000000..b767ae2 --- /dev/null +++ b/configs/repvgg/repvgg-A0_8xb32_in1k.py @@ -0,0 +1,33 @@ +_base_ = [ + '../_base_/models/repvgg-A0_in1k.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +val_dataloader = dict(batch_size=256) +test_dataloader = dict(batch_size=256) + +# schedule settings +optim_wrapper = dict( + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys={ + 'branch_3x3.norm': dict(decay_mult=0.0), + 'branch_1x1.norm': dict(decay_mult=0.0), + 'branch_norm.bias': dict(decay_mult=0.0), + })) + +# schedule settings +param_scheduler = dict( + type='CosineAnnealingLR', + T_max=120, + by_epoch=True, + begin=0, + end=120, + convert_to_iter_based=True) + +train_cfg = dict(by_epoch=True, max_epochs=120) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) diff --git a/configs/repvgg/repvgg-A0_deploy_in1k.py b/configs/repvgg/repvgg-A0_deploy_in1k.py new file mode 100644 index 0000000..897e2bb --- /dev/null +++ b/configs/repvgg/repvgg-A0_deploy_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_8xb32_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/repvgg/repvgg-A1_8xb32_in1k.py b/configs/repvgg/repvgg-A1_8xb32_in1k.py new file mode 100644 index 0000000..fab5e58 --- /dev/null +++ b/configs/repvgg/repvgg-A1_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_8xb32_in1k.py' + +model = dict(backbone=dict(arch='A1')) diff --git a/configs/repvgg/repvgg-A2_8xb32_in1k.py b/configs/repvgg/repvgg-A2_8xb32_in1k.py new file mode 100644 index 0000000..f6196f0 --- /dev/null +++ b/configs/repvgg/repvgg-A2_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_8xb32_in1k.py' + +model = dict(backbone=dict(arch='A2'), head=dict(in_channels=1408)) diff --git a/configs/repvgg/repvgg-B0_8xb32_in1k.py b/configs/repvgg/repvgg-B0_8xb32_in1k.py new file mode 100644 index 0000000..9bbc4ab --- /dev/null +++ b/configs/repvgg/repvgg-B0_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_8xb32_in1k.py' + +model = dict(backbone=dict(arch='B0'), head=dict(in_channels=1280)) diff --git a/configs/repvgg/repvgg-B1_8xb32_in1k.py b/configs/repvgg/repvgg-B1_8xb32_in1k.py new file mode 100644 index 0000000..e08db3c --- /dev/null +++ b/configs/repvgg/repvgg-B1_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_8xb32_in1k.py' + +model = dict(backbone=dict(arch='B1'), head=dict(in_channels=2048)) diff --git a/configs/repvgg/repvgg-B1g2_8xb32_in1k.py b/configs/repvgg/repvgg-B1g2_8xb32_in1k.py new file mode 100644 index 0000000..a1c53fd --- /dev/null +++ b/configs/repvgg/repvgg-B1g2_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_8xb32_in1k.py' + +model = dict(backbone=dict(arch='B1g2'), head=dict(in_channels=2048)) diff --git a/configs/repvgg/repvgg-B1g4_8xb32_in1k.py b/configs/repvgg/repvgg-B1g4_8xb32_in1k.py new file mode 100644 index 0000000..0757b1e --- /dev/null +++ b/configs/repvgg/repvgg-B1g4_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-A0_8xb32_in1k.py' + +model = dict(backbone=dict(arch='B1g4'), head=dict(in_channels=2048)) diff --git a/configs/repvgg/repvgg-B2_8xb32_in1k.py b/configs/repvgg/repvgg-B2_8xb32_in1k.py new file mode 100644 index 0000000..b9a7d4c --- /dev/null +++ b/configs/repvgg/repvgg-B2_8xb32_in1k.py @@ -0,0 +1,3 
@@ +_base_ = './repvgg-A0_8xb32_in1k.py' + +model = dict(backbone=dict(arch='B2'), head=dict(in_channels=2560)) diff --git a/configs/repvgg/repvgg-B2g4_8xb32_in1k.py b/configs/repvgg/repvgg-B2g4_8xb32_in1k.py new file mode 100644 index 0000000..8b33978 --- /dev/null +++ b/configs/repvgg/repvgg-B2g4_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-B3_8xb32_in1k.py' + +model = dict(backbone=dict(arch='B2g4'), head=dict(in_channels=2560)) diff --git a/configs/repvgg/repvgg-B3_8xb32_in1k.py b/configs/repvgg/repvgg-B3_8xb32_in1k.py new file mode 100644 index 0000000..e9d5257 --- /dev/null +++ b/configs/repvgg/repvgg-B3_8xb32_in1k.py @@ -0,0 +1,67 @@ +_base_ = [ + '../_base_/models/repvgg-B3_lbs-mixup_in1k.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict( + paramwise_cfg=dict( + bias_decay_mult=0.0, + custom_keys={ + 'branch_3x3.norm': dict(decay_mult=0.0), + 'branch_1x1.norm': dict(decay_mult=0.0), + 'branch_norm.bias': dict(decay_mult=0.0), + })) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=7, + magnitude_std=0.5, + hparams=dict(pad_val=[round(x) for x in bgr_mean])), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=256, edge='short', backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +param_scheduler = dict( + type='CosineAnnealingLR', + T_max=200, + by_epoch=True, + begin=0, + end=200, + convert_to_iter_based=True) + +train_cfg = dict(by_epoch=True, max_epochs=200) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) diff --git a/configs/repvgg/repvgg-B3g4_8xb32_in1k.py b/configs/repvgg/repvgg-B3g4_8xb32_in1k.py new file mode 100644 index 0000000..b0c5c00 --- /dev/null +++ b/configs/repvgg/repvgg-B3g4_8xb32_in1k.py @@ -0,0 +1,3 @@ +_base_ = './repvgg-B3_8xb32_in1k.py' + +model = dict(backbone=dict(arch='B3g4')) diff --git a/configs/repvgg/repvgg-D2se_8xb32_in1k.py b/configs/repvgg/repvgg-D2se_8xb32_in1k.py new file mode 100644 index 0000000..f532dcd --- /dev/null +++ b/configs/repvgg/repvgg-D2se_8xb32_in1k.py @@ -0,0 +1,28 @@ +_base_ = './repvgg-B3_8xb32_in1k.py' + +model = dict(backbone=dict(arch='D2se'), head=dict(in_channels=2560)) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=295, + eta_min=1.0e-6, + by_epoch=True, + begin=5, + end=300) +] + +train_cfg = dict(by_epoch=True, max_epochs=300) + +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', 
interval=1, max_keep_ckpts=3)) diff --git a/configs/res2net/README.md b/configs/res2net/README.md new file mode 100644 index 0000000..68b1acc --- /dev/null +++ b/configs/res2net/README.md @@ -0,0 +1,78 @@ +# Res2Net + +> [Res2Net: A New Multi-scale Backbone Architecture](https://arxiv.org/abs/1904.01169) + + + +## Abstract + +Representing features at multiple scales is of great importance for numerous vision tasks. Recent advances in backbone convolutional neural networks (CNNs) continually demonstrate stronger multi-scale representation ability, leading to consistent performance gains on a wide range of applications. However, most existing methods represent the multi-scale features in a layer-wise manner. In this paper, we propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. The proposed Res2Net block can be plugged into the state-of-the-art backbone CNN models, e.g., ResNet, ResNeXt, and DLA. We evaluate the Res2Net block on all these models and demonstrate consistent performance gains over baseline models on widely-used datasets, e.g., CIFAR-100 and ImageNet. Further ablation studies and experimental results on representative computer vision tasks, i.e., object detection, class activation mapping, and salient object detection, further verify the superiority of the Res2Net over the state-of-the-art baseline methods. + +
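To make the hierarchical split concrete, here is a minimal, self-contained sketch of the multi-scale idea described in the abstract. It is illustrative only: the name `Res2NetUnit` and its arguments are made up for this example, and the actual backbone built by the configs below additionally wraps the split with 1x1 convolutions, batch normalization and the outer residual connection.

```python
import torch
import torch.nn as nn


class Res2NetUnit(nn.Module):
    """Illustrative multi-scale unit with hierarchical residual-like links.

    The input channels are split into ``scales`` groups; every group after
    the first goes through a 3x3 conv, and each conv also receives the
    output of the previous group, so later groups see a larger receptive
    field.
    """

    def __init__(self, channels: int, scales: int = 4):
        super().__init__()
        assert channels % scales == 0, 'channels must be divisible by scales'
        self.scales = scales
        width = channels // scales
        self.convs = nn.ModuleList([
            nn.Conv2d(width, width, kernel_size=3, padding=1, bias=False)
            for _ in range(scales - 1)
        ])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        splits = torch.chunk(x, self.scales, dim=1)
        outs = [splits[0]]  # the first group is passed through unchanged
        prev = None
        for i, conv in enumerate(self.convs, start=1):
            # Hierarchical connection: add the previous group's output
            # before applying this group's 3x3 conv.
            inp = splits[i] if prev is None else splits[i] + prev
            prev = conv(inp)
            outs.append(prev)
        return torch.cat(outs, dim=1)


feat = torch.rand(1, 64, 56, 56)
print(Res2NetUnit(64, scales=4)(feat).shape)  # torch.Size([1, 64, 56, 56])
```

Because each later group is stacked on top of the previous ones, its effective receptive field grows, which is the granular multi-scale behaviour the paper exploits.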
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('res2net50-w14-s8_3rdparty_8xb32_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('res2net50-w14-s8_3rdparty_8xb32_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/res2net/res2net50-w14-s8_8xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------------: | :-------------------------------------------------------------------: | +| `res2net50-w14-s8_3rdparty_8xb32_in1k`\* | From scratch | 25.06 | 4.22 | 78.14 | 93.85 | [config](res2net50-w14-s8_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth) | +| `res2net50-w26-s8_3rdparty_8xb32_in1k`\* | From scratch | 48.40 | 8.39 | 79.20 | 94.36 | [config](res2net50-w26-s8_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth) | +| `res2net101-w26-s4_3rdparty_8xb32_in1k`\* | From scratch | 45.21 | 8.12 | 79.19 | 94.44 | [config](res2net101-w26-s4_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth) | + +*Models with * are converted from the [official repo](https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L181). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{gao2019res2net, + title={Res2Net: A New Multi-scale Backbone Architecture}, + author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip}, + journal={IEEE TPAMI}, + year={2021}, + doi={10.1109/TPAMI.2019.2938758}, +} +``` diff --git a/configs/res2net/metafile.yml b/configs/res2net/metafile.yml new file mode 100644 index 0000000..b19b102 --- /dev/null +++ b/configs/res2net/metafile.yml @@ -0,0 +1,70 @@ +Collections: + - Name: Res2Net + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Architecture: + - Batch Normalization + - Convolution + - Global Average Pooling + - ReLU + - Res2Net Block + Paper: + Title: 'Res2Net: A New Multi-scale Backbone Architecture' + URL: https://arxiv.org/abs/1904.01169 + README: configs/res2net/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.17.0/mmcls/models/backbones/res2net.py + Version: v0.17.0 + +Models: + - Name: res2net50-w14-s8_3rdparty_8xb32_in1k + Metadata: + FLOPs: 4220000000 + Parameters: 25060000 + In Collection: Res2Net + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.14 + Top 5 Accuracy: 93.85 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth + Converted From: + Weights: https://1drv.ms/u/s!AkxDDnOtroRPdOTqhF8ne_aakDI?e=EVb8Ri + Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L221 + Config: configs/res2net/res2net50-w14-s8_8xb32_in1k.py + - Name: res2net50-w26-s8_3rdparty_8xb32_in1k + Metadata: + FLOPs: 8390000000 + Parameters: 48400000 + In Collection: Res2Net + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.20 + Top 5 Accuracy: 94.36 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth + Converted From: + Weights: https://1drv.ms/u/s!AkxDDnOtroRPdTrAd_Afzc26Z7Q?e=slYqsR + Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L201 + Config: configs/res2net/res2net50-w26-s8_8xb32_in1k.py + - Name: res2net101-w26-s4_3rdparty_8xb32_in1k + Metadata: + FLOPs: 8120000000 + Parameters: 45210000 + In Collection: Res2Net + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.19 + Top 5 Accuracy: 94.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth + Converted From: + Weights: https://1drv.ms/u/s!AkxDDnOtroRPcJRgTLkahL0cFYw?e=nwbnic + Code: https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net.py#L181 + Config: configs/res2net/res2net101-w26-s4_8xb32_in1k.py diff --git a/configs/res2net/res2net101-w26-s4_8xb32_in1k.py b/configs/res2net/res2net101-w26-s4_8xb32_in1k.py new file mode 100644 index 0000000..7ebe9e9 --- /dev/null +++ b/configs/res2net/res2net101-w26-s4_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/res2net101-w26-s4.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/res2net/res2net50-w14-s8_8xb32_in1k.py b/configs/res2net/res2net50-w14-s8_8xb32_in1k.py new file mode 100644 index 0000000..56cc02e --- /dev/null +++ b/configs/res2net/res2net50-w14-s8_8xb32_in1k.py @@ -0,0 
+1,5 @@ +_base_ = [ + '../_base_/models/res2net50-w14-s8.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/res2net/res2net50-w26-s8_8xb32_in1k.py b/configs/res2net/res2net50-w26-s8_8xb32_in1k.py new file mode 100644 index 0000000..d7dcbeb --- /dev/null +++ b/configs/res2net/res2net50-w26-s8_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/res2net50-w26-s8.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnest/README.md b/configs/resnest/README.md new file mode 100644 index 0000000..eb6c5fd --- /dev/null +++ b/configs/resnest/README.md @@ -0,0 +1,26 @@ +# ResNeSt + +> [ResNeSt: Split-Attention Networks](https://arxiv.org/abs/2004.08955) + + + +## Abstract + +It is well known that featuremap attention and multi-path representation are important for visual recognition. In this paper, we present a modularized architecture, which applies the channel-wise attention on different network branches to leverage their success in capturing cross-feature interactions and learning diverse representations. Our design results in a simple and unified computation block, which can be parameterized using only a few variables. Our model, named ResNeSt, outperforms EfficientNet in accuracy and latency trade-off on image classification. In addition, ResNeSt has achieved superior transfer learning results on several public benchmarks serving as the backbone, and has been adopted by the winning entries of COCO-LVIS challenge. The source code for complete system and pretrained models are publicly available. + +
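As a rough illustration of the channel-wise attention applied across parallel branches mentioned above, the sketch below predicts a per-channel softmax weight for each branch and combines the branches as a weighted sum. This is a simplified stand-in rather than the ResNeSt backbone these configs instantiate; the class name `SplitAttention` and its hyper-parameters are hypothetical.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class SplitAttention(nn.Module):
    """Toy channel-wise attention over ``radix`` parallel feature branches."""

    def __init__(self, channels: int, radix: int = 2, reduction: int = 4):
        super().__init__()
        self.radix = radix
        hidden = max(channels // reduction, 8)
        # Two 1x1 convs act as the small MLP that predicts attention logits.
        self.fc1 = nn.Conv2d(channels, hidden, kernel_size=1)
        self.fc2 = nn.Conv2d(hidden, channels * radix, kernel_size=1)

    def forward(self, branches):
        # branches: list of `radix` tensors, each of shape (N, C, H, W)
        stacked = torch.stack(branches, dim=1)                   # (N, radix, C, H, W)
        gap = stacked.sum(dim=1).mean(dim=(2, 3), keepdim=True)  # (N, C, 1, 1)
        attn = self.fc2(F.relu(self.fc1(gap)))                   # (N, radix * C, 1, 1)
        attn = attn.view(gap.size(0), self.radix, -1, 1, 1)
        attn = torch.softmax(attn, dim=1)                        # softmax over branches
        return (stacked * attn).sum(dim=1)                       # (N, C, H, W)


x = torch.rand(2, 32, 14, 14)
print(SplitAttention(32, radix=2)([x, x]).shape)  # torch.Size([2, 32, 14, 14])
```

In the full model each branch comes from its own grouped convolution; the weighted sum lets the network emphasise whichever branch is more informative for a given channel.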
+ +## Citation + +``` +@misc{zhang2020resnest, + title={ResNeSt: Split-Attention Networks}, + author={Hang Zhang and Chongruo Wu and Zhongyue Zhang and Yi Zhu and Haibin Lin and Zhi Zhang and Yue Sun and Tong He and Jonas Mueller and R. Manmatha and Mu Li and Alexander Smola}, + year={2020}, + eprint={2004.08955}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/resnest/_randaug_policies.py b/configs/resnest/_randaug_policies.py new file mode 100644 index 0000000..d650caa --- /dev/null +++ b/configs/resnest/_randaug_policies.py @@ -0,0 +1,92 @@ +policies = [ + dict(type='AutoContrast', prob=0.5), + dict(type='Equalize', prob=0.5), + dict(type='Invert', prob=0.5), + dict( + type='Rotate', + magnitude_key='angle', + magnitude_range=(0, 30), + pad_val=0, + prob=0.5, + random_negative_prob=0.5), + dict( + type='Posterize', + magnitude_key='bits', + magnitude_range=(0, 4), + prob=0.5), + dict( + type='Solarize', + magnitude_key='thr', + magnitude_range=(0, 256), + prob=0.5), + dict( + type='SolarizeAdd', + magnitude_key='magnitude', + magnitude_range=(0, 110), + thr=128, + prob=0.5), + dict( + type='ColorTransform', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Contrast', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Brightness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Sharpness', + magnitude_key='magnitude', + magnitude_range=(-0.9, 0.9), + prob=0.5, + random_negative_prob=0.), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5), + dict( + type='Shear', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5), + dict( + type='Cutout', + magnitude_key='shape', + magnitude_range=(1, 41), + pad_val=0, + prob=0.5), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='horizontal', + random_negative_prob=0.5, + interpolation='bicubic'), + dict( + type='Translate', + magnitude_key='magnitude', + magnitude_range=(0, 0.3), + pad_val=0, + prob=0.5, + direction='vertical', + random_negative_prob=0.5, + interpolation='bicubic') +] diff --git a/configs/resnest/resnest101_32xb64_in1k.py b/configs/resnest/resnest101_32xb64_in1k.py new file mode 100644 index 0000000..ac78659 --- /dev/null +++ b/configs/resnest/resnest101_32xb64_in1k.py @@ -0,0 +1,78 @@ +_base_ = [ + '../_base_/models/resnest101.py', + '../_base_/datasets/imagenet_bs64.py', + '../_base_/default_runtime.py', + './_randaug_policies.py', +] + +# dataset settings + +# lighting params, in order of BGR +EIGVAL = [55.4625, 4.7940, 1.1475] +EIGVEC = [ + [-0.5836, -0.6948, 0.4203], + [-0.5808, -0.0045, -0.8140], + [-0.5675, 0.7192, 0.4009], +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies={{_base_.policies}}, + num_policies=2, + magnitude_level=12), + dict(type='EfficientNetRandomCrop', scale=256, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='Lighting', + eigval=EIGVAL, + eigvec=EIGVEC, + alphastd=0.1, + to_rgb=False), + dict(type='PackInputs'), +] + +test_pipeline = [ + 
dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=256, backend='pillow'), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.8, momentum=0.9, weight_decay=1e-4), + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-6, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=265, + by_epoch=True, + begin=5, + end=270, + ) +] + +train_cfg = dict(by_epoch=True, max_epochs=270) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/resnest/resnest200_64xb32_in1k.py b/configs/resnest/resnest200_64xb32_in1k.py new file mode 100644 index 0000000..e3b9fb3 --- /dev/null +++ b/configs/resnest/resnest200_64xb32_in1k.py @@ -0,0 +1,74 @@ +_base_ = [ + '../_base_/models/resnest200.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/default_runtime.py', + './_randaug_policies.py', +] + +# dataset settings + +# lighting params, in order of BGR +EIGVAL = [55.4625, 4.7940, 1.1475] +EIGVEC = [ + [-0.5836, -0.6948, 0.4203], + [-0.5808, -0.0045, -0.8140], + [-0.5675, 0.7192, 0.4009], +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies={{_base_.policies}}, + num_policies=2, + magnitude_level=12), + dict(type='EfficientNetRandomCrop', scale=320, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='Lighting', + eigval=EIGVAL, + eigvec=EIGVEC, + alphastd=0.1, + to_rgb=False), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=320, backend='pillow'), + dict(type='PackInputs'), +] + +# schedule settings +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.8, momentum=0.9, weight_decay=1e-4), + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-6, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=265, + by_epoch=True, + begin=5, + end=270, + ) +] + +train_cfg = dict(by_epoch=True, max_epochs=270) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (64 GPUs) x (32 samples per GPU) +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/resnest/resnest269_64xb32_in1k.py b/configs/resnest/resnest269_64xb32_in1k.py new file mode 100644 index 0000000..0e884d6 --- /dev/null +++ b/configs/resnest/resnest269_64xb32_in1k.py @@ -0,0 +1,78 @@ +_base_ = [ + '../_base_/models/resnest269.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/default_runtime.py', + './_randaug_policies.py', +] + +# dataset settings + +# lighting params, in order of BGR +EIGVAL = [55.4625, 4.7940, 1.1475] +EIGVEC = [ + [-0.5836, -0.6948, 0.4203], + [-0.5808, -0.0045, -0.8140], + [-0.5675, 0.7192, 0.4009], +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies={{_base_.policies}}, + num_policies=2, + magnitude_level=12), + dict(type='EfficientNetRandomCrop', scale=416, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='Lighting', + eigval=EIGVAL, + eigvec=EIGVEC, + alphastd=0.1, + to_rgb=False), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=416, backend='pillow'), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.8, momentum=0.9, weight_decay=1e-4), + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-6, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=265, + by_epoch=True, + begin=5, + end=270, + ) +] + +train_cfg = dict(by_epoch=True, max_epochs=270) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (64 GPUs) x (32 samples per GPU) +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/resnest/resnest50_32xb64_in1k.py b/configs/resnest/resnest50_32xb64_in1k.py new file mode 100644 index 0000000..05f839b --- /dev/null +++ b/configs/resnest/resnest50_32xb64_in1k.py @@ -0,0 +1,78 @@ +_base_ = [ + '../_base_/models/resnest50.py', + '../_base_/datasets/imagenet_bs64.py', + '../_base_/default_runtime.py', + './_randaug_policies.py', +] + +# dataset settings + +# lighting params, in order of BGR +EIGVAL = [55.4625, 4.7940, 1.1475] +EIGVEC = [ + [-0.5836, -0.6948, 0.4203], + [-0.5808, -0.0045, -0.8140], + [-0.5675, 0.7192, 0.4009], +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies={{_base_.policies}}, + num_policies=2, + magnitude_level=12), + dict(type='EfficientNetRandomCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='Lighting', + eigval=EIGVAL, + eigvec=EIGVEC, + alphastd=0.1, + to_rgb=False), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='EfficientNetCenterCrop', crop_size=256, backend='pillow'), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.8, momentum=0.9, weight_decay=1e-4), + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-6, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=265, + by_epoch=True, + begin=5, + end=270, + ) +] + +train_cfg = dict(by_epoch=True, max_epochs=270) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/resnet/README.md b/configs/resnet/README.md new file mode 100644 index 0000000..286b773 --- /dev/null +++ b/configs/resnet/README.md @@ -0,0 +1,140 @@ +# ResNet + +> [Deep Residual Learning for Image Recognition](https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html) + + + +## Introduction + +**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of +learning unreferenced functions. In the mainstream previous works, like VGG, the neural networks are a stack +of layers and every layer attempts to fit a desired underlying mapping. In ResNets, a few stacked layers are +grouped as a block, and the layers in a block attempts to learn a residual mapping. + +Formally, denoting the desired underlying mapping of a block as $\mathcal{H}(x)$, split the underlying mapping +into the sum of the identity and the residual mapping as $\mathcal{H}(x) = x + \mathcal{F}(x)$, and let the +stacked non-linear layers fit the residual mapping $\mathcal{F}(x)$. + +Many works proved this method makes deep neural networks easier to optimize, and can gain accuracy from +considerably increased depth. Recently, the residual structure is widely used in various models. + +
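The following minimal sketch shows the identity-shortcut formulation $\mathcal{H}(x) = x + \mathcal{F}(x)$ for the simplest case of equal input/output channels and stride 1. The name `BasicResidualBlock` is made up for illustration; the real backbone behind these configs also handles downsampling between stages and the deeper bottleneck variant.

```python
import torch
import torch.nn as nn


class BasicResidualBlock(nn.Module):
    """Minimal residual block: output = ReLU(x + F(x))."""

    def __init__(self, channels: int):
        super().__init__()
        # F(x): two stacked 3x3 convs with BN, and a ReLU in between.
        self.residual = nn.Sequential(
            nn.Conv2d(channels, channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # The stacked layers only need to learn the residual mapping F(x);
        # the identity is carried by the shortcut and added back here.
        return self.relu(x + self.residual(x))


x = torch.rand(1, 64, 56, 56)
print(BasicResidualBlock(64)(x).shape)  # torch.Size([1, 64, 56, 56])
```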
+ +## Abstract + +
+Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers---8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. + +The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation. +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('resnet18_8xb16_cifar10', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('resnet18_8xb16_cifar10', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/resnet/resnet18_8xb16_cifar10.py +``` + +Test: + +```shell +python tools/test.py configs/resnet/resnet18_8xb16_cifar10.py https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :--------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :-------------------------------------------: | :----------------------------------------------------------------------: | +| `resnet18_8xb32_in1k` | From scratch | 11.69 | 1.82 | 69.90 | 89.43 | [config](resnet18_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.json) | +| `resnet34_8xb32_in1k` | From scratch | 2.18 | 3.68 | 73.62 | 91.59 | [config](resnet34_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.json) | +| `resnet50_8xb32_in1k` | From scratch | 25.56 | 4.12 | 76.55 | 93.06 | [config](resnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.json) | +| `resnet101_8xb32_in1k` | From scratch | 44.55 | 7.85 | 77.97 | 94.06 | [config](resnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.json) | +| `resnet152_8xb32_in1k` | From scratch | 60.19 | 11.58 | 78.48 | 94.13 | [config](resnet152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.json) | +| `resnetv1d50_8xb32_in1k` | From scratch | 25.58 | 4.36 | 77.54 | 93.57 | [config](resnetv1d50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.json) | +| `resnetv1d101_8xb32_in1k` | From scratch | 44.57 | 8.09 | 78.93 | 94.48 | [config](resnetv1d101_8xb32_in1k.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.json) | +| `resnetv1d152_8xb32_in1k` | From scratch | 60.21 | 11.82 | 79.41 | 94.70 | [config](resnetv1d152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.json) | +| `resnet50_8xb32-fp16_in1k` | From scratch | 25.56 | 4.12 | 76.30 | 93.07 | [config](resnet50_8xb32-fp16_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.json) | +| `resnet50_8xb256-rsb-a1-600e_in1k` | From scratch | 25.56 | 4.12 | 80.12 | 94.78 | [config](resnet50_8xb256-rsb-a1-600e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.json) | +| `resnet50_8xb256-rsb-a2-300e_in1k` | From scratch | 25.56 | 4.12 | 79.55 | 94.37 | [config](resnet50_8xb256-rsb-a2-300e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a2-300e_in1k_20211228-0fd8be6e.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a2-300e_in1k_20211228-0fd8be6e.json) | +| `resnet50_8xb256-rsb-a3-100e_in1k` | From scratch | 25.56 | 4.12 | 78.30 | 93.80 | [config](resnet50_8xb256-rsb-a3-100e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a3-100e_in1k_20211228-3493673c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a3-100e_in1k_20211228-3493673c.json) | +| `resnetv1c50_8xb32_in1k` | From scratch | 25.58 | 4.36 | 77.01 | 93.58 | [config](resnetv1c50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c50_8xb32_in1k_20220214-3343eccd.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c50_8xb32_in1k_20220214-3343eccd.json) | +| `resnetv1c101_8xb32_in1k` | From scratch | 44.57 | 8.09 | 78.30 | 94.27 | [config](resnetv1c101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c101_8xb32_in1k_20220214-434fe45f.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c101_8xb32_in1k_20220214-434fe45f.json) | +| `resnetv1c152_8xb32_in1k` | From scratch | 60.21 | 11.82 | 78.76 | 94.41 | [config](resnetv1c152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-c013291f.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-c013291f.json) | + +### Image Classification on CIFAR-10 + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :------------------------ | :----------: | :--------: | :-------: | :-------: | :----------------------------------: | :-------------------------------------------------------------------------------------------------: | +| `resnet18_8xb16_cifar10` | From scratch | 11.17 | 0.56 | 94.82 | 
[config](resnet18_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.json) | +| `resnet34_8xb16_cifar10` | From scratch | 21.28 | 1.16 | 95.34 | [config](resnet34_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.json) | +| `resnet50_8xb16_cifar10` | From scratch | 23.52 | 1.31 | 95.55 | [config](resnet50_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.json) | +| `resnet101_8xb16_cifar10` | From scratch | 42.51 | 2.52 | 95.58 | [config](resnet101_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.json) | +| `resnet152_8xb16_cifar10` | From scratch | 58.16 | 3.74 | 95.76 | [config](resnet152_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.json) | + +### Image Classification on CIFAR-100 + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------: | :----------------------------------------------------------------------------------------: | +| `resnet50_8xb16_cifar100` | From scratch | 23.71 | 1.31 | 79.90 | 95.19 | [config](resnet50_8xb16_cifar100.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.json) | + +### Image Classification on CUB-200-2011 + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :------------------ | :----------: | :--------: | :-------: | :-------: | :----------------------------: | :-------------------------------------------------------------------------------------------------------------: | +| `resnet50_8xb8_cub` | From scratch | 23.92 | 16.48 | 88.45 | [config](resnet50_8xb8_cub.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.json) | + +## Citation + +```bibtex +@inproceedings{he2016deep, + title={Deep residual learning for image recognition}, + author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={770--778}, + year={2016} +} +``` diff --git a/configs/resnet/metafile.yml b/configs/resnet/metafile.yml new file mode 100644 index 0000000..1638724 --- /dev/null +++ b/configs/resnet/metafile.yml @@ -0,0 +1,352 @@ +Collections: + - Name: ResNet + Metadata: + Training Data: 
ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 100 + Batch Size: 256 + Architecture: + - ResNet + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html + Title: "Deep Residual Learning for Image Recognition" + README: configs/resnet/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/resnet.py#L383 + Version: v0.15.0 + +Models: + - Name: resnet18_8xb16_cifar10 + Metadata: + Training Data: CIFAR-10 + Epochs: 200 + Batch Size: 128 + FLOPs: 560000000 + Parameters: 11170000 + In Collection: ResNet + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 94.82 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth + Config: configs/resnet/resnet18_8xb16_cifar10.py + - Name: resnet34_8xb16_cifar10 + Metadata: + Training Data: CIFAR-10 + Epochs: 200 + Batch Size: 128 + FLOPs: 1160000000 + Parameters: 21280000 + In Collection: ResNet + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 95.34 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.pth + Config: configs/resnet/resnet34_8xb16_cifar10.py + - Name: resnet50_8xb16_cifar10 + Metadata: + Training Data: CIFAR-10 + Epochs: 200 + Batch Size: 128 + FLOPs: 1310000000 + Parameters: 23520000 + In Collection: ResNet + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 95.55 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth + Config: configs/resnet/resnet50_8xb16_cifar10.py + - Name: resnet101_8xb16_cifar10 + Metadata: + Training Data: CIFAR-10 + Epochs: 200 + Batch Size: 128 + FLOPs: 2520000000 + Parameters: 42510000 + In Collection: ResNet + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 95.58 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.pth + Config: configs/resnet/resnet101_8xb16_cifar10.py + - Name: resnet152_8xb16_cifar10 + Metadata: + Training Data: CIFAR-10 + Epochs: 200 + Batch Size: 128 + FLOPs: 3740000000 + Parameters: 58160000 + In Collection: ResNet + Results: + - Dataset: CIFAR-10 + Metrics: + Top 1 Accuracy: 95.76 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.pth + Config: configs/resnet/resnet152_8xb16_cifar10.py + - Name: resnet50_8xb16_cifar100 + Metadata: + Training Data: CIFAR-100 + Epochs: 200 + Batch Size: 128 + FLOPs: 1310000000 + Parameters: 23710000 + In Collection: ResNet + Results: + - Dataset: CIFAR-100 + Metrics: + Top 1 Accuracy: 79.90 + Top 5 Accuracy: 95.19 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.pth + Config: configs/resnet/resnet50_8xb16_cifar100.py + - Name: resnet18_8xb32_in1k + Metadata: + FLOPs: 1820000000 + Parameters: 11690000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 69.90 + Top 5 Accuracy: 89.43 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth + Config: configs/resnet/resnet18_8xb32_in1k.py + - Name: 
resnet34_8xb32_in1k + Metadata: + FLOPs: 3680000000 + Parameters: 2180000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 73.62 + Top 5 Accuracy: 91.59 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.pth + Config: configs/resnet/resnet34_8xb32_in1k.py + - Name: resnet50_8xb32_in1k + Metadata: + FLOPs: 4120000000 + Parameters: 25560000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.55 + Top 5 Accuracy: 93.06 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth + Config: configs/resnet/resnet50_8xb32_in1k.py + - Name: resnet101_8xb32_in1k + Metadata: + FLOPs: 7850000000 + Parameters: 44550000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.97 + Top 5 Accuracy: 94.06 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth + Config: configs/resnet/resnet101_8xb32_in1k.py + - Name: resnet152_8xb32_in1k + Metadata: + FLOPs: 11580000000 + Parameters: 60190000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.48 + Top 5 Accuracy: 94.13 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.pth + Config: configs/resnet/resnet152_8xb32_in1k.py + - Name: resnetv1d50_8xb32_in1k + Metadata: + FLOPs: 4360000000 + Parameters: 25580000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.54 + Top 5 Accuracy: 93.57 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth + Config: configs/resnet/resnetv1d50_8xb32_in1k.py + - Name: resnetv1d101_8xb32_in1k + Metadata: + FLOPs: 8090000000 + Parameters: 44570000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.93 + Top 5 Accuracy: 94.48 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth + Config: configs/resnet/resnetv1d101_8xb32_in1k.py + - Name: resnetv1d152_8xb32_in1k + Metadata: + FLOPs: 11820000000 + Parameters: 60210000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.41 + Top 5 Accuracy: 94.70 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth + Config: configs/resnet/resnetv1d152_8xb32_in1k.py + - Name: resnet50_8xb32-fp16_in1k + Metadata: + FLOPs: 4120000000 + Parameters: 25560000 + Training Techniques: + - SGD with Momentum + - Weight Decay + - Mixed Precision Training + In Collection: ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.30 + Top 5 Accuracy: 93.07 + Weights: https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth + Config: configs/resnet/resnet50_8xb32-fp16_in1k.py + - Name: resnet50_8xb256-rsb-a1-600e_in1k + Metadata: + FLOPs: 4120000000 + Parameters: 25560000 + Training Techniques: + - LAMB + - Weight Decay + - Cosine Annealing + - Mixup + - CutMix + - RepeatAugSampler + - RandAugment + Epochs: 600 + Batch Size: 2048 + In 
Collection: ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.12 + Top 5 Accuracy: 94.78 + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth + Config: configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py + - Name: resnet50_8xb256-rsb-a2-300e_in1k + Metadata: + FLOPs: 4120000000 + Parameters: 25560000 + Training Techniques: + - LAMB + - Weight Decay + - Cosine Annealing + - Mixup + - CutMix + - RepeatAugSampler + - RandAugment + Epochs: 300 + Batch Size: 2048 + In Collection: ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.55 + Top 5 Accuracy: 94.37 + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a2-300e_in1k_20211228-0fd8be6e.pth + Config: configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py + - Name: resnet50_8xb256-rsb-a3-100e_in1k + Metadata: + FLOPs: 4120000000 + Parameters: 25560000 + Training Techniques: + - LAMB + - Weight Decay + - Cosine Annealing + - Mixup + - CutMix + - RandAugment + Batch Size: 2048 + In Collection: ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.30 + Top 5 Accuracy: 93.80 + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a3-100e_in1k_20211228-3493673c.pth + Config: configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py + - Name: resnetv1c50_8xb32_in1k + Metadata: + FLOPs: 4360000000 + Parameters: 25580000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.01 + Top 5 Accuracy: 93.58 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c50_8xb32_in1k_20220214-3343eccd.pth + Config: configs/resnet/resnetv1c50_8xb32_in1k.py + - Name: resnetv1c101_8xb32_in1k + Metadata: + FLOPs: 8090000000 + Parameters: 44570000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.30 + Top 5 Accuracy: 94.27 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c101_8xb32_in1k_20220214-434fe45f.pth + Config: configs/resnet/resnetv1c101_8xb32_in1k.py + - Name: resnetv1c152_8xb32_in1k + Metadata: + FLOPs: 11820000000 + Parameters: 60210000 + In Collection: ResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.76 + Top 5 Accuracy: 94.41 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-c013291f.pth + Config: configs/resnet/resnetv1c152_8xb32_in1k.py + - Name: resnet50_8xb8_cub + Metadata: + FLOPs: 16480000000 + Parameters: 23920000 + In Collection: ResNet + Results: + - Dataset: CUB-200-2011 + Metrics: + Top 1 Accuracy: 88.45 + Task: Image Classification + Pretrain: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth + Weights: https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.pth + Config: configs/resnet/resnet50_8xb8_cub.py diff --git a/configs/resnet/resnet101_8xb16_cifar10.py b/configs/resnet/resnet101_8xb16_cifar10.py new file mode 100644 index 0000000..166a174 --- /dev/null +++ b/configs/resnet/resnet101_8xb16_cifar10.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet101_cifar.py', + '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', 
'../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet101_8xb32_in1k.py b/configs/resnet/resnet101_8xb32_in1k.py new file mode 100644 index 0000000..388d2cd --- /dev/null +++ b/configs/resnet/resnet101_8xb32_in1k.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet101.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet152_8xb16_cifar10.py b/configs/resnet/resnet152_8xb16_cifar10.py new file mode 100644 index 0000000..3f307b6 --- /dev/null +++ b/configs/resnet/resnet152_8xb16_cifar10.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet152_cifar.py', + '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet152_8xb32_in1k.py b/configs/resnet/resnet152_8xb32_in1k.py new file mode 100644 index 0000000..cc9dc2c --- /dev/null +++ b/configs/resnet/resnet152_8xb32_in1k.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet152.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet18_8xb16_cifar10.py b/configs/resnet/resnet18_8xb16_cifar10.py new file mode 100644 index 0000000..c7afa39 --- /dev/null +++ b/configs/resnet/resnet18_8xb16_cifar10.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet18_cifar.py', '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet18_8xb32_in1k.py b/configs/resnet/resnet18_8xb32_in1k.py new file mode 100644 index 0000000..ac452ff --- /dev/null +++ b/configs/resnet/resnet18_8xb32_in1k.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet18.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet34_8xb16_cifar10.py b/configs/resnet/resnet34_8xb16_cifar10.py new file mode 100644 index 0000000..7f5cd51 --- /dev/null +++ b/configs/resnet/resnet34_8xb16_cifar10.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet34_cifar.py', '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet34_8xb32_in1k.py b/configs/resnet/resnet34_8xb32_in1k.py new file mode 100644 index 0000000..7749261 --- /dev/null +++ b/configs/resnet/resnet34_8xb32_in1k.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet34.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_32xb64-warmup-coslr_in1k.py b/configs/resnet/resnet50_32xb64-warmup-coslr_in1k.py new file mode 100644 index 0000000..c26245e --- /dev/null +++ b/configs/resnet/resnet50_32xb64-warmup-coslr_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs2048_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py b/configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py new file mode 100644 index 0000000..2f24f9a --- /dev/null +++ b/configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py @@ -0,0 +1,12 @@ +_base_ = ['./resnet50_32xb64-warmup_in1k.py'] +model = dict( + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=2048, + loss=dict( + type='LabelSmoothLoss', + loss_weight=1.0, + 
label_smooth_val=0.1, + num_classes=1000), + )) diff --git a/configs/resnet/resnet50_32xb64-warmup_in1k.py b/configs/resnet/resnet50_32xb64-warmup_in1k.py new file mode 100644 index 0000000..34d5288 --- /dev/null +++ b/configs/resnet/resnet50_32xb64-warmup_in1k.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs64.py', + '../_base_/schedules/imagenet_bs2048.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py b/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py new file mode 100644 index 0000000..d2cc1ee --- /dev/null +++ b/configs/resnet/resnet50_8xb128_coslr-90e_in21k.py @@ -0,0 +1,11 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet21k_bs128.py', + '../_base_/schedules/imagenet_bs1024_coslr.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict(head=dict(num_classes=21843)) + +# runtime settings +train_cfg = dict(by_epoch=True, max_epochs=90) diff --git a/configs/resnet/resnet50_8xb16-mixup_cifar10.py b/configs/resnet/resnet50_8xb16-mixup_cifar10.py new file mode 100644 index 0000000..2420ebf --- /dev/null +++ b/configs/resnet/resnet50_8xb16-mixup_cifar10.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50_cifar_mixup.py', + '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb16_cifar10.py b/configs/resnet/resnet50_8xb16_cifar10.py new file mode 100644 index 0000000..669e5de --- /dev/null +++ b/configs/resnet/resnet50_8xb16_cifar10.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet50_cifar.py', '../_base_/datasets/cifar10_bs16.py', + '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb16_cifar100.py b/configs/resnet/resnet50_8xb16_cifar100.py new file mode 100644 index 0000000..ebde6c7 --- /dev/null +++ b/configs/resnet/resnet50_8xb16_cifar100.py @@ -0,0 +1,19 @@ +_base_ = [ + '../_base_/models/resnet50_cifar.py', + '../_base_/datasets/cifar100_bs16.py', + '../_base_/schedules/cifar10_bs128.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict(head=dict(num_classes=100)) + +# schedule settings +optim_wrapper = dict(optimizer=dict(weight_decay=0.0005)) + +param_scheduler = dict( + type='MultiStepLR', + by_epoch=True, + milestones=[60, 120, 160], + gamma=0.2, +) diff --git a/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py b/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py new file mode 100644 index 0000000..a4ea159 --- /dev/null +++ b/configs/resnet/resnet50_8xb256-rsb-a1-600e_in1k.py @@ -0,0 +1,56 @@ +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/imagenet_bs256_rsb_a12.py', + '../_base_/schedules/imagenet_bs2048_rsb.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + drop_path_rate=0.05, + ), + head=dict( + loss=dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + use_sigmoid=True, + )), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.2), + dict(type='CutMix', alpha=1.0) + ]), +) + +# dataset settings +train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True)) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(weight_decay=0.01), + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + 
type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=595, + eta_min=1.0e-6, + by_epoch=True, + begin=5, + end=600) +] + +train_cfg = dict(by_epoch=True, max_epochs=600) diff --git a/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py b/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py new file mode 100644 index 0000000..df8edc0 --- /dev/null +++ b/configs/resnet/resnet50_8xb256-rsb-a2-300e_in1k.py @@ -0,0 +1,46 @@ +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/imagenet_bs256_rsb_a12.py', + '../_base_/schedules/imagenet_bs2048_rsb.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + drop_path_rate=0.05, + ), + head=dict(loss=dict(use_sigmoid=True)), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.1), + dict(type='CutMix', alpha=1.0) + ])) + +# dataset settings +train_dataloader = dict(sampler=dict(type='RepeatAugSampler', shuffle=True)) + +# schedule settings +optim_wrapper = dict( + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.)) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=295, + eta_min=1.0e-6, + by_epoch=True, + begin=5, + end=300) +] +train_cfg = dict(by_epoch=True, max_epochs=300) diff --git a/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py b/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py new file mode 100644 index 0000000..3a36c58 --- /dev/null +++ b/configs/resnet/resnet50_8xb256-rsb-a3-100e_in1k.py @@ -0,0 +1,22 @@ +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/imagenet_bs256_rsb_a3.py', + '../_base_/schedules/imagenet_bs2048_rsb.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + backbone=dict(norm_cfg=dict(type='SyncBN', requires_grad=True)), + head=dict(loss=dict(use_sigmoid=True)), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.1), + dict(type='CutMix', alpha=1.0) + ]), +) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=0.008), + paramwise_cfg=dict(bias_decay_mult=0., norm_decay_mult=0.), +) diff --git a/configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py b/configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py new file mode 100644 index 0000000..01fefbb --- /dev/null +++ b/configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py @@ -0,0 +1,13 @@ +_base_ = 'resnet50_8xb32-coslr_in1k.py' + +# Precise BN hook will update the bn stats, so this hook should be executed +# before CheckpointHook(priority of 'VERY_LOW') and +# EMAHook(priority of 'NORMAL') So set the priority of PreciseBNHook to +# 'ABOVENORMAL' here. 
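+# (In MMEngine, hooks with a smaller numeric priority value are called earlier: +# ABOVE_NORMAL=40 < NORMAL=50 < VERY_LOW=90, so 'ABOVE_NORMAL' runs before both.)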
+custom_hooks = [ + dict( + type='PreciseBNHook', + num_samples=8192, + interval=1, + priority='ABOVE_NORMAL') +] diff --git a/configs/resnet/resnet50_8xb32-coslr_in1k.py b/configs/resnet/resnet50_8xb32-coslr_in1k.py new file mode 100644 index 0000000..938a114 --- /dev/null +++ b/configs/resnet/resnet50_8xb32-coslr_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256_coslr.py', + '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb32-cutmix_in1k.py b/configs/resnet/resnet50_8xb32-cutmix_in1k.py new file mode 100644 index 0000000..2f8d0ca --- /dev/null +++ b/configs/resnet/resnet50_8xb32-cutmix_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50_cutmix.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py b/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py new file mode 100644 index 0000000..58f6fe4 --- /dev/null +++ b/configs/resnet/resnet50_8xb32-fp16-dynamic_in1k.py @@ -0,0 +1,4 @@ +_base_ = ['./resnet50_8xb32_in1k.py'] + +# schedule settings +optim_wrapper = dict(type='AmpOptimWrapper', loss_scale='dynamic') diff --git a/configs/resnet/resnet50_8xb32-fp16_in1k.py b/configs/resnet/resnet50_8xb32-fp16_in1k.py new file mode 100644 index 0000000..19ee6ee --- /dev/null +++ b/configs/resnet/resnet50_8xb32-fp16_in1k.py @@ -0,0 +1,4 @@ +_base_ = ['./resnet50_8xb32_in1k.py'] + +# schedule settings +optim_wrapper = dict(type='AmpOptimWrapper', loss_scale=512.) diff --git a/configs/resnet/resnet50_8xb32-lbs_in1k.py b/configs/resnet/resnet50_8xb32-lbs_in1k.py new file mode 100644 index 0000000..1c1aa5a --- /dev/null +++ b/configs/resnet/resnet50_8xb32-lbs_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50_label_smooth.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb32-mixup_in1k.py b/configs/resnet/resnet50_8xb32-mixup_in1k.py new file mode 100644 index 0000000..2a153d0 --- /dev/null +++ b/configs/resnet/resnet50_8xb32-mixup_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnet50_mixup.py', + '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb32_in1k.py b/configs/resnet/resnet50_8xb32_in1k.py new file mode 100644 index 0000000..c32f333 --- /dev/null +++ b/configs/resnet/resnet50_8xb32_in1k.py @@ -0,0 +1,4 @@ +_base_ = [ + '../_base_/models/resnet50.py', '../_base_/datasets/imagenet_bs32.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnet50_8xb8_cub.py b/configs/resnet/resnet50_8xb8_cub.py new file mode 100644 index 0000000..17054ef --- /dev/null +++ b/configs/resnet/resnet50_8xb8_cub.py @@ -0,0 +1,20 @@ +_base_ = [ + '../_base_/models/resnet50.py', + '../_base_/datasets/cub_bs8_448.py', + '../_base_/schedules/cub_bs64.py', + '../_base_/default_runtime.py', +] + +# model settings +# use pre-train weight converted from https://github.com/Alibaba-MIIL/ImageNet21K # noqa +pretrained = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-faac000b.pth' # noqa + +model = dict( + type='ImageClassifier', + backbone=dict( + init_cfg=dict( + type='Pretrained', checkpoint=pretrained, prefix='backbone')), + 
head=dict(num_classes=200, )) + +# runtime settings +default_hooks = dict(logger=dict(type='LoggerHook', interval=20)) diff --git a/configs/resnet/resnetv1c101_8xb32_in1k.py b/configs/resnet/resnetv1c101_8xb32_in1k.py new file mode 100644 index 0000000..441aff5 --- /dev/null +++ b/configs/resnet/resnetv1c101_8xb32_in1k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/resnetv1c50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(depth=101)) diff --git a/configs/resnet/resnetv1c152_8xb32_in1k.py b/configs/resnet/resnetv1c152_8xb32_in1k.py new file mode 100644 index 0000000..b9f466f --- /dev/null +++ b/configs/resnet/resnetv1c152_8xb32_in1k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/resnetv1c50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(depth=152)) diff --git a/configs/resnet/resnetv1c50_8xb32_in1k.py b/configs/resnet/resnetv1c50_8xb32_in1k.py new file mode 100644 index 0000000..aa1c8b6 --- /dev/null +++ b/configs/resnet/resnetv1c50_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnetv1c50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnetv1d101_8xb32_in1k.py b/configs/resnet/resnetv1d101_8xb32_in1k.py new file mode 100644 index 0000000..b16ca86 --- /dev/null +++ b/configs/resnet/resnetv1d101_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnetv1d101.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnetv1d152_8xb32_in1k.py b/configs/resnet/resnetv1d152_8xb32_in1k.py new file mode 100644 index 0000000..76926dd --- /dev/null +++ b/configs/resnet/resnetv1d152_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnetv1d152.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnet/resnetv1d50_8xb32_in1k.py b/configs/resnet/resnetv1d50_8xb32_in1k.py new file mode 100644 index 0000000..208bde4 --- /dev/null +++ b/configs/resnet/resnetv1d50_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnetv1d50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnext/README.md b/configs/resnext/README.md new file mode 100644 index 0000000..b901b31 --- /dev/null +++ b/configs/resnext/README.md @@ -0,0 +1,83 @@ +# ResNeXt + +> [Aggregated Residual Transformations for Deep Neural Networks](https://openaccess.thecvf.com/content_cvpr_2017/html/Xie_Aggregated_Residual_Transformations_CVPR_2017_paper.html) + + + +## Abstract + +We present a simple, highly modularized network architecture for image classification. Our network is constructed by repeating a building block that aggregates a set of transformations with the same topology. Our simple design results in a homogeneous, multi-branch architecture that has only a few hyper-parameters to set. This strategy exposes a new dimension, which we call "cardinality" (the size of the set of transformations), as an essential factor in addition to the dimensions of depth and width. 
On the ImageNet-1K dataset, we empirically show that even under the restricted condition of maintaining complexity, increasing cardinality is able to improve classification accuracy. Moreover, increasing cardinality is more effective than going deeper or wider when we increase the capacity. Our models, named ResNeXt, are the foundations of our entry to the ILSVRC 2016 classification task in which we secured 2nd place. We further investigate ResNeXt on an ImageNet-5K set and the COCO detection set, also showing better results than its ResNet counterpart. The code and models are publicly available online. + +
+ +
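The "cardinality" described in the abstract is realized in practice as a grouped 3x3 convolution inside each bottleneck. The sketch below is only an illustration of that idea (it is not the mmpretrain `ResNeXt` backbone); the channel numbers assume the first stage of a 32x4d model.

```python
# Illustrative ResNeXt-style bottleneck: the aggregated transformations are
# implemented as one grouped 3x3 conv with `groups = cardinality` (32 here),
# which is equivalent to 32 parallel branches of width 4 in the first stage.
import torch
import torch.nn as nn


class ResNeXtBottleneckSketch(nn.Module):

    def __init__(self, channels=256, mid_channels=128, groups=32):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, mid_channels, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_channels)
        self.conv2 = nn.Conv2d(
            mid_channels, mid_channels, 3, padding=1, groups=groups, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        self.conv3 = nn.Conv2d(mid_channels, channels, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + x)


block = ResNeXtBottleneckSketch()
print(block(torch.rand(1, 256, 56, 56)).shape)  # torch.Size([1, 256, 56, 56])
```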
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('resnext50-32x4d_8xb32_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('resnext50-32x4d_8xb32_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/resnext/resnext50-32x4d_8xb32_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/resnext/resnext50-32x4d_8xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :--------------------------------------: | :--------------------------------------------------------------------------------: | +| `resnext50-32x4d_8xb32_in1k` | From scratch | 25.03 | 4.27 | 77.90 | 93.66 | [config](resnext50-32x4d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.json) | +| `resnext101-32x4d_8xb32_in1k` | From scratch | 44.18 | 8.03 | 78.61 | 94.17 | [config](resnext101-32x4d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.json) | +| `resnext101-32x8d_8xb32_in1k` | From scratch | 88.79 | 16.50 | 79.27 | 94.58 | [config](resnext101-32x8d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.json) | +| `resnext152-32x4d_8xb32_in1k` | From scratch | 59.95 | 11.80 | 78.88 | 94.33 | [config](resnext152-32x4d_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.json) | + +## Citation + +```bibtex +@inproceedings{xie2017aggregated, + title={Aggregated residual transformations for deep neural networks}, + author={Xie, Saining and Girshick, Ross and Doll{\'a}r, Piotr and Tu, Zhuowen and He, Kaiming}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={1492--1500}, + year={2017} +} +``` diff --git a/configs/resnext/metafile.yml b/configs/resnext/metafile.yml new file mode 100644 index 0000000..7128328 --- /dev/null +++ b/configs/resnext/metafile.yml @@ -0,0 +1,73 @@ +Collections: + - Name: ResNeXt + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - 
SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 100 + Batch Size: 256 + Architecture: + - ResNeXt + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2017/html/Xie_Aggregated_Residual_Transformations_CVPR_2017_paper.html + Title: "Aggregated Residual Transformations for Deep Neural Networks" + README: configs/resnext/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/resnext.py#L90 + Version: v0.15.0 + +Models: + - Name: resnext50-32x4d_8xb32_in1k + Metadata: + FLOPs: 4270000000 + Parameters: 25030000 + In Collection: ResNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.90 + Top 5 Accuracy: 93.66 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth + Config: configs/resnext/resnext50-32x4d_8xb32_in1k.py + - Name: resnext101-32x4d_8xb32_in1k + Metadata: + FLOPs: 8030000000 + Parameters: 44180000 + In Collection: ResNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.61 + Top 5 Accuracy: 94.17 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth + Config: configs/resnext/resnext101-32x4d_8xb32_in1k.py + - Name: resnext101-32x8d_8xb32_in1k + Metadata: + FLOPs: 16500000000 + Parameters: 88790000 + In Collection: ResNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.27 + Top 5 Accuracy: 94.58 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth + Config: configs/resnext/resnext101-32x8d_8xb32_in1k.py + - Name: resnext152-32x4d_8xb32_in1k + Metadata: + FLOPs: 11800000000 + Parameters: 59950000 + In Collection: ResNeXt + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.88 + Top 5 Accuracy: 94.33 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth + Config: configs/resnext/resnext152-32x4d_8xb32_in1k.py diff --git a/configs/resnext/resnext101-32x4d_8xb32_in1k.py b/configs/resnext/resnext101-32x4d_8xb32_in1k.py new file mode 100644 index 0000000..970aa60 --- /dev/null +++ b/configs/resnext/resnext101-32x4d_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnext101_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnext/resnext101-32x8d_8xb32_in1k.py b/configs/resnext/resnext101-32x8d_8xb32_in1k.py new file mode 100644 index 0000000..315d05f --- /dev/null +++ b/configs/resnext/resnext101-32x8d_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnext101_32x8d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnext/resnext152-32x4d_8xb32_in1k.py b/configs/resnext/resnext152-32x4d_8xb32_in1k.py new file mode 100644 index 0000000..9c13731 --- /dev/null +++ b/configs/resnext/resnext152-32x4d_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnext152_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/resnext/resnext50-32x4d_8xb32_in1k.py 
b/configs/resnext/resnext50-32x4d_8xb32_in1k.py new file mode 100644 index 0000000..bd9c9fc --- /dev/null +++ b/configs/resnext/resnext50-32x4d_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/resnext50_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/revvit/README.md b/configs/revvit/README.md new file mode 100644 index 0000000..0439b22 --- /dev/null +++ b/configs/revvit/README.md @@ -0,0 +1,91 @@ +# Reversible Vision Transformers + +> [Reversible Vision Transformers](https://openaccess.thecvf.com/content/CVPR2022/papers/Mangalam_Reversible_Vision_Transformers_CVPR_2022_paper.pdf) + + + +## Introduction + +**RevViT** is initially described in [Reversible Vision Transformers](https://openaccess.thecvf.com/content/CVPR2022/papers/Mangalam_Reversible_Vision_Transformers_CVPR_2022_paper.pdf), which introduces the reversible idea into vision transformers to reduce the GPU memory footprint required for training. + + + +
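The memory saving comes from the reversible coupling of two activation streams: the inputs of a block can be reconstructed exactly from its outputs, so intermediate activations do not need to be cached for the backward pass. A toy sketch of that coupling (ours, not the mmpretrain `RevViT` implementation, which combines it with custom backpropagation) is shown below.

```python
# Toy reversible coupling: F and G stand in for the attention and MLP sub-blocks.
# Because (x1, x2) can be reconstructed from (y1, y2), a reversible backbone can
# recompute activations during the backward pass instead of storing them.
import torch


def rev_forward(x1, x2, F, G):
    y1 = x1 + F(x2)
    y2 = x2 + G(y1)
    return y1, y2


def rev_inverse(y1, y2, F, G):
    x2 = y2 - G(y1)
    x1 = y1 - F(x2)
    return x1, x2


F = torch.nn.Linear(16, 16)
G = torch.nn.Linear(16, 16)
x1, x2 = torch.rand(4, 16), torch.rand(4, 16)
y1, y2 = rev_forward(x1, x2, F, G)
rx1, rx2 = rev_inverse(y1, y2, F, G)
assert torch.allclose(rx1, x1, atol=1e-5) and torch.allclose(rx2, x2, atol=1e-5)
```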
+ +
+ +## Abstract + +
+ +Show the paper's abstract + +
+We present Reversible Vision Transformers, a memory efficient architecture design for visual recognition. By decoupling the GPU memory footprint from the depth of the model, Reversible Vision Transformers enable memory efficient scaling of transformer architectures. We adapt two popular models, namely Vision Transformer and Multiscale Vision Transformers, to reversible variants and benchmark extensively across both model sizes and tasks of image classification, object detection and video classification. Reversible Vision Transformers achieve a reduced memory footprint of up to 15.5× at identical model complexity, parameters and accuracy, demonstrating the promise of reversible vision transformers as an efficient backbone for resource limited training regimes. Finally, we find that the additional computational burden of recomputing activations is more than overcome for deeper models, where throughput can increase up to 3.9× over their non-reversible counterparts. +
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('revvit-small_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('revvit-small_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/revvit/revvit-small_8xb256_in1k.py https://download.openmmlab.com/mmclassification/v0/revvit/revvit-base_3rdparty_in1k_20221213-87a7b0a5.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :-----------------------------------: | :----------------------------------------------------------------------------------: | +| `revvit-small_3rdparty_in1k`\* | From scratch | 22.44 | 4.58 | 79.87 | 94.90 | [config](revvit-small_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/revvit/revvit-base_3rdparty_in1k_20221213-87a7b0a5.pth) | +| `revvit-base_3rdparty_in1k`\* | From scratch | 87.34 | 17.49 | 81.81 | 95.56 | [config](revvit-base_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/revvit/revvit-small_3rdparty_in1k_20221213-a3a34f5c.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/SlowFast). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{mangalam2022reversible, + title={Reversible Vision Transformers}, + author={Mangalam, Karttikeya and Fan, Haoqi and Li, Yanghao and Wu, Chao-Yuan and Xiong, Bo and Feichtenhofer, Christoph and Malik, Jitendra}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + pages={10830--10840}, + year={2022} +} +``` diff --git a/configs/revvit/metafile.yml b/configs/revvit/metafile.yml new file mode 100644 index 0000000..842de07 --- /dev/null +++ b/configs/revvit/metafile.yml @@ -0,0 +1,48 @@ +Collections: + - Name: RevViT + Metadata: + Training Data: ImageNet-1k + Architecture: + - Vision Transformer + - Reversible + Paper: + URL: https://openaccess.thecvf.com/content/CVPR2022/papers/Mangalam_Reversible_Vision_Transformers_CVPR_2022_paper.pdf + Title: Reversible Vision Transformers + README: configs/revvit/README.md + Code: + Version: v1.0.0rc5 + URL: https://github.com/open-mmlab/mmpretrain/blob/1.0.0rc5/mmcls/models/backbones/revvit.py + +Models: + - Name: revvit-small_3rdparty_in1k + Metadata: + FLOPs: 4583427072 + Parameters: 22435432 + In Collection: RevViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.87 + Top 5 Accuracy: 94.90 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/revvit/revvit-small_3rdparty_in1k_20221213-a3a34f5c.pth + Config: configs/revvit/revvit-small_8xb256_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/pyslowfast/rev/REV_VIT_S.pyth + Code: https://github.com/facebookresearch/SlowFast + - Name: revvit-base_3rdparty_in1k + Metadata: + FLOPs: 17490450432 + Parameters: 87337192 + In Collection: RevViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.81 + Top 5 Accuracy: 95.56 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/revvit/revvit-base_3rdparty_in1k_20221213-87a7b0a5.pth + Config: configs/revvit/revvit-base_8xb256_in1k.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/pyslowfast/rev/REV_VIT_B.pyth + Code: https://github.com/facebookresearch/SlowFast diff --git a/configs/revvit/revvit-base_8xb256_in1k.py b/configs/revvit/revvit-base_8xb256_in1k.py new file mode 100644 index 0000000..e4fde5c --- /dev/null +++ b/configs/revvit/revvit-base_8xb256_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/revvit/revvit-base.py', + '../_base_/datasets/imagenet_bs128_revvit_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_revvit.py', + '../_base_/default_runtime.py' +] diff --git a/configs/revvit/revvit-small_8xb256_in1k.py b/configs/revvit/revvit-small_8xb256_in1k.py new file mode 100644 index 0000000..ec3904a --- /dev/null +++ b/configs/revvit/revvit-small_8xb256_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/revvit/revvit-small.py', + '../_base_/datasets/imagenet_bs128_revvit_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_revvit.py', + '../_base_/default_runtime.py' +] diff --git a/configs/riformer/README.md b/configs/riformer/README.md new file mode 100644 index 0000000..6be694d --- /dev/null +++ b/configs/riformer/README.md @@ -0,0 +1,181 @@ +# RIFormer + +> [RIFormer: Keep Your Vision Backbone Effective But Removing Token Mixer](https://arxiv.org/abs/2304.05659) + + + +## Introduction + +RIFormer is a way to keep a vision backbone effective while removing token mixers in its basic building blocks. 
Equipped with our proposed optimization strategy, we are able to build an extremely simple vision backbone with encouraging performance, while enjoying high efficiency during inference. RIFormer shares nearly the same macro and micro design as MetaFormer, but safely removes all token mixers. The quantitative results show that our networks outperform many prevailing backbones with faster inference speed on ImageNet-1K. + +
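For intuition, a MetaFormer-style block is `x + TokenMixer(Norm(x))` followed by `x + MLP(Norm(x))`; RIFormer keeps that macro design while the token mixer is absent at inference time (the deploy configs in this folder enable this via `deploy=True`). The block below is schematic only and is not the mmpretrain `RIFormer` implementation.

```python
# Schematic token-mixer-free block (not the mmpretrain RIFormer code).
# With the token mixer removed, the first residual branch no longer mixes
# information across spatial tokens; only per-token channel MLPs remain,
# which is where the inference-time efficiency comes from.
import torch
import torch.nn as nn


class TokenMixerFreeBlock(nn.Module):

    def __init__(self, dim=64, mlp_ratio=4):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.token_mixer = nn.Identity()  # attention/pooling in a MetaFormer
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(
            nn.Linear(dim, dim * mlp_ratio),
            nn.GELU(),
            nn.Linear(dim * mlp_ratio, dim),
        )

    def forward(self, x):  # x: (batch, num_tokens, dim)
        x = x + self.token_mixer(self.norm1(x))
        x = x + self.mlp(self.norm2(x))
        return x


block = TokenMixerFreeBlock()
print(block(torch.rand(2, 196, 64)).shape)  # torch.Size([2, 196, 64])
```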
+ +
+ +## Abstract + +
+ +Show the paper's abstract + +
+This paper studies how to keep a vision backbone effective while removing token mixers in its basic building blocks. Token mixers, such as self-attention in vision transformers (ViTs), are intended to perform information communication between different spatial tokens but suffer from considerable computational cost and latency. However, directly removing them will lead to an incomplete model structure prior, and thus brings a significant accuracy drop. To this end, we first develop a RepIdentityFormer based on the re-parameterizing idea to study the token mixer free model architecture. We then explore an improved learning paradigm to break the limitation of the simple token mixer free backbone, and summarize the empirical practice into 5 guidelines. Equipped with the proposed optimization strategy, we are able to build an extremely simple vision backbone with encouraging performance, while enjoying high efficiency during inference. Extensive experiments and ablative analysis also demonstrate that the inductive bias of network architecture can be incorporated into a simple network structure with an appropriate optimization strategy. We hope this work can serve as a starting point for the exploration of optimization-driven efficient network design. +
+ +
+ +## How to use + +The checkpoints provided are all `training-time` models. Use the reparameterize tool or `switch_to_deploy` interface to switch them to more efficient `inference-time` architecture, which not only has fewer parameters but also less calculations. + + + +**Predict image** + +Use `classifier.backbone.switch_to_deploy()` interface to switch the RIFormer models into inference mode. + +```python +>>> import torch +>>> from mmpretrain import get_model, inference_model +>>> +>>> model = get_model("riformer-s12_in1k", pretrained=True) +>>> results = inference_model(model, 'demo/demo.JPEG') +>>> print( (results['pred_class'], results['pred_score']) ) +('sea snake', 0.7827484011650085) +>>> +>>> # switch to deploy mode +>>> model.backbone.switch_to_deploy() +>>> results = inference_model(model, 'demo/demo.JPEG') +>>> print( (results['pred_class'], results['pred_score']) ) +('sea snake', 0.7827480435371399) +``` + +**Use the model** + +```python +>>> import torch +>>> +>>> model = get_model("riformer-s12_in1k", pretrained=True) +>>> model.eval() +>>> inputs = torch.rand(1, 3, 224, 224).to(model.data_preprocessor.device) +>>> # To get classification scores. +>>> out = model(inputs) +>>> print(out.shape) +torch.Size([1, 1000]) +>>> # To extract features. +>>> outs = model.extract_feat(inputs) +>>> print(outs[0].shape) +torch.Size([1, 512]) +>>> +>>> # switch to deploy mode +>>> model.backbone.switch_to_deploy() +>>> out_deploy = model(inputs) +>>> print(out.shape) +torch.Size([1, 1000]) +>>> assert torch.allclose(out, out_deploy, rtol=1e-4, atol=1e-5) # pass without error +``` + +**Test Command** + +Place the ImageNet dataset to the `data/imagenet/` directory, or prepare datasets according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +*224×224* + +Download Checkpoint: + +```shell +wget https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s12_32xb128_in1k_20230406-6741ce71.pth +``` + +Test use unfused model: + +```shell +python tools/test.py configs/riformer/riformer-s12_8xb128_in1k.py riformer-s12_32xb128_in1k_20230406-6741ce71.pth +``` + +Reparameterize checkpoint: + +```shell +python tools/model_converters/reparameterize_model.py configs/riformer/riformer-s12_8xb128_in1k.py riformer-s12_32xb128_in1k_20230406-6741ce71.pth riformer-s12_deploy.pth +``` + +Test use fused model: + +```shell +python tools/test.py configs/riformer/deploy/riformer-s12-deploy_8xb128_in1k.py riformer-s12_deploy.pth +``` + + + +For more configurable parameters, please refer to the [API](https://mmpretrain.readthedocs.io/en/latest/api/generated/mmpretrain.models.backbones.RIFormer.html#mmpretrain.models.backbones.RIFormer). + +
+ +How to use the reparameterization tool (click to show) + +
+ +Use the provided tool to reparameterize the given model and save the checkpoint: + +```bash +python tools/model_converters/reparameterize_model.py ${CFG_PATH} ${SRC_CKPT_PATH} ${TARGET_CKPT_PATH} +``` + +`${CFG_PATH}` is the config file path, `${SRC_CKPT_PATH}` is the source checkpoint file path, `${TARGET_CKPT_PATH}` is the target deploy weight file path. + +For example: + +```shell +# download the weight +wget https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s12_32xb128_in1k_20230406-6741ce71.pth + +# reparameterize unfused weight to fused weight +python tools/model_converters/reparameterize_model.py configs/riformer/riformer-s12_8xb128_in1k.py riformer-s12_32xb128_in1k_20230406-6741ce71.pth riformer-s12_deploy.pth +``` + +To use reparameterized weights, you can use the deploy model config file such as the [s12_deploy example](./deploy/riformer-s12-deploy_8xb128_in1k.py): + +```text +# in riformer-s12-deploy_8xb128_in1k.py +_base_ = '../riformer-s12_8xb128_in1k.py' # basic s12 config + +model = dict(backbone=dict(deploy=True)) # switch model into deploy mode +``` + +```shell +python tools/test.py configs/riformer/deploy/riformer-s12-deploy_8xb128_in1k.py riformer-s12_deploy.pth +``` + +
+ +
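If you prefer to do the conversion from Python rather than with the script above, the sketch below should be roughly equivalent; it assumes the `get_model` and `switch_to_deploy` interfaces behave as in the examples earlier in this README, and it is a sketch rather than a tested recipe.

```python
# Hedged sketch: programmatic counterpart of reparameterize_model.py, assuming
# `get_model` accepts a config path plus a local checkpoint path and that
# `switch_to_deploy()` folds the training-time branches in place.
import torch
from mmpretrain import get_model

model = get_model(
    'configs/riformer/riformer-s12_8xb128_in1k.py',
    pretrained='riformer-s12_32xb128_in1k_20230406-6741ce71.pth')
model.backbone.switch_to_deploy()
torch.save({'state_dict': model.state_dict()}, 'riformer-s12_deploy.pth')
```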
+ +## Results and models + +### ImageNet-1k + +| Model | resolution | Params(M) | Flops(G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------------: | :--------: | :-------: | :------: | :-------: | :-------: | :-------------------------------------------: | :---------------------------------------------------------------------------------------: | +| riformer-s12_in1k | 224x224 | 11.92 | 1.82 | 76.90 | 93.06 | [config](./riformer-s12_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s12_32xb128_in1k_20230406-6741ce71.pth) | +| riformer-s24_in1k | 224x224 | 21.39 | 3.41 | 80.28 | 94.80 | [config](./riformer-s24_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s24_32xb128_in1k_20230406-fdab072a.pth) | +| riformer-s36_in1k | 224x224 | 30.86 | 5.00 | 81.29 | 95.41 | [config](./riformer-s36_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s36_32xb128_in1k_20230406-fdfcd3b0.pth) | +| riformer-m36_in1k | 224x224 | 56.17 | 8.80 | 82.57 | 95.99 | [config](./riformer-m36_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v1/riformer/riformer-m36_32xb128_in1k_20230406-2fcb9d9b.pth) | +| riformer-m48_in1k | 224x224 | 73.47 | 11.59 | 82.75 | 96.11 | [config](./riformer-m48_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v1/riformer/riformer-m48_32xb128_in1k_20230406-2b9d1abf.pth) | +| riformer-s12_384_in1k | 384x384 | 11.92 | 5.36 | 78.29 | 93.93 | [config](./riformer-s12_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s12_32xb128_in1k-384px_20230406-145eda4c.pth) | +| riformer-s24_384_in1k | 384x384 | 21.39 | 10.03 | 81.36 | 95.40 | [config](./riformer-s24_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s24_32xb128_in1k-384px_20230406-bafae7ab.pth) | +| riformer-s36_384_in1k | 384x384 | 30.86 | 14.70 | 82.22 | 95.95 | [config](./riformer-s36_8xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s36_32xb128_in1k-384px_20230406-017ed3c4.pth) | +| riformer-m36_384_in1k | 384x384 | 56.17 | 25.87 | 83.39 | 96.40 | [config](./riformer-m36_8xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v1/riformer/riformer-m36_32xb128_in1k-384px_20230406-66a6f764.pth) | +| riformer-m48_384_in1k | 384x384 | 73.47 | 34.06 | 83.70 | 96.60 | [config](./riformer-m48_8xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v1/riformer/riformer-m48_32xb128_in1k-384px_20230406-2e874826.pth) | + +The config files of these models are only for inference. 
+ +## Citation + +```bibtex +@inproceedings{wang2023riformer, + title={RIFormer: Keep Your Vision Backbone Effective But Removing Token Mixer}, + author={Wang, Jiahao and Zhang, Songyang and Liu, Yong and Wu, Taiqiang and Yang, Yujiu and Liu, Xihui and Chen, Kai and Luo, Ping and Lin, Dahua}, + booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, + year={2023} +} +``` diff --git a/configs/riformer/deploy/riformer-m36-deploy_8xb128_in1k.py b/configs/riformer/deploy/riformer-m36-deploy_8xb128_in1k.py new file mode 100644 index 0000000..fcec41c --- /dev/null +++ b/configs/riformer/deploy/riformer-m36-deploy_8xb128_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../riformer-m36_8xb128_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/riformer/deploy/riformer-m36-deploy_8xb64_in1k-384px.py b/configs/riformer/deploy/riformer-m36-deploy_8xb64_in1k-384px.py new file mode 100644 index 0000000..e18f836 --- /dev/null +++ b/configs/riformer/deploy/riformer-m36-deploy_8xb64_in1k-384px.py @@ -0,0 +1,3 @@ +_base_ = '../riformer-m36_8xb64_in1k-384px.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/riformer/deploy/riformer-m48-deploy_8xb64_in1k-384px.py b/configs/riformer/deploy/riformer-m48-deploy_8xb64_in1k-384px.py new file mode 100644 index 0000000..0ab3353 --- /dev/null +++ b/configs/riformer/deploy/riformer-m48-deploy_8xb64_in1k-384px.py @@ -0,0 +1,3 @@ +_base_ = '../riformer-m48_8xb64_in1k-384px.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/riformer/deploy/riformer-m48-deploy_8xb64_in1k.py b/configs/riformer/deploy/riformer-m48-deploy_8xb64_in1k.py new file mode 100644 index 0000000..e32ad32 --- /dev/null +++ b/configs/riformer/deploy/riformer-m48-deploy_8xb64_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../riformer-m48_8xb64_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/riformer/deploy/riformer-s12-deploy_8xb128_in1k-384px.py b/configs/riformer/deploy/riformer-s12-deploy_8xb128_in1k-384px.py new file mode 100644 index 0000000..ffbb4be --- /dev/null +++ b/configs/riformer/deploy/riformer-s12-deploy_8xb128_in1k-384px.py @@ -0,0 +1,3 @@ +_base_ = '../riformer-s12_8xb128_in1k-384px.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/riformer/deploy/riformer-s12-deploy_8xb128_in1k.py b/configs/riformer/deploy/riformer-s12-deploy_8xb128_in1k.py new file mode 100644 index 0000000..70fd8b7 --- /dev/null +++ b/configs/riformer/deploy/riformer-s12-deploy_8xb128_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../riformer-s12_8xb128_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/riformer/deploy/riformer-s24-deploy_8xb128_in1k-384px.py b/configs/riformer/deploy/riformer-s24-deploy_8xb128_in1k-384px.py new file mode 100644 index 0000000..7d05e5c --- /dev/null +++ b/configs/riformer/deploy/riformer-s24-deploy_8xb128_in1k-384px.py @@ -0,0 +1,3 @@ +_base_ = '../riformer-s24_8xb128_in1k-384px.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/riformer/deploy/riformer-s24-deploy_8xb128_in1k.py b/configs/riformer/deploy/riformer-s24-deploy_8xb128_in1k.py new file mode 100644 index 0000000..47f83a0 --- /dev/null +++ b/configs/riformer/deploy/riformer-s24-deploy_8xb128_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../riformer-s24_8xb128_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/riformer/deploy/riformer-s36-deploy_8xb128_in1k.py b/configs/riformer/deploy/riformer-s36-deploy_8xb128_in1k.py new file mode 100644 index 0000000..2c03bb1 --- 
/dev/null +++ b/configs/riformer/deploy/riformer-s36-deploy_8xb128_in1k.py @@ -0,0 +1,3 @@ +_base_ = '../riformer-s36_8xb128_in1k.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/riformer/deploy/riformer-s36-deploy_8xb64_in1k-384px.py b/configs/riformer/deploy/riformer-s36-deploy_8xb64_in1k-384px.py new file mode 100644 index 0000000..67b17ee --- /dev/null +++ b/configs/riformer/deploy/riformer-s36-deploy_8xb64_in1k-384px.py @@ -0,0 +1,3 @@ +_base_ = '../riformer-s36_8xb64_in1k-384px.py' + +model = dict(backbone=dict(deploy=True)) diff --git a/configs/riformer/metafile.yml b/configs/riformer/metafile.yml new file mode 100644 index 0000000..5f3e2ec --- /dev/null +++ b/configs/riformer/metafile.yml @@ -0,0 +1,152 @@ +Collections: + - Name: RIFormer + Metadata: + Training Data: ImageNet-1k + Training Resources: 8x A100 GPUs + Architecture: + - Affine + - 1x1 Convolution + - LayerScale + Paper: + URL: https://arxiv.org/abs/xxxx.xxxxx + Title: "RIFormer: Keep Your Vision Backbone Effective But Removing Token Mixer" + README: configs/riformer/README.md + Code: + Version: v1.0.0rc7 + URL: null + +Models: + - Name: riformer-s12_in1k + Metadata: + FLOPs: 1822000000 + Parameters: 11915000 + In Collection: RIFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.90 + Top 5 Accuracy: 93.06 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s12_32xb128_in1k_20230406-6741ce71.pth + Config: configs/riformer/riformer-s12_8xb128_in1k.py + - Name: riformer-s24_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 3412000000 + Parameters: 21389000 + In Collection: RIFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.28 + Top 5 Accuracy: 94.80 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s24_32xb128_in1k_20230406-fdab072a.pth + Config: configs/riformer/riformer-s24_8xb128_in1k.py + - Name: riformer-s36_in1k + Metadata: + FLOPs: 5003000000 + Parameters: 30863000 + In Collection: RIFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.29 + Top 5 Accuracy: 95.41 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s36_32xb128_in1k_20230406-fdfcd3b0.pth + Config: configs/riformer/riformer-s36_8xb128_in1k.py + - Name: riformer-m36_in1k + Metadata: + Training Data: ImageNet-1k + FLOPs: 8801000000 + Parameters: 56173000 + In Collection: RIFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.57 + Top 5 Accuracy: 95.99 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v1/riformer/riformer-m36_32xb128_in1k_20230406-2fcb9d9b.pth + Config: configs/riformer/riformer-m36_8xb128_in1k.py + - Name: riformer-m48_in1k + Metadata: + FLOPs: 11590000000 + Parameters: 73473000 + In Collection: RIFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.75 + Top 5 Accuracy: 96.11 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v1/riformer/riformer-m48_32xb128_in1k_20230406-2b9d1abf.pth + Config: configs/riformer/riformer-m48_8xb64_in1k.py + - Name: riformer-s12_in1k-384 + Metadata: + FLOPs: 5355000000 + Parameters: 11915000 + In Collection: RIFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.29 + Top 5 Accuracy: 93.93 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s12_32xb128_in1k-384px_20230406-145eda4c.pth + Config: configs/riformer/riformer-s12_8xb128_in1k-384px.py + - Name: riformer-s24_in1k-384 + Metadata: + Training Data: ImageNet-1k + FLOPs: 10028000000 + Parameters: 21389000 + In Collection: RIFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.36 + Top 5 Accuracy: 95.40 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s24_32xb128_in1k-384px_20230406-bafae7ab.pth + Config: configs/riformer/riformer-s24_8xb128_in1k-384px.py + - Name: riformer-s36_in1k-384 + Metadata: + FLOPs: 14702000000 + Parameters: 30863000 + In Collection: RIFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.22 + Top 5 Accuracy: 95.95 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v1/riformer/riformer-s36_32xb128_in1k-384px_20230406-017ed3c4.pth + Config: configs/riformer/riformer-s36_8xb64_in1k-384px.py + - Name: riformer-m36_in1k-384 + Metadata: + Training Data: ImageNet-1k + FLOPs: 25865000000 + Parameters: 56173000 + In Collection: RIFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.39 + Top 5 Accuracy: 96.40 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v1/riformer/riformer-m36_32xb128_in1k-384px_20230406-66a6f764.pth + Config: configs/riformer/riformer-m36_8xb64_in1k-384px.py + - Name: riformer-m48_in1k-384 + Metadata: + FLOPs: 34060000000 + Parameters: 73473000 + In Collection: RIFormer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.70 + Top 5 Accuracy: 96.60 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v1/riformer/riformer-m48_32xb128_in1k-384px_20230406-2e874826.pth + Config: configs/riformer/riformer-m48_8xb64_in1k-384px.py diff --git a/configs/riformer/riformer-m36_8xb128_in1k.py b/configs/riformer/riformer-m36_8xb128_in1k.py new file mode 100644 index 0000000..30e93aa --- /dev/null +++ b/configs/riformer/riformer-m36_8xb128_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_poolformer_medium_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RIFormer', + arch='m36', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
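+# For example, if automatic LR scaling is enabled at launch, training with +# 8 GPUs x 128 samples per GPU = 1024 would scale lr by 1024 / 4096 (4e-3 -> 1e-3).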
+# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/riformer/riformer-m36_8xb64_in1k-384px.py b/configs/riformer/riformer-m36_8xb64_in1k-384px.py new file mode 100644 index 0000000..57f687c --- /dev/null +++ b/configs/riformer/riformer-m36_8xb64_in1k-384px.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_riformer_medium_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RIFormer', + arch='m36', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/riformer/riformer-m48_8xb64_in1k-384px.py b/configs/riformer/riformer-m48_8xb64_in1k-384px.py new file mode 100644 index 0000000..ef6f196 --- /dev/null +++ b/configs/riformer/riformer-m48_8xb64_in1k-384px.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_riformer_medium_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RIFormer', + arch='m48', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/riformer/riformer-m48_8xb64_in1k.py b/configs/riformer/riformer-m48_8xb64_in1k.py new file mode 100644 index 0000000..9dc5c3e --- /dev/null +++ b/configs/riformer/riformer-m48_8xb64_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_poolformer_medium_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RIFormer', + arch='m48', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/riformer/riformer-s12_8xb128_in1k-384px.py b/configs/riformer/riformer-s12_8xb128_in1k-384px.py new file mode 100644 index 0000000..6d19dae --- /dev/null +++ b/configs/riformer/riformer-s12_8xb128_in1k-384px.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_riformer_small_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RIFormer', + arch='s12', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/riformer/riformer-s12_8xb128_in1k.py b/configs/riformer/riformer-s12_8xb128_in1k.py new file mode 100644 index 0000000..e85f8fb --- /dev/null +++ b/configs/riformer/riformer-s12_8xb128_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RIFormer', + arch='s12', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/riformer/riformer-s24_8xb128_in1k-384px.py b/configs/riformer/riformer-s24_8xb128_in1k-384px.py new file mode 100644 index 0000000..6a1ec7b --- /dev/null +++ b/configs/riformer/riformer-s24_8xb128_in1k-384px.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_riformer_small_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RIFormer', + arch='s24', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/riformer/riformer-s24_8xb128_in1k.py b/configs/riformer/riformer-s24_8xb128_in1k.py new file mode 100644 index 0000000..560cddc --- /dev/null +++ b/configs/riformer/riformer-s24_8xb128_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RIFormer', + arch='s24', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/riformer/riformer-s36_8xb128_in1k.py b/configs/riformer/riformer-s36_8xb128_in1k.py new file mode 100644 index 0000000..2851130 --- /dev/null +++ b/configs/riformer/riformer-s36_8xb128_in1k.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_poolformer_small_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RIFormer', + arch='s36', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/riformer/riformer-s36_8xb64_in1k-384px.py b/configs/riformer/riformer-s36_8xb64_in1k-384px.py new file mode 100644 index 0000000..b307735 --- /dev/null +++ b/configs/riformer/riformer-s36_8xb64_in1k-384px.py @@ -0,0 +1,39 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs128_riformer_small_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='RIFormer', + arch='s36', + drop_path_rate=0.1, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict(type='Constant', layer=['GroupNorm'], val=1., bias=0.), + ]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/sam/README.md b/configs/sam/README.md new file mode 100644 index 0000000..1a5668a --- /dev/null +++ b/configs/sam/README.md @@ -0,0 +1,57 @@ +# SAM + +> [Segment Anything](https://arxiv.org/abs/2304.02643) + + + +## Abstract + +We introduce the Segment Anything (SA) project: a new task, model, and dataset for image segmentation. Using our efficient model in a data collection loop, we built the largest segmentation dataset to date (by far), with over 1 billionmasks on 11M licensed and privacy respecting images. The model is designed and trained to be promptable, so it can transfer zero-shot to new image distributions and tasks. We evaluate its capabilities on numerous tasks and find that its zero-shot performance is impressive – often competitive with or even superior to prior fully supervised results. We are releasing the Segment Anything Model (SAM) and corresponding dataset (SA-1B) of 1B masks and 11M images at https://segment-anything.com to foster research into foundation models for computer vision. + +
+ +## How to use it? + + + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('vit-base-p16_sam-pre_3rdparty_sa1b-1024px', pretrained=True) +inputs = torch.rand(1, 3, 1024, 1024) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :--------------------------------------------- | :--------: | :-------: | :-------------------------------------: | :----------------------------------------------------------------------------------------------: | +| `vit-base-p16_sam-pre_3rdparty_sa1b-1024px`\* | 89.67 | 486.00 | [config](vit-base-p16_sam_headless.py) | [model](https://download.openmmlab.com/mmclassification/v1/vit_sam/vit-base-p16_sam-pre_3rdparty_sa1b-1024px_20230411-2320f9cc.pth) | +| `vit-large-p16_sam-pre_3rdparty_sa1b-1024px`\* | 308.00 | 1494.00 | [config](vit-large-p16_sam_headless.py) | [model](https://download.openmmlab.com/mmclassification/v1/vit_sam/vit-large-p16_sam-pre_3rdparty_sa1b-1024px_20230411-595feafd.pth) | +| `vit-huge-p16_sam-pre_3rdparty_sa1b-1024px`\* | 637.00 | 2982.00 | [config](vit-huge-p16_sam_headless.py) | [model](https://download.openmmlab.com/mmclassification/v1/vit_sam/vit-huge-p16_sam-pre_3rdparty_sa1b-1024px_20230411-3f13c653.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/segment-anything/). The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{kirillov2023segany, + title={Segment Anything}, + author={Kirillov, Alexander and Mintun, Eric and Ravi, Nikhila and Mao, Hanzi and Rolland, Chloe and Gustafson, Laura and Xiao, Tete and Whitehead, Spencer and Berg, Alexander C. 
and Lo, Wan-Yen and Doll{\'a}r, Piotr and Girshick, Ross}, + journal={arXiv:2304.02643}, + year={2023} +} +``` diff --git a/configs/sam/metafile.yml b/configs/sam/metafile.yml new file mode 100644 index 0000000..1ac65ce --- /dev/null +++ b/configs/sam/metafile.yml @@ -0,0 +1,61 @@ +Collections: + - Name: SAM + Metadata: + Architecture: + - Convolution + - Dense Connections + - Dropout + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + Paper: + Title: 'Segment Anything' + URL: https://arxiv.org/abs/2304.02643 + README: configs/sam/README.md + Code: + URL: null + Version: null + +Models: + - Name: vit-base-p16_sam-pre_3rdparty_sa1b-1024px + Metadata: + FLOPs: 486000000000 + Parameters: 89671000 + Training Data: + - SA-1B + In Collection: SAM + Results: null + Weights: https://download.openmmlab.com/mmclassification/v1/vit_sam/vit-base-p16_sam-pre_3rdparty_sa1b-1024px_20230411-2320f9cc.pth + Config: configs/sam/vit-base-p16_sam_headless.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth + Code: https://github.com/facebookresearch/segment-anything/ + + - Name: vit-large-p16_sam-pre_3rdparty_sa1b-1024px + Metadata: + FLOPs: 1494000000000 + Parameters: 308000000 + Training Data: + - SA-1B + In Collection: SAM + Results: null + Weights: https://download.openmmlab.com/mmclassification/v1/vit_sam/vit-large-p16_sam-pre_3rdparty_sa1b-1024px_20230411-595feafd.pth + Config: configs/sam/vit-large-p16_sam_headless.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth + Code: https://github.com/facebookresearch/segment-anything/ + + - Name: vit-huge-p16_sam-pre_3rdparty_sa1b-1024px + Metadata: + FLOPs: 2982000000000 + Parameters: 637000000 + Training Data: + - SA-1B + In Collection: SAM + Results: null + Weights: https://download.openmmlab.com/mmclassification/v1/vit_sam/vit-huge-p16_sam-pre_3rdparty_sa1b-1024px_20230411-3f13c653.pth + Config: configs/sam/vit-huge-p16_sam_headless.py + Converted From: + Weights: https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth + Code: https://github.com/facebookresearch/segment-anything/ diff --git a/configs/sam/vit-base-p16_sam_headless.py b/configs/sam/vit-base-p16_sam_headless.py new file mode 100644 index 0000000..bea2637 --- /dev/null +++ b/configs/sam/vit-base-p16_sam_headless.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ViTSAM', + arch='base', + img_size=1024, + patch_size=16, + out_channels=256, + use_abs_pos=True, + use_rel_pos=True, + window_size=14, + ), + neck=None, + head=None, +) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/sam/vit-huge-p16_sam_headless.py b/configs/sam/vit-huge-p16_sam_headless.py new file mode 100644 index 0000000..8004755 --- /dev/null +++ b/configs/sam/vit-huge-p16_sam_headless.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ViTSAM', + arch='huge', + img_size=1024, + patch_size=16, + out_channels=256, + use_abs_pos=True, + use_rel_pos=True, + window_size=14, + ), + neck=None, + head=None, +) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) diff --git 
a/configs/sam/vit-large-p16_sam_headless.py b/configs/sam/vit-large-p16_sam_headless.py new file mode 100644 index 0000000..1cebeb0 --- /dev/null +++ b/configs/sam/vit-large-p16_sam_headless.py @@ -0,0 +1,24 @@ +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ViTSAM', + arch='large', + img_size=1024, + patch_size=16, + out_channels=256, + use_abs_pos=True, + use_rel_pos=True, + window_size=14, + ), + neck=None, + head=None, +) + +data_preprocessor = dict( + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) diff --git a/configs/seresnet/README.md b/configs/seresnet/README.md new file mode 100644 index 0000000..b5151cc --- /dev/null +++ b/configs/seresnet/README.md @@ -0,0 +1,81 @@ +# SEResNet + +> [Squeeze-and-Excitation Networks](https://openaccess.thecvf.com/content_cvpr_2018/html/Hu_Squeeze-and-Excitation_Networks_CVPR_2018_paper.html) + + + +## Abstract + +The central building block of convolutional neural networks (CNNs) is the convolution operator, which enables networks to construct informative features by fusing both spatial and channel-wise information within local receptive fields at each layer. A broad range of prior research has investigated the spatial component of this relationship, seeking to strengthen the representational power of a CNN by enhancing the quality of spatial encodings throughout its feature hierarchy. In this work, we focus instead on the channel relationship and propose a novel architectural unit, which we term the "Squeeze-and-Excitation" (SE) block, that adaptively recalibrates channel-wise feature responses by explicitly modelling interdependencies between channels. We show that these blocks can be stacked together to form SENet architectures that generalise extremely effectively across different datasets. We further demonstrate that SE blocks bring significant improvements in performance for existing state-of-the-art CNNs at slight additional computational cost. Squeeze-and-Excitation Networks formed the foundation of our ILSVRC 2017 classification submission which won first place and reduced the top-5 error to 2.251%, surpassing the winning entry of 2016 by a relative improvement of ~25%. + +
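To make the channel recalibration described above concrete, here is a minimal, self-contained PyTorch sketch of an SE block: global average pooling, a bottleneck MLP with reduction ratio 16, and a sigmoid gate. It is illustrative only and is not the mmpretrain `SEResNet` code; the 256-channel, 56x56 feature map is an arbitrary example.

```python
import torch
import torch.nn as nn

class SEBlock(nn.Module):
    """Minimal Squeeze-and-Excitation block (illustrative, not the mmpretrain module)."""

    def __init__(self, channels: int, reduction: int = 16):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction),  # squeeze to a bottleneck
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),  # excite back to all channels
            nn.Sigmoid(),                                # per-channel gates in (0, 1)
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        b, c, _, _ = x.shape
        w = self.fc(x.mean(dim=(2, 3)))      # global average pool -> (b, c)
        return x * w.view(b, c, 1, 1)        # recalibrate channel-wise responses

feat = torch.rand(2, 256, 56, 56)
print(SEBlock(256)(feat).shape)              # torch.Size([2, 256, 56, 56])
```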
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('seresnet50_8xb32_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('seresnet50_8xb32_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/seresnet/seresnet50_8xb32_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/seresnet/seresnet50_8xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------: | :------------------------------------------------------------------------------------------: | +| `seresnet50_8xb32_in1k` | From scratch | 28.09 | 4.13 | 77.74 | 93.84 | [config](seresnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200708-657b3c36.log.json) | +| `seresnet101_8xb32_in1k` | From scratch | 49.33 | 7.86 | 78.26 | 94.07 | [config](seresnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200708-038a4d04.log.json) | + +## Citation + +```bibtex +@inproceedings{hu2018squeeze, + title={Squeeze-and-excitation networks}, + author={Hu, Jie and Shen, Li and Sun, Gang}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={7132--7141}, + year={2018} +} +``` diff --git a/configs/seresnet/metafile.yml b/configs/seresnet/metafile.yml new file mode 100644 index 0000000..1a9f116 --- /dev/null +++ b/configs/seresnet/metafile.yml @@ -0,0 +1,47 @@ +Collections: + - Name: SEResNet + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 140 + Batch Size: 256 + Architecture: + - ResNet + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2018/html/Hu_Squeeze-and-Excitation_Networks_CVPR_2018_paper.html + Title: "Squeeze-and-Excitation Networks" + README: configs/seresnet/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/seresnet.py#L58 + Version: v0.15.0 + +Models: + - Name: seresnet50_8xb32_in1k + Metadata: + FLOPs: 4130000000 + Parameters: 28090000 + In Collection: SEResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.74 + Top 5 Accuracy: 93.84 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth + Config: 
configs/seresnet/seresnet50_8xb32_in1k.py + - Name: seresnet101_8xb32_in1k + Metadata: + FLOPs: 7860000000 + Parameters: 49330000 + In Collection: SEResNet + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.26 + Top 5 Accuracy: 94.07 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth + Config: configs/seresnet/seresnet101_8xb32_in1k.py diff --git a/configs/seresnet/seresnet101_8xb32_in1k.py b/configs/seresnet/seresnet101_8xb32_in1k.py new file mode 100644 index 0000000..8be39e7 --- /dev/null +++ b/configs/seresnet/seresnet101_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/seresnet101.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/seresnet/seresnet50_8xb32_in1k.py b/configs/seresnet/seresnet50_8xb32_in1k.py new file mode 100644 index 0000000..19082bd --- /dev/null +++ b/configs/seresnet/seresnet50_8xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/seresnet50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256_140e.py', + '../_base_/default_runtime.py' +] diff --git a/configs/seresnet/seresnext101-32x4d_8xb32_in1k.py b/configs/seresnet/seresnext101-32x4d_8xb32_in1k.py new file mode 100644 index 0000000..0177830 --- /dev/null +++ b/configs/seresnet/seresnext101-32x4d_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/seresnext101_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/seresnet/seresnext50-32x4d_8xb32_in1k.py b/configs/seresnet/seresnext50-32x4d_8xb32_in1k.py new file mode 100644 index 0000000..4d593e4 --- /dev/null +++ b/configs/seresnet/seresnext50-32x4d_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/seresnext50_32x4d.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/shufflenet_v1/README.md b/configs/shufflenet_v1/README.md new file mode 100644 index 0000000..618a22d --- /dev/null +++ b/configs/shufflenet_v1/README.md @@ -0,0 +1,80 @@ +# Shufflenet V1 + +> [ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices](https://openaccess.thecvf.com/content_cvpr_2018/html/Zhang_ShuffleNet_An_Extremely_CVPR_2018_paper.html) + + + +## Abstract + +We introduce an extremely computation-efficient CNN architecture named ShuffleNet, which is designed specially for mobile devices with very limited computing power (e.g., 10-150 MFLOPs). The new architecture utilizes two new operations, pointwise group convolution and channel shuffle, to greatly reduce computation cost while maintaining accuracy. Experiments on ImageNet classification and MS COCO object detection demonstrate the superior performance of ShuffleNet over other structures, e.g. lower top-1 error (absolute 7.8%) than recent MobileNet on ImageNet classification task, under the computation budget of 40 MFLOPs. On an ARM-based mobile device, ShuffleNet achieves ~13x actual speedup over AlexNet while maintaining comparable accuracy. + +
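The two operations named in the abstract are easy to see in isolation. The sketch below (illustrative only, not the mmpretrain `ShuffleNetV1` implementation; the 240 channels and 3 groups are arbitrary toy values) applies a pointwise group convolution and then a channel shuffle so the next grouped layer can mix information across groups:

```python
import torch
import torch.nn as nn

def channel_shuffle(x: torch.Tensor, groups: int) -> torch.Tensor:
    """Interleave channels across groups so the next grouped conv mixes information."""
    b, c, h, w = x.shape
    x = x.view(b, groups, c // groups, h, w)  # split channels into groups
    x = x.transpose(1, 2).contiguous()        # swap group and per-group dimensions
    return x.view(b, c, h, w)                 # flatten back: channels are now interleaved

# Pointwise group convolution keeps the FLOPs budget small; the shuffle lets
# information cross group boundaries in the following layer.
pw_group_conv = nn.Conv2d(240, 240, kernel_size=1, groups=3, bias=False)
x = torch.rand(1, 240, 28, 28)
y = channel_shuffle(pw_group_conv(x), groups=3)
print(y.shape)  # torch.Size([1, 240, 28, 28])
```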
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('shufflenet-v1-1x_16xb64_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('shufflenet-v1-1x_16xb64_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------------: | :------------------------------------------------------------------------------: | +| `shufflenet-v1-1x_16xb64_in1k` | From scratch | 1.87 | 0.15 | 68.13 | 87.81 | [config](shufflenet-v1-1x_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.json) | + +## Citation + +```bibtex +@inproceedings{zhang2018shufflenet, + title={Shufflenet: An extremely efficient convolutional neural network for mobile devices}, + author={Zhang, Xiangyu and Zhou, Xinyu and Lin, Mengxiao and Sun, Jian}, + booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition}, + pages={6848--6856}, + year={2018} +} +``` diff --git a/configs/shufflenet_v1/metafile.yml b/configs/shufflenet_v1/metafile.yml new file mode 100644 index 0000000..e3ca139 --- /dev/null +++ b/configs/shufflenet_v1/metafile.yml @@ -0,0 +1,35 @@ +Collections: + - Name: Shufflenet V1 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + - No BN decay + Training Resources: 8x 1080 GPUs + Epochs: 300 + Batch Size: 1024 + Architecture: + - Shufflenet V1 + Paper: + URL: https://openaccess.thecvf.com/content_cvpr_2018/html/Zhang_ShuffleNet_An_Extremely_CVPR_2018_paper.html + Title: "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" + README: configs/shufflenet_v1/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/shufflenet_v1.py#L152 + Version: v0.15.0 + +Models: + - Name: shufflenet-v1-1x_16xb64_in1k + Metadata: + FLOPs: 146000000 + Parameters: 1870000 + In Collection: Shufflenet V1 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 68.13 + Top 5 Accuracy: 87.81 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth + Config: configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py diff --git 
a/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py b/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py new file mode 100644 index 0000000..58e45f1 --- /dev/null +++ b/configs/shufflenet_v1/shufflenet-v1-1x_16xb64_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/shufflenet_v1_1x.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py', + '../_base_/default_runtime.py' +] diff --git a/configs/shufflenet_v2/README.md b/configs/shufflenet_v2/README.md new file mode 100644 index 0000000..804aac1 --- /dev/null +++ b/configs/shufflenet_v2/README.md @@ -0,0 +1,80 @@ +# Shufflenet V2 + +> [ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design](https://openaccess.thecvf.com/content_ECCV_2018/papers/Ningning_Light-weight_CNN_Architecture_ECCV_2018_paper.pdf) + + + +## Abstract + +Currently, the neural network architecture design is mostly guided by the *indirect* metric of computation complexity, i.e., FLOPs. However, the *direct* metric, e.g., speed, also depends on the other factors such as memory access cost and platform characteristics. Thus, this work proposes to evaluate the direct metric on the target platform, beyond only considering FLOPs. Based on a series of controlled experiments, this work derives several practical *guidelines* for efficient network design. Accordingly, a new architecture is presented, called *ShuffleNet V2*. Comprehensive ablation experiments verify that our model is the state-of-the-art in terms of speed and accuracy tradeoff. + +
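The core argument above, that FLOPs alone do not predict speed, can be checked directly. The sketch below (illustrative only; the channel counts, group number, and input sizes are arbitrary assumptions) times two 1x1 convolutions that cost the same FLOPs per pixel but move different amounts of feature-map memory, which is the kind of direct, on-device measurement the paper advocates:

```python
import time
import torch
import torch.nn as nn

def latency_ms(m: nn.Module, x: torch.Tensor, iters: int = 30) -> float:
    """Crude CPU latency measurement: warm up, then average over several runs."""
    with torch.no_grad():
        for _ in range(5):
            m(x)
        t0 = time.perf_counter()
        for _ in range(iters):
            m(x)
    return (time.perf_counter() - t0) / iters * 1e3

# Both 1x1 convolutions cost the same FLOPs per pixel (128*128 == 512*512/16),
# but the heavily grouped one moves 4x more feature-map memory, so measured
# latency can differ noticeably: the "direct metric" the paper argues for.
dense = nn.Conv2d(128, 128, kernel_size=1, bias=False)
grouped = nn.Conv2d(512, 512, kernel_size=1, groups=16, bias=False)

print(f'dense  : {latency_ms(dense, torch.rand(8, 128, 56, 56)):.2f} ms')
print(f'grouped: {latency_ms(grouped, torch.rand(8, 512, 56, 56)):.2f} ms')
```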
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('shufflenet-v2-1x_16xb64_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('shufflenet-v2-1x_16xb64_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------------: | :------------------------------------------------------------------------------: | +| `shufflenet-v2-1x_16xb64_in1k` | From scratch | 2.28 | 0.15 | 69.55 | 88.92 | [config](shufflenet-v2-1x_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.json) | + +## Citation + +```bibtex +@inproceedings{ma2018shufflenet, + title={Shufflenet v2: Practical guidelines for efficient cnn architecture design}, + author={Ma, Ningning and Zhang, Xiangyu and Zheng, Hai-Tao and Sun, Jian}, + booktitle={Proceedings of the European conference on computer vision (ECCV)}, + pages={116--131}, + year={2018} +} +``` diff --git a/configs/shufflenet_v2/metafile.yml b/configs/shufflenet_v2/metafile.yml new file mode 100644 index 0000000..9c1eebc --- /dev/null +++ b/configs/shufflenet_v2/metafile.yml @@ -0,0 +1,35 @@ +Collections: + - Name: Shufflenet V2 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + - No BN decay + Training Resources: 8x 1080 GPUs + Epochs: 300 + Batch Size: 1024 + Architecture: + - Shufflenet V2 + Paper: + URL: https://openaccess.thecvf.com/content_ECCV_2018/papers/Ningning_Light-weight_CNN_Architecture_ECCV_2018_paper.pdf + Title: "ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + README: configs/shufflenet_v2/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/shufflenet_v2.py#L134 + Version: v0.15.0 + +Models: + - Name: shufflenet-v2-1x_16xb64_in1k + Metadata: + FLOPs: 149000000 + Parameters: 2280000 + In Collection: Shufflenet V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 69.55 + Top 5 Accuracy: 88.92 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth + Config: configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py diff --git a/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py 
b/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py new file mode 100644 index 0000000..a106ab8 --- /dev/null +++ b/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/shufflenet_v2_1x.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs1024_linearlr_bn_nowd.py', + '../_base_/default_runtime.py' +] diff --git a/configs/simclr/README.md b/configs/simclr/README.md new file mode 100644 index 0000000..17d0de2 --- /dev/null +++ b/configs/simclr/README.md @@ -0,0 +1,87 @@ +# SimCLR + +> [A simple framework for contrastive learning of visual representations](https://arxiv.org/abs/2002.05709) + + + +## Abstract + +This paper presents SimCLR: a simple framework for contrastive learning of visual representations. We simplify recently proposed contrastive self-supervised learning algorithms without requiring specialized architectures or a memory bank. In order to understand what enables the contrastive prediction tasks to learn useful representations, we systematically study the major components of our framework. We show that (1) composition of data augmentations plays a critical role in defining effective predictive tasks, (2) introducing a learnable nonlinear transformation between the representation and the contrastive loss substantially improves the quality of the learned representations, and (3) contrastive learning benefits from larger batch sizes and more training steps compared to supervised learning. By combining these findings, we are able to considerably outperform previous methods for self-supervised and semi-supervised learning on ImageNet. A linear classifier trained on self-supervised representations learned by SimCLR achieves 76.5% top-1 accuracy, which is a 7% relative improvement over previous state-of-the-art, matching the performance of a supervised ResNet-50. + +
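The three ingredients highlighted above (paired augmentations, a learnable non-linear projection head, and a contrastive loss) fit in a few lines. The sketch below is a minimal NT-Xent formulation and is illustrative only; it is not the `NonLinearNeck`/`ContrastiveHead` modules used by the configs in this patch. The 2048-2048-128 projection sizes merely mirror those configs, and the batch size of 32 is a toy value.

```python
import torch
import torch.nn.functional as F

def nt_xent_loss(z1: torch.Tensor, z2: torch.Tensor, temperature: float = 0.1) -> torch.Tensor:
    """Minimal NT-Xent sketch. z1, z2: (N, D) projections of two views of the same N images."""
    n = z1.size(0)
    z = F.normalize(torch.cat([z1, z2], dim=0), dim=1)           # (2N, D), unit-norm
    sim = z @ z.t() / temperature                                # scaled cosine similarities
    sim = sim.masked_fill(torch.eye(2 * n, dtype=torch.bool), float('-inf'))  # drop self-pairs
    targets = torch.cat([torch.arange(n) + n, torch.arange(n)])  # view i is positive with i +/- N
    return F.cross_entropy(sim, targets)

projector = torch.nn.Sequential(                                 # toy non-linear projection head
    torch.nn.Linear(2048, 2048), torch.nn.ReLU(), torch.nn.Linear(2048, 128))
h1, h2 = torch.rand(32, 2048), torch.rand(32, 2048)              # stand-ins for backbone features
print(nt_xent_loss(projector(h1), projector(h2)).item())
```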
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('resnet50_simclr-200e-pre_8xb512-linear-coslr-90e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('simclr_resnet50_16xb256-coslr-200e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/simclr/simclr_resnet50_16xb256-coslr-200e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/simclr/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-f12c0457.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :---------------------------------------- | :--------: | :-------: | :--------------------------------------------------: | :--------------------------------------------------------------------------------------: | +| `simclr_resnet50_16xb256-coslr-200e_in1k` | 27.97 | 4.11 | [config](simclr_resnet50_16xb256-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-200e_in1k/simclr_resnet50_16xb256-coslr-200e_in1k_20220825-4d9cce50.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-200e_in1k/simclr_resnet50_16xb256-coslr-200e_in1k_20220825-4d9cce50.json) | +| `simclr_resnet50_16xb256-coslr-800e_in1k` | 27.97 | 4.11 | [config](simclr_resnet50_16xb256-coslr-800e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-800e_in1k/simclr_resnet50_16xb256-coslr-800e_in1k_20220825-85fcc4de.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-800e_in1k/simclr_resnet50_16xb256-coslr-800e_in1k_20220825-85fcc4de.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `resnet50_simclr-200e-pre_8xb512-linear-coslr-90e_in1k` | [SIMCLR 200-Epochs](https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-200e_in1k/simclr_resnet50_16xb256-coslr-200e_in1k_20220825-4d9cce50.pth) | 25.56 | 4.11 | 66.90 | [config](benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-f12c0457.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-f12c0457.json) | +| `resnet50_simclr-800e-pre_8xb512-linear-coslr-90e_in1k` | [SIMCLR 
800-Epochs](https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-800e_in1k/simclr_resnet50_16xb256-coslr-800e_in1k_20220825-85fcc4de.pth) | 25.56 | 4.11 | 69.20 | [config](benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-800e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-b80ae1e5.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-800e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-b80ae1e5.json) | + +## Citation + +```bibtex +@inproceedings{chen2020simple, + title={A simple framework for contrastive learning of visual representations}, + author={Chen, Ting and Kornblith, Simon and Norouzi, Mohammad and Hinton, Geoffrey}, + booktitle={ICML}, + year={2020}, +} +``` diff --git a/configs/simclr/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py b/configs/simclr/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py new file mode 100644 index 0000000..2b5074c --- /dev/null +++ b/configs/simclr/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py @@ -0,0 +1,18 @@ +_base_ = [ + '../../_base_/models/resnet50.py', + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_lars_coslr_90e.py', + '../../_base_/default_runtime.py', +] + +model = dict( + backbone=dict( + frozen_stages=4, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.'))) + +# dataset summary +train_dataloader = dict(batch_size=512) + +# runtime settings +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) diff --git a/configs/simclr/metafile.yml b/configs/simclr/metafile.yml new file mode 100644 index 0000000..23c31ed --- /dev/null +++ b/configs/simclr/metafile.yml @@ -0,0 +1,72 @@ +Collections: + - Name: SimCLR + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - LARS + Training Resources: 8x V100 GPUs (b256), 16x A100-80G GPUs (b4096) + Architecture: + - ResNet + - SimCLR + Paper: + Title: A simple framework for contrastive learning of visual representations + URL: https://arxiv.org/abs/2002.05709 + README: configs/simclr/README.md + +Models: + - Name: simclr_resnet50_16xb256-coslr-200e_in1k + Metadata: + Epochs: 200 + Batch Size: 4096 + FLOPs: 4109364224 + Parameters: 27968832 + Training Data: ImageNet-1k + In Collection: SimCLR + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-200e_in1k/simclr_resnet50_16xb256-coslr-200e_in1k_20220825-4d9cce50.pth + Config: configs/simclr/simclr_resnet50_16xb256-coslr-200e_in1k.py + Downstream: + - resnet50_simclr-200e-pre_8xb512-linear-coslr-90e_in1k + - Name: simclr_resnet50_16xb256-coslr-800e_in1k + Metadata: + Epochs: 200 + Batch Size: 4096 + FLOPs: 4109364224 + Parameters: 27968832 + Training Data: ImageNet-1k + In Collection: SimCLR + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-800e_in1k/simclr_resnet50_16xb256-coslr-800e_in1k_20220825-85fcc4de.pth + Config: configs/simclr/simclr_resnet50_16xb256-coslr-800e_in1k.py + Downstream: + - resnet50_simclr-800e-pre_8xb512-linear-coslr-90e_in1k + - Name: resnet50_simclr-200e-pre_8xb512-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 4096 + FLOPs: 4109464576 + Parameters: 25557032 + Training Data: ImageNet-1k + In Collection: SimCLR + Results: + - Task: Image 
Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 66.9 + Weights: https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-f12c0457.pth + Config: configs/simclr/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py + - Name: resnet50_simclr-800e-pre_8xb512-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 4096 + FLOPs: 4109464576 + Parameters: 25557032 + Training Data: ImageNet-1k + In Collection: SimCLR + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 69.2 + Weights: https://download.openmmlab.com/mmselfsup/1.x/simclr/simclr_resnet50_16xb256-coslr-800e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-b80ae1e5.pth + Config: configs/simclr/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py diff --git a/configs/simclr/simclr_resnet50_16xb256-coslr-200e_in1k.py b/configs/simclr/simclr_resnet50_16xb256-coslr-200e_in1k.py new file mode 100644 index 0000000..b48d5b3 --- /dev/null +++ b/configs/simclr/simclr_resnet50_16xb256-coslr-200e_in1k.py @@ -0,0 +1,46 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_simclr.py', + '../_base_/schedules/imagenet_lars_coslr_200e.py', + '../_base_/default_runtime.py', +] + +# dataset settings +train_dataloader = dict(batch_size=256) + +# model settings +model = dict( + type='SimCLR', + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=True), + neck=dict( + type='NonLinearNeck', # SimCLR non-linear neck + in_channels=2048, + hid_channels=2048, + out_channels=128, + num_layers=2, + with_avg_pool=True), + head=dict( + type='ContrastiveHead', + loss=dict(type='CrossEntropyLoss'), + temperature=0.1), +) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='LARS', lr=4.8, momentum=0.9, weight_decay=1e-6), + paramwise_cfg=dict( + custom_keys={ + 'bn': dict(decay_mult=0, lars_exclude=True), + 'bias': dict(decay_mult=0, lars_exclude=True), + # bn layer in ResNet block downsample module + 'downsample.1': dict(decay_mult=0, lars_exclude=True), + })) + +# runtime settings +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) diff --git a/configs/simclr/simclr_resnet50_16xb256-coslr-800e_in1k.py b/configs/simclr/simclr_resnet50_16xb256-coslr-800e_in1k.py new file mode 100644 index 0000000..478ef0c --- /dev/null +++ b/configs/simclr/simclr_resnet50_16xb256-coslr-800e_in1k.py @@ -0,0 +1,57 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_simclr.py', + '../_base_/schedules/imagenet_lars_coslr_200e.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='SimCLR', + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=True), + neck=dict( + type='NonLinearNeck', # SimCLR non-linear neck + in_channels=2048, + hid_channels=2048, + out_channels=128, + num_layers=2, + with_avg_pool=True), + head=dict( + type='ContrastiveHead', + loss=dict(type='CrossEntropyLoss'), + temperature=0.1), +) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='LARS', lr=4.8, momentum=0.9, weight_decay=1e-6), + paramwise_cfg=dict( + custom_keys={ + 'bn': dict(decay_mult=0, lars_exclude=True), + 'bias': dict(decay_mult=0, lars_exclude=True), + # bn layer in ResNet block downsample module + 'downsample.1': 
dict(decay_mult=0, lars_exclude=True), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', T_max=790, by_epoch=True, begin=10, end=800) +] + +# runtime settings +train_cfg = dict(max_epochs=800) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) diff --git a/configs/simclr/simclr_resnet50_8xb32-coslr-200e_in1k.py b/configs/simclr/simclr_resnet50_8xb32-coslr-200e_in1k.py new file mode 100644 index 0000000..36a1445 --- /dev/null +++ b/configs/simclr/simclr_resnet50_8xb32-coslr-200e_in1k.py @@ -0,0 +1,47 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_simclr.py', + '../_base_/schedules/imagenet_lars_coslr_200e.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='SimCLR', + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=True), + neck=dict( + type='NonLinearNeck', # SimCLR non-linear neck + in_channels=2048, + hid_channels=2048, + out_channels=128, + num_layers=2, + with_avg_pool=True), + head=dict( + type='ContrastiveHead', + loss=dict(type='CrossEntropyLoss'), + temperature=0.1), +) + +# optimizer +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='LARS', lr=0.3, momentum=0.9, weight_decay=1e-6), + paramwise_cfg=dict( + custom_keys={ + 'bn': dict(decay_mult=0, lars_exclude=True), + 'bias': dict(decay_mult=0, lars_exclude=True), + # bn layer in ResNet block downsample module + 'downsample.1': dict(decay_mult=0, lars_exclude=True), + })) + +# runtime settings +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=256) diff --git a/configs/simmim/README.md b/configs/simmim/README.md new file mode 100644 index 0000000..3e44b07 --- /dev/null +++ b/configs/simmim/README.md @@ -0,0 +1,90 @@ +# SimMIM + +> [SimMIM: A Simple Framework for Masked Image Modeling](https://arxiv.org/abs/2111.09886) + + + +## Abstract + +This paper presents SimMIM, a simple framework for masked image modeling. We simplify recently proposed related approaches without special designs such as blockwise masking and tokenization via discrete VAE or clustering. To study what let the masked image modeling task learn good representations, we systematically study the major components in our framework, and find that simple designs of each component have revealed very strong representation learning performance: 1) random masking of the input image with a moderately large masked patch size (e.g., 32) makes a strong pre-text task; 2) predicting raw pixels of RGB values by direct regression performs no worse than the patch classification approaches with complex designs; 3) the prediction head can be as light as a linear layer, with no worse performance than heavier ones. Using ViT-B, our approach achieves 83.8% top-1 fine-tuning accuracy on ImageNet-1K by pre-training also on this dataset, surpassing previous best approach by +0.6%. When applied on a larger model of about 650 million parameters, SwinV2H, it achieves 87.1% top-1 accuracy on ImageNet-1K using only ImageNet-1K data. 
We also leverage this approach to facilitate the training of a 3B model (SwinV2-G): using 40× less data than in previous practice, we achieve state-of-the-art results on four representative vision benchmarks. The code and models will be publicly available at https://github.com/microsoft/SimMIM. + +
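The recipe summarised above (large random mask patches, raw-pixel regression, a light prediction head) can be sketched end to end in a few lines. The example below is illustrative only: a 1x1 convolution stands in for the real encoder plus linear decoder, and the 192px input, 32px mask patch, and roughly 60% mask ratio are assumptions for the toy that loosely echo the SimMIM configs added later in this patch.

```python
import torch
import torch.nn.functional as F

# Toy sketch of the masked-image-modeling recipe described above; not the
# mmpretrain SimMIM modules. Assumed values: 192px input, 32px mask patches,
# ~60% mask ratio, and a 1x1 conv standing in for encoder + linear decoder.
img = torch.rand(2, 3, 192, 192)
mask_patch = 32
grid = 192 // mask_patch                                       # 6 x 6 grid of mask patches
mask = (torch.rand(2, 1, grid, grid) < 0.6).float()            # 1 = masked patch
mask = F.interpolate(mask, scale_factor=mask_patch, mode='nearest')  # upsample to pixel level

masked_img = img * (1 - mask)                                  # zeroing stands in for the
                                                               # learnable mask token
pred = torch.nn.Conv2d(3, 3, kernel_size=1)(masked_img)        # "encoder + linear head"

# L1 regression of raw pixel values, averaged over masked pixels only
loss = (F.l1_loss(pred, img, reduction='none') * mask).sum() / (mask.sum() * 3 + 1e-8)
print(loss.item())
```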
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('swin-base-w6_simmim-100e-pre_8xb256-coslr-100e_in1k-192px', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/simmim/simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px.py +``` + +Test: + +```shell +python tools/test.py configs/simmim/benchmarks/swin-base-w6_8xb256-coslr-100e_in1k-192px.py https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192/swin-base_ft-8xb256-coslr-100e_in1k/swin-base_ft-8xb256-coslr-100e_in1k_20220829-9cf23aa1.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :-------------------------------------------------------- | :--------: | :-------: | :-----------------------------------------------------------: | :-------------------------------------------------------------: | +| `simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px` | 89.87 | 18.83 | [config](simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192_20220829-0e15782d.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192_20220829-0e15782d.json) | +| `simmim_swin-base-w6_16xb128-amp-coslr-800e_in1k-192px` | 89.87 | 18.83 | [config](simmim_swin-base-w6_16xb128-amp-coslr-800e_in1k-192px.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192_20220916-a0e931ac.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192_20220916-a0e931ac.json) | +| `simmim_swin-large-w12_16xb128-amp-coslr-800e_in1k-192px` | 199.92 | 55.85 | [config](simmim_swin-large-w12_16xb128-amp-coslr-800e_in1k-192px.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192_20220916-4ad216d3.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192_20220916-4ad216d3.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `swin-base-w6_simmim-100e-pre_8xb256-coslr-100e_in1k-192px` | [SIMMIM 
100-Epochs](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192_20220829-0e15782d.pth) | 87.75 | 11.30 | 82.70 | [config](benchmarks/swin-base-w6_8xb256-coslr-100e_in1k-192px.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192/swin-base_ft-8xb256-coslr-100e_in1k/swin-base_ft-8xb256-coslr-100e_in1k_20220829-9cf23aa1.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192/swin-base_ft-8xb256-coslr-100e_in1k/swin-base_ft-8xb256-coslr-100e_in1k_20220829-9cf23aa1.json) | +| `swin-base-w7_simmim-100e-pre_8xb256-coslr-100e_in1k` | [SIMMIM 100-Epochs](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192_20220829-0e15782d.pth) | 87.77 | 15.47 | 83.50 | [config](benchmarks/swin-base-w7_8xb256-coslr-100e_in1k.py) | N/A | +| `swin-base-w6_simmim-800e-pre_8xb256-coslr-100e_in1k-192px` | [SIMMIM 800-Epochs](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192_20220916-a0e931ac.pth) | 87.77 | 15.47 | 83.80 | [config](benchmarks/swin-base-w7_8xb256-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192/swin-base_ft-8xb256-coslr-100e_in1k-224/swin-base_ft-8xb256-coslr-100e_in1k-224_20221208-155cc6e6.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192/swin-base_ft-8xb256-coslr-100e_in1k-224/swin-base_ft-8xb256-coslr-100e_in1k-224_20221208-155cc6e6.json) | +| `swin-large-w14_simmim-800e-pre_8xb256-coslr-100e_in1k` | [SIMMIM 800-Epochs](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192_20220916-4ad216d3.pth) | 196.85 | 38.85 | 84.80 | [config](benchmarks/swin-large-w14_8xb256-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192/swin-large_ft-8xb256-coslr-ws14-100e_in1k-224/swin-large_ft-8xb256-coslr-ws14-100e_in1k-224_20220916-d4865790.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192/swin-large_ft-8xb256-coslr-ws14-100e_in1k-224/swin-large_ft-8xb256-coslr-ws14-100e_in1k-224_20220916-d4865790.json) | + +## Citation + +```bibtex +@inproceedings{xie2021simmim, + title={SimMIM: A Simple Framework for Masked Image Modeling}, + author={Xie, Zhenda and Zhang, Zheng and Cao, Yue and Lin, Yutong and Bao, Jianmin and Yao, Zhuliang and Dai, Qi and Hu, Han}, + booktitle={International Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2022} +} +``` diff --git a/configs/simmim/benchmarks/swin-base-w6_8xb256-coslr-100e_in1k-192px.py b/configs/simmim/benchmarks/swin-base-w6_8xb256-coslr-100e_in1k-192px.py new file mode 100644 index 0000000..47c4fa1 --- /dev/null +++ b/configs/simmim/benchmarks/swin-base-w6_8xb256-coslr-100e_in1k-192px.py @@ -0,0 +1,59 @@ +_base_ = [ + '../../_base_/models/swin_transformer/base_224.py', + '../../_base_/datasets/imagenet_bs256_swin_192.py', + '../../_base_/default_runtime.py' +] + +# model settings +model = dict( + backbone=dict( + img_size=192, + drop_path_rate=0.1, + 
stage_cfgs=dict(block_cfgs=dict(window_size=6)), + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.'))) + +# optimizer settings +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict(type='AdamW', lr=5e-3, weight_decay=0.05), + clip_grad=dict(max_norm=5.0), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.9, + custom_keys={ + '.norm': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=2.5e-7 / 1.25e-3, + by_epoch=True, + begin=0, + end=20, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=80, + eta_min=2.5e-7 * 2048 / 512, + by_epoch=True, + begin=20, + end=100, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100) +val_cfg = dict() +test_cfg = dict() + +default_hooks = dict( + # save checkpoint per epoch. + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3), + logger=dict(type='LoggerHook', interval=100)) + +randomness = dict(seed=0) diff --git a/configs/simmim/benchmarks/swin-base-w7_8xb256-coslr-100e_in1k.py b/configs/simmim/benchmarks/swin-base-w7_8xb256-coslr-100e_in1k.py new file mode 100644 index 0000000..f7325f0 --- /dev/null +++ b/configs/simmim/benchmarks/swin-base-w7_8xb256-coslr-100e_in1k.py @@ -0,0 +1,102 @@ +_base_ = [ + '../../_base_/models/swin_transformer/base_224.py', + '../../_base_/datasets/imagenet_bs256_swin_192.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# model settings +model = dict( + backbone=dict( + img_size=224, + drop_path_rate=0.1, + stage_cfgs=dict(block_cfgs=dict(window_size=7)), + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.'))) + +# optimizer settings +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict(type='AdamW', lr=5e-3, weight_decay=0.05), + clip_grad=dict(max_norm=5.0), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.9, + custom_keys={ + '.norm': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=2.5e-7 / 1.25e-3, + by_epoch=True, + 
begin=0, + end=20, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=80, + eta_min=2.5e-7 * 2048 / 512, + by_epoch=True, + begin=20, + end=100, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100) +val_cfg = dict() +test_cfg = dict() + +default_hooks = dict( + # save checkpoint per epoch. + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3), + logger=dict(type='LoggerHook', interval=100)) + +randomness = dict(seed=0) diff --git a/configs/simmim/benchmarks/swin-large-w14_8xb256-coslr-100e_in1k.py b/configs/simmim/benchmarks/swin-large-w14_8xb256-coslr-100e_in1k.py new file mode 100644 index 0000000..a6eafd8 --- /dev/null +++ b/configs/simmim/benchmarks/swin-large-w14_8xb256-coslr-100e_in1k.py @@ -0,0 +1,105 @@ +_base_ = [ + '../../_base_/models/swin_transformer/base_224.py', + '../../_base_/datasets/imagenet_bs256_swin_192.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type='PackInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# model settings +model = dict( + backbone=dict( + arch='large', + img_size=224, + drop_path_rate=0.2, + stage_cfgs=dict(block_cfgs=dict(window_size=14)), + pad_small_map=True, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + head=dict(in_channels=1536)) + +# optimizer settings +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict(type='AdamW', lr=5e-3, weight_decay=0.05), + clip_grad=dict(max_norm=5.0), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.7, + custom_keys={ + '.norm': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=2.5e-7 / 1.25e-3, + by_epoch=True, + begin=0, + end=20, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=100, + eta_min=1e-6, + by_epoch=True, + begin=20, + end=100, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100) +val_cfg = dict() +test_cfg = dict() + +default_hooks = dict( + # save checkpoint per epoch. 
+ checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3), + logger=dict(type='LoggerHook', interval=100)) + +randomness = dict(seed=0) diff --git a/configs/simmim/metafile.yml b/configs/simmim/metafile.yml new file mode 100644 index 0000000..19d9446 --- /dev/null +++ b/configs/simmim/metafile.yml @@ -0,0 +1,115 @@ +Collections: + - Name: SimMIM + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + Training Resources: 16x A100 GPUs + Architecture: + - Swin + Paper: + Title: 'SimMIM: A Simple Framework for Masked Image Modeling' + URL: https://arxiv.org/abs/2111.09886 + README: configs/simmim/README.md + +Models: + - Name: simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px + Metadata: + Epochs: 100 + Batch Size: 2048 + FLOPs: 18832161792 + Parameters: 89874104 + Training Data: ImageNet-1k + In Collection: SimMIM + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192_20220829-0e15782d.pth + Config: configs/simmim/simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px.py + Downstream: + - swin-base-w6_simmim-100e-pre_8xb256-coslr-100e_in1k-192px + - swin-base-w7_simmim-100e-pre_8xb256-coslr-100e_in1k + - Name: simmim_swin-base-w6_16xb128-amp-coslr-800e_in1k-192px + Metadata: + Epochs: 100 + Batch Size: 2048 + FLOPs: 18832161792 + Parameters: 89874104 + Training Data: ImageNet-1k + In Collection: SimMIM + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192_20220916-a0e931ac.pth + Config: configs/simmim/simmim_swin-base-w6_16xb128-amp-coslr-800e_in1k-192px.py + Downstream: + - swin-base-w6_simmim-800e-pre_8xb256-coslr-100e_in1k-192px + - Name: simmim_swin-large-w12_16xb128-amp-coslr-800e_in1k-192px + Metadata: + Epochs: 100 + Batch Size: 2048 + FLOPs: 55849130496 + Parameters: 199920372 + Training Data: ImageNet-1k + In Collection: SimMIM + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192_20220916-4ad216d3.pth + Config: configs/simmim/simmim_swin-large-w12_16xb128-amp-coslr-800e_in1k-192px.py + Downstream: + - swin-large-w14_simmim-800e-pre_8xb256-coslr-100e_in1k + - Name: swin-base-w6_simmim-100e-pre_8xb256-coslr-100e_in1k-192px + Metadata: + Epochs: 100 + Batch Size: 2048 + FLOPs: 11303976960 + Parameters: 87750176 + Training Data: ImageNet-1k + In Collection: SimMIM + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.7 + Weights: https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_8xb256-amp-coslr-100e_in1k-192/swin-base_ft-8xb256-coslr-100e_in1k/swin-base_ft-8xb256-coslr-100e_in1k_20220829-9cf23aa1.pth + Config: configs/simmim/benchmarks/swin-base-w6_8xb256-coslr-100e_in1k-192px.py + - Name: swin-base-w7_simmim-100e-pre_8xb256-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 2048 + FLOPs: 15466852352 + Parameters: 87768224 + Training Data: ImageNet-1k + In Collection: SimMIM + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.5 + Weights: null + Config: configs/simmim/benchmarks/swin-base-w7_8xb256-coslr-100e_in1k.py + - Name: swin-base-w6_simmim-800e-pre_8xb256-coslr-100e_in1k-192px + Metadata: + Epochs: 100 + Batch Size: 2048 + FLOPs: 15466852352 + Parameters: 87768224 + 
Training Data: ImageNet-1k + In Collection: SimMIM + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.8 + Weights: https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-base_16xb128-amp-coslr-800e_in1k-192/swin-base_ft-8xb256-coslr-100e_in1k-224/swin-base_ft-8xb256-coslr-100e_in1k-224_20221208-155cc6e6.pth + Config: configs/simmim/benchmarks/swin-base-w7_8xb256-coslr-100e_in1k.py + - Name: swin-large-w14_simmim-800e-pre_8xb256-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 2048 + FLOPs: 38853083136 + Parameters: 196848316 + Training Data: ImageNet-1k + In Collection: SimMIM + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.8 + Weights: https://download.openmmlab.com/mmselfsup/1.x/simmim/simmim_swin-large_16xb128-amp-coslr-800e_in1k-192/swin-large_ft-8xb256-coslr-ws14-100e_in1k-224/swin-large_ft-8xb256-coslr-ws14-100e_in1k-224_20220916-d4865790.pth + Config: configs/simmim/benchmarks/swin-large-w14_8xb256-coslr-100e_in1k.py diff --git a/configs/simmim/simmim_swin-base-w6_16xb128-amp-coslr-100e_in1k-192px.py b/configs/simmim/simmim_swin-base-w6_16xb128-amp-coslr-100e_in1k-192px.py new file mode 100644 index 0000000..ed9dfdb --- /dev/null +++ b/configs/simmim/simmim_swin-base-w6_16xb128-amp-coslr-100e_in1k-192px.py @@ -0,0 +1,4 @@ +_base_ = 'simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px.py' + +# dataset 16 GPUs x 128 +train_dataloader = dict(batch_size=128) diff --git a/configs/simmim/simmim_swin-base-w6_16xb128-amp-coslr-800e_in1k-192px.py b/configs/simmim/simmim_swin-base-w6_16xb128-amp-coslr-800e_in1k-192px.py new file mode 100644 index 0000000..560714b --- /dev/null +++ b/configs/simmim/simmim_swin-base-w6_16xb128-amp-coslr-800e_in1k-192px.py @@ -0,0 +1,64 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs256_simmim_192.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='SimMIM', + backbone=dict( + type='SimMIMSwinTransformer', + arch='base', + img_size=192, + stage_cfgs=dict(block_cfgs=dict(window_size=6))), + neck=dict( + type='SimMIMLinearDecoder', in_channels=128 * 2**3, encoder_stride=32), + head=dict( + type='SimMIMHead', + patch_size=4, + loss=dict(type='PixelReconstructionLoss', criterion='L1', channel=3))) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', + lr=1e-4 * 2048 / 512, + betas=(0.9, 0.999), + weight_decay=0.05), + clip_grad=dict(max_norm=5.0), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=5e-7 / 1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='MultiStepLR', + milestones=[700], + by_epoch=True, + begin=10, + end=800, + convert_to_iter_based=True) +] + +# runtime +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/simmim/simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px.py b/configs/simmim/simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px.py new file mode 100644 index 0000000..a0be144 --- /dev/null +++ b/configs/simmim/simmim_swin-base-w6_8xb256-amp-coslr-100e_in1k-192px.py @@ -0,0 +1,65 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs256_simmim_192.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='SimMIM', + backbone=dict( + type='SimMIMSwinTransformer', + arch='base', + img_size=192, + stage_cfgs=dict(block_cfgs=dict(window_size=6))), + neck=dict( + type='SimMIMLinearDecoder', in_channels=128 * 2**3, encoder_stride=32), + head=dict( + type='SimMIMHead', + patch_size=4, + loss=dict(type='PixelReconstructionLoss', criterion='L1', channel=3))) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', + lr=2e-4 * 2048 / 512, + betas=(0.9, 0.999), + weight_decay=0.05), + clip_grad=dict(max_norm=5.0), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-6 / 2e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=90, + eta_min=1e-5 * 2048 / 512, + by_epoch=True, + begin=10, + end=100, + convert_to_iter_based=True) +] + +# runtime +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/simmim/simmim_swin-large-w12_16xb128-amp-coslr-800e_in1k-192px.py b/configs/simmim/simmim_swin-large-w12_16xb128-amp-coslr-800e_in1k-192px.py new file mode 100644 index 0000000..0563023 --- /dev/null +++ b/configs/simmim/simmim_swin-large-w12_16xb128-amp-coslr-800e_in1k-192px.py @@ -0,0 +1,65 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs256_simmim_192.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='SimMIM', + backbone=dict( + type='SimMIMSwinTransformer', + arch='large', + img_size=192, + stage_cfgs=dict(block_cfgs=dict(window_size=12)), + pad_small_map=True), + neck=dict( + type='SimMIMLinearDecoder', in_channels=192 * 2**3, encoder_stride=32), + head=dict( + type='SimMIMHead', + patch_size=4, + loss=dict(type='PixelReconstructionLoss', criterion='L1', channel=3))) + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', + lr=1e-4 * 2048 / 512, + betas=(0.9, 0.999), + weight_decay=0.05), + clip_grad=dict(max_norm=5.0), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'absolute_pos_embed': dict(decay_mult=0.), + 'relative_position_bias_table': dict(decay_mult=0.) 
+ })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=5e-7 / 1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='MultiStepLR', + milestones=[700], + by_epoch=True, + begin=10, + end=800, + convert_to_iter_based=True) +] + +# runtime +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/simsiam/README.md b/configs/simsiam/README.md new file mode 100644 index 0000000..117e45b --- /dev/null +++ b/configs/simsiam/README.md @@ -0,0 +1,87 @@ +# SimSiam + +> [Exploring simple siamese representation learning](https://arxiv.org/abs/2011.10566) + + + +## Abstract + +Siamese networks have become a common structure in various recent models for unsupervised visual representation learning. These models maximize the similarity between two augmentations of one image, subject to certain conditions for avoiding collapsing solutions. In this paper, we report surprising empirical results that simple Siamese networks can learn meaningful representations even using none of the following: (i) negative sample pairs, (ii) large batches, (iii) momentum encoders. Our experiments show that collapsing solutions do exist for the loss and structure, but a stop-gradient operation plays an essential role in preventing collapsing. We provide a hypothesis on the implication of stop-gradient, and further show proof-of-concept experiments verifying it. Our “SimSiam” method achieves competitive results on ImageNet and downstream tasks. We hope this simple baseline will motivate people to rethink the roles of Siamese architectures for unsupervised representation learning. + +
+ +
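+The collapse-avoidance trick described above amounts to a single `detach()` on the target branch. Below is a minimal PyTorch sketch of that symmetric stop-gradient loss with toy stand-in networks; it is an illustration of the idea only, not the mmpretrain `SimSiam`/`LatentPredictHead` implementation used by the configs in this folder.
+
+```python
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+def simsiam_loss(p1, z1, p2, z2):
+    """Symmetric negative cosine similarity; targets get stop-gradient via detach()."""
+    return -0.5 * (F.cosine_similarity(p1, z2.detach(), dim=-1).mean()
+                   + F.cosine_similarity(p2, z1.detach(), dim=-1).mean())
+
+
+# toy usage: random features stand in for two augmented views of the same images
+encoder = nn.Sequential(nn.Linear(32, 64), nn.ReLU(), nn.Linear(64, 64))    # backbone + projector
+predictor = nn.Sequential(nn.Linear(64, 16), nn.ReLU(), nn.Linear(16, 64))  # prediction MLP
+x1, x2 = torch.randn(8, 32), torch.randn(8, 32)
+z1, z2 = encoder(x1), encoder(x2)
+loss = simsiam_loss(predictor(z1), z1, predictor(z2), z2)
+loss.backward()  # gradients flow only through the prediction branches
+```
+
+Note that no negative pairs, momentum encoder, or large batch appears in the loss; the `detach()` is the only ingredient preventing the trivial constant solution.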
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('resnet50_simsiam-100e-pre_8xb512-linear-coslr-90e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('simsiam_resnet50_8xb32-coslr-100e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/simsiam/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-f53ba400.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :--------------------------------------- | :--------: | :-------: | :-------------------------------------------------: | :----------------------------------------------------------------------------------------: | +| `simsiam_resnet50_8xb32-coslr-100e_in1k` | 38.20 | 4.11 | [config](simsiam_resnet50_8xb32-coslr-100e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/simsiam_resnet50_8xb32-coslr-100e_in1k_20220825-d07cb2e6.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/simsiam_resnet50_8xb32-coslr-100e_in1k_20220825-d07cb2e6.json) | +| `simsiam_resnet50_8xb32-coslr-200e_in1k` | 38.20 | 4.11 | [config](simsiam_resnet50_8xb32-coslr-200e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/simsiam_resnet50_8xb32-coslr-200e_in1k_20220825-efe91299.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/simsiam_resnet50_8xb32-coslr-200e_in1k_20220825-efe91299.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `resnet50_simsiam-100e-pre_8xb512-linear-coslr-90e_in1k` | [SIMSIAM 100-Epochs](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/simsiam_resnet50_8xb32-coslr-100e_in1k_20220825-d07cb2e6.pth) | 25.56 | 4.11 | 68.30 | [config](benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-f53ba400.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-f53ba400.json) | +| `resnet50_simsiam-200e-pre_8xb512-linear-coslr-90e_in1k` | [SIMSIAM 
200-Epochs](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/simsiam_resnet50_8xb32-coslr-200e_in1k_20220825-efe91299.pth) | 25.56 | 4.11 | 69.80 | [config](benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-519b5135.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-519b5135.json) | + +## Citation + +```bibtex +@inproceedings{chen2021exploring, + title={Exploring simple siamese representation learning}, + author={Chen, Xinlei and He, Kaiming}, + booktitle={CVPR}, + year={2021} +} +``` diff --git a/configs/simsiam/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py b/configs/simsiam/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py new file mode 100644 index 0000000..2b5074c --- /dev/null +++ b/configs/simsiam/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py @@ -0,0 +1,18 @@ +_base_ = [ + '../../_base_/models/resnet50.py', + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_lars_coslr_90e.py', + '../../_base_/default_runtime.py', +] + +model = dict( + backbone=dict( + frozen_stages=4, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.'))) + +# dataset summary +train_dataloader = dict(batch_size=512) + +# runtime settings +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) diff --git a/configs/simsiam/metafile.yml b/configs/simsiam/metafile.yml new file mode 100644 index 0000000..40f6706 --- /dev/null +++ b/configs/simsiam/metafile.yml @@ -0,0 +1,72 @@ +Collections: + - Name: SimSiam + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + Paper: + Title: Exploring simple siamese representation learning + URL: https://arxiv.org/abs/2011.10566 + README: configs/simsiam/README.md + +Models: + - Name: simsiam_resnet50_8xb32-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 256 + FLOPs: 4109364224 + Parameters: 38199360 + Training Data: ImageNet-1k + In Collection: SimSiam + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/simsiam_resnet50_8xb32-coslr-100e_in1k_20220825-d07cb2e6.pth + Config: configs/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k.py + Downstream: + - resnet50_simsiam-100e-pre_8xb512-linear-coslr-90e_in1k + - Name: simsiam_resnet50_8xb32-coslr-200e_in1k + Metadata: + Epochs: 200 + Batch Size: 256 + FLOPs: 4109364224 + Parameters: 38199360 + Training Data: ImageNet-1k + In Collection: SimSiam + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/simsiam_resnet50_8xb32-coslr-200e_in1k_20220825-efe91299.pth + Config: configs/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k.py + Downstream: + - resnet50_simsiam-200e-pre_8xb512-linear-coslr-90e_in1k + - Name: resnet50_simsiam-100e-pre_8xb512-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 4096 + FLOPs: 4109464576 + Parameters: 25557032 + Training Data: ImageNet-1k + In Collection: SimSiam + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 68.3 + Weights: 
https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-f53ba400.pth + Config: configs/simsiam/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py + - Name: resnet50_simsiam-200e-pre_8xb512-linear-coslr-90e_in1k + Metadata: + Epochs: 90 + Batch Size: 4096 + FLOPs: 4109464576 + Parameters: 25557032 + Training Data: ImageNet-1k + In Collection: SimSiam + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 69.8 + Weights: https://download.openmmlab.com/mmselfsup/1.x/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k/resnet50_linear-8xb512-coslr-90e_in1k/resnet50_linear-8xb512-coslr-90e_in1k_20220825-519b5135.pth + Config: configs/simsiam/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py diff --git a/configs/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k.py b/configs/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k.py new file mode 100644 index 0000000..ad19af6 --- /dev/null +++ b/configs/simsiam/simsiam_resnet50_8xb32-coslr-100e_in1k.py @@ -0,0 +1,58 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_mocov2.py', + '../_base_/schedules/imagenet_sgd_coslr_200e.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='SimSiam', + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=True), + neck=dict( + type='NonLinearNeck', + in_channels=2048, + hid_channels=2048, + out_channels=2048, + num_layers=3, + with_last_bn_affine=False, + with_avg_pool=True), + head=dict( + type='LatentPredictHead', + loss=dict(type='CosineSimilarityLoss'), + predictor=dict( + type='NonLinearNeck', + in_channels=2048, + hid_channels=512, + out_channels=2048, + with_avg_pool=False, + with_last_bn=False, + with_last_bias=True)), +) + +# optimizer +# set base learning rate +lr = 0.05 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=lr, weight_decay=1e-4, momentum=0.9), + paramwise_cfg=dict(custom_keys={'predictor': dict(fix_lr=True)})) + +# learning rate scheduler +param_scheduler = [ + dict(type='CosineAnnealingLR', T_max=100, by_epoch=True, begin=0, end=100) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) + +# additional hooks +custom_hooks = [ + dict(type='SimSiamHook', priority='HIGH', fix_pred_lr=True, lr=lr) +] diff --git a/configs/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k.py b/configs/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k.py new file mode 100644 index 0000000..fa3b2bb --- /dev/null +++ b/configs/simsiam/simsiam_resnet50_8xb32-coslr-200e_in1k.py @@ -0,0 +1,52 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_mocov2.py', + '../_base_/schedules/imagenet_sgd_coslr_200e.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + type='SimSiam', + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=True), + neck=dict( + type='NonLinearNeck', + in_channels=2048, + hid_channels=2048, + out_channels=2048, + num_layers=3, + with_last_bn_affine=False, + with_avg_pool=True), + head=dict( + type='LatentPredictHead', + loss=dict(type='CosineSimilarityLoss'), + predictor=dict( + type='NonLinearNeck', + in_channels=2048, + hid_channels=512, + out_channels=2048, + with_avg_pool=False, + with_last_bn=False, + with_last_bias=True)), 
+) + +# optimizer +# set base learning rate +lr = 0.05 +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=lr, weight_decay=1e-4, momentum=0.9), + paramwise_cfg=dict(custom_keys={'predictor': dict(fix_lr=True)})) + +# runtime settings +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) + +# additional hooks +custom_hooks = [ + dict(type='SimSiamHook', priority='HIGH', fix_pred_lr=True, lr=lr) +] diff --git a/configs/spark/README.md b/configs/spark/README.md new file mode 100644 index 0000000..60f510e --- /dev/null +++ b/configs/spark/README.md @@ -0,0 +1,87 @@ +# SparK + +> [Designing BERT for Convolutional Networks: Sparse and Hierarchical Masked Modeling](https://arxiv.org/abs/2301.03580) + + + +## Abstract + +We identify and overcome two key obstacles in extending the success of BERT-style pre-training, or the masked image modeling, to convolutional networks (convnets): (i) convolution operation cannot handle irregular, random-masked input images; (ii) the single-scale nature of BERT pre-training is inconsistent with convnet's hierarchical structure. For (i), we treat unmasked pixels as sparse voxels of 3D point clouds and use sparse convolution to encode. This is the first use of sparse convolution for 2D masked modeling. For (ii), we develop a hierarchical decoder to reconstruct images from multi-scale encoded features. Our method called Sparse masKed modeling (SparK) is general: it can be used directly on any convolutional model without backbone modifications. We validate it on both classical (ResNet) and modern (ConvNeXt) models: on three downstream tasks, it surpasses both state-of-the-art contrastive learning and transformer-based masked modeling by similarly large margins (around +1.0%). Improvements on object detection and instance segmentation are more substantial (up to +3.5%), verifying the strong transferability of features learned. We also find its favorable scaling behavior by observing more gains on larger models. All this evidence reveals a promising future of generative pre-training on convnets. Codes and models are released at https://github.com/keyu-tian/SparK. + +
+ +
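+As a rough illustration of the two ideas above, the sketch below draws a random patch mask on the coarsest feature grid and re-applies it at every scale of a feature pyramid. The `mask_ratio=0.6` and downsample ratio of 32 are taken from the configs later in this patch; everything else (the dense zero-out standing in for real sparse convolution, the toy pyramid) is a simplification for illustration only.
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+def make_patch_mask(batch, input_size=224, downsample_ratio=32, mask_ratio=0.6):
+    """Random keep-mask on the coarsest grid (1 = visible patch, 0 = masked)."""
+    grid = input_size // downsample_ratio                      # 7x7 for the defaults above
+    ranks = torch.rand(batch, grid * grid).argsort(1).argsort(1)
+    keep = ranks >= int(grid * grid * mask_ratio)               # keep ~40% of the patches
+    return keep.float().view(batch, 1, grid, grid)
+
+
+def mask_feature_map(feat, coarse_mask):
+    """Zero out masked positions at this stage's resolution.
+
+    A dense stand-in for sparse convolution: values survive only at visible
+    positions, and the same coarse mask is shared by every scale of the hierarchy.
+    """
+    m = F.interpolate(coarse_mask, size=feat.shape[-2:], mode='nearest')
+    return feat * m
+
+
+# toy usage: a fake 4-stage ResNet-like pyramid for a 224x224 input
+mask = make_patch_mask(batch=2)
+for stride in (4, 8, 16, 32):
+    feat = torch.randn(2, 64, 224 // stride, 224 // stride)
+    print(stride, mask_feature_map(feat, mask).shape)
+```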
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('resnet50_spark-pre_300e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('spark_sparse-resnet50_800e_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/spark/benchmarks/resnet50_8xb256-coslr-300e_in1k.py https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/resnet50_8xb256-coslr-300e_in1k/resnet50_8xb256-coslr-300e_in1k_20230612-f86aab51.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :--------------------------------------- | :--------: | :-------: | :-------------------------------------------------------------------: | :----------------------------------------------------------------------: | +| `spark_sparse-resnet50_800e_in1k` | 37.97 | 4.10 | [config](spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k_20230612-e403c28f.pth) \| [log](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k_20230612-e403c28f.json) | +| `spark_sparse-convnextv2-tiny_800e_in1k` | 39.73 | 4.47 | [config](spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k_20230612-b0ea712e.pth) \| [log](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k_20230612-b0ea712e.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------------------ | :----------------------------------------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------------: | :-----------------------------------------: | +| `resnet50_spark-pre_300e_in1k` | [SPARK](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k_20230612-e403c28f.pth) | 23.52 | 1.31 | 80.10 | 94.90 | [config](benchmarks/resnet50_8xb256-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/resnet50_8xb256-coslr-300e_in1k/resnet50_8xb256-coslr-300e_in1k_20230612-f86aab51.pth) \| 
[log](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/resnet50_8xb256-coslr-300e_in1k/resnet50_8xb256-coslr-300e_in1k_20230612-f86aab51.json) | +| `convnextv2-tiny_spark-pre_300e_in1k` | [SPARK](https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k_20230612-b0ea712e.pth) | 28.64 | 4.47 | 82.80 | 96.30 | [config](benchmarks/convnextv2-tiny_8xb256-coslr-300e_in1k.py) | [model](https://download.openmmlab.com/mmpretrain/v1.0/spark//spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/convnextv2-tiny_8xb256-coslr-300e_in1k/convnextv2-tiny_8xb256-coslr-300e_in1k_20230612-ffc78743.pth) \| [log](https://download.openmmlab.com/mmpretrain/v1.0/spark//spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/convnextv2-tiny_8xb256-coslr-300e_in1k/convnextv2-tiny_8xb256-coslr-300e_in1k_20230612-ffc78743.json) | + +## Citation + +```bibtex +@Article{tian2023designing, + author = {Keyu Tian and Yi Jiang and Qishuai Diao and Chen Lin and Liwei Wang and Zehuan Yuan}, + title = {Designing BERT for Convolutional Networks: Sparse and Hierarchical Masked Modeling}, + journal = {arXiv:2301.03580}, + year = {2023}, +} +``` diff --git a/configs/spark/benchmarks/convnextv2-tiny_8xb256-coslr-300e_in1k.py b/configs/spark/benchmarks/convnextv2-tiny_8xb256-coslr-300e_in1k.py new file mode 100644 index 0000000..95ef81f --- /dev/null +++ b/configs/spark/benchmarks/convnextv2-tiny_8xb256-coslr-300e_in1k.py @@ -0,0 +1,122 @@ +_base_ = [ + '../../_base_/datasets/imagenet_bs64_swin_224.py', + '../../_base_/default_runtime.py', +] + +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='NumpyToPIL', to_rgb=True), + dict( + type='torchvision/TrivialAugmentWide', + num_magnitude_bins=31, + interpolation='bicubic', + fill=None), + dict(type='PILToNumpy', to_bgr=True), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +train_dataloader = dict( + dataset=dict(pipeline=train_pipeline), + sampler=dict(type='RepeatAugSampler', shuffle=True), +) + +# Model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='ConvNeXt', + arch='tiny', + drop_path_rate=0.1, + layer_scale_init_value=0., + use_grn=True, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + init_cfg=dict(type='TruncNormal', layer='Linear', std=.02, bias=0.), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +custom_hooks = [ + dict( + type='EMAHook', + momentum=1e-4, + evaluate_on_origin=True, + priority='ABOVE_NORMAL') +] + +# schedule settings +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=3.2e-3, betas=(0.9, 0.999), 
weight_decay=0.05), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.7, + norm_decay_mult=0.0, + bias_decay_mult=0.0, + flat_decay_mult=0.0)) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=20, + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=280, + eta_min=1.0e-5, + by_epoch=True, + begin=20, + end=300) +] +train_cfg = dict(by_epoch=True, max_epochs=300) +val_cfg = dict() +test_cfg = dict() + +default_hooks = dict( + # only keeps the latest 2 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=2)) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/spark/benchmarks/resnet50_8xb256-coslr-300e_in1k.py b/configs/spark/benchmarks/resnet50_8xb256-coslr-300e_in1k.py new file mode 100644 index 0000000..5d7527c --- /dev/null +++ b/configs/spark/benchmarks/resnet50_8xb256-coslr-300e_in1k.py @@ -0,0 +1,107 @@ +_base_ = [ + '../../_base_/models/resnet50.py', + '../../_base_/datasets/imagenet_bs256_rsb_a12.py', + '../../_base_/default_runtime.py' +] +# modification is based on ResNets RSB settings +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='NumpyToPIL', to_rgb=True), + dict( + type='torchvision/TrivialAugmentWide', + num_magnitude_bins=31, + interpolation='bicubic', + fill=None), + dict(type='PILToNumpy', to_bgr=True), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +# model settings +model = dict( + backbone=dict( + norm_cfg=dict(type='SyncBN', requires_grad=True), + drop_path_rate=0.05, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.')), + head=dict( + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, use_sigmoid=True)), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.1), + dict(type='CutMix', alpha=1.0) + ])) + +# schedule settings +# optimizer +optim_wrapper = dict( + optimizer=dict( + type='Lamb', + lr=0.016, + weight_decay=0.02, + ), + constructor='LearningRateDecayOptimWrapperConstructor', + paramwise_cfg=dict( + layer_decay_rate=0.7, + norm_decay_mult=0.0, + bias_decay_mult=0.0, + flat_decay_mult=0.0)) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=295, + eta_min=1.0e-6, + by_epoch=True, + begin=5, + end=300) +] +train_cfg = dict(by_epoch=True, max_epochs=300) +val_cfg = dict() +test_cfg = dict() + +default_hooks = dict( + # only keeps the latest 2 checkpoints + 
checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=2)) +# randomness +randomness = dict(seed=0, diff_rank_seed=True) + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/configs/spark/metafile.yml b/configs/spark/metafile.yml new file mode 100644 index 0000000..81ca3a7 --- /dev/null +++ b/configs/spark/metafile.yml @@ -0,0 +1,73 @@ +Collections: + - Name: SparK + Metadata: + Architecture: + - Dense Connections + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + Paper: + Title: 'Designing BERT for Convolutional Networks: Sparse and Hierarchical Masked Modeling' + URL: https://arxiv.org/abs/2301.03580 + README: configs/spark/README.md + Code: + URL: null + Version: null + +Models: + - Name: spark_sparse-resnet50_800e_in1k + Metadata: + FLOPs: 4100000000 + Parameters: 37971000 + Training Data: + - ImageNet-1k + In Collection: SparK + Results: null + Weights: https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k_20230612-e403c28f.pth + Config: configs/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k.py + Downstream: + - resnet50_spark-pre_300e_in1k + - Name: resnet50_spark-pre_300e_in1k + Metadata: + FLOPs: 1310000000 + Parameters: 23520000 + Training Data: + - ImageNet-1k + In Collection: SparK + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.1 + Top 5 Accuracy: 94.9 + Task: Image Classification + Weights: https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k/resnet50_8xb256-coslr-300e_in1k/resnet50_8xb256-coslr-300e_in1k_20230612-f86aab51.pth + Config: configs/spark/benchmarks/resnet50_8xb256-coslr-300e_in1k.py + + - Name: spark_sparse-convnextv2-tiny_800e_in1k + Metadata: + FLOPs: 4470000000 + Parameters: 39732000 + Training Data: + - ImageNet-1k + In Collection: SparK + Results: null + Weights: https://download.openmmlab.com/mmpretrain/v1.0/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k_20230612-b0ea712e.pth + Config: configs/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k.py + Downstream: + - convnextv2-tiny_spark-pre_300e_in1k + - Name: convnextv2-tiny_spark-pre_300e_in1k + Metadata: + FLOPs: 4469631744 + Parameters: 28635496 + Training Data: + - ImageNet-1k + In Collection: SparK + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.8 + Top 5 Accuracy: 96.3 + Task: Image Classification + Weights: https://download.openmmlab.com/mmpretrain/v1.0/spark//spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k/convnextv2-tiny_8xb256-coslr-300e_in1k/convnextv2-tiny_8xb256-coslr-300e_in1k_20230612-ffc78743.pth + Config: configs/spark/benchmarks/convnextv2-tiny_8xb256-coslr-300e_in1k.py diff --git a/configs/spark/spark_sparse-convnext-small_16xb256-amp-coslr-800e_in1k.py b/configs/spark/spark_sparse-convnext-small_16xb256-amp-coslr-800e_in1k.py new file mode 100644 index 0000000..5cefb5b --- /dev/null +++ b/configs/spark/spark_sparse-convnext-small_16xb256-amp-coslr-800e_in1k.py @@ -0,0 +1,81 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# dataset 8 x 512 +train_dataloader = dict(batch_size=256, num_workers=8) + +# model settings +model = dict( + type='SparK', + input_size=224, + downsample_raito=32, + 
mask_ratio=0.6, + enc_dec_norm_cfg=dict(type='SparseLN2d', eps=1e-6), + enc_dec_norm_dim=768, + backbone=dict( + type='SparseConvNeXt', + arch='small', + drop_path_rate=0.2, + out_indices=(0, 1, 2, 3), + gap_before_output=False), + neck=dict( + type='SparKLightDecoder', + feature_dim=512, + upsample_ratio=32, # equal to downsample_raito + mid_channels=0, + last_act=False), + head=dict( + type='SparKPretrainHead', + loss=dict(type='PixelReconstructionLoss', criterion='L2'))) + +# optimizer wrapper +optimizer = dict( + type='Lamb', lr=2e-4 * 4096 / 512, betas=(0.9, 0.95), weight_decay=0.04) +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=optimizer, + clip_grad=dict(max_norm=5.0), + paramwise_cfg=dict( + bias_decay_mult=0.0, + flat_decay_mult=0.0, + custom_keys={ + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=760, + by_epoch=True, + begin=40, + end=800, + convert_to_iter_based=True), + dict( + type='CosineAnnealingWeightDecay', + eta_min=0.2, + T_max=800, + by_epoch=True, + begin=0, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800) +default_hooks = dict( + logger=dict(type='LoggerHook', interval=100), + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=2)) + +# randomness +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k.py b/configs/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k.py new file mode 100644 index 0000000..3a1afc8 --- /dev/null +++ b/configs/spark/spark_sparse-convnextv2-tiny_16xb256-amp-coslr-800e_in1k.py @@ -0,0 +1,84 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# dataset 16 x 256 +train_dataloader = dict(batch_size=256, num_workers=8) + +# model settings, use ConvNeXt V2 +model = dict( + type='SparK', + input_size=224, + downsample_raito=32, + mask_ratio=0.6, + enc_dec_norm_cfg=dict(type='SparseLN2d', eps=1e-6), + enc_dec_norm_dim=768, + backbone=dict( + type='SparseConvNeXt', + arch='tiny', + drop_path_rate=0.2, + out_indices=(0, 1, 2, 3), + gap_before_output=False, + layer_scale_init_value=0., + use_grn=True, + ), + neck=dict( + type='SparKLightDecoder', + feature_dim=512, + upsample_ratio=32, # equal to downsample_raito + mid_channels=0, + last_act=False), + head=dict( + type='SparKPretrainHead', + loss=dict(type='PixelReconstructionLoss', criterion='L2'))) + +# optimizer wrapper +optimizer = dict( + type='Lamb', lr=2e-4 * 4096 / 512, betas=(0.9, 0.95), weight_decay=0.04) +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=optimizer, + clip_grad=dict(max_norm=5.0), + paramwise_cfg=dict( + bias_decay_mult=0.0, + flat_decay_mult=0.0, + custom_keys={ + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=20, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=780, + by_epoch=True, + begin=20, + end=800, + convert_to_iter_based=True), + dict( + type='CosineAnnealingWeightDecay', + eta_min=0.2, + T_max=800, + by_epoch=True, + begin=0, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = 
dict(type='EpochBasedTrainLoop', max_epochs=800) +default_hooks = dict( + logger=dict(type='LoggerHook', interval=100), + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=2)) + +# randomness +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/spark/spark_sparse-resnet50_8xb512-amp-coslr-1600e_in1k.py b/configs/spark/spark_sparse-resnet50_8xb512-amp-coslr-1600e_in1k.py new file mode 100644 index 0000000..10fc675 --- /dev/null +++ b/configs/spark/spark_sparse-resnet50_8xb512-amp-coslr-1600e_in1k.py @@ -0,0 +1,30 @@ +_base_ = 'spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k.py' + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True), + dict( + type='CosineAnnealingWeightDecay', + eta_min=0.2, + T_max=1600, + by_epoch=True, + begin=0, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(max_epochs=1600) diff --git a/configs/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k.py b/configs/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k.py new file mode 100644 index 0000000..864f616 --- /dev/null +++ b/configs/spark/spark_sparse-resnet50_8xb512-amp-coslr-800e_in1k.py @@ -0,0 +1,80 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# dataset 8 x 512 +train_dataloader = dict(batch_size=512, num_workers=8) + +# model settings +model = dict( + type='SparK', + input_size=224, + downsample_raito=32, + mask_ratio=0.6, + enc_dec_norm_cfg=dict(type='SparseSyncBatchNorm2d'), + enc_dec_norm_dim=2048, + backbone=dict( + type='SparseResNet', + depth=50, + out_indices=(0, 1, 2, 3), + drop_path_rate=0.05), + neck=dict( + type='SparKLightDecoder', + feature_dim=512, + upsample_ratio=32, # equal to downsample_raito + mid_channels=0, + last_act=False), + head=dict( + type='SparKPretrainHead', + loss=dict(type='PixelReconstructionLoss', criterion='L2'))) + +# optimizer wrapper +optimizer = dict( + type='Lamb', lr=2e-4 * 4096 / 512, betas=(0.9, 0.95), weight_decay=0.04) +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=optimizer, + clip_grad=dict(max_norm=5.0), + paramwise_cfg=dict( + bias_decay_mult=0.0, + flat_decay_mult=0.0, + custom_keys={ + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=760, + by_epoch=True, + begin=40, + end=800, + convert_to_iter_based=True), + dict( + type='CosineAnnealingWeightDecay', + eta_min=0.2, + T_max=800, + by_epoch=True, + begin=0, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=800) +default_hooks = dict( + logger=dict(type='LoggerHook', interval=100), + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=2)) + +# randomness +randomness = dict(seed=0, diff_rank_seed=True) diff --git a/configs/swav/README.md b/configs/swav/README.md new file mode 100644 index 0000000..fdcdfeb --- /dev/null +++ b/configs/swav/README.md @@ -0,0 +1,85 @@ +# SwAV + +> [Unsupervised Learning of Visual Features by Contrasting Cluster 
Assignments](https://arxiv.org/abs/2006.09882) + + + +## Abstract + +Unsupervised image representations have significantly reduced the gap with supervised pretraining, notably with the recent achievements of contrastive learning methods. These contrastive methods typically work online and rely on a large number of explicit pairwise feature comparisons, which is computationally challenging. In this paper, we propose an online algorithm, SwAV, that takes advantage of contrastive methods without requiring to compute pairwise comparisons. Specifically, our method simultaneously clusters the data while enforcing consistency between cluster assignments produced for different augmentations (or “views”) of the same image, instead of comparing features directly as in contrastive learning. Simply put, we use a “swapped” prediction mechanism where we predict the code of a view from the representation of another view. Our method can be trained with large and small batches and can scale to unlimited amounts of data. Compared to previous contrastive methods, our method is more memory efficient since it does not require a large memory bank or a special momentum network. In addition, we also propose a new data augmentation strategy, multi-crop, that uses a mix of views with different resolutions in place of two full-resolution views, without increasing the memory or compute requirements. + +
+ +
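+The "swapped" prediction mechanism can be written down compactly: each view's prototype scores are turned into soft cluster assignments ("codes") by a few Sinkhorn iterations, and each view is then trained to predict the other view's codes. The sketch below reuses the `epsilon=0.05`, `temperature=0.1` and 128-d feature settings from the config later in this patch, but the prototype count, the queue, and the multi-crop handling are simplified; it is not the mmpretrain `SwAVHead`/`SwAVLoss` implementation.
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+@torch.no_grad()
+def sinkhorn(scores, eps=0.05, iters=3):
+    """Turn prototype scores (B, K) into soft assignments with balanced cluster usage."""
+    q = torch.exp(scores / eps).t()               # (K prototypes, B samples)
+    q /= q.sum()
+    K, B = q.shape
+    for _ in range(iters):
+        q /= q.sum(dim=1, keepdim=True); q /= K   # normalize over samples per prototype
+        q /= q.sum(dim=0, keepdim=True); q /= B   # normalize over prototypes per sample
+    return (q * B).t()                            # (B, K), each row sums to 1
+
+
+def swav_swapped_loss(z1, z2, prototypes, temperature=0.1):
+    """Predict view 2's code from view 1's scores, and vice versa."""
+    z1, z2 = F.normalize(z1, dim=1), F.normalize(z2, dim=1)
+    s1, s2 = z1 @ prototypes.t(), z2 @ prototypes.t()   # prototype scores
+    q1, q2 = sinkhorn(s1), sinkhorn(s2)                 # codes, computed without gradient
+    p1 = F.log_softmax(s1 / temperature, dim=1)
+    p2 = F.log_softmax(s2 / temperature, dim=1)
+    return -0.5 * ((q2 * p1).sum(dim=1).mean() + (q1 * p2).sum(dim=1).mean())
+
+
+# toy usage: 128-d projected features and an arbitrary number of prototypes
+protos = torch.nn.Parameter(F.normalize(torch.randn(300, 128), dim=1))
+loss = swav_swapped_loss(torch.randn(16, 128), torch.randn(16, 128), protos)
+loss.backward()  # gradients reach the prototypes (and, in a real model, the encoder)
+```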
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('resnet50_swav-pre_8xb32-linear-coslr-100e_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('swav_resnet50_8xb32-mcrop-coslr-200e_in1k-224px-96px', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/swav/swav_resnet50_8xb32-mcrop-coslr-200e_in1k-224px-96px.py +``` + +Test: + +```shell +python tools/test.py configs/swav/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py https://download.openmmlab.com/mmselfsup/1.x/swav/swav_resnet50_8xb32-mcrop-2-6-coslr-200e_in1k-224-96/resnet50_linear-8xb32-coslr-100e_in1k/resnet50_linear-8xb32-coslr-100e_in1k_20220825-80341e08.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :----------------------------------------------------- | :--------: | :-------: | :------------------------------------------------------------: | :---------------------------------------------------------------: | +| `swav_resnet50_8xb32-mcrop-coslr-200e_in1k-224px-96px` | 28.35 | 4.11 | [config](swav_resnet50_8xb32-mcrop-coslr-200e_in1k-224px-96px.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/swav/swav_resnet50_8xb32-mcrop-2-6-coslr-200e_in1k-224-96/swav_resnet50_8xb32-mcrop-2-6-coslr-200e_in1k-224-96_20220825-5b3fc7fc.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/swav/swav_resnet50_8xb32-mcrop-2-6-coslr-200e_in1k-224-96/swav_resnet50_8xb32-mcrop-2-6-coslr-200e_in1k-224-96_20220825-5b3fc7fc.json) | + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :---------------------------------------- | :------------------------------------------: | :--------: | :-------: | :-------: | :----------------------------------------: | :-------------------------------------------: | +| `resnet50_swav-pre_8xb32-linear-coslr-100e_in1k` | [SWAV](https://download.openmmlab.com/mmselfsup/1.x/swav/swav_resnet50_8xb32-mcrop-2-6-coslr-200e_in1k-224-96/swav_resnet50_8xb32-mcrop-2-6-coslr-200e_in1k-224-96_20220825-5b3fc7fc.pth) | 25.56 | 4.11 | 70.50 | [config](benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py) | [model](https://download.openmmlab.com/mmselfsup/1.x/swav/swav_resnet50_8xb32-mcrop-2-6-coslr-200e_in1k-224-96/resnet50_linear-8xb32-coslr-100e_in1k/resnet50_linear-8xb32-coslr-100e_in1k_20220825-80341e08.pth) \| [log](https://download.openmmlab.com/mmselfsup/1.x/swav/swav_resnet50_8xb32-mcrop-2-6-coslr-200e_in1k-224-96/resnet50_linear-8xb32-coslr-100e_in1k/resnet50_linear-8xb32-coslr-100e_in1k_20220825-80341e08.json) | + +## Citation + +```bibtex +@article{caron2020unsupervised, + title={Unsupervised Learning of Visual Features by Contrasting Cluster Assignments}, + author={Caron, Mathilde and Misra, Ishan and Mairal, Julien and Goyal, Priya and Bojanowski, Piotr and Joulin, Armand}, + booktitle={NeurIPS}, + year={2020} +} +``` diff --git a/configs/swav/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py 
b/configs/swav/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py new file mode 100644 index 0000000..2b5074c --- /dev/null +++ b/configs/swav/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py @@ -0,0 +1,18 @@ +_base_ = [ + '../../_base_/models/resnet50.py', + '../../_base_/datasets/imagenet_bs32_pil_resize.py', + '../../_base_/schedules/imagenet_lars_coslr_90e.py', + '../../_base_/default_runtime.py', +] + +model = dict( + backbone=dict( + frozen_stages=4, + init_cfg=dict(type='Pretrained', checkpoint='', prefix='backbone.'))) + +# dataset summary +train_dataloader = dict(batch_size=512) + +# runtime settings +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) diff --git a/configs/swav/metafile.yml b/configs/swav/metafile.yml new file mode 100644 index 0000000..5bc1252 --- /dev/null +++ b/configs/swav/metafile.yml @@ -0,0 +1,44 @@ +Collections: + - Name: SwAV + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - LARS + Training Resources: 8x V100 GPUs + Architecture: + - ResNet + - SwAV + Paper: + Title: Unsupervised Learning of Visual Features by Contrasting Cluster Assignments + URL: https://arxiv.org/abs/2006.09882 + README: configs/swav/README.md + +Models: + - Name: swav_resnet50_8xb32-mcrop-coslr-200e_in1k-224px-96px + Metadata: + Epochs: 200 + Batch Size: 256 + FLOPs: 4109364224 + Parameters: 28354752 + Training Data: ImageNet-1k + In Collection: SwAV + Results: null + Weights: https://download.openmmlab.com/mmselfsup/1.x/swav/swav_resnet50_8xb32-mcrop-2-6-coslr-200e_in1k-224-96/swav_resnet50_8xb32-mcrop-2-6-coslr-200e_in1k-224-96_20220825-5b3fc7fc.pth + Config: configs/swav/swav_resnet50_8xb32-mcrop-coslr-200e_in1k-224px-96px.py + Downstream: + - resnet50_swav-pre_8xb32-linear-coslr-100e_in1k + - Name: resnet50_swav-pre_8xb32-linear-coslr-100e_in1k + Metadata: + Epochs: 100 + Batch Size: 256 + FLOPs: 4109464576 + Parameters: 25557032 + Training Data: ImageNet-1k + In Collection: SwAV + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 70.5 + Weights: https://download.openmmlab.com/mmselfsup/1.x/swav/swav_resnet50_8xb32-mcrop-2-6-coslr-200e_in1k-224-96/resnet50_linear-8xb32-coslr-100e_in1k/resnet50_linear-8xb32-coslr-100e_in1k_20220825-80341e08.pth + Config: configs/swav/benchmarks/resnet50_8xb512-linear-coslr-90e_in1k.py diff --git a/configs/swav/swav_resnet50_8xb32-mcrop-coslr-200e_in1k-224px-96px.py b/configs/swav/swav_resnet50_8xb32-mcrop-coslr-200e_in1k-224px-96px.py new file mode 100644 index 0000000..ebb9ead --- /dev/null +++ b/configs/swav/swav_resnet50_8xb32-mcrop-coslr-200e_in1k-224px-96px.py @@ -0,0 +1,159 @@ +_base_ = [ + '../_base_/schedules/imagenet_lars_coslr_200e.py', + '../_base_/default_runtime.py', +] + +# dataset settings +dataset_type = 'ImageNet' +data_root = 'data/imagenet/' +data_preprocessor = dict( + type='SelfSupDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + +num_crops = [2, 6] +color_distort_strength = 1.0 +view_pipeline1 = [ + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.14, 1.), + backend='pillow'), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.8 * color_distort_strength, + contrast=0.8 * color_distort_strength, + saturation=0.8 * color_distort_strength, + hue=0.2 * color_distort_strength) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + 
type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=0.5), + dict(type='RandomFlip', prob=0.5), +] +view_pipeline2 = [ + dict( + type='RandomResizedCrop', + scale=96, + crop_ratio_range=(0.05, 0.14), + backend='pillow'), + dict( + type='RandomApply', + transforms=[ + dict( + type='ColorJitter', + brightness=0.8 * color_distort_strength, + contrast=0.8 * color_distort_strength, + saturation=0.8 * color_distort_strength, + hue=0.2 * color_distort_strength) + ], + prob=0.8), + dict( + type='RandomGrayscale', + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type='GaussianBlur', + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=0.5), + dict(type='RandomFlip', prob=0.5), +] +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiView', + num_views=num_crops, + transforms=[view_pipeline1, view_pipeline2]), + dict(type='PackInputs') +] + +batch_size = 32 +train_dataloader = dict( + batch_size=batch_size, + num_workers=8, + drop_last=True, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='meta/train.txt', + data_prefix=dict(img_path='train/'), + pipeline=train_pipeline)) + +# model settings +model = dict( + type='SwAV', + data_preprocessor=dict( + mean=(123.675, 116.28, 103.53), + std=(58.395, 57.12, 57.375), + to_rgb=True), + backbone=dict( + type='ResNet', + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=True), + neck=dict( + type='SwAVNeck', + in_channels=2048, + hid_channels=2048, + out_channels=128, + with_avg_pool=True), + head=dict( + type='SwAVHead', + loss=dict( + type='SwAVLoss', + feat_dim=128, # equal to neck['out_channels'] + epsilon=0.05, + temperature=0.1, + num_crops=num_crops, + ))) + +# optimizer +optim_wrapper = dict(type='OptimWrapper', optimizer=dict(type='LARS', lr=0.6)) +find_unused_parameters = True + +# learning policy +param_scheduler = [ + dict( + type='CosineAnnealingLR', + T_max=200, + eta_min=6e-4, + by_epoch=True, + begin=0, + end=200, + convert_to_iter_based=True) +] + +# runtime settings +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=10, max_keep_ckpts=3)) + +# additional hooks +custom_hooks = [ + dict( + type='SwAVHook', + priority='VERY_HIGH', + batch_size=batch_size, + epoch_queue_starts=15, + crops_for_assign=[0, 1], + feat_dim=128, + queue_length=3840, + frozen_layers_cfg=dict(prototypes=5005)) +] diff --git a/configs/swin_transformer/README.md b/configs/swin_transformer/README.md new file mode 100644 index 0000000..1d41f13 --- /dev/null +++ b/configs/swin_transformer/README.md @@ -0,0 +1,111 @@ +# Swin-Transformer + +> [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) + + + +## Introduction + +**Swin Transformer** (the name **Swin** stands for Shifted window) is initially described in [the paper](https://arxiv.org/pdf/2103.14030.pdf), which capably serves as a general-purpose backbone for computer vision. It is basically a hierarchical Transformer whose representation is computed with shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. 
+ +Swin Transformer achieves strong performance on COCO object detection (58.7 box AP and 51.1 mask AP on test-dev) and ADE20K semantic segmentation (53.5 mIoU on val), surpassing previous models by a large margin. + +
+ +
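+A short, self-contained sketch of the two operations the introduction refers to, window partitioning and the half-window cyclic shift, is given below. It is an illustration only (the attention itself and the masking of wrapped-around regions are omitted), not the `SwinTransformer` code that the configs in this folder instantiate; the 56x56x96 toy shape corresponds to the first stage of a 224x224 input.
+
+```python
+import torch
+
+
+def window_partition(x, window_size=7):
+    """Split a (B, H, W, C) feature map into non-overlapping windows for local attention."""
+    B, H, W, C = x.shape
+    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
+    return x.permute(0, 1, 3, 2, 4, 5).reshape(-1, window_size * window_size, C)
+
+
+def cyclic_shift(x, window_size=7):
+    """Roll the map by half a window so the next block's windows straddle the previous
+    window boundaries, which is what creates the cross-window connections."""
+    shift = window_size // 2
+    return torch.roll(x, shifts=(-shift, -shift), dims=(1, 2))
+
+
+# toy usage
+feat = torch.randn(2, 56, 56, 96)
+regular = window_partition(feat)                 # (2*64, 49, 96): attention within each window
+shifted = window_partition(cyclic_shift(feat))   # same shapes, but the windows are offset
+print(regular.shape, shifted.shape)
+```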
+ +## Abstract + +
+
+This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with **Shifted windows**. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. +
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('swin-tiny_16xb64_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('swin-tiny_16xb64_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/swin_transformer/swin-tiny_16xb64_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/swin_transformer/swin-tiny_16xb64_in1k.py https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------------: | :------------------------------------------------------------------: | +| `swin-tiny_16xb64_in1k` | From scratch | 28.29 | 4.36 | 81.18 | 95.61 | [config](swin-tiny_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925.json) | +| `swin-small_16xb64_in1k` | From scratch | 49.61 | 8.52 | 83.02 | 96.29 | [config](swin-small_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219.json) | +| `swin-base_16xb64_in1k` | From scratch | 87.77 | 15.14 | 83.36 | 96.44 | [config](swin-base_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742.json) | +| `swin-tiny_3rdparty_in1k`\* | From scratch | 28.29 | 4.36 | 81.18 | 95.52 | [config](swin-tiny_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_tiny_patch4_window7_224-160bb0a5.pth) | +| `swin-small_3rdparty_in1k`\* | From scratch | 49.61 | 8.52 | 83.21 | 96.25 | [config](swin-small_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_small_patch4_window7_224-cc7a01c9.pth) | +| `swin-base_3rdparty_in1k`\* | From scratch | 87.77 | 15.14 | 83.42 | 96.44 | [config](swin-base_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224-4670dd19.pth) | +| `swin-base_3rdparty_in1k-384`\* | From scratch | 87.90 | 44.49 | 84.49 | 96.95 | [config](swin-base_16xb64_in1k-384px.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384-02c598a4.pth) | +| `swin-base_in21k-pre-3rdparty_in1k`\* | From scratch | 87.77 | 15.14 | 85.16 | 97.50 | [config](swin-base_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224_22kto1k-f967f799.pth) | +| `swin-base_in21k-pre-3rdparty_in1k-384`\* | From scratch | 87.90 | 44.49 | 86.44 | 98.05 | [config](swin-base_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384_22kto1k-d59b0d1d.pth) | +| `swin-large_in21k-pre-3rdparty_in1k`\* | From scratch | 196.53 | 34.04 | 86.24 | 97.88 | [config](swin-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window7_224_22kto1k-5f0996db.pth) | +| `swin-large_in21k-pre-3rdparty_in1k-384`\* | From scratch | 196.74 | 100.04 | 87.25 | 98.25 | [config](swin-large_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window12_384_22kto1k-0a40944b.pth) | + +*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458). The config files of these models are only for inference. We haven't reproduce the training results.* + +### Image Classification on CUB-200-2011 + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download | +| :-------------------------- | :----------: | :--------: | :-------: | :-------: | :------------------------------------: | :---------------------------------------------------------------------------------------------: | +| `swin-large_8xb8_cub-384px` | From scratch | 195.51 | 100.04 | 91.87 | [config](swin-large_8xb8_cub-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin-large_8xb8_cub_384px_20220307-1bbaee6a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin-large_8xb8_cub_384px_20220307-1bbaee6a.json) | + +## Citation + +```bibtex +@article{liu2021Swin, + title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows}, + author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, + journal={arXiv preprint arXiv:2103.14030}, + year={2021} +} +``` diff --git a/configs/swin_transformer/metafile.yml b/configs/swin_transformer/metafile.yml new file mode 100644 index 0000000..8bff599 --- /dev/null +++ b/configs/swin_transformer/metafile.yml @@ -0,0 +1,201 @@ +Collections: + - Name: Swin-Transformer + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 16x V100 GPUs + Epochs: 300 + Batch Size: 1024 + Architecture: + - Shift Window Multihead Self Attention + Paper: + URL: https://arxiv.org/abs/2103.14030 + Title: "Swin Transformer: Hierarchical Vision Transformer using Shifted Windows" + README: configs/swin_transformer/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/swin_transformer.py#L176 + Version: v0.15.0 + +Models: + - Name: swin-tiny_16xb64_in1k + Metadata: + FLOPs: 4360000000 + Parameters: 28290000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.18 + Top 5 Accuracy: 
95.61 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth + Config: configs/swin_transformer/swin-tiny_16xb64_in1k.py + - Name: swin-small_16xb64_in1k + Metadata: + FLOPs: 8520000000 + Parameters: 49610000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.02 + Top 5 Accuracy: 96.29 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth + Config: configs/swin_transformer/swin-small_16xb64_in1k.py + - Name: swin-base_16xb64_in1k + Metadata: + FLOPs: 15140000000 + Parameters: 87770000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.36 + Top 5 Accuracy: 96.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth + Config: configs/swin_transformer/swin-base_16xb64_in1k.py + - Name: swin-tiny_3rdparty_in1k + Metadata: + FLOPs: 4360000000 + Parameters: 28290000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.18 + Top 5 Accuracy: 95.52 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_tiny_patch4_window7_224-160bb0a5.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-tiny_16xb64_in1k.py + - Name: swin-small_3rdparty_in1k + Metadata: + FLOPs: 8520000000 + Parameters: 49610000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.21 + Top 5 Accuracy: 96.25 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_small_patch4_window7_224-cc7a01c9.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-small_16xb64_in1k.py + - Name: swin-base_3rdparty_in1k + Metadata: + FLOPs: 15140000000 + Parameters: 87770000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.42 + Top 5 Accuracy: 96.44 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224-4670dd19.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-base_16xb64_in1k.py + - Name: swin-base_3rdparty_in1k-384 + Metadata: + FLOPs: 44490000000 + Parameters: 87900000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.49 + Top 5 Accuracy: 96.95 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384-02c598a4.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-base_16xb64_in1k-384px.py + - Name: swin-base_in21k-pre-3rdparty_in1k + Metadata: + FLOPs: 15140000000 + Parameters: 87770000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.16 + Top 5 Accuracy: 97.50 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window7_224_22kto1k-f967f799.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22kto1k.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-base_16xb64_in1k.py + - Name: swin-base_in21k-pre-3rdparty_in1k-384 + Metadata: + FLOPs: 44490000000 + Parameters: 87900000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.44 + Top 5 Accuracy: 98.05 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_base_patch4_window12_384_22kto1k-d59b0d1d.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22kto1k.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-base_16xb64_in1k-384px.py + - Name: swin-large_in21k-pre-3rdparty_in1k + Metadata: + FLOPs: 34040000000 + Parameters: 196530000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.24 + Top 5 Accuracy: 97.88 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window7_224_22kto1k-5f0996db.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22kto1k.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-large_16xb64_in1k.py + - Name: swin-large_in21k-pre-3rdparty_in1k-384 + Metadata: + FLOPs: 100040000000 + Parameters: 196740000 + In Collection: Swin-Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.25 + Top 5 Accuracy: 98.25 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin_large_patch4_window12_384_22kto1k-0a40944b.pth + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22kto1k.pth + Code: https://github.com/microsoft/Swin-Transformer/blob/777f6c66604bb5579086c4447efe3620344d95a9/models/swin_transformer.py#L458 + Config: configs/swin_transformer/swin-large_16xb64_in1k-384px.py + - Name: swin-large_8xb8_cub-384px + Metadata: + FLOPs: 100040000000 + Parameters: 195510000 + In Collection: Swin-Transformer + Results: + - Dataset: CUB-200-2011 + 
Metrics: + Top 1 Accuracy: 91.87 + Task: Image Classification + Pretrain: https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth + Weights: https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin-large_8xb8_cub_384px_20220307-1bbaee6a.pth + Config: configs/swin_transformer/swin-large_8xb8_cub-384px.py diff --git a/configs/swin_transformer/swin-base_16xb64_in1k-384px.py b/configs/swin_transformer/swin-base_16xb64_in1k-384px.py new file mode 100644 index 0000000..10f8992 --- /dev/null +++ b/configs/swin_transformer/swin-base_16xb64_in1k-384px.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/swin_transformer/base_384.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/configs/swin_transformer/swin-base_16xb64_in1k.py b/configs/swin_transformer/swin-base_16xb64_in1k.py new file mode 100644 index 0000000..05a95b4 --- /dev/null +++ b/configs/swin_transformer/swin-base_16xb64_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/swin_transformer/base_224.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/configs/swin_transformer/swin-large_16xb64_in1k-384px.py b/configs/swin_transformer/swin-large_16xb64_in1k-384px.py new file mode 100644 index 0000000..5ba52b3 --- /dev/null +++ b/configs/swin_transformer/swin-large_16xb64_in1k-384px.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/swin_transformer/large_384.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/configs/swin_transformer/swin-large_16xb64_in1k.py b/configs/swin_transformer/swin-large_16xb64_in1k.py new file mode 100644 index 0000000..36121ef --- /dev/null +++ b/configs/swin_transformer/swin-large_16xb64_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/swin_transformer/large_224.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/configs/swin_transformer/swin-large_8xb8_cub-384px.py b/configs/swin_transformer/swin-large_8xb8_cub-384px.py new file mode 100644 index 0000000..a2f10a6 --- /dev/null +++ b/configs/swin_transformer/swin-large_8xb8_cub-384px.py @@ -0,0 +1,40 @@ +_base_ = [ + '../_base_/models/swin_transformer/large_384.py', + '../_base_/datasets/cub_bs8_384.py', + '../_base_/schedules/cub_bs64.py', + '../_base_/default_runtime.py', +] + +# model settings +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth' # noqa +model = dict( + type='ImageClassifier', + backbone=dict( + init_cfg=dict( + type='Pretrained', checkpoint=checkpoint, prefix='backbone')), + head=dict(num_classes=200, )) + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + _delete_=True, + type='AdamW', + lr=5e-6, + weight_decay=0.0005, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': 
dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=5.0), +) + +default_hooks = dict( + # log every 20 iterations + logger=dict(type='LoggerHook', interval=20), + # save last three checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) diff --git a/configs/swin_transformer/swin-small_16xb64_in1k.py b/configs/swin_transformer/swin-small_16xb64_in1k.py new file mode 100644 index 0000000..7c1a8e2 --- /dev/null +++ b/configs/swin_transformer/swin-small_16xb64_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/swin_transformer/small_224.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/configs/swin_transformer/swin-tiny_16xb64_in1k.py b/configs/swin_transformer/swin-tiny_16xb64_in1k.py new file mode 100644 index 0000000..9a1ce25 --- /dev/null +++ b/configs/swin_transformer/swin-tiny_16xb64_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/swin_transformer/tiny_224.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/configs/swin_transformer_v2/README.md b/configs/swin_transformer_v2/README.md new file mode 100644 index 0000000..dd20548 --- /dev/null +++ b/configs/swin_transformer_v2/README.md @@ -0,0 +1,121 @@ +# Swin-Transformer V2 + +> [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) + + + +## Introduction + +**Swin Transformer V2** scales up the vision model introduced by [Swin Transformer](https://github.com/open-mmlab/mmpretrain/tree/main/configs/swin_transformer). In the vision domain, performance cannot be improved by simply scaling up the model the way it can for NLP models. The possible reasons mentioned in the paper are: + +- Training instability when enlarging the vision model +- Difficulty in transferring a model trained at low resolution to higher-resolution tasks +- Excessive GPU memory consumption + +To address these issues, the paper proposes the following improvements: + +- Post normalization: apply layer normalization after the self-attention layer and the MLP block +- Scaled cosine attention: use cosine similarity to compute the relationship between token pairs (see the sketch below) +- Log-spaced continuous position bias: redefine the relative position encoding + +
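+The scaled cosine attention mentioned above can be sketched in a few lines. This is a simplified illustration, not the `mmpretrain` backbone code, and the helper name `scaled_cosine_attention` is ours: attention logits are cosine similarities between queries and keys, multiplied by a learnable per-head temperature instead of the fixed 1/sqrt(d) scaling of V1.
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+def scaled_cosine_attention(q, k, v, logit_scale):
+    """q, k, v: (num_heads, tokens, head_dim); logit_scale: (num_heads, 1, 1)."""
+    # Cosine similarity between every query/key pair ...
+    attn = F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)
+    # ... scaled by a clamped per-head temperature (learnable in the real model).
+    attn = attn * logit_scale.clamp(max=4.605).exp()  # exp(4.605) ~= 100
+    return attn.softmax(dim=-1) @ v
+
+
+q = k = v = torch.rand(3, 64, 32)  # 3 heads, one 8x8 window, head_dim 32
+logit_scale = torch.log(10 * torch.ones(3, 1, 1))
+print(scaled_cosine_attention(q, k, v, logit_scale).shape)  # torch.Size([3, 64, 32])
+```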
+ +
+ +## Abstract + +
+ +
+ +Large-scale NLP models have been shown to significantly improve the performance on language tasks with no signs of saturation. They also demonstrate amazing few-shot capabilities like that of human beings. This paper aims to explore large-scale models in computer vision. We tackle three major issues in training and application of large vision models, including training instability, resolution gaps between pre-training and fine-tuning, and hunger on labelled data. Three main techniques are proposed: 1) a residual-post-norm method combined with cosine attention to improve training stability; 2) A log-spaced continuous position bias method to effectively transfer models pre-trained using low-resolution images to downstream tasks with high-resolution inputs; 3) A self-supervised pre-training method, SimMIM, to reduce the needs of vast labeled images. Through these techniques, this paper successfully trained a 3 billion-parameter Swin Transformer V2 model, which is the largest dense vision model to date, and makes it capable of training with images of up to 1,536×1,536 resolution. It set new performance records on 4 representative vision tasks, including ImageNet-V2 image classification, COCO object detection, ADE20K semantic segmentation, and Kinetics-400 video action classification. Also note our training is much more efficient than that in Google's billion-level visual models, which consumes 40 times less labelled data and 40 times less training time. + +
+ +
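+The log-spaced continuous position bias from the abstract boils down to remapping relative offsets as sign(x) * log(1 + |x|) before feeding them to a small bias network. A minimal sketch is below (our own helper; the normalization details of the official implementation are omitted):
+
+```python
+import torch
+
+
+def log_spaced_coords(relative_coords: torch.Tensor) -> torch.Tensor:
+    """Map relative offsets x to sign(x) * log(1 + |x|). Large offsets are
+    compressed, so a bias learned on small windows extrapolates more gracefully
+    to the larger windows used at high resolution."""
+    return torch.sign(relative_coords) * torch.log1p(relative_coords.abs())
+
+
+offsets = torch.arange(-23, 24, dtype=torch.float32)  # relative offsets in a 24x24 window
+print(log_spaced_coords(offsets)[:4])  # tensor([-3.1781, -3.1355, -3.0910, -3.0445])
+```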
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('swinv2-tiny-w8_3rdparty_in1k-256px', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('swinv2-tiny-w8_3rdparty_in1k-256px', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w8_3rdparty_in1k-256px_20220803-e318968f.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :---------------------------------------- | :--------: | :-------: | :----------------------------------------------: | :------------------------------------------------------------------------------------------: | +| `swinv2-base-w12_3rdparty_in21k-192px`\* | 87.92 | 8.51 | [config](swinv2-base-w12_8xb128_in21k-192px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-base-w12_3rdparty_in21k-192px_20220803-f7dc9763.pth) | +| `swinv2-large-w12_3rdparty_in21k-192px`\* | 196.74 | 19.04 | [config](swinv2-large-w12_8xb128_in21k-192px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-large-w12_3rdparty_in21k-192px_20220803-d9073fee.pth) | + +*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :------------------------------------------------: | :--------------------------------------------------: | +| `swinv2-tiny-w8_3rdparty_in1k-256px`\* | From scratch | 28.35 | 4.35 | 81.76 | 95.87 | [config](swinv2-tiny-w8_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w8_3rdparty_in1k-256px_20220803-e318968f.pth) | +| `swinv2-tiny-w16_3rdparty_in1k-256px`\* | From scratch | 28.35 | 4.40 | 82.81 | 96.23 | [config](swinv2-tiny-w16_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w16_3rdparty_in1k-256px_20220803-9651cdd7.pth) | +| `swinv2-small-w8_3rdparty_in1k-256px`\* | From scratch | 49.73 | 8.45 | 83.74 | 96.60 | [config](swinv2-small-w8_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-w8_3rdparty_in1k-256px_20220803-b01a4332.pth) | +| `swinv2-small-w16_3rdparty_in1k-256px`\* | From scratch | 49.73 | 8.57 | 84.13 | 96.83 | [config](swinv2-small-w16_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-w16_3rdparty_in1k-256px_20220803-b707d206.pth) | +| `swinv2-base-w8_3rdparty_in1k-256px`\* | From scratch | 87.92 | 14.99 | 84.20 | 96.86 | [config](swinv2-base-w8_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w8_3rdparty_in1k-256px_20220803-8ff28f2b.pth) | +| `swinv2-base-w16_3rdparty_in1k-256px`\* | From scratch | 87.92 | 15.14 | 84.60 | 97.05 | [config](swinv2-base-w16_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_3rdparty_in1k-256px_20220803-5a1886b7.pth) | +| `swinv2-base-w16_in21k-pre_3rdparty_in1k-256px`\* | ImageNet-21k | 87.92 | 15.14 | 86.17 | 97.88 | [config](swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_in21k-pre_3rdparty_in1k-256px_20220803-8d7aa8ad.pth) | +| `swinv2-base-w24_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 87.92 | 34.07 | 87.14 | 98.23 | [config](swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w24_in21k-pre_3rdparty_in1k-384px_20220803-44eb70f8.pth) | +| `swinv2-large-w16_in21k-pre_3rdparty_in1k-256px`\* | ImageNet-21k | 196.75 | 33.86 | 86.93 | 98.06 | [config](swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w16_in21k-pre_3rdparty_in1k-256px_20220803-c40cbed7.pth) | +| `swinv2-large-w24_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 196.75 | 76.20 | 87.59 | 98.27 | [config](swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w24_in21k-pre_3rdparty_in1k-384px_20220803-3b36c165.pth) | + +*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{https://doi.org/10.48550/arxiv.2111.09883, + doi = {10.48550/ARXIV.2111.09883}, + url = {https://arxiv.org/abs/2111.09883}, + author = {Liu, Ze and Hu, Han and Lin, Yutong and Yao, Zhuliang and Xie, Zhenda and Wei, Yixuan and Ning, Jia and Cao, Yue and Zhang, Zheng and Dong, Li and Wei, Furu and Guo, Baining}, + keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences}, + title = {Swin Transformer V2: Scaling Up Capacity and Resolution}, + publisher = {arXiv}, + year = {2021}, + copyright = {Creative Commons Attribution 4.0 International} +} +``` diff --git a/configs/swin_transformer_v2/metafile.yml b/configs/swin_transformer_v2/metafile.yml new file mode 100644 index 0000000..55a14cb --- /dev/null +++ b/configs/swin_transformer_v2/metafile.yml @@ -0,0 +1,206 @@ +Collections: + - Name: Swin-Transformer V2 + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + - Weight Decay + Training Resources: 16x V100 GPUs + Epochs: 300 + Batch Size: 1024 + Architecture: + - Shift Window Multihead Self Attention + Paper: + URL: https://arxiv.org/abs/2111.09883 + Title: "Swin Transformer V2: Scaling Up Capacity and Resolution" + README: configs/swin_transformer_v2/README.md + +Models: + - Name: swinv2-tiny-w8_3rdparty_in1k-256px + Metadata: + FLOPs: 4350000000 + Parameters: 28350000 + In Collection: Swin-Transformer V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.76 + Top 5 Accuracy: 95.87 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w8_3rdparty_in1k-256px_20220803-e318968f.pth + Config: configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window8_256.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-tiny-w16_3rdparty_in1k-256px + Metadata: + FLOPs: 4400000000 + Parameters: 28350000 + In Collection: Swin-Transformer V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.81 + Top 5 Accuracy: 96.23 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-w16_3rdparty_in1k-256px_20220803-9651cdd7.pth + Config: configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_tiny_patch4_window16_256.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-small-w8_3rdparty_in1k-256px + Metadata: + FLOPs: 8450000000 + Parameters: 49730000 + In Collection: Swin-Transformer V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.74 + Top 5 Accuracy: 96.6 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-w8_3rdparty_in1k-256px_20220803-b01a4332.pth + Config: configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window8_256.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-small-w16_3rdparty_in1k-256px + Metadata: + FLOPs: 8570000000 + Parameters: 49730000 + In Collection: Swin-Transformer V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.13 + Top 5 Accuracy: 
96.83 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-w16_3rdparty_in1k-256px_20220803-b707d206.pth + Config: configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_small_patch4_window16_256.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-base-w8_3rdparty_in1k-256px + Metadata: + FLOPs: 14990000000 + Parameters: 87920000 + In Collection: Swin-Transformer V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.2 + Top 5 Accuracy: 96.86 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w8_3rdparty_in1k-256px_20220803-8ff28f2b.pth + Config: configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window8_256.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-base-w16_3rdparty_in1k-256px + Metadata: + FLOPs: 15140000000 + Parameters: 87920000 + In Collection: Swin-Transformer V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.6 + Top 5 Accuracy: 97.05 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_3rdparty_in1k-256px_20220803-5a1886b7.pth + Config: configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window16_256.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-base-w16_in21k-pre_3rdparty_in1k-256px + Metadata: + Training Data: ImageNet-21k + FLOPs: 15140000000 + Parameters: 87920000 + In Collection: Swin-Transformer V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.17 + Top 5 Accuracy: 97.88 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_in21k-pre_3rdparty_in1k-256px_20220803-8d7aa8ad.pth + Config: configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to16_192to256_22kto1k_ft.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-base-w24_in21k-pre_3rdparty_in1k-384px + Metadata: + Training Data: ImageNet-21k + FLOPs: 34070000000 + Parameters: 87920000 + In Collection: Swin-Transformer V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.14 + Top 5 Accuracy: 98.23 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w24_in21k-pre_3rdparty_in1k-384px_20220803-44eb70f8.pth + Config: configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12to24_192to384_22kto1k_ft.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-large-w16_in21k-pre_3rdparty_in1k-256px + Metadata: + Training Data: ImageNet-21k + FLOPs: 33860000000 + Parameters: 196750000 + In Collection: Swin-Transformer V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.93 + Top 5 Accuracy: 98.06 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w16_in21k-pre_3rdparty_in1k-256px_20220803-c40cbed7.pth + Config: configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to16_192to256_22kto1k_ft.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-large-w24_in21k-pre_3rdparty_in1k-384px + Metadata: + Training Data: ImageNet-21k + FLOPs: 76200000000 + Parameters: 196750000 + In Collection: Swin-Transformer V2 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 87.59 + Top 5 Accuracy: 98.27 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w24_in21k-pre_3rdparty_in1k-384px_20220803-3b36c165.pth + Config: configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12to24_192to384_22kto1k_ft.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-base-w12_3rdparty_in21k-192px + Metadata: + Training Data: ImageNet-21k + FLOPs: 8510000000 + Parameters: 87920000 + In Collection: Swin-Transformer V2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-base-w12_3rdparty_in21k-192px_20220803-f7dc9763.pth + Config: configs/swin_transformer_v2/swinv2-base-w12_8xb128_in21k-192px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_base_patch4_window12_192_22k.pth + Code: https://github.com/microsoft/Swin-Transformer + - Name: swinv2-large-w12_3rdparty_in21k-192px + Metadata: + Training Data: ImageNet-21k + FLOPs: 19040000000 + Parameters: 196740000 + In Collection: Swin-Transformer V2 + Results: null + Weights: https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-large-w12_3rdparty_in21k-192px_20220803-d9073fee.pth + Config: configs/swin_transformer_v2/swinv2-large-w12_8xb128_in21k-192px.py + Converted From: + Weights: https://github.com/SwinTransformer/storage/releases/download/v2.0.0/swinv2_large_patch4_window12_192_22k.pth + Code: https://github.com/microsoft/Swin-Transformer diff --git a/configs/swin_transformer_v2/swinv2-base-w12_8xb128_in21k-192px.py b/configs/swin_transformer_v2/swinv2-base-w12_8xb128_in21k-192px.py new file mode 100644 index 0000000..9b01b75 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-base-w12_8xb128_in21k-192px.py @@ -0,0 +1,19 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/base_256.py', + '../_base_/datasets/imagenet21k_bs128.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(img_size=192, window_size=[12, 12, 12, 6]), + head=dict(num_classes=21841), +) + +# dataset settings +data_preprocessor = dict(num_classes=21841) + +_base_['train_pipeline'][1]['scale'] = 192 # RandomResizedCrop +_base_['test_pipeline'][1]['scale'] = 219 # ResizeEdge +_base_['test_pipeline'][2]['crop_size'] = 192 # CenterCrop diff --git a/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py new file mode 100644 index 0000000..5f375ee --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-base-w16_16xb64_in1k-256px.py @@ -0,0 +1,8 @@ +_base_ = [ + 
'../_base_/models/swin_transformer_v2/base_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(window_size=[16, 16, 16, 8])) diff --git a/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py new file mode 100644 index 0000000..0725f9e --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-base-w16_in21k-pre_16xb64_in1k-256px.py @@ -0,0 +1,13 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/base_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + window_size=[16, 16, 16, 8], + drop_path_rate=0.2, + pretrained_window_sizes=[12, 12, 12, 6])) diff --git a/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py b/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py new file mode 100644 index 0000000..3dd4e5f --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-base-w24_in21k-pre_16xb64_in1k-384px.py @@ -0,0 +1,14 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/base_384.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + img_size=384, + window_size=[24, 24, 24, 12], + drop_path_rate=0.2, + pretrained_window_sizes=[12, 12, 12, 6])) diff --git a/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py new file mode 100644 index 0000000..23fc407 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-base-w8_16xb64_in1k-256px.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/base_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/configs/swin_transformer_v2/swinv2-large-w12_8xb128_in21k-192px.py b/configs/swin_transformer_v2/swinv2-large-w12_8xb128_in21k-192px.py new file mode 100644 index 0000000..9b01b75 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-large-w12_8xb128_in21k-192px.py @@ -0,0 +1,19 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/base_256.py', + '../_base_/datasets/imagenet21k_bs128.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# model settings +model = dict( + backbone=dict(img_size=192, window_size=[12, 12, 12, 6]), + head=dict(num_classes=21841), +) + +# dataset settings +data_preprocessor = dict(num_classes=21841) + +_base_['train_pipeline'][1]['scale'] = 192 # RandomResizedCrop +_base_['test_pipeline'][1]['scale'] = 219 # ResizeEdge +_base_['test_pipeline'][2]['crop_size'] = 192 # CenterCrop diff --git a/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py new file mode 100644 index 0000000..62a2a29 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-large-w16_in21k-pre_16xb64_in1k-256px.py @@ -0,0 +1,13 @@ +# Only for evaluation +_base_ = [ + '../_base_/models/swin_transformer_v2/large_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + 
'../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + window_size=[16, 16, 16, 8], pretrained_window_sizes=[12, 12, 12, 6]), +) diff --git a/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py b/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py new file mode 100644 index 0000000..d97d9b2 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-large-w24_in21k-pre_16xb64_in1k-384px.py @@ -0,0 +1,15 @@ +# Only for evaluation +_base_ = [ + '../_base_/models/swin_transformer_v2/large_384.py', + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict( + type='ImageClassifier', + backbone=dict( + img_size=384, + window_size=[24, 24, 24, 12], + pretrained_window_sizes=[12, 12, 12, 6]), +) diff --git a/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py new file mode 100644 index 0000000..f87265d --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-small-w16_16xb64_in1k-256px.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/small_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(window_size=[16, 16, 16, 8])) diff --git a/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py new file mode 100644 index 0000000..f1001f1 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-small-w8_16xb64_in1k-256px.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/small_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py new file mode 100644 index 0000000..7e1f290 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-tiny-w16_16xb64_in1k-256px.py @@ -0,0 +1,8 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/tiny_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(window_size=[16, 16, 16, 8])) diff --git a/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py b/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py new file mode 100644 index 0000000..2cdc9a2 --- /dev/null +++ b/configs/swin_transformer_v2/swinv2-tiny-w8_16xb64_in1k-256px.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/swin_transformer_v2/tiny_256.py', + '../_base_/datasets/imagenet_bs64_swin_256.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] diff --git a/configs/t2t_vit/README.md b/configs/t2t_vit/README.md new file mode 100644 index 0000000..bf0967c --- /dev/null +++ b/configs/t2t_vit/README.md @@ -0,0 +1,81 @@ +# Tokens-to-Token ViT + +> [Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet](https://arxiv.org/abs/2101.11986) + + + +## Abstract + +Transformers, which are popular for language modeling, have been explored for solving vision tasks recently, e.g., the Vision Transformer (ViT) for image classification. 
The ViT model splits each image into a sequence of tokens with fixed length and then applies multiple Transformer layers to model their global relation for classification. However, ViT achieves inferior performance to CNNs when trained from scratch on a midsize dataset like ImageNet. We find it is because: 1) the simple tokenization of input images fails to model the important local structure such as edges and lines among neighboring pixels, leading to low training sample efficiency; 2) the redundant attention backbone design of ViT leads to limited feature richness for fixed computation budgets and limited training samples. To overcome such limitations, we propose a new Tokens-To-Token Vision Transformer (T2T-ViT), which incorporates 1) a layer-wise Tokens-to-Token (T2T) transformation to progressively structurize the image to tokens by recursively aggregating neighboring Tokens into one Token (Tokens-to-Token), such that local structure represented by surrounding tokens can be modeled and tokens length can be reduced; 2) an efficient backbone with a deep-narrow structure for vision transformer motivated by CNN architecture design after empirical study. Notably, T2T-ViT reduces the parameter count and MACs of vanilla ViT by half, while achieving more than 3.0% improvement when trained from scratch on ImageNet. It also outperforms ResNets and achieves comparable performance with MobileNets by directly training on ImageNet. For example, T2T-ViT with comparable size to ResNet50 (21.5M parameters) can achieve 83.3% top1 accuracy in image resolution 384×384 on ImageNet. + +
+ +
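+The Tokens-to-Token step described in the abstract can be illustrated with `nn.Unfold`: each k x k neighbourhood of tokens is concatenated into a single token, so the token count shrinks while the channel dimension grows. The sketch below is our own simplification and ignores the attention layer that T2T-ViT applies between aggregation steps.
+
+```python
+import torch
+import torch.nn as nn
+
+
+def tokens_to_token(tokens: torch.Tensor, h: int, w: int, k: int = 3, stride: int = 2):
+    """Aggregate each k x k neighbourhood of tokens into one longer token."""
+    B, N, C = tokens.shape                        # N == h * w
+    feat = tokens.transpose(1, 2).reshape(B, C, h, w)
+    patches = nn.Unfold(kernel_size=k, stride=stride, padding=1)(feat)
+    return patches.transpose(1, 2)                # (B, new_N, C * k * k)
+
+
+x = torch.rand(2, 56 * 56, 64)               # 3136 tokens of dim 64
+print(tokens_to_token(x, h=56, w=56).shape)  # torch.Size([2, 784, 576])
+```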
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('t2t-vit-t-14_8xb64_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('t2t-vit-t-14_8xb64_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-f7378dd5.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------: | :----------------------------------------------------------------------------------------: | +| `t2t-vit-t-14_8xb64_in1k` | From scratch | 21.47 | 4.34 | 81.83 | 95.84 | [config](t2t-vit-t-14_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-f7378dd5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-f7378dd5.json) | +| `t2t-vit-t-19_8xb64_in1k` | From scratch | 39.08 | 7.80 | 82.63 | 96.18 | [config](t2t-vit-t-19_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_8xb64_in1k_20211214-7f5e3aaf.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_8xb64_in1k_20211214-7f5e3aaf.json) | +| `t2t-vit-t-24_8xb64_in1k` | From scratch | 64.00 | 12.69 | 82.71 | 96.09 | [config](t2t-vit-t-24_8xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_8xb64_in1k_20211214-b2a68ae3.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_8xb64_in1k_20211214-b2a68ae3.json) | + +## Citation + +```bibtex +@article{yuan2021tokens, + title={Tokens-to-token vit: Training vision transformers from scratch on imagenet}, + author={Yuan, Li and Chen, Yunpeng and Wang, Tao and Yu, Weihao and Shi, Yujun and Tay, Francis EH and Feng, Jiashi and Yan, Shuicheng}, + journal={arXiv preprint arXiv:2101.11986}, + year={2021} +} +``` diff --git a/configs/t2t_vit/metafile.yml b/configs/t2t_vit/metafile.yml new file mode 100644 index 0000000..72cb2df --- /dev/null +++ b/configs/t2t_vit/metafile.yml @@ -0,0 +1,58 @@ +Collections: + - Name: Tokens-to-Token ViT + Metadata: + Training Data: ImageNet-1k + Architecture: + - Layer Normalization + - Scaled Dot-Product Attention + - Attention Dropout + - Dropout + - Tokens to Token + Paper: + URL: https://arxiv.org/abs/2101.11986 + Title: "Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet" + README: configs/t2t_vit/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.17.0/mmcls/models/backbones/t2t_vit.py + Version: v0.17.0 + +Models: + - Name: t2t-vit-t-14_8xb64_in1k + Metadata: + FLOPs: 4340000000 + 
Parameters: 21470000 + In Collection: Tokens-to-Token ViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.83 + Top 5 Accuracy: 95.84 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-f7378dd5.pth + Config: configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py + - Name: t2t-vit-t-19_8xb64_in1k + Metadata: + FLOPs: 7800000000 + Parameters: 39080000 + In Collection: Tokens-to-Token ViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.63 + Top 5 Accuracy: 96.18 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-19_8xb64_in1k_20211214-7f5e3aaf.pth + Config: configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py + - Name: t2t-vit-t-24_8xb64_in1k + Metadata: + FLOPs: 12690000000 + Parameters: 64000000 + In Collection: Tokens-to-Token ViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.71 + Top 5 Accuracy: 96.09 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_8xb64_in1k_20211214-b2a68ae3.pth + Config: configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py diff --git a/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py b/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py new file mode 100644 index 0000000..8ff6444 --- /dev/null +++ b/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py @@ -0,0 +1,49 @@ +_base_ = [ + '../_base_/models/t2t-vit-t-14.py', + '../_base_/datasets/imagenet_bs64_t2t_224.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=5e-4, weight_decay=0.05), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={'cls_token': dict(decay_mult=0.0)}, + ), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-6, + by_epoch=True, + begin=0, + end=10, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=290, + eta_min=1e-5, + by_epoch=True, + begin=10, + end=300), + # cool down learning rate scheduler + dict(type='ConstantLR', factor=0.1, by_epoch=True, begin=300, end=310), +] + +train_cfg = dict(by_epoch=True, max_epochs=310, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# runtime settings +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (8 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=512) diff --git a/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py b/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py new file mode 100644 index 0000000..0c72753 --- /dev/null +++ b/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py @@ -0,0 +1,49 @@ +_base_ = [ + '../_base_/models/t2t-vit-t-19.py', + '../_base_/datasets/imagenet_bs64_t2t_224.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=5e-4, weight_decay=0.065), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={'cls_token': dict(decay_mult=0.0)}, + ), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-6, + by_epoch=True, + begin=0, + end=10, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=290, + eta_min=1e-5, + by_epoch=True, + begin=10, + end=300), + # cool down learning rate scheduler + dict(type='ConstantLR', factor=0.1, by_epoch=True, begin=300, end=310), +] + +train_cfg = dict(by_epoch=True, max_epochs=310, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# runtime settings +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=512) diff --git a/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py b/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py new file mode 100644 index 0000000..e180ff3 --- /dev/null +++ b/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py @@ -0,0 +1,49 @@ +_base_ = [ + '../_base_/models/t2t-vit-t-24.py', + '../_base_/datasets/imagenet_bs64_t2t_224.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=5e-4, weight_decay=0.065), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={'cls_token': dict(decay_mult=0.0)}, + ), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-6, + by_epoch=True, + begin=0, + end=10, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=290, + eta_min=1e-5, + by_epoch=True, + begin=10, + end=300), + # cool down learning rate scheduler + dict(type='ConstantLR', factor=0.1, by_epoch=True, begin=300, end=310), +] + +train_cfg = dict(by_epoch=True, max_epochs=310, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# runtime settings +custom_hooks = [dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=512) diff --git a/configs/tinyvit/README.md b/configs/tinyvit/README.md new file mode 100644 index 0000000..58ceb57 --- /dev/null +++ b/configs/tinyvit/README.md @@ -0,0 +1,82 @@ +# TinyViT + +> [TinyViT: Fast Pretraining Distillation for Small Vision Transformers](https://arxiv.org/abs/2207.10666) + + + +## Abstract + +Vision transformer (ViT) recently has drawn great attention in computer vision due to its remarkable model capability. 
However, most prevailing ViT models suffer from huge number of parameters, restricting their applicability on devices with limited resources. To alleviate this issue, we propose TinyViT, a new family of tiny and efficient small vision transformers pretrained on large-scale datasets with our proposed fast distillation framework. The central idea is to transfer knowledge from large pretrained models to small ones, while enabling small models to get the dividends of massive pretraining data. More specifically, we apply distillation during pretraining for knowledge transfer. The logits of large teacher models are sparsified and stored in disk in advance to save the memory cost and computation overheads. The tiny student transformers are automatically scaled down from a large pretrained model with computation and parameter constraints. Comprehensive experiments demonstrate the efficacy of TinyViT. It achieves a top-1 accuracy of 84.8% on ImageNet-1k with only 21M parameters, being comparable to SwinB pretrained on ImageNet-21k while using 4.2 times fewer parameters. Moreover, increasing image resolutions, TinyViT can reach 86.5% accuracy, being slightly better than Swin-L while using only 11% parameters. Last but not the least, we demonstrate a good transfer ability of TinyViT on various downstream tasks. + +
+ +
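+The "sparsify and store teacher logits" idea from the abstract can be sketched as follows. This is illustrative only: the function names are ours, and the real TinyViT pipeline differs in details such as the on-disk format and augmentation bookkeeping.
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+def sparsify_logits(teacher_logits: torch.Tensor, k: int = 10):
+    """Keep only the top-k teacher probabilities so they can be cached on disk
+    once, instead of re-running the large teacher at every pretraining step."""
+    topk_probs, topk_idx = teacher_logits.softmax(dim=-1).topk(k, dim=-1)
+    return topk_probs, topk_idx
+
+
+def sparse_distill_loss(student_logits, topk_probs, topk_idx):
+    """Cross-entropy of the student against the renormalised sparse teacher."""
+    target = topk_probs / topk_probs.sum(dim=-1, keepdim=True)
+    log_probs = F.log_softmax(student_logits, dim=-1).gather(-1, topk_idx)
+    return -(target * log_probs).sum(dim=-1).mean()
+
+
+teacher, student = torch.rand(4, 1000), torch.rand(4, 1000)
+probs, idx = sparsify_logits(teacher)
+print(sparse_distill_loss(student, probs, idx))
+```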
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('tinyvit-5m_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('tinyvit-5m_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/tinyvit/tinyvit-5m_8xb256_in1k.py https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-5m_3rdparty_in1k_20221021-62cb5abf.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :--------------------------------------------- | :------------------: | :--------: | :-------: | :-------: | :-------: | :---------------------------------------------: | :------------------------------------------------: | +| `tinyvit-5m_3rdparty_in1k`\* | From scratch | 5.39 | 1.29 | 79.02 | 94.74 | [config](tinyvit-5m_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-5m_3rdparty_in1k_20221021-62cb5abf.pth) | +| `tinyvit-5m_in21k-distill-pre_3rdparty_in1k`\* | ImageNet-21k DISTILL | 5.39 | 1.29 | 80.71 | 95.57 | [config](tinyvit-5m-distill_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-5m_in21k-distill-pre_3rdparty_in1k_20221021-d4b010a8.pth) | +| `tinyvit-11m_3rdparty_in1k`\* | From scratch | 11.00 | 2.05 | 81.44 | 95.79 | [config](tinyvit-11m_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-11m_3rdparty_in1k_20221021-11ccef16.pth) | +| `tinyvit-11m_in21k-distill-pre_3rdparty_in1k`\* | ImageNet-21k DISTILL | 11.00 | 2.05 | 83.19 | 96.53 | [config](tinyvit-11m-distill_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-11m_in21k-distill-pre_3rdparty_in1k_20221021-5d3bc0dc.pth) | +| `tinyvit-21m_3rdparty_in1k`\* | From scratch | 21.20 | 4.30 | 83.08 | 96.58 | [config](tinyvit-21m_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-21m_3rdparty_in1k_20221021-5346ba34.pth) | +| `tinyvit-21m_in21k-distill-pre_3rdparty_in1k`\* | ImageNet-21k DISTILL | 21.20 | 4.30 | 84.85 | 97.27 | [config](tinyvit-21m-distill_8xb256_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-21m_in21k-distill-pre_3rdparty_in1k_20221021-3d9b30a2.pth) | +| `tinyvit-21m_in21k-distill-pre_3rdparty_in1k-384px`\* | ImageNet-21k DISTILL | 21.23 | 13.85 | 86.21 | 97.77 | [config](tinyvit-21m-distill_8xb256_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-21m_in21k-distill-pre_3rdparty_in1k-384px_20221021-65be6b3f.pth) | +| `tinyvit-21m_in21k-distill-pre_3rdparty_in1k-512px`\* | ImageNet-21k DISTILL | 21.27 | 27.15 | 86.44 | 97.89 | [config](tinyvit-21m-distill_8xb256_in1k-512px.py) | [model](https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-21m_in21k-distill-pre_3rdparty_in1k-512px_20221021-e42a9bea.pth) | + +*Models with * are converted from the [official 
repo](https://github.com/microsoft/Cream/tree/main/TinyViT). The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@InProceedings{tiny_vit, + title={TinyViT: Fast Pretraining Distillation for Small Vision Transformers}, + author={Wu, Kan and Zhang, Jinnian and Peng, Houwen and Liu, Mengchen and Xiao, Bin and Fu, Jianlong and Yuan, Lu}, + booktitle={European conference on computer vision (ECCV)}, + year={2022} +} +``` diff --git a/configs/tinyvit/metafile.yml b/configs/tinyvit/metafile.yml new file mode 100644 index 0000000..a1c5438 --- /dev/null +++ b/configs/tinyvit/metafile.yml @@ -0,0 +1,162 @@ +Collections: + - Name: TinyViT + Metadata: + Training Data: ImageNet-1k + Architecture: + - MBConv + - Window Multi-head Self-Attention + Paper: + Title: 'TinyViT: Fast Pretraining Distillation for Small Vision Transformers' + URL: https://arxiv.org/abs/2207.10666 + README: configs/tinyvit/README.md + Code: + Version: v1.0.0rc1 + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.23.2/mmcls/models/backbones/tinyvit.py + +Models: + - Name: tinyvit-5m_3rdparty_in1k + Metadata: + FLOPs: 1286655360 + Parameters: 5392764 + Training Data: ImageNet-1k + In Collection: TinyViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.02 + Top 5 Accuracy: 94.74 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-5m_3rdparty_in1k_20221021-62cb5abf.pth + Config: configs/tinyvit/tinyvit-5m_8xb256_in1k.py + Converted From: + Weights: https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_1k.pth + Code: https://github.com/microsoft/Cream/tree/main/TinyViT + - Name: tinyvit-5m_in21k-distill-pre_3rdparty_in1k + Metadata: + FLOPs: 1286655360 + Parameters: 5392764 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: TinyViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.71 + Top 5 Accuracy: 95.57 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-5m_in21k-distill-pre_3rdparty_in1k_20221021-d4b010a8.pth + Config: configs/tinyvit/tinyvit-5m-distill_8xb256_in1k.py + Converted From: + Weights: https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_5m_22kto1k_distill.pth + Code: https://github.com/microsoft/Cream/tree/main/TinyViT + - Name: tinyvit-11m_3rdparty_in1k + Metadata: + FLOPs: 2050033664 + Parameters: 10996972 + Training Data: ImageNet-1k + In Collection: TinyViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.44 + Top 5 Accuracy: 95.79 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-11m_3rdparty_in1k_20221021-11ccef16.pth + Config: configs/tinyvit/tinyvit-11m_8xb256_in1k.py + Converted From: + Weights: https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_1k.pth + Code: https://github.com/microsoft/Cream/tree/main/TinyViT + - Name: tinyvit-11m_in21k-distill-pre_3rdparty_in1k + Metadata: + FLOPs: 2050033664 + Parameters: 10996972 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: TinyViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.19 + Top 5 Accuracy: 96.53 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-11m_in21k-distill-pre_3rdparty_in1k_20221021-5d3bc0dc.pth + Config: 
configs/tinyvit/tinyvit-11m-distill_8xb256_in1k.py + Converted From: + Weights: https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_11m_22kto1k_distill.pth + Code: https://github.com/microsoft/Cream/tree/main/TinyViT + - Name: tinyvit-21m_3rdparty_in1k + Metadata: + FLOPs: 4301124096 + Parameters: 21198568 + Training Data: ImageNet-1k + In Collection: TinyViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.08 + Top 5 Accuracy: 96.58 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-21m_3rdparty_in1k_20221021-5346ba34.pth + Config: configs/tinyvit/tinyvit-21m_8xb256_in1k.py + Converted From: + Weights: https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_1k.pth + Code: https://github.com/microsoft/Cream/tree/main/TinyViT + - Name: tinyvit-21m_in21k-distill-pre_3rdparty_in1k + Metadata: + FLOPs: 4301124096 + Parameters: 21198568 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: TinyViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.85 + Top 5 Accuracy: 97.27 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-21m_in21k-distill-pre_3rdparty_in1k_20221021-3d9b30a2.pth + Config: configs/tinyvit/tinyvit-21m-distill_8xb256_in1k.py + Converted From: + Weights: https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_distill.pth + Code: https://github.com/microsoft/Cream/tree/main/TinyViT + - Name: tinyvit-21m_in21k-distill-pre_3rdparty_in1k-384px + Metadata: + FLOPs: 13848250176 + Parameters: 21230488 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: TinyViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.21 + Top 5 Accuracy: 97.77 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-21m_in21k-distill-pre_3rdparty_in1k-384px_20221021-65be6b3f.pth + Config: configs/tinyvit/tinyvit-21m-distill_8xb256_in1k-384px.py + Converted From: + Weights: https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_384_distill.pth + Code: https://github.com/microsoft/Cream/tree/main/TinyViT + - Name: tinyvit-21m_in21k-distill-pre_3rdparty_in1k-512px + Metadata: + FLOPs: 27151420224 + Parameters: 21268120 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: TinyViT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.44 + Top 5 Accuracy: 97.89 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/tinyvit/tinyvit-21m_in21k-distill-pre_3rdparty_in1k-512px_20221021-e42a9bea.pth + Config: configs/tinyvit/tinyvit-21m-distill_8xb256_in1k-512px.py + Converted From: + Weights: https://github.com/wkcn/TinyViT-model-zoo/releases/download/checkpoints/tiny_vit_21m_22kto1k_512_distill.pth + Code: https://github.com/microsoft/Cream/tree/main/TinyViT diff --git a/configs/tinyvit/tinyvit-11m-distill_8xb256_in1k.py b/configs/tinyvit/tinyvit-11m-distill_8xb256_in1k.py new file mode 100644 index 0000000..145feb9 --- /dev/null +++ b/configs/tinyvit/tinyvit-11m-distill_8xb256_in1k.py @@ -0,0 +1,3 @@ +_base_ = [ + './tinyvit-11m_8xb256_in1k.py', +] diff --git a/configs/tinyvit/tinyvit-11m_8xb256_in1k.py b/configs/tinyvit/tinyvit-11m_8xb256_in1k.py new file mode 100644 index 0000000..f3acfa8 --- /dev/null +++ b/configs/tinyvit/tinyvit-11m_8xb256_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + 
'../_base_/datasets/imagenet_bs32_pil_bicubic.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', + '../_base_/models/tinyvit/tinyvit-11m.py', +] diff --git a/configs/tinyvit/tinyvit-21m-distill_8xb256_in1k-384px.py b/configs/tinyvit/tinyvit-21m-distill_8xb256_in1k-384px.py new file mode 100644 index 0000000..44e51b1 --- /dev/null +++ b/configs/tinyvit/tinyvit-21m-distill_8xb256_in1k-384px.py @@ -0,0 +1,29 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_pil_bicubic.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', + '../_base_/models/tinyvit/tinyvit-21m.py', +] + +# model settings +model = dict( + backbone=dict( + img_size=(384, 384), + window_size=[12, 12, 24, 12], + drop_path_rate=0.1, + )) + +# data settings +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(384, 384), + backend='pillow', + interpolation='bicubic'), + dict(type='PackInputs'), +] + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +test_dataloader = val_dataloader diff --git a/configs/tinyvit/tinyvit-21m-distill_8xb256_in1k-512px.py b/configs/tinyvit/tinyvit-21m-distill_8xb256_in1k-512px.py new file mode 100644 index 0000000..05b47c6 --- /dev/null +++ b/configs/tinyvit/tinyvit-21m-distill_8xb256_in1k-512px.py @@ -0,0 +1,28 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_pil_bicubic.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', + '../_base_/models/tinyvit/tinyvit-21m.py', +] + +# model settings +model = dict( + backbone=dict( + img_size=(512, 512), + window_size=[16, 16, 32, 16], + drop_path_rate=0.1, + )) +# data settings +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='Resize', + scale=(512, 512), + backend='pillow', + interpolation='bicubic'), + dict(type='PackInputs'), +] + +val_dataloader = dict(batch_size=16, dataset=dict(pipeline=test_pipeline)) + +test_dataloader = val_dataloader diff --git a/configs/tinyvit/tinyvit-21m-distill_8xb256_in1k.py b/configs/tinyvit/tinyvit-21m-distill_8xb256_in1k.py new file mode 100644 index 0000000..5388585 --- /dev/null +++ b/configs/tinyvit/tinyvit-21m-distill_8xb256_in1k.py @@ -0,0 +1,3 @@ +_base_ = [ + './tinyvit-21m_8xb256_in1k.py', +] diff --git a/configs/tinyvit/tinyvit-21m_8xb256_in1k.py b/configs/tinyvit/tinyvit-21m_8xb256_in1k.py new file mode 100644 index 0000000..6c12019 --- /dev/null +++ b/configs/tinyvit/tinyvit-21m_8xb256_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_pil_bicubic.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', + '../_base_/models/tinyvit/tinyvit-21m.py', +] diff --git a/configs/tinyvit/tinyvit-5m-distill_8xb256_in1k.py b/configs/tinyvit/tinyvit-5m-distill_8xb256_in1k.py new file mode 100644 index 0000000..0003c30 --- /dev/null +++ b/configs/tinyvit/tinyvit-5m-distill_8xb256_in1k.py @@ -0,0 +1,3 @@ +_base_ = [ + './tinyvit-5m_8xb256_in1k.py', +] diff --git a/configs/tinyvit/tinyvit-5m_8xb256_in1k.py b/configs/tinyvit/tinyvit-5m_8xb256_in1k.py new file mode 100644 index 0000000..262b5a4 --- /dev/null +++ b/configs/tinyvit/tinyvit-5m_8xb256_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs32_pil_bicubic.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', + '../_base_/models/tinyvit/tinyvit-5m.py', +] diff --git a/configs/tnt/README.md b/configs/tnt/README.md new file mode 100644 index 0000000..e86da0b --- /dev/null +++ 
b/configs/tnt/README.md @@ -0,0 +1,77 @@ +# Transformer in Transformer + +> [Transformer in Transformer](https://arxiv.org/abs/2103.00112) + + + +## Abstract + +Transformer is a new kind of neural architecture which encodes the input data as powerful features via the attention mechanism. Basically, visual transformers first divide the input images into several local patches and then calculate both their representations and their relationships. Since natural images are of high complexity with abundant detail and color information, the granularity of the patch division is not fine enough to excavate features of objects at different scales and locations. In this paper, we point out that the attention inside these local patches is also essential for building visual transformers with high performance, and we explore a new architecture, namely, Transformer iN Transformer (TNT). Specifically, we regard the local patches (e.g., 16×16) as "visual sentences" and propose to further divide them into smaller patches (e.g., 4×4) as "visual words". The attention of each word is calculated with the other words in the given visual sentence at negligible computational cost. Features of both words and sentences are aggregated to enhance the representation ability. Experiments on several benchmarks demonstrate the effectiveness of the proposed TNT architecture, e.g., we achieve an 81.5% top-1 accuracy on ImageNet, which is about 1.7% higher than that of the state-of-the-art visual transformer with a similar computational cost. + +
+ +
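The sentence/word decomposition can be made concrete with a toy block: an inner transformer layer attends among the 4×4 "visual words" inside each 16×16 "visual sentence", the word features are folded back into the sentence embedding, and an outer layer then attends among sentences. This sketch uses plain `torch.nn.TransformerEncoderLayer` modules and assumed dimensions; it is not the TNT backbone shipped in this repository.

```python
# Toy sentence/word block in the spirit of TNT (illustrative dimensions).
import torch
import torch.nn as nn


class ToyTNTBlock(nn.Module):
    def __init__(self, sentence_dim=384, word_dim=24, words_per_sentence=16):
        super().__init__()
        # inner block: attention among the "visual words" of one sentence
        self.inner = nn.TransformerEncoderLayer(word_dim, nhead=4, batch_first=True)
        # outer block: attention among the "visual sentences" (patches)
        self.outer = nn.TransformerEncoderLayer(sentence_dim, nhead=6, batch_first=True)
        # fold the aggregated word features back into the sentence embedding
        self.proj = nn.Linear(words_per_sentence * word_dim, sentence_dim)

    def forward(self, sentences, words):
        # sentences: (B, num_patches, sentence_dim)
        # words:     (B, num_patches, words_per_sentence, word_dim)
        b, n, w, d = words.shape
        words = self.inner(words.reshape(b * n, w, d)).reshape(b, n, w, d)
        sentences = sentences + self.proj(words.flatten(2))
        return self.outer(sentences), words


block = ToyTNTBlock()
sentences = torch.randn(2, 196, 384)  # 14x14 patches of a 224x224 image
words = torch.randn(2, 196, 16, 24)   # each patch further split into 4x4 sub-patches
sentences, words = block(sentences, words)
print(sentences.shape, words.shape)
```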
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('tnt-small-p16_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('tnt-small-p16_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/tnt/tnt-s-p16_16xb64_in1k.py https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :--------------------------------: | :------------------------------------------------------------------------------------: | +| `tnt-small-p16_3rdparty_in1k`\* | From scratch | 23.76 | 3.36 | 81.52 | 95.73 | [config](tnt-s-p16_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth) | + +*Models with * are converted from the [official repo](https://github.com/contrastive/pytorch-image-models/blob/809271b0f3e5d9be4e11c0c5cec1dbba8b5e2c60/timm/models/tnt.py#L144). The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@misc{han2021transformer, + title={Transformer in Transformer}, + author={Kai Han and An Xiao and Enhua Wu and Jianyuan Guo and Chunjing Xu and Yunhe Wang}, + year={2021}, + eprint={2103.00112}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/tnt/metafile.yml b/configs/tnt/metafile.yml new file mode 100644 index 0000000..dcc2edd --- /dev/null +++ b/configs/tnt/metafile.yml @@ -0,0 +1,29 @@ +Collections: + - Name: Transformer in Transformer + Metadata: + Training Data: ImageNet-1k + Paper: + URL: https://arxiv.org/abs/2103.00112 + Title: "Transformer in Transformer" + README: configs/tnt/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/tnt.py#L203 + Version: v0.15.0 + +Models: + - Name: tnt-small-p16_3rdparty_in1k + Metadata: + FLOPs: 3360000000 + Parameters: 23760000 + In Collection: Transformer in Transformer + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.52 + Top 5 Accuracy: 95.73 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-c56ee7df.pth + Config: configs/tnt/tnt-s-p16_16xb64_in1k.py + Converted From: + Weights: https://github.com/contrastive/pytorch-image-models/releases/download/TNT/tnt_s_patch16_224.pth.tar + Code: https://github.com/contrastive/pytorch-image-models/blob/809271b0f3e5d9be4e11c0c5cec1dbba8b5e2c60/timm/models/tnt.py#L144 diff --git a/configs/tnt/tnt-s-p16_16xb64_in1k.py b/configs/tnt/tnt-s-p16_16xb64_in1k.py new file mode 100644 index 0000000..af71232 --- /dev/null +++ b/configs/tnt/tnt-s-p16_16xb64_in1k.py @@ -0,0 +1,56 @@ +# accuracy_top-1 : 81.52 accuracy_top-5 : 95.73 
+_base_ = [ + '../_base_/models/tnt_s_patch16_224.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/default_runtime.py' +] + +# dataset settings +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=248, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(batch_size=64) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +optim_wrapper = dict(optimizer=dict(type='AdamW', lr=1e-3, weight_decay=0.05)) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type='CosineAnnealingLR', T_max=295, by_epoch=True, begin=5, end=300) +] + +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (16 GPUs) x (64 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/configs/twins/README.md b/configs/twins/README.md new file mode 100644 index 0000000..9e97b78 --- /dev/null +++ b/configs/twins/README.md @@ -0,0 +1,80 @@ +# Twins + +> [Twins: Revisiting the Design of Spatial Attention in Vision Transformers](http://arxiv-export-lb.library.cornell.edu/abs/2104.13840) + + + +## Abstract + +Very recently, a variety of vision transformer architectures for dense prediction tasks have been proposed and they show that the design of spatial attention is critical to their success in these tasks. In this work, we revisit the design of the spatial attention and demonstrate that a carefully-devised yet simple spatial attention mechanism performs favourably against the state-of-the-art schemes. As a result, we propose two vision transformer architectures, namely, Twins-PCPVT and Twins-SVT. Our proposed architectures are highly-efficient and easy to implement, only involving matrix multiplications that are highly optimized in modern deep learning frameworks. More importantly, the proposed architectures achieve excellent performance on a wide range of visual tasks, including image level classification as well as dense detection and segmentation. The simplicity and strong performance suggest that our proposed architectures may serve as stronger backbones for many vision tasks. Our code is released at [this https URL](https://github.com/Meituan-AutoML/Twins). + +
+ +
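One half of that "simple spatial attention" is global sub-sampled attention: queries are taken at full resolution, while keys and values come from a spatially sub-sampled copy of the feature map, so everything stays plain matrix multiplication but runs over far fewer key/value tokens. The sketch below is an assumed, simplified version built on `nn.MultiheadAttention`, not the Twins implementation in this repository.

```python
# Illustrative global sub-sampled attention (GSA) sketch; sr_ratio and dims are assumed.
import torch
import torch.nn as nn


class ToyGSA(nn.Module):
    def __init__(self, dim=64, num_heads=4, sr_ratio=4):
        super().__init__()
        # strided convolution sub-samples the keys/values by sr_ratio in each direction
        self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
        self.attn = nn.MultiheadAttention(dim, num_heads, batch_first=True)

    def forward(self, x):
        # x: (B, C, H, W) feature map
        b, c, h, w = x.shape
        q = x.flatten(2).transpose(1, 2)            # (B, H*W, C) full-resolution queries
        kv = self.sr(x).flatten(2).transpose(1, 2)  # (B, H*W / sr_ratio**2, C)
        out, _ = self.attn(q, kv, kv)               # attend over the reduced token set
        return out.transpose(1, 2).reshape(b, c, h, w)


gsa = ToyGSA()
print(gsa(torch.randn(2, 64, 28, 28)).shape)  # torch.Size([2, 64, 28, 28])
```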
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('twins-pcpvt-small_3rdparty_8xb128_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('twins-pcpvt-small_3rdparty_8xb128_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/twins/twins-pcpvt-small_8xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-small_3rdparty_8xb128_in1k_20220126-ef23c132.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------------: | :-----------------------------------------------------------------: | +| `twins-pcpvt-small_3rdparty_8xb128_in1k`\* | From scratch | 24.11 | 3.67 | 81.14 | 95.69 | [config](twins-pcpvt-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-small_3rdparty_8xb128_in1k_20220126-ef23c132.pth) | +| `twins-pcpvt-base_3rdparty_8xb128_in1k`\* | From scratch | 43.83 | 6.45 | 82.66 | 96.26 | [config](twins-pcpvt-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-base_3rdparty_8xb128_in1k_20220126-f8c4b0d5.pth) | +| `twins-pcpvt-large_3rdparty_16xb64_in1k`\* | From scratch | 60.99 | 9.51 | 83.09 | 96.59 | [config](twins-pcpvt-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-large_3rdparty_16xb64_in1k_20220126-c1ef8d80.pth) | +| `twins-svt-small_3rdparty_8xb128_in1k`\* | From scratch | 24.06 | 2.82 | 81.77 | 95.57 | [config](twins-svt-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-small_3rdparty_8xb128_in1k_20220126-8fe5205b.pth) | +| `twins-svt-base_8xb128_3rdparty_in1k`\* | From scratch | 56.07 | 8.35 | 83.13 | 96.29 | [config](twins-svt-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-base_3rdparty_8xb128_in1k_20220126-e31cc8e9.pth) | +| `twins-svt-large_3rdparty_16xb64_in1k`\* | From scratch | 99.27 | 14.82 | 83.60 | 96.50 | [config](twins-svt-large_16xb64_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-large_3rdparty_16xb64_in1k_20220126-4817645f.pth) | + +*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{chu2021twins, + title={Twins: Revisiting spatial attention design in vision transformers}, + author={Chu, Xiangxiang and Tian, Zhi and Wang, Yuqing and Zhang, Bo and Ren, Haibing and Wei, Xiaolin and Xia, Huaxia and Shen, Chunhua}, + journal={arXiv preprint arXiv:2104.13840}, + year={2021}altgvt +} +``` diff --git a/configs/twins/metafile.yml b/configs/twins/metafile.yml new file mode 100644 index 0000000..d0d8ff4 --- /dev/null +++ b/configs/twins/metafile.yml @@ -0,0 +1,114 @@ +Collections: + - Name: Twins + Metadata: + Training Data: ImageNet-1k + Architecture: + - Global Subsampled Attention + - Locally Grouped SelfAttention + - Conditional Position Encoding + - Pyramid Vision Transformer + Paper: + URL: http://arxiv-export-lb.library.cornell.edu/abs/2104.13840 + Title: "Twins: Revisiting the Design of Spatial Attention in Vision Transformers" + README: configs/twins/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.20.1/mmcls/models/backbones/twins.py + Version: v0.20.1 + +Models: + - Name: twins-pcpvt-small_3rdparty_8xb128_in1k + Metadata: + FLOPs: 3670000000 # 3.67G + Parameters: 24110000 # 24.11M + In Collection: Twins + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.14 + Top 5 Accuracy: 95.69 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-small_3rdparty_8xb128_in1k_20220126-ef23c132.pth + Config: configs/twins/twins-pcpvt-small_8xb128_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py + - Name: twins-pcpvt-base_3rdparty_8xb128_in1k + Metadata: + FLOPs: 6450000000 # 6.45G + Parameters: 43830000 # 43.83M + In Collection: Twins + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.66 + Top 5 Accuracy: 96.26 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-base_3rdparty_8xb128_in1k_20220126-f8c4b0d5.pth + Config: configs/twins/twins-pcpvt-base_8xb128_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py + - Name: twins-pcpvt-large_3rdparty_16xb64_in1k + Metadata: + FLOPs: 9510000000 # 9.51G + Parameters: 60990000 # 60.99M + In Collection: Twins + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.09 + Top 5 Accuracy: 96.59 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-large_3rdparty_16xb64_in1k_20220126-c1ef8d80.pth + Config: configs/twins/twins-pcpvt-large_16xb64_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py + - Name: twins-svt-small_3rdparty_8xb128_in1k + Metadata: + FLOPs: 2820000000 # 2.82G + Parameters: 24060000 # 24.06M + In Collection: Twins + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.77 + Top 5 Accuracy: 95.57 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-small_3rdparty_8xb128_in1k_20220126-8fe5205b.pth + Config: configs/twins/twins-svt-small_8xb128_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py + - Name: twins-svt-base_8xb128_3rdparty_in1k + Metadata: + FLOPs: 8350000000 # 8.35G + Parameters: 56070000 # 56.07M + In Collection: Twins + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.13 + Top 5 Accuracy: 96.29 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-base_3rdparty_8xb128_in1k_20220126-e31cc8e9.pth + Config: configs/twins/twins-svt-base_8xb128_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py + - Name: twins-svt-large_3rdparty_16xb64_in1k + Metadata: + FLOPs: 14820000000 # 14.82G + Parameters: 99270000 # 99.27M + In Collection: Twins + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.60 + Top 5 Accuracy: 96.50 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-large_3rdparty_16xb64_in1k_20220126-4817645f.pth + Config: configs/twins/twins-svt-large_16xb64_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vt3p-weights/twins_pcpvt_small-e70e7e7a.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/twins.py diff --git a/configs/twins/twins-pcpvt-base_8xb128_in1k.py b/configs/twins/twins-pcpvt-base_8xb128_in1k.py new file mode 100644 index 0000000..3ac5d2a --- /dev/null +++ b/configs/twins/twins-pcpvt-base_8xb128_in1k.py @@ -0,0 +1,41 @@ +_base_ = [ + '../_base_/models/twins_pcpvt_base.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# dataset settings +train_dataloader = dict(batch_size=128) + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4 * 128 * 8 / 512, # learning rate for 128 batch size, 8 gpu. 
+ weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict(_delete=True, norm_decay_mult=0.0, bias_decay_mult=0.0), + clip_grad=dict(max_norm=5.0), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=295, + eta_min=1e-5, + by_epoch=True, + begin=5, + end=300) +] diff --git a/configs/twins/twins-pcpvt-large_16xb64_in1k.py b/configs/twins/twins-pcpvt-large_16xb64_in1k.py new file mode 100644 index 0000000..0acfd75 --- /dev/null +++ b/configs/twins/twins-pcpvt-large_16xb64_in1k.py @@ -0,0 +1,7 @@ +_base_ = ['twins-pcpvt-base_8xb128_in1k.py'] + +# model settings +model = dict(backbone=dict(arch='large'), head=dict(in_channels=512)) + +# dataset settings +train_dataloader = dict(batch_size=64) diff --git a/configs/twins/twins-pcpvt-small_8xb128_in1k.py b/configs/twins/twins-pcpvt-small_8xb128_in1k.py new file mode 100644 index 0000000..9fe763b --- /dev/null +++ b/configs/twins/twins-pcpvt-small_8xb128_in1k.py @@ -0,0 +1,4 @@ +_base_ = ['twins-pcpvt-base_8xb128_in1k.py'] + +# model settings +model = dict(backbone=dict(arch='small'), head=dict(in_channels=512)) diff --git a/configs/twins/twins-svt-base_8xb128_in1k.py b/configs/twins/twins-svt-base_8xb128_in1k.py new file mode 100644 index 0000000..1d24f63 --- /dev/null +++ b/configs/twins/twins-svt-base_8xb128_in1k.py @@ -0,0 +1,41 @@ +_base_ = [ + '../_base_/models/twins_svt_base.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# dataset settings +train_dataloader = dict(batch_size=128) + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=5e-4 * 128 * 8 / 512, # learning rate for 128 batch size, 8 gpu. 
+ weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict(_delete=True, norm_decay_mult=0.0, bias_decay_mult=0.0), + clip_grad=dict(max_norm=5.0), +) + +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + start_factor=1e-3, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=295, + eta_min=1e-5, + by_epoch=True, + begin=5, + end=300) +] diff --git a/configs/twins/twins-svt-large_16xb64_in1k.py b/configs/twins/twins-svt-large_16xb64_in1k.py new file mode 100644 index 0000000..e8a1eba --- /dev/null +++ b/configs/twins/twins-svt-large_16xb64_in1k.py @@ -0,0 +1,7 @@ +_base_ = ['twins-svt-base_8xb128_in1k.py'] + +# model settings +model = dict(backbone=dict(arch='large'), head=dict(in_channels=1024)) + +# dataset settings +train_dataloader = dict(batch_size=64) diff --git a/configs/twins/twins-svt-small_8xb128_in1k.py b/configs/twins/twins-svt-small_8xb128_in1k.py new file mode 100644 index 0000000..2ffe267 --- /dev/null +++ b/configs/twins/twins-svt-small_8xb128_in1k.py @@ -0,0 +1,4 @@ +_base_ = ['twins-svt-base_8xb128_in1k.py'] + +# model settings +model = dict(backbone=dict(arch='small'), head=dict(in_channels=512)) diff --git a/configs/van/README.md b/configs/van/README.md new file mode 100644 index 0000000..7e548b6 --- /dev/null +++ b/configs/van/README.md @@ -0,0 +1,78 @@ +# Visual-Attention-Network + +> [Visual Attention Network](https://arxiv.org/abs/2202.09741) + + + +## Abstract + +While originally designed for natural language processing (NLP) tasks, the self-attention mechanism has recently taken various computer vision areas by storm. However, the 2D nature of images brings three challenges for applying self-attention in computer vision. (1) Treating images as 1D sequences neglects their 2D structures. (2) The quadratic complexity is too expensive for high-resolution images. (3) It only captures spatial adaptability but ignores channel adaptability. In this paper, we propose a novel large kernel attention (LKA) module to enable self-adaptive and long-range correlations in self-attention while avoiding the above issues. We further introduce a novel neural network based on LKA, namely Visual Attention Network (VAN). While extremely simple and efficient, VAN outperforms the state-of-the-art vision transformers and convolutional neural networks with a large margin in extensive experiments, including image classification, object detection, semantic segmentation, instance segmentation, etc. + +
+ +
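The large kernel attention (LKA) module can be sketched in a few lines: a large-kernel convolution is decomposed into a depth-wise convolution, a depth-wise dilated convolution and a pointwise convolution, and the result is used as an attention map that multiplies the input element-wise. The kernel sizes below follow the 21×21 decomposition described in the paper (5×5 depth-wise + 7×7 dilated depth-wise with dilation 3 + 1×1), but this is a simplified sketch rather than the `VAN` backbone code in this repository.

```python
# Simplified LKA sketch (kernel sizes as described in the paper); illustrative only.
import torch
import torch.nn as nn


class ToyLKA(nn.Module):
    def __init__(self, dim=64):
        super().__init__()
        # 5x5 depth-wise conv captures local structure
        self.dw = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
        # 7x7 depth-wise dilated conv (dilation 3) enlarges the receptive field to ~21x21
        self.dw_dilated = nn.Conv2d(dim, dim, 7, padding=9, dilation=3, groups=dim)
        # 1x1 conv mixes channels (channel adaptability)
        self.pw = nn.Conv2d(dim, dim, 1)

    def forward(self, x):
        attn = self.pw(self.dw_dilated(self.dw(x)))
        return x * attn  # attention: element-wise reweighting of the input


lka = ToyLKA()
print(lka(torch.randn(2, 64, 56, 56)).shape)  # torch.Size([2, 64, 56, 56])
```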
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('van-tiny_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('van-tiny_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/van/van-tiny_8xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/van/van-tiny_8xb128_in1k_20220501-385941af.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :-------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :--------------------------------: | :----------------------------------------------------------------------------------------: | +| `van-tiny_3rdparty_in1k`\* | From scratch | 4.11 | 0.88 | 75.41 | 93.02 | [config](van-tiny_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-tiny_8xb128_in1k_20220501-385941af.pth) | +| `van-small_3rdparty_in1k`\* | From scratch | 13.86 | 2.52 | 81.01 | 95.63 | [config](van-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-small_8xb128_in1k_20220501-17bc91aa.pth) | +| `van-base_3rdparty_in1k`\* | From scratch | 26.58 | 5.03 | 82.80 | 96.21 | [config](van-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-base_8xb128_in1k_20220501-6a4cc31b.pth) | +| `van-large_3rdparty_in1k`\* | From scratch | 44.77 | 8.99 | 83.86 | 96.73 | [config](van-large_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/van/van-large_8xb128_in1k_20220501-f212ba21.pth) | + +*Models with * are converted from the [official repo](https://github.com/Visual-Attention-Network/VAN-Classification). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{guo2022visual, + title={Visual Attention Network}, + author={Guo, Meng-Hao and Lu, Cheng-Ze and Liu, Zheng-Ning and Cheng, Ming-Ming and Hu, Shi-Min}, + journal={arXiv preprint arXiv:2202.09741}, + year={2022} +} +``` diff --git a/configs/van/metafile.yml b/configs/van/metafile.yml new file mode 100644 index 0000000..db5a6e6 --- /dev/null +++ b/configs/van/metafile.yml @@ -0,0 +1,82 @@ +Collections: + - Name: Visual-Attention-Network + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - AdamW + - Weight Decay + Architecture: + - Visual Attention Network + Paper: + URL: https://arxiv.org/abs/2202.09741 + Title: "Visual Attention Network" + README: configs/van/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.23.0/mmcls/models/backbones/van.py + Version: v0.23.0 + +Models: + - Name: van-tiny_3rdparty_in1k + Metadata: + Parameters: 4110000 # 4.11M + FLOPs: 880000000 # 0.88G + In Collection: Visual-Attention-Network + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 75.41 + Top 5 Accuracy: 93.02 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/van/van-tiny_8xb128_in1k_20220501-385941af.pth + Config: configs/van/van-tiny_8xb128_in1k.py + Converted From: + Code: https://github.com/Visual-Attention-Network/VAN-Classification + Weights: https://cloud.tsinghua.edu.cn/f/aada2242a16245d6a561/?dl=1 + - Name: van-small_3rdparty_in1k + Metadata: + Parameters: 13860000 # 13.86M + FLOPs: 2520000000 # 2.52G + In Collection: Visual-Attention-Network + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.01 + Top 5 Accuracy: 95.63 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/van/van-small_8xb128_in1k_20220501-17bc91aa.pth + Config: configs/van/van-small_8xb128_in1k.py + Converted From: + Code: https://github.com/Visual-Attention-Network/VAN-Classification + Weights: https://cloud.tsinghua.edu.cn/f/dd3eb73692f74a2499c9/?dl=1 + - Name: van-base_3rdparty_in1k + Metadata: + Parameters: 26580000 # 26.58M + FLOPs: 5030000000 # 5.03G + In Collection: Visual-Attention-Network + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.80 + Top 5 Accuracy: 96.21 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/van/van-base_8xb128_in1k_20220501-6a4cc31b.pth + Config: configs/van/van-base_8xb128_in1k.py + Converted From: + Code: https://github.com/Visual-Attention-Network/VAN-Classification + Weights: https://cloud.tsinghua.edu.cn/f/58e7acceaf334ecdba89/?dl=1 + - Name: van-large_3rdparty_in1k + Metadata: + Parameters: 44770000 # 44.77 M + FLOPs: 8990000000 # 8.99G + In Collection: Visual-Attention-Network + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.86 + Top 5 Accuracy: 96.73 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/van/van-large_8xb128_in1k_20220501-f212ba21.pth + Config: configs/van/van-large_8xb128_in1k.py + Converted From: + Code: https://github.com/Visual-Attention-Network/VAN-Classification + Weights: https://cloud.tsinghua.edu.cn/f/0201745f6920482490a0/?dl=1 diff --git a/configs/van/van-base_8xb128_in1k.py b/configs/van/van-base_8xb128_in1k.py new file mode 100644 index 0000000..47082b7 --- /dev/null +++ b/configs/van/van-base_8xb128_in1k.py @@ -0,0 +1,65 @@ +_base_ = [ + '../_base_/models/van/van_base.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + 
'../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +# dataset setting +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=248, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline), batch_size=128) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/configs/van/van-large_8xb128_in1k.py b/configs/van/van-large_8xb128_in1k.py new file mode 100644 index 0000000..b165677 --- /dev/null +++ b/configs/van/van-large_8xb128_in1k.py @@ -0,0 +1,65 @@ +_base_ = [ + '../_base_/models/van/van_large.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# dataset setting +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=248, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline), batch_size=128) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/configs/van/van-small_8xb128_in1k.py b/configs/van/van-small_8xb128_in1k.py new file mode 100644 index 0000000..bbbbbdf --- /dev/null +++ 
b/configs/van/van-small_8xb128_in1k.py @@ -0,0 +1,65 @@ +_base_ = [ + '../_base_/models/van/van_small.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# dataset setting +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=248, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline), batch_size=128) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/configs/van/van-tiny_8xb128_in1k.py b/configs/van/van-tiny_8xb128_in1k.py new file mode 100644 index 0000000..2ac62da --- /dev/null +++ b/configs/van/van-tiny_8xb128_in1k.py @@ -0,0 +1,65 @@ +_base_ = [ + '../_base_/models/van/van_tiny.py', + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# dataset setting +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=248, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline), batch_size=128) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) 
diff --git a/configs/vgg/README.md b/configs/vgg/README.md new file mode 100644 index 0000000..7af69ce --- /dev/null +++ b/configs/vgg/README.md @@ -0,0 +1,86 @@ +# VGG + +> [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556) + + + +## Abstract + +In this work we investigate the effect of the convolutional network depth on its accuracy in the large-scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing depth using an architecture with very small (3x3) convolution filters, which shows that a significant improvement on the prior-art configurations can be achieved by pushing the depth to 16-19 weight layers. These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first and the second places in the localisation and classification tracks respectively. We also show that our representations generalise well to other datasets, where they achieve state-of-the-art results. We have made our two best-performing ConvNet models publicly available to facilitate further research on the use of deep visual representations in computer vision. + +
+ +
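The core recipe, stages of stacked 3×3 convolutions separated by 2×2 max-pooling, is easy to sketch: two stacked 3×3 layers cover the same 5×5 receptive field as a single larger kernel while using fewer parameters and adding an extra non-linearity. The helper below is a toy illustration of such a stage, not the `VGG` backbone these configs actually use.

```python
# Toy VGG-style stage: stacked 3x3 convolutions + ReLU, then 2x2 max-pooling.
import torch
import torch.nn as nn


def vgg_stage(in_channels: int, out_channels: int, num_convs: int) -> nn.Sequential:
    layers = []
    for i in range(num_convs):
        layers += [
            nn.Conv2d(in_channels if i == 0 else out_channels, out_channels,
                      kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        ]
    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
    return nn.Sequential(*layers)


# e.g. the first two stages of a VGG-16-like network
stem = nn.Sequential(vgg_stage(3, 64, 2), vgg_stage(64, 128, 2))
print(stem(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 128, 56, 56])
```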
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('vgg11_8xb32_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('vgg11_8xb32_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/vgg/vgg11_8xb32_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/vgg/vgg11_8xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :-----------------------------: | :--------------------------------------------------------------------------------------------------: | +| `vgg11_8xb32_in1k` | From scratch | 132.86 | 7.63 | 68.75 | 88.87 | [config](vgg11_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.json) | +| `vgg13_8xb32_in1k` | From scratch | 133.05 | 11.34 | 70.02 | 89.46 | [config](vgg13_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.json) | +| `vgg16_8xb32_in1k` | From scratch | 138.36 | 15.50 | 71.62 | 90.49 | [config](vgg16_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.json) | +| `vgg19_8xb32_in1k` | From scratch | 143.67 | 19.67 | 72.41 | 90.80 | [config](vgg19_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.json) | +| `vgg11bn_8xb32_in1k` | From scratch | 132.87 | 7.64 | 70.67 | 90.16 | [config](vgg11bn_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.json) | +| `vgg13bn_8xb32_in1k` | From scratch | 133.05 | 11.36 | 72.12 | 90.66 | [config](vgg13bn_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.json) | +| `vgg16bn_8xb32_in1k` | From scratch | 138.37 | 15.53 | 73.74 | 91.66 | [config](vgg16bn_8xb32_in1k.py) | 
[model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.json) | +| `vgg19bn_8xb32_in1k` | From scratch | 143.68 | 19.70 | 74.68 | 92.27 | [config](vgg19bn_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.json) | + +## Citation + +```bibtex +@article{simonyan2014very, + title={Very deep convolutional networks for large-scale image recognition}, + author={Simonyan, Karen and Zisserman, Andrew}, + journal={arXiv preprint arXiv:1409.1556}, + year={2014} +} +``` diff --git a/configs/vgg/metafile.yml b/configs/vgg/metafile.yml new file mode 100644 index 0000000..ce3af19 --- /dev/null +++ b/configs/vgg/metafile.yml @@ -0,0 +1,125 @@ +Collections: + - Name: VGG + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x Xp GPUs + Epochs: 100 + Batch Size: 256 + Architecture: + - VGG + Paper: + URL: https://arxiv.org/abs/1409.1556 + Title: "Very Deep Convolutional Networks for Large-Scale Image Recognition" + README: configs/vgg/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.15.0/mmcls/models/backbones/vgg.py#L39 + Version: v0.15.0 + +Models: + - Name: vgg11_8xb32_in1k + Metadata: + FLOPs: 7630000000 + Parameters: 132860000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 68.75 + Top 5 Accuracy: 88.87 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth + Config: configs/vgg/vgg11_8xb32_in1k.py + - Name: vgg13_8xb32_in1k + Metadata: + FLOPs: 11340000000 + Parameters: 133050000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 70.02 + Top 5 Accuracy: 89.46 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth + Config: configs/vgg/vgg13_8xb32_in1k.py + - Name: vgg16_8xb32_in1k + Metadata: + FLOPs: 15500000000 + Parameters: 138360000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 71.62 + Top 5 Accuracy: 90.49 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth + Config: configs/vgg/vgg16_8xb32_in1k.py + - Name: vgg19_8xb32_in1k + Metadata: + FLOPs: 19670000000 + Parameters: 143670000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 72.41 + Top 5 Accuracy: 90.8 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth + Config: configs/vgg/vgg19_8xb32_in1k.py + - Name: vgg11bn_8xb32_in1k + Metadata: + FLOPs: 7640000000 + Parameters: 132870000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 70.67 + Top 5 Accuracy: 90.16 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth + Config: configs/vgg/vgg11bn_8xb32_in1k.py + - Name: vgg13bn_8xb32_in1k + Metadata: + FLOPs: 11360000000 + Parameters: 133050000 + In Collection: VGG + Results: + - Dataset: 
ImageNet-1k + Metrics: + Top 1 Accuracy: 72.12 + Top 5 Accuracy: 90.66 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth + Config: configs/vgg/vgg13bn_8xb32_in1k.py + - Name: vgg16bn_8xb32_in1k + Metadata: + FLOPs: 15530000000 + Parameters: 138370000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 73.74 + Top 5 Accuracy: 91.66 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth + Config: configs/vgg/vgg16bn_8xb32_in1k.py + - Name: vgg19bn_8xb32_in1k + Metadata: + FLOPs: 19700000000 + Parameters: 143680000 + In Collection: VGG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.68 + Top 5 Accuracy: 92.27 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth + Config: configs/vgg/vgg19bn_8xb32_in1k.py diff --git a/configs/vgg/vgg11_8xb32_in1k.py b/configs/vgg/vgg11_8xb32_in1k.py new file mode 100644 index 0000000..616233c --- /dev/null +++ b/configs/vgg/vgg11_8xb32_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/vgg11.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict(optimizer=dict(lr=0.01)) diff --git a/configs/vgg/vgg11bn_8xb32_in1k.py b/configs/vgg/vgg11bn_8xb32_in1k.py new file mode 100644 index 0000000..22f55ef --- /dev/null +++ b/configs/vgg/vgg11bn_8xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vgg11bn.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] diff --git a/configs/vgg/vgg13_8xb32_in1k.py b/configs/vgg/vgg13_8xb32_in1k.py new file mode 100644 index 0000000..ec1c98f --- /dev/null +++ b/configs/vgg/vgg13_8xb32_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/vgg13.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict(optimizer=dict(lr=0.01)) diff --git a/configs/vgg/vgg13bn_8xb32_in1k.py b/configs/vgg/vgg13bn_8xb32_in1k.py new file mode 100644 index 0000000..3cb3592 --- /dev/null +++ b/configs/vgg/vgg13bn_8xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vgg13bn.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] diff --git a/configs/vgg/vgg16_8xb16_voc.py b/configs/vgg/vgg16_8xb16_voc.py new file mode 100644 index 0000000..5d9e347 --- /dev/null +++ b/configs/vgg/vgg16_8xb16_voc.py @@ -0,0 +1,43 @@ +_base_ = [ + '../_base_/datasets/voc_bs16.py', + '../_base_/default_runtime.py', +] + +# model settings + +# load model pretrained on imagenet +pretrained = 'https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth' # noqa + +# use different head for multilabel task +model = dict( + type='ImageClassifier', + backbone=dict( + type='VGG', + depth=16, + num_classes=20, + init_cfg=dict( + type='Pretrained', checkpoint=pretrained, prefix='backbone')), + neck=None, + head=dict( + type='MultiLabelClsHead', + loss=dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))) + +# schedule settings +optim_wrapper = dict( + optimizer=dict(type='SGD', 
lr=0.001, momentum=0.9, weight_decay=0), + # update the final linear by 10 times learning rate. + paramwise_cfg=dict(custom_keys={'.backbone.classifier': dict(lr_mult=10)}), +) + +# learning policy +param_scheduler = dict(type='StepLR', by_epoch=True, step_size=20, gamma=0.1) + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=40, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (16 samples per GPU) +auto_scale_lr = dict(base_batch_size=128) diff --git a/configs/vgg/vgg16_8xb32_in1k.py b/configs/vgg/vgg16_8xb32_in1k.py new file mode 100644 index 0000000..a291da2 --- /dev/null +++ b/configs/vgg/vgg16_8xb32_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/vgg16.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict(optimizer=dict(lr=0.01)) diff --git a/configs/vgg/vgg16bn_8xb32_in1k.py b/configs/vgg/vgg16bn_8xb32_in1k.py new file mode 100644 index 0000000..f6bbb81 --- /dev/null +++ b/configs/vgg/vgg16bn_8xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vgg16bn.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] diff --git a/configs/vgg/vgg19_8xb32_in1k.py b/configs/vgg/vgg19_8xb32_in1k.py new file mode 100644 index 0000000..88cd24c --- /dev/null +++ b/configs/vgg/vgg19_8xb32_in1k.py @@ -0,0 +1,9 @@ +_base_ = [ + '../_base_/models/vgg19.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# schedule settings +optim_wrapper = dict(optimizer=dict(lr=0.01)) diff --git a/configs/vgg/vgg19bn_8xb32_in1k.py b/configs/vgg/vgg19bn_8xb32_in1k.py new file mode 100644 index 0000000..4b4f34a --- /dev/null +++ b/configs/vgg/vgg19bn_8xb32_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vgg19bn.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] diff --git a/configs/vig/README.md b/configs/vig/README.md new file mode 100644 index 0000000..624e387 --- /dev/null +++ b/configs/vig/README.md @@ -0,0 +1,81 @@ +# VIG + +> [Vision GNN: An Image is Worth Graph of Nodes](https://arxiv.org/abs/2206.00272) + + + +## Abstract + +Network architecture plays a key role in the deep learning-based computer vision system. The widely-used convolutional neural network and transformer treat the image as a grid or sequence structure, which is not flexible to capture irregular and complex objects. In this paper, we propose to represent the image as a graph structure and introduce a new Vision GNN (ViG) architecture to extract graph-level feature for visual tasks. We first split the image to a number of patches which are viewed as nodes, and construct a graph by connecting the nearest neighbors. Based on the graph representation of images, we build our ViG model to transform and exchange information among all the nodes. ViG consists of two basic modules: Grapher module with graph convolution for aggregating and updating graph information, and FFN module with two linear layers for node feature transformation. Both isotropic and pyramid architectures of ViG are built with different model sizes. 
Extensive experiments on image recognition and object detection tasks demonstrate the superiority of our ViG architecture. We hope this pioneering study of GNN on general visual tasks will provide useful inspiration and experience for future research. + +
+ +
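Reading the abstract above, the core operations are easy to prototype: patch features become graph nodes, edges come from k-nearest neighbours in feature space, and each block applies a graph aggregation followed by an FFN. The PyTorch sketch below only illustrates that idea under assumed sizes (196 nodes, 192-dim features, k=9) with a simple max-relative style aggregation; it is not the implementation used by these configs, which adds learnable projections, dilated neighbourhoods and other details.

```python
import torch
import torch.nn as nn


def knn_indices(x, k=9):
    """Indices of the k nearest neighbours of each node in feature space."""
    dist = torch.cdist(x, x)                                # (N, N) pairwise distances
    return dist.topk(k + 1, largest=False).indices[:, 1:]   # drop the node itself


def grapher_ffn_block(x, k=9):
    """One illustrative block: neighbour aggregation + a two-layer FFN."""
    idx = knn_indices(x, k)                                  # (N, k)
    neighbours = x[idx]                                      # (N, k, C)
    aggregated = (neighbours - x.unsqueeze(1)).max(dim=1).values
    x = x + aggregated                                       # "Grapher" update (residual)
    ffn = nn.Sequential(                                     # random weights, shapes only;
        nn.Linear(x.size(-1), 4 * x.size(-1)),               # a real block registers these
        nn.GELU(),                                           # layers in an nn.Module
        nn.Linear(4 * x.size(-1), x.size(-1)),
    )
    return x + ffn(x)                                        # FFN update (residual)


nodes = torch.rand(196, 192)            # e.g. 14x14 image patches with 192-dim features
print(grapher_ffn_block(nodes).shape)   # torch.Size([196, 192])
```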
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('vig-tiny_3rdparty_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('vig-tiny_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/vig/vig-tiny_8xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/vig/vig-tiny_3rdparty_in1k_20230117-6414c684.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------: | :------------------------------------------------------------------------------------: | +| `vig-tiny_3rdparty_in1k`\* | From scratch | 7.18 | 1.31 | 74.40 | 92.34 | [config](vig-tiny_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/vig-tiny_3rdparty_in1k_20230117-6414c684.pth) | +| `vig-small_3rdparty_in1k`\* | From scratch | 22.75 | 4.54 | 80.61 | 95.28 | [config](vig-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/vig-small_3rdparty_in1k_20230117-5338bf3b.pth) | +| `vig-base_3rdparty_in1k`\* | From scratch | 20.68 | 17.68 | 82.62 | 96.04 | [config](vig-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/vig-base_3rdparty_in1k_20230117-92f6f12f.pth) | +| `pvig-tiny_3rdparty_in1k`\* | From scratch | 9.46 | 1.71 | 78.38 | 94.38 | [config](pvig-tiny_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/pvig-tiny_3rdparty_in1k_20230117-eb77347d.pth) | +| `pvig-small_3rdparty_in1k`\* | From scratch | 29.02 | 4.57 | 82.00 | 95.97 | [config](pvig-small_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/pvig-small_3rdparty_in1k_20230117-9433dc96.pth) | +| `pvig-medium_3rdparty_in1k`\* | From scratch | 51.68 | 8.89 | 83.12 | 96.35 | [config](pvig-medium_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/pvig-medium_3rdparty_in1k_20230117-21057a6d.pth) | +| `pvig-base_3rdparty_in1k`\* | From scratch | 95.21 | 16.86 | 83.59 | 96.52 | [config](pvig-base_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vig/pvig-base_3rdparty_in1k_20230117-dbab3c85.pth) | + +*Models with * are converted from the [official repo](https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{han2022vig, + title={Vision GNN: An Image is Worth Graph of Nodes}, + author={Kai Han and Yunhe Wang and Jianyuan Guo and Yehui Tang and Enhua Wu}, + booktitle={NeurIPS}, + year={2022} +} +``` diff --git a/configs/vig/metafile.yml b/configs/vig/metafile.yml new file mode 100644 index 0000000..52bd18b --- /dev/null +++ b/configs/vig/metafile.yml @@ -0,0 +1,134 @@ +Collections: + - Name: VIG + Metadata: + Training Data: ImageNet-1k + Architecture: + - Vision GNN + Paper: + Title: 'Vision GNN: An Image is Worth Graph of Nodes' + URL: https://arxiv.org/abs/2206.00272 + README: configs/vig/README.md + Code: + URL: null + Version: null + +Models: + - Name: vig-tiny_3rdparty_in1k + Metadata: + FLOPs: 1309000000 + Parameters: 7185000 + Training Data: ImageNet-1k + In Collection: VIG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.40 + Top 5 Accuracy: 92.34 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vig/vig-tiny_3rdparty_in1k_20230117-6414c684.pth + Config: configs/vig/vig-tiny_8xb128_in1k.py + Converted From: + Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/vig/vig_ti_74.5.pth + Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch + - Name: vig-small_3rdparty_in1k + Metadata: + FLOPs: 4535000000 + Parameters: 22748000 + Training Data: ImageNet-1k + In Collection: VIG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.61 + Top 5 Accuracy: 95.28 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vig/vig-small_3rdparty_in1k_20230117-5338bf3b.pth + Config: configs/vig/vig-small_8xb128_in1k.py + Converted From: + Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/vig/vig_s_80.6.pth + Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch + - Name: vig-base_3rdparty_in1k + Metadata: + FLOPs: 17681000000 + Parameters: 20685000 + Training Data: ImageNet-1k + In Collection: VIG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.62 + Top 5 Accuracy: 96.04 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vig/vig-base_3rdparty_in1k_20230117-92f6f12f.pth + Config: configs/vig/vig-base_8xb128_in1k.py + Converted From: + Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/vig/vig_b_82.6.pth + Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch + - Name: pvig-tiny_3rdparty_in1k + Metadata: + FLOPs: 1714000000 + Parameters: 9458000 + Training Data: ImageNet-1k + In Collection: VIG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.38 + Top 5 Accuracy: 94.38 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vig/pvig-tiny_3rdparty_in1k_20230117-eb77347d.pth + Config: configs/vig/pvig-tiny_8xb128_in1k.py + Converted From: + Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/pyramid-vig/pvig_ti_78.5.pth.tar + Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch + - Name: pvig-small_3rdparty_in1k + Metadata: + FLOPs: 4572000000 + Parameters: 29024000 + Training Data: ImageNet-1k + In Collection: VIG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.00 + Top 5 Accuracy: 95.97 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/vig/pvig-small_3rdparty_in1k_20230117-9433dc96.pth + Config: configs/vig/pvig-small_8xb128_in1k.py + Converted From: + Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/pyramid-vig/pvig_s_82.1.pth.tar + Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch + - Name: pvig-medium_3rdparty_in1k + Metadata: + FLOPs: 8886000000 + Parameters: 51682000 + Training Data: ImageNet-1k + In Collection: VIG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.12 + Top 5 Accuracy: 96.35 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vig/pvig-medium_3rdparty_in1k_20230117-21057a6d.pth + Config: configs/vig/pvig-medium_8xb128_in1k.py + Converted From: + Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/pyramid-vig/pvig_m_83.1.pth.tar + Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch + - Name: pvig-base_3rdparty_in1k + Metadata: + FLOPs: 16861000000 + Parameters: 95213000 + Training Data: ImageNet-1k + In Collection: VIG + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.59 + Top 5 Accuracy: 96.52 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/vig/pvig-base_3rdparty_in1k_20230117-dbab3c85.pth + Config: configs/vig/pvig-base_8xb128_in1k.py + Converted From: + Weights: https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/pyramid-vig/pvig_b_83.66.pth.tar + Code: https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch diff --git a/configs/vig/pvig-base_8xb128_in1k.py b/configs/vig/pvig-base_8xb128_in1k.py new file mode 100644 index 0000000..1d66359 --- /dev/null +++ b/configs/vig/pvig-base_8xb128_in1k.py @@ -0,0 +1,22 @@ +_base_ = [ + '../_base_/models/vig/pyramid_vig_base.py', + '../_base_/datasets/imagenet_bs128_vig_224.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] + +# dataset settings +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=235, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) diff --git a/configs/vig/pvig-medium_8xb128_in1k.py b/configs/vig/pvig-medium_8xb128_in1k.py new file mode 100644 index 0000000..75c25a2 --- /dev/null +++ b/configs/vig/pvig-medium_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vig/pyramid_vig_medium.py', + '../_base_/datasets/imagenet_bs128_vig_224.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] diff --git a/configs/vig/pvig-small_8xb128_in1k.py b/configs/vig/pvig-small_8xb128_in1k.py new file mode 100644 index 0000000..755b331 --- /dev/null +++ b/configs/vig/pvig-small_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vig/pyramid_vig_small.py', + '../_base_/datasets/imagenet_bs128_vig_224.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] diff --git a/configs/vig/pvig-tiny_8xb128_in1k.py b/configs/vig/pvig-tiny_8xb128_in1k.py new file mode 100644 index 0000000..7a88555 --- /dev/null +++ b/configs/vig/pvig-tiny_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vig/pyramid_vig_tiny.py', + '../_base_/datasets/imagenet_bs128_vig_224.py', + 
'../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] diff --git a/configs/vig/vig-base_8xb128_in1k.py b/configs/vig/vig-base_8xb128_in1k.py new file mode 100644 index 0000000..cb8b55e --- /dev/null +++ b/configs/vig/vig-base_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vig/vig_base.py', + '../_base_/datasets/imagenet_bs128_vig_224.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] diff --git a/configs/vig/vig-small_8xb128_in1k.py b/configs/vig/vig-small_8xb128_in1k.py new file mode 100644 index 0000000..41508b2 --- /dev/null +++ b/configs/vig/vig-small_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vig/vig_small.py', + '../_base_/datasets/imagenet_bs128_vig_224.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] diff --git a/configs/vig/vig-tiny_8xb128_in1k.py b/configs/vig/vig-tiny_8xb128_in1k.py new file mode 100644 index 0000000..80b1693 --- /dev/null +++ b/configs/vig/vig-tiny_8xb128_in1k.py @@ -0,0 +1,6 @@ +_base_ = [ + '../_base_/models/vig/vig_tiny.py', + '../_base_/datasets/imagenet_bs128_vig_224.py', + '../_base_/schedules/imagenet_bs256.py', + '../_base_/default_runtime.py', +] diff --git a/configs/vision_transformer/README.md b/configs/vision_transformer/README.md new file mode 100644 index 0000000..66bd3f5 --- /dev/null +++ b/configs/vision_transformer/README.md @@ -0,0 +1,101 @@ +# Vision Transformer + +> [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) + + + +## Introduction + +**Vision Transformer**, known as **ViT**, succeeded in using a pure transformer to outperform previous works based on convolutional networks in the vision field. ViT splits an image into patches that are fed to the multi-head attention layers, concatenates a learnable class token for the final prediction, and adds learnable position embeddings to retain positional information among patches. With these three techniques built around attention, ViT provides a brand-new way to build a basic architecture for vision tasks. + +The strategy works even better when coupled with pre-training on large datasets. Because of its simplicity and effectiveness, many later works in the classification field originate from ViT, and even in the recent multi-modality field, ViT-based methods still play an important role. + +
+ +
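The three ingredients mentioned above (patch splitting, a class token, and position embeddings) can be sketched in a few lines of PyTorch. This is only a shape-level illustration using the standard ViT-Base sizes (16x16 patches, 768-dim embeddings) as assumptions; it is not the backbone code used by these configs.

```python
import torch
import torch.nn as nn

img = torch.rand(1, 3, 224, 224)

# 1) Split the image into 16x16 patches and project each one to a 768-dim token.
patch_embed = nn.Conv2d(3, 768, kernel_size=16, stride=16)
tokens = patch_embed(img).flatten(2).transpose(1, 2)               # (1, 196, 768)

# 2) Prepend a learnable class token used for the final prediction.
cls_token = nn.Parameter(torch.zeros(1, 1, 768))
tokens = torch.cat([cls_token.expand(1, -1, -1), tokens], dim=1)   # (1, 197, 768)

# 3) Add learnable position embeddings so patch order is not lost.
pos_embed = nn.Parameter(torch.zeros(1, 197, 768))
tokens = tokens + pos_embed

print(tokens.shape)  # torch.Size([1, 197, 768]) -> input to the transformer encoder
```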
+ +## Abstract + +
+ +While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train. +
+ +
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('vit-base-p32_in21k-pre_3rdparty_in1k-384px', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('vit-base-p32_in21k-pre_3rdparty_in1k-384px', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Train/Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Train: + +```shell +python tools/train.py configs/vision_transformer/vit-base-p16_32xb128-mae_in1k.py +``` + +Test: + +```shell +python tools/test.py configs/vision_transformer/vit-base-p32_64xb64_in1k-384px.py https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :------------------------------------------: | :----------------------------------------------------------: | +| `vit-base-p32_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 88.30 | 13.06 | 84.01 | 97.08 | [config](vit-base-p32_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth) | +| `vit-base-p16_32xb128-mae_in1k` | From scratch | 86.57 | 17.58 | 82.37 | 96.15 | [config](vit-base-p16_32xb128-mae_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_pt-32xb128-mae_in1k_20220623-4c544545.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_pt-32xb128-mae_in1k_20220623-4c544545.log) | +| `vit-base-p16_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 86.86 | 55.54 | 85.43 | 97.77 | [config](vit-base-p16_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth) | +| `vit-large-p16_in21k-pre_3rdparty_in1k-384px`\* | ImageNet-21k | 304.72 | 191.21 | 85.63 | 97.63 | [config](vit-large-p16_64xb64_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth) | + +*Models with * are converted from the [official repo](https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@inproceedings{ + dosovitskiy2021an, + title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, + author={Alexey Dosovitskiy and Lucas Beyer and Alexander Kolesnikov and Dirk Weissenborn and Xiaohua Zhai and Thomas Unterthiner and Mostafa Dehghani and Matthias Minderer and Georg Heigold and Sylvain Gelly and Jakob Uszkoreit and Neil Houlsby}, + booktitle={International Conference on Learning Representations}, + year={2021}, + url={https://openreview.net/forum?id=YicbFdNTTy} +} +``` diff --git a/configs/vision_transformer/metafile.yml b/configs/vision_transformer/metafile.yml new file mode 100644 index 0000000..891c413 --- /dev/null +++ b/configs/vision_transformer/metafile.yml @@ -0,0 +1,95 @@ +Collections: + - Name: Vision Transformer + Metadata: + Architecture: + - Attention Dropout + - Convolution + - Dense Connections + - Dropout + - GELU + - Layer Normalization + - Multi-Head Attention + - Scaled Dot-Product Attention + - Tanh Activation + Paper: + Title: 'An Image is Worth 16x16 Words: Transformers for Image Recognition at + Scale' + URL: https://arxiv.org/abs/2010.11929 + README: configs/vision_transformer/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.17.0/mmcls/models/backbones/vision_transformer.py + Version: v0.17.0 + +Models: + - Name: vit-base-p32_in21k-pre_3rdparty_in1k-384px + Metadata: + FLOPs: 13056716544 + Parameters: 88297192 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: Vision Transformer + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 84.01 + Top 5 Accuracy: 97.08 + Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth + Config: configs/vision_transformer/vit-base-p32_64xb64_in1k-384px.py + Converted From: + Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/B_32-i21k-300ep-lr_0.001-aug_light1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz + Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208 + - Name: vit-base-p16_32xb128-mae_in1k + Metadata: + FLOPs: 17581972224 + Parameters: 86567656 + Training Data: + - ImageNet-1k + In Collection: Vision Transformer + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 82.37 + Top 5 Accuracy: 96.15 + Weights: https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_pt-32xb128-mae_in1k_20220623-4c544545.pth + Config: configs/vision_transformer/vit-base-p16_32xb128-mae_in1k.py + - Name: vit-base-p16_in21k-pre_3rdparty_in1k-384px + Metadata: + FLOPs: 55538974464 + Parameters: 86859496 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: Vision Transformer + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.43 + Top 5 Accuracy: 97.77 + Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth + Config: configs/vision_transformer/vit-base-p16_64xb64_in1k-384px.py + Converted From: + Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz + Code: 
https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208 + - Name: vit-large-p16_in21k-pre_3rdparty_in1k-384px + Metadata: + FLOPs: 191210034176 + Parameters: 304715752 + Training Data: + - ImageNet-21k + - ImageNet-1k + In Collection: Vision Transformer + Results: + - Dataset: ImageNet-1k + Task: Image Classification + Metrics: + Top 1 Accuracy: 85.63 + Top 5 Accuracy: 97.63 + Weights: https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth + Config: configs/vision_transformer/vit-large-p16_64xb64_in1k-384px.py + Converted From: + Weights: https://console.cloud.google.com/storage/browser/_details/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_strong1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz + Code: https://github.com/google-research/vision_transformer/blob/88a52f8892c80c10de99194990a517b4d80485fd/vit_jax/models.py#L208 diff --git a/configs/vision_transformer/vit-base-p16_32xb128-mae_in1k.py b/configs/vision_transformer/vit-base-p16_32xb128-mae_in1k.py new file mode 100644 index 0000000..a46bbb2 --- /dev/null +++ b/configs/vision_transformer/vit-base-p16_32xb128-mae_in1k.py @@ -0,0 +1,58 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py' +] + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_path_rate=0.1), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# dataset settings +train_dataloader = dict(batch_size=128) + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type='AdamW', + lr=1e-4 * 4096 / 256, + weight_decay=0.3, + eps=1e-8, + betas=(0.9, 0.95)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# runtime settings +custom_hooks = [dict(type='EMAHook', momentum=1e-4)] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/configs/vision_transformer/vit-base-p16_4xb544-ipu_in1k.py b/configs/vision_transformer/vit-base-p16_4xb544-ipu_in1k.py new file mode 100644 index 0000000..d378b3b --- /dev/null +++ b/configs/vision_transformer/vit-base-p16_4xb544-ipu_in1k.py @@ -0,0 +1,114 @@ +_base_ = [ + '../_base_/models/vit-base-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/default_runtime.py' +] + +# specific to vit pretrain +paramwise_cfg = dict(custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) +}) + +pretrained = 'https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth' # noqa + +model = dict( + head=dict( + loss=dict(type='CrossEntropyLoss', loss_weight=1.0, _delete_=True), ), + backbone=dict( + img_size=224, + init_cfg=dict( + type='Pretrained', + checkpoint=pretrained, + _delete_=True, + prefix='backbone'))) + +img_norm_cfg = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='ToHalf', keys=['img']), + dict(type='Collect', keys=['img', 'gt_label']) +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='Resize', scale=(224, -1), keep_ratio=True, backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToHalf', keys=['img']), + dict(type='Collect', keys=['img']) +] + +# change batch size +data = dict( + samples_per_gpu=17, + workers_per_gpu=16, + drop_last=True, + train=dict(pipeline=train_pipeline), + train_dataloader=dict(mode='async'), + val=dict(pipeline=test_pipeline, ), + val_dataloader=dict(samples_per_gpu=4, workers_per_gpu=1), + test=dict(pipeline=test_pipeline), + test_dataloader=dict(samples_per_gpu=4, workers_per_gpu=1)) + +# optimizer +optimizer = dict( + type='SGD', + lr=0.08, + weight_decay=1e-5, + momentum=0.9, + paramwise_cfg=paramwise_cfg, +) + +# learning policy +param_scheduler = [ + dict(type='LinearLR', start_factor=0.02, by_epoch=False, begin=0, end=800), + dict( + type='CosineAnnealingLR', + T_max=4200, + by_epoch=False, + begin=800, + end=5000) +] + +# ipu cfg +# model partition config +ipu_model_cfg = dict( + train_split_edges=[ + dict(layer_to_call='backbone.patch_embed', ipu_id=0), + dict(layer_to_call='backbone.layers.3', ipu_id=1), + dict(layer_to_call='backbone.layers.6', ipu_id=2), + dict(layer_to_call='backbone.layers.9', ipu_id=3) + ], + train_ckpt_nodes=['backbone.layers.{}'.format(i) for i in range(12)]) + +# device config +options_cfg = dict( + randomSeed=42, + partialsType='half', + train_cfg=dict( + executionStrategy='SameAsIpu', + Training=dict(gradientAccumulation=32), + availableMemoryProportion=[0.3, 0.3, 0.3, 0.3], + ), + eval_cfg=dict(deviceIterations=1, ), +) + +# add model partition config and device config to runner +runner = dict( + type='IterBasedRunner', + ipu_model_cfg=ipu_model_cfg, + options_cfg=options_cfg, + max_iters=5000) + +default_hooks = dict(checkpoint=dict(type='CheckpointHook', interval=1000)) + +fp16 = dict(loss_scale=256.0, 
velocity_accum_type='half', accum_type='half') diff --git a/configs/vision_transformer/vit-base-p16_64xb64_in1k-384px.py b/configs/vision_transformer/vit-base-p16_64xb64_in1k-384px.py new file mode 100644 index 0000000..e0f7458 --- /dev/null +++ b/configs/vision_transformer/vit-base-p16_64xb64_in1k-384px.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/vit-base-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict(backbone=dict(img_size=384)) + +# dataset setting +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=384, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=384, edge='short', backend='pillow'), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/vision_transformer/vit-base-p16_64xb64_in1k.py b/configs/vision_transformer/vit-base-p16_64xb64_in1k.py new file mode 100644 index 0000000..07be0e9 --- /dev/null +++ b/configs/vision_transformer/vit-base-p16_64xb64_in1k.py @@ -0,0 +1,15 @@ +_base_ = [ + '../_base_/models/vit-base-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict( + head=dict(hidden_dim=3072), + train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), +) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/vision_transformer/vit-base-p16_8xb64-lora_in1k-384px.py b/configs/vision_transformer/vit-base-p16_8xb64-lora_in1k-384px.py new file mode 100644 index 0000000..ffe1018 --- /dev/null +++ b/configs/vision_transformer/vit-base-p16_8xb64-lora_in1k-384px.py @@ -0,0 +1,84 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict( + type='ImageClassifier', + backbone=dict( + type='LoRAModel', + module=dict( + type='VisionTransformer', + arch='b', + img_size=384, + patch_size=16, + drop_rate=0.1, + init_cfg=dict(type='Pretrained', checkpoint='', + prefix='backbone')), + alpha=16, + rank=16, + drop_rate=0.1, + targets=[dict(type='qkv')]), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=1000, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, + mode='classy_vision'), + init_cfg=[dict(type='TruncNormal', layer='Linear', std=2e-5)], + )) + +# dataset setting +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=384, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', 
scale=384, edge='short', backend='pillow'), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=5, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=45, + by_epoch=True, + begin=5, + end=50, + eta_min=1e-6, + convert_to_iter_based=True) +] + +train_cfg = dict(by_epoch=True, max_epochs=50) +default_hooks = dict( + # save checkpoint per epoch. + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/vision_transformer/vit-base-p32_64xb64_in1k-384px.py b/configs/vision_transformer/vit-base-p32_64xb64_in1k-384px.py new file mode 100644 index 0000000..e5a4d14 --- /dev/null +++ b/configs/vision_transformer/vit-base-p32_64xb64_in1k-384px.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/vit-base-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict(backbone=dict(img_size=384)) + +# dataset setting +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=384, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=384, edge='short', backend='pillow'), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/vision_transformer/vit-base-p32_64xb64_in1k.py b/configs/vision_transformer/vit-base-p32_64xb64_in1k.py new file mode 100644 index 0000000..9cfc7c4 --- /dev/null +++ b/configs/vision_transformer/vit-base-p32_64xb64_in1k.py @@ -0,0 +1,15 @@ +_base_ = [ + '../_base_/models/vit-base-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict( + head=dict(hidden_dim=3072), + train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), +) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/vision_transformer/vit-large-p16_64xb64_in1k-384px.py b/configs/vision_transformer/vit-large-p16_64xb64_in1k-384px.py new file mode 100644 index 0000000..98e96ec --- /dev/null +++ b/configs/vision_transformer/vit-large-p16_64xb64_in1k-384px.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/vit-large-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict(backbone=dict(img_size=384)) + +# dataset setting +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + 
dict(type='RandomResizedCrop', scale=384, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=384, edge='short', backend='pillow'), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/vision_transformer/vit-large-p16_64xb64_in1k.py b/configs/vision_transformer/vit-large-p16_64xb64_in1k.py new file mode 100644 index 0000000..0d9bd28 --- /dev/null +++ b/configs/vision_transformer/vit-large-p16_64xb64_in1k.py @@ -0,0 +1,15 @@ +_base_ = [ + '../_base_/models/vit-large-p16.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict( + head=dict(hidden_dim=3072), + train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), +) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/vision_transformer/vit-large-p32_64xb64_in1k-384px.py b/configs/vision_transformer/vit-large-p32_64xb64_in1k-384px.py new file mode 100644 index 0000000..22320d1 --- /dev/null +++ b/configs/vision_transformer/vit-large-p32_64xb64_in1k-384px.py @@ -0,0 +1,38 @@ +_base_ = [ + '../_base_/models/vit-large-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict(backbone=dict(img_size=384)) + +# dataset setting +data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=384, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=384, edge='short', backend='pillow'), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/vision_transformer/vit-large-p32_64xb64_in1k.py b/configs/vision_transformer/vit-large-p32_64xb64_in1k.py new file mode 100644 index 0000000..61e1791 --- /dev/null +++ b/configs/vision_transformer/vit-large-p32_64xb64_in1k.py @@ -0,0 +1,15 @@ +_base_ = [ + '../_base_/models/vit-large-p32.py', + '../_base_/datasets/imagenet_bs64_pil_resize_autoaug.py', + '../_base_/schedules/imagenet_bs4096_AdamW.py', + '../_base_/default_runtime.py' +] + +# model setting +model = dict( + head=dict(hidden_dim=3072), + train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), +) + +# schedule setting +optim_wrapper = dict(clip_grad=dict(max_norm=1.0)) diff --git a/configs/wrn/README.md b/configs/wrn/README.md new file mode 100644 index 0000000..2753307 --- /dev/null +++ b/configs/wrn/README.md @@ -0,0 +1,76 @@ +# Wide-ResNet + +> [Wide Residual Networks](https://arxiv.org/abs/1605.07146) + + + +## 
Abstract + +Deep residual networks were shown to be able to scale up to thousands of layers and still have improving performance. However, each fraction of a percent of improved accuracy costs nearly doubling the number of layers, and so training very deep residual networks has a problem of diminishing feature reuse, which makes these networks very slow to train. To tackle these problems, in this paper we conduct a detailed experimental study on the architecture of ResNet blocks, based on which we propose a novel architecture where we decrease depth and increase width of residual networks. We call the resulting network structures wide residual networks (WRNs) and show that these are far superior over their commonly used thin and very deep counterparts. For example, we demonstrate that even a simple 16-layer-deep wide residual network outperforms in accuracy and efficiency all previous deep residual networks, including thousand-layer-deep networks, achieving new state-of-the-art results on CIFAR, SVHN, COCO, and significant improvements on ImageNet. + +
+ +
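As a rough picture of the "decrease depth, increase width" idea above, the toy block below scales the channel count of a pre-activation residual block by a widening factor. It is only a sketch with made-up sizes; the checkpoints in this folder are, roughly speaking, ResNet-50/101 variants whose bottleneck width is doubled, not this exact block.

```python
import torch
import torch.nn as nn


class WideBasicBlock(nn.Module):
    """Pre-activation residual block whose width is scaled by `widen_factor`."""

    def __init__(self, in_channels, base_channels, widen_factor=2):
        super().__init__()
        width = base_channels * widen_factor
        self.body = nn.Sequential(
            nn.BatchNorm2d(in_channels), nn.ReLU(inplace=True),
            nn.Conv2d(in_channels, width, 3, padding=1, bias=False),
            nn.BatchNorm2d(width), nn.ReLU(inplace=True),
            nn.Conv2d(width, width, 3, padding=1, bias=False),
        )
        # 1x1 projection so the skip connection matches the widened channels.
        self.shortcut = nn.Conv2d(in_channels, width, 1, bias=False)

    def forward(self, x):
        return self.body(x) + self.shortcut(x)


block = WideBasicBlock(in_channels=16, base_channels=16, widen_factor=2)
print(block(torch.rand(1, 16, 32, 32)).shape)  # torch.Size([1, 32, 32, 32])
```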
+ +## How to use it? + + + +**Predict image** + +```python +from mmpretrain import inference_model + +predict = inference_model('wide-resnet50_3rdparty_8xb32_in1k', 'demo/bird.JPEG') +print(predict['pred_class']) +print(predict['pred_score']) +``` + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('wide-resnet50_3rdparty_8xb32_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/wrn/wide-resnet50_8xb32_in1k.py https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty_8xb32_in1k_20220304-66678344.pth +``` + + + +## Models and results + +### Image Classification on ImageNet-1k + +| Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------------: | :-----------------------------------------------------------------: | +| `wide-resnet50_3rdparty_8xb32_in1k`\* | From scratch | 68.88 | 11.44 | 78.48 | 94.08 | [config](wide-resnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty_8xb32_in1k_20220304-66678344.pth) | +| `wide-resnet101_3rdparty_8xb32_in1k`\* | From scratch | 126.89 | 22.81 | 78.84 | 94.28 | [config](wide-resnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet101_3rdparty_8xb32_in1k_20220304-8d5f9d61.pth) | +| `wide-resnet50_3rdparty-timm_8xb32_in1k`\* | From scratch | 68.88 | 11.44 | 81.45 | 95.53 | [config](wide-resnet50_timm_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty-timm_8xb32_in1k_20220304-83ae4399.pth) | + +*Models with * are converted from the [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py). The config files of these models are only for inference. 
We haven't reproduce the training results.* + +## Citation + +```bibtex +@INPROCEEDINGS{Zagoruyko2016WRN, + author = {Sergey Zagoruyko and Nikos Komodakis}, + title = {Wide Residual Networks}, + booktitle = {BMVC}, + year = {2016}} +``` diff --git a/configs/wrn/metafile.yml b/configs/wrn/metafile.yml new file mode 100644 index 0000000..75e3467 --- /dev/null +++ b/configs/wrn/metafile.yml @@ -0,0 +1,77 @@ +Collections: + - Name: Wide-ResNet + Metadata: + Training Data: ImageNet-1k + Training Techniques: + - SGD with Momentum + - Weight Decay + Training Resources: 8x V100 GPUs + Epochs: 100 + Batch Size: 256 + Architecture: + - 1x1 Convolution + - Batch Normalization + - Convolution + - Global Average Pooling + - Max Pooling + - ReLU + - Residual Connection + - Softmax + - Wide Residual Block + Paper: + URL: https://arxiv.org/abs/1605.07146 + Title: "Wide Residual Networks" + README: configs/wrn/README.md + Code: + URL: https://github.com/open-mmlab/mmpretrain/blob/v0.20.1/mmcls/models/backbones/resnet.py#L383 + Version: v0.20.1 + +Models: + - Name: wide-resnet50_3rdparty_8xb32_in1k + Metadata: + FLOPs: 11440000000 # 11.44G + Parameters: 68880000 # 68.88M + In Collection: Wide-ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.48 + Top 5 Accuracy: 94.08 + Weights: https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty_8xb32_in1k_20220304-66678344.pth + Config: configs/wrn/wide-resnet50_8xb32_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py + - Name: wide-resnet101_3rdparty_8xb32_in1k + Metadata: + FLOPs: 22810000000 # 22.81G + Parameters: 126890000 # 126.89M + In Collection: Wide-ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.84 + Top 5 Accuracy: 94.28 + Weights: https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet101_3rdparty_8xb32_in1k_20220304-8d5f9d61.pth + Config: configs/wrn/wide-resnet101_8xb32_in1k.py + Converted From: + Weights: https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth + Code: https://github.com/pytorch/vision/blob/main/torchvision/models/resnet.py + - Name: wide-resnet50_3rdparty-timm_8xb32_in1k + Metadata: + FLOPs: 11440000000 # 11.44G + Parameters: 68880000 # 68.88M + In Collection: Wide-ResNet + Results: + - Task: Image Classification + Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.45 + Top 5 Accuracy: 95.53 + Weights: https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty-timm_8xb32_in1k_20220304-83ae4399.pth + Config: configs/wrn/wide-resnet50_timm_8xb32_in1k.py + Converted From: + Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/wide_resnet50_racm-8234f177.pth + Code: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/resnet.py diff --git a/configs/wrn/wide-resnet101_8xb32_in1k.py b/configs/wrn/wide-resnet101_8xb32_in1k.py new file mode 100644 index 0000000..d1bf5e5 --- /dev/null +++ b/configs/wrn/wide-resnet101_8xb32_in1k.py @@ -0,0 +1,7 @@ +_base_ = [ + '../_base_/models/wide-resnet50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] + +model = dict(backbone=dict(depth=101)) diff --git a/configs/wrn/wide-resnet50_8xb32_in1k.py b/configs/wrn/wide-resnet50_8xb32_in1k.py new file mode 100644 index 
0000000..edf6a05 --- /dev/null +++ b/configs/wrn/wide-resnet50_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/wide-resnet50.py', + '../_base_/datasets/imagenet_bs32_pil_resize.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/wrn/wide-resnet50_timm_8xb32_in1k.py b/configs/wrn/wide-resnet50_timm_8xb32_in1k.py new file mode 100644 index 0000000..8dca8f3 --- /dev/null +++ b/configs/wrn/wide-resnet50_timm_8xb32_in1k.py @@ -0,0 +1,5 @@ +_base_ = [ + '../_base_/models/wide-resnet50.py', + '../_base_/datasets/imagenet_bs32_pil_bicubic.py', + '../_base_/schedules/imagenet_bs256.py', '../_base_/default_runtime.py' +] diff --git a/configs/xcit/README.md b/configs/xcit/README.md new file mode 100644 index 0000000..ab2cd7a --- /dev/null +++ b/configs/xcit/README.md @@ -0,0 +1,106 @@ +# XCiT + +> [XCiT: Cross-Covariance Image Transformers](https://arxiv.org/abs/2106.09681) + + + +## Abstract + +Following their success in natural language processing, transformers have recently shown much promise for computer vision. The self-attention operation underlying transformers yields global interactions between all tokens ,i.e. words or image patches, and enables flexible modelling of image data beyond the local interactions of convolutions. This flexibility, however, comes with a quadratic complexity in time and memory, hindering application to long sequences and high-resolution images. We propose a "transposed" version of self-attention that operates across feature channels rather than tokens, where the interactions are based on the cross-covariance matrix between keys and queries. The resulting cross-covariance attention (XCA) has linear complexity in the number of tokens, and allows efficient processing of high-resolution images. Our cross-covariance image transformer (XCiT) is built upon XCA. It combines the accuracy of conventional transformers with the scalability of convolutional architectures. We validate the effectiveness and generality of XCiT by reporting excellent results on multiple vision benchmarks, including image classification and self-supervised feature learning on ImageNet-1k, object detection and instance segmentation on COCO, and semantic segmentation on ADE20k. + +
+ +
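The cross-covariance attention (XCA) described above is compact enough to write down directly: instead of an NxN token-to-token attention map, it builds a CxC channel-to-channel map from the cross-covariance of keys and queries, so the cost grows linearly with the number of tokens. The single-head sketch below follows the formula from the paper under assumed shapes; the real backbone adds multiple heads, projections and a learnable temperature.

```python
import torch
import torch.nn.functional as F


def cross_covariance_attention(q, k, v, temperature=1.0):
    """Single-head XCA sketch. q, k, v: (batch, tokens, channels)."""
    q = F.normalize(q, dim=1)                         # L2-normalise along the token axis
    k = F.normalize(k, dim=1)
    attn = (k.transpose(-2, -1) @ q) / temperature    # (batch, channels, channels)
    attn = attn.softmax(dim=-1)
    return v @ attn                                   # (batch, tokens, channels)


q = k = v = torch.rand(2, 196, 128)
print(cross_covariance_attention(q, k, v).shape)      # torch.Size([2, 196, 128])
```

The attention map stays CxC no matter how many tokens there are, which is why the higher-resolution 384px variants in the table below remain practical.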
+ +## How to use it? + + + +**Use the model** + +```python +import torch +from mmpretrain import get_model + +model = get_model('xcit-nano-12-p16_3rdparty_in1k', pretrained=True) +inputs = torch.rand(1, 3, 224, 224) +out = model(inputs) +print(type(out)) +# To extract features. +feats = model.extract_feat(inputs) +print(type(feats)) +``` + +**Test Command** + +Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset). + +Test: + +```shell +python tools/test.py configs/xcit/xcit-nano-12-p16_8xb128_in1k.py https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p16_3rdparty_in1k_20230213-ed776c38.pth +``` + + + +## Models and results + +### Pretrained models + +| Model | Params (M) | Flops (G) | Config | Download | +| :---------------------------------------------- | :--------: | :-------: | :-----------------------------------------------: | :-----------------------------------------------------------------------------------: | +| `xcit-nano-12-p16_3rdparty_in1k`\* | 3.05 | 0.56 | [config](xcit-nano-12-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p16_3rdparty_in1k_20230213-ed776c38.pth) | +| `xcit-nano-12-p16_3rdparty-dist_in1k`\* | 3.05 | 0.56 | [config](xcit-nano-12-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p16_3rdparty-dist_in1k_20230213-fb247f7b.pth) | +| `xcit-tiny-12-p16_3rdparty_in1k`\* | 6.72 | 1.24 | [config](xcit-tiny-12-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-12-p16_3rdparty_in1k_20230213-82c547ca.pth) | +| `xcit-tiny-12-p16_3rdparty-dist_in1k`\* | 6.72 | 1.24 | [config](xcit-tiny-12-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-12-p16_3rdparty-dist_in1k_20230213-d5fde0a3.pth) | +| `xcit-nano-12-p16_3rdparty-dist_in1k-384px`\* | 3.05 | 1.64 | [config](xcit-nano-12-p16_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p16_3rdparty-dist_in1k-384px_20230213-712db4d4.pth) | +| `xcit-nano-12-p8_3rdparty_in1k`\* | 3.05 | 2.16 | [config](xcit-nano-12-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p8_3rdparty_in1k_20230213-3370c293.pth) | +| `xcit-nano-12-p8_3rdparty-dist_in1k`\* | 3.05 | 2.16 | [config](xcit-nano-12-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p8_3rdparty-dist_in1k_20230213-2f87d2b3.pth) | +| `xcit-tiny-24-p16_3rdparty_in1k`\* | 12.12 | 2.34 | [config](xcit-tiny-24-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-24-p16_3rdparty_in1k_20230213-366c1cd0.pth) | +| `xcit-tiny-24-p16_3rdparty-dist_in1k`\* | 12.12 | 2.34 | [config](xcit-tiny-24-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-24-p16_3rdparty-dist_in1k_20230213-b472e80a.pth) | +| `xcit-tiny-12-p16_3rdparty-dist_in1k-384px`\* | 6.72 | 3.64 | [config](xcit-tiny-12-p16_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-12-p16_3rdparty-dist_in1k-384px_20230213-00a20023.pth) | +| `xcit-tiny-12-p8_3rdparty_in1k`\* | 6.71 | 4.81 | [config](xcit-tiny-12-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-12-p8_3rdparty_in1k_20230213-8b02f8f5.pth) | +| 
`xcit-tiny-12-p8_3rdparty-dist_in1k`\* | 6.71 | 4.81 | [config](xcit-tiny-12-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-12-p8_3rdparty-dist_in1k_20230213-f3f9b44f.pth) | +| `xcit-small-12-p16_3rdparty_in1k`\* | 26.25 | 4.81 | [config](xcit-small-12-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-12-p16_3rdparty_in1k_20230213-d36779d2.pth) | +| `xcit-small-12-p16_3rdparty-dist_in1k`\* | 26.25 | 4.81 | [config](xcit-small-12-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-12-p16_3rdparty-dist_in1k_20230213-c95bbae1.pth) | +| `xcit-nano-12-p8_3rdparty-dist_in1k-384px`\* | 3.05 | 6.34 | [config](xcit-nano-12-p8_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p8_3rdparty-dist_in1k-384px_20230213-09d925ef.pth) | +| `xcit-tiny-24-p16_3rdparty-dist_in1k-384px`\* | 12.12 | 6.87 | [config](xcit-tiny-24-p16_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-24-p16_3rdparty-dist_in1k-384px_20230213-20e13917.pth) | +| `xcit-small-24-p16_3rdparty_in1k`\* | 47.67 | 9.10 | [config](xcit-small-24-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-24-p16_3rdparty_in1k_20230213-40febe38.pth) | +| `xcit-small-24-p16_3rdparty-dist_in1k`\* | 47.67 | 9.10 | [config](xcit-small-24-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-24-p16_3rdparty-dist_in1k_20230213-130d7262.pth) | +| `xcit-tiny-24-p8_3rdparty_in1k`\* | 12.11 | 9.21 | [config](xcit-tiny-24-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-24-p8_3rdparty_in1k_20230213-4b9ba392.pth) | +| `xcit-tiny-24-p8_3rdparty-dist_in1k`\* | 12.11 | 9.21 | [config](xcit-tiny-24-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-24-p8_3rdparty-dist_in1k_20230213-ad9c44b0.pth) | +| `xcit-tiny-12-p8_3rdparty-dist_in1k-384px`\* | 6.71 | 14.13 | [config](xcit-tiny-12-p8_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-12-p8_3rdparty-dist_in1k-384px_20230213-a072174a.pth) | +| `xcit-small-12-p16_3rdparty-dist_in1k-384px`\* | 26.25 | 14.14 | [config](xcit-small-12-p16_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-12-p16_3rdparty-dist_in1k-384px_20230213-ba36c982.pth) | +| `xcit-medium-24-p16_3rdparty_in1k`\* | 84.40 | 16.13 | [config](xcit-medium-24-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p16_3rdparty_in1k_20230213-ad0aa92e.pth) | +| `xcit-medium-24-p16_3rdparty-dist_in1k`\* | 84.40 | 16.13 | [config](xcit-medium-24-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p16_3rdparty-dist_in1k_20230213-aca5cd0c.pth) | +| `xcit-small-12-p8_3rdparty_in1k`\* | 26.21 | 18.69 | [config](xcit-small-12-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-12-p8_3rdparty_in1k_20230213-9e364ce3.pth) | +| `xcit-small-12-p8_3rdparty-dist_in1k`\* | 26.21 | 18.69 | [config](xcit-small-12-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-12-p8_3rdparty-dist_in1k_20230213-71886580.pth) | +| `xcit-small-24-p16_3rdparty-dist_in1k-384px`\* | 47.67 | 26.72 | 
[config](xcit-small-24-p16_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-24-p16_3rdparty-dist_in1k-384px_20230213-28fa2d0e.pth) | +| `xcit-tiny-24-p8_3rdparty-dist_in1k-384px`\* | 12.11 | 27.05 | [config](xcit-tiny-24-p8_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-24-p8_3rdparty-dist_in1k-384px_20230213-30d5e5ec.pth) | +| `xcit-small-24-p8_3rdparty_in1k`\* | 47.63 | 35.81 | [config](xcit-small-24-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-24-p8_3rdparty_in1k_20230213-280ebcc7.pth) | +| `xcit-small-24-p8_3rdparty-dist_in1k`\* | 47.63 | 35.81 | [config](xcit-small-24-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-24-p8_3rdparty-dist_in1k_20230213-f2773c78.pth) | +| `xcit-large-24-p16_3rdparty_in1k`\* | 189.10 | 35.86 | [config](xcit-large-24-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p16_3rdparty_in1k_20230214-d29d2529.pth) | +| `xcit-large-24-p16_3rdparty-dist_in1k`\* | 189.10 | 35.86 | [config](xcit-large-24-p16_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p16_3rdparty-dist_in1k_20230214-4fea599c.pth) | +| `xcit-medium-24-p16_3rdparty-dist_in1k-384px`\* | 84.40 | 47.39 | [config](xcit-medium-24-p16_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p16_3rdparty-dist_in1k-384px_20230214-6c23a201.pth) | +| `xcit-small-12-p8_3rdparty-dist_in1k-384px`\* | 26.21 | 54.92 | [config](xcit-small-12-p8_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-12-p8_3rdparty-dist_in1k-384px_20230214-9f2178bc.pth) | +| `xcit-medium-24-p8_3rdparty_in1k`\* | 84.32 | 63.52 | [config](xcit-medium-24-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p8_3rdparty_in1k_20230214-c362850b.pth) | +| `xcit-medium-24-p8_3rdparty-dist_in1k`\* | 84.32 | 63.52 | [config](xcit-medium-24-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p8_3rdparty-dist_in1k_20230214-625c953b.pth) | +| `xcit-small-24-p8_3rdparty-dist_in1k-384px`\* | 47.63 | 105.24 | [config](xcit-small-24-p8_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-24-p8_3rdparty-dist_in1k-384px_20230214-57298eca.pth) | +| `xcit-large-24-p16_3rdparty-dist_in1k-384px`\* | 189.10 | 105.35 | [config](xcit-large-24-p16_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p16_3rdparty-dist_in1k-384px_20230214-bd515a34.pth) | +| `xcit-large-24-p8_3rdparty_in1k`\* | 188.93 | 141.23 | [config](xcit-large-24-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p8_3rdparty_in1k_20230214-08f2f664.pth) | +| `xcit-large-24-p8_3rdparty-dist_in1k`\* | 188.93 | 141.23 | [config](xcit-large-24-p8_8xb128_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p8_3rdparty-dist_in1k_20230214-8c092b34.pth) | +| `xcit-medium-24-p8_3rdparty-dist_in1k-384px`\* | 84.32 | 186.67 | [config](xcit-medium-24-p8_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p8_3rdparty-dist_in1k-384px_20230214-5db925e0.pth) | +| `xcit-large-24-p8_3rdparty-dist_in1k-384px`\* | 
188.93 | 415.00 | [config](xcit-large-24-p8_8xb128_in1k-384px.py) | [model](https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p8_3rdparty-dist_in1k-384px_20230214-9f718b1a.pth) | + +*Models with * are converted from the [official repo](https://github.com/facebookresearch/xcit). The config files of these models are only for inference. We haven't reproduce the training results.* + +## Citation + +```bibtex +@article{el2021xcit, + title={XCiT: Cross-Covariance Image Transformers}, + author={El-Nouby, Alaaeldin and Touvron, Hugo and Caron, Mathilde and Bojanowski, Piotr and Douze, Matthijs and Joulin, Armand and Laptev, Ivan and Neverova, Natalia and Synnaeve, Gabriel and Verbeek, Jakob and others}, + journal={arXiv preprint arXiv:2106.09681}, + year={2021} +} +``` diff --git a/configs/xcit/metafile.yml b/configs/xcit/metafile.yml new file mode 100644 index 0000000..8379da1 --- /dev/null +++ b/configs/xcit/metafile.yml @@ -0,0 +1,727 @@ +Collections: + - Name: XCiT + Metadata: + Architecture: + - Class Attention + - Local Patch Interaction + - Cross-Covariance Attention + Paper: + Title: 'XCiT: Cross-Covariance Image Transformers' + URL: https://arxiv.org/abs/2106.09681 + README: configs/xcit/README.md + +Models: + - Name: xcit-nano-12-p16_3rdparty_in1k + Metadata: + FLOPs: 557074560 + Parameters: 3053224 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 70.35 + Top 5 Accuracy: 89.98 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p16_3rdparty_in1k_20230213-ed776c38.pth + Config: configs/xcit/xcit-nano-12-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224.pth + - Name: xcit-nano-12-p16_3rdparty-dist_in1k + Metadata: + FLOPs: 557074560 + Parameters: 3053224 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 72.36 + Top 5 Accuracy: 91.02 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p16_3rdparty-dist_in1k_20230213-fb247f7b.pth + Config: configs/xcit/xcit-nano-12-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_224_dist.pth + - Name: xcit-tiny-12-p16_3rdparty_in1k + Metadata: + FLOPs: 1239698112 + Parameters: 6716272 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.21 + Top 5 Accuracy: 93.62 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-12-p16_3rdparty_in1k_20230213-82c547ca.pth + Config: configs/xcit/xcit-tiny-12-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224.pth + - Name: xcit-tiny-12-p16_3rdparty-dist_in1k + Metadata: + FLOPs: 1239698112 + Parameters: 6716272 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 78.7 + Top 5 Accuracy: 94.12 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-12-p16_3rdparty-dist_in1k_20230213-d5fde0a3.pth + Config: configs/xcit/xcit-tiny-12-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: 
https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_224_dist.pth + - Name: xcit-nano-12-p16_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 1636347520 + Parameters: 3053224 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 74.93 + Top 5 Accuracy: 92.42 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p16_3rdparty-dist_in1k-384px_20230213-712db4d4.pth + Config: configs/xcit/xcit-nano-12-p16_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p16_384_dist.pth + - Name: xcit-nano-12-p8_3rdparty_in1k + Metadata: + FLOPs: 2156861056 + Parameters: 3049016 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 73.8 + Top 5 Accuracy: 92.08 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p8_3rdparty_in1k_20230213-3370c293.pth + Config: configs/xcit/xcit-nano-12-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224.pth + - Name: xcit-nano-12-p8_3rdparty-dist_in1k + Metadata: + FLOPs: 2156861056 + Parameters: 3049016 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 76.17 + Top 5 Accuracy: 93.08 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p8_3rdparty-dist_in1k_20230213-2f87d2b3.pth + Config: configs/xcit/xcit-nano-12-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_224_dist.pth + - Name: xcit-tiny-24-p16_3rdparty_in1k + Metadata: + FLOPs: 2339305152 + Parameters: 12116896 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.47 + Top 5 Accuracy: 94.85 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-24-p16_3rdparty_in1k_20230213-366c1cd0.pth + Config: configs/xcit/xcit-tiny-24-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224.pth + - Name: xcit-tiny-24-p16_3rdparty-dist_in1k + Metadata: + FLOPs: 2339305152 + Parameters: 12116896 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.51 + Top 5 Accuracy: 95.17 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-24-p16_3rdparty-dist_in1k_20230213-b472e80a.pth + Config: configs/xcit/xcit-tiny-24-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_224_dist.pth + - Name: xcit-tiny-12-p16_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 3641468352 + Parameters: 6716272 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 80.58 + Top 5 Accuracy: 95.38 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-12-p16_3rdparty-dist_in1k-384px_20230213-00a20023.pth + Config: configs/xcit/xcit-tiny-12-p16_8xb128_in1k-384px.py + Converted From: + Code: 
https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p16_384_dist.pth + - Name: xcit-tiny-12-p8_3rdparty_in1k + Metadata: + FLOPs: 4807399872 + Parameters: 6706504 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 79.75 + Top 5 Accuracy: 94.88 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-12-p8_3rdparty_in1k_20230213-8b02f8f5.pth + Config: configs/xcit/xcit-tiny-12-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224.pth + - Name: xcit-tiny-12-p8_3rdparty-dist_in1k + Metadata: + FLOPs: 4807399872 + Parameters: 6706504 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.26 + Top 5 Accuracy: 95.46 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-12-p8_3rdparty-dist_in1k_20230213-f3f9b44f.pth + Config: configs/xcit/xcit-tiny-12-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_224_dist.pth + - Name: xcit-small-12-p16_3rdparty_in1k + Metadata: + FLOPs: 4814951808 + Parameters: 26253304 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.87 + Top 5 Accuracy: 95.77 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-12-p16_3rdparty_in1k_20230213-d36779d2.pth + Config: configs/xcit/xcit-small-12-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224.pth + - Name: xcit-small-12-p16_3rdparty-dist_in1k + Metadata: + FLOPs: 4814951808 + Parameters: 26253304 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.12 + Top 5 Accuracy: 96.41 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-12-p16_3rdparty-dist_in1k_20230213-c95bbae1.pth + Config: configs/xcit/xcit-small-12-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_224_dist.pth + - Name: xcit-nano-12-p8_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 6337760896 + Parameters: 3049016 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 77.69 + Top 5 Accuracy: 94.09 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-nano-12-p8_3rdparty-dist_in1k-384px_20230213-09d925ef.pth + Config: configs/xcit/xcit-nano-12-p8_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_nano_12_p8_384_dist.pth + - Name: xcit-tiny-24-p16_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 6872966592 + Parameters: 12116896 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.43 + Top 5 Accuracy: 96.2 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-24-p16_3rdparty-dist_in1k-384px_20230213-20e13917.pth + Config: 
configs/xcit/xcit-tiny-24-p16_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p16_384_dist.pth + - Name: xcit-small-24-p16_3rdparty_in1k + Metadata: + FLOPs: 9095064960 + Parameters: 47671384 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.38 + Top 5 Accuracy: 95.93 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-24-p16_3rdparty_in1k_20230213-40febe38.pth + Config: configs/xcit/xcit-small-24-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224.pth + - Name: xcit-small-24-p16_3rdparty-dist_in1k + Metadata: + FLOPs: 9095064960 + Parameters: 47671384 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.7 + Top 5 Accuracy: 96.61 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-24-p16_3rdparty-dist_in1k_20230213-130d7262.pth + Config: configs/xcit/xcit-small-24-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_224_dist.pth + - Name: xcit-tiny-24-p8_3rdparty_in1k + Metadata: + FLOPs: 9205828032 + Parameters: 12107128 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 81.7 + Top 5 Accuracy: 95.9 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-24-p8_3rdparty_in1k_20230213-4b9ba392.pth + Config: configs/xcit/xcit-tiny-24-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224.pth + - Name: xcit-tiny-24-p8_3rdparty-dist_in1k + Metadata: + FLOPs: 9205828032 + Parameters: 12107128 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.62 + Top 5 Accuracy: 96.16 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-24-p8_3rdparty-dist_in1k_20230213-ad9c44b0.pth + Config: configs/xcit/xcit-tiny-24-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_224_dist.pth + - Name: xcit-tiny-12-p8_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 14126142912 + Parameters: 6706504 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.46 + Top 5 Accuracy: 96.22 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-12-p8_3rdparty-dist_in1k-384px_20230213-a072174a.pth + Config: configs/xcit/xcit-tiny-12-p8_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_tiny_12_p8_384_dist.pth + - Name: xcit-small-12-p16_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 14143179648 + Parameters: 26253304 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.74 + Top 5 Accuracy: 97.19 + Task: Image Classification + Weights: 
https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-12-p16_3rdparty-dist_in1k-384px_20230213-ba36c982.pth + Config: configs/xcit/xcit-small-12-p16_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p16_384_dist.pth + - Name: xcit-medium-24-p16_3rdparty_in1k + Metadata: + FLOPs: 16129561088 + Parameters: 84395752 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.56 + Top 5 Accuracy: 95.82 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p16_3rdparty_in1k_20230213-ad0aa92e.pth + Config: configs/xcit/xcit-medium-24-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224.pth + - Name: xcit-medium-24-p16_3rdparty-dist_in1k + Metadata: + FLOPs: 16129561088 + Parameters: 84395752 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.15 + Top 5 Accuracy: 96.82 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p16_3rdparty-dist_in1k_20230213-aca5cd0c.pth + Config: configs/xcit/xcit-medium-24-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_224_dist.pth + - Name: xcit-small-12-p8_3rdparty_in1k + Metadata: + FLOPs: 18691601280 + Parameters: 26213032 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.21 + Top 5 Accuracy: 96.41 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-12-p8_3rdparty_in1k_20230213-9e364ce3.pth + Config: configs/xcit/xcit-small-12-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224.pth + - Name: xcit-small-12-p8_3rdparty-dist_in1k + Metadata: + FLOPs: 18691601280 + Parameters: 26213032 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.97 + Top 5 Accuracy: 96.81 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-12-p8_3rdparty-dist_in1k_20230213-71886580.pth + Config: configs/xcit/xcit-small-12-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_224_dist.pth + - Name: xcit-small-24-p16_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 26721471360 + Parameters: 47671384 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.1 + Top 5 Accuracy: 97.32 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-24-p16_3rdparty-dist_in1k-384px_20230213-28fa2d0e.pth + Config: configs/xcit/xcit-small-24-p16_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p16_384_dist.pth + - Name: xcit-tiny-24-p8_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 27052135872 + Parameters: 12107128 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 
Accuracy: 83.77 + Top 5 Accuracy: 96.72 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-tiny-24-p8_3rdparty-dist_in1k-384px_20230213-30d5e5ec.pth + Config: configs/xcit/xcit-tiny-24-p8_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_tiny_24_p8_384_dist.pth + - Name: xcit-small-24-p8_3rdparty_in1k + Metadata: + FLOPs: 35812053888 + Parameters: 47631112 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.62 + Top 5 Accuracy: 96.51 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-24-p8_3rdparty_in1k_20230213-280ebcc7.pth + Config: configs/xcit/xcit-small-24-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224.pth + - Name: xcit-small-24-p8_3rdparty-dist_in1k + Metadata: + FLOPs: 35812053888 + Parameters: 47631112 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.68 + Top 5 Accuracy: 97.07 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-24-p8_3rdparty-dist_in1k_20230213-f2773c78.pth + Config: configs/xcit/xcit-small-24-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_224_dist.pth + - Name: xcit-large-24-p16_3rdparty_in1k + Metadata: + FLOPs: 35855948544 + Parameters: 189096136 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 82.97 + Top 5 Accuracy: 95.86 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p16_3rdparty_in1k_20230214-d29d2529.pth + Config: configs/xcit/xcit-large-24-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224.pth + - Name: xcit-large-24-p16_3rdparty-dist_in1k + Metadata: + FLOPs: 35855948544 + Parameters: 189096136 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.61 + Top 5 Accuracy: 97.07 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p16_3rdparty-dist_in1k_20230214-4fea599c.pth + Config: configs/xcit/xcit-large-24-p16_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_224_dist.pth + - Name: xcit-medium-24-p16_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 47388932608 + Parameters: 84395752 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.47 + Top 5 Accuracy: 97.49 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p16_3rdparty-dist_in1k-384px_20230214-6c23a201.pth + Config: configs/xcit/xcit-medium-24-p16_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p16_384_dist.pth + - Name: xcit-small-12-p8_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 54923537280 + Parameters: 26213032 + Training Data: ImageNet-1k + In 
Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.12 + Top 5 Accuracy: 97.31 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-12-p8_3rdparty-dist_in1k-384px_20230214-9f2178bc.pth + Config: configs/xcit/xcit-small-12-p8_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_small_12_p8_384_dist.pth + - Name: xcit-medium-24-p8_3rdparty_in1k + Metadata: + FLOPs: 63524706816 + Parameters: 84323624 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 83.61 + Top 5 Accuracy: 96.23 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p8_3rdparty_in1k_20230214-c362850b.pth + Config: configs/xcit/xcit-medium-24-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224.pth + - Name: xcit-medium-24-p8_3rdparty-dist_in1k + Metadata: + FLOPs: 63524706816 + Parameters: 84323624 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.0 + Top 5 Accuracy: 97.16 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p8_3rdparty-dist_in1k_20230214-625c953b.pth + Config: configs/xcit/xcit-medium-24-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_224_dist.pth + - Name: xcit-small-24-p8_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 105236704128 + Parameters: 47631112 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.57 + Top 5 Accuracy: 97.6 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-small-24-p8_3rdparty-dist_in1k-384px_20230214-57298eca.pth + Config: configs/xcit/xcit-small-24-p8_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_small_24_p8_384_dist.pth + - Name: xcit-large-24-p16_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 105345095424 + Parameters: 189096136 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.78 + Top 5 Accuracy: 97.6 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p16_3rdparty-dist_in1k-384px_20230214-bd515a34.pth + Config: configs/xcit/xcit-large-24-p16_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p16_384_dist.pth + - Name: xcit-large-24-p8_3rdparty_in1k + Metadata: + FLOPs: 141225699072 + Parameters: 188932648 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 84.23 + Top 5 Accuracy: 96.58 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p8_3rdparty_in1k_20230214-08f2f664.pth + Config: configs/xcit/xcit-large-24-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224.pth + - Name: xcit-large-24-p8_3rdparty-dist_in1k + 
Metadata: + FLOPs: 141225699072 + Parameters: 188932648 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.14 + Top 5 Accuracy: 97.32 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p8_3rdparty-dist_in1k_20230214-8c092b34.pth + Config: configs/xcit/xcit-large-24-p8_8xb128_in1k.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_224_dist.pth + - Name: xcit-medium-24-p8_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 186672626176 + Parameters: 84323624 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 85.87 + Top 5 Accuracy: 97.61 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-medium-24-p8_3rdparty-dist_in1k-384px_20230214-5db925e0.pth + Config: configs/xcit/xcit-medium-24-p8_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_medium_24_p8_384_dist.pth + - Name: xcit-large-24-p8_3rdparty-dist_in1k-384px + Metadata: + FLOPs: 415003137792 + Parameters: 188932648 + Training Data: ImageNet-1k + In Collection: XCiT + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 86.13 + Top 5 Accuracy: 97.75 + Task: Image Classification + Weights: https://download.openmmlab.com/mmclassification/v0/xcit/xcit-large-24-p8_3rdparty-dist_in1k-384px_20230214-9f718b1a.pth + Config: configs/xcit/xcit-large-24-p8_8xb128_in1k-384px.py + Converted From: + Code: https://github.com/facebookresearch/xcit + Weights: https://dl.fbaipublicfiles.com/xcit/xcit_large_24_p8_384_dist.pth diff --git a/configs/xcit/xcit-large-24-p16_8xb128_in1k-384px.py b/configs/xcit/xcit-large-24-p16_8xb128_in1k-384px.py new file mode 100644 index 0000000..b393c4a --- /dev/null +++ b/configs/xcit/xcit-large-24-p16_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=768, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-large-24-p16_8xb128_in1k.py b/configs/xcit/xcit-large-24-p16_8xb128_in1k.py new file mode 100644 index 0000000..b5c01cb --- /dev/null +++ b/configs/xcit/xcit-large-24-p16_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=768, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + 
train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-large-24-p8_8xb128_in1k-384px.py b/configs/xcit/xcit-large-24-p8_8xb128_in1k-384px.py new file mode 100644 index 0000000..46b8422 --- /dev/null +++ b/configs/xcit/xcit-large-24-p8_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=768, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-large-24-p8_8xb128_in1k.py b/configs/xcit/xcit-large-24-p8_8xb128_in1k.py new file mode 100644 index 0000000..6dc67ba --- /dev/null +++ b/configs/xcit/xcit-large-24-p8_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=768, + depth=24, + num_heads=16, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-medium-24-p16_8xb128_in1k-384px.py b/configs/xcit/xcit-medium-24-p16_8xb128_in1k-384px.py new file mode 100644 index 0000000..8c91b9c --- /dev/null +++ b/configs/xcit/xcit-medium-24-p16_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=512, + depth=24, + num_heads=8, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-medium-24-p16_8xb128_in1k.py b/configs/xcit/xcit-medium-24-p16_8xb128_in1k.py new file mode 100644 index 0000000..148ed06 --- /dev/null +++ b/configs/xcit/xcit-medium-24-p16_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=512, + depth=24, + num_heads=8, + 
mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-medium-24-p8_8xb128_in1k-384px.py b/configs/xcit/xcit-medium-24-p8_8xb128_in1k-384px.py new file mode 100644 index 0000000..3138ec4 --- /dev/null +++ b/configs/xcit/xcit-medium-24-p8_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=512, + depth=24, + num_heads=8, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-medium-24-p8_8xb128_in1k.py b/configs/xcit/xcit-medium-24-p8_8xb128_in1k.py new file mode 100644 index 0000000..b8277a1 --- /dev/null +++ b/configs/xcit/xcit-medium-24-p8_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=512, + depth=24, + num_heads=8, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-nano-12-p16_8xb128_in1k-384px.py b/configs/xcit/xcit-nano-12-p16_8xb128_in1k-384px.py new file mode 100644 index 0000000..bf8c27b --- /dev/null +++ b/configs/xcit/xcit-nano-12-p16_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=128, + depth=12, + num_heads=4, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1.0, + tokens_norm=False, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=128, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-nano-12-p16_8xb128_in1k.py b/configs/xcit/xcit-nano-12-p16_8xb128_in1k.py new file mode 100644 index 0000000..3e9bf81 --- /dev/null +++ b/configs/xcit/xcit-nano-12-p16_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + 
'../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=128, + depth=12, + num_heads=4, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1.0, + tokens_norm=False, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=128, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-nano-12-p8_8xb128_in1k-384px.py b/configs/xcit/xcit-nano-12-p8_8xb128_in1k-384px.py new file mode 100644 index 0000000..7dae69f --- /dev/null +++ b/configs/xcit/xcit-nano-12-p8_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=128, + depth=12, + num_heads=4, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1.0, + tokens_norm=False, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=128, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-nano-12-p8_8xb128_in1k.py b/configs/xcit/xcit-nano-12-p8_8xb128_in1k.py new file mode 100644 index 0000000..e6a003a --- /dev/null +++ b/configs/xcit/xcit-nano-12-p8_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=128, + depth=12, + num_heads=4, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1.0, + tokens_norm=False, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=128, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-small-12-p16_8xb128_in1k-384px.py b/configs/xcit/xcit-small-12-p16_8xb128_in1k-384px.py new file mode 100644 index 0000000..54c80d4 --- /dev/null +++ b/configs/xcit/xcit-small-12-p16_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=384, + depth=12, + num_heads=8, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1.0, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git 
a/configs/xcit/xcit-small-12-p16_8xb128_in1k.py b/configs/xcit/xcit-small-12-p16_8xb128_in1k.py new file mode 100644 index 0000000..c546179 --- /dev/null +++ b/configs/xcit/xcit-small-12-p16_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=384, + depth=12, + num_heads=8, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1.0, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-small-12-p8_8xb128_in1k-384px.py b/configs/xcit/xcit-small-12-p8_8xb128_in1k-384px.py new file mode 100644 index 0000000..f1b6a52 --- /dev/null +++ b/configs/xcit/xcit-small-12-p8_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=384, + depth=12, + num_heads=8, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1.0, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-small-12-p8_8xb128_in1k.py b/configs/xcit/xcit-small-12-p8_8xb128_in1k.py new file mode 100644 index 0000000..cbfbe15 --- /dev/null +++ b/configs/xcit/xcit-small-12-p8_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=384, + depth=12, + num_heads=8, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1.0, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-small-24-p16_8xb128_in1k-384px.py b/configs/xcit/xcit-small-24-p16_8xb128_in1k-384px.py new file mode 100644 index 0000000..6eb4127 --- /dev/null +++ b/configs/xcit/xcit-small-24-p16_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=384, + depth=24, + num_heads=8, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + 
loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-small-24-p16_8xb128_in1k.py b/configs/xcit/xcit-small-24-p16_8xb128_in1k.py new file mode 100644 index 0000000..5b3dc18 --- /dev/null +++ b/configs/xcit/xcit-small-24-p16_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=384, + depth=24, + num_heads=8, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-small-24-p8_8xb128_in1k-384px.py b/configs/xcit/xcit-small-24-p8_8xb128_in1k-384px.py new file mode 100644 index 0000000..34445a0 --- /dev/null +++ b/configs/xcit/xcit-small-24-p8_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=384, + depth=24, + num_heads=8, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-small-24-p8_8xb128_in1k.py b/configs/xcit/xcit-small-24-p8_8xb128_in1k.py new file mode 100644 index 0000000..108e64d --- /dev/null +++ b/configs/xcit/xcit-small-24-p8_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=384, + depth=24, + num_heads=8, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=384, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-tiny-12-p16_8xb128_in1k-384px.py b/configs/xcit/xcit-tiny-12-p16_8xb128_in1k-384px.py new file mode 100644 index 0000000..b64ebe4 --- /dev/null +++ b/configs/xcit/xcit-tiny-12-p16_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + 
embed_dims=192, + depth=12, + num_heads=4, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1.0, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=192, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-tiny-12-p16_8xb128_in1k.py b/configs/xcit/xcit-tiny-12-p16_8xb128_in1k.py new file mode 100644 index 0000000..1b54592 --- /dev/null +++ b/configs/xcit/xcit-tiny-12-p16_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=192, + depth=12, + num_heads=4, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1.0, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=192, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-tiny-12-p8_8xb128_in1k-384px.py b/configs/xcit/xcit-tiny-12-p8_8xb128_in1k-384px.py new file mode 100644 index 0000000..f1acff7 --- /dev/null +++ b/configs/xcit/xcit-tiny-12-p8_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=192, + depth=12, + num_heads=4, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1.0, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=192, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-tiny-12-p8_8xb128_in1k.py b/configs/xcit/xcit-tiny-12-p8_8xb128_in1k.py new file mode 100644 index 0000000..39d97da --- /dev/null +++ b/configs/xcit/xcit-tiny-12-p8_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=192, + depth=12, + num_heads=4, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1.0, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=192, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-tiny-24-p16_8xb128_in1k-384px.py b/configs/xcit/xcit-tiny-24-p16_8xb128_in1k-384px.py new file mode 100644 index 0000000..5560435 --- /dev/null +++ b/configs/xcit/xcit-tiny-24-p16_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + 
'../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=192, + depth=24, + num_heads=4, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=192, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-tiny-24-p16_8xb128_in1k.py b/configs/xcit/xcit-tiny-24-p16_8xb128_in1k.py new file mode 100644 index 0000000..fdceb14 --- /dev/null +++ b/configs/xcit/xcit-tiny-24-p16_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=16, + embed_dims=192, + depth=24, + num_heads=4, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=192, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-tiny-24-p8_8xb128_in1k-384px.py b/configs/xcit/xcit-tiny-24-p8_8xb128_in1k-384px.py new file mode 100644 index 0000000..2cee442 --- /dev/null +++ b/configs/xcit/xcit-tiny-24-p8_8xb128_in1k-384px.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_384.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=192, + depth=24, + num_heads=4, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=192, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/configs/xcit/xcit-tiny-24-p8_8xb128_in1k.py b/configs/xcit/xcit-tiny-24-p8_8xb128_in1k.py new file mode 100644 index 0000000..283f17e --- /dev/null +++ b/configs/xcit/xcit-tiny-24-p8_8xb128_in1k.py @@ -0,0 +1,34 @@ +_base_ = [ + '../_base_/datasets/imagenet_bs64_swin_224.py', + '../_base_/schedules/imagenet_bs1024_adamw_swin.py', + '../_base_/default_runtime.py', +] + +model = dict( + type='ImageClassifier', + backbone=dict( + type='XCiT', + patch_size=8, + embed_dims=192, + depth=24, + num_heads=4, + mlp_ratio=4, + qkv_bias=True, + layer_scale_init_value=1e-5, + tokens_norm=True, + out_type='cls_token', + ), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=192, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ), + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), +) + +# dataset settings +train_dataloader = dict(batch_size=128) diff --git a/dataset-index.yml 
b/dataset-index.yml new file mode 100644 index 0000000..40ca620 --- /dev/null +++ b/dataset-index.yml @@ -0,0 +1,11 @@ +imagenet1k: + dataset: OpenDataLab/ImageNet-1K + download_root: data + data_root: data/imagenet + script: tools/dataset_converters/odl_imagenet1k_preprocess.sh + +cub: + dataset: OpenDataLab/CUB-200-2011 + download_root: data + data_root: data/CUB_200_2011 + script: tools/dataset_converters/odl_cub_preprocess.sh diff --git a/demo/bird.JPEG b/demo/bird.JPEG new file mode 100644 index 0000000000000000000000000000000000000000..9c132a099e87d1c3c1a76dfd9201b03801301eab GIT binary patch literal 74237 zcmcG$1z1$w+c&yr7`jVo1{hk9kPrlhW{^$^k?v0ERzT@SS{g}70YRlhx*J6rrKQh+ z&-4DD=l$Muec!pxIcu}mzH8m?^(Y+!}Zkl5_$o=Y2@taU}|LI<^r`ddv0Z7WbcM-PY&Szm)CnxWw{#(ww!~Tiy2hL%KoNbasZ-e zWP9ra_c_w3B{CXk2Rkd18x@|XnXRpZ2h`oj*$Szn0=ojl{|+3=!^O>YJ4VLaF%p1( zd;`xb*0&n%EI;xV>i=+~L)rPGyU7p$LT-N@Z`(25>hNys+e$0|_>(oodrPXXUXaCFPa0r$<#ks4cMjdm*m{UymsP5Uni4^jgH zbdmMnHhi}dj{it*ef~!B0N{u`>;GdxS-(;LkE-kcH@e$gVBGiyZpMlnCm8-`|0{1a zNGSl^UURp$6gMnNBC`53o*Ndx%mx7THYC9XNB{_c8+qgc9w2u`h&ijjc_P++Xd~OkBB<+-({( zkdx8Y!QKMN-xdWkbJu_J60Qz*fB2iU0Du~D_Wj{+LIcoTY^_YpT(oQzT#y{-U;Uuo z>X2e02Bui`ahU!{Vo2tIkvHdq=T)4^FNYH_{YD5?0+ecO#mea z`lQt07u-^ z!PxAXq$1K{7r?va08?ZuBtHv;05}*J7?>D1n3y<(*jU(vr1&^E_@rdS#H7T;WP~`k z<*&mZ=D*hbvsB586_(68GG}X{Zmi?5Ez1rhK_-Wg^hfm{96eEz$kw!2>=iTgaU?uQPI&b zP*HGrkxD`o2pthOs<^5VF}+hD4;o2qPL%`$j8yHDG4BKCpkYaL#yIs~%X=m+e0OrI zgPEk_HB6;HzjfumJEFN_w*Nll%c$G80~zLoywIA&_Ob7W`L!M6t4CVq?k|!G>N+QW z91F;5TX?)oF0AjGTst8Iz+hywklUEh(NOtrL!jd(f*>Psq9;b>35+GViQtpk@GoAN z@v?JJoTNIbiOYjMMzos%(3$u?k0671d)HK2li$^B<-bDszmI_VKOtPt05~YOeG&p< zz@|iuDwenwPH8BYh7oM3z&q&Gybxq0#+uE%EsTURH)WiZ74|D7#7aJ48A=i$A8Y&c zb09{@v1&*$|LdSP)(@CwLfM8s%QdOVCpugYZA!e+V+~f-B{l4?VWLyyaVGm5V;!$I zui3l#-1XrRRRp;UR8Y@`9mpEEWS`oit=T=C3i}vz4J02W%!1@N%?HcuI%;>C?^fjn znlKpAwiqm))#ksNEUp?qi*ziQw6mL)oB26kuUKyX^H-uT{;vp(1E&*_=9o;@XBfOF zLC=;uVwRKfN#tS{M>g9x(@~)dJ;$GG+CLLH1?24rs)E%d3DQwTikBI=K`iRH%9yM) zW3ldeaaq^HTLJ1+EX8%ChP_2f6FrKDujy+miGz}HmT(cvo+lIA&DdT|WhDh~021A` z2O`P*Rd3%p>{iVdn_L4$P4@4zw3RpNpPAIP4BX3qkLNPKO(`Zk-RQac#NJ!PwjOI{ zA}qkbL}k!n#AUupI_rM@mv1ANkN4Vo4<$}qPtv%Zb_6F0^ye&y&v=e^{U#Q=gh$@b zTmx>cr-2_hiP!tO8_e!#9S3kfE1ILiZykLNnkW)+npMm zVjgP(mlW%VT2)pQP0OQNR3Cab8)^-8lg44^8n11QSkz~d#u7S-Kl|qJAT(fc2-fS7 zt)MtVLb4vsFYIh-g(h1Ch4WUknu`2Sd?SL>-!#kgab0zU^>&%>Fv>DcnVp7T3qL4IBUp(%LL9*;c0IbC#w=}K z2Ny5?P-=%o_Ci8qqM)zyAz8A@5cWHhw4TfLWB(CpOLYorVdn7&J%{L{cMhr@Ut|S) zsAPXMma;WEnphZo`>K2;?JlPO!Nqd(ZdI_MkLiSLXnafSRBgGwuid?dN^3qxhK7LU zQ%y+|$>MO^@Tb<@;68SjbiZ^VH}v|ZbNSIS&gXfAIt04ET<4d6U!qEk=zn_x^Sn@> zK#e?X5U6+Dvq>#j%0$1j93}Irf%|GY?6|Q>#Qnn?3S)r`@?DQ%_sh)WA53ShZ&Hu7 z$$bShh84dOjkxy}xs_c5h%QkHlcR%X(Fsx7}7DAUb&jGDo@ zH_o27FtD1Mh)%FpH&GkugD>wy6Q#i$95?gi-47YkI^Pj5LfBIzPY+JjF>VxPqF>Tt> zpPZ&mL~Hg1Un(r!?)ZP6yas|b-u7UR`!Ei#@}w-CM8Dk>a8vpvGV^n@Q4@8LPteLu z2@=gfR1up~u*_-s67Sq%<`|mpyINuLVrSq4e|`tEa+qgalu6QA&e5?HT2q5^s1UjE zCxn>luG0w!=A? 
[GIT binary patch data (base85-encoded blob content) — not readable as text; omitted]
zxUItWeM;=ymx!&~aag-Sz-k`}B3SvNqVEjKdT1S|P}Sv!`o3T^^oOBwI1&vbOkn*~ z$vr%_FBA>KgsO=Mv`j~$Wj+jTE8zsJaI^@(+6zL@xo`|$m0;pV9Hwr^W9nYo{c=n{ ztj5TFN$9yQN82SC8qZVF9w_dvHMA-KO9m&ElXZrHXM%x;fR!~u70AYv(h=>w4O!b5 z;hkrtYlwlCfF%|RO=(KtRy+`Fleou#ds#viVy<>TFGAA3QgTc}wCLD{T zxRdNncuoS6=-IOGC*8fJUko`Cu<~{Zc6c}=TUk-6GeopA_nsk zvBbpH&+IZAKEbF$ES!2s&^VujRziBqSuwAVD&t%%8m?*4cq;|fgrkxR3=o%;+^0d~ zr6fX?5uas}D}Mf6 z!qj)a-hdx`s|i2)>rVXmZ#waJ-*3V9zg)JbErfp_T2ch1PmD4E+=`Gj|fP`dAY7 zo>k%Et6JQ6%ZLZxHQ~i?+wtmOx$x?@oOt7#ZoK=wa(wfXHvI5c!}y0^P2!VZ%;1w> z&*PI%7xClYEOQ_2XS7e2@S`8j;-7vrkDvc&4nO_-Dg5Fe=kObT{_9^a;h%mog})(i zef0Hay!z#8TzkcV(c7$-qe$fKgwPKd*ta5JBM3V8<%BGP5kcJM^#S_MA$@00tgoge zK#98twLIQ{9QsWT{Ue*Nv&3+r_{@8gh435t8U5F}LrC$?^AkhScfm@C0p~#qAwL

r6e7~zBo zG5aK>79a20MR81uVlI@-ejNJZ)6kcmhQfUUDsOTNhPEsOhU!S~^Gs9QrAY53SPjcy znN%QmRt_tbuA6b8WG)$n1c1C*Eo=lX%Y+;jiZ{`tmC+i7w7PJlRYoACgz?Q20aacE zl+JL-tiedk@`a45TS|C}(Vc`O={O{clZck_z0w!4>HtKm{1C46L8$69Lbco)m~2cu zZwDbO-v>!0{?JtMTpKAsI|x%faWFEj7{#(2gRw{*Am9x|c~|?-5|30GgwLuMgJ4N0w<1y6(pm%DhF*14pfzAz~xfI z>QExvDu*sT3R1lvLRdF}Vwg&O*FtE~$;T0&aRM^>k&5-9%l9MP`_g;}S!a+^<_CQd z>&SJ20Ck)oe;NitmZ88OYL^d`c79LXJVCIMIgTMI|2X~cIQ{TAl8R13O@P+a5ZW38 z=!1dC>(?{Py6X^2H^d@Gw3~9~*JAokQ35pXc*0%+v~C$v^5rl(B*^i^qqt6m z&fyHqt+}wen}_9H2iEuUu)dp*#Z@l%+h{csSf2K{LPsOk$q{a`3s2v*IsrjaO8n*{Zv(Wn`^`y9g7 zv2b+EMx$dvf{x`#w68{>Z8IEgJb|vmQ1oAn#OQSiCT~eGeQyG0Npy1$Dlqq`1``je zF?cT_>qZjVnC$9F(Pg{h5hUV~AA|(LjChuucnn0wNDR`3Vqjt*NN$%v*CbYHkK~rG z#hfc|4^|ujivZR{&}w9$5P&5fYg0q2ul9wpG1$8*vbm3HlEKf?Lp4RXvJajK{3DZ*vVuv z%ODh`!9kdGiy=NjR^f$Ic*H!cBP|LT0E^Cx*Gzr_uC_$dqng_`dJ|mkHjU+EW$?`1 zRdCC|$E}UW+IXCOoA9*{d|;nS%ql^p7o1b9Nm+K+mE>L#ETVp{*8LP z{%#B2`LGQie6J-+pwW_+DM_TfkM_~0uwc#pvK_B%y* z<#i{X7sHaT5rc?ZA!)e`>U zha>p-n;m%L%k{YalG9tRmAeu2zYsXq!{J~6a%?dHZ?WD9M!BqeGfiw=-kww~>z}Z} zc9cVqGH>hoIpK;vl(qdAz|wdpu>S?H96JgEeUi6VHt#@8rsr=Q8oWKEE{Yx}1(1V2 zg@$oMR}}?)VHgt{;l*+aiL^NL2}GHXAu5-U zWjl#D>q*4ZON z&y;i$P&`Wn2?9motik&b=iF%}%#)09V-%7UahZgG492#s+GwPeN5fDY1yz0|u+4Zupe;Sn zdjYG0LaCV7l}{1upzyOcN5E1Ofy}}XBwGm!0b%5q<*%L#Q=tdCj>>H0{d)Q7W9cs?zW$PsH55wPq$MahrFBw0v78lOG>zkM}97^+agD?mHEk~C0rR$Tf*2aKYKD~0utc@l{^Cz0gv@y@~0dG11%p_qUr=28)W4TN?zAu7qi z<3ukNp-Na%-f3vWP_#$D{V}KuPe5IC0`h#qRuRFW@-%ayuXnU2y(^Hpj&L;+guJOB zcou_Eu@#BRy=YV%N>Ir-S#?RqSek?y#z!Ga#iaKZ#=8cJ*P7i#RIbILd^rYX^RXxt zLycWw$Zifp7RxrRG8*ZX6epz#NODFaAtxO2oG55)G2SN`q!Y3V%H?S9HDPYqjtv6V z&b}LG&z0cp*-Gs0lw*CZ39ni9P62k zLgQ!%YK8+*!%C?WGvdc0&@moEkP@qrMxve#t#L924dapCW80=8(X|kRt`!0n;i`R& zaJ3nV&b@H-osYyYlhyb=v6#FWiy1=J+`aLbyI+Rs`xO|uBlivycHbjI+ZDNY9!&`Y zv3PWecw9){B9-41Rp3}G(%Ils`xy|}h>Zj+ePa~CN=#(`3t&mSLxWzp5(=^@7N(XM zq_Q#T>w@myHP{+~^p03$^$@Q5WymG8k`=>rf}=C*Gx*D^T~v#6egw=CaP5L@LwyM7yj<$wgk@Wf`jONkZK%9f3)Q8a9CX z^MvzDDs*2_pz9**>Rc3B&eLBBW1Y8+Xup?=?1l!dH#O+GSBu`8DhytgVd7>Irf(+^ z2IA3wJ&yaOgfa;lF7x{rW6-=$(XbPZ;k^XR?J2QApuBj?LdbICp=XNm%qtamY8AQU64k`sI(>@vBd| z@T;Hn;5R=Xz^A_$#4mo@hoAjq0H6GQFaGwg+wi^bG!oXT@X-f_c>66IUVkG8FTG*L zLX+c0CO?93m6kUSos8XukG>m-e<$#O#EV~Fn>RV`%DlEo7$EM zY||=ajmeQQEJL~&66=>k(-99f1&^jO9Gb!)CNf{Da3(LeKV*5nh;@w(k> z6hNy<1QIRVDX)dFWnECg%=Rmu!w=VJ#bi7jft21bsGIzts3tU)`5>{F z^+fQH6`dh$_(56X3q=V*h2UkR$WE^hhPg2mHky;r<*to{yNp7hFcSIsG01nt!<9?G zG7GRu{A{wj=)p=vL9QGn_9T?Mb*SkJG$Zvj|oWkE+Yj-bPYa8Y4t^Bw?8Zc0dS55qF^=GT4=%fg3zmkOJi_9x$6VPxVMcp>@qIj&`6ydo$0BH@r zFx3PiSqz7lNAjE^kYEW!RH`2&siGH{KARqj6l*kG#ffO`O2y2)9h+Nj92^$n;>9Xl zx>$>&gBt8?R%3M7itkc|85WCT_nmhK&cR(7IwIU_~OkDg>q?F-I*7icA4mC;tbqL=RRX1&PMs zL%8xshS?8Ugj{p}DL6`fP}UrTmZ2yN(8nfs#RPRF4j$0s%F}7M_Iw)7KBmUz?KrI5 zh{ntLCd^-vYHzF`~DHJ0|fuHqY4gKF9 ztm=V4LRC0A*udJvqM{=as2&O*gNHWHA(oB+q1Xy62* zQL4JZD3}<`Qo17vSPVSgp+Q2GSWJ{bQYR+4*T;C_Dw+F|`MJ0)Iz`MisG~D91|hwj zs;4E2j>ljjo>t$V2zxgvyH~>NjYsW}6xICws*Vse^hTnSz|}S&K@%aYjX|`PfY(j{ z>mQL|bUY4|lku1#0M5?IF}0w?=)71}T@2glFfgXW(0~Dh9XfQ^DG68!1gtpp@%*Mc zldv_Iilea%T$sqlr71JcO=se0Dh)>ysn{FRVW&F@`yC3L>rvwDpbEPaO!6~HSS463 zE^08hX296G9_<9Hx`iZE60AH_$$1Qd-c`&uw0Bq3CftYx8pYZ{Vp#<7c$z{cs{*l5 zCn3wTuSY%s$wlDt4zGxb=es5p?`EQ8KMMs1S#Td_5U2=N1gydfnFO+QxX%&B2xVd` z;LN(i#JJ96!QZwsu{jR2VB5=rWhWixZ86-#a}dLXXZ3K2I}pX3EP|VBKhxXOAbPT# zVlmP~JpqeAMQ|&{P+hA{9Zji`F<__{;w+W-ES1(qc7X>`Ol~0=@)gl{X`

r|8+n9^Iz}7 zpMOvL)lK}rzr7du-3|OdpI*TK@ApUe-~YUafBoGGe*3E#eDcX4zWVJ(Jo`>D4j#)u z=Rq8bmcrniizHkyK~ac^2f5p330Yz#@f8JqQAPhuf_armlfLQLGtgg*FmLIRwIR%K z_fa?mq#W-=b@~qdSuE>u_h|rX`mY@M+p@d1!$iE3guGos7GXYL%<0-qMjn65!S<3n zt$=wf5!njy~BNKT4J#;;+e=23Vk4F^3#;~ zA+;(98TA1$)ty3i%Ne)_!rKa*F?&^p9Oi}0aY7bzg1#*XnuY-HyfSrF0F-5f zV?wYzpY@hc;3_x)Il)U)ej2IOgpS$(SSYydbzuZ7LRN*CgT>dbD7YQ5a9U#E%)UD` z=*S{8W=P@8ibq~HMZf69vZ+w&(xN=yfU*Jup-hb&!c_*plVwdnnk5dZ)KJ9fd=aWR z{y%^fMz{(WcM+za^4<}uu$+d*chr54M6xK1O9 zaIf|7v(g|KO2h+*eUMae4C>>>hvEhl9K;w3mowSJuPO zupfzz%Tjb*Pe41ZmDYTZ46V1BpYEsle9(Zt`!(pkrR3*{Xuq03@fuIiibLH_EJ~Lm z;2aHtr6&-XEd*$mx2{wyDG~^2jxWOWrx2k#gZR`CB&GxtG$UYRT{pEEF*EDH_EsT| z4oh(Hd<8C?t-{e>HTE`ZF+G}x###z8HQ80@m(XrluU-&n(|&)mQlUVj+x zed!6j`{l>*#``z$@>kB`*{^Ki&YPnYnziWO%0v-Gf}=?Sa~1QxCjv>?fsklVB7$`g z9#6=k#ZizX@jR2${g7_)MHT_p;yDIa#R*il2cTmr3S(lKr3*@I6FSd5YQWWJQ*ix- z6kL2-Lt(4H&O`CoxSya!mRYzGftmB6n7)dxW3iGs?E8&>97>wbw0T|f# z!Lg3vP*nH%qpbG~%KFZryw?vkVl~py2m)3#>IqmCeL*Pg^+)Y!C^{AiM@s~(MJBan zg4lW_dUj(l$O;){#Z0mC7H%uBcqF+S<8X$fkTnpA^qz2-Iv5mM|NjBY;03HG82CP=`R>qQN<$Q# zO~4f!U?rWi<_waXf|1-DPUj=M4HHBL<;d%eM@fG?!7%|f{rvyVaMZL1qq!#nom6pc z+%~iXp|Ld>EnT5#=Q;HBM`Lgx8YBFCf}k}$EyMV%0)sOO49#gVJeNYiGNP~7h=KML z47KomqZ@lhCpf;vvF$+gjuiY8+Jl-Z?Bxa_+rU6cLPycQg2TDr`H+gr;OT4^rVe z5|#oxfy{Pbfc=1<9i|{>$AFA=t+&@9cP|ZA!kKM92bSF|?n^;7lVZ+}mY-?3Wl-Y( zdWR4BS?-3IFT>vwE54uA5#qEcyJST9MFUD0WXmqdQFBd!#(Pz0B^b5bBv^?a&+}AR z=i?bP647%-itdZ7vva|;5Okc6K-Xmn;fwH3r|-ELhq3$RSa?*8wTCs>e<4gEIW&Z!7TAAC}`+|4@xjKdHlSK54{n z|FMOT)keT-$FF|UjbHy$AAb3dJ^1vWy7+l3e)*$%{QQSC_~{QS31=nvn{VXd8(+5I zOK+#*#g|mL|M6H{dLSHUABe#D$Kn_O6u9>VB_4lOi_g8Aiq}6(!`mOHcj;lF?V5aEjU`#S`!%lJRPKg6H@xP{;SW)A=K(;;xjKm~%y+ z6@ZoF9bTqiZ5X|e%(2m2+@HTmaM~n<)4#pzys?h%8k6&`3Z6X*2(jD<>&i{PcdcsR zTu{SGz{(jL7$?Q^TMtxd-xh0z#iM19$!lNEq$ZZ2QJ`Xn z$$3lWeQJPbQwsNnl-CrGYyw#Nq8w=itaJjFdDMX1L6$|27UottGAPQ7b&=53gnA$G zr6}}6qU#i5bB-h0M8G06i96FI1g$vL3BuJ$L@NkWvf~I7M(_%!ijQET31)H$7riKA zBEN;#luxl*$#~g7z-miCMh^jNKsWKu|Giutf}iQb2a7ZP;K z7%Pf5)F>uE7tPAy9*TprLjnh5v$-q`X@s~G%Ne9*okVK-38WCPl5;|k>WD@flW1m* z64`A^Sce#|#P`<{3B`mf@pOPAC5q4Uw;2nH&flHm>E0JlW>6w`fuKCia~h4M=w_U6 z4}-Qo7)cd=gehMF76FE^BJ-R^V&MrW30S&vAEZyhTo8&p z7eUJ!1$!1PLjq@}fU9_J365Ff&O%{gsF2s{(4Ztwi{gBqlZ%jSOF*`jb!ksPYEBH4 zDdNchJ_wN?^8%KDs|ePUus8u&OzcVG`FJk>&j3sGVlhY2^n^n(T&T$>(B}*A5}$o$ z1T4{mr6%yJ3Qt0rPrxcUPH{lEqNO&RL{^tCatHk391CKdgraga0*$-O(dS~&%NRLu zlVXx_aP;9M3~<|ZI}u&?DKPMe4#STbybtp3yI<+u-+3b*ZPy4$7h}+HBtgx#1SQP% z`IC{b_Aw9FiJ@j+o=*T0a?c=`02!h@iCDcaqy(*`I(Gv-jYOuRVwl-g^S? zefTgw|G`zf`r#p7_;3~Xzcqw|C+aYGn1jj*`b=LU+!PGCm1F={C}d)hS1sF$=;=+Q z?{lkQJ?Jw|BF%aVIi8bnlplkq{y6GcZoRy|sr`7YQOqAasKtdR^|tY zoTnW5?*qN_u$sprQ8ygU&-_u`a|V?oL1`Lm1NuMo!lOQp1r$(HGDo6 zldRB%n+aIDA;tXlC@fr$z}hVdR_}|&>}>+dwQ%%Z2xCGFXJU&+!$B;n1SvE4c*J8( zrlOG}ZZjoZWf5%BI@mZ{!jRf5fl;i|-VpsiTko zGTPYa#UxBtf{TEaN5Co`VxpeVpk-Qv=Fvpdv%xmgx&|cZql)Y4m!Q2X3@shOXz%2? 
z61aMLqA<`Ki6Mg4*oYL9({fDAs|i-Zj0CL|^bQ%(+nr3nGGU}84P%W4Ow=i`(4@j% zuL)Pia&c`U7ncZG=cmm$H=lzY0@mhq3by$Dok=71#&kHCAZRTZu|sfKWnx*{HejC0 ze4NRso5`r2iNAb_>S~$S#I21c9s(>LH6zvx5}@T|Qp#Ira$3`&NZ1NNi%CvARz^Gn zPAnK|+tYag%e8Ox-o;n4V`9=u^?p}81McplN*MUX-C0at*_#?Mv~=X|X2QIgfy^}nvY8+SR9UvE(y4mI+D2xcOU@dBjL>N1wK;Y)aEk>| z300*RG@m^;QHiQciDliUnKgkao>$djNFOA(&I`TyqJnBFK4o0TJgv`PTcpV6*peX#Ko6b?oX*PbWa>= zHW`?x`WhFb(7i0h;9>&C77{SOkwm~s#^u{tgd8iLCtSV#HsR{arG%g={P_Dd_{YDi zz$brKiXZ>55TE?52S5G$V*KjIW%v#4(@(1K$DcLgpMTMcKm4p6zx!zy{`kv5{OhOV z_?O>|;$3K{5XRJ8pdU>sEaDK{{T0O@oJ?h{eVGLU4E| z1ZN+P#+4@%aPM<6Jn?E0UV7I^z)Hp2Ur)ok-_F2SzMq9Je=p1X`n_-G;N5T7@!rSz z`1*IN@q-_C<0rqI#_#^Hi~s!7b^Q6y_u;?(>k$IhQ~2|rp2L6r+jIEuKRk{9{i{du z|Nj0_;E(s?zkhcbfBOAC{^bvA`1!9V@O?t!m%d$!C%%x2^UtPYl=V`!775ovEb^9A z1S~DvF@0->J~*f2J`G$H1Y+fO%esudK=2W;O~1+^glDgdheqoN^eNtFGzJb68ShjSl4{J~t7ClAVdAD9gV%NFxTNvUX{p+iqjZO{Z95uy zTM~+kIApCQz{ED3%yTqN>yX8EZy8I5bx;pWw-TA{1fZ617%2`#FO8UIB=h({>No*O z7U3%87^0alBus9SB)%q4i6O%W!vXDpl)xE8T?Q(hoU)-y0=okB|52^i8&Lzfu_|iwK`SLkf~2${Lhnfgu@1x>tVsQF zmici+8twv?oPedW6Rh$?FV)?is=GiXda?WnS$CmJ^PF^j?*_!Act_}WL_?q4q;9RN8M%=+7DyVcO@R9x8<0AFv%-0m!8&Q={Y^- zpV4FLDJ>?S)M4t`WK2Jkj7bX4u}8HSekcjOcM{QlPb^xl#Gv_79DO(rS5H_~PsLQW62f*Ycm}Z}ZB9(AO)3e>A zIZnY+S=eK5Fl21C1n+=gIaI}}4Zkr-onjh>Cc&_Or`4uWv3eKZ_3LxDU& zKU8vCBY=wvt#%+7^+TcFIa%d|u97}Kln?u(c7pIV6NHBO5Hu}@qGdH49a~Z8*_EJo zF9IWH0CGT$zoRg9S%SGM(U`jwhJ`C3Sic#Kjr(G-cqbZDH={5{@EW`pgPyCTBL4i`fZy;Q?(wc^3sPC4bwO4|k!8r6$#q|yF z-1?%>LAdJZ3`Z9mb8jyLDs8CmGr*!UpIp>32{WM18qht;ph^YW-L6M}YYK+y>A2N8 zEZ3^B-KfKbo($X^wc_S@E-sH{<7hGiJ5y;`nM%P50c~X=8LQlG&KYpHVxmK)V{<1N zi%dXs1ez%(q(OpF+o~MZi}5ICEA0)%`8DJG}r!o;3VI%zH^1pMbT7_jZ0j961VOmomok>i< zm3ceG3s+ei;#qL1+@>IVjoTIOUtzKkLp6j#vGAz4-QG%I^iFQyPNRwWS4_aWYUJ(7 zcn(q&AIVUCL512&3e;U8U|o$tD;s;yy$Kk4K!u5i^ccUNWpTe23lFhuA68@Wp+wBz zFTuin(O9@6!OZPQjNb~!`0WVHJ`{`H7qqzfwi%DU@5GB=E5hsFEXT`VE5>sl=Hs#V z9k}O>OsqeqMXwmHUJ6F-R3JL%qcFOfh*`qb>Yf&dS53I*0sh9bcD(YY2VW*!eeYXU z_{SgA;+H?J$EQE9$M1j5?JuhEi%*L2o1c{84?nNO@BgWsR*8T6brY=}fBd-stPcF) z7ybC>UytJVzZk;rf7#3Lx8axn*nnUBxEeqE`*Qrl_dWRjH*EO!*Dd(^SIv0iEj=E3 z>MmgI-3r3~eUUiNMt{%KQat>!9M671hZo<|Ac;`Er_~N%R@z&QfXlA_i zRU6*^S^>WN?K*s0EVb~nIsEz$2l$Ua-^QQ*_k;NJ|9Kq$_219nKmYs!{`?=$bdBVa>GMw|Q>!lR#(|i3gXEmOXP1V1_*E>2wy8&hV77blZIP`=o?}Ou87+*S~k=7OKoqV3%8iia6w1WOHRL_Q^ zc_k8kyJ8Zn7Av>3SiC{i`*O{9~4Z; zVws49tdmGEon&HTA}3@;iD5%pI8F4Ph`B#vEhc{{6F!r^=+TNz^M}G3#$+A?V-g&ILZOTka2i zl~{x`5SewsFxLgaS{n>UO)y-QA;>GC6@%-jzngy?nxYegn&Z6Ab=lEUqCsPk3Z2!-SQsk6_5FT4etQkCJ#&eGbuR(yK?2q-0@eXu|7eYXHHrH_ z-;C{to#;QLh*^w5(}EN&%x#Sm38-KmFQPap7B@8zu(LfUk;(G26rY5XVCZQIM0H;v z8ptxu6MpDg2*mJa2qq87DwiX%b}Jft55?i!Q;9hHbOQe;8Y{O$v3xrWD+H|7dnK5s zfFC;wCR_z$a61hBTZ{>tIq*|@#Lzf{7^R^h_>~+y;yzw(HLeR89zsmI?n`m zF%&CT!?ArY$?yRw*6xqRGPiU0#bfMd0t1j7U00N7KCeXiRssrF7@+6am5SD z?p|mQ_dZ_2SWlo5CITT%fR&L7Cb?1caD^~fiYJnXz}y*%++H@80V?X@Xt>AXkWVFB zL{(ijr$;5X)ibP^X*F6VRcIcTdvDWhACRJNBmv$1G3e@z@H#{W8 zIF;}i6`laB@hKU`sX&M3#9e(Fbd6}x*~>&tz#1Z8O*YcGs`c2aP~&Wk4wqX^xYe7D zo5N;Y8O_4Mm>Qb}ag>7Pqh!qN>oB~P zgwAytnpO#=Yl$dbibwu@4D#td$un^CE zT$3}QDF|2^0+#4S(nBo#SxjIm*&+B4jKss7Elhr50&))FD05wh4DL_=%miFn_Drzs zro!^su#V`NGA|imAz;}EU5-tI7ox;^MI!l$p~pgAn`bwZ$d%Lw^LivE9#&xGnN(~)&vJXog7dG~aN!jTF20({Z3@o5pu!;= z#Qx*)*nTt?D-T5xx+1Vb2)X#05s!Y^j_1Fck5@k~!WX|&h4;Q+gZKZo0q_4^6JGgt z8E(C4#rh-3R7A08p7BT9WFUqXqA;_bfYn_U4lWsS<3Tf?c)@|!zu>{wzFvVJ{8c3Z zs|vsVq!z#Vc`bhb+Xg~N1OD*q8d??p_0wwn+iz;{AHS`~pa0N`|Mk0e{L8P}30GbC z32+c;e9LK{6+;n_ueo}@zx#f$UUT_1y7zm{?Ae=( zZzpl!Mii^h_|UQ4io)fFOr0l{Qj0eJjiv+%u!5T032N{lsLqXmdPf49>||fE5~K?p z+Ds%Y<*~KkO6h^!mcR}>fwokr0<0M2j?x|%VkEg3C;N|bc{{S%mT@iisxw*PZT5r; 
zvO;9PR3b?BP37*@Am!4tyzeWR^HmzS*W2S$V~2NzBm~74I7^1>m}7!-uAnPV{yW!7 zDhsJ3XUaUyG?KDL@Rw_hpJ2={-$W8TBZ6ib6P0hq1VLJSxeh4}n&fm!0x3!1f-T-G zKM>5~9iFT>5WxN`iEKG1`_kbk79Wmb!M<2pHic5L(vRE`XQmIBF=?I=5j{q7yqMzE zZiS~Tn^&U~{&lVbQWpZt>gKU{~H6BXH{nu#!7{!#7k1qkCF=irT?8((R6rL)oH4Iv;iK8mUtvv z;5yv|w=@Y^e%BlUgWyg9sb{e%ZpE^`iUnITFW!|V1Sw~+ngu}0nZ!mLDO=14R|d+T zsiy)~;AENKC^Z4ri@16|i>v#;aZ(2Jbr$q^tK%3d339X{ZVB=q=|=bz7!Xh`-*1s} ztjnC}J}YA9+Yz%s5G@fgPeHe>DhUg1wUYlg|7XzZ0)0Gl_3;n@J5AHTUV!B|S=LRQ zngHY_i5GC1G75KP5ap?3@QNRWfBa}d;>Qvyp9zhTX@okVL7D{0eEB$y!Pimh4HsSf z9Q9;sNQk@O%FT#C7Xv~(rF;suWugFUT!fUFA?5^ync*E`jJ>xe=5B&J52e%B|EkU& zstIzH@$i(hSXweQ_j>`mFg0vLM`IVE0BfvZO4$xCVLwhVs0@JQ$sf%)fz`xXZKgEqk}CUD?m~0boxD{mZD`sk z_0D!jdiFRmXTLKGkGe5ulE7FIQJ_pU2+-6EljnvPDdibSl%KVHL015xOIC zw`C3|4v%p5^iob=Sj6#5vpICBj%{ahSb89iwzZy=NL^7d&xB%`zmgtvX0;lT(_lni zg9+I(uc<{^B<74EK6?~XWtuQcjl{ySq*bVqQ?Efmi$JMMi;6i~)GXAZZcvc5cq}c; zHECO|MeAw}>LsDAUObALp|LbB)uwTY9yJ4clqw_DbZIiHLx;j{6N+YAP|#%|$dc$X zBd5cVj5ej41_4$=kvbD+{d1OTe9rU4BwPFlOb?T%w>yX~2OQ!r;PNy~nvz2R-+B7WErG1G$vzHstz0y!-+K9GwW;Cv~ zkR8E-LfL>>L;rO8II+uAvS7hLtrTv8BX_})=S&T}Q~vm#y!{%L}jlol6e2msRR97(UX zC$qwij1o%$k~vvYkfvm6law}=nUX=K39d4gvsRL&l@^;(S!zLLnJML^#uS$uQz!@f zEXm?>Dy+yZu^}(tp5h$AP__rnnF6F_7kUL)^CsJ{aGC={X|62Jc4Ka?Gp%{HROMSx zB!yX?{9b;A3FQseGorGkuZ6AX4J1CbgN z-)F$&9v!CkX~{vYM!M|qWowM++UvsL@j#ZG2xHZmajZKxULsl|l(lF4Sb5x)B})HK zd#o7PX->~pBRV$e)4kn<#Yddkd@+avSH^Md`gktBk;s*|Q@D0Njhi22ap`_4`>su9 z^@$kzHhNR44Dek19GS&03ZS*AZ84&At}R1reAu`vgaa=}@u~_~@jSdch0h*N3<0(&eUX0-7TS>fnzlghEwDIxJi}>p4 zGQRzN1wa0^mLL9H&v(DC=liD;Kd<4NZUT1-^Es0X&X$$sELK!f1B(Gfqvx?j5`UY#a*b_gz% zc34tj`U$#}Uf4ssr9z!8`^s!VzS8GXp9>R|wmyQVD5VXdRO-0+dG-+)xOMH;>3(aHpI=fAiPKE3|NjWC3$HT z95%}_SMNkdMoxF(q4nrw<| zssJlPK$Ru0Wec)q$u!>-7b*9gWu3T|2#6$6^RF@^tie*Yg)QToY^6Mr1WuC9h)hGm zQVa-+*TrX?I_?Uvy#9l``?EN^{1+Ri=dkxu!y!l)`$&CUrzq{e4DpgA*1t-C)g-`D zE)(`yGyZ?T8YdAsPu54Tasz{{s{NkQ?L}av(q>F>m8Gj1Zpd+}tb@stWXEgaI$7!m z0hC?bXk6me@R%wgAoZB^65bPEB4FYuf?~%I7%34lh9G|p0zEbGb5+l!b;7&!e04Z)Ri_E69H zS=C_A`hs4iiw)~w!Bv>xDpF05Dceqv=P03^wNjAvPd5xo=b^H`q695b|Hf8uS36eUdr^5Iv^MoNL0YK?};4SyQvxmgY?kbng^c?Q>$mA!i1p z{2M;u#>i<8hE58yUUp-?#DWu^%zfFDo`c?W?DnR4nK+^F5^O2ua7OqMOVE3L^| zV#Q2>Oj55o@siw+uaf#BU$*m%v8tN~{1ODGlVp8IK97z6zp?iI7cPO%5fn9ssmaFV zv@Dc_T+mh3 zWk``Eeg#ch5N^0xb=BUyq9U%9$!Xa@%#tZIg0Al7qt8)`Beya@x$vX%|bh8FZ5p0KokOFTnl`oC1Dnj+v*oyAfv9TtsZO6gdpmXBpxRDO2Uua_0S+K%iI zJCYP&b(>3;YED>^w6BR_Sse=N1`JEE#2);HpXrl$shVs%k{cg1CGETz-uic~zp; zGRfwPY^lh1q&`rin;NY%%5h%+!R~dQq8GM)2Ap)et(vpz)p|6 z3Ioa{Lu-`mysOWZc?&#PxX_I`^Bm;49UXJ+X_ctyv6dagRMk!>tyZ6uYFYoahNOtt z6U+5fLl{jh(POIoW@52E$&!60R~V8c1=-YEBPP@vNrfeaM!Tt0P*Tu${cn)wHYvXx*X2ljj}r-V+(_*6WNMEWQ)l}!Rgg8M(}PK}tm6fH6WR<(>@{Law+?abg7!Xj zG6%INRrZ-3HViA5_%21U?bQkFz8cTo>r(|qlh}JDnq993v-O-O8;-lM{)iJR4%jiU z$AY;#OqjRZg5kpstUK+){#PS7b#oFo@1=3){Tx-i^+_RjJ}%(Wy)0h7oyPXl@yuE3 zEZbrfGjsn#eu*08HM%r)%eGtU$%>5uY}*&knR63)?fMkeU4Ngwm&V5*r1Hh*nSA$k zHa~uw$>Z-c`Sqtvet(k5)5n?o_EUynC5s=w&JtYB;?cKdeE)ShpMP4!XJ3@?!Kbsh z_ufqIzMaS$Z%yLPTd^{Y=H?rroWJVJkqhqZJn6*hL)P?flVeC=)v;dI&3ZxBMqOD} zBL?=F$+B9o?6@V%U$$oHQ7cv+b71pXKXzY^<>;;HTz*)_JKy#4=`SOE`P*_8u)hCu z72o}_l5d_a=R1k-e;MYxANyrl`UF>VWZJ=ppH%V2d$TzIRubD^31j5AWWGCXDOj%0 z^Z_~U1Xw}s+JYMs!MZ&movwtmyAdMCS%@SI!2+j1fxK_81wOOQ@$0a{zg?iyVNF<< zEm1v!@E)Zr06StOCMY53itQBix7rA@Yy|&ysz%n~f)Zsga3z#sa+I^LN)PVAT}~46 zUkbKlANFq&yfnyuS||ToZG~@{RE)}*p)zx#9O^U+5?x72rvM9Mow;fRs(49gajwf)XXi zSZuQuae~iD?G~zm$fKGBef5R}*BBEd$zni(A-b^V#Y6=x?>v11Dg|f%16EA00BfF<3H^@5 zNQ{%mQN2+jdFATinW>9MsxB^4FSt#SL_9$%{i%AgPIPdU_2QnOjc0-u zUQ?x>h#xD!8jWAsTobB(CITxnGUv;Fx!8u1_X#FtFB|!8JDYE|p_; 
z(17NJ`ZNs~(I5b;>o=lqp(%}vEL4r!n--f0t_)}#7GMo&P%Xf!9DISQMK4k@_#!0> zo~K~We<_^%9Hk4SK3Jef*<51*l_7Z@Qg62#QqXNCVIkm>db!P5aHUKQ1z1`|T> zB+R#ByeKfT#e$$}Q`M;8ZUP+_!Iev*GS-rs6a*6T+$~iOrVK6obF~RB)F!M%n{gF7 zOcZQPX%a}ZnK7fwg7h96vgbLGx4?z0dCv0Km7KY5bX5h3Y017+DysNW~LM>%9$x;n4FRdOG?WvsFduouF-~i$*3#p%_tJE6bUQ}Yt1RB zFsE3ua%QVO?RMgreqPR4)jd5pkuNz9aD{In`TVSG+hdk#xg5iLxitMPNAr} zLPRZDYm1<;M=Fs8QsFEVSS{!koJsUab~n4vmUdaLhAvCWl!n;Vy37(_<;#JWS7|_I zu{LP}=9%()8AUo|NamYWDu5GQWy-wH5L``>??pEoO66xHurk44kQJ!9eco2pC_Y?} z70_UfSDhtZ^)i)A)xF6M7kSN9mff?}Ue(jKe}{4zQL=CWtzV5k;SI)2l;4@uVM~Hk zcqwySNR@S-(eFvtpf8#I0i@3hVCI}4())r)>-8n6-HFMKCQNG5C8o!l)&k`DV%vTg_Cb2a{Sgr4&Ml4@2fs?@VT??gu4n^ zOZVF_xYtt9Va3ovJC+}IX0sse$Scvjb}ya#pA< zH(%!Q{WsZytZaVyDVtv(XA81&`Q_0pe)?XCVt)RynxB5G<(qG+_~Pp-KKxwf<^4?F zzL&(Ex8t~dH;UH;S66QbaQ>PnM=v_F`-}tYk6N>6mlPsf^qI5Wgx+mNbZ^$DZ>J#( z_Y1I&3Mh}7vGkaQge5DFJF?@|5DvdKN!C{?*WNGZ-5>h+^vMvP|2)jsPloyCmm$7< zGQby)=kwL$xqSPmhi|`Y^ zyUo{PLaz?tvi-uOpox(EHe9xUm~306KPqJa@30=_&Qrlop9OxirIPHll6_9O*}{sb zZmFo{y$QWC70AYPnhUHfr97~fsWsySVi8igMz#yIWZ#MsltoBHDpOTIZ~yf5_D|T# zZz^YFZ3vO$C`1`rr$H*kS~J0k?DLX3IcDO@rQ9e{XMC9&v6X6)d}uIP zvfL>xT1;(J+LvlcRw{{4j}gs_O&QqY!0H3uY&{ap%jYL^;qnYFUrFc9HBG8Ly7<1WokEsbLtQj)8zmH$b4}_dL4(FJc%h6@P>vOF>z@G8%>;Dot=D$Z}3M z!70N8`*b60G7YfLGs3yhM37~Ki*jjBk~XEEuc#V9s+4)-3QdW~kae4^s{)pf0LCrg zIh@`9g{$kc{{z-@*#94}A`NhjSI%G=39t{Y#AT$(H}gln`KfBuafCs0x^7Kv0sAs<(0PX-0U-bMHhw{1SEXm-qeS zH1LmA6JSX~9x9Lh)bR6A$J@~91jw5er@dKy-iy`eJXn3!jWy>zSb0vs zb=sAYQywfi?Zxm(Zx$W(rGLL4J-fUlnRTUQr!!5v9I4x3N7Ysv$~Re4vfi4)6*lA# zOAOkPJ6EvPVNPN#>}|5tTKVBwsx zl+D+tT)8AE30Ki6JODiwEjqe!g5h?#Zzq}J;(qd`}7#%g+nRN(Ud%o?S=iZ0X2wMeSaW_pz}Y>qCO z%~I)i=~LRLPj$Z@O)~RcD~#z`Yewffb2>K*61F%}yWWAqRraI}*)XY3vdT77LXW3m2a3}9qtc!wTxsLd;zVGhE#3;QBun*f zw#P%lRbF!wY$=xu{o0iV?AEF_HvzJLgh|9HZNmhF$&ziRNrjrb(1XIo-jpu!mvtW^ z+ar{sfiQ{|Mo`!vLH@iDGP~TF)?~(%dTo+gWj|^g!}QLVC>heDbE_@G$2{43F@z%m zsk3iQa0y-0SdCY}KY;P}RTBoW9*sc60@B2>+dGuv5-+WfUd+(-m>#gakGk}{;2g`rEQoGDT5*JCX1XuC11umU> zf+Z88d#qHW1w;vQlx`Kq$+4pJQ57l`Wq^V!Nq&L_SizDAg~&b=)gjsNY%3-xx9ZFO z7~O6pxH6UJ7L1qIqh)`O6o`d)SV|=<0F(VSw!>5v(GpQ|-2Y!hO2QDK^rt5=PH8hD z{}J7+Txyd9KoZ5s$}z;%jAdG*B!dE}RHcLD77fw`_G#T3Oz#>)YM&&RJ)@b?@w{ZB z&r{SnnyTKhbS}|j*N7K_?|-yd z_N`%FdoYhvH(Jj{gb0wz6JqRRb_bN}?B9VouO3bNsT*@JuzvZH6(9<)Ohat{cNWVrUp|wkgKNHErBv`yA?vkF*1II4N+Wj#SfL5ZSbti0g}j7^*K>G# zzKEOKi`Y58qym{{d^_{{c%GsD8GMluLqTDI1l~ z`U>*AGBoj$y1+YG8;>b!c*koIm>}>ISoupi>y>DXr%Zhk4Dgj@^NZKTPr&IHEwB=B z`Ua}u<2@E1Hw9SoTh^NR+sgMHC0Vr7$6K)FC(sIU7tngje(GsLh>w(YO2?vphIsnv z;pC%lh{LB+d}8iGoYzY@=)o zrx*=fl%a*jYpB{^*+u_HaP==)H~+Y;&erP}H@@O@*ueLmmtr^J1C6Y1LV8)}8m1aA)l~SJqu{XN>@Bg#c^0>a3O* zOXYLJM|>FE=PgN^E1lcyXp^M2Wv4X_yDX{QVMf(96G4^tM~}32v4Qv?p;90y0FTSlV?u_YD@7USPm7#ayLb%yg1^(~Z`eAQsL|WaIiWcJGnn=13<8PIk&M*C+{e2^&tNFtRT~5=AU&(i>0W6oCUH;! zt9jUzy2S?659z9UJg6EFL@j)Yauu}x>CY;!=Oxu0fkkt*sOUGLdZD#UEd^U<60&~! 
zjVYR^PjnvO0uy+U0iY`(j?>na!=Az4Th?W zA?Tm0PiT=LF;%8aZnPpvGMiL6C^P5VQ7|Ms-fCZ3Hiyu*C5)zxAvCNDl>Z8#c~dB@ zo5N|{5KhC&5Gof1P~7W9LA!?_(wW2pbH=CZ5-TxYfHgsYl~80xN|_+5!ji&DOUi4k zsB2b+$Z?>#Q*_^AN>PIzg|cH6)$3Cx%U@MyM5APT9eJW6(CvfpzBG=wa<;KIA+T5+DKatLsjgwf=7a&=eEg6!XPZj9LC(E&s zDluaeS+dQl%5~@z+%H+^%$}X&ICV0fE0>eFb9;sgBwu}z$KxLg`Q>pTPkvUSn4ce) z@#NPU{`j+vKmYFL*QXu)@MAsSekZ8;x|lCM&*q~K5_#wC2wr>5i%YNAar~4qhff-E z=&TugPMb+VBDmT-Mk;F^79TWa-X25xcIh#6)Rg5X%o#qU&(fm?tUGPV_VdmvfL?hp zjn`zoy)CHx@Y^;%`=OUlzwYIe&$jLUkou)&w62qcWrL-Z6(-bgFs5d;E)~OLs2&_e)8J@2 zhsLsKlQG*)cyjnc7#FU_%lazg>rXpn{mka^r;B;=`AR;&KgiGDY~-g$+xg(*<-GUV z3hsZplxuG-;N@4^Sbw;X?hTVE9QJ4W0w>k5PjSi(K9UVj>@i~UY(u6=CY&a)${ciL zrbNmjN0Ju1khsW|sS8|)?{Q{gt0OUW_NqIh!-{OBT(rVF#|jTAZ|ns|R%ymq&NRV1 z#Sr^63v5$Nu@q>SOw`0^+*pjFluL*Tu*P5|@URtt*d@qklZ>#NE+Npem-0&Kl+`xZ z2>V=lFINwTTx|iCHeN+~1eO>OSt`hta#Cr76*^s)@M!|A$$I!j2wwbO!rylco}Oc{ zcNv9)mnQZKu!02`0zS|GKVXFmu*S6uuzF;Fk>o`MtOX7NPyv?gN8!qq4W(^vso+QE z!A-eKJXvXDrH*UdXgnvX6A-UM&=dpwrU+ybP4J#(imx(_7mx`ey!yXTqYl2dvOM-C0xUDUtaJoix&)|zWk#^O34v}#1PQWyy$o>oki_3p6Gv|i ztULv*f+{V+m9F!%SojF0f;Dl9(8exA9V`D)*oKV7EmjZzsb;G1nrw)>GDM7o$0R*m z1*8r!qj8*|bSv>++yqzN(?{Vi$z6DXHWR93zmRe!p+)M}`Bqe{cBN&94;}k_Xy5BW z+in-S_qsB7zdH+6!183-DNk0N@nG#)HwhQkoOKpxDUaP)dCHX)C*1^D9xRppdHAp= zgYx~p?aGM{ITp5>$nP7|u+xazUHYo1*=|V9RwEj=n$x;PKDXI{*0pvtuCkG`+KSR4 zb7l!PvS*t!v)P@CmYBF)=7!otJ$Sp9FddQKca$mZu!|APwrLQW6xiw0I_W)W- zyl5`-qP;FqfHjp3>r2?RzlnXvIyi8;gMH`f*?G2z^(T^5!*ca*vZrgE5uK~G=v}MD z@HS)C>@{co9&@%Gux8IuM|K^yXX73#R%|t6M3!sOW(yVwvijCYy(~!VTy9VMQmLDV z6kr+BIATE4Qhn-2w5b`=pmtb`>cu*g^(#FiXiz*)M+K?sK}%`|tyR;?0aFPhsn2vN zoHv%tuIB_+|01RJU(9Ix59ysR@yv`eJth~bGpSgO1c8%sMoIxzT8$p*b^2s9D%TPX znO-kSRnAgX3am;rNRaO&6bY_MC5x>z6kr)Kv))iXZ$NIFfdET3(4Z;plIhM~WkuI& z3)O2cdr64NK z(4jP2o3eZ@if5@&UZhS_r7nH-CXBRLvQF@_Rj{+A(^jA*6>6su%R3|lFUtg2Lz0~@ zXfR-o;I~)4*Hx}ddx;K>v(%~194#@H`W!VH^3|v-97A2HI`#6|TB*#+AqQ7lw<+0u))Y$RQ99^K#Ugh>l^@M3!f0C+P0O-a$rNI#6<}5OM^Z7* zpL~I2MzbL^8a2r1RHJl}4&7U9SamXhz1O2Td3Q3G?j>>U{tRvjT<$!~=JtbhskEkX z@%30vUJK;Fc^CFeL2~GV4|~pfvHh$k+b;UD<&^+7obhGVQBM|awPj$ftlu3T96d9E z%dgGk&buYteXoqy-;sE$g6lU+IejvlJ=>F5yF8u+b0Vp(bYps|A#sUX#7-GYcP6${IU-FkL$7H zhz`qljAn4-^Nj40f@Qz#2irB7yF;C&$Bbm1nKE=hTVB^<-3c@HU3TZ(n*z%B1%4k1 zus+V`!KW2`@Oc9te%j1OA2#vf`?cJ^U%3sHH5??%=!uRh_<l1kSTsZ6YdNO~V6HSY41jv%X_8Bvy*N`b)y2N$qNf;6@Sc;!*rGnPPZYw7A z2$bg76D{zLndiuac~W`ySgF7?zQ>aIxi+ftB`5bvc_5K6&x&dNO0WHPOkQ9o-<8ky zSV$sf%GB8gBz5clzew!Xll@OuLXRZDUWz1kDUwyskjJUB^<|%weXB!X3Or58w$&(G zq}&}OFx}?J@IFsgzZ}4(3lVI3HG*}Qf?0nlkdeoY&m~_vz6aI-@>C$xA2V;UvB5y?|1XbS6c+}Yq|N> z2uChaFBIV_jJ^|KT6VeB4$sTqlW5k)X5f{>ydXl=- zivP|Yz-odCE1tdp3)+9r5q>O{WcV@aHMXF8?C!NXy4~f$6gm^?-O7h z5MUj4XR*@uO7OJew7Ve7mDOjQ1Xxb0vsi1+D!6iH*$G#cN)kHqvV8uCI|KWj>D_Kk z>t++0HyhEi-H4`Lh7yvz?=hfpx72Gp4d@WS^zE}_?p{X$nZ0~RVwVeTn`E9=+DlSm zFVL_hUAfC$68!kwvCI&V=amYuDjcMq^QNaRf(4Cn3^Yw))fk{8l}u@1-MZZo#* zwPe>3TLG3Wn*>;^wwbYXiz$o$AF#~mTwy``aw}SwSqZF6B}`~tVMhCE0p=MNGnq9eBvGBkCB;iDnAvZ_ z)Ls+GI!*DH-*zq%_2+3}EeDWe&Of8Q+ox#ZBoUBjMqI5EX|sLE9|)p+NiYp-Lh0NV zNzaZ*W(%hJ1Xc5P$4EvW%beZOf-HG&x4gb5N*>3`=O)m;F`9;zVN?yvZ!GkuWUenc z?XD!1TM(D0z)F`1IZ{dF%Cbtqky>FxX1OJWRf42uC)&E*=<0Q&z002ZCKD>_bf~Tq zSP4Yystu?smyEbrHcYQq)}Qdv2ch6YV~ z+YK3%%43PFx78h{Z0)sX*Bl$R_87B4Dvos>I;@xMext0bH7!OARcg^M$ekxUaj&em zj#-*C2&`(;)u_#q#{#V8>=$V*97Ri|CiM+^RJ0lkX3R*FN-bWX7TsnhnW4F=f1;3P z8-iMF323yzzfPtNjuOrU2|h#HTnUkkIY{XpyVX{{V@0UoD!SE_DN=c+&9`UPkQ*gZ z0hA7VNCoCbrBnnp!#>mv`%*s~AQ?h1)eD2E?GLAMk!*v3C~D>hQ#r?n!cKb?;xn4G zDec#%Ym+&P4?3~_oWSWu45#lTaP8epZrsb}*1bG#-^=IPopfHkJ&p6P#d72oZ}yzA zXWvC9_PwHXP2j}N3vTSb;wurr=CeMmd)bR6yPcRP*~{QMJ67-XW&fFYF5JrF=DiAD 
zy;aDS8^yeOxrkE&tb_a0*t&Tdt5;27aM5@wtEH05lFB_*OAqL=@suS8uK954P9(2Bi08)pDct@bgF7GQ z3bcy2_r8R{>YaD8dGpN_-g0IY3;YQnPcPS`cq;RyMZG|a|cFA#d zCXjR z?xb*6_Q8kuEBWT*9)9~~1;2l~hMym<<-51nasTo#ub*GUy<6+~;N3lZ@cw?;{`9Zcf|S^_JJI6bT;3#ugS1R{=!MmPzsoRSQ%PtwP6rXVXrzCY7Q zrt*Hal*`J%`#HLJ3BGgDnop4j8S8t2 zEP82R={^d5$A4qs_AJ)HW3dTV!#YR}`%n#Bqjm6_s9c^jB2>T^nk>jlFeEt9n8+EH z#H860HC>h~!Cc@a%akbaOB#jObXjj{>ICQM5>+boV~vFX%Z7BN4e_8eMax~NTJK83 zc2_}`EA4w^U*F?OuV8E5A$R(ZxH53eg~cyBF>=z073W-7d&z?}7u;E`w81*zB&c#^ z@lh8R9&i*aImmY%=-g>X<3@Ap*PGF@#hSJqHgxZ`qi4UZJT|6ziym_hS}}OUiTQ%8 zd3yy{J8fAgxSO+6=3$c)&8wZL9=0QYwh5_qnxs^ykuGMKRcS~?l?}~xUi3CaGSC*! zlCC6{c289yYp`u1^IK!-s0*X7D~^@RbJ)7Gl08T3*?Xo=b$M{}sdPz(Cb4LL0KHr6 z=-!|-IWnMYjV|*y8L(uB87p_0O4)AA+MPzM*;!saq9A)7o&lwuH+L62s8p@eInee@`?E1X%sMMa6q%gYA!%9YN5w zFNVHd6Y1I-NAucoR4)yY!Z?t!L4S(pdz01fBycn#E?<}NIa-X*)+SC0kST(s8Rb^w zNWoQHV@GA96ZI`h&+3k}b=c9~W=We!y|uxJ);ie%YmBL{Fs4=xirNx0>PoBxTlUoE z+f$w^sLU~|R|47MQT4-<0L^WapS`%Ju;p4h-3HNPxP3 zzA-x_L*LS-&1ON!`Wh{k7LQ?3-Y5oUsWE?+7Tr1OG-aq$m##rwmKF^eV`DV4k=hhfH*30A7p|r0GqgmE<-9RAa zbNmES0K24kJ3oxdK0hjYJt*$5C#ym5*Cr@kVkrlZGiy%yu>DdP$6kx$!dofa z`M8AJ@0N1?ZUI+r=W_nq3{^XrlUF0zcg~INr>xlbsv`$)xUut+13NA`bKtr!hi(S5 z<+P_%Xl?>5XZlu|)4Rf&h3nl|e<+5Xr>61pl|0VhPzElp;{5r1jvP+o$k9yp9mr(o z?wM@eK9fa569m-W%qq4cK2e*Hu>a!e^KT-R%MJn0;Su;eAu%s9dAbhSMOHL*da`I~ z1naju==n>1P!@{k@&XKiBg3aiv6sM6C)~fBoLXAHP)b_`7Vr{$whj zy&ui{_kwxn4R3DWa_90@XHJ~6Vb2j`*6q|{$;L5^tRKa&#E>9sWV0G0+cji4by#!6 zjLl~p*z>BVtdk&4z7frZx8u3|Ac-3fQ@Qu*lr?9FI)p9*A&6sR4NC@CEZXU|b1rEd&V=Zt02 zd<`Z_wl{ImhzW}%DUxG<@*-QNDxGZ(I+DE5g`|Z}OqTsSevu7R7YnWgQPUSWkSd{E z5=$Sn7i>6DASlWkk$q#aC7FV`3}pX$o8ChNnXBv~sD#Ip5N zJo{fs;>fjRj@?M%=*?92UrlAl`D8YooW#!4VI036!R332vK}&c{k>U&%`zT*R>}QO zOC$<-|I0!?`nr&ZpXPAygG}z;Pv_FLc#dBh&&g|3xq3H~ci*q(llNPA^5p=3e!Ygz zUz^WmsSwU@t>yZmxm-Cs$dRqR9NMvf%jY(6>&A9&-`>dE54Q8p2itk|jV0_j)y&|w zOsW=zliKM({;;f)Z$p3_qe^n*TVsPqxjAknrnr}x<635lbFqo49hF_C5mqw{u$X3mdAxutUH~@L z6gxqH!xST2ry1iu-B_l2I3#FGax17xHO791B)c;WaL6>kDO+BXMASJ;4^Ju6{Br~{ zGff0D1_Z|I6EHy+zX*Yh-x&P7#^CAxB2F&P;^ZyJ3X;bWFXBE)9lx3S_(~$}UnBsj zk%Ujmp15vj;^(+CagMX9;eU9C4dIgfh1b~-Qf4LpWhS^X#z(N?5~qu8ggTBfTJn1a zc*-*POf?axD1&Be<0Am`o~(sOv>KifYWRd{;1{TgzptRmRULQRF}PWc!NWq{x6~9| zDVGW5H>|Yrm&YNF#)P?;6XN{OorS?3<^+1l|GVqs?5csCGQhF(^B6h(8w;Q3af;H$ zF;W}5a7~<}1(32ngQl4hEb|(fYEDe16*1`oD~X9&vaFd7#HQLYF4>x>6mxF z`J|``urvu0%!U=2GOkh*ltw#J`#i{8=tjX(cPiF+P`k;EMx_nbc1Kzy+IKlKd!Gw^ z2b}0T=)kF4Sk{sZ3+R+HXgic4$Xsb! 
zX-}EdWjP(%WHxG$RjW>ZjUJWtR%}D@c!|Y9hthEO8tkY+qKx*+8 zBL>%Nvv9RKi`Hr}sI<>ot1soFiM(#g+!cm&3~AChG=|!dQ4(WScxqW`Oy_!~FGX3l zot_Nr@nT@Fr&z*?Ia@8IPSdAx#5F;5StJjtBo;B}OPR~fd!JzJMxDI}7+y(kzAp>}yB9h;)* z+bNjZ8^xjnQ4Aj*$H}=nqJ0w?+&hWE{gYX^e_IycABvNoFP zB_Wg!_)#Rd%AM^>suWIB%A`V=r9(`%rs|f{*laCTL%nImvZGYlQqtf^d4mIGb=Fh~ zG#Xp11X$K|v{=yAU?R9OqOsD5I{BR%0ajg^B@GpJ)R#I?U0_RPfh>QqIpw8>l!)j{ ztJSD!R;O!@J_CzPWrq@6EjDHSuq8W}*|SeF@xx1PIlbJDGfS;G)^Eb@b}cs6jAnJo zODvuB0s|TUWq$f|%*z}@Z?-xeSsFAeA@kpwtwu-gC}x+*>-Bn6HyM)GBAIck36on* ziEdOnBsCLYSx8tB-lUwJawJ6Q$4Y7B-r!6?okXnz0a9QGRtv;xO;rQdCiOXxv&e(e z75~q}!YPR(JASJSb}S{3qI6 zDeZI?U`fW)Y)0v9D>_$5VQ?sz&1a+8cV#lCZYOi;ZNbThrMz~(h*xiC3#?K&eK~=X zSH^SrQXqTIyRhS|HQNOrd#|_&ti0KA(T!afz2!H8*?QWKWqVy%Bw0!GLIav)UCv$Q zLH}AmmTZe;&E9wpp33I@r815k&Ee3&3{IRZ=IF6v_U)g={sYDA*i|UVno3<$5LtOr zFe+KG&$9%Djm10g1-yb@AS6~zko6L=Q`DHAsZWMv_+>Sw3=H|OaZ3c-cZPB4QX-#x zQp)FF6!Z1hCH(xjhM#_{;mOYp{P|Rn^|XekN_(yE(s=ZBBHw);#}^-s+D z)2~n9!rducek+Mf8XaUwZ4x9^S<1s&mr2uMMV@2_Rwq)01m^OC|Qx>QTn)FCmYAW%6gLK<7 zLv^W3x#Z~hoB94xJ&zvO@#MF9e*U#VKwQYJ zThn;u@?e7@qF{6uKYZTHU*9g_;~O0uU!BUHfn@eA&SK-740bK6;mqCv z4sMyx`sHnG-PFnP6a9kOHN5p;E3ZG;$mO?Ja`Ja-bh1yzF448i3T z*-xiQVw$niU4Z2&0QDke(2HpcJ!L&B2ytaxkF%;H-=H=-0+a+v89hPDDX$u9yeh5n zuC~Ur$^x%)Gu#UWT6rcoWc<_4$~Hkzm0*HXf+>JdzD%s)w^6MK!3t zeAZ^VJ~lJ-uuIp+K11-OB*7WFxXNRnOk@1h6kr(&unh1YuPd0*#M@^yz8<6TbbSFw z=YQek^NR{tOIq#ouNN|N2kqy?wwmT3v+nMn_PK@iaBSPsqrct8C zmatN50jD zg`b}w%S!_v7Y)4Z)bO&Auu{jY|06!%M0=BQ2c&i@E!Munim~|E0925`4w#%6teslLS`+r?510#$^bq@&r|Rc1*~T zX^tIn+4fAxup=hTMncNq8Ab%p&>?81E`bvMGxY>nhJ;JKF<#2x#CB(<_c#c$94T1h zMCmF=D%ROiz21fzrOSfN*0k-krE9Mp-TQ0>SvD*_PhmXR2hs&F?cHt3{K{S`G5+45(?5|LXK%PIm+gyC*W- zlfd#hNi6M^>AYkHx+c+F6-ZlC7z2X|tXP-I`W@+PI+)J-gUPHuG?~?h$FuB!yuZtn zdD|T5-Tcq^fU2I?*BLTjrhUsa=v|`5{1rOPTdvLgReJQVF_!mC=vl5$>yXruBN{X< z*OX9jWkAPTNxU~ZuwaJ=i}v`ic&{%*`+XQW;Kd@TJLYV$pmn7dHH%)NYW`R%dbKHR z*Cn@EkMw$}S8I&LO@>Ua&|y-QhJ=)b4WmhJ*Wj5H!PHdccI8q{2@NKfYW;IYsz9BI zf~nY?mxz(dTEW$%VgXm7Cgbzem{_2}lu}*dC8h|*rj+ZFP^C+f(rrV%7Bd=T<+kfk zFxQx}0Sih7EtoZ6O6GiHReOn;Ms31Hwm~vu$^f3uxw@(Y*H-DCL8ktNCWJ~B7Au9t z%wDO`hXQC^9ZK)^aV$C<%kYsX(X%pKN0{skF)UGlwRZvwcTEs^3a<8yr*FqNy0=8q zwK;~)O%rKd8%yodFv_mk`S^Yo2YG|=mb*IqWU`j)^A$1jkm{NVJOAG~ArUEQ08p`dcEwQ7j(2}YW3u>z@ zsjW4qs!l%Js6}I^7IO!T7+Ge)@?~bM95!L=N*fMsa_7i;7Y;AC;`9n@F0XUsm6Z;> ztenYeRAWuab1cpMH-qW_W`5eU%uRouIoV_A$x){>SCg)M9cCBm&?DQhuhxJr0aZ<# zC|e@A&6J7qd|Z>c6kJw>H7K{SOX1b*Aiz?F84+ByDjkiw65J%AoV{wYBecC~3E)xXqHfx%SLm6Tq4yaqK#u#NjIfsat7WyqnIA`*~b> zGm8tarSkH{$sB)WBFA2hWbb)z_P*lEo>v{%a>kl%=Uq8+Gn_-$!vz*0Y&zk`hGPM& zKN2k2jVG;xmMmD~%fhumv@UdEV7&lq_cS){PGRqXTu!`P&Z$#XoITsf*>f$NJJ-(1 zlWiP1(#(cUC3JRAqP8xI>}*d$!z8JgVol5hDR{%j;vYVWpvY0gN#;CpiaP$m&oFNM zf0!l5Np+15-96@P-t5o$bMf4`F`b7Ga(Voyf$zSp=KF7J`0YspfBag*?>`sw>knD{ z@@)#g{E*DAkEZkZhiQEC^%Oq(Y!dfBn7|!D&#ikgT)G*;0Vy<ab>)0UHjQ zvgxQLn~&SD^^_yKFL-hEYAB~~j_1PdDZF|oiK};KaP3YSH{VF(jW^S{cR!VPA5Q1J z_onjxdlLn7(E==q4`X@zK{VIjh~Smm(OkSWo)cHcvHwyen@@+z@#ap;I&*=L9$AaE zNLi#o@{l&umr0=`AeynlikZu8$&!R2dzmA-%bY1#;Y9Ij2lAI&kSmEr{;)Y^OKqr- zgrZyu=87RJssvxvOXPS_nia_J)U7b4ew8FLYfV+RXOHanWcQ_b&fQh|F>B}J?+5wz z$ufTUWeGq2GQgwX7VzllJbrxI%XhzY@#UiqKL1_-^-BYfeyQfG@5=>Gf~(J}W&OAC zz78FH3pzb`m$QC2&jj!^^i)Ie#UYdv90pqpUSD#C2XFX!IF+twlA$`&)V6nThhexp$gWot>);_KCWC_!L`>{apmp` zF21#dgEx9u_Hq$b%OaT4Vy^0*A$_q62@9;4HegMvfHX<=)r5J@#7j(?r(8mFCA#+? 
zTq!rF{L>$uBLQ;k1~fb4E6DPZBvJJ$FqNd$7?(67Tm(`siG~6z ziFm=m6oE;S0X`{4s&E%vDVGXu6ZNr3)WIT28>GA?jPXET)^EsSUzo*OxF*J$8)Jl-C7T z69ir3HF1qn$30B&6sAo;ur7W+0xQ=scsq{8&q0GgM|s^|TTmtFvXptU*2LFF3x7Ku zLR<_9bu%KsQ6T6b-*pu{dI~~a)UmOD4oipsVCD8-ti5HKLX~!40?s&r=>#3zCh6mw zWK3X+2_Z8~2+uHMT(*TI@Jd)p0xy`%vS!>&fmMnbq0^Oq!~|2aF2mDheaw_~nW|4< zhCI(VBf7$h@wL`WYO-Kjmj!9_EXkI#Y}T*^`Af|xS!pWJlDbG~guc^~_FV#xy@G}V z7R(b|%{ydC{}DSD9(ADqup>P?t!P|hK(lhmY^x*f>m6CRCxngXrnBix8avMCaqN0E zCvVkp;hknKKWyUk-6Hl}PGbG(XqFuDW9X14Lq|Ore%YPDBd#ns;6~pbciK0*P`%8C z!ucku`aiv1lUWUh)OR`4GdGC&^P(7>JD%ZrlUXu<3L`RI+CP>4*)cR$d(zV6$NYsc z46T^VvJF#NwOz2aBaUUeqFH%B;$Rd*yZxE7$zIj}Rp%<@COU!DYGe8YinEoWZicn! zS)xbJQhj~OuJ=!;=c`TVTx&t^Mq3tacM({5v-E&J%MS;# z{9pjf_xmxjM=-b1j^a61ER>yLz*4d_Q&M`2ncSvNRGsX^mHPOV zDg%rv*Y*Wi8Cuv*AA`G0qbsF=kpd&5*Hds6NaLzt+BO8yyDfHeHS6eV?K#F94g$)9wHra9HfNyBCr(FuRwsspD>rJSuGNiV` zK!7DmFI6rP3Z_aesV=ghve1GGWh#YRU5z>Qje@ThJsLW+Y46h!VCgfw)P!Zrj9D>a z$d*->9Ng-{ku4IdEjhi)f@>S>xxT@Xb3+y!?9yRt-DuX7y};60&oPkoEb}t{%ey`g&YqOxd)r^c5Q>IG9D1FW}3z%fNBbo$PjrRZCJ*X7U zGW8d11+_TKa~D-VMp07WjcZp5KFOkb?d8CBr($IQts5f*u(7fo;#qY(g-xd=+c-6o z&8IThcrrtvl*-7i1ljRpXh;kNG*{WcbFIj1Et7KnNNi;}>(Kz3U*-N|`-WJ2Q zew?O@%y8kQ}uptmr!sw} zJK-^sEQF6CI6^9xpyvn*d4Z{kTBN6|F+KU;WMuq@nraP(2JJa2nb_^uviab>GCujZ zg0H`<=I0;lc&c13{HcUrzt7>OVd@1nyT-lxP3o|I}h@C z`(X+9<^SK2$M4+B;@-U!9^6+NZ^!fDdvP+2<^A`^^UnQPfmkG0UXPLAn9QX&;yH0+ z0(%8nYmfWM_IIUwyKKvKrerVCCS|cYsY|6`U13DVa#OMdQhAarY6c9bU8qmppdQUj3~5(x!>vbf2w!o@Gx#`3XUn!HkTLe^+Ao;bp;N9SicfAw-4N8Kf zT;jJUND`ZX5=;DMnc!1J-nbf`2k4;qV0z*xKjluMQZDNn(cs~UdJ>H;TC zf}9j!$$M661PHQ%93-)~*OK?7ZgMkFMSzpeKW(*Kb$SjPm;Yk#{Q~v@ z0!T@&JSGU};*>ii)m1&pdry;fB1rX@^%In8NJOS2wmIfRWy|trm=c<5Oy~?HWF5$M z2u{)?L_QxrLtrNnm?Hlz;0nnTpcR`DR%}G1)HCDj4VltzL~@T2sq+j;@7HJ6hymqm zq#oL2LgRKbns=B=2q1PE(z(ZkzWoBNgSO1xWku&!3)OISWs(rJud`v~U>K{8Ol0q? 
zIUIYfN|L8?&b{8v+n=oA-lyw$=kqnZ_4!h6yx+_Dw`)0kGl$j3$H{T;&GM503>|Y7 zU^&x&Q0a%uO_FXGYFF7(G+;(nmmV`4w8#-))y;OHZy=cd!AKSj2+{^8FuZUgBMT?6 zY|$hJ=0(!p>Pc6(H~ovk=pPPc;nENWSB28QDvxd^|o{ikh%m= zT~gNctW?fW$@3)!bV#|SV2(!5-kHc8gH)+uMdESbH*nt5C8 z8Qkr{(t}>IF8m|{1akhYJrcy4LqYPt0>>d!YUk=v)}>Egqamr~dc@~y5V5~`~ zL~xqGJ?*~)X8o6_q8FK1J(g$UW{n{-=S4!ZpI1eA=5s`4y}$HV)AShziyo^^q=tq7!bMG#HP1F0JJqNv}Q(g81OMg&`e ztC}TYR4fXlq~DkPK2I_v`%9{~Ailzg3B`&+1z58*iOLg5=IfDAWI$ex6_o<4nhr-A zMdDp^-RYd|OmmYpjdd2(SINpRH=(x7Ops+kmB6Yb*M#DH6UrnzD=Rmqvet;YCPNz9 zWdrx>(A%%ayg?lnkLXFIZNSQ925giKze_ZIP_l~STkN?Tj)l7?iu4oMbTW-y=dvZT*m5e3&GLNX@if*RN@2;4Da>6RMf+lZ>gRh>FYB&x zfd{RFp7bvDW9~{n=B@H!cuOej1#5c*XGdPi<@lAEoV+%jlh>!n0iPg`Cvxm^413Ro zvg>pJJCz>F&-)0lyf}2jpOW5su{jr3vzg7Cs%Hh3_(zy3w5^vm}$kn?My!u8M zm+wS!^WG%xzCVKppUmQe&nx)o%LYFEx{YtX@8#Pc`uOk+} z#7IIruFh6KVNHZ2tKkKf1ZN2hlMQf}?|Q}Q;WJ4P7_Ezqz{AmFkYE5MS($U&P(cLM^QwFz|7 zCe%~FBjG2X^Kw$h$5oceTk0u6nzM&2hoAgk$QWG1M&TMY3ePw}-(*dkCJC_OH3glz z_)J&YRSC>e^$E=~CNfu`B-23U?47Jb|LHO>3Zhbt2ujo?NH7~Z%}@odKnZ`@Mgi$E zPuT)BsUrjPwFsB;Bc@jB;}#vJcIuGStx4v97R4)csoY>d-BvlaBycbDmKjxAl(t#XHqVpU^L?2!-=D#OPzDCVWg5=NP!t3GQm4#zr+1D!^ZLD*v%sCX zi#(V=EcL`97iJH+NIl`j;08Yi)_bsEy&ZGcSx5qJs>0Q5N#?qi8q+>3Wy)d$T80d0 z9nq&<%9+{g1z(#@QAWRLUZqR>T2pz=S{1#Utyv%m;_zN4Rvva|%@J=_9PnWEK_9jr z4`auv7*_5Kpku&-@^)9|0ZPC z3&d8c^Nez)Dnf;;=LnUGI!YNHr%;0=si>7Z0~5-1nJmCkt}jllG$2X2 z5Bj%zv0!T;U90_RUg}H3h#$2>K7uG$vb*gBQMQ69cghC>B!Vbg7(mGaLdx`HSrjm8F`-bG=@q6Fw>VPM|?)WtlOh zg$9%s7*Q^TMR}n)C6Xx?7nxB~VM3u`w6sc>s(Nke+BE6t(`Ei*efo#A8Cs^xs16x~p!pCR2blrOrSKZ2?w;nW}f|kOmtmc$JXCTC&_g zW&AcNpnU{aK8?1ju?T}(Y>1F`7pWBR?Per&Tah>DO5>UkdbdQgc-KT$39z=En#sO% zx$Hfg#f~#Gq#{ga(}`r(A4z1DplO+WZe+WFW6L;(Hb*kNIf5nIBU!y?9BcL|5y7_O zaqN@t9l4Onsp~nMzCMFfH>Psp`gqR1K9QqWqd0syiv1TN*>)mOcKjg8wjwxiIh3O} zVJk6wTSn>S<3Dch4Pziy!P%eOEzY3^PT0q{_c9g!WwSf z+aTaL$p;@C<@m7yP8?grS6{vYyvMIk?(o%TukzmgGkpBPWxoFMb-wuQHt#>Y!mXRf zdFQQ*?AkHR!i80A*x1GHU2`RRIC^+K2ljWeuzxxw#ja$fn=mWKm>Q4dHU%wXe*KZ{}{k@D| ze=iqkRq@NOP5k_GGe7=V%eUVNI=;x_{g0D)|I;MFR5A}fN|b3jUn*Ti2p)d=Wj0TK z?dR90!#w?S1;72Vg2zu5$#=W>{PQY4{2+(-A0+YaJCnHcdYFJKnz!%8a_jCmZoWC5 zTlc4O<6a_H-wNPN%E9-QG;yOm#ms^sfWbNTGOEFRpQ z$vbzm`S`;+e)?`Mk3Q|?+|C4s>Z}>UHxP>cFttyK#3r$nVriT zS-GH?wTsKyzqOP7JG$7kzLp(3nmKcEF&Azw;mm8p9Di+qQ}>2ga(-I_U;y6WHg(|n{=D4O<;*?~LRlGi?6E!dz zKSmX%acWpj(ZVJ{8yi7{t-R+vSsAWJ7w-u=_)gHnKUSZBaBTtvHbLI%_<0CGT%NUc)!N$BGoq>W>MELVsgZb3S@`fB3oqm93>E`i>9 z1h{Dtf=f;BhZh|Zay8EPD=yzt$LT5VW*w895Tc9;}sV8%n z%m1ylr+e019 z*2zY})p{%DD80RJw_;?k9jgV2>yEjza=$Yx_qnn8XfWH4N3(Q`A1!mtC~MRuTe+z( zOP{bQYPf{`7YpBKF!p)|LmvtMXE2R;28XHt!cR;US+2=5lm6FuJ*HTV*b;Rnm1~h) zZOF`eQ)VbV$O^C~m1+@RDX6OY2V6=UEv1pW65|U6Bl+rbAgD_=GK#=iqX{cjC#pt^ z*d{$Dv>OnobPCvEEWk1sbXhQ=*^+S$)`Ti!gejx8=NaOXDF>SzWRXgj3DuS))CsTz z1G$p1)hu$Qd%ZUccZ+=YdNE(Xv0#Ut;L4f)9kO9JdoXvCAMMM1s9WSo-H;zOgFY0_ zbs>AUBkAo9sC@B$FqUMIkk%4pSUeO9f~WAjE64(_z!#2#Bt@3!Ua zZYM77b>rercMdJHWJTL(7S%q_{EFx3F8MFrWuxe>Qm3O@@K&o!N24)qEt0Xc%6zn1 zGPB8ysg0(LmqIzJS=PJYDM-09u)#qdI}$7rpbU=N{J#)H21-^O+^Td4X+>14(!rxS z$$d7=8g!v{xj&t2L+Rf%yhP&n3#mJo)K9-+p;V;w^r8@+m)l{{i2A^)6q2`Zn*~yU7xN(09ugYuZ-iYPI zttj?g3ueOwAC??k}4k{b>!~eKSk8V+!xQnaCTr61e(G0=KVa^X+G|`RSt`PH&83et`iC zvW@6VF=Bb47fTELSzZ>&s@f@R>dxlKiYAV&?_&R&c6P65VW)s=-=+=@@9JgemR2@y ztYzQf*<8E3PS)irPQNkC!JG40ajNe6^p~Wl7hi#$>*73Sn#A#wWK@GF) zi&!l@#%baerHN0ZU?x<906(b@JjUYZK1$Wl-OEdWKmk^uvj$=AIz)Nt6X~u;sIw-)4(fym zrb2vl2o#V7`N(%9d_6Q(xbpGPz|(UqZeFAC2oO*y-9E^2dkgryCXB^PHMq1ot_fuf>#hEz%?*$sW?AaH$rRYYio_F{XNr z0rgU+bZ)n$Rlrj=s7KEx4>m~>yW?UO>t9agz_ltVTl-|+8{+1}wY>c5e2!im;P%6P z-2dbpZ@qtkI}a~$=e-NO`N>&sez=!gUu@#)=Zjc(K9&A`fh<1k%iv+9qf8uDRi$eY0dcLo-? 
zF?Ws=9qkshw3yN?^;c87C5;`{wDveKyWgF;OFWpr!hr>=t>~BJZ2nps=B%`0_6jpP zmkXj+7}CDlkSuH!SP+e0BRX zb-Sq|zsrp*DWp;bebZ~q1(8O?m*|pEqDQ7YFKo1>OmJ1*?nG^?gk+1=0+gy6Ys$;b zDJn8zR-S=`5&5$ORdN93<(rXRWK4FsK6zD2Z|}NPx9QT_tH%i^W_0xUh2 zt<;gwW@MQrLrc|JzDk=dTTM8;-=5h)uIJ@7Aqgz~9HBXnhbA;t=W*~W@7=Msfb&EDzKU%kK>!oWm{M=PBLEQuEFp|JEB@#1YE8H zEv4TpdjguB2x@m#HJT0X5PWsm5#DaixHbzWDP1$nv18VNE0x1uGz@#wKH^LFh(Em} z0Sv5;V9l<{Y(1X9)-zMsc6O>jD}kL7JCw(#C$ss)1h$=w)Hb%tEeT zTExZEy_`SQ#jyj`oH|y<;k{{GI-SS$bGbacUCIY{^7-`59KL&R7Eit?;`i^%`Qvdl zzdx=OOx5%Bmu8ulR(}7bS8&iTP?*bKf~~)vHu3anB~PB@3z%|cTF9fvCGxnA-=6jg ztcK+?tK_qr1tEI{X@?~C2rM=VDu#IUeLJ6jQpo!crt{#Pc<$eeW_ zhbhbUTe0GhJ!@WeXUmx&cFK0%aw?SNySkVAoA_+vh7uy!xn!ZJo3^C)#J1UEJk4v`u-3N&28)o}|GV1%gQ88jB(fU)>{OSq24*YzdA z$tb+MN8#r4qM+v`TtY|V5vz`${9j0_;3``Z?tE)v3T=s!X-JlUYUV%PKlrB@EIegTx_7ZscXyERzgKvGHZNwB@-L zUTI3#3Gxq9~^ufJ9ox*Pxi|MW>jK~(=L zcRoGC-ER-`?yq||c(;Lt`yyF(G?bAeKJ*{3XTd=mhEF=uBgcN@Iz6fsWcBIDeylF_ z=va!Sero6v1S#X{&b4OFd|T!&5GeJ#2(CO?)bGWDUJvGUxY1N)NqM0G6=H51 z1US`})U`O$HP3_DgAVkKSg~NG1O00qnY+rGo)xBauQaA>jgjEWkiKok%-dx`-wwgm zR!v$qjuC8*rBxZjaJdd$D|G2yBX#jc0|o_IBRefwvDco}2V7XW$Bi|6{aLvqkp7jP zw9d1mpw^Hnl9U9=zH9IEJcbIcte@dUi)VPr<{7jESeCK>mE%ddlBdOlMq{3tUTs8T zogoSJMog9Iv^pb_1X)RfE9Fde9snwNw$rek>EXb1ONJ&*EDMgLx zX(IMaebRD_NRgO6WchH1^5hDSXR6I*{SiD4&!4VDm7msEB z;?WEUvQ}--XV*R(jvaU7z`D#<+R1hb9(p0q9WV$QH+Gi#$*u_uO=hsUwz zXe=9!Ph`vKNdm6PY&$baD%0_7KQoa7SEh34+Ek7zmjG`ia^(7C4qchR>DyDdbT?6O z70so$;yL~LcwW94!-cn!IrUm9d(I}a?`$@&zR}LrH~YBw+HA?Pmhk=;>v{8oH9Y)$ z51;*boG*Sl&7(iA@`F^mKmBowr`!`z-Q)37!PMWM39R1Zr(Zti&%eLtw?Dt(>F-}l z<@_DL|NgC@>RbMNs(|VTsdyg=KA!N~&)@UT-CNwfb)E0N_>{XhuJOw0lUz7{jFrQK z3@_~E%<(;3y?9Kv(-PLMm?JTpO>3l5-M)}roBP)*st0NLG zOB|Ni!QX$bmgSt!4`0{w_dc4=yPwRENaf+jDH6%N`{6X+ zes2n|Kb*vkcjI{VUJNH*4`KI3cfqv1WXaa7Jz~LzBh~_Mdv>37<-lcsj=vtm$vcxc zc4Hzt&qT07lBRjfY-pcvO2>Rl23GpA_25)aT`l3wkNWuZ(K5dHbv57pwMlTbh3^Gd zk2uC-F7V{61(~1_cc8Ibs>)*werKaRs8sEG2efe$#>tU z@#s-5zy4Ce@4q#u#)bU(=W4-dr68?R0G7+=U(Mj7&tyB?iQ?oL56+$o4pyXMI-=o9C3We@Pn` zcMWsx$V!fG>SOEDMz*eLV%3sLmM$q{`|eJTpBs{WdJUJ~+QilSTiAZGhv^lb_#|r+ zQ)-P@hBhuzDf^U|;jZ*=Rc?lJr8y2VwJ$TlLCRE@GBZ3Yt?{pRAh6yEAAy!{ofF=b zmUtEmQt~zM%^Qt(mS894Ke!}6hew(kZZmbUPu9nLsv%}m%>-Ij*i13Oc9IS@@;x-7HkD+;T))qodC$n{Urg`C>(qRd;x-(5Pbqe4G9S`6l56^ z?k|Ax(jnMQogkO71iPvc=BY`Pw+`XHx&-)W;whMNm3en|d;urB=W%j;9yeJA51%o( z%d#n#I=#XLR?%v*91?Nzc(N>eg8XNqI-Y_ouOxv{iXQ%{h6JP=;ghPgZ_~nKy2d{t zb%p!%F}NnZh)d#2cuiBoKS>8a!I)2qd{=?hOig?QS3X&4_~oe)P%u_?c`&+Ki`ZH% z;u>|B-epMU0!i=(4apzWr(jTziX~<=tg@qQQ0lO@u{1ArlVoNV+fL}oTw-d!l= z=ZGXntEDX6Aj#=|&b_vkD{t=My{|6v)(2;~a{Cys-Mz%Ew{CFljceR_?-p-;dX0C! 
zIl~9Pz09j0OY(RjTN0*d)tz@sjw)R>m@#<7gu$aG%-Lf^#|9%BmKaen$AI!qL+ZQD z>6&XJu(D&$JX_|?wUcmUL7%I@O3Hu^AF0>8Xe)Q8CfAXYnKqPW*i$jfmC^!d3i9kJ zE^(%|(SxQ=M_T)A=o+wN_OQefE4r7P)2oa(xL$y@L6f0UqbZyq6ZT(o< z){LcPnL4c_f~+N4bS=}Sca=W#)|t@1$%4TxwhU~tr+>X8i#PeOaE&i*3mhqJHevcK zbt2+NtJ)HpI6sS~mB7mM8J;r{WXWS~?`I_uk$pUM48bMZ;vFNNQTo7|TBFaDS_2iZ zk{eB#UaxdMDvuRtDQc_IV|=LwvBhdA-8d+B6ONZZQ-BqoqnyFgAS72Zf?Q34W@!^z ztS?Y9BC6IzFlEZPMl+%tlrSfz!HUQlOZ-HpE&>y00hVW>WGt2T1XtJ)SYkqCr3vwk z0<2CeX3epqe4#USf~=Nh?zD--yVf`{ce8Ai?H=@Pbd&I5{21~|w_1}cDy|$1qh=_AYC%@*;&3V#1X9}LMUjH5R!4FgY$a^SZ?aR}<(4OzVZKD3 z0INX2S1#|>h@Kl-1z3Ws<~DblTRo_*b)cZcg7h3irf29eJxiaIY-E^a z*M?nt?Af%@g3artqFQUm+93-TbZgUIH=5eg=P4_Cj6~PfW9u0N8=BdRqc&t?-kK*uTj}kY!J3mm^_a4yw-n zBHJyQ&|$^29$T^{+bCV^NyUHwOEBBA*pK!>KROor(Y45rIjaIxXA>803t@OqI4cg0 zW36Co{mT-^V%T&dmR%Pnv+va@?7uRFL)WKr_*%RGD~^-561eo{bk5$M%JqloymBv1 z0G7<1&l?3^HJrFy%cWa$dF$bN-v4wD4?o<+*WVuJ@y{3d?5pE^`NJh1J$;>@et(1C zc)<5h?h2~j@a3l;ap~-7DFk|$-PXpIb!%BVFi1^tF$Ec!OrIQ2e#T7Zcel&7D4;Aq zlh&GIs*5wIDNU!dDWBHbOu8DSFj`d4=2h*i8mg1ZwUzDbTUfJn78_S&v3FB0`!=R> zbZZLd_oZ3dPe*bwce>@rBuV0t&_irl&SxW?0 z^Cj8^Sk*igVEz14Hs5_alSe-k2)LRBi~Rz}vkMrkWCj^;C`J27)_a=F7 zh+lr}o@FEb>v`1lK%ug_EY__OJ}|M4^)e3Zc3 zA5G2fthCwe$3>_<3fIY zT*1#jmJ7fN`RzWQn=AAS_i+xN$D>y2PepL1sSJ`)Zda^TLj={)|dkI!y5vSYpv zODilGnPtjwx(O?@Kq7A+E~i*9a3v1NvtHk z6&j4I7(;aFD8gqwM_|UY_@q6DXX6Qe~5L`_+7jT&2lVGI6l}nr!j_u$D zM&YE~F(}Y;4$;O*fMw$`8VlFwv626GlKJzL*8?I92#F9}g&7bo*a{EQBg{{S5D%%Z z+|>z|X}GsG;Xd-3uQqiEPTV;7X>RiCTCkY2%+_Kv0G${~urf7#~UZ1$@7Ad9G`0#kOsGVohwD zlVL^~+qTWjWNh2EZO`vno&SB#{k%Az^J0B=r_p*~KZowb==)7{yVk z_E^TsYXn)QQIfDDW&Omqk!_*4EnLUO>#Z!~1%MeHan9+8V|H6yB>%b-cmF&?f(K!&-PpzH^E;)$*w3-!hd6NfAV;JHO~8vQ4!OMcL1 z&`1M@NiH8gLXV+>qXB)?WqcGQRT$8(*p!+~3rc#JkrQk}ijN^NF1kdy>JsN?Omdhx zNm1sc#9NS+VL^U@F(sAyRQETe_fSLnjnScYvKB)W7rxI|V&MEv3|rQfkt@3~Y-tw; z% z$m@W2R8xWVpMv;5=pdkKDY*JiaMe^*9_#->7sr3mQ`~$!r5$c1of~i7-Xu7$9o+>` zjYqGlL}@ju#P*b^P$sgp3&8~)8e!#;*$l7jmIUUv#V5T5o+vgj-39joB|M7-SEVX=mi;e0N|kXhk)5hY8Cw|`EV7j_%~Zlf1}dvu4XMo3 zu#thnO@P&1AP_CUN*XNa8LCJAaAV5Gn9*~h;%Jr`gXSqt8=BL1hOvYN1LrtXJ=vbT zQG%vX)}#zIBdU)k(S5Z^8)8ARpsH-VrvS_S{{m~UCAk8ujB0(7OVmjzQ6pC>)1ux+ z6xHZcP^C+rz@wl-gEE1a;_zfuFC!{@N>m$5m{M9Hx-Ky!qez#O0v(bHw28~nARo}>TBYC~L(sPgsLq@3!tR%*DZA8}S2^|?bxg*0Tw54`bbNUQ#D(%0F zoYb1>bGor)nJ!D07%+FHCez2O2(Yx6FjSR+Jv&fU(469&W|T-}Qz6^1vZo4VH7eu@ zrqTpfaXq^cTHOsl2?ek`d#d4Hsexyuwg_9FfL{MYmYV?5sY>6pyr^aYv!G(AIn`qZor$a5nX=Y{*;{;Bv|Vv@D}W`t0$8!HJ1h4Fv3g%H8xMrha6FcSm$L;~ zrR+GF$ITa0IC*0NCoa$8&ZBznJlM&z=STSP{tTZ#oDoo*;lqcseEfPJN7D-*dY{m~OU|>x+!~2CYcS0hYm*ml~EQ1}( z6FIs$mGe8(xV$%w`=|1GakYxqxBKw%(ICFQ9>vcOf~qgG1XXk3yCCbw7-<#P5|uYz z4FBnUiY!MA&!497_Eov8r=fiNG?njPmI|;o^YiCUSqD31T{H+l|IbmZ@6-7DVF>SE zRq*nupyWv^FP^3F{JFf>%T%7c%8_2J8p5xZ+J7LRV0n4e&9)d7`&fE#+`1S7WIpxFp?GDUeX3CVA28^F%$TR`^ z+O?4!I$XxZE5ms5Vh*2wuH(zEO?(mze)v@{u-YlGI>2X+N}QD6o#gA!!?Jz1@!``_ zK7E`f%R8KRFROTVFM~%nVtH~aiucc=`S>b|ch6&ZE0}uwG?`bAl6ihVo(H$WxN*gY zYw{USpR!@^9>sJT%B)(|nN90-ID0URceiS}wZDM*{fwDaqQkT-O-4m@WKygYKbh7n z6=1C>_Ge{P7%Th6v3hVSYewX{5Iq zb^ZXRPAX&l#xWc}xs1)*Cb9q6LQY>>&G6~j1g9J0ld6krsus4XYFK8fW16LcQI6m$ zOK_Fh86yd^?5^15tKd?ifqS_WQc_F>$o?BDV`)TxC3@6$Cb70XNqt+9P}5S1=;rw6 zG{rULUu+Wu5Q@`<;T_NlYLBj;Joavjj$3OqoLZn^+YAi>l$K?4!AvuBjQ>MVF%5&M zRP7QL=Kl$#1XLFPVs9$IG7@YV{`>zuO=vE^GnKzLcW#Y+Kv$eYlyMGI!6`xkEfrj2 z)EdEMAEu0r{LZp_H%tOL3ar`)@>-zj{x2GC|Dfjlw_s2Hw|p)`*R~kDbimlLBf0`3 zO^c@J+Wm)#V9?gHBQCy5xcf@p@sd`sSyyYe80PK~^gaDd>{VZ|d0M{{kzpEjHcTU>hPpitdO*Y$xoaJ7XK! 
zNl>hKEXx_MhO+?DAx=e*tSpe~hJ9ieS>GM7j&Iv|nox0_m3>+#>?D6$Cd+%L3jPWN z#HF2ZDQ=5fSzEly+u&c_p64&>Y*nVUXyAF-u@bT#!JF$?fS2lC{#Cnb#-oo97m$`cLI2W$$=H}f4+`6@o zTQ?7L?#y2H?cBzZ{Rg>z@iKRAUgZ4cowA>A;pOM;Tzxo`*~=rDw8)7SdpudN!+<$E zy0Gl18Ve7qF{eR;>1*|vu)v6M)6JPM#hM8dtQa%GNX9>HstW~xQY=)Y>Itk&$P2b0 z#nqB%OA8`QZ3xo0#8cZCKNE98ooosBv?s#fmY6Wf?}=9A=2}u(WlhZhEBcN!6)5R5 zXoe<3<|;E}K^I0Y6XdN_W5f~_2F~tA-zlBwExER4R6DANx1oAyTPlZkkmySFFg1FP z&}p19sBEwyMYV<$4>Ti7@_l5kGTw@9txKc9CgOV`_SkMR2q35NhpzuLaoclWi=ll*?t-D(zWVg6R;Rc^iJU5_YUKaLR(Bz!sx!V-U1YHBhI_RPWP)B-*(N@+ zTz;}#-hwUnO2s_}HF2uZ#!d!hdqI|SA3fZ9%kM<+zJk@zS{-5s>617_kR_2L>$*@- zRWQhuqCsX<4zs3gxD}-%EUA!f(Q}fWbZQ6sPq(B03_AwSux0RUTZS#LXWTMp#xHkf z!b%rrZ1860245yGcV~G+G)Jyfvio!iyU!?Q;GV$CPur!+TEm^k`}p|fHm_gb;O*;M z{1iO>_eJ^heEIrI zK=wo4=PS>jedOM~r`)}FpUamoarp3I>g(&7HDe~dYkH9!pNPAY3vLdMIN91^prwPO zg*6W5Rv2sPVrOcJorx8;h8Flbd*J8nj;FnYV9SNL(C(!7h$5!DFWCu^l;zfNFMav;xkQkZ$;WpI0<2gb-s{1md)d5t-e1r$ zP1e~;__>vzzxE2S_VeS{ZUN|40oE!3)-1jV#NNMB%nwz>``5X=f0NDIcUioBpTp}9 zdA#~q!0WeJync}+@0lQgi);KpPoK&EdK}FS#Sx$zew@GR#hFWPoV?)7>5DF$zvjW! zJO13b-<@j@LbxoiJ9R05y$9XcxXGU7%dA;E-;R}web})jiBm_bxp{Sz{Er2E{j#2K z-?qqS-7ed77hl;YfIiH3j!T@BI3~Z_$Jejx1du|>{#-rj%EjY$Ts~vZ=_97>+o3_j=5B1;tir)v z=G;D$$j4g)xzv!!4B20%$hb8tN1JJh>P(N*Vo`=QOLCl8Uf|8zo)Og7rL$vPAq|tt z**3X1J7x@H+q7Y97(al$3&(SA>mrV9n9Z)0li0g)I(xUyWbN{i)Nh)^!2^rfwrv^* z4lUyPod%AaUCiW#Wn}gC!#z(ROQzrc-m&ZT>|~@S}|75=qxc$F?Wn$A@fYka$js0Tze?t6r+Sglq_Gg5-xG- zxF%>cLd!+^i*u4HPKxOolDc4*D9B0>WF@u1GOaCE8H#9wO>P?;i`(N`-T}9Yws=-4 zzBH!z)?j-=`gI_@e|usEb|ht(3K_%HDHvlQ`OTR0fofz8)S_;#7fW|$OTk*j&ZC3a zdu$A+&&}iH$t9dRv4(rs4{`D2F3ud=&#miMxN_+fH?AI*@!}}=1Xy>j?B&Xd-JCnH zk88)z^6=6P9$vZ3xntY7e14sbqx*UJY8`7gzDN8UL(6wKClhNeVbF*y9N0*ZOEy&5|8lG2MbxQtk>cbY{d00K)srLJrg;Wv~SWqns%cTop-MGSZ2nA-3f8Hz&W3 zF}VV#Y{67csiuHQll&s}#waNjV96k*i1JDu2|X$VSLGE3lvNs0RwclwG$p&th~y$Y z5(;&RE7B!afEAIaPN>K?G*9qa&`p5Vjch?rc|T?P4pydagfinNXfS%55<^FKV94mU z3>ej#z9U-Edqhk6OAM-O#;~C+nJ}(1bEj#ube0Kg1t&XJ`*UD(6noc2QNJvVjZ4B= zvpA9kvw|2u+L^usj42WjW%gAgQIHzdTZQh5Yv6kQ??QKP!IXEII(`aZRY;X9%jc$8 z=RLIqQ@YqxYG5Os&7r3bE;Ty1DZZE>&m z85CA-&E&+Dp&Yw3tnpfbhwr!W=JQD&ygb3nw>Jd^Z}|50J)hpZ;K#=o{QB^OA0O_+ z_vd_-Xgme@^#$+0JeQ>Vkyme@@#g(&K7RfzzyHpUAHR6}_PfC9Gf$p;J+1`r+OLSm>r#pz!3 z%=2c#fGDO83T6Jt5LQhIqhVnzCpM*Xac3SE_vCSQXAbB06>{@*FP_{O#s|fXh6P#Q zKTei*gtWc+^1g_VZ_)%u3jFlo#m1dA$FW$J>wjy!})lh{@&i`%H-pKE6%m zgT%*osl0uY$dhN$+)u?s;Vtm;=icKG9zF}>fg+xSaq~_vXU=%D zXO{~b*VwaK2H{OB0@$}TmD2|+xOs5|uO83kvg7tiN1k1A<;67@fvO7+&bo8wlqXjY zJ9BoAH79nNaB80sXAT&0VxJC&cdE0eUPa)e!G)tPyttLm%genux+0ndy^UE|Zpfkn zLsk@+vohD36*+dSE%IbzMR(Q~_Ag{^-y#+bDrfz~f$Uu> zkXk*R?Muc}zho?XHqKyQ!(4W4o5SYylVzPO5NIvs;GxA_yta|UXO}Q<-5}!2-LMGn ziiIG{I6-lb!1fp>cfcsMJ*MgHv6h0)MaEm-5_N*B)X5lONaYL*hAwkp@_HBMY;k0c z;@g|cbs0BTm0{DA=sl_{*)?qm$@~}l$UiXh{|#O5Khbpk8%>9%f~&t#wfGYilRr^4 z_zQJGho;WoXleh2o`j)P@TU5IV`=mcHl~WBSO4K?D!?-Qe;vURoS2#YhpANh7EZ0O z^X`P5Ul(ixI%DJC5xXD(mmtd_q$}2eT`~9Xiiuxm41C(7D_GL z-?r#^wnfLKHHOX|1X*1$aOi-(Ln}-LW3~b9@CZ}FKT4f|NKO0&T7Ch_1o*4s?bQ_r zJH>_U&9GLS%2Z4zVc7}`v*zeE9+hh*(Ht%Prl@F0QkBp(_!oUy4t)Wnp-nSPU0OCG z&f32X)&eZ+;5JwV$#R9Z!#PTFU9>DqL^o{11&~oHIQ3A+MUdr^D9}ohq@AoR!0IZ% zYL8Wd;3}mJ78&xHas+C{op7m;Tw2iqSAmsR?+*C)>qPh3jzkRZLQGv(dMIv1GfYr0 zMw{FbIuwjBkg?WC3SL8Mr@FFqM-IEr)Y5Qt00&Nv=HT&3oH#L`YnQih?ZPh3AK%X9 zll!@L_AFP=T;k^W^E|wDmZ!Im%h+;&N0;_;@AQ7|ojk#fgC{t@^B~s_9Ol}Qojkm} zgI_Wxe|p=%(IbPIIp3E>>l|6R(}<HIXcC&&i2w=j+>UtW zw8uxnJG~8_$u00nYe}GDItIn9bPC$xm)A;O+vllcs5yDV71#b)QaD-xE*-M!G{_sNOTlPEaz+}9rz;yOHr91ORB%3 zVs$Fyxnj|1PLVSCWf~M!$ZIO}aq zA|+yEu!}3|Mv6c-w?>)r0ix(SCHjx-#t;$pkTG2uGPW~y<2o^DbbI;@YeS#9mh>Ca 
zoI!({F}$udljLt*ncO&C&4bH>q@ozcoeRUcaef4s&yL{4(ShvTS;dl- zsf?K!#K5Uu^q%ZVtU~>lA3ap%@dQwb( zVM)dyb20}AJ_i|-JwTtV0Xh`cNtGo7K-n-E1SZ)saEb%96YUs1$BXgv{24bbfaNQ* zIdW_S*KW+=?1kA}yS0{wkN5ND?PXrQyuqvI_XGhiczowJPj21d^Yh1ie)WjYA6_(` z0{ryty|f>B_xUZ~etzZY^QXLiFVBDZ!TZnO`1t7?_a40D!J{|ay7Qa^ht9HT%Wme) zU&4stW5~}h#?LPh2YVOHO)SvS(854l7b_EUER0OhR#wAMQwjoOQ>+DC)+T0>K+G}G z(Z}B03U_-)%=GjHT!wf%IuhXGLU#{0g1y`blnO1#&5@L-5HezA5OJ|0)Xj*TXh%wu z+!$IBN?oZRqpN(GJ1mlwlj2!Dxd$6(C9z|17W>x}acX-XZX6rQql*)HescmZ?~mmD z^Wl7bGn6lHddcHL-agIb?elaV-%sG;xiHS1ijgX{MAqGCzI>d=mrpDC^mQX2f7J8& z$7a6&T*r^^i~00^60e^R;Mvm(Uc4;f`O92hy^(mAB~VT0?fWdjR#xK$^j|)v3F^}L z`XQAsA5sK&sq#2UwoMP-yolz-vq+vi3g!O&?%cf}#QjGhJeKF4zL4hySdSkEbN8+v z7caPTaGxWa*VwRPfh`*wftAj&y+vF(JAkM6r}6H^GCscEBse?(KaRl9WAN*^#1X;O z0m0Qy0ha*m*Aju_WIlbY<>{j=Zd~ln=|lD$-)+L#{l;8BX~wNHCfqu0#q|^RQk~m! zY`ZCYH*2wTLpSzp?ZUq8ojA0!D@S&#v3rXuyEbUaYXha9_2uQ&zU-Xk#{wzHR!ecU zBuAI!>H2Idab|nDCwuyYvTsN{+XnPtb?+zvR)PR4lXb%jSu?bPRdtoD8QF_<}DBI?bWAnT*>{>mOeOnf?zhN2MH!WaK!&1T461Hxc%h3}nxo~4Mr>?JK{K5*t z@~m)5RL3;BE&377u}BhJB?=H^%yyF^*1NbHfu%C;^w*|jk|{%$*fX=cpI zE&J`+w9A~;TlAT~LX+{c)agB}E2(8|@K5;F&;I;Kt0H5TaT{(-*kUl{5AjhVs!fMsngz!Ic53Q!!3n+vc6S4RI~Dc~{}(3n{_ z$I?MyqL@d@vx6X|Ef(Hwu?_6>zgz8e?;^PBgrQdl^gIMmif=5sH$%(4sUYhgbbS8B zFrXzy0j)3;Wa$gC^xfKHQ$vXb=>uMPQT${^4r)hpFKn z&=ni!mY7=qgSqA3SXurf*!mZ9SuR5pSvJFe8gGZAt|iN^(gZcN-_bK@ih(SHf%$(J z+5CrzV{^>hW!?L>z%o!Gpamx0ihB~a!X~(btiz62c5jb`JhqMyNX4k&7_Wh2l7!-0 zk}1kKrgp(0ts{<^0;&8?I2U%rxul~Mvr72&SHiPT2fS(}{|@a;_{gqAjqF0~C?yg` ztB^8EoovBX;Wz_|MhI4h8qj;34Ws7R2jc(#qlrDYrrt$Se8Ivk63 z{+M_2#91$ZSifX?m-J@h$l=VJJ)EU02e4#)5%bo^Fm;V9qn8^oYNP19ozrt zY}kbM`b}u7_Zw|>extPlSP~tipQt&s#lXKCb_qK8m737Kj}Z|=3`m+_MCNQ0n)oEP z#yzPGuF36jOYI=I>Wp7{7Xs5d6O<`x77zs&NL$pA;3BDVirNq&oglbGUQ^T+@4Rlf zWp!(uD9$FSOJmrjcEw3?kEtxdR+j8IY3;B|7V)RF!YV_cl_|2%RFTJO*kma=H*&qDpLyCh>i>NEx6_`Veihr8DJ>)F)%84k?2)NE@O_`cPd` zhUf^cv>Gprj}cf!^;RcJaHeQudTEo|--P_3wv>)^plpO4B}1$z>~BndFMaZ=w8^jh z-?dhS73!48C0g@mqRjR@y!B zB6WI*hNIHk5tG@DM8#AOg`G$*?@WH5u9OY#M%9RJ^c>ZN-eWq`Z%ju9jqA*i@m(1> zsw4gD+A?5BJBAEx&xoNNm_A9HH7i^>v^R+x7kcsZ?igM?oWYwXizMl+;nTYOn-&@Lw3sc#4Uwbt5I7m4(M1UpJv}T< zjHN0!k;Gw)nXv)7TB=wW>*Hu^ftiu6{LTn>2gNm77I-_^66)h2xN^eB-i8nl7h(f^ ziS>6UE6R<`5Gyjen^O|!M89l52Il)SraFQdgOZroH-;(w<5)T_Pmop1;dLW9zG*CH z8pd#PTTd<=DCF+>N^YGi;_k&fUfnO}&ErxY-^$|Zxj0T8j^Nyh1a4g^=J~^c(qAU= z_48Z-=OW&HTPny}#`9nzWrJ$u|T%@l*S2FA3o0G@})3IDo*T`%6iu}b&l*+<>UbsP94;d z)|5T<`qZ!2VBIRIx|jdUhIRk2ZQFnB-`k#}2Q=jOS^_L%t{scw%cK6hxzdMiliipq zW7D!cO;%;8}#hz({*)yY# zgY(C*W7cpsPaVqc#Z%b3Vm8~C&0_1aIqccAg5!I(vUSZewycxbxR8eJ^Eq&AIVUfz zWB-}O%vjxvj6NQ?CP)$p{|^TlzwKpwwTk;o#^bgG6e(_3)|sr~dQ{IaVeA?Q7Vq(9 z+sOd-pY>wD;PS{RTMiwwq+zEit2dZ3X|_J4{ksvJ+YXN&f)wAsFmx3_+5d)`RPHJ! zP0)~5-{wzD90VAWc#O>d#@O_4OpX4=T*A`uFRUbN4FAU2tOYKLThj=z%yi{7`hQ`j zm=DXk1=fyjuyJdLtyf3P+*(N@YmIGCN310j_YpMmX(zC1jizgJfz`k0c>ar?_rHRw z|1b}2g=Kgf%oG5 z){4`FO)+-<2NMrjHg5r@kGz+syr*|NK~`H#{aa(~FX#$xgJo1_EMvQ3*+UtdL^bSF z1z73Ijrq}|pez0|-Ujznm*Q2Gz+PSHK2V8}x^4oij&vW@fyhZxs7pjko=BXaLi$84 zGRNtVH`ajSu_ly{G^Fon3uZ10XUm>4b{`zT?)`(=zGE;44vgi<;mI63I+Ih!7jxn4 zIxb%jEM3^ol?x}hbN#9mF%Nlm_a65zUFOz_!bl!r%;aR0!5?(W^q_3hiZuw?_6 zc5LS6;cYxPzn8c7WFP!;k>^i0vb8>o`i3BuuGSGusj}mMHhT{1NuM=g=YAtLZZ%-_ zIBQn^kwj)>6=MfZWBI&gY+ApFE!!utephdn?M!C+1`o!rGG*LaBgU;VWXv){ z>Sk)uf4mws!<8xO)t;>4mZWC4ASt62aj9*Hkp3PT)sEoE&UgiO#Li8&ot^YOv%gU? 
z_=EOZO=ztlz|#7S76PkgYE5Wk@H-l=vTY+(u@jiN%6Jgm*N`a5Zwcd!$ew9N$s!Ay zxW+WYDYm8Px-Bk=9q~z5B2e%XoFN-s;1ryzxX}H7!0Il*3NCCz_ri7rinjgo6i2QE zQE5_nhyZPpx?kJLSmJTj?#-eyV z9Md#$5p=oAXYkEdOc$X+VlTl{e~E#bjhMPDzy>Iud5WmRe<$opj0nouy zEOm-|YEdFlRIN#Nh5W7T1U+O~V`N=~2|6S4l!z_SB(_+K9>vnVzFNQ6oE0-;o`u9o322;exGU zoftV7oV~V`(^r;CyFz|m%jJ9PID2~~hcC}(&-s~bJ3E1{^l z05@{#{3To)Ps0`rwafn7(&6S*jx?w5cv}Wea%8|*2kOSTF?zBecba4egDOcdk;B$>MRG2pXTh9 z>nvZpk@(~c+&lxYcXY?l&}^bOwztI1LBh@wPe*HfobBi?u<~=V$J5r5FfYMXbO;qG z-AM~Hqlc&T&k#!n7lu<;9La$Ekj8dcX*5GiqnOYqmAS);m^~zqSwr$!HKl?r^Yhue zCW~WRbJ@2(ku!U;d3%2l?;j7~_Jsm298cl+zF5v2Nfw9}@#IcF=|f}r{&@ypzRcn6 z=Q+H7Ka-a)Cvx{@e@-9G=J39FP99I>_rW?ZYOi*tUrei+OTi8uKW)rfq4h^Hb}ce zhwYnm*t}kyl`Gmw|NDa#tDCT8>)(=;+H?4zy1Z7OgWK%5crc2WiZ2;l>czH6Zj4QA z#q`*=EbbxIda^FN3Itdco@^{|W^IWV^|g_#>Ko4Tei1D1AI-{vscaip!_G|J zy|ad~W5#ecPL?8Q;t1!dfw&wFtCoFnJY%W}zjKXb~Fefu3*xzDP0&N;Lx=I%`}b#8)*~Xo|H{Gc0Z7b>@Fcaq+tV>vyaq@z_XGayDy*i@6|3?;ngbenVdp zlab!(65Zouyh_v|EJ>GO37

s`W+ zNLR%vTN$_fuK1J+sH&9-@2yHiKZya#L?~`6H&U=Wz8f)9B=<}gU`_5w#OO9;D(>Ss z(~!(DlCwq$peI=|c)AnQmxR!;ube~22eV`M0QT)0DaFMUjvSoIkpt5?c6ctQPb}l& zxlLTVvX5IgPIB|cMcMzZbN|+D?q9pX!%J6rc(I*|GW;3-yU>H z7epm@#yYXHph^WB39Dq;aRgb`^1Mw-Cuutfuw+L{Zqo=XoBv0UW$QY{Kc=RfJqyeczw26}r*+UVES;qUQ64^_cu%6ursp?8l zMHd3fI}upcg%HK0sRAd(99LO=1XR5Y$gk3)utJBzGA(k8RLRQkMs{I0@)f}9txHjF z9kLbIFq8|hij{~fkj^fz3C-$EXiirm@>K}Sl^vl#jcCPDt73tk;s}@G+lcah$%RTJ z=XWNhpaU6Y9muWeOi}+Xlnw4e8w~B}S=WxfLpv~FL>ESm*JS2wOIECq zig;Txm(KO&^@~~j{IZr`U+ek)WjEiy9_HuwQ}F8|a6{rIKe^6#uJYmgdES0G!@Do% z1R1A!{`NS}-yG-Vo0GhGcbYdJ&hYB}DW1GN%AM!?xb$EP$F8qn$C){7I5L67dxtP| zX(p+4?gUku5g-v*ZB9_NDgG6D_{g#a6w5j;6#$p25G@@mzRHBuKF;J0@}Xq7#4uNZ ztOI4kon#rEs2FNZ%?MlSrg$=TW+0R1gfeYz6mu6Qv20~N>o-)gX+uxuP0e6%wI7q} zdN8ba7+F0mNeVY3#7mdzqG;w%AIg}aH4N-s#@G=^@HcOP)) z>2ogMzQ>&>&$#>a1&?06;mMo#y#D;H5nCrNTxRdl6KveRie!T%4?Nv@yfZ(im@7dwg9TakVig#M_N9A9wtm?THESB|A2h%rIAy0?df@)TdWw z07J`S>6z|FX@WadDZbR^hcZymHLx(EF$Nb$F}_b43r82TXiO%HM#r;ZRyqgQmU3Zl zU#=bL%he;5TsvOImE%Pm-<`^Zqj@~I)|)4{YI$+5jt?)!^Wp6@o<5tvjjIDWd!md3 z+ml(p%#X!$>{vX{n&m6)SijAMjXRy$d03M2#b7R94&~0xa30*1_KqL-?)vihzCSM> z1n};0AfKKF@%33S-(Q6A<5ieM1mE67@$G#qUq8n2^-}_0J|yz-O&srEhVlBj4^JMu zaqGG>0GL2$zo(BmvwNFBV1*rv=2^0Ii4~hRyL0q-JlAhm^X&O#K7Lps`@u%|-XOTz zDeW$4_sV{-zw!F4pFg(>w$@3(u|P03nwQTjxqCN>E0_E^ciN6qM+Ho~l?6^39NuFr zk1g20+mx+aR9Ug|KLOTnY})cSyLYu^XG3Rp)$4F??W0vrXeELYtsYSopQaP;jN^*{t!XmA=nEm;?#p!dnQenhUC$ z;}F*pmqZ1wTH~ac^f69w71kQ_kk*(63Yz={VUjFe!rS8?CvZv8CMI2eV*Az{8 zUen-DG);a-(^6h*E8(b!-_a6y>bNvR&*MLIz5YSl_fPbKn_?0zpRb4Hz{D=tDUMv_ zsNyC?xMy(}DOx)d+`CKTH#}nps}VOso!F6*Z^lXfn6667EM<~rb|G$3TcSp{q+pIV zCA0L&7@^#F{qdkVF3emL#r8v$96B|GhTZ+yv8#?Fho^A(z%-5@p2N{Y^8{J*IdMb* ztZlLnAD4ajGWTxZ;P#E{+`e*+`&X~?{KhR_3ACP`KF5PYhq=3Z4>z{eb8YKJt~RXa z@{V=fJiL`>R}S&+o)q=3PfGFB!10s4*|NPm%hwsOWw##t1z7uz7_)tk4jZ?svT}nG z3zn-eQSs%9KI)_pD15jV!gfS;SorzOB9jzNrakb(A_8(FU@S+)N*iE$-+T3 z4R4cNLY>P<4Xb59#VqDd-pt0;J7r(p&Yr`Pzm82{l)$Ke@sIcq&!DEb`2Hv8Z;7RIGxRP0Moo%^F4|3K zFNHxX=>sj51XhBp&N{!NX7)FRE-i2pzz5|S5?*0QOg{rM##&M|%a*eF*7RI#NuT8c zEO)_`TT**G(nMn-Yrjl&f-=pnHiWW&Umh1O4CBe8 zg?#>0&yTNr`1<)E-@csU*RN}mLCZWojjNT3u4s;hpx>GX1nW{QB z`i}CV-$*wGkMm;O%wVR>ieTYVNjO_7sIRYKN5cTw`Ny(lT`j}_7=8it7-_ajBycQ84I**Z3JW{hVobsYa3IHP4uvLu*B8H4s#QI ztjrAYbg{?7(H0+P2jWA!(<3B^FmF#{y9W^I<3>WD1G&-8B>0<=AL~kSf~SN>W0WL% zQIYCLRhmDQ()P{{VPwx_rVPqsT<=tBbGkEJ@U?7oA-k9KXYcYpY+qc=o|Oe0*;35@ zjoIv8m&}=cMLf7v%j4^Vd2(YYukVfK`Tfz{xYVCx`|@d66V1|TE{v_!qJMcidRMfd zZ{Jo7l_Fs1*iKBGrN-(F7BuW|lEK=G3uk?}c;1aGmtDAh&7DU#y?AxkhmQ~a`0^-# z9|EnP&x83T(E9l*wDE}67imAr&mUfd^5Izo@1F(n=9vdCo;Y#;wmlcm+H-J^J?mGQ zGHh@ivLo z@_3ejZW!-hm+}0uVotAMZeMrh!YOmk9<}1qX;-eC^Ogd}gJVZ6*uJwHTeh`i+x8ah z*x80H8{5*bQG+uFy|{5Qj{5?vy9e{wC&1c1#Gb9?rYw%^%(ir6j#qeaTndBJ1EV?6 zKbl-W_(^)3CT3lPmteRYw4zFSDq@hgcRm8}u0;bgU zW7*7!0<7_j8rWY7jwyn?H8gBl$bmg8IeB6ehmNda-?3Gky0(Qgx3{ov{{(uCi;+T4 zk3=aN6{qaO3R{xeuQL_nR2e>7jfERbShdxZEjvs(al(P?m)yB?$%AWxt1|~}IkLxw zT@7}uU2n_0rPhp?q}N#KhbH_(Q1pKUM*mCqm}Z2;wxoM>b9{pT!7o^z3zMI_H>bOx zCe*(<;eIU$7j$(Obop5SiMy#l%lHrM4Vz$V^c(hOf8t;xAaj(W-lGk!ejTy*ZHq%- z2V5g$KM(DSWk7q(0$XAd_#YM_|6w2964!*bQvA2UJ*7218Eptu+{!Fl#`f&acxNbP zgKvv{OdD*%TVWZ}4Eykwc*Jy|d$J0#l3x;XjY!Bhm2ugO&`d)-;v`RncEZX}af_Xn znA`u0g(OuoL7@Uzx_W1aPBk=0}Cx!w;^Mv-;rgz0DS8!FNhF7UFz7+yu#r=8* zbSAb=Ks8!}v$_>hzys#iW%% ztlF8w;j_ayaC{^?b`Rsk$=MtgSZ&@gnnQc%a^}P;4((gSzFmtrcdCIW53cay=|kBc z1Xu#9C%5m)xNwWRXU}uv&=GF#J;3cN(EzSt7C2mj27F7_`db{{yTJ0xac5VELr0;V0Eu zK!!4bncWD??jjvp@y$X3mLN-Us?aZAK$g`Wm-G(UrwEvmq!LN$hLv<^3kmZiWy}+$ zN|4qnNm`M+d7|t%ic^Id{{xn7=Kp}zcny{S%e}w^_gn)B0alR?VHJAB^)@7Pu$chM zio!7#6pWR}W6jAMWlG8*U1EBv5LKg0q~I!4VAXi!N^qq(a^+jvQ6SI`uYz{}n@%FN 
zwE(CyX=NJZNd=l;piXhV234gx^sLaQOr9$&*P^hO;H$qOS+)8k_the)rv~vAY9#m6 zCcTdViPZwTDjj0V1%ZOBkX$7~a=HjeI}x1Tp3sc;L}Ya!Mh1Ym+;*fCwQ0OfPEp@3 z6xS-EBc%g73bZ;=J+upbhO01iydG0#+p&0sKlKgq96w&p_3LAK{CFkr-|yka_fvwY z3w--_S+I1AAHM`tKcDgQ*DC?mo5tw~zWsVB(0VQLitoQ(G{(0dFXi>pDsJ`i^)=r< z%In_B>)yZO=f~H4fB&5CAD-~-(-S_wzb6&{Wga{@&eaWt;obdt4B))%tX= zl)h1}giCQZTm_k)W%~G)nGjTJLa0QvJdPJ=r52k~-p_>^fmTHyduoSzGJaY()8<4o zdwv`X7N)RvZLy$g1V{FdX7|>i?5Q8Y+=+Qq=DIh+%1XBxu2vmLjkYEs+y+MrB{CBO zSTJieb7zcW%EVDjl`4P1y!k9#u$Z;$HgfdjSOaB@M-zzAa-J1iZX(a|?SOV8Mh+&h5?4CB4`*Cx>N|V%az^nS&b( zIJL8!ONV-K`}9C=o~q^I;VMq=E978(8k^?_F}c41Wr_ci8rp=k*d}D8NMto3HM7;>SZ@em?dW zYz6TBi9eqn`tbUWJ1_5e^5(uDuOE2w>Y+U^9$In#jyadln{)Jl1)JCFv2b2j=FVxy z;w4?!yv2kgM*_KaxsWIK>v;ci5?|lVmVUdKZy(n3^OJz;%U<}rPx|yOiF$s1Ud6A^ zbEF+BeY-DTJ{0rmZ3b^&#PIZ9Adl|^^ZY@Syk8U#?uBygng=IOn{e=m8hiG4Wc&8E zY+T!h`gLlYKIqNU>$$wURl{8=dUj9pqQ1WkTZ)a@k!#BFQWp;9J94bdoBb93tj~30 zV|fspdPT9JPc&NwC$p(8hjn#DY#3g_&Zz_0KX)WsCJkislsa}ToWjzvgIF+T2+OCB zZ#+dff6_Q+O&Cl4npLb{F`vVG)^Y3dA@=TE#fJ5B*>_+)Cok;a%Kd}fcy>UN%QX5= zh@oVNGX;t-mGoDm=XibkPEu#cl#WbYsKl(r-B`L(MG6{YLA5of57=;OuQf+@$hK^- zU`M^B>_=uSUTMOVIok9c+L7X(9b~NRL`FedQgd68meZQ_+_ut6WXp3I5;CTzr?e$E zwIlf{oym!7PnrNHLBJK|(Twhn|KMr$8?F{jaI^azPX)fbTj1Yaa!Hgj{*g+O?4(#t z*2OPg6W6G&xW;tCEv_9NiEZ$ef?QGLhZIO+Ea^gQr4k7>>cmz^VO^;v&&hj8PVq{S zf-~ljuehA1KrtVrD!x@pbnn}h$RSerk5ndUloBc9 zl*yc|LY4woGrEyCw;MULJCZxIJrxUOn=a6zX08ssXBaSKo(+>%`mv}!o`$0}96UXS z{YNHn?8IzN99zWp`YF_Jn8J~L%Q$;{1E-EEVhfkg9pK(g8RzfZ;nuY~+`f8?$G2|t z_}X=@A3er}9lN+LxVpb@AGfzRaDCGTZf;r6eF4^!WI<9ejO#QVR7w zGWPai(JD6?Uu{^wT~Asy8ul9qvW(fdQ-j6p+B11xa|Vw3kK*d)Waf2|expx#h%rGv zmiW0@;bCWstECIBX8yREhT~!!i1AGWZe zelwf)EMwii2`t&un<-n;7`nVWHM10VZZW5DxG7ov^hhdGC81D>7{#^SDV^}?AufoP zJ{Hj)$Dp>@`U}u~+hOd|5>1Q0Q8p4_={0FYmf}cOSDhw;>fbRDjJpOZfYy!JTpf9D zbBadUP&M6w+9j?GSm8wf6;{-)G-J?eW11*BqhnlKT#~!smZBt;f)bv|-SAH7f?ujc zMklEhI^vt#0pGm#_~a^Z)fOk&v1}7tV(m!y&+l#_X5tHujT%|DZGBYj*lM|_gK3ixVkBjdcaSf2%KID zrWB}pFPQow$WmO){ZZn*JpL%J`y}yE04DL{v&0tx+E;#l`!31nE8pLI=EvJ_{CfA5 zFVA1|@#%BEy?)8h4+5~a&-n1-E-xRQ=kBdt96vRWh3iYG8s|ogtZy$_&km&$0#S!D zL!3%X@GLPUK!6oqs!wc*7Rh-!l=rluPk(3n_IG6Tm;mN2NMPm4Og3*SVcX_P>Ni$# zVAoI%Y#+gz1yxKKl0V#Ug}QY{_g)Rk+TlsIwe3ReYG#{^r)E?naNt2bP_d!GfX*RoKe zW^f%g?w%M}*`Q-?iMELuDuSnOs+t%mz7c39AZPp?wCUkzMS7$weF|eKP6;H^+l&Y=Q<8%n$cgc!AiMn_N-5u3%ITxtmM+}Vves*Vf|!J##E@&C##)&h7ME|wj?v-cjA+q5R=e^r1U0KR{zV$ zk)4@4ONDhS)!4RPmA&;^oY&}}S z&b++t!b?He%iE5;yk{=ZF%n=Ia`mDSCk`92W2+7;mv>{qg0?JL)Q)v)wb`@FgL5Y` zxO25HFCUNQ!^sKmEA9dvU?E-$i z7{;BwxojQj%$jloR-~$OpxB!8J-s-b=fu8jCw3QlvnAh)Ljw~yIX0ieV{$n*xr}r3 zhj4ZE6izH0L;d(Z)KBb(VqUBrv&XP)&Nw#A7{i+B<5)3c0+WZ;GHK{w8JBXYt*W4| zUp4clj%4@d=Up3;ud6I(H5x}bHTDytT$vv#ot^{ez~SZ7Sb22(bzH)*_M#G)0t%w4X{ zgxRW$oub1iDgFixSEZjopxM1Y3crw?4g{pN#XGT;;H){G z@vR9=?o4#Hy1+`G^kNgzOUy_rHm66SIo&g*kZ-&es~e61U9fj=i;Y7oY;0R$X3-2I z#pJ1m|6yd<90R@PQbhfSrbbg#HU2?GLz1udUnuJfw2c1{U`bd>*!+%);~%KI{fV~E zU+4z?g+ZtSSN~$0-VVpyE`lq`uO+H@SE}HzIO8%va5YMm^hp||j#naiOh+;%bs>M2 zGR5=-gwOY8~1r2xO#H+Di6<| zakNUO{C114-gXu`%_+AQDPg&9lRFmzILs%u+PBuLJa0wpokoX98(0s~BO zcQMAv&J0Iu2OMoYWvmUr*(MT4+ZY_}V)6D$r+aV#@d-VttQpVnQS+EOdkKryEM>`- znJkb(ecG0ChOg*B@3}sdPI4e)h!p%%IL6Dk79qJgJiPV#)(2f=)MoWna{ zD`2wpYmJ3ZTMV3AG)_s<%}`(^!0IZYm{?Rr|F_1|!j>*g@eXTCSgL@yLXYC%HuRk0 zNT2ym3|u9^T5V51!Bv03)xfm|G;xe>iG6Ha92FN!CUn6?!cD>>QGg;9iFaB@yfZuC zo!uVK>~^?I)uO`Dc+1X$H=8}CWjZTt5U>pTB)-eE!pyFMRqauX*>4k1xLP?$HO{KKQ`LN1yoe z_#>YmzvuIlH++8jimxx<@bleUX&*^dbBs$D*0AgFXr`}EqewceZ;diGWnHk9?P4y# zvMe^jp~wiYB7K5O6c_ueONC-WO|>O`ds#DKte>DLk#(yx+1*gVxs$^$vs8Yn)8@JeV{3QENo#DdmyR2>4!PptI2#rrf z*TM?@{{t)yV^h=&4C$t+g|0j{QNT$9-PXw+8CCM%b#3@gqC1 
zV0wF2&TG%+C0*FPN`<2v)w$51&CNXqJUwQ~+q3q3zT(WUJ6`;{@55KY)`uG|yt(Ge zi>ppNy>7*`+lD;7tHZrpI$XJ^$LZrn9N23_{U$9zRu`5n?ZSq2hV0$$EBz*$o9FxS z{O%auJ(fhjm%6ZVli+GiXV$GyVb2y@p57|p`?H~(-5AH} zT65M)-(S?D3!9R3IGAn4*-|g=436j4@HCFpL~&$5BIm{za(PBIw-*oQ%HrW%Tt0!* zOU5@|jN{9HXo zOzgs-5p5(XwHF|_XY}y)jFW9RZ9-S(%~Y3tRF|d8^;x)7n*}SiS+YT&#p?`Mvf7-* zD=e6|$dKuCbR?l^F=M`t6olGLny$`-$*PPSr_99h>dcvH$kN%?ESqV=qRCdw8)LQYQp`!HS8&$-UrZwY z!#wssY*Jg}lGhmz$p@ZgYJw{@{Clbr+*g^1f!&B3u0;Ao6@ir!Ia6eRo25$SLJew` zYEmN@?!8cj!OI1@D~%a6SC>IE4OzK6hV91-SW%zM+HH9pJT-pn{XmKF7Tj9I^3n^oIXSg@`Ow6;$*a%#d-TnJ|AWlb6>qZeoxPuUn7dd+sGcDXCV zws_l++pHxc2DxXpf=%jiDgRSaE7FLkqiX0aKQQv<|r%*ySo? zm#-wDid~*2*11{&E^RE+x&pN3|xQ)v{sw2pR?oo(7weGSGzN{zfG9(j%cp z;Md21%>Kq?3A8f%7?IJ-Kn4YQu9qR{)p{fgwvvmxkzUr3w30TBN3p60X;IooDic9h z-6#uYFX+yOEtwoWHjwMrr}O0DO5VQO#^?73`1bi6KYu1*L?f&Nl^8Z z&p&<&tQ7H!4`06W;iDkx(`Wgc&;0oEiSOSOlWGd6zI@~DyHC7$`j$s`UUKj9Gj5%G z#Ep~pxpU?*k1s#x>6I6}y8Vhb_h0h<(JQ{c7JNN@#>@NHxPNCqhfdC9$&L#8FYHc8 ztq%6ps@Tf1n^a1a>SHg-MlpRusNgePs)Ry;*4R2%ri}4s*}NEbZzc_1M zW4L;1G+S4dFl%faL#lj9>aK;qZ9BXy+u>`kLQ!US#tko%?Giv@v?u**@<>k!r)O0G z)25GS*RBRxU&q*g@Ca)*Y-yZk;n=B*>_2vza{{VM_a1QI^jQv{yTJKdcQ`KSs^52@ z5n6MWucUV5XdJwK(H1C~IXcUZV?tYHRk~^GpsKHjy1qUJre(Lldm+9I>{y6O>8yPA7VKfRK}sE!Mye|iK1v!fVQn#h>4B*s-HF{O_HtA7eJ2BflN zR4$umR`lbN!eW*H3oi>e&vQJ=vDyN4s$HxB;h6+Hi2c1zR@j zvPlyCu5Hd7+ZV^hBh}Jxhf1YAi8ptr^X|bEiE(^-HkP07$HSMgf~w*C{5V*EC7AkB zCzujkeH|s38ZDu?S7Seka(S+ppI-|2_9dJ5?-RKH$dAj{?Kyeggq?d;S-qwmix;$J z?Q#uH9`NPmog&`dtl{+9ShfszWbY6c>I;l$NHgM4jt!>@-MP^}nu~p-IoT(clLFhb zqw~2isf;T#`*3(dHR}c!vaB|r#r^VF&^M3Sy>gjUnaSwV42G9xGHp$s z$Ine<>4tO`u8U#H0(Vw#3ue+ha|VxAqjrcS@RFt!mo%fStU2XnP3hC8CBsH`5)>9D-sgiSka*uKkw-TRzbFUexrS{>?lTg&s7EM23`inV&IT5Bi;k2U*u zdC7hr%+cL}?A_$U-u3?MSslcN`R**9BG4PG%g{cWl;w4h;XRc)|3vgrmD`CiXoPi$rxNc+?=Z6rt}_TNv~0g`@^Y8Ye_*r8INUr zPmr-YG_@UJGL8pJ&Wg!YCcRLfEP2n&LV4dZ$sv_CB$b*=ax}p|SrfPLuJ}Z$VCU8X zXSY_kxwpgCu{k!5EikuihP7>583Po!5;U2#Lf5c4>bi=V?Egc{ygAyoEzxyofv$54 zwB4Gc=iLm$z~-2Sx4^1L3v82HuPEQ^&hZmB5~A#10V{4pt$oUmN0v zwkK<1H>wxt3959do~uEhg__hY*JIRbBgU^WXTowT#xAmE+Hw!p?@nXm&K$PwE$8s5 zVeCCJp8D;R*t~fvo7T^yVe=9W?%Kr3gF88McrVw_37#&W<<6D!+`fF7o0o5K<=h=k zAGspi>;#vO9OM4!GdwtPl3V-a_1m^_Z~Jzh?cOQTz{7@(JlMXTCkHn3>ZIW6!fxJO z-_NI~2c@7t#N8)TS>F)O@{M)^Dl0ZiAK9|YnswVuB=lIjLz|_W)tIqFi4l{P=siS( za>*g({cI@d>mb|4ny6Ggyu;LS_SeA4PY)M=8GHS#vGK6N%FPB_FK3(seeeto#y>iO z=!|%>tFovVnn%rqOsXfvQZYG*lF6RrjkhOxh!K(1>U1v7gUZ&JN6J5nXop33 z8?3|RpGPRpXN&A(TM4|R)Af)JCnC3qY=&8sbf9?AUrIM@BuoSS}bjeth{1%9E$}~`2wsQ0hU0_G+DjzNR|R(iX&Jax#|R!X%j7w zNgiO-2&!~JPMRPmZJ;Ts1B{82$|hJU7vGH5_-D5vytpedVx)?Q%~l-2QXHPtqNG%ZqCySw@>EI7>`G)}Yl0G65}Mta2&ptesx%t!hZxpV zhaP17Kr5%OF?sz=$?GHFsx~CKL{osJL`Jz{ z4z0GNm$av%R+};zWGZ@TGhvDk>(*y+=%C>0${e0O+Qf&~2l@6%VDtM`0)^^T9P-|_bKd!9dg z%l*608!MIbM{aO@=VcCUInRL&XF0azB4>AAK77lEr|)?4 z^a)QNUy!7;lC^sVQ@1#tNLhA!S;tngUd^StG%eS}u|hGsyE%z{^odC8NMWHG3#ZB; zyE&J02WvRBw~~{4%DHf)FZVBv<>c<`|lq3ZCpSQ=CH7mSq)eDZr(yqK>7dfXdt&eZ|G_*0vJz*vzD{wR3X9&C`=0 z#SHEODSv-|ygj|}a`(j7!n;ehTV$>a%@$94lgNW$Fu}?PKo36;vDX7tKsg>o}ArOz`=EC ztX~+y^0~e&oarv8(iYhLORvIKjH~U$q(Lp1IN(oa)iq_sxYlf%+?GufTe4~Ff9#&t znxhLkad<&{jxFuN`E?qc-Jrpxt$N(pYsQ%dE$M&lIk@>B0r+oh-`a#7+y7(lUS$p+ zHsRnwD}l5HyLZ}iWN&v)AI#+P;r`q@Hi3Jmr||6ZG+thx#ETol1zQ97^1L5E-qrBy zV+DMc_);a1>I2^gN;^Vgtb`y-@$>g;ets_E%g1cKd`ai?mqcE?36Uz?m9v*DIe64i z_RDT8nAd?7i!?Z}-IGU`a(R8FigOzhXc+CrjsXsA$}wP1js?f^1zahD!Yo%V39c>= zN#$^#IQI04V`uLKRuzUZJtKf=`B5zEpU0N5y=j;{kcMe>?3_D>J&UI`o+4Z#eSF!J zF|3$1nYk0jFn!!8HZEVr$^Cmce|SF^4C=a}efJc% z?;YgjtHZJ^+qiyr4!aKZX6~{`R&MUW+?CxKJI#qfBTOl(=t4~5Uj&6UAv*qdvJ3v9 zs$W}1OwwT5Vnb%GG!$SNvRV+e=ZG^$Pq=aBybq_&dvN%q11HYAbLf;KI}cc}_mB;T 
zjyiGrlrPsVMR4zW0uQbybN5m*ch049_e>@i4#aUrfV_7@Fe~RcF=w(J1A3{EmDY@+ z{4Pu!@4=Wc9z@5qC0+{i>OQvg9_UPUtvx*l+B0C3D+5M4Q#;Cm5mP)EGQp9-;~b<| zH6dTdp@M218Pk=CP47&k6#RkFZ3s)~M55%Cv>ZLM^7P5hm!daU#_&8tQcKK8EU~~d zrVD;?DnzI2;~Cf$h9>m1|e=$Hwv ztfaLQd^xwoz`ZpFlAMhLB#(x*!a7FsW@1~MGuz{q*9Grl)yCTv`Ic(nT_V7$R3~Pj z4$-wLgw(VmzOEzXv-Jg5`T_(MY8M(XYNZ9!HaRnOwH^H@s4;k=9-HkIE zW@A2wPmGY5z~*hEShryU>()(U+vY_a-dE4jecL&@cZb00JTLCw=IQMl+`M>RKy`=% zyAE^u=oM~VdCJ|(_qZ*%x_R_4*Z1z`#;zUQ7g#-#_Tkno+}XIATkDr|cgsp1?Oo4{ zWA(f}x06pd4)XQsA-;ay%l&6F*}gB4wOc*dxWkF9yPav+Sdo_ztfoI zTTGZI%QbPPErS(DeugQ+p2|UX&E`%4^5>;hETz_+thgy&_%#`>6dPG#I;+N9_xA>+wM*N0z zcoRIL|G+KkUz{Ub;}FpXtL`lYSIsf@YmTnRe`q)htQ`MD)usummQB!+*2M8IY}}jS z>ff4x*iJ-Ae~XiCnA}f?%n{;%@do70Frs|9Kx@4fHJdHzQ*S|US>7gAA@b(kn_(2v z5~I*o7zIg(3T%dX_vToK3Zx>MVHw#}0QL{2(wc-f7hnm(q!Zf+sLZ4_O6Z75iabsi zTxE2_GFt_QB5hnsM4}RIW%_tj>f=?d*EqkDt0ZLy#jSA!VHUC@m?Udpm9997rHw;| z8tyr2_!X%WQlUX?Z`pxr1uTlAM|}(%Bfhr*(dB|L#kFCgQ{R+U0;3KjNQICj5mV5W zSg9Hkq-sbk6+p@RBo(U=m)n^n0akXYCWV!{6f{;&8YJhc5S7`P;Pj3JWpySXzbpPl zO8Av36DaQ&UZqWsUi$LC4M^^-PnuvNyVi&T0f*uqd_@CHDXcZ4cz^|kea*?LG8SBE zkt$Hj5k)5#b|gU%SWv0ac*E%0!FDWIn8@xO{keE%22by9;nU0GeE)b&s_RFR44(7r z>r1|Wc_qkt%cqa;c=!H2?>~O!-Nzridi{;(FTe5Z*%zKZ_{g*SZ+Uh94euo0KX}bs ziI?|Y^XR(Z>ePKs@43Rhb*HIcaFBJ=_ON==Zr0B@z}9(3X;^fa1M5$7cGned9J|Yt zYp;27_YF@TKI7s2>zu!^gKhh!F=1&DX?3o62&!z$l(DW-!Copc$D+g(Sv1WECr+1Zd^+<1Sof^Q6lY_W;u$FDha+y9Xgdr8~6vdkn?$Q-csSd+k zw2AVwA}Q97xzk6oVbxsv^ekff)G;hxI7gDnQnqfZXWNeLGN_ys_+8@k#cN!?^^j}# zo^t-?-NvI$i>9WGQ~$cjA;(Ry!LXr{oo&q@#zEkDpW|?v2-BnL9b6 zZfq#fQb9#en{Ha_Xc_2X92|L!Em$CH*2Ezt5NJ~DuU zj5tzygpd#wNOp1*1)1>_=OmGt7(r~PFW~`hLl#=G@H|Eaj$lPg4tX*uunYjXWqmgURZ;s|!u6>@0CAP&tL&i>iM z*fx0pt4G$bsIE%3M->x$7c*r*PZo?H-gt^|?u5}y8Zne1y?ZgZR}G_T`!lR}A7)RO z$gYi>+20_=!m?$|nm&~k%NKC+)NXFxI>=*5c<(#c@FnM|iGvNXqgj3@;!TSxeCnl~VF$tZeP}U|astdWr=ENmR!4}ey z{9H3C%IwI>HX=4jjmT7WVsj-AMYX4UqOzb&4e!7fghzBDIHVn(0WI(k>x7e63+&xn zG#<^e6iAr~v`noPb0oFF(6$x2w$0FWXoi8Cz{;l$W{PPaLfT@bI5N_s9gfNEaLMd| zdtTSZscn3U)$uA&!!ut+9;-Iq#wfV5Gl8Y8i0Ijxq(Pl19IsB*G#v)aF`{m+F%y>9 zGI4j~)3Ru(mgFd)rp- zC@yYayOO)>B{nSQ=DJ1P+_ae6itDcqt>@jPU3`47mk)2Z^YH0x_8rNlVQ(lq_xrPJ zpC7xWJ$SS`2ao%+_lO(21naww`LN?q80)u1Gk0kSV`q9Xa)u{$Q{3n~&XI~?cBEDt z;h)?Y*XRzoM0XPyso)r{g-e7Ht`TN92b*CZU@YJ=#xYb+ey@&GkFI!UD-&L$L&6Ya z5(gU*(^s4DO4+vQt+9{z9sAHG^7l>fjQkDH=%xa#R=7mA!Y)jVBmOZH^BDLvMce&P zblm<#-M$H$woNc|`v=#+Hh4>4@(t@ia6%WN3smVLxjaGfCAcB-KZj|MI97vfL2`-g zKNTA+=_$zSx!Ig1`igr_c}e&-MK?fkh_fXoqB_fvR@jKfZKGOZ6C>D)X@*sgRuUbt zj_Znbq7oL;sSRS=Vi4aRvrHwda#XO)QNmU_l5?5j2Ew|H;ZdcBca0(beU0($WrRny z0nVlR*yihCDX20_)51Je1M4((>@(GH%T>d-NR8kMb)tG|(nF9HC%}pAsYi5`4p9|~ zZ}6$$pCOfp;L1-1jIhj(0vb)hjSeYADgvplq!cKTQKCj}wJzC}Qqh#EHU2JDI(WKN z6Y28$`Q7jpWCh3o;4cv!qrN{9LO>KZ@ZKjznm*AgH2_VyhgU%ll0t5>{w`clC2lE)9<^60?_?%aCIwJR^U za{eh-PCe$@i3i*|{(yVZK0I-c2gmPm_sCtY?zzs%4d>W7{}5}&ZfAbKbxf^T$)vIs z%&b|@ya8KSICKkZrtD?+(vzIpdX=lkA9MNKBd%YQ_q=+OOJ|R=f8Qz=tQbhoQIhEU zSP;-#s*IjpaIfw_KxIc_%eqk1Q=1WWcI;S_#>s6(>{*#AaLeM-zMee2JeIqshqHZI zHlusFGN{Cfic~8i-MZmn+zMCyHiS6o3mE)}=kt5&n?z!A<|zQLv254d{&Dc2vo;Lg)GoV#(GQox6!m%fQ^x8F#^tcW@}SZl35`m`j+VW~@sWZB;ak^f0!zltI-BZ9N0@42&=| zHkB|#Q%4^i17j>~9kH@^Mpv+6ofLgI-Z`{p~U-IQy@UC$>>2{aTX)X^BA0;O0U#N`esElLUHP_ zDwPR6lbP8+i4`Mcz@D7P(FIl9+Bk%Fho)M9$yPR!`no*8}GFuCF%#uYSSqQsoa-e}XrV60 zmK$?$sR0L<8*y~48K)bJIJrxO!@JsW=E#j|l- zIF`$$gZ%_p6M1lQ9#79M;Q7TFJUc&vN9TI-5kHGF;Fo6oPy<>vxEz02m^+hkt8is0@e&qiGBKdQ~{13GN3*JSbHt}K|X!i zV*0o-4C>jFqSQ2!!Xqe1O=rr;F>GGFnynkxvu4FIDL!U0fBq!ateGLm+RyiI*ZKbA zB459r;^pg|)Ndci^f_sao6Zibw<$Gs%8Z|D%G}kq ztY~my=aB$eFJWA}8p@@Mf!w+i!J}t!T)O7RrR%zr(-`*#xf{I-lA 
zA9~9+sN(ITQr5HFf>`m^3bq8B>y&H7$vyi!ue_Wo)Z2WA&;`)~wEA!}@%xD$GfW??z^tw(L`F z2n&|+Hq)H^LR)(G@}_?;A9B)-NlDP6q}ZC=0#l-sWGt7w=1xAvI3w8qG}IfkzPVdN?B@^6D> za62qQ6=#DKXMNh?lqA4P6JRN(l92r3ov+q7zoTo067E^u@RH}fvLtcmw7^%^zkkkO zB=_k+&k6broo!A3vHA=gsmFrl0+l1h96eIao?TTO*gHtp!$6K7oyCzu3uJ8U&4kHS zY}hn|b7yyQ?EsWOYroP_uADo;%?szbdHx)y4(w;&)-4>}yOWD2j&kF|CGK9o$Nihn zxP4WSb?z$n&z_cj>J(4q_YV*3o^*(If;m`g<5ga=a&$;s{oH^fv<7YxRcDB2W*>Rk| zmdnvoB{b~GXXB1cRy8CucWopSmvm>$!a({7wlew{5SFisU%C<=vVEN*1X^KgILY>P z4AQ~YUmKeMb*uuFuncOCRd{P$WnBkXs1mKX?y9dY(YhP4Vr~0#9*`TXYLtqFOah?O_qz4C8>N82S8-k(kKB?_XTQ+Tj<~iGYX>_=mTp zdt3)11#>at0mZjiV-Imwa@K4;@)sLYyuz5WRi-r2ac_c}$M0zP{DYq0 z$~3e+*5Mtoi|UADOh=sL4IL6i<%w;uOYDSwl4|30|5k}An8kO&NPcgg))mJhEkTtI zPJ$MfN^QIZM816t@#&*4VSs->k!e2xR&OIb{|~V23iYv;YQj2O4~sO-#*5(%tje{>l4>C% zUzyB2C9K$_u-0po}~53 z?a~-Q@;70{qGV}fq*WXl%Tmml)z^rUekPRmGo`%Ng7W^>6!o?syTVW~rzxRITvk^? zd$cAkN1fh%otQi^maQB5bN<*osWNu(;mJh-)+|36ZzjTwc*Y0rX=3Q<* zddjnRA9?WNHJ5JP<=D9k96EKD?fVZgXXy&k@(Kx!jmIZAO!`QXB$^})ZLG0$bHv!u z3MGAYOdYMUc6Jmz88u#yWoT-FhK@mFt1iG&)zrn<$`%uAJG2E|x@P7WS=-8EQ#1{Y z8z(Mx7GT*rx?p4Pgo~>e&dwg#**aluW#8EP2(Ubz-EniYC&1f{=&&GSBZ3JD^dUX5 z2Ysr`s4B@PH6ga~cTu5%ga!HF>1vO^mn%7G@$@N6V_zWXg_Z5E#vyme6DZJ;L^Hy4leLz{dhAL*J(1hZx?3uY{QK5=FBSp zmw6TcG9~X1rWO9q;y%q-J+vbY)3n*Uz?6MUEI7K_k#qH4TtDc|?NbiiIB&|e%LZJ$ zV#JB_CLB0s#Jxcy1h=%#Hn1xOreKw+{~H)}bnH zAIazbsdS!RN#WJ4WJ&08vd_fw_;wE--%I4#qja7>$>P!NWbWNm-1s_*J9h%PeaC~V zx9w%%H{++Z#|&oiq*1J0FoTBG%h|qRjl}xK*dm}>J%1q!XUr7vO=kL}am<-H zl{KrDuzB-JmM)vYoH?V}v3)70Pj2SU?F0P$b{l?N=lRn;9F%o7ZAK2&y}jsN>qT{6 zN8(aDOTze_;Fu<)7qwu(C_(cQ8>zxA9u7p`;X#Bm;-Jj%0^$9OIAUZC}6-)^35+svC?Tlu(u z8*le*;_0q6+-_LHt?i3>dTbr6qyUO#0a+tW#m;$PT}pmJ40D>aC%`G0u2 z>$obm{ek=EckexA7t$plAs8qos3;ABba!`mBiJCJw1|kHD2gHq2AD{w*d50@dXA?% z?!BL9?R}r;`Q!Uqvu6*phl#b`Yt5Q9dN?O)VG$@Jfc7aX9sh}i>&Mu*e~g{yXV`dt zj-|)vSa?pr%zH8`Jp{5`Ct~D0P8`e6Sn4=|HR3qh`iXUV%3>uDXYDvy3h?#9K~A9x zI0_dy#>ufRVFos-({L)3!@E?Kz+JNlZJbTaUOhfCu=*5T+X)y5sTqn|*&-23-GmxF z6j><%wNk9i*nbAg{HL?rUqH%VgC&0IEcO@33Q%RC$ZVF06Vfc*05b`mvUD&LsIo55 z$F|4-n}Rvm7S55TmSVnHrY@$bx&mXmtcah(vRHkVMH{d@QjcYlepsQZ7>6oJTd6WNjScHL#CS!!AMr+n|})1hIGmX|n~K>}GZ;%_Cd+lfun71MKwNuNtZiXMTnT6lyi;}#;1 zd$7DH)Ta|1Ih%rlwbWGy2w-jG_D~yd9-QaLH;+UH^rf_u`42CD=9}lg@b%Ll`ReHh zzI^l*&+okD{`J@50(;B#t8ck_{uQT>J)q~{O?viSr?+{O{^k)biWuBG!j<|f3~W0? zZ{aD9q;}C9dVoszCW@_hk!4yzs!2In=DR3#+9ROVN^SfxS_{w7wc`rCEjKvVcbBV! zPq=aEG50PCxbNCkIG{Y-F4%g&yB$X+%vu!yIyH~Qe+?2Dsy%;-|z=zxA z+&`DYt==^5p3R|ovmH6%^JuT|rDsn9<=HM|hFajddJYSf1&|l(6YaX5%;<2EV)d(tf+ue!w*i0Q-A}(W_T(j}FAWQL-4w`r zAqve`B3|=Afa}WW7*Ae_{rB_>>h|mtwbdAG9bH*tWlLIiz7$zz_V!q~*|W%O1+(S} z?3*mXVDUoKb+j=y6)-Ycg_4?N2nR!y)U{BaH5&tgEA4p;nWaA$&AIbX)zM?7s+vf3 zMNM}FMk`F1w{S7@MJ!yjl=<@)vCwdd6j+NyYPQCbH7nP!V##7wE?`_Qy9ID`hp;iRHV|mEsplWV2%_AbGbH=8!hQv z*&WG9do-^u<@4)j*<;JqW^2whtm1h2LOQq3;mBrf4i~F)AV-dU zX;ayoGKrS-$?VITOncExj+JZDTW7#P>k@9CT*nuee0h6K0O-CgZ=bE=`Ljjbe>|UC z4-L6~e<8!S=QDI`o`BB^9zS*9#gkB8KFH$Dol*g;D(;^t=WgF-9-b}Y;kgX%pN-++ zKnM>mcyW8co*M(^j1H`1`22E)uPo!*sHC5eDPyB%+`4APoiQ_R-!|sXy(QvYTFA|Z z0$A7eL@ivIJuTBH-!YNW%~L4dB+JgNvpCSOjL|+{u66lvsK|)+1U))}wKx%=M~{~t zmqV5_5@W$|f<4_qrc~SMQ(u9G{R6Ocy%c-p` zWzU`}cJ10s@rD#yn#;M+-^s|;Q#`$Ym7l+Pz~8@0-qm@Uv1`pVH^)&}WX-0nHl$=3 z;vF~zpODW;%#x+FW-j}Wtfu?473Z(6XY8(zD6swb_Nzp`coxL1n~uB_NPYU;lUsM! 
z@%mi=Z{7uo!c+1(BM+WE3gOEy5(I3MdG#QkcTbb}{?#A=}Ne(Y>WpJv^ zjk6tr^tFZ1QRU5pedUZ@*d_K!BM(Muxpk$S z_fOk+_oS5zC-N9Qw~h11i|A-f;QX=8?A+*wkE0%m5vxgxSjm=LCywn)r$`{n-Clw4 zkU4}!&cW4RUgXcSNX(f_W}zYZB638zlq@RTsrmDX&z_54q$nps)p77pV2!Qh)jpG0 zVK#;37SmW_F_lFYlcg_uo^Lgtc{ZOj&u%=69HvNtwak6`|6V;O%FI=PGg%X&EXoyi zfdFNeddRcbWd?I?KVyO2KSiPb2}=ZYOx+K=`{xipe0P|;w{v)KKbbeLMCI&Ft~lp% z`TA{vIDhi_`s-BQiSy{)SLwX}P{R9nRlIs#&C}QAJbGKk^~Xh=yOu=nwFFL#CeV5= zn5x6BlU>vYz;$*|sA7CYbRSOEZN zCcfJw?~BwRs!o@W7C4M&zNE94r#wr%RaoMwC{7RoDFG}KG0((Xjun2AmMa;S1}L#2 zcs5G|HCY<0AwV>XWkTjALW+`GfEKxWnCEI^RiKA$u>p3)bFnEBxDvp!&ey|2g0FNP zOjC4Nm8i=~kx{Ko(8n}>E^8C#vo=m2<1jUr1uIDJRXYg0IAvo2>@=4oZ5(4Y zag0`DeV7vK1LScDki$*X#ytFI;^`}czxW(3Y9SFKLdASPzv&_)nU0r$lUI~H-iZQ7 zDQeQ@J;xMfpI7n6pbz!7^MH>o8gI8_GC|ZEzRR@)mIN$_2oo1F&N3R*U`^w?sI#YnpfRX}x_Uww` z%;6o}=s(1>YoZ45;1M4lf62Rt-}3U#_oBA`y~t4BaqsF2ZeD)IQ2!&&_ul7B_XAEH zeIPEdJM62y!tNdYG;KaZbIB?87oX%{agQh{PjWQxIEUgp*dMZwM$dZ6?J6iJA_e23O!lllm z9BJM{>2`M#B?G{wP9?2Go|;A@Q4p+S@JJ9(PG|AM%^E)3sNq512F|raQ=4bQrg&pw z-F5J{QDZ~A4f`vT*k6^+)~rZe&6l%OR~;uaQ_^ChD9q0#HCcSWzm?<1j&t;A7bp8p zbELb63xgw!-n`4bXRmnj<}2xK!S_G?!E=#bzyFlGkH3(@YV7VK9zB1}7jM53C>!Ll zn0MpOBWicIkdT^%cR(m1k#TG;tq{{;*n4>566hx~&ebegvkFriD*>!UXwDIp0y}$F znOO={X-WHA$*aynMN5YT%Z#PClEm!!3s9P^%}h0QW*N+7j?rQ?CGgTSV9xx7(mq>r z^cS$$XgS6!R%0Nh4HphB{84J$%DoX8R2DojeGC@G08nVICrCXf;kOln||2w!4690+w> zOR)V4Lf0=P$=j4I32s!T`cW3^N==G4og1S#w=0Fd9Z|IBInlnso}soVUSG`T?fDGe z_9yYx`9xkE59C_&IxaVw3y7I|ZKSvPH59bcp(cQd^BTWW$?AG8wlOhNA z$OEUg`Vii#2v;)6&^2%)K=f{Oz^jE!92 z@f}fEzQ4hL{(Q*y?=NumQZ;*~11Y zZYT2aS_~gv6!P7xjocqjGmLcTSGZi<4@1-ScZ;n;%etsF1Bys%7HQl z4^(jV=uU2&ZRD0@6!L)vuAi$BXx=6A#TMS)@8GM)ojf0F;oBD{`1+a17mk;5qP2jt zo!dCHJDZ$DJ8~0jXsZ(ic3U=k%7e&?Ga)(3h>{H!Cw5p3Dk?s-;$wSuCcFM9jCI$UK281Iy1b zv=iS+ZV|dnWvR;~mby+9<);j*eHAeAQV^JuWyyL00-MRqS^cqy|HH`UUo5ov2P>>U zB_v*(q@4KzSGL@_yPX>&o7lhGm$Sz+>Fdnlcw4SOb{SU&_H(*tAAKh~ICb){$Rqc1 z_UvIsN6v8h!YR(5>EXuE6`tR_D~`u)p4=Mar2y8OCl7cjfc4_;1A*W##P#%w_YWTP zy-ArdUxV5Z;!R}x~rY9`i}DcY!}Z@9pKrSHom@gjNe}m zz+WT$@O>AL@0SQ<=kWfGIObn%;>QnLk-Ycxk6+69{kQG>^z$aZ`(X>;{;*S|d-?F) ze!l#!iTA&>@WpoxjJ?>x(34^=JuIYqBvl|Qg4$z&Y;AETv)qoTd{cblmaxuOc)?i% zV@D;iFK04;-6ZtwKEcrCGfYEdSeK}VYlZ+;njQ{OYS>D~^YogAne*pZIDd@2*XMYI zOu{2{A}%50aS5A*Q`lsIECDLtNtpRgVU4#ylZTl0oQjqA3~amvv_v?Ga@^i?CU)*J z*t*ZaLZEB4^LQ3J{F4Rt9|=GG6Pv^-xE3hlyH%CY3M~OF=LszIkaSv~$xpbO097G06&Cxeuqa>_3nc@H2CK6yN}ZL7nykr~jYZCE%yTrc$kV`X zgC34s4Mi9UWX;DSUmvSH1FS{Oz*5N4B27fHK5G+n1*&whNuGywk^vTCdaY2WY4j}C z2(VbBXkjaMmP3Xft|@wW3Xpll>EIJR8y|rfpCC1S{Z#N1H3NTdMf|)(&BRZMa8X-` zkPI^CCt%|~5eLt4IQUM$F?1@fu>xdCia1JMvY8-@eTpJ3*&4X#XyKlzfork~uJMZE zw^Z;=nMIKJ{m^WE!gA&ilBF#^YY>tlAeBB#`mVz8WRb?H6BVmPe7rt!G5SOVYZKxl zkmV+itK(EWoMiBGQX$T75u4N8*k6{#*}e6Q9O>p}&lT>Ud%&a1Z@GK%D~9^sirVUP zdXGNjg)jLE?(u*&^7Kndd3%De&EfAANcUopM3cF zPhNca4UfL~k|!_UbK~AqZa?~hXKy}m^4ukEJ$TOG^}DnkKEc-VY7)}(*i=?c$@VIo zy#fh|Ng^saUHW9n5g2z`O9xpNm{ zXt;z$;&&VzoG}xSlMHNXzS@j+mNwX$TjA#DM3{dd!QQ_3dbkr88A*6>AU^Ic#02@W zAw7Yd_(&ScHnVT%4k`+B*qD+?d2SY4)00RE@+RELiZDlWqMXc$b~Y!`)0*NaceciR zQ6i2-RkEKzmLDy7Vf0m|(^DEtZ$&htE$O`KE8(Sp%)7JM{B~nAzm62}?o@=xbp3dF z)Qh`&?YLIImaEk(=-r~nkwO)YY)}$Nn@&^4IGS@NaB%ZXx_7D4zjqG9U5mMK#)Pp8 ztGGS1mb*7BxO2;j>o*0~#!MI(T`X#|hMd1;#K@zy+<)oG^S3d)c%97`&x?67R>-}} zd5oM+=6qK;eI5QBZ*`@k(UP_rV_J3^(zrv5s?9Q#myDxw^QSaaj%Q!vQ~@m&dQa+b z>B4+Qt{O6QSs?DpTm}Yo1mIQKdq9zj`k54Nokrm%1$I=`e>i zaB(u-0xjAVR6=n-$RWmYlk;nH&c=q5tzx^x<&~JuC z;dhpY4?DPUv5fr(W7tw=%f_vy)HT|PLfVUSXM-5J62aJT6k|i7Vt=Rd%Ws?b<~xDe zn}NK0pUIOaaooBYB2XM73e^Pe-i+bqNEFvDhl}GG&&{jxT)!AAj&BMtZ*CFCDxcv~ zsiFeez({W%EoGi`HAHcsGMuiOWX|r}z_sJGJU)MrH$y#KKi0(AwhEEoRCD=69hbUm 
z1)z3uxu=>tm-q80LZc{p^K{v%Zko@@{hYv%ZVQAoEJaPed* zXOEUhTd{WU2w+o|H5;=n#5v(WORYD@4@A(@5g{s$ejMKIC603doktu+-ebb{@&)7; z=?H`>iwg22*1J!_%4s4iZ9ZeEr3itfCAQO9Xgy8D4D{BFXU^KsS?NAg`gGb7`w1+v z{fsr9vaAxYGBg{9siO?WHq$XQ`Gm!*KWC9hCGT`x=b}J(lp%GsK>|zyil;a6F!x3J^^0fx{?&WFe)N!6*T?ww!4rPCC*t-^ez`4>Ga2QTyEl}mg$ zdy==u+Xb-p@#<(Z_x9EE;@DAM_4ab>a66CA9^%8D9)AD&0{{MFh;QC?@bXCo?_ZYk z(>DTDKUDD7&sF^WOAUYj+Q46b?BVx6TKM_5Hh%c6gYSMl%7@>&_~!4UeD~jDy!)$z zC*SYk-iLZ_yxmFv198koGTDDFj*5f+0;rd!U~re0yX2&7eLjs z{Rl(Xf3hlg0(OaVq8wMlGhGemSb6M2reWtdk#(NqMMZiX>%Bh1E#z~&V#nhhKLL+e z0j+T1gP?I@xp7!~kH^|;GFF~bL^>TSAIU3mW@70ri=~e&YrSTOV?2fBPLo({JD&Mw zpP*;*f9S6Kh`BZb%6{VSlVtJOphi&HYym9S@htS7f|2hGmiWrB(qEC){>rTMmLN-! zmExaS6`;ZL0CfxnWadixUInVMBtjjN1TEHPXk#U5waVARJX@8u*(z9z>2)RZL@W^V z4KT}^Bfc}hDr+9*srs0u%)v5w4i<@GXT|GaC$MEB8BZx*7s~`4%;Pi#yfj#opow{k z4(n3q;FdTC-*`O&Vntycr9)7d7C}J*K)z}Oda4j0wl%;@kwAgNV6lBc!2(`^ig@_X zz+UV$E00gaf%ych0I?G#VvU)GeUco`87jDCtK*p?z?Q3tPlh_a0$Kjasssx_MP=!b zkUy6s@qZ-b>Jpbbo4CB$#0r$fq^lE~A|g(Wq*!e-6XuZ-J&&Y7UE;k&4ah^CFee2< z)(iC6Ymw--luZ%t?8(dG_zsbA>^{N8y%)K#?>Ymm_c*cpHZA2t?Ap}Nj*>pgH=m+R zfTwh0AKQvgv!&oPMVUS1CU%h))=rXd6N#?Xq`Fj+zW!!G9|tiSZIGe0}UlObrvr$)#Xx4%W_A%vm^(pztV)Hf<9% z>U~@s8s+4f0WMx0Zbk3Np>~1va?|y8@5&w7?D7FUI|H=g{-la3})wu^?G-f zn_6IMy$)MvH}n@8A*ZH}qJ{8YpY3qdk8vvkVN7QJ;l^#%y#KEEMQk zB*m4Y_^ha|DZQ;YTk?*>`9>HTEk{>x9y&VWvw+sZ#mg~XWrmIIdV#H_EMIJbgUxzu z%x!S8vB%5FmGzdkxZ2qh7Zxt?9*mupg@BhmsWIV{7G#ng7e-lOCUx63&{$eRRZ$it z$&9*~Ms}~_T-5?P zH!9O!BtvV#G@5chr>WpG+P6;TQ2BI@*2!|Fa~79Q&t>?6F{6WKqULSO@C{oo3v8Vk zUQX}uLi%s7VC0D{_uu&Q^nE=5co@gG;>;96R7fdy^f_wdT~6FJtHS z1yl;;ZQr0mN%ka)(*Gq2)qhZ4{4up<;{-e=v%gJ-gfM*pnczrsqz!vxU1&}Ar!_r{t}R)dsV(JHLlsAAsyNzM&(W5> z^mcY~sqeJ31AEtjHckp;9ogR^U|2_e%}!cd_Hg`ICkGF-(%!b0?&C*j+uzK#ts4cf zHn2e;Ennm-TQ(O{ydj^Ono33n&vA3Oo7=;k{P@)ffBbTbUw^&EyRXhLGFHpc6X_f{ z98J%u1TGGwa`i$oH!i1e`)UfK=OejuGnSXHGWhzN4Lo`*j{nnizJ6ECv&UJCjmB~B zMhZh>xxOwR`i}T;Nx=JjcPRZ`VO;G^=hi^6fKnPK_C|5<;tno$=CLu+g0_k%PBi6k zRGeeI%^Ml*7Wg{e#O>a829NCKblXn)4pa#|SJBhHgQLxx>2BRFpjFTH{&pS=9plRf z7kD_*&7~8&xpuByV7pHG-IMEGJigw+-K#BJzfjNcS+SnOg|yejuzyz=tvf?$uMXkJ z?r;V=Q^j^|Vf0i!XAi`3@k9*gPy2GPeJw3}S5aBHh+L7^heyj26d@|+zSCJ__X$hZ z{)?p+<5^)hLqKaLi>+p&Yy3I0m;IB4R-ds@0BnW*WXxPu0jW zWIT(Oeu|~d4Az=W#@a@P{2Uuj94+AC&HX&NbBG&P_X@P`7ZvLg5u2&ol`4RpNzd^b z4z!ig(Js!LBlWb2{dA(Ijcdafxj8y4uB9tny?jZ09^(GJJ3M>xkQXA}y?n-ZuV3=> z%a;OH5Bc%IWB&f)HNV`v!`D~N^Y!IkUiTg6i{k=2Cwq8*;s{?JYvJ3zRz8Sh^60=G zo^~GN#mRmii0@xr?c!}8 zZ|3hm5Ax^V$N2rBiFbRNfi`+qXe;r}pn`9GHXe#}|{CA)-axTeYC zl`e;C+%z1+C$K(n9L|0daP^yntN#St!av13@pJrBCg7VWkQFr!SAi}kVFL%B@vQTj z#5!*QGXX1WUs>S>0V{7gR(r{^OyJ7U@qeA+_11iXmgL=&h95D@=p(dO|AYBXpRqb} z8cqeO_?Kz((PEEDEcKhtvH)3uC^?M%O-3GLhFX<(V7F2JRQZN45h`TAI73tVODW0^T0i}bmeChM{) zQClEO4@-e9OMxwmBt5JIxU5B3B?@503SdcEv7%;4r`N@6<00_n7pFsDjEG3>{{bsd z0xVSlDGjL2% zz%^49HxXXhv+xyY@)Pp)7r+WiRwE)+gQOf?a*F1OFd(x)M-;|dA~Z=8C`(OKAw|@1 zQlb^fj8Z3GY+HVq4w*h$WVmaRDI(QHtiwUzYMnmmP9|&$^r9g-i;kjddUkYiOkk!% za+|O4H08-%qVU^GtXnl<&N~Qp-6o9?w=zOKDhYGnMUZnP{_A$&WwsGl<6PXAq~X6T zjhNM$q+8^$VciBwowl;mrJM%$3Yt8(P_sUh9p*8Vu8tANiY8MhjtuP_Qglm5n6sI* z<)vg>l~LrrgUXmj_U0evcttpv+?p-N8W}M zinislW#?v5u;t+FES?w-lPzqbd8k--$E$CH~7 zA)uAc#*74tQ{&j2oavYabWihdb_l^cy0-o zF0A6hr8Nv(SyisQ-qOmR#Lc>bu6hc|K=JQu_9 z4o?9PTLGYzY%QM022nvO%2FmTbq1O7pOPN?5jhDTQ6i94TKoySD#bQ5P2tc%1-d&` zId)Kq?n7!EY*S)a?Q}MkO(J8%1aaMeieJRXxOx8*Czp>&iPfNQpAR2KHu1PCp5r;I zX^${upSL~-J?7K0PM7+%8nigfr_tV!8iz$xIWMKs%b2F9bu>gdQ5WS-S8)mhwc9yY zU&;A>yXkM=&&6ZMxN+ekgJ;i*eR7OTXU{R%e}+?Chov3ePaYN5qKN);eO$b7j$>ke 
z)9yNUH`YqA)pMdt6t*4gZ*8WgTAa@*3B*K)Q;?rSV{N(EN41tD|uWOkQ}>^%88Orp-;QY7E0gG~$zk9?IaiL>alCO0oz*3@mgTXhG@FC<8#&fe%Av;1?5{53#Qq9_ zsXd%Nv`acY(A6LSwo9OEx4_&!F|S-~b1Szlw)1fKpupDw0jx%@4b*Y5r)@ z0&Zg4MgDbae=?^JBy+8|h?{3ixOt|SOUDwp+!xNN_z!$v|7wx`8n&H zE&5!Q}xG}hwi>Ir((%(S)-U3b@t78A& zVh*%y;Z%1cO^q9A6#M4H@n!}Fjxadb$MxY$+`2Z(qq}!``Rpn0-oEC;*YA1%?hUWM zc*>g>Pk8g_0dH>I;`!BKUR=D)S0W$%aCL+?{e8SXdyLnox_NfwD9;afib~S~zUpq} z=W_@6d7zVbJ%@RH;uJ4Wo#*Ai0B=Xm@$I8N{{H?VfBn?Yckd;DZ{e43>-pj9Qhxfd znZJLl;y-`Xi?oTq{%HRnu>Nz3U;cZBA4PoopFY0)tA}_0?&0;HT|D`zjVC|u<(^o_ z(95lyxtB}lP&_rqyeOzQBYv|H0U5eDMJcn^Z!$|<{>5VFf3n=`6IKO$j%CCI?Bgfn zoH7lM^y&CyOvfW(5^j;>MfpD-udqq@g-ydRd>Xzn;|WL^PiXoSLQ?}-EXJe72jdh*`SK=QpnS=AjIjk4c_W3$E6wJXs&j7pZxgr)|m$MKH zA<)$VSSE?HS(7S&l{HVmYc6Y&bTAY1tt9A5(8gTSXH8-Ec2 z(b@u7;$#oiARtJM0Doot1O|P?_PV=G!$k*WF-Z2Vz#Vg{SES433 z@=jO9HANBUWCdJA>Yk>ISDGq5B7BpS2uM&SOaLoBeKwf_SOo%*g#uT3xfJ%bkY_)ST>GUIIonVg7(#t~ z0WF!e?8|AVA+>|D$TkXmn~7UjiLdbn92aC_JvWiH29cP}i^9?{9^1vK*e%V(ZgD!+ z3zD!EVXYU5wT3@#x=LMI@GYY?w1W1~tu*^&Qs)#$?YcOs%#$cx zk;z7*LbB%;k)WGL@}eTrS8X8Iu9#B4a`q(br8EC1eFA@%YX`V>;4+U+T;P6B7dMa9 zap&ZAo}J&u<1=|&+80J|wJ#^j{b)?HrO0m;1^yNSerlNMOl7UUsKsed$KFJbK-aYd zdD-CZVvdc?N^uOmM4={rv!;~oH9Oc?ww?Htbdu7ur7c(zPz_ujlD1mie(;3rx9>B2 zR<)rTK`;hP_S<;UOtlE#N0f9LHtzetgF>){tXdG)n)TGFZ|0oL`qPdRbs5_Qc7 zXxw*53a#x`4Qwpk#lFKQ$lJ05D;F=UoIP>!4ie{22w`!_*g3nQr9TfvwOOopab<;x z2@@wzLRnQ6^;v36mzlwISve`9B$%2hr-XvC0GGNJa*8U*i%{3pMp;dh**f~t5#Uu- zHBb}Z8!cYO90LOuE?$J5z8>@D&1Lzrr7T^z2-9WDFeb-JilMe&7pMqPlj-))rGnWQ%dsYkeQ-PdeThN;-`=t{VB;&{~#mo{{%w+#g?Ma zsNOl1mVI(`b!yOiT!)jLTJ#>(5!<0feS?C4{Y(nB$P$(`UOGOMgQywX+WiA}hw&80 z>2q~oFi!+9`-<&ok6KEdvo?F34Fs&_QERTv9s#QwyM^o!k#9DK3=!4g_Uuay;7DN- zWBY3v>)1oDfX^ubqzi{TxYBcy8y7DLOmzz^Hgf6QX#tr5jgYiRdb{aAbCMI? zM>*8dCIywG^{TO=yW| z;NppLF>iJVbf8()3@fTQ7r8pcZy6a;&r%$MUtGveN2PR#|_FvBk&Oxz51J zLo93dDdyIbuwOp|v$YfO_0poI%9nk+BgA>@$GMY5ynAtkZ(jFtZ?sM9?@BJ8s~6`$ z3&##^6R;`~6_FNtx@raZ@~Ia8=iWV%Cq_HCfB!1CZeL;S+8~eb-r~!U^I8Dw)#!DeoEzZD>3*K|o#nx?PM#d^5WqUY7hT7BdhifW z+gfl(tj0S53s>}l=?+=n=oxgxkuz*jb zCjQa0#ZJ&7AVQOXP&NF5l<@Uez(=6a!)rQDZWD3zoP>LjIQgSg@QhZ%H9`*eXn8#2 z74Q{+3eM3YNNlrrhC1#dLv~MA!c&B|fR&F}#wS|x#G$-67%C*DXp)t#NkQf;in7%y z$WWpnRi2_`If}$VEsCB(VaOy3eJ8QWXF8=~Kb3kYu+34P9Zu@(bTy#TX)&eiP1xon zE>?F>%KZg$LN`zqxsz?7jTCs*leDgikk#98Gt9?UD-{dXc-E*!Vxkg+$*dquXNO@n zCkAtaI4ty|S*sItZ0A7icG|)V*y9(^ z9(9);`zyayf|IM-D4ZLusfO~n_UGG%_#9*K%$ifzRQ&fT&s`oYJF@CMQvlQ zJPTAOu|i)7yVZuc*sa3V*%EtubKE?gNX|?mqbQrgZ5wH7Yon~PhT<)yRMj?0F(pA& zkH`#e-G9uThfk%SpTB-5y)F2|uYXE$_46P9=KEj&;+voU=8JdVbNkUt>Fq%Yv>v{A zCru@{2uE%`lD1s!Zaqxj`5|iev`ewoe|3ziJ^OJBijYP~e2Tab(@4zB!@<=9H7y-h ztuYhlS|IA`>L@EIqo|HcphubRtjJ(V!6=*Y}Tw|rO_fx7B0km zg|QS{>&z|ic6G(p!U7);HnsWNbS5h{jIG%zUiS)82HO!6la1SC}tnoHmhQEa|Q4VX#^j%M$|2j&-*0C$a zo#Q)V8E8!9a&rndI#PJloy@b7i2~j+yg8r7FE=;v&1fbs&PQ?UgeS+Um#`~Gj*6^l zY)_j=PWVS;gnUFn!bfb$|A^`xpRv1UI?cNkIB-Cd&f|J?oY3aTSzY=Bu+9r$jXbsC z&X-<1`Vh|JuM>FkHiNgXN_h2jBX_T7aN$fCM-RHuSYyWK;`wBysgar}D`FZci4z1c zJ|idXQ#R&Jpls6=8tUaaa8R9--TL(RF5q0Z0sTGmIdOC@ds{TAs!=6xljN0IpW*EN zPaNGo#?0)WtXcLEVfIrwvBQyu+z2i=ZsF>uWjHx{srKTR6CqpHzySa-e z*CY?%x<$PBehw8K0UzJH~Wfli(ifG!FK~97xSz+#M zP6?wlD~8GqiBuLP2}tGAR=ttt3JIpFIn=m~Z8=etq=itD8cKO#Jf*p@VtE1I1GNHK zHJmzBAuw3S@VRFCI?FiKv6Z3IwNhyH9xM{r+rrKMD(+pX7BI{d7)AWx1R@8!pH-F(?|iZ4%J;D>A1`1aOCe)^)H|NL^9pTF(l z-OD}Vn%u$r*IW7Vn=SnGT?yZRC=?;mZ_D`p$2yT$@8icm5Anm_U3~ZNZa(~dk}v=4 z<^A7hc>Cuup8wLyvtJu{_+vSvU*>Y=K{SVkJlS=^ocuitN!X!7NRcXTDKoK)oPPvu0zRr-M!2Y;5zi1hBMmE!M+rgPy>Z zj)0XePMP{Rr0HXqq=#+79Bh*3VxDTi$|PMD$7--BMxCXJS}aSRjiJDhQKS;aB3%`x zglT|0=03963g`g6KtjJc`6%EKsDfX(2L1wB0THtZ2p6~t5g-!V?By?mr{8qke5bPB 
zO90DnGMLzv36FSrypomi7g!3&5)jCkg>SmL6kR?7SAJrdfEWew zJ2UYQmLWJ)mMF2F)K~>_6XhvLRv<57Ciz0TMKROZ5HXnzL84~sJB|{M@oX2ut@Mzk z%2}Q&2LTy>)}O>Uo=&L*=+aQ#0H;AGTm#5vE7BA$#$IQZ@@+? z3rp1$tW=V)o|S;*>=-O{qOlRMvYnrR?Sces=0{^SCj={9f2_3JaME=p+9Z_CZkaTM zl&~+dgx07M4kT>mP|8Lzoknv|6zyTLbc7^wz%QMBZn@N0<*{j57Ad+3B_5z6u^OrI>~DXB3>*Fd051tld_W@%`N5a`m?L0d;1HFa4u zH5IV7T7`|pN=%m|Rjf5O!gh@@ zPF8D(4fGM=O?;3@{XIwy_9e#KnRo%Lyckg+B}S1S9ZXt?x4@M>UJk1S&ddn+T!)_o zyk^FDSqK2HT!Np43BGF<6YsKyk_ZcO6TUwSG-8EK0ZxJnbX;7q=}EHcyU zn|XUBhbL!axqU(uur;Ps=co&0DN~s#M_%|RWQP1xpzoh-%J~PCTR&r0*#ve~Os08{ z0-aqt9P2aS#K2t63@zfqO%q0+*m3Wz7mq)LiiqLqmr1;Ro6oD~1)?xX=F))poh~<8 z_F7U=v4o<+*<@uZk)0(+ZthG9^JlQRM25;z1?qMxvv;o=hdZ@6BXD);++r^D8glWp z5xplC(0)LNnnn%sH_M6|?KpfwKF8JTb5@I*>Pn-Jh;)_{|F?&9wEN3D14RYKn6}WB z?DsLI(n60)%lXtet`LO}Xi4;=YfB2}>Naw&b_@MA+vu&_#*yutXe!L3BrcAU zlvH+Z+DdJCB~7)p9BggnMJ5okjelRVS z*#bpzG;GPBxhzkdpMGRVdQ(x5L|JY;SyA2;CI-<_zm3xe8`!^V6G!)y3#9F2S4kRi ze)fdAT9O*-CMpww?An;lxy~l~I_fyQXDi+9m%N#nHSR$@Z}C-x+wE>CFj#d)Ldm2M4)3*ubTpa!w!J!L^I6+!#C{aJ`d#jYXU~R!76GeCn!-IJkco zLzhqT@ZJz(H!d+UJizT+Biy@nT_EZjcO@gXOP(5h^nj;NAM@b;ZElQS<@(jj+!-3= znZVc6%Y)qSJHx|Mr@4RZ1h)?z;NGED9vp1rVcS6|uwHhw^YY+s-W_e^o1T4qb)ti3 zok#fc%mx0ucbDIvjPmE(i~RNN8Giktn;*Vwlc~4|57qkLn>ds&f@*M zV!ryYgRg(6=No~n4}Tuy!(W|z^;b9V|Lo)Izt8ek(t`EV9v=Nz#hq`9x$-iBo;yD5 zzidPKv8Ckh(Ic^H77<&N@fFv#Ysyr-GGvJ;Rwr(|7O~sZNhnn#YnL{K4Z7r2Yml~G zf#eM{h|Qi%O!{Qv)25LmFqNDxM`EgsNM%ULlqFtFMY_5{qgt^){WNPA&uEqLPT^thiu!+{eB4RdXF*=wg&S6Eo4htewnG>Rb zewZTjB9zb*Qk)wk!{T6BRs_gk>OBKXj~O_42u*s);}al&6{e0~gn(7Js+d;9S1jul zI2{k6Mad}XZb4IU37m{m$W&aTX5y8oDiEcCU#1oT8JhTs+O41DwKW-HJ?UyvY2?>)Wz<(NkK9dRbo=8mKRMNtykr6SSjL<1$1y3X|cmnx;pHt}h zDH~ix4cYNCwmVLu%4sG$?d7SlQKHE~mp!6JTW`CTMrT*{_(rlPESI{7Qc6Q=DDZ0{ z!Ksk|vt2kDZNpl(fVJuwn5m{>J1Y%Gy$qZdrit%Ta9)y%%kos5RwT22X$*D?gRwMl z$5!7F|K(m3c_z`EvYEqK+c}V2NNb#^8KxF+I6aTnn0Q(vBRQ6o%(2)MIs%hu_s*i( zIgfJFOtKB4Nz@J}Ry%^^xv^|slS7MVDW~FgGgNel+vQz6+I@^?0wa&wTe-HkQWP8s z6bG0RWu-;%8dcWmi*rcf|FF}a&iciwEK-?>kL40kpay;$^PT=bM-^BFa@cQeYq^OdFqyI8shJfSnK>9OG(u+bRJ2spnK5xPYH|vwOF~u& zrKxf#Oqq$w40$n685J>4ewr+@Q)EyRz|xzoi<*GgOc_}LEhP*L=AkQqrK78cg2F_! zG~_T{xd?m9m6)3>!g!GZ%jO%fP*)qHIl2N{3s@#%@%;JB)zw8`dp3&~7_fMr9@eHS z@NlptDI!=vYb}<>i*XTv4RW?8*4LdZF~1}wmhG8IY)wyOLvl1J5x#gko8xF@jORKt z)(c#%Te%c>%e7c7HpIzf8U9wLgxakkeBEl&eQd;ezm82&>)Br%$i>|W-0RBbi!;SM z?_kgcpdj%d-L?`P@a8=62OY#mB_eXf054P2MLUh zhSJ~fC2(d(;~rzSm(8JglR6tWtFv{R8asBVQN44Pz=t;bTXpF;tj+Ngx?CJEV&w7) zhR-iyFLt#ChJ`DNxnZ!<9>y=s(lPkwXGZ4V7%&m`nNgV(M#mQd?6+ok%6PlH3-Q z0PFwjdsSAtMZhwZjPzvE(~`&(m6qzA<+Qi#p|holte9}Zyqwv-A%kOw8#&vzpUamI z32624)%!ty{qY+A{`ES4|1ikkA4DPg_B7v!g8#+Uojkm-nHwkb7?ZTRoGPNfEs1N# zi|DS6;b>J9Cu&4tSers$eJ0&iDID4!$NnwRw3a7wbZ;T21*rNCmvXMFl1nG+*k6;& zj-n{`ROGNNH<~S(QIzH;kQVAiK|-j&Odb`5sRB?=WJdVVUb9sIYd5{^4V-AL709X- z=qe+|XFVR4#v}!~igk$nTv|ZS{#uUgso+54c1|9w-IL|iH)mqBc(|Z}a(8i;Y zPM+O3!lmvC0jn%d?k}KwZ#JC`DGYV*VDQKe0mqG;Y|5j3M+^-c{HfdE$)TDEy7$D= zQszNL{#vSv*RpHV8a8JyB0ou&)W}(6#pwxvEv3BJjH9g~44uj2N^cH#21|K)XE(3! 
z?BScI2l@5gNq&6U!`r(8G^5RooUY{jv2rf=Hgd67{I5rM2@G$grM`ro&U!lA1PJ>& zxH;M{AazNg>4Jz$j9nk(`n9V9ST}g~;JyIPLvdZ)+e^dzDc1k{vuphKo2&f&{b@dYxu1J?HgWStI(Kfyb9*dAr2ag86vm6^aeVPb z1~1=i5P5PLZ+@ud?JrHd`E8$wL%jX9hj+i9;^j{r-2bMATVHKv1ef+{7l zw#t%HFqNdN$s}b8V5QHH!YVm+25ACYsi|TeNdi`JQv|T46D>YRq{!nJJp<2(890Z| zz{Y>H4X@nLlV|7>@rHMh1BC~yEM9e@> zfJ#45G8(+(-G(x(@SG~dI2}6=8C*Rjoz@lb^Hst(NJ+p-8UHYmhRWg-JOl5LX?O-t z#yxnF*dbH#jG7^EH3Jv1gWMAo@Dv#IOjRWya~8p3+kyrD0wl;vRl_$)8P6CwJR@cB zix`NA`*NjkmNm%G|$h-a{rV(mru#J|Aa!T zPuOfVk#f74R9P#s%Tk#}J3U$)O$4&mvB%9%)OOQp3@v6ya0Oe#_mLOWLB#sKIIgI` z%wQucwQ^XYl7gvv3U<1gI2mT)Zjy!9+H71`rQy6P5jV30oJ=FIS?Y=HVkaDzIS^{& z$=1ju+6sy}R9MLV^fX$M6KNOe{-k(nf_&H==Ew201dhbVvd=G+HopY+du38>nM~25 zNRqVtiBR<=UOSW$<8+!_H*z?njKM;2@$5Rvi~T2f+RrHqEam6Dtc8tQ5+FkHj} zqotTzI}k45=Mxf5Kv)dh%Xg7ouz`)+cF=q7f)rPhj_p^k-Q>~pSKNQ{g129P$B)1L z?-t=Ne+pFn@i*W8{5wDV{%?N$4}A0UAAI@kFMRdGAH4eNM`F+D@)00F76hvv#dDmKHWB%v3;GRvx*j z(*>wx(33n_D8QvSSs-eL0;%o3~wt-g6-|_wpfk(>ZODVWW~Cek>+hpMUp#* zHwJRLBAoN0pzW`TYT@!9z#xc8*uXETrOQT;reYGZa!Ggz2_dh{3e38Kg99;U4#Hu z6t7-I^XN%9*Khi9dC*;;%z=($W;E|#PR;K5RMqIRvqqcxMjiGv>#%RXp1_s96mVzG z&1LYi5o069+!~gI2_r+soIY#Bfg}3Vway`byE5TP6Gfi$8O|;r3-o@3^O}E0B?o z7nQa{yJ#rOqOLTH&gODjc5M>KN@RO}GNEqvBm{Y}F)g0Lga~51oyZ9HrMe`C<{c#* z*aHeA$=LG`0_T+K7tBPB_ty~ody>wtFUAuB=EDWP{ zYoq{I8vX5=oEFEvt3d#u&Wj`UZXBs|68LhUY1>*Fwp!BF?9WJFCeLr|;KvuO0#$Xq zz1PHFU!UaXmtFk$yo;Y+p5Vpx{oEO-}RxPH&+|#x!K&xqx~H`Y2C-u zwmQBzR3k#90$A@)pXTS`8~k+h2EPel@#6@8et(kN*TuHByK&-#8y7Bk(09t5vuDg0 z8FA$1O>b`93E|d*c##KZ^WgO+9)Bq3*$*`$_VD=Y{XG5V08hT%D}YtS*t;Swzli75 zJuePlv!!WZ1zS7y$Zk?5sYZd=N_oP!$r8FrmWa&?B$ca?S))a6-E0a)C2CW%h$c-o zHmFiutw{E^>7*4-CM|CY8Tm6v&ygWDL(EGP(`mA#rpS_%EJK_?R&0t4F=AeXK$d^p zblk!x;}|v#htTPm`%DqQ`h-Px|H9Dr6Xscd%skUiSYYxQ3zmM)Tq6Omr5`hY&8I9B z5L@Cm9uu(+^IRo9TIDl?RRUNh!E&q!m?;I8i3C?-{!$S}ej)^ZjKbtu94*VL6a}nv z)uo-;T}tNQw#h&MOCN{q*;uEjvo=PFHId3##%N$2tBsX_)!JYUR)=Y@JY16n!J=>w z>!0m04K0tU=y^|LfdJf6u?;JHWTc(RECjIDOJ3mVC5Nx45T&;~-U55R!3qR~D+pxC z;wOYD>6GRlGacXPsrU&o`X>sICCK8DFjL@45id#S^%P|(uEKM)3C_@v!pb*6k$^;H z0%8^M36sG)NPsF}8bN{63Gtssu=f-K+$P}b_$h&|;|cYeOoZ1YqCCbEC&?CT?PPw7^wRH5)?O zN%cJ>YQ_7oT)cy2I)yBrmBAvF1XgM$VLLAam*rXb+2j-IvH`z!891$tWSyx$HmlsR zTkVAXYFivl%?WjMkUkyOn3q74s1feY2w_ipD9tIMR0g?_ZL^G`bpm=}?(C27qb0zH zLt)Vz3QlIXQv#)HqR5)(Ps}V&(&mJ*c~v^q4!InSF6Ub5L59nkIlFlWM>piNJB8@5ND}5%?sjSdfVYT6G=Bmv^ea1NEYA7;aTb%{ES}Zh} zgSv_;GBR?g&(g+lu`$-`-0=&EXT4h>wvHZb+E&iy(sF^UV%iQJl|Dsy{>t$G|MZ{) zS&|_d{x>|s|AuJz`FDY>-~N&U>o)K-RzcPH5}>_rFVlC2grXKO}9*lJvXK3o^qNKbU2M>Qt%Q6HR#qRHg|~O`IlDF>k6IGUKKoJ7FraVxH`z=>lYm%u-TAOI1VKiZw@P4s*2U zV7A&!+L_&OzBZb(LdZo0fONs^Sprvc#r#=lsw$zWu8gX( zJX)G+=!oxDEM6qmGas4B6VOyrz))B0do6XA8tAfQjt*vv7vf@WikFQA-qz;$*jf^> z-WDGRE7qBm zc=vS}PoH`7;GqZi9(glz)0Hz9tT}bgik>r8bR1vB-gZOww#}ohV*v*a&*#vQ`5ZrC z$k~B~T)JY&@X#V|U0ce%>&Dy_ar4GXhDKL$^4wDP95SG!N|pHRX#_@mj)&LBxH|j; zPn%DP_fVuRYYmroM=`J?i1v7E_Jx?!;B88shZ#GZO(|Ztio~@>1eh4$wQ??DcB|PM zA4=;cfsCp$E*)&=;iXI58NAA{1XTk9Pog3*I5@(+dk;jRG{$vNaNfHs3dQ@^1%U4H z;@Kna+`7(%^Jk?`5lU_qN}yG`T{41uKASccv3b)5X_}XtDQ&@$K+D_BnY8Fg_EwgY z8WBo}w+n^Y32fhzLuExihdOpLcI^ajo?Ya-H$(jK^>zOJ-7WrnKf@d2nVYBZs$edUrOL+cz`VzMXCXt2P0ull8@179hE}U!bJ2h<%&W*;g)bQjtqj zbs@Doa;YiJW^YZ2m{-K^$`VcrH0;^Am6FUv$_1kQUDgrc>Ofvn3|Y}3Bn5d>oE$BH zmB+5)T1vv-{%cZ4Tw~p(t*hOyW*oGDF9rI4?fmJe$Y6+ckWBzn50 z68o`!8wUgu54YCSd+eY60xtYLop>7+O(lc}#?!`_zF9PC&@%f2~u z9Gc7VZX-_ht>R3-4QDR8(mxcy#hao6_9Ta~*CpI~Q^vKY<=l8t#qGD1+#!hoXkq|X{<1v#8T67 zEVcTKWlo>7%>Q#tVkfaSTb7TEy{2L;X*mj<$?^b^`ibzF&T{YREEDMx0j$MAGg%TY z$FgX7RwpZBl|74f(kBJ=a4ed``aC^s(*>mBR9PLR#Og>@)<&wcCQO~x0#qvl)i4QC z!zfUNxxR|%xyzvKG6fx%$r!j!VUhbZOuT2Z##>h4Y6g-QDmr`0;^r|EAAu}CKLt^n 
zl#}8rBwC5!7?H-w5tb-NXtFFJNiu|_&LlWpER!OKkC3Tvnj-!gD)^C#>|9u=1Tnd-5C?6R<$+8^f7w zbX-Dn_qvfu7_f4g@Dq?-WIeNdRdEo@5z@uysue^{)AJCslI2qMh^GYuM)J zPl(APT#aVqV5EhUu|9Uo=U_5V9g_uG7zn?H*s^8}t%XE0w&5knmn7V6CsFc6@c zG#*t2dDgBnWy#{@0$55YDQjUc-4)|GFTeA{uYd5fK$Qemzy14v!20%=|M2~9|CP34NuDT_ z4A~&*gCzmhgXdpz{^}S_`#U*$s-Io~uSPMQS+tpT&OTVJb0t1GlhE*3jEq*WY>5fl z8rn?$d?IqwWYADlMt;h4-{CQUrv#Y!)6aX>Y|piFLgBy zF-GhNm`z!*I(2ET)67M$$oRXPMUD+q%l3GS90pC z38&AQaN)`-21iyfG_r`%(M61n8gYAM3Ae8;6TmWI_}VIg?&a)1X2|viEm8BGNm#6? zWch!JxBF*=c}*wTUxlKuIrLQeFwzvq!Bjh1!p*7mT1iuYHI)KcdA7@l6u1goyO0cD zdnz+yIaXWB*oniUAnq0gaVNJg4Dj&UHLed1aeY`6hBxkTW9$wOA3o*&gNHnO_LNs& zJmbd5AlF4mS|TJJ*d?9UrL7Y8Z!>mdgfpj4O8Z|)`ejvClu}Y$z{Zk7c9d)qvy9@)X@(F*!oHgSI6HhLR23V`HuvAvp`J#Ab))Wq@XZM2tf zpjkj_OFuWo?b*7l7S1S1R#SwmbdW7#D9^v;_X9T#!F+5!_Ahw6mQ;nS5yO}H94Gi_RaHOF!u$rZXWIsA&%XiW-jb% zV6eJ@`}XgKq5Yx1>Q_yXD6XDQeduyIGy|CN&c4l!>lV5Xh1zYKI(gI}}LWr9y7w zEQ3^u4wRL4cwg>4mE{6n%S0^o70?pE zS{5e93MsIZvB=QCDoYFN%-Psx>0pzggL#q$CJ~A(3zlbTkOIpBMFc3a)K8uzJ_;=I zR$!i&JO*yE=sU}x?>LQl>!)DkIF%Jn;-7Gr#oAMjbplq-0#(ieSI%zJ1b$}XC)O1h zrX(^vRiYBqh)hx;GFgdefv8BagCc}HBQj?ao+V3Qnpi$rhJaKB0@9T5Pf;Q;Srz{{ zMLY$rTmmNH;yVF1ze#xdP7>=MkGtz&4yIJOde9i0 z$bpn{>SOCEi`dVmh!Z4w9Kp|eKjuaiEYZzpp=J_>v%*mc*ftHcRAf+-os6d9O!PHX z(a}&vSzaDRS$VXzbXmS^H6|;qSZK5c4Xs7Y(ptjGH7}P}1>T z@?@c;r(5>QPWH&M)+fuewz?BuD)Pn&^;vMSnY8UkCB zP?1w$!5n=7EoBxO%w_RheGK)qP?4F!EcqF%UNm2z)c^w(CFUqAFh^Y(ZGoxTDvHuR zS8G;E-eoAc)u@DmoDAy9$|97QK4lULGiRVF{%*DiT{QtMRTcErR9QG%6Jvupm@Zr( zVm_0f77G@ObSY~M=CM{^4~L}-a9(MM z^NP8+EuDkcqS<)OSI2je27X4f@SG=)hrtX27ReL3M2=JoO?HNu&=|9p5?5_f%w^c* zKASy>0x>zuY0WdFF=H0h$sVac`I4&1ow#*nD__Fu6TP_m@|vbBJhEf)u^xqQ`{ zYh!j?ySbLp>r14-8oj!Z>sJi9Hn@<{k)>Q6S;@fQDmwd?vuod6kuj?hoivT05K+1E zn?kgx;H8GBQ5301XPNk&>LA+Ftl1N`hAOwE>Q;b?UUXIghMaG-%xd#X58wVBrP5_WCMBOxLHPbV8f zd|k;CxZ6^c&4K26@q2sNUXoAQrUKI9qgZEQiie{;F`qDU9dZIksC{9Ws zFD8oA;6Ujc3R){G=%}q0unHtUHd^2+m3{xotb@VcE=JCFF?i~L0M;h8PGzEZ}wH+J&+(k6kH?YzHM&7IT5+`~79S z9IofL7v22(?OFc&-7sH2>SOd|JwrW>-0W}T`q?&dj_+omvyoHnwKP{sTHtc1-kQs{ zf;5ioYvA^<$fG(B(%#g>*%LiHzAI2A&Y8jU0^h@;vUpdVJNHG!^X^^l-MS^>Cim~& zmJZW!|K4rx-nzj}ac(@gKEhW5Vc*_+z`NmVQeb^~@d{sExx}l1(~P#YajvS4{&Ioo zJqzv!*Q>|;_xp$Z{nHfzx?R*aT2WcOh|SwnsH&FW zN`=}68R~Y+hzhbCJL{FG6qTpS-E*mGT}t&KQyPxj(0tOFj%oN-w;GVMM^xIHGzG9UNvc;Prb?0UGFd{lPA6>3G-7wil2NNnL8BUl z^~w}h%SnM%v{R1aN(G9_6v^8xFK{JGTE2)pdD3$fMMYkL#57sr(q)Lvlp#7>hRB>5 zgl0|0KXodeiIZ`Sn~Gz^R4jbPVeIrV#?GIy%6%Mbye4DqGXqEA8h0NRJiS%$@>9lB z*vBDcGFB1eut*#)j5L{#mU>QMv9}0cfhoUftPtr6@p-wV=e#7u=jA~HU172qM=G#7 zPKC8e>LO-gnxM{_L=Dy^YOy+I7E6N_Ss;Gcz+;Ml)KnID%V6jui-G%e^o02IU1gZ- zGL!iNSVoRAEO!(IijxfHPBWzAi8u&+Il0Zm#X}ZH_Zc|2PQlSjAS-Yt{$UD)$EpyW zs77>(3UL|A#AhoLm!m*Twk**(vP9*`5h8Y&Uy{h^ljR6ZQ6w<&f3R|wv{VI7!AlbU zlW`ZYa&h?tSLaXg6u|OzA5W0yBuO1jj~`7D8SGCkrzWD7^6&$c zM)Z>EdK7=NeON9iW0`g~i!@?bs1d;G1;MykCJ^P7PI_b(S+VKlB&CrmG85-@X3U)< z$6Wm>tTL9xZM_Cbk&6Vdg!T(ov#ZdAx}p{A%3RE*U~MuS<%qYKM6M8abMkU7R{3ym zUpy~6b9r@m0}mSW85D&;hseOn?7b*n>rL6ZXbwbfqBD6L2a*fe6&_B6sS(z?vY2R1 zWuDRm7SC3~eAyyQmo8zBmKHOnPDVvp7Cl{cv^A7bRhYr-S<0x$%QAK11axNWFh_ST zlPAwaS7#w!UJ+Q@cr#mf1@jCoSZV5pjYA-A-r)p?$5OkyndbcmX=y(s9iBll4wmE= z;gyja(oXJQefJ};-hGgchxPUMKS^&9e*5zu0#yH%!s`DE$t^+&uq5MS-Ff_q8~2{^ z^8F7|XpP-}E^Wm+*wsr%=P8=@93(Sy1CHyxadq`0I5?W+OIM+(t|R^z9b~4=KyKPh z#(n%LGp9^LVa7~kCQlagr!ZrZ2!W~T6DKqI^YPN}Wd*z>9pWXpnxmzSrjm*PloC3s zvzV={hO&$-^R;!+RFHIvpNZnsDJ;;^z<%{Amg~~xiInLi5~tr?iBjK^)B97$GlDR5oFChz5J^;^ycx4CR^)u%yV z>rl=*T9S=vNm)p9iVkhrS{&V|MfWyM&Q|Mlrg|QSx6J0q4g-dd+VR!hXnua3%nz?) 
z`0-shKY!=X;|JyfMJu><+k)G7?HRpcC-Chk!iB+6X9h={86ERr?6x;I#@rdZ>Bjv> zUfg?V&#gPA+!`|xh0Zd@t}f>K;1Y(fuHfpZDVIjA=ov6)cgHe{%XElO6M0VLOd>+$ z2oIP^YNR@iTg*APH;A5cPdf7*XpUOLRtJ4@O=q*sX*tzFc2tBovMa%#j%`^SDlee1 zIF*i_B@Fi*srlG!?rrMpfH&#nW z`Hu_qBsnSoA9ow9Ef?eLw2GueUn1x5e zxH!^7LrE5(D$B{Cs<4R0ty{^8h!E%sW@k|mosA6~uCJpazku?h5}J0^ar)RPh6XNi z*CPfy_^%cx;k)~!GSAWI(M1N{a3ksewg9G8;p&NaclG%w*<%~zFSo|{W^_<|zRX*({wK$J zxOcFP7u~IVf9WuP-Z;hgL%q@w-@m+ig;&=v@ZX;v^6$T|bN+lb#hd4|ZHKN1H5vtm zn)WISa80DHaXPyiWhmV>gUyw4Y^YG7pi)BsNRJKm^Vr;E#Ll)g)U>Z_)sFG7H zPj;mYc@;A#s1V4iP@u3>k(|v6WNeTptx$oq0%cP3lt~o8ij@q}kT!$JjOj#VPa`Zx ztW$(v#uU6$r{I<_1*fPf*al9(%u~S1Z5(U8CSVyb89V=JI7o+vnvRF(OacNG35rt0 zD`q;*aTBpm8Ha7w1U@qMmvm&G#saU2ED~YtKb@6+Gg#>}jg`LBFcF_kf@LrXlfxuR zfmJce0$FO9M60koT!|GCDi}wou`E=DCBaG<`pcp3ISoB`Q5gG3I>;-a?==G*0jxO! zQuCa}?>Wn3;;MkT2wN9L);lZUBIY>@bUCgUs1nOM2-w)VPQu1>A`U)N@C=wvP=qWI z35rr+C1k0RoUck!p@5dyadG+bMCVF?HJyOOsRC9q1SiT97$=KI@DyDACW>`T#y?mH zRr2(q=XmjZ|H5VcKk;(@6hE=^g1sgY?lYAL@xOGZB1~X0(tQe1t`muN8b_?d zCnO7CWjTyv!}^JoteZ@UNRLP)AuveP=sXGbbiiu0 z0h(%`prZH@#!JSD%uA8Pa2<+L=Tn})h?)&X)D+C4JXw>C!Q$ldoyn#E1?rOx=-pw> z-2)+faXgN%`ZD;~Q*E9BwceEN%%*ef!*U7kTS`XzHXW)u4&@+ox-B5t(_ZlVBn zTp*6S_6+o7KWDzW9HvH#ursq_jmcV3$ZHE=DIqgs3bHa&m^l7ZQRvGFSZOh7!bB#F zpM-+EDhdjkC@Jfrr?(u_wXUpl2*JuGkX38ESiHg!H?K$vinp?1^LFV-?vhvINJ7$b zbxqXlC8N3Dee{fPfBIEA$f)F%IZwWLBY^c6zyC+lawWMn_-|?af5?)IjU_>reE@HHW{aRqK3IfpU{<>crKVh;Bq;Vt!Go{ zW=Ls}F^zHN9LrtLz*c8Qb~*F3!<#RTx-wM1oW5-k<=GluF9z71@ z@sm(qzmDSd+d#f}=_3B#iI-38d2!E%N4KrGJ!UB&X3OxHBWDNKv;Vj`+jcG_HCvO| zSTz!&G>8_p(~LMx4mY|mb}E@m&7qtr^QI%!hDr}Za!fTSvYtt z_N|!$C>!bCQ^t8wm|i>A&2WD&7f$yvG)m}G&FCr||f!wTMDz>JuueOkb^@ViS7jbg$7A|y@bNh?{ z#b6tUtJByffYsepK-0E(ft+xT>@Hz%xugSnF3n~6)QO5(n=G2~tEQ z`uUR_5J;G_3kkk{R2CG`QeHv*rp;1d6^Z34^783zle7jMr=zi%{f(`3wRds8x1URW z0|Jko9BgV9z}m|Jfx9|^y1iA~Xxbqe{4`(sl;V{W2Y57cj$htA;hPtC`1Zvu-aQ=V z?Y)b)iq;?K~RTEr8X=cQ@Mjacn=o+-~FTg(_a0E935|&D=Y^o%c6e`2N9R zetz1+k577eck4L!FSOCuwnLl)4cxmT>Hl?vQwN(kzOR8J&2=28uaHJ-br}a6chc5S zO>5mQ2G5@1&d4Yu7p_Rrc(O|py_`PL&)~)D0$4Y>Hgugk*KhFn-aY9^?r&bbnFreSW!fi|=n;M8zx=QO_wV7jK~&C7-rgSMKR-R-{o9i?G`dl= zNuTYNdX(*&MQxM1s2tCvtm+dgYsXVjJBe*olPTUYjht;Vq;HiYWt%c7rD|l9Ymi?# zoBVPua&~Bl@6?GeQ6@ZJj)1Hggore>Sdoy8l8)`lM3$=(u|onZSwhQZ5>_rpm`GzP zDnjeb7`ag^$dA#$yvQ3CGasxCk4$ z3G4U@=Y)kR5h)psAZaFE$>RIe&#}w+jE|OiPZhA5%p&hetPqG>9@OA#2U;(Odu|K515^1QX0G8WCDXwC~_c3C* zxOM*|#rB`%uKSb{`w48ao5&{132ZT&!gg~RHk-;&VmzHqt5w)yy_oH;u2Ntf&90|4 zv6X$v;>3&@AY)wz!Nv`&)7^-PYBGzJf>^BPj>RG$0@p{A9GpOQR21o9{v-yuifyvO z!BPi9{ZCmi=O0+C{tRE&=_G}#vLQvAorUvh-eN?3!5nrZsZbiLKzXbZjp?&Fx@8$7 z`z_Ci30+M7}?t&WE#kyggCOou*v+3KQr`Po^iUn6{`q@qY!7xWWtLq=~;yYM?|x&d?%;Qo|le+ zB|+BHmv5!7#*uVzmkiVJ<%jR3@&211`0>|2`2FAi<(I$y2deJ`uzvdUf42&M`0anG z$Njh~3x&p$+2KtyA8M1n=9!oXVStgb@)zimv-a@SAEn@X-eHO_pV}>ds}H+d7xaRV%o(a}{TH zm~edCVh)LUT~+hAa(ESwhwXX)+=Fl4dh*pP51u`A;?_-ptWgK94mon|hBx;gMhI9X z^5wfM-oMZ0)vFXhc5$pC)W4up)2?9y9vO1a_PDk zhx^u3US~{Z{v48%bx2FrCp}Sz{4_o4x2|UJXcS{d66vq-;nW6K+T$$AT{DY7ok=7t z*JP8VFP7^XN`f3{-IPj4rGQJ_77jIRr>%Coz)S@@OSg)1u8ac*J2`iLklwSG={a?t z#-?`a>Li1S9_3(rCp{;6IN8%J9rH>8sL_!@Mu!Kbw+H+BPH>>DMcR^e_x6|+TfHZ{ zIVZ5SZ*LPZ;UO&4Riij9mWqwpWT%7?ALWgow>5SmZ(3(-M0kk3fKCJr<(ahC6>^|D zpMyJdIo?o2S3@4h_Y`octC9;xD=3I{WK+5?7mn3(Vt*M8+cT&rOr)$JkxiM=Y|M-m zX*~JKQA7m!;%vVTdm9^UEG+ncc)IJjII`^V`2?(Px-0TKuj+%>@igb;#-Ktg~( z0&#bDo%qDv-Q7JS8TXx?o!yN`uo35&^uKT|Co;tU&zK$ge7cg_$ zG#1R6&2jC;%UU>>t3O=P72s-s34J4wkZwiH;^7nRUdHk=vb znwir!f%)wd>8x*1A0OJhh5P5^nCv;g_VwG?wRJc9cOBsPfumeLb&m7LPHdne12;iU)dmnqY?BwLZlUzA_jq@iibLHG^o=5_^6BXdd~@-N zpz0?7eR7rGZXe;p*&Td(W-q_ozrmlMzUJRwzvTDt?{n$=Lb|4g39kCHVtpVBmpL+T 
zsi~}+vVMyxt2Y|5XoW6wmMFk76kwUqGTT&uWli%U2gXZjnq|tEsfN^b8B*15NU1=l zV2n1|bsFT4m3q3(l;RFk%BEOSImcQ+WzDF?*3>SuqfScoVyXL=+0eehm9CAhOju?` z+d>mM7MU?&juGRh8!~RPp%}r0v6BUN(`*?(%~9=`89l*D4K1o@)~CE#m(ms;N+xJi z(ymL{MEUGieF2sp`3+_iH`tOl%8Jwy1EPlZCbqZ_IaNj!R?6SRNW+JVOGa8yJ=&a_ zRttttFeI;4lgze0bPF4*#lSpG1{LZEsEkOeG$%z$(g;%$hZ~brVM6jqd0cHtynM)D zK~`jq4Fg74(XZSBKS7pHsRdp|7I@^D;62m?{~>1h3Z}f$^zcm6!8=ozfNXt2vW*DK zG$AU>jD&0p(z2|`%CccdmbDaFPctPo*?=TLOG2g|@i}_L4bdYZU!QaVPoeBYCBkE+mR-jdzqeX3cZ|W0!&=mI( z9SOadnWD%1G-DPdo3J3kgn99%%!oE-YJ>q(A}yE}*OxijIjpT{W>f7v)>W@zOXC67 z)E}WEYl8r5KAAp~iFa;gkWDQEZHh?pEuto~j>&c7nAKd%ypAflfDLEnL?v@~7)3*^ zWxF+Hsm;hz+3#W6j2mgl;!aPt%nMgw=-n`Ov@QcWpEf-Zmxux>eJ6igZ?(U1UVTK z>~2GUFBiO>obVD@xjVRHZDNgsg`mma1siiKoNVkd(Kn_~4-Jg!=$uo|cX->KdEnhq_%I5UV+?1gV3JOqfy=w0E4h#tBhnKT6CfWi&4Q-4CdPbVElwv8{=WH&Y zWn+!Il^HH(Mz{%#y)DcMaA?p9d-aZp3*x5antq=dcasaRH3Z8c2P5f5r_7W%`H&=2R!qnct78Lp)ei z?#-$yFLsXa%lUax+*%Q@mWOLId9W^<%Zn1Yv?Puzt75paE`lff6Z!6L312=g;r-)$ zKD;jGyU&O7>Dv+>JFcTd{rl(?p# z^QRX6__2XMe;qCBmGZX|Y4I2G@>M<$pA~Wa{xJ5P%x2l9NG448WK^RiwT(6ce@`X| zu%@;6b81@|FHf{_WM&qJyRz8bn8LFBK{O9^Wqg!B&Cx+LMD(LQJ(`w`Xlay1(p-{B z+sHyD)mI2o#xSX=p8Am$GzmO9CbTnc%5+w)T+56Zb7`G0k*QN>GIPcpHg4F+kwg2{ z1f<8L;yobH+NvZQ-L#fnf~zeX*RpWlEEde2!Oo92bNujr4(;E=eyN~$Z`;U%X&rJs z%VBsy2Bmo^@|lHH$hMRhr&C&>#7Cvk+E~WCt}(1#*vZ!A(^)&WgPkj9sMFG{TR4IJ zTjq0o*K(FjZ)EArah&>OJr|E|7nF4|U0~JGID&~yRZMLgO?zV%;-?~%)u3WK{6)P622~vlbl`yHJO`X_!&GIEIpFfw$EzQ)H zm(eQtS~`0s6UR0&X5$-{U|T59_0C@Pk44=7oV!bs7HBtVm+63&*Q<#ReUC> z`s(2UUSHeE{j-~Rc>=Hoj;m8iz#)JDfwtE*FmM&%W{CRSm zH*xFi8E&3E&68U<_~ONLZeP48<&r?_8aK{f=INbB(kS@A7q33&)x(#(eg2M5<+}Dk zu8H4E!}AAefPVS;SG<4uR>~W`dH*>-fAJOHy?VLZt&^3 zb9}gRj_+?=UAU%$dn=TGqM>HU0rWpYmg(UTeLT$#4gfv!bX@_ELzPB)@unkg;Qt!SBHNAom08oO+%6FZD* zHdf~$EEhKn6J!l*GoYkHwque&Z=#XlPP{SBh@wUdhBercS7}FDfeF!B8Vt_qNm`K> zdF94xKF(6HQdy-T<+XZLHXBkYKr5dt+bwwP7GI!6T#+UTCE6sH>5*A!LT-&ExpkK0 z)L4*RV=l#lj4D%-O3{(208`C#Q z55IUF0#kGe&oU%NfEAl*NNlqqP!+fm&iCR1|?_`k)lPE z;B0WNJ_&h-WXorjjTXhWSW(?#A_qxFV5P}O`C=;SG%2qZiH_8xXt*wUrOJF_8e|s< zs!Dp1SKOT;`5%#;{Slez-N{JpNk(#SGLtmOl9H38Dag{IG*{`xGN!K7gobi6Y6V!8 zL$s;N)}}5)o5o}7^lrl+1`bz<}69IWKpsua}&&%9%I6k!A5irHm57jM}U>h z@{&fD6i;SJ*&^1~Z(~u_Mp`oGQ5iLj{C-`exONa@(?q02IcXjxG-T8=w{a{>I;&YT zZxoxCSF>qF8B6D-GO4{Ub=9Vf8lffN(qlrS3GEFgOdeyw>~RjvsJCV6NZFQZTNc&X zva!vDqYL|Sds`Hr9Z%<%YbE@8t(c4{2HG0a-`0cxdou#$ae#*-{+=%Qd$`lr%NuW3FYL|ivCt7<7+7IvYKM)94F;Nq znCO|}V(*5nr2}S0mN?kC;^ORyt*s00?tX*~iX|~6o6OuI(z6Q~T2xN$m}d1Z!qH<} zm^fvI3M(bas4_*)Te)z2|I2S`{MC0q{rZ0a?&@?of~(*D{3{REPyhUr9|c@L{NrDI z{=-jv^S9smAg_P()i?a|uYdFC?K^fKInMqQr`RLQQx~pt(UXR`w_UePFrWPg|ni%%(gJB;{EDTN2?W2vEzCcUY2ooJWtc*FBPV1l_aWU8f)yWlLq!4_vD9c*=aVP~Kzz|zK5Q0ro5fTLi_#oS0O z_QnDuO97OP1umBIUAQ?D>hFt>K-WW1rGU!GN^omwiK8G_xyw-jow=T&083Y}Wq_Hk zl0L#%&C_LLYKnuk8P2wXGl83%r3oGuhWJ^T5aJ+>DPLD2U2TbRbs%p*AO!=0iFb1% z!QF+Netj7l)Q>c8PvT@das^vuu~8I7g;ALjLsLOI<4e+ME>5PcG?|X!NldCpqP;AJ z(HUNh&vs(LNI%w$31rP^Uv{;HaDG8D7w08$dRi>!XC!h}&h`7NvU#y}C~x-;;q${e ze12*Oug>N1^im$L?-cU>Ns&~XLwWKzn}?4SU={J=Wi=nZn84qDozH*%xtjm}v6lb* zx|09=yoCS#GM|5b)y{99*GeT?&ELK&;fJsD`1;Fy-hEogtM?V$d0ZjqM5+KQj7jr7 zs2Hb9$!JZgnxutVW5>z`iM+l(m1jqqIXWwoOY_UvIXao8`H?h*y3-ojpIQNEv6};x z{vMS3xl$0|!LY$0jLJ%&c1RkH#ktHJ-@uY-lbADU0#n;tShip;A4>(hb?YWJZ{EcA zojcgJV>|2Ct!2r=xy+q0g|#b|uw%;xmM@-9=Y;W0nb^ks+0*Hi_ZQ`5Q(ltKf;rP! zzj`^F*R5j9x)to)xQaPlEmV~ZAu*yqO`}R_sxGBGKb^wtM24ouFrqM1;8HAbDP!7% zF-&b4&AiFu1u!#Mzic+MJI4vsCUE)Ge%36W&aO=>xO8Ga>zB@E#>BBq99Ki<_<8}+ zXzE84Q8{b~#RXX;CB?|~J%QM`NLi0%*6e9)-@ch;%NMe5pTJ_nYF4dU%9zGlYHBL! 
zn%qu%doz<|Ial^${I~`R^K+;fRl%HDQ)zDnI@A>rMZ9ac|o6jEI%N$9a4AJa6ut z=l+EQ+&jOQM;CYV=Jr88e|U@+*YBz}xO(acN9D7Q z3a&P)&3B)E&eL0WdG|!FgO8u` z;@$&3d;XGlN`U**SG<1p8J~XojhxFr^8Hs*zWAPBWc^>?|Hhx+{>ty~zvl06KJcIS z-|*jeU-GXP@A=!q7kqc~7C&6S$gelA@bCL~_^$x#=hJ8T;*))Rd*leeUpmJ>?_J`L z=a>1%`z!qKACLI%>(gx5Tu(<=Fw>_yF?X&l%U5`?X=4CexAbGn=D(J!R(mmXo;4FD z8`3h>h=~HM$%3oSm5#Knu%ltQIgKl=sa<42NvG7=O_~gC&>_28n~X}Smn*ddTsjOJ zFBqC+PSsR%YGzx~xX6z2%Ux+-<;kQq9!!xkOOQ2pg9o!$IWt*4r$azDVW!lLGcB1o z$AR`ag0UI)jPA6ew#}GPCft6HO?YWI|E9;7SFSF@;jE7F1c0 zRb)j{hB0xe+Qg*vBu4y@HbekeVoY9{ytY!0VfDHU8>>adL|sP7XIIU$pj&pi0XZX# z7&^*~!dgp)HQF(JyfYQ8E|iaVk~i4Phu8?ZEJzt)OzdzYqAN@Zt28H2^yXD+jB~yr zPB{kn7Q&wu&fVAGa@omaFt<5T$%y#sRFE2V*!?#v~tYk$7f1h ztRWFmIt+}|BqXvo!BM>kj_sx9mIzPRA$Eu%X~jy41!LI}BPts87}2Of#pqs)sOv*z zoeslCi9{>($uHFXo<Sf=QfcN3qT2yA~P$$@HOwnPS0I)4ai^;M2%t|m}ZlW2p6HJ*N zXF}ItLplcQGhwhP?EY+^y-R2G*kVP(~3=9jOgC2baUF*7I)oJ2gwd& zNwBXV$G4b<^a=r14QpnNV*Bb^_J7>Kft{n+wWUB>cQMSK;UleSbHz!yLM%-6sErUL87fBeD2*Kb(AV;7rt?`7xVBkVbNf*m^! z%f)v(gCkN%h)N+gEEancYfLnCG3X`8>eENowJ;St8E9&wr}5X2p@osT;K~4u1AX!pXbcQ>rGdt$4n zfwKV1)!a~pmDVS>i-(gV_JS*S2M1hj1-NGBYC=?N zQ*+D&ScV$9==YXw?xiJ5T>+sWOSa2az~*3WE-*92Nj}fjOdlUXR-mmZ5pH%2a<(GE z(V9d7b%KWr1FbC>WM@OXAS=nkl|-d@x-%rSKjrb!j7W|XSmiOTzJk`_Lzp-!k40^j z%xoy2qe2=7Ba)diB8I8uAuOm4VQF0m^DDerSnkfYv7wxpmcsd&SzMZx!>vVmJXu%F z+mDCwZdX3fx217!b1b)ar*Qp~Bp#p5;?w&@eEz(cPhSeOo)+@xLB3R^6@2)-jlcag zkAM8Kgunl=h~K`Q&p*DKC&-fLzG~z9x0U?x`7plzEJs@YnSAjfmv^5P$@w*shc7DG zeK?8vs{)ub&zb5rJ&NmkGQ7@+VP&SYHF|UHU_Eb7cW`V@E~lpDbGS8+jg@J%MEEi$ z#7BVT$EZMG8bSkUjvPRjIeoh88mF2XI9nHw{ z5=!!O<+@O#f^25z1lq<_(HjGib8=_E;VI&)RyNnS}-&$H7cp<@1QyNg9NDv#JzG|@XY*<{ET7N(Y3)pFZ=$`aM&MRP zoqWc~(xK9bjVB@^R2t2Lnb6TpYui{Fo9fuUL-uF=O7`sCE*0=nrV28Ll@>6vs$2z@ z^14w1s+RGMG>)kyPvBUq;H$Hp>goy#3v!vzHjbGyCNoanFK=dY9nqFkd`5&%JYd zIJbX2H%{;2;pJo8K6ivG$MACK$RQ)En`uWLQet-Tc{}XKe^O>wae#t+dKIPBH_XSZm`Tg1zem-}O zuMZzpf%X0IWBmQfIsQ<9_2LTu{QN3^{_PI$-t1$=su8qy1j_g4#FC}%tX%EOnl*jd zxG6}W70Ak!KFnX_%oHWST!1xhvH{J4j<)%hOj<6;TJOq)O-^)dbEi$#Yvx%}K2e#V z*@!%;%X3F+Fl2-V`GT#oCId#cnNZzn%IFyuG|#tV!V(uIukvQrdS7O*^Wx5sPr`gar#hUs_7Sy+!suMj|%m1uwHD!3K31t&ZDeVws zO)`~YOi8VwU4LQY@Bxjlt6|X}`L=XCheZ;`19t=+GLsF&&*~MBE)#_5- ztV87_J*sCKGHR{~-G&RED#tlc)8Ph)10amR(>M9 z#%queBL^o&lZ-??vXey6sUpxU14iT5S87N}K_c;|-Z4%eELjT4DrYF-A1SdeD-T#FU(BX5~#{LGePCSFC42$x0@s z&7m=RIwOKQDfH?f$E}Gpmr{oKXH%a(lsQc$Y*<*qC!4D|{>f-g92museZ|s(O=888 zKxR#KWWrcO+Q%3%exx2FvwP8yr%iK#5p`*OXv)xFitNLj3M1AuT5)PoAWwEB^6p?3 z?+y>)^}&1|Y%k#S(i}eSOlNU@60NxdDGYTZ+TK`jrB8n;{cH^JwUx@PmwO6k4&<=_9|yTAW~ci()+TY;4VtoPr3&+89garw?YHtgK3B5T_x z2UxRlI|~=CrM9}6xTsX(A`|KB;V&T3#6s5qb6s6QN*|0gG%!)tb@kBb-5V=Q1&_wE z--hV*))HU|V3c^XK7tu-J;9Y3#zrRSYwKgIYpA}?L{lFNZ6mC8Ot6;cEj0UJ+NV3V z`dT;`>0m9uaxl_UQ#ANG+T!PAFR*nGT-o9yOK(SeT&ykeb#cPm%^7dOPk_H49`3H{ zcVeukhnbRTUcMV69X;6|9Smig6l5vD((S2*frg$slxb=tpKok{wJfa+wAF2JG1A7% zOqW1w6Vm#5lOEtru%!t;dRhd?Ya<=)h!bGN3%HVe-KBVvvkNFmEDjXN+fZO#!nSikLsPl-c#!%&AUbQC%zxt0S0F=uKCyBg;nyvcDsRb2AIL zJg0rtN2RJ{kOXcxv?sSQ%eVOaZ@zUPv!9G^?Y7j%i!6~bY9;d%F72u zynQ-SDwsBY{BkPayzk`8Hxv0*mOp=(!r#A}%=hmbq~%}2m+uPr{9T5$`qTLILk_Rr z4dum~5}v#m!EwRcijBccnq^N-vo00&nv7|)XXfldESVk4p>+bf6B9WyBbWWHnVg?C zf^9XjU)fQ#Lv^Uf; zRj@U=qm_;cEzF!bon_0HGH2dgrca+NzxRb~TDy|<0QFEE6ZSvU<%j zIt7MGkp9@FdWs5%kddB5anVpphZT^V6wAoUGFGizqC%^AT!Z@Bi5)G}RLj1|Yx0I> zQ!#v)3Mb`8fb-{1sc9}QoIkA=WvZc-E0?Mn@s%86N=E$EtCz87`&MqAJtHVO!PDzk z`1;i|K7DeJS9h=T<;%yse|(2m_pZwNLwn+ z5AvDd>chT${BZI(f4_R3f5<-k^VJpp_2C+S{BV^!x7IOxo}9m}{F1w|g;bZvZoQ1~6fj z2TgMwsGnj><#<6(jShu^tm08xj2NR&b&D~hCz{ec&5Dlsj!a$d$-)i&ShYQr)NLDHc?;TQGdQ4JD)P8CqsdT8_9UMjyYRp1AvV!^^)LfuSD}5!;irJY9-w 
zj2S*opGw)+nwbXF%r&IjIDu5FfU13_wAkhZF==jpCQ6wws~=F?1g`LFZxDE5oGmOh6~f=ed*f7 zW%VY#unz^5T9nm@ZmTpYt<+$c5@$6`k0Awmq~~grnBJS1*WL=5`ie&=1iab4mmM*0^+7zUTq!W8l9M_Yw z!9A&r?#Y-qEn1WGX%o>-h?A0_OKXA-V-vJ!O4Mddk|DJTPSnH?VtiT=o!MiVnKz9E zg^QUpWC5MY)1`GgSz5ajDf1dnuG1(|9I_eGH<58evsg8yjDuTixp1tJo9COkaeg$H z&s1=9eK}r(WRwr_PSsJ^$;#piXh>rXad5;g7{_~g(rnp)b`o~u0vj<96qT27w3q)w5eM)@?>GMDT3 zOIO)-=m?v3?_u-qPguTc6U}2Mk&!Z#0f7+&_YK9x))|fNz0mKYftjF6U!yl#y}F|- zOI;l;diU;ysi`U2%Fv;n5tg!F=7y%2DhWnqX=Wt2GBCkFU}d0VpaRQCOJ9&C08+BO zYszy1G7D`fx*BRMl!vV{{Aa3SD#XW2orh3acePRGJTz06ZuWLK%hJ!y6@MR3Y~(%W z=B8LGDKQN7urxQr*})M@Q+X`FGSC)WDfzoJ^#oU%XnxciZCM)X=&N@?TA3PQV`7Mn zyw}c92Uinad@KwIv9}=1(V7TnTVmWD30JTuuTe^{wHX8LEg7Wz&D)Li-~e(36h(=H zsVm5)y}F#P`jIT?Xkyvqam*b*ibZXsSk_+4lJUctKY9qWMkdl#G?4aOU#1rau%afC z)zy)#8WGOcx;PF`$l}7RLLRQJ;PX9oe6ep7&$s3Acw08NHYaj!RVZiI1#xxvK<=N2 z<=w3zeDScHFQ3-(<7e$s0Z!zzr_H>1IEMF68+re%fw#}9d3>*chj%i0^D>9G0<71c zrtzSWn5J1Y5bZ!zCqTg0Vh zl^mIv$KJ*qRt-yFMn*Ud1AHj+cBD4AFJr<&Xb2CXYEUS{B0{K48qDb2R9edlnJ*|< zB9NFfp_#c8C$L~z7gO3>sjV6zP^e#d~=$b031Wrm%m4d4!^Jl9)TH_n* zRZuCoQjnDrKbWjkxqb-PvQiVMC>}~{!zguFP#FeHiy25sPBN<&OlRZrd90j2joF>! z|F1V|)}&_UPo1FRYHDi(a|Kums@5!?#mNIZSUjs!eQx*WRcfNpIg?vy6O1XPxvqlo zg0ra;8fhL~Nm0IhUU~w#Lo%4q-ptx{D>!^)A4iVv=lF?3e6nw+3asTT7EvzH3?CGt z_FgHkn>TL;le^m0Qa!4I!O`Kw3?4*c!eAmJ1~5WUs`Owfuv)rANwC_f_IN26i;Inv z*AJo~KbLiaQst%q-B4`TG?C z{2i{HKERV(7x?_eeQupU#<_!gIksB~Z2y?sXHRqG*ip8vSkCTs>o~r9H-~oYRC}u~ z96PR-o98d6JylA1aqqsI!_Rnn`wlPfKj7`-C%k*|lrLVs;@eNZ;H&o^)L5(+k6-ax z5cT=X_k8jEE${C?zB+oCuMQvL!@&c5cj74jxOtI(KfT6(KfT5OzQ4n7-(KYGnfY{1NoCxG09q&7 zF=>hov*&xTaH*dx-I+AihPFv&jO`E_F&HL zzRcSl$g~Z9Oj_o_gt@Ma?Nahw8q4oahmp0q)He!}TFhyiXvLJ7j?7)`#d7)WZ`m2a zo`Z4h*cZtLLDs4bo-AGK%;MD!%v)~9%x?kxF`}NtRSjwqm59uu2eDIoX2p31*aywWO%VimYN&;?s=?k2b_FPzPtvUbuSo zz&EfL1H=)jLyah|wxG1pkP&UN&r|gTS-NzaF+Z4@3xk=xcp$TvgfV0B0A?)>WA?Hz zrY#PoYe5hb=kyf`ddr6Th$8z6vOGzvu_a=dF+qZmegdpO0am{xErJCc3aH{!El5bQ zASS_t=y)Syl1zw8Fd{lyj|c%(WQ0CZQHJuvGN6Bm7X3qY2oBRBFkG8}a83FS(!^gj z+&@N>pm=Q}QuK*V)g>mq52<;5$Sc=o*hm3TrN~j%^NX|@lCMW*t{y2F0mAQq!bdb}PPvfbH&ul!UkN;0+7M5CoSniOSf zl9#GMVX7wOsoIQ4)}S)Jx8O;Gretl#DcX53Yg$LiNtoT8VR7Ba>feo2pKj#KvUETd3*ZfCB@3h9wQSA%q;Ba+kXIsr4{5BmPjigf%eJMxOMN5n#Em#)sd5DS-oM4 ziY+DnN|}RD87BPpr@yJaS4#YqGCZgN>*s&`i{JkIpNg!%|M_2j`1>C!u$1Me-+a%T z4_~Y6Pu{%e#D&Z3I(US&+qScM(>4|@T}^4pNP+_f;_2)oh;hT&)=6-phenSc7;0$= zNVL%E(-SR?p6CdOv~@JlH`GO2TT=~qce1v}#>`5fWrnSVHP+^T^==smWQ+t@W(vHd z7z=cadh1}NYmBSl)Y?EnS5It>q>B-?9ZBA;&{z|t3B8Q^PW%s|0YkhLixcIKo7`jXi{fC2Va1enQhUVzol z+Js;S3kLhRksQ*OlmS6xM1)cxh)^b&o>V`Iu7)b+w2WqMYdw=|rBWQD41Eu0R&4>( zD$>G^w{86Rrk&3pH}Up8fo2@ zSul6Xe@Y2P~#S2)zco9=3cL}ZdN6N zuIfjPP*F9lvt0$2l1r+qy+zG4HN2!ig_W{ioR>p+aX$H3sp|AN3Z9hSDW%scQyP3q zX%z(hKWMF6GF$DfQedS#UNEhl76H>N!Plmh3s@|{iHN1e~W%+8)R8esr^`l3rJyr^^Hf>(ZsnbVQU@clam+3RQ)Yp{^ z%NJ0NWAPG!+ni}kojQ>j)4OPDtRp=wQ3Y0T|Go^A^GpGjf-2?jr%oPW;ey#}&sAzl zJpF_GrSuhS4Q2VVMJlY6{9#I33ndw=dZ(p4r`&~k=-?;nv_lH8rc9p5;r)9!x_1|+ z1sg8}V@i2=U4hOyzWd@0fBgKl;OYkVuAC9ryx{jAKJel7L!RBa%)P5;d2r(*x38S# z?$z_$7nnS`BS5-#f%8WXa6}MuV&5LFojSp7LEMR5yV$dS9cTCN|*zvp#$NiZ@SR^7Q@_9^ZN-)bmf+kWyq&rVuzpu+98eXP2AtSnb!ipx3Yb&cYVye zJ-c|fcQ;=g*vHT3Pw{U7<)6>5@xL$cz%Tdt@ypZf`$X!kmMF%x1u(wdf$;*Lsk7Xf zv&fIBv)yRxw3g>As2#6I`RLx1)c2scp{KZ^4^@*4XqsosBxU}<4Q|Zc>CfWb{h7C= zFEdyBFmiQOX!uAKhh5-DEpz zr`l6D!;!j~_Eb%=qGF<%8k<$zV9L-+6H4k$^4?O&O6F5MF zm{dKo#Y2TP`jm~+p{helmp0v&uNlm;)ln>46UEXs(JWjQ!QwTMELt7OoaF+pMZrv+ z-;a)2el$<QhHO7 z)Pt(zUNod@FfLnwHB^U|0zp=x7UN`_#+FJc*P~&$HTC7bGz^QTwRi|!`ISu0tf4D$ z6jNernH=3nYe+ryed`$MKZ1(>c{HZwvb1eDhd0%8^>`hRZ#41pZj%73T7Xr?#bX)l 
z-q4?wvz(YOCt+u;AvIaOD2eV(r5y9R6jLhYxRnU9s-rYnQ0c~&mOwT%d9!y?Fvn*` zv8O$h^<(-mx7?dCiPjW_7?TrVMzWVVkx!DlnWQCut8D6%gxGCA$ZA`EcV3}DO zqiZacwy_~5<`$R=B=iLfS{iy7>Y8ArXCjrinN;ff=xQi;(V1hYXNrZnE#BUJ@%8J6 zrHvzcM&?*MI1>?*K-K6*>c_P(L4b8sP^DyXpFU>+yZ0aF^o1)NJ#|jafu$hpv#-A6 z>mPnm=OI*(rQAuVO!oY`UH$`3*N7 zJm%1;v#i~^oh55GFmu6T#6id zBkTp4{?4wr%6m<9G_e-kcstq==;=YAAj`vE{)d?<_7)b{nVYL=7nGX{LW6?va&yDc z-cGPGb2W3v3}@nl?*J8emxOiuG-US1%3np1eVu11hm!_@^DJXn**=X)!;I6sOr z^MZM>HI7q@eK|belY4uU_;9U~A09UG;cf%3u2=Em<_PXxD&Xk8C^m2OV%Z8SR;{z) zlLOwIJQv928)4je7|VmFsXTg`Bk;=L_=#B7t?kQHB{AVRL&l6YVoZY_9TWPqXj&4N z_q6calU>~1+`);?3J$iGu}Xk7BQs2$DyM#sKc#)$De!Qi)X$IMp+O7_52i9Ml8U$} zhD&J}lF3*>QCI6gW^)<|%G?7_dlbG4n#lpFBS-oN*_^n?A4+f6V=x!?@@x|m0l~Q7fTuXQ)01{xU7leYSp1Z1y>Ct zOBgHvyDTrAqUHl$+c= zcbOYUPjda>QSKf(&fWb-xv=qLPD(@a;_8h&+`g9=dk*k&*C)K$y_>hYw((4k!{ZHW zcrMs{zI7AtKH0_Jubt<=PjB$=mjbMB?!oVm`1XqvY}?XE?dT91n*F8NGFI@@Hra`e zsV=lnbEajo15FdHsTiZf(2*aJUD1u9)gMthPLrxmBN}E|)3VrsDeK)?xU(2|W&Q2S;I&7FS!G;;__RQ~cVZ}^uHZKn0ll6l+wlk4)htlPE<#6S6 z7UvaYeKLp*>pWSwNZvEof=M%tX`Nr&CItM+7d+q^xM4O^2~yE%c?8{=5MR-h%gTC{2qbC!lOV_}dA ztj;<9v`zD;X_7yq#(R-hYezzvfT>8IL3w%%%F-h$RaYuZ4I&~mh>O-IIo^z<1Tz&@ zv9ZPsjy8}ViVnfSngsRNA}BrE0y=%-5@cJHI}u}gGiY#cL6)Wz z4I*O&T2f-A})5!r0J41(sGX9eRG$2Ll0@jg??ZD)Qca zbTBiw5nu)4=_|;xcf!KXk-i~e)HF7;V8v?YEm^_3&D&H^EnB@_or_SJDo4T9t^1GF zxd@e^!oTh&{JQ|_pZxldfARN!{g+<_TFNltAOBT)u>Pas>RTy)`{%#;>c^i|Ts@Wd zp1OF2%{%w7P(E+w!X-4dwo_PCM)-gTyxn|ob#M`2S*b&4x=KL1k|FH}?XrhgVRxhlr&2Y38;0T7SP32fwS>bGLjjN3fo(>M`U405(Om%dyHZa6du;gSW z?=d#R-NqU}7iT%fR%#}9Ckt}|+}#LNda&GFa8+_(Sy|&I|HnsA)!)A_{rvpp^X$}k zD>DmIj4g~Yk?+Y+aAhvwva}ZT%C=Yt%q+~z)c|{A0iBXIL+MG=mG42jru&4ioD}vZS+B9Wq?fK9(hIqnXoC!Hn7hri{#Fk^rlD zND%d@?$jl?Fgo6uW!1Sdp57)@UBhFoH&m0brouFmh!wZ+@}oxDa-bzY86=us7i8EnB3Yx%jinRR1H&c z_1C1~L)6Emc^Raq#K^TdRfW^E>7C4(J6#?AyK?os3agc?mat{(diD8^i7hI$@(Xg* z-mV1;W~)Ph3a$$Bb4ZAfCTw7TK~ol$BTChw!6}nFSTKKhM7>96!d5^Jlp#z&d?kA7>98!$Q|_&!^Ubda-slB2|8Z@%|J50V9(UGZY~(N zyHF!gtm?33M5`4gV@$~#X-H;?0dW~R42bHDcVJH(J-cJ;-VG<8o_Gan;2W$#P(*K{ z#31Rz^vSC>ptxR-l16>HZQGT}CPCJ^ElDb{7O#qA!SZnCFCWO9WdoSCB!p=T0+}@1 zp9xd_7&p#8`5#I8WC+O8W3|W-7!DDH*K#*fjUMWo)^rcK)K9T-i{?gI%pW{-lE^``USPbTY3GP2|qzI?gUC z418ChVcFH3z34781<7zs`UVwwi>yqWrUEvd-0rPw>V z;pNv4CwDK*Y#s0n2%@^FnWgJCvS`H`b$T2nznFf+#ofUU2TLpY|NlR-+?An0Z=CHNuo1XfSeglN1y+J8J109lyxehg za==pXWi8M%HdM0A>-{xMsl2Z}L5v%HPZdS(dWTc6!=4 z>d9}^&Vo=kdj`5YFu=`$KnE*=oUHM8u)x(^4^L}j0$gn9>taiQKrAxIpRD*8hD#+k zv7wI1V@I<P@2@98+v3`padk^??@@zQgE=F_Z zS_0Q^q;uqOG}}K8WAplO)~||S=hiF^?yut1u?gHgzkru#SMtlvPxy5IS~ic$XW!Tg z)(EiL;{2G9+>eIAz6=j`7i76J)We-3`98}82vso=GzhR-1PxOKpbMHBS=K&*tqbOH z?Bi`5-?@{mD_04K<}-iBOr}ljWMa#BbqX0}c(8qZliF*g+$pF8vFD^GlbR5tE|p%Z zx~d8_7OSOcv^pit`W1^fxO1~Q%(rCLBo@z{q+)B+iuo*@-k}Z?DybPJjjLtt;#q1@ zdao2{DZN<=swOm!Qh}ueu`4lFqbrLk$V^a|%HI@JDUb6r;;9{8Kuvi*dGa_rGm(PA z9Gb?~t9J@2&|0u?wmLkh084pp^Okk&+PzH;Zm$?IjI8WbGBT5CXsA)qqy(@l!-GoC zl(JM1r3A8%QHEFN%~S*16>KT5Rqiy*$xc)Mrrc?mCdYZ+yjf~;Rs~j{?A@u}F{lg= zDpLzRdU#g_meRwuch63B{zhfE@%ojsoH==r6UrTv2X=Dt+%YL9IDh5{x36E|-K)oZ zF2H*7;3oHPU**Nqd%O``J$-zSJ9lnzOWkJbMZ3wPMqSV0P5bsBfL6whOaMQ}#Gx^(# zk9YBE+fJUYS;H-X>dkpGxGl%=>ipR}+PH>aE}h{&Pj2$(t6Ti<>-+H2J^ua6O%!@XZmddodqYa20q(_*7s{vXBE5HiYq^}^$KS-OtA$kOc8Q~kMhqnOB zKg>WjSgW2m2B@CWM*oTouxy1styTpTEq&fVq*;i zQN|1s;0=)NP`dR7392FmSqlE*&UQi0NtZOijvVW?D7vaU&TUUrJX|1uI&s`DD4Y z()ZMG^>7sz_LXpYdp5^6C9-{f0Lv!WvS^$YYbLmIY*`}rc31ImZzC@cck6wDupi@7+^LNbsSkFgB+QW^q0YqmSQ22ZjgN&k zzSgF=NyYDKX|7H(>|kSosi`6QCWhz<1hh?z(9qXMOJ6E*BLz_Ae?dxsr7yrTHMGFo z*ct;}Q+4=I32fKXH$hWNUqEIlxbns?C{$VkK3KcBkvb%gDf1SvV8tqS>^Yz!ONqbQ zvG)LnrPX!$#vK(^3bd}@d7!Qh7`!fJB#?}So}P^R9q=@2r9X(OpT2(ll@hO 
z2Mu(!(Cpmey zfrz6e-c}~^ypfvc%1Wm<_9pr`3cUPW9PxK?!pG5GoqMpa0L#PXSUPS)*B^l%ka_VaKiDJqQeylk4P${AZ(#{7vsGHq`}PNO#UE?EfT#Pj=;U;%E&wuTAI4y;Z!w z|1sa**~jm9PV(W&vY zq;OEuHz;=xD)Cp!^GdM$q_H*XU55&+lvpe!wn`cPQ$UrI98KxaG!42KLuE8)-G35GbnLZv*%1xIFF%2GSxg;%3XxYbMxlTP~Vf5nn2aa;cEQVu#y6` zS8CVJZEB2_f-Ge^A*ClviOEucr97wnU4fS}oTvcn)QN-gy>H|CRk`k5I>~tf)|pd> zIV-@rb>k9mUI}PkKIX>N^IW?kFnacYH?NcD}6T)cQuaCee-@7~G# z1y?t2@bKPUZeP2~6#>=N3un1?<)WbL5-%U#=j-?HdH?LWiYuijOSyAU>A_NBu#}!E zr6|2u3aXUnmB-4RgtyLJ;KISfQb+9PxKir)?7;#%(Z3V3|kg-*|L1H zGn;1lb6`mX7dB>aZ*MWr4p;K#RE_LsB@a%Pa^q+|XZEMbZ#;^{3*4DB*@E$tlw{;4 z)U=r~rpsP%p*#f1Er(wDXF(1zuKG}u}xZ$4v9J1 z3>Mc6jL{%qKo7ivy5Sno4HqdM{k!88(gTm+?)ZlHBvc7Y%GMz|SA*ywy@?&FLARBg zl3BbqmieoqnY}DRFcnDa3}0HN`Y>*aH)E%G(b(nAm`QHbwYgH$;>w6|PUP2FkulPk z^bxWl<;J8G>XDeHC4kW&ey}D|6T;hvTv{kKK=XP9VGe- z)}?QlAwB{uuMnx=6=Vspf+7qE8Eiywq#nV7tf&|Rl9G%`OEVxfRa=UN{P;ACH||ZLy)N z)r^idL)s_m)7GWWgsHl;PuG&7BP}!&rc7{_*ZMG{+L!6&KFk`{pE-pwbfqRs%QuhF z;d#_W=h0bE!LsIBfm9twH&%1%;}INKJ%o?v#j;ge>RYAEKTLX zt{UDS@8r7+3wd*J2Ip4PaA;vU`{tIge@-c9me;dqN(mD(eJKetA>ZGG9A8ti{4E$7 z>PpT)4}zVHa5L?Vzr6{*ww8Zo7Pqm%!@&+mTN}*GO)(H`D8SM*kcwGP0hU0-zyy6g zfs~F3M!Mz#lfQbeOpGkCwzLSJtVfQf;gpivK9jUJflX<%)jiD9pguoPhVIGNyTr$=9R3j#eX z@v%1`*u#o2Z##Ugq|somLzIsbgZ-Tt;BG~HkcS{eA6t2^tGPBIUe5G$cf#Mcg4%WUd?%>3?sVP+Y0`aV0!txI%McdfS+K?%T3Km<{;p5lBJb=PfD+w zxv>f6Cd$-BM)Fumc&b3r5FIVq4=n>J#%T03qGvZ9G(Xb9ptnAz0$_7ZZ7ei3agzPD z6HIzp8Vd}q@Uay@DM)m(#?!_`P^gQQ90vze*%xCyDf;-j*wf#~jX{CF3`vrTGBb&$ z@&XpLH?pjA9BZbIXJPXwrq`4(qejm4%49k!1~aZWnCdJK8ixAOQXImVoPN}&`Z6Zf zpPCpK3i}yQG0=!9dHtAKG>G;ze`XBr$BGdHSW@E6f_z6-l)16JK9Hkbf}{oMTwE?? zZ7O#^8OocBWxTpt!i6JoY}?$Ir3)RHJxwm;(=AviE$MY@z1g-cfL*(TIdCX~PxeK# z|6nRdj}=QLS;Ljfol-e21t_U!?L+*K>M;elht(vSW0izl-33{@LfsEpa4sO)%vA#)Vm9n=Z58`P+wWBB1^$j zaZWOs2@xbl29uQ-sitR8g4+wT64e}7qe_RW_)3inCpt2O%fM7g zW=>}2tSM?ftR+k5vvk=4H6BX=SWa%b+KZ*6WKeQijjkW{f51|P2UC)jtne{J3<_0y zw-zlFn2o7bd$NX?7vt;WuHI=_T$D$B-6-bIo26!nSJE&j@m5Ojl``d!(t~yN>P7V~ z!&NJns_7h*I|@%6Kggy{s|0}yxN_wbSFfGn%-N$HI<%Y9rw?;m>9G=AJ%9d4-hV;; zKguNBFJC<2-aP@20P6bnYwX;)gS~rqb4pUpR}G8&>k-cVJjp-aTvLY!`Ta5f``0}_d%st3oK3|j zZ^}nHQr%!j%@`YXj^K(qGfJurDIBFsVYN27mA%L;??&#(9+Wg_QrV(UZM&hs#)M}1 z{dLT-VDbVhrZ2H(#u6*0&NmZanJ{ynnV{K{c{40oI8_=#Gu_#-D43(GVg*=P+}vKo zlYN!EK3>POqocTgWH{Fk6>(}$CVRFFX7NH_rpk^TuO<;H*{6c!-7y6M{ zr5oN#8SoMA!5`rg+zlUbMo5Yt1C^n~bPZYdW>BU8YsrQrW-X6m(!5}SRe)g1o4R&a zMk%suaiDU%JtLITY)?g#E#+fu875_DodsFd0u(7(l_q4A7?LV_OG(itDNdXCC@lsH za>55nB@(JdK>yzO2KJDR>?K7m-168Xs5hSdweSto$3NUaHd+_&U~T+E1!RNt1V;u8 zh|nWEQkS@Rfn17-v{D3EDO$4LM_NgmWXR_wCx|kmw22hVMaX+1qU3#ovi{*3ghXh_ z`}+_br71<5*k~P+V~xm3v}9<44MSpV$PKq5JJf>gU~BS1?8pzYV~D>sx&BrZg;+Bp z+LoFmCmOTdX&&k>Ep2xuR(deGR>~MxI>*}6(QL_tR#VzKjA@%}Li;phCeARTbGk8I zT{g^^3ZIGIslnbZ%?XHvlk=GKm6Wk&_O7mwoP z<_3wCBe_07=H)+?DcRqmy^iJ9A7&dTujZe zH8jS`Sh?HG1VgE)we|GS)K`iD8oCDZ*a&@nQ}lFAFwm09T!5wYU@1ZErp8tn8knIg zpfWHt!_wLTYdaS;xLwcK9It-;35kj!I68(XsZg>Ci)o(Jr4HTAU$R0?&7e$?bK~wq zbv{C6c<}j~PkHh7Gc{d<(sQL0WvEa|KB@rgpMSzX{`Eip`1k(=S^wsTpa0HxKmLs$ zfBBu~uikU)u2saZsPhHG#vDC!fMwdYWwuJjy65(q_VvwLpmhpY<$qx4- zNgiiMcr!F6fIvrm0hl=ves&D#n7=2%Uao>P8vLzcdPSX*09kfroy z$#u@_W28tG!MFW54X-=)1deO+w{@^oaNpPQi1 z7H11%ydA7?vM|ERNCzKhTl`(@kWA%J}TY2p(T9;Piob zwyq9f;dDm(A&3S!4yFAA$Z~ff z%fpon`7V>4ok?(ZB1THItVg@M6YlCte+MT*T;1vK=83O^vz$*s6lP{Jvb2Oo!G;oR zrQ9JH9Ue+ZKR?2P`>ClJ6l^IyT1v_WCH`u`>>29ah0~?toZiu_f=P+1QjoQ4;|jH> zO6k4oXsT9wuau!eWk^tozfy`4+^!52D#L!t@Lxq?mKwmW#93v;4^o#25&hNR_L+h* zWmvFPpr*uT4GQumBE*l-U_YV?H%LQ{8qOFC<>~S;Xx(PT|t+UTy*EIt!jX~ zlJPw)J(2wU9O~p6SYKDA_EssVii#YFzn`a?ra=jIS9-E$&77j&Mo zHmA=X;?|u@T)lpt9lJIOxOZ{$_GKPFxyQZxx43ch3ilt};f3Jn!2@Xk-n`C}Cy%&x 
[base85-encoded git binary-patch payload for binary files included in this commit; no human-readable content]
zL0NerEn78Z>a>Y0Ts)7t^XD>u@nTl3-^j*oyEuIEJU8z@n7sMz?x#Fc52O%!TlIq+k`ebxin5nqbxFp ze3gsj>nD&KKw4-Bi2@w4fxZNbA@+B5AUPy}l9YIgk<)eDC zbIvGEt((c^?F%`xVHOv*%;D~lm3)46CvUH8=f#EfBI|f{X`LL~!24^f`TW9Mo*o&; zr`!8+e`g=w9vQ-Chx+qia|fR9?8%euJ+-02=Lbgf_T)^S9h%0O6$4o`xG|k8;>Zqm zAl}`AXctjz($309EMYJ0|vV$2=7Q@)O0=iWsP!Q%sakMWD)01hQpG!?< z8pR1Q1h9NuJunF9J4stqQBeog=4{n6Ued@A9&B!dtGZ^07j6)&yCb5ol^exQgx=TxcNo-l43BriGcLM+T6e8cj-cpaxR0 zJ_f>F#i>#2k-F;yDAmkhM*%&9RyHLmGLZDR2(nY+D9%ZvqBx)WWyLhAuAoQPPArf< zqv|M~I(dw}dv-8+(m3*RGYOJ5rIuYXH!~$v+F0vW%^5hLj~3nX_w&-g%EH_POZkmz zR-l!c#s2}8<^KT7Nc4z>IEH2x=6LydlarTAdPX{d{sG$aRST3`8jCUWb*FRd#w=Sn zg>|cE3s^1Z;QkdN%h3|6#kyqz zW?PoCeZx|AZC%avPfl~^<|SUgdd#~oUUTQuoBa0MFZ}Ss_dI?6jO*90^X$nJe*XSP z{`=S8{99n^k6(V}U%&sse^d^Z0GApz{9TS6J9L=xdL;x180Kf^uxY~$lvw^(jXV9cAg_C5#$5oB{oNGi*?Qh7at= zz+OEVF`%CYSVMdFVz2<%vbl4#Xx7R_0-Vd2uyv!FnRuK}uU+B()hj%Z?F|90L)*m> zUA2%M>*O=5<+F>XX~V5s*Dm7do=x15XL#%4Nv;cIy}YNu>WTo`ah~2fBA|7iAKzW& zr}vln^T+G_`?p*C^6gnZd$x_Edq*&9at&jL=h3FQKiO&KWM|mQe{W50JtqpQ3=}qU zqr9~*^*e~IY$I)>)<8s8GE2-zt7k-hwHeJjxzT@wz|4$v zR<0`L;NGTOIoE-wH+%8+)82fz--n-{3P?Tg%kR&6^Vjo!{O9>#ksd9vsmIPB*Kbo5SKt$XGq21G5ZV~U@XDUqHwM0;9`F%|`If@8?9S zzYB>z0$py>fDPg$xT>Uh6Wpwga56W-UR7-tQAH_T1oqr*E%9@(B+^6G({d#*LQj5} zBjqu!G?e>nlt1+Hpo)3ycu z`uAeS?5P4-3z@rMKFimvWAlzZ96E81E1y2({)^9f@bU|uef|v}e)yH21*HCgECpC< zxKP2Bf-DuyQbUNJK77K3E7v*y$u&N?d504MTJz`4rE{xhv};t!Nb$HwbZtquCWQ>F ztH=C-bu1gwj9Fa@S<zvao#ubKAxgwNP??@c z{p@6c`B*Yz)FcCM(!>0y&Pt|3{StaNu4ZDl_ADLQhm~V{acJ2j?jByprELp&c6uuh zkIVk{*;>@<%Nx7-?*2hO+}_38tDE@z+9tleyMvz}Y~}qY3%R>zC>K|EhEiSQ31!Y_zWZDygLR^?rlaPV_giT7&h9sw>V1HKNfns`Z@_jl40N7*L<;h= ziHQ!w#o0lC$yk8OhZfD7Flb<3nm21qWJHKIG^m!cvavG9S^&!O|A}Dz16WGTM1NR` z4s$lhbMuwo92$zhz_6XQjh5?aE9dZYbD~*;QWne_$GTOsIk;~%Cy#9uS;wKhi&?jP zER#lcpm&D~Iy4c7rmjew=jJS!GKdumCa`SY1Pu)K?_A5CZL8V0bG-n?K_1+_#@o-H z^YXaE1arek7ahgzw^!NFbhy1Q0xMwO#6Hnzda+J(r-WGzqY z(xqE+-A7!$cvT?l4nK>YQ6RNx!)8{mTtj+l28H=Wj2%6WQzuUI{)-R%D*EWVZ@%aG z(-$1rcYxWmXNp0dq74tOU%8y+0>}%+@mnQ&Z_J>98e~lvK8&$L2QzuhDCW(Y&a%Y| z*tvBR7fu}G%BhoFmhJf?hXk-Tv45+YLAah>0;b#6s5KmyXxm*ImvM6cHqIX2%_k=h zar?p%KD~H=dzbg~?AB41=F z4&-K9lbz#0qCit|l?M&mgi+Z>9KlvTly>l=M5M5_7nuTCNmY)-*0Uo;5p1f#7aQOR-ns4M2B~%wSbiw9wEjeM)(Mfg(jL4ooq>Lk_8EA zHX?RJ#93(7+(V+orl}?I1jszp5MhX%KU~foYAn|f+nHo5aOO%*q3EJ=SIQf^Qq@YF zosJIF_H?4Y4>pRqahlnJ0Pm+~girK~Yg)>COUGj`+vf;}>ItuQt6?yzy8e@>C~sGn~my z(NUf)-gsFgtt(<_oF76(nlzk5fwWLJqCM;hH`o)TcMu3tOHJs++p*IC%i6&jOM7c< zWZyv)nw`C>gls9D)gEU7tdDsL7TAk8T3X>Oib)knbQ8tzAzro03o$sTD%38rF9twh z%~Pim*9DwZg?uq+>7fR4qdWzU^d$Q_66YaM?4=XvvLo8viZE9pcL!741+bh2u+%W2 zy?D?%0VG%PRDI+(2Dw@j>29OVM9d9!ra%m1d9(mlk_U|vJ!qEZPhDmxwJE_Ag}4*r zWJ#cv34Y>@`|E7*5`Z*_H|rr@x?ZPN-d7ddo#cL1Rd)d_HFRj>phd6jEUd9Nw~~8t z#?w)tOMuH@r#@^xe*g2$_wcRtA*D8h?O(U7uFoF@K(oRdmnbj(v*{zBhQkuw!s!SGk zt!4edE^@xMOl~6vPK;2$Dm8t-6(ic$(y_XTUM;E^)Ug?T+BBqF^Ge#wv33>t3~JSg z`9u1!V$5JROdiJe*~8g2Zv^Ky&C#M)55#yqIlY1B=Qi^4;wIjFvPFZd@9ysr+0D22 zcJa;K?R;@%InPf_<E&A{omW#0uKQjPa`A3?d&KF52HnK0j=tn zk{%mIfR~fNrOLY#AafQ6##KjZs6bbM=o(*FQU$J(R8qT0L0TMD#W^$);O^d`6+^`t z8Q7->BZdrM%M(>} zR4tvO);2UaT2oq(!L$iO1stY{6TVsiX{&hsn>oC9iNMn&W{m4f&vvC6VD)NU%J_k; zSuk}Fs~1mZ<-*BqTepzIdp59r<5KntFdP>!xO?XcuUOcS0^059ZTLoKReeoqTrcBj_pVcsQO=T5xX3gcKIB#p$ zZY49T5E~n30V)MqYG$~LIF4?pdVXfAvc07^#O~q{mNsDUkTFb}G?OixcW_N~%DcB; zaOuKjwr<|W%H^v_PD~*$C!e8%hiQ>51z5j}Blw$&wtxGBeCL|j(9QCF0itc2*)0Fx zYH=!82xM(vyPCx_XK0Y6qF6mUwrA|HAuL`nhvfoTYgaDg(C(ca+q0V^JGXP@(0)!I z+{4j5@{D(_7tmVG0fEz#`!@?z?d0sC9h~03o#T79$aV);PV5o5I>@t|M|m!Q_4KBI z`PH4gxU-iZzBtRT->L-nQ+)Ph3nvbYX3^}XbZZwuRjCuT4ZSET6;P@0q_SxsWzGF4 zYUWA3wgOn4{ixSj+G#rhtmdvH)#!*VwIRCDnplCXm|U@&i56PEvtP709uWdpVuQWI 
ztwdCb2L}S91+t=S35d4FU*O3%N}PgdBSI34iAXjgD#@6*RB`fBt+k?}!BM6JMat(w zOz>8a6=Z^MsKA?wXsKj(0lC<82eOMj1g^X&Yv@UXR$eseBo1LWXPWhPqRk*Z-N*XS zd%Q0LC;2jLst;plc{6FA7tt2X$sZnG~Nw)(PTZwUJjgmdU%Bu9_LaBzPV zySIn2Wlb=v7y4k7on%B#rV)AhM&uWoQc!G8UZFX;1=bW4*-}`nGVU!YEwQAs#)6t! z6PmR&r&n)xCQgWC$)a?2ZLQ?esn$Hc)q^jd4B+c$1Nio7Uw(eskKbMiJiX}6@2~pt z=c|GI>y5zGXM_3o+adfV`+vR~z^~8x@yE*n{PegNUw_(#_qRLp`jgf?yU>EiXKJ~A zpn{_tve~jEktH)@nLZ_&aTB5$Ha41}V`3RGK7kQq;-mpbGPr*vJvxWby3UWrjr}Ms zbC>&aCNtMjV9|lNbRCfbP9cf*_{Z4dCx4)Sg#3X5S3w~bL`7JVne0eqkvDZ!VYIJ_ zp>=r-P4h!(kQqpMvRW6-izHuXBJ}ozxahQ`Z%+|5ThLyh%G%BnOFIhzEK3crbS?rN z0wdM}H43nt9Cg?UVA-0RYofET#L3DES1TI~s_>>jhr24qsncsS?bPs@f-hAJ)ZbZt zud{%bfK!TqRaT^0>{&0rM@6$#E|z$@KK8_TSP||j9)c?KU}J=fg%OSdSPHUq0$DDi z0R1&s`xp&N3v{F?LQh$=fvOl+8YjBZB*C4Q83A<2kD^^(6xA_4q_6i~$2^6Ch=^sF1 zLdQ_O07l zJYxncXHMtf`qk`MGLLy9`>}K0c+Rb#!`4YXST(3AJIA(W=g3<2jBLR6!KLgQQNf{6 zl^h;Z!QNr@Sko<)#ckqP+Afg=trMBrFp^0PqM2Qn!JN8mW(sIbZ;vOlsXi-vY)QjZqQ7}$Zu;y_kSAI|Ej!&o|Q0Ba@$!h?8Bfoy;`8g9`0~~kzPPcG7Z;cD z=-3P%9vaWX1EaXVYan;F_2BN-Zamr9oj3dY@_KI{K0iE^_b12kc+W7N>>tOo!&AAs zaU?5;)zY;%ghGEyQd~_)bT%Q$S)4F;a~gyxz;dT;yn*i7ehjTjU~FAJ&C&x&_O=ta z3Zr>(E=>fc^HsHZKW}115<`PXjfx;yBr?#SPyws}RqrY=nCQ?jR6$TxQo$h5?kf-z z;;EuY0UBhfnU-G8Zg>g&c>XV1rG^9@Y;+%oq}8I$DoSebz|CDP@cwb3UPpl}M}aC; zw!xrx$3fbptFs;tm0Km(^VEB2;w7TMO|40&B4p~?kv_r1_=U-D|G2bIh{1<2cRwvZ zEW}MkwtPQApBgHZcCHua%1y1oX(HeufE6GPRi-!<4fAqnSe#F0VzgXShntHHfu1h- zI@_Zb$WIRT7H|_sP>v@AdyyU!OpYq&ke^OPK_(T2+0-?urhCV>|63AgB-=J`(3V}f z`{^xSzj~(0{(ZZsYf(#bQk*;oXX%?Nnr=saUX~WY>ejU*xj7kxh6JE@RW<93u`o8) zCaI{cxj>brK$nT=j(-5lLhi}i*Nc#lK!OFZJPoRbmgqpaPixT+c2=gOCPp%RaCa8Y z9?RbCOF4UD3l~mr<;1~N>{vg8`BVGQuTv%6>xvlCr2*4NbrZlE%;MRjS-*TH$M$XH z%+Z}3639AvbT`*OIm5j>R|OpJ@b2^1{PvqTRNsBWk3asv?|=TzmtTCr2Z1LQq0#{B zAFvY0QegFOIi`jTAK!mKpB_Cmz*57-IT@LZ7&eOCyAH5fK&h&_3GULilpfP2@mTAy zwRO@~qp%eHW@;unP1<5`NR+@>0hN^vnKx%4XHTCKxV^?{(JdR-Z_ zhW6>j(0+XwGkho$#*Sjig1Pcsw`#S))cS+R_U;t8lGg`!aQ5gfPVC>p&h?Agxo$E0 z1h$Uv-o*KXJGpdpAD55q=g#@VJi2m(PtWh>ru;8=FKp+*C);^;Yd2rLJjSH*zR@-UQ7RAGPN2(9gR8(88?n{4 zge6-Onq*FRk}1*YrX=N>k(6Ufa*icQSyoyNEj2MMG}fFzfvf-lHb0f@9%Ut(WjK4+x5YYPl z^+18J0sJno_3Nwt^7-C;^`IMXZ;2;;tu-$`spHbp`{;!0+oD5*?)jdCO9&kT*TAUM{Vpjc64 z3b2Hj{X$iOoGlTupAxU5Fheg&&yOB0Qs`TkMO|?O70F(dCU{X0<)ID5#d= z)$3NX_uv7JojS{ftJgVp<`TOOo#6QSYuYg3mp}Z$H$VT$cR&BmAL6n8@$dig>%ac3 zfz?;vf6UE#`~GWw{Ou3ki^r%yOhMMU%U3ye`mA`Bhd8=xFV{|-T zg8T&@0!RuAAuT$Jgs@P7Cm(`5eTWPWB`zWge=k1`n$*zpKMCzYVgQ2$c2pS)F99nx z6sTZGMXMB8*;z{46H(DqowP+&1IyjZ8&5g6o0|tN0$EBFVChv=a)B!aS_-mMjVw1; zcl_kKO1$N~eg-vEDDWp>7vUX5oPQYc{^8OOg9sP63KdbgVPT#Dgn9?!EzqZiG?gQv zX5BiO30#?(<7qAi#ZvW84+`Srs4vf=Brk(NUp-!KI>G}~Nd|+!l^?mWp`=IplN9Ph zL2?uoxha(9Bx``xu&jUvA~j`&G^wc&{oIVv!w0is$wKz*-pQu|STA2Z<;C;ITsVK4 zp+g3cot38LVY$1xX!)VxVZmA+mP&M20G64Nq5+r!EPJ(_mHciSi;u&FW@g5im>6MX zY=oJG89J3Cs-jl`@_anp|Nq=lakf+~ElUA0Ur$$>Hz{N4_`z&lKaZpP*KqOF7S0`A z&xU2=Sw4FR!+SSnP}c@b8q$uLV|y}h@&M*fAI7T1Q#rJ2EvF7|=h%U*0%!XKjBfDy z`2(Ij`jlr+AMn@TYJI_9`0o2}`RV7M`2M@^`RRurHHcC}ga7&K@BafVb?o<_f6?-< zE}T6_WoelJtt$@VysBYBHRJEFI9x}MouWt2egp)DYULM4AVYeFO2dy}RG$b&_6cJ|pJ0af@n>XzZ^jSxmp?k3g)`z=wIGe{tMla#tIyRF zErfvE@#IQpUf&W(`m_gM-0R5)fvpb@d+)RLo`T5OYkwN?@;Pr!oDuJ(GJ`=!t zJD8tj|HI=R^4$)+zS)Ks*IIJ#ToX?1sV5C6n+f)TcWD82OFfJ`shdv(UTdf*Xm&<2v{X}T9fQ8_wQ{(q?-kS zPIBK4Cipm-;jJ^n)7}Jkfi5qRU?&q|+{{SxwW2uOg$9Xo|8XufiFcuOh7X+zLTH~I zOtVC98bo_g8SYMDkSiH#T|^h@5VmqoRk_*H7&n182P-r2j;(Q!-|8qp>7a%{#i$tM z{ygM9J=LPX_BsN@YYY{@3Xtb#kY}OtZgR3xXx^*=efxE1^qAqynLC~J8`iK#JhS7c zPjmXhCC-U=c=7rjF5bAy>8NQmOfbmSZT~}5y70Mp{(wl z#@en~ENhn{@RiN#jv}3M*gvcRXQs60;@tl19p8aH<2!P2MsE(v>;3Zwv3LFu_AeR5 
zUXep9CvbetG!85t&$)H8xv_02r&rA2%$iv|IklBxt)pPio0>*Evn;mT}&xHOf=+k5bIdr!VR zGMrcY1_^Kt=h?o|JlZo>n?-nZUQcFsE1`Q)2vwnW6F{$v zcBE~R8w1MY8B&{1U0y8p5`rj6iKaLumV%@h>ZK)8loC%?Oe7gm5h9Uf#zd169xi|) z(CrtbC6q@51`{U6PED3D*gNAdpyT177IgjyJt{ZLLlqEpQj(DsAjCHD3$^& z8v!kQ0UcL?8h1}`Et=)*qShq*Sk>KGCA|w|dCPU(Z z0^NPJ+^-m)5aN78iT4X5(J!1>Z*den0zb}hR5h{WbH4IBJOr}TFu&^KIumhjOw4dM zwZI@-Z|M`UK0XwtCsAIMO;TJa(cyk1MF*1_86TIs#?&=wK;xQ9`u6O~(uMOhzXnpf)&G^lXs>e+$g-Dx0d@x`t&p^Zxs~Qf#>OSk zp+hHDtys+!aoUa_J;91)E15QBI%z5C#6-o?vPCNs||MW>j zK~$L~Gn+DINH#4Rx>Hr*N_j&s3LAJ()13nkCMCn$<_ZO18l_#scq1bAsaSh)Q!ME<;aDs*{#86`3S(m1a(A zzBSpUw&a#ulT&I&VWp+mdV7j0b)?ICF{x(SdWPX~rUZ+v4~$Z!8H|WXG$S$Bj+}B= zO5}G}HgTt}y&v8Bg)wMs43p=jGJjPzi`M6`d~-f)w-&Q$XDOR^)MMSoY*wvFV#TUx zmM#xr_QC)r%=BXPG&jc0ac9B;1Ec3TFlw$H6BgPreW|0IOV8p}?krra$Eba!6+N2T zGPt8NWBd9reOLsuN5wN|Y%=r5r?Fsyk`(5ROJv@}1Qtw9V4=v8nMo|4lfsI*sjOR^ zDO6m@fi0z+*j*!kQd4f6X~m}(+j0MLd-=0E@bF4|9^Giq^SfPmE1>o6Ngt8keEF=8 zkaB;%c{PCV-VESt0j|#luwH%Ikr%hx@$^P(?p|uf*+UiV*;XJOAfJURbA=w$87P3& zZB#U^`ubDd)|rAD0i@++N4DG~t4(h;0wtr2j5JaC_|k24We!7}eqb9_Us@Cg#o z3bGVW&XzENti*6jQT~q9p(c7LBfxfH=chZBLiSw``MsG%f zhoyj)6;T3M!2($Rj&lEwCV1(LHSu;Z66i7_%+*9bXHKRV$C3zF8YOwrJk?XgjnC${ z%Gt}>8ecm*0_6Sz9UKJuoCp`dit-bW-^ZI+@uV9!s9?hQ5u)4{uwvz6)~#R3?tMEr zeC!B^jvZsm&fRR?C7#{cOWb_$j0Z2@asS0To_zKdpMChAFTeR-%fC{QD+O072kW2L zD*x)&zy8e+zy7Wb6TbQ41CO4+;>wNN+_`g~`?v0K`NUaH?Agbu-MhGaU=LUJZR6OQ z`5ag{p356%a&yZJj?eAG&M|dt8`?mmnge4Sa$!y#R~NM8+|0%t9bdtLQKjq|CJuoB z*Pg+J>>X0Xj{bS<9bAt+gX*zsKoKX$HRS57_S{<1hhvl5@nFL^uCE!v-f5lKF|`xN zmk;IIu9=)zGmaCh$8&Y(d`_;I$eEQ>cyeexH?}V3>bAwaIJcAcH;(Z9>~`)RUd#KN zhxl;&AWzS%=hekcyt=%Z*Oz5`X&sMGFXP_P1>8S2pV#M?@XfWAd~;uDM5L_29m!y0C2Rm&?=LMTa&6qpJnA=sCk_(74Ul^q*JQF0<# zDtZ+ZL~>Xd@gZSE`UeX{xe=h&9t;S^*ImF!`i08HQjshV73CGEQbT}hSkT2z07|wV zuI~5<+$hj;lkYo;b7F6&W`sG2<0IeI3tTxHaFBB;ag_7v1l(K%hWvel@Drdi$iAzb z-&602x0^TKdM|NmK7vz-yT1lg(Ow|}R}rKHL=*2FMvT0U@eL!u;ET7jI1kl14l;&m7u%eiz4eC=}Q9_3{bhG*htEPA5=>7^(jr*x+qI&kUV;3MPytI%Z6>pVCbi6#xtWPP zD_e0!rN72Sk(3-yWMnAbUg{Z(^QaoL7?91Y5gT%HBV)@+BtX@1xn?*Qx>IhC9-p0$vH?#=W# zGiH*NmX4k1B?VY!2W|;a{HcLdo{Rf+N2q=(Q#UthF(2-Yx!7e=AcS$|MT~MiTuSk z;_N-Rcb}6dj&oRm>hQkZ?AW}41LEYJIIxe?2M_Sinv16o9ptcp^F{%%u_K4kvs)(( zutpCX%-G>Wm^Ec0o7b*j-;NFJ+rCzdU~O5mkS(j`ad_ui_HA6juC+_puy7XJmM!4O zmbDz(w2b51mvMU6a?b2o#+kiKxNu+z*H5hy+r5Fu*EaCt&L$pRU&+C(gP1z9gi-x6 zXx-S0vU)0@;Y-ubku>WbNkw~KD!a<-p%K)Mh@jDMv73WKDDL4+MoTARt1SsBG$tTZ zY)`TgzVWi3V1|E!z*Uqfo?$A-%7W-rN0M^%q~z+!$af~I$cdbK4&;^D2~^oqui9F~ zjCxfjl+{>LzpN?2(JRp*> zlTw*6FPo*S1*$eHs48RE{u&M(s^$2xmYh7=QvQ!d?A~6&wv9P#SS>8HFpi1S!WcIt zh)J`9nY*nXPW^OKP=I63%VIIrpiAO#sgC(dJ%Z(j<;%#le)P`cRzcLuQ1Vc$|(zdN>hcaKcBtHc#=2J;bANSJS@*fRs{E z!-P5q@cI_acp*&H>CP_1dElH9eVc2Ua8m?)23 z%_#H|kn<2_XV6*U=4e5vud66xSEBtKNDgu#EyPt@lQ6~4jwDZO;tl4+3tUATl@5@O8GrTPM%mR^Zl7I+jj66?q1} za_@omc7z-BM0%;2U>?MWh0w6Fj4>mIuzK|pw(Z!+-hDgSw|^Hqc5UULcyOoBofD5@ z7h8Al?|Ak3du<3%CAO;}!aqf)s{KFy7O?u|H*Hp- zig3Mr`-S#71zKuW;ge@CMd?1|`o(KpIelJ0>jamNh_V#GIxGrm|KiD<60kY7Yz+IS z_h8rfw(J-rs3$drbTiTAB2^j`sN`h=@p5#HRBzIaT zxzRDppK-0~(WN4Tv>Z}Q1sR)*1j~B{d%YNLFFB_hj`j{XiSy)OZh@nK zmEP1GZvn1ocTe);)H8`ED>;gcln7!YeF^q*l;;vebxsQPGZU!EPoZgfF12NOR2OAZ zAuv`iD~YVcDCs)^q$h}RE6%4}z^GAmr8bjr;`p(wU9(CI#UU-y^XTDSZr!{p;I)yq z0%0l_D>^C+A8&W9G=sf3f$DX$rVVLX*OZc?e3AvC;=)4+5ho}#z>n~dAVNa}h?D;> zRsP4ws8D&1j#|-DorsOODOSeDqSwrDkn_3eoe1>zAT-cZ?k|#F9hP?Uc~lQ zGugU)3S$MX`gN?3eF$9VPNJ`b*6xZX@$B*105ALwp2HNsI{2~CJM>T8ym+efu+q$^Vq$n zoPF!7*}tKYoUjRdHwwY6uVKg9DmJgGVC9lxmI!byoRcM9K$=`Ao+*>!m_99$>C+RL zIXi`U3$s|VJfBtTO4zWalI^=2vU^_xcJHkeI;`gC@y47yRm-`v&AEK3rF57M(s4R) z=SF)0uXfzLT*tMG&AEQ5IadX)E}v;E-b*zH4wMOHmauR`5#twUGeBUl)3|tQ`vp?3 
zoh!Lb?8$5FOmevc!I{?hB-!GXV1sXhB|focctn~CWSQX|VTpf~9YIkJ#ECIT&-5cP z-i0E8i7qX(=-ND$HdSG?tqP|)*H^r5XYs7KUW2nVYLOrTtVjh|9%f|u31Edg(O3**+dO~T()K@;rhBG$TY=g}WOFkZnqA6x~EoEL}X0ty|ZzVbf|hZ59vgz;2ElJ;?Es z$2oWDBIiH3%7rU8v`X&xp1tA8XCHX<`aSoazvIF4&$L3KD!)oatyHqR8YWcvSHJ)D zx0ZkPrAca_HO6=&h=c_ zx{R~yXL5MKFitHV#+B8hI6k`@`zE#K_{_E(npDHSvGq7PqlU|iT5w^0Q%=upz$tls zYFd4cPcG-w)JiVQs^-!xIX0<;qhoRfvP!u!Tfl5a9nUrm=9}X)1!^YnXwNt>S36J!5#ZbG%%0BKLRAV2=Ri=%!h;O7@^K*p|`&accZ*$Pq}d z7buD^-kr7?ezZ>YqIse_Lz@=RrG5^X;eM26rBj-dDPRzz!9jjnJgJeP+7MyG!U9TC zQ;GKWCLu6L03(iskVwLO1GGG>5dR=8@jSr8S93fRRH-PH0xKnIrl6ypsv>SLVB>*{ zz?H4+>s<_Zd-~w0bH>_A4T^St*jShf+}g;qa>U!n9ky9Lm1Py6@A-RGqh)OmdqT=W^rOxFB-=jfvriSdoyqP2-Yu~#afYl zJJ<5y&SjoGyv5x+S9$d44xhbw!Q&?a9}n*F;j8z2_088>gi1xM)bOCnx%%_h-?Tg| zB|m@vgM9xHr;Z(G#ge50BU2dMe}DjBkXEtYPV}#vn=91~>oa}kRL;nAyL0!pz|$6n z4j)X5md(hM=MWnoO?GYuow~FadpDiU04D` zN;bTF{z|~=Us@zh1Wh4_u_G6B0m(7{L znnlytwQdR9S1jQ0Mgi|_8`!&g8T;2R=IE9soZ0>VG4&5%b|mlP|Gm56*tTuklR;)O z$~d-d+qSJqGQq^QZQFLTNp|1=yL-Q%-}79#?yLLs;pCjIdfipkRdWSa3pl1MtF>z( z7Y;7r#>o|2Ked!|hi0;K{Q#EFZo!B?$@FL!NQ*K@>SY@clU$SVlp4h5Xp&g0OIlN7 zayr>iubUn9y4y=Q$-WDj?H!40U_n4#EnH&$!an2=90gg0qhDV-mH=$Tp&0fz&;Dy7JxBCcj9Nvc}pJH>^cY{c3WoCOP?XU%mn9S^A`; z>ynhFO>A-vA`<@~CiM@2V0DTb>(Ziwm4G-z>Y7ZJFV1J<`V#ie;Hjp^23 zkM0At={Z=CHAI7_tuc6)y9v8ys>Cudvo5=Y2NlaXj z#Po%U%wL+ol9h=pUz5VBiWDl=*J0!4y6o7M&t4T=g&dODw>yu$yK*_O=fBvyGmGt8 z(pbALk%g-gn6xO40n;PtB*1E_0IQoD87-|yDl;Le)RORerntrF;}B_xO}GJeq1xC6 z*2daT!dFw&tcy#45uSl&1cX=<7GW!yh9z|poG8ojrzF#jvbrvmWw=n7=0QrNJ;81Q z8aqLmxfTv)dXiZiNd_%hkFGLVp#aO!5K9v&l=XE5Sb{4pW!82r6;KMM{s$>#UqO}v zDrMnk^?of4v^8q|2QN8pq^qMYbZcd#uj0x~UsHgkg^7WNWaTw6H_?y^MGHquEqok} z2y-(h%*8|P5gk(<>0hYkl!-NP| zV?vz_<#QMkBe!GR8{B*TmYWY>a_szdPF}jj zl{*Ts9`R7X^z!}3|A#DP4A%d)${4KwU0L|`*FStxF4ul1kA44@ckjOO^4UA?-nhqw zlcza*@BkMM?Bc|>3XZN{z`^B{**SX>dKFhU}Wqgu^o%vu{d$_D#y? 
z$gC2%UCe>Wc^sWl$eDSioSt38iRpP9nN(LIlYOI;**P?Z;}h$0aYh49j4S2Zg7&=L zHj3M8`*Lo1cdl&^&}~39_D@-pI2vl{`PQL7mB6=`Or~WGPRNE#cAr+1%MafhW5s z@oevSUhf;n_v2IfdUz7gw+`WUMIUah8_2z?vGSh7IXtgF3;UGQJv*2}Ut`i7HA!>Q zCQ*T`^ZtWrCx%y(BjOjk{9D zNg3-avxv*NV{ER%O1&yj8JlHdhNgy=KtM@&38VQrgFuwHhO|9z3MeEP_81B%G^%Qb3aR^1x4O zQHY;UL-~MlN=dHgue&Do-TO0$mg-Q!q>x9kQPFEN+L-K zairBrp-1;F%$+lX9g?UhS)tTy3p!b+#io z#GfF+qrH6h_7-}iM)}dMQ7+wE716#tla@s(3=m{Z9M+L(qq_?N2C;OGAZx}jmI%UD z%%8}N34@tEc{uxaNJ4S_3|B55=f<_OJbZYQr%xY9JmmWItK7bGi)YWDs|z|S;ncC?Y+7Hb&Ve<5&Rlh>hEDC;lUpwvOABSa!P?l^S(2QZ zz=TO-Idbfvls^a9R3*<@xrCXsr>Pg-%$_@gRcn^BcKs?9V9Qr5W2AsBFDIX%fMA>* zU5JW^CO@}8o&G^tLPtTBGO%5lqCuIe;iBMb&#ryyJXo6~q1(Q7Gds3yV(W%=Y+Suk zFtwV4+qbE(INOrDWO4agtY?TCd+ww(hTsVgFbJ(_~kHEMklZMq}Sg&}xwf3hl-;|WPn$#^drfx%Hl1j8mZ)QaO z9`@w+upz69wS*nnot;!*B{#7lEY}c^_*ys!up9#Zz|KdY<@*Qjfyxztf8pZ&CqBVa zhlSQ8Fyt@7qpFdbu0>9sKDqf?616BOs!4HKbqdS=l>KVt7u1sDngT6tGPATK6o?t9 ztzBu^)yXW7$2QiWX?p`Y_p)WIcxUPS0xDNYg1osICl7Sw(y{KGKiG{6`+IVA|6s1{ z8OFseLpZl#Fc&rsmhC`}t?b6prCr!JuPqy=ma|+E=IO(u7~0pJKE18z)8CBV0}SZZ zUyHu|H5nxF$Fy;&%%7RYs>N9%=R7u6)MrygF`L$wu~Cp!Sy9T`RfTdw7O-r29`lye zW6JD0jF}q8u!&Lh8x>5?VSe-&?oa#v9<(3eLFW;2du#~3CWO;_as>URL^5=G1mk9g zF?m)fQ|E*+Wlkv5=7ut7Q6x)OB(QEnI-9pLADedh^c3XQA%gsnCw;`rT3cgG$+>)$th&97G+7PF3 zT^vJnaSGPQJ$&9xnEz*oq!IcuZsjei*V*=ca@NzW7 z-dt%-(vVC|4@+ZXbpcsrAg#W(uI%ee<|-MhMh!udWJ89!l4)yWD21u9wvJ@Py6T{I z^~ymlr8P?ft=hHGQpRHmu+*k{1zx(^QfTSQdq`y=xUw+R61?fCmvEaH)|UHfVP~#` zleI4HHo5{8LqeU63AWcENH7&-QG-CUY62(a>Od_i{51)*)sRZAx@6drH5d!7%>Gmt zjrKSHi!ht!n@u5=L!I!hwVDO2YElK+RoP-DTC38fL1ltoxm7-B(|pWw<(msn>V z6n+C=8enMTFh6>9h3uHD->czC}Gti#8Tv;Xih_8dOWsf*XRasL@-uij)Fe*eSQzkl;V9`pX= zH$H!n4ENLDe0ckrM|bXXPBO}qdv|bt|5ncKtl(JXJb}z)PE}0e{MzyCo!OJE?#21z~MO!**Cq2!?Q{`FtvaqGwO43aU-s+Xv*a!4LLKXfU|S+IW{$&-6LZ- zH7%PP^BQwzQYp6Zn1Iz&y&TCZVQ(3bZsS478Lmr*(=Kz3azO6zNWgogw~?)LuHV>6}Dz}I9fPL0%eblk&T41#-T0ta@$;RWuh$NEVo_F1ret9 zDzLnb9p#)8=-3JZ4Q#N`G{MF|aAs~NV6nm2*aBT`Jv6FSL$}&r7z?s2YSvP_$^ES? 
zh!fbPM+T50_(+KGCfv_n{y%&vO^>7?A&ACV2{g`4p&&Ji{M2X)Gvdiih#)3Jd8|8u zo=$i=+2ZT&jF+n;&H|%AUoWZL!$^#eC97^4g#|f`88uApF1&F5G&gTtQRl`|x(i1R zA41c{9T<0)8kbdrZJ`G$rW~bZ(r(#6caH zJ+3!n2eoI^z_v^t)q@oa#<4|`n*BQ}*t2Cd%jQpJstbXEMuY zPGH5139O$#lTC{hV9jE0#axbUS=GG}m#?G(f{)N@tIlGkon}*2u zR79tyzI1Kw%cy<{^y(f;Zm|jR*|kVkE--6mLP19xih4Mb)5V7LcIKqE5`?vodbyb; zi4CoYDlo;rjyCR*)p7Dyx&$TJ7i9SesQi?1Q$f`Tko}NI1zCT|_wWbFDK*K>(I!8y zHhFo1rJ@>?mDix8>@V4`M*aFVD5$R~p-Vxb0fog zU6CWJXPmVXy$Fj{vLd zP=7i}v>W6{tNvcJ8t6^iA%3(U7C`4wA%ZP|*0@mmjSpnd#2^Mw4w9`uLnrw&dU^;G z=Y%tBaV(2gCb4Wy3M(s=7OrF})}_ejNn`!SG%6}nSz3|Ayp@SeT^i4bIkEJa8bQa= zK{S!~%xPyQz%nJi*n;ppIq~aS;+-y;RDv09amIK{_$m=AUvR8BA@R0EB{~qF>?DPY zqZAU>)QNMTe!8dHYE_ixMsAV=DG@e=cp2j%cyYDSlfqU9O9LIraE-7vx5PpjM61mA zq)bPksjNOUpO$;#A(Zono3M6$BQOT&al$HMV<+c?1we--fsmvQAD66Hk zY$;35$m8Yp%?y;5D}kJZmAsaXsR0&p+*qf!+}6g+!9p@+fuorQUN+i-EahrKJ;Gdc z1XDE#uonG0)+WkDn=mJ3*7sTj$!#yIYPgyIfv1&(}W!`nKnU!#4sCUBAsodB9+WW3ifWD+A?XvXqGHpz?SWs*tusr zdk^mA*r}uJ-oJ;+s?AjG+ReVB$2ofTBBw9k;FQF{Qy1BP@*-!i-Q&iC7urdZ!^70+;1y>)wDY*KVkAkd^-+oE_ zSynGH%PUGZ^EH2M4=F;2}Zm(>` z-HHyp+C7+0$0qW8{}>+c9nYN|W4XU~8XqsO=k?hN9v)cA=c_w;c48AJR?p=xdh+rr0-TlsLYidU!C@$A@2iRC;wynrVMXY=~R z0^Xin%*#V_dAMT=k9SSw^NB?~**TdTYlm~Iax_<0kKyu4LFMGWOly%(vj}Ih-SkM6 z-&KNLO=7KU5h&byMPq3-co; z$VZ)`AUi2u&NHQTCWvT%f1>;Xi0}=-&(#ALrHjzT9XBU;d<0hkK7Nwem}0K1c3@;I z=ZT?;EC*|426%gQ-YVs4LNgzIlLFS;ji1 z0ya5jAm_88rJUcgRc`B>$-cQdz}?)~QXQjZC1`T8a==Z(%hC})D?y;Sy&7H?4w%-K z^SibrM*=IQl}u017=6K=IweajWr6i-m{k7@JAsO?g#oc%c4UNkP(L|@1_F-5Djw8au2D zbEXbu*}}X>n?TfTyw8#l6Q@gj+Z>VWc% zYge&y*&=2MntF8aL`iWznHedhrzcZhR>YuSRi4C&j89v#~-s8?tDc4?6mupHqhirC)-9RNy2UeCzi2uY z^T)Ak?NpBMTp%%@!&_!@YVUk59bLltgLAlWa2D52%w_M!K};EuBf#>Zdn-SBwHN%C z*b8V(she*=mY^oP+=R?Va=WQOx|s#3O$1hQJF%fD2@NFqE;T1A-1cV@eyCP+V?6StBDFH#eq5TT?o8v7~oj2ZjytV#b7c`MV3* zwWc{osycCcXK&8!?#H>E13ABK7^l_`;rJ?n)zU#6oY#-NGkUUrW)F_e>&>Z!eFRuN zIJLM72j{nB!{l7%4vS=5e|JU>uw&#fD@KkmVfYYjMhvRSA8S|jV8h1ltliX(HJh8U zdQ$_ItuJK3iY(?X7EsNv!_1jUOq~+Tq{&fCnjXdYX_1Va93j98qwnYtx{U~;qhPB2 zkRY1%@uEp@51RM$re!~0L05p_E0_+0{pm2wpYG#A=p)bVGbxn56GIp@C6vKaLKr?R zgt4>2nKD0$*-K(sxIB)ftKwO?CYcp$Q&?6}hefOFFn47N)0ZYPZeA<{r-spWl)r$^ zi^9%Mq&2l7q0ExdJTro_%?Zk}!8cu?m25&_vY;we?n|*CEZK_aR6F7%5>xD{lj=-L zf&-aJ&J@=5rL0~c`N{4i3ml?+O$l^0#KTq_H)}myES1Z|jj=T`!_Le~Eqs-1TDf}3 z$WY0Ywb4|vJVPx^jdU>4u8pw(%T&EW&;T<{$(n1+<7-QnUCS85TJoHl%DQQq=xWx+ zK*g&H^t?))8XaWd7z!KgOgwmNcKAmyP)xTmh{ zSI5`>PXc8dCUA;&*CNqJpOipBj_i91syuBaYq0+Ve~Dn(k8`X>u9pstLM&(zp5-J}qhzhDL%s@AbX3jAYdPH_C} zDUO~w$(B7kSXEgm+x?ula7kcwm6I2*sY~S?Ie&!{mv3_M&LeI;RxtIJ2hZPe@x}v= zpS#M5^Vig|S&v_RRO7vX>fP60eE#_lZv<3tzxvu~9Etddu!Ht_cJMn0ae;``Mde7~`SFPAp+=Hx1#ADPGFgVVXYdop)+ zP2=gIMe=?td3|g(e_yHMojm4B<#^668^(oYBRR8h6svl+U_fRpC7u>!3gpvWG>Nya zPP|P`wR6o8N+x#>f^|kPKvZiigBn6qtTacwK zuW4W{z>@P>nSW03rF27Dn_9^G3AmIi63vu3v}|#d_feP55!3}ZIN@V$i<_X&TW(uu z>0&6TQ?6>%sHsWK8k%TnXv=#mQ?h8FSEHIBtQzi;s71O+cJFIVy)b85W<}DYVLA=c zf+$Y%qe*ro#i_v*BnOcrk((Mqaore+SPIgk$Vv<&L0P-d%Mo8^TYQ}C)U_O4tS#iT zS>WklkEfFZQ6T|jrzTNSKSw~+h6!Vavv}bgcJJK6*|W#Fc?~rrjM7byf+KxPiOm<^-`g)VE49-9N4{;J+j?ZwVtYq6-<-Y?bfC_ z9hx?zabYewsed@e;~3gYkko$wv&M~O)AFSpm-6MP z{4Hk=@8*L1{g=+3;PL%i+_`y05VV)ItCulx>Q9X9E92i1W1=a{%VXHUf%1Q}jb~DJ{QdbezrKD`r;GUb<}J6bUS&sB6*DGHVsO8H z^y|@s5rYRab>akOPoJi?q%B*th{}pJ?AgAJom;nXLV$Jt)JaYsJtCj!Am>k?;@+*B z+`n^M0D6wYd-kwn!v;32SfS3FwP5NLrjHxN=s|rL+^Zv_26kuKxIrwQJysyOj2#;n zvu4Q@wWV<0)V?g7-d(^okWDLwv9)5P{GZI^+UccSIU&$GHk(7+MzDERH@UAHOJ_8q zf9EjTG<2aX&x*PkI%H)VlANYTWMVC%(=1J!>SWhbAXSZ$!arzG@+VEo zHE7;IhsLEk0xoUAu(kl}zo;)!T&_#QrpB~vWkI`kR&?!bNB>@~f_Pu%PfDV4K_R;Z zSjV??bvP$C4d>vhL2R4dovJAv**>)kJEwGF$D|JIo7S0wv)XfbZflM% 
zY|W8H&Dk}(kd@=(nK9gl3Bw#1KiZ1%<4qYoT8l9w{^E~qRioIpa~Knij+0iBa?(6Hd30VRRW0 zPRsuOH0|w8^S-{ckZ99CfDVI#X){0|G{}d}ql4)=F+w7o-U76Kf~RCejB>2-%`hh*-CWcxu*$F`BEyQWyCnnyKm>4V4Q(VYN z_aZ07ldL#bQbTQt@Gv4+kl^R6jK9*uQ&|(t$^<(TBY}^hI+cQvWWWa6hL{?fVqtEs zUR9%|T@y`hWgJ#@b-Ds`WgL}qm7o;zX3EqH0xZ31lGWBQ5?mReBfwHxvGkO&SpqUc zJwcX%rex*{vLtl1u{AQnT3Mu4OB;KEoTsHZ_Hw_ov8Eu&fFNgm!rXNU_s}NDRkCA; zzwojBlW;c;>I9ooFWQdWI0x#4n-k`(C0TDRf<0==WB(#VkQF15^QWaEqnK||Ac_+`~{AlyP)Fg-1WO$y8DA%EX1= z`S$Gx-@g8>E@$!i{byc16~JA;!_7Sfffm~eKfzu0{abal_uCH#v%{48!x}p(x*EQwgrk32>Ajj6V z;C@vXUK|+0O99r~lhgTfc`2VRuH^I8O1@m*%JVbpd3>spXXm!aR^r4~uI^gR)!i$3 zbZirEFYQ+Gb$zFxOXB|Cr93+cO~^4LnA99Y29!*h5ph3DggGi84PAI?<@ z#5T+Q%H!7YP9A%E+YGMC-*jukRKejSR`qB}*SKKvoQ=qG)g|3olW21(3{9$0$61eh zK9=MKInpdOg1(LNX_=FXznv+u{vI^U&m=!RQJokwIVwaQI~3;QL9zfVJ}j8%-~gh6 z0`PHl!QIi3u)rX70bV~(AKabYaS$*$+B)Ly3#M$ekYg ztt3+BN_U?COWi$%PMtZz z*>kdg@&GrkpXc@sNiObO<^C=Kuz3I}T3HfP>!~}W~ z8Q_M$tDse1l0tJG5<}d{NerP)X%=Jqcan;32$P3*r(5e{`gd*0!kNQaxp+KlSI%a8 z)fx^T+{N*uhd6TN5T{R{;iSOooFrA(l=JV&bDlqc$>T?Ysn^eW_51-J-#+Ed>&HBQ z^+@6|uU2B!BZ>D%Y%F*3^j%k-xQ5+m8R^1Nm-(QW9Lx%|D#;|bi z9A-_M%9IJ?*<87vBM0_#Xy0B=9Xra!vuC(_^9IizJ(R@n0gvw8<Vdut4 z?A$P(eOo4Tbk}q)9G%bgGfQMUo5R~jv2j^f)+}g4<PGu!9u(KN zCRdqTs;(&sDU#sF*CI4QgUED!VzMNe&M_rE-<gXV8y4gXu$pnK zsvD=b_vhqRDYG{Y=HQxvY?<4YRb!j8ct|-bhBu;OL}NCLYQ~PqZP+`bCA+6L;K1C5 zoLJtR!%NCoJ2{RSL*1D)++MynTc%F6VA3RACQqorA3L^BW!tVXtl!p$l~o;Bwy`<$ z*Az2-Q5I9?q%nO~IdW{dJ@5EpRO%7)0lwd|o z4Q9gZaAquwVcxQMfmJ+nm&OVB27^P68S`1Kb5zPUc3~7%FQ6Dzi%Ksf!mYvqkG`>tUd8fUd3<8k*J7)T)M7 z?Z42~sQzEqp0+mn@;DO#ma+ST;XsjlEkS(H{seGO&syD~UlSEm;Gy0tLU6Kv^7 zC}SY>^;FcEXli1mtE;wR*&9e^XsSb?qcNebMufWP5aOaipmPnp?f=5xsV0d6pR8zG z>V%q;5-RwKw<9~nk<ai(%AC)SMR$ciBxS=OJk z6@$51HG*4JgSoJ(3+I-%;riN++}qrZI~zK1Wo1+DR<`Eq%0}F%Xu+EU19*3II8XN~ zz#7lTb94E6Wd%QORPz1CX1?6m#>?{?cz9whuP$!m^+hSn_pjwtgd0`UxL!4d$NT5;>i9xlonFA3vrG7Jv4SsGs(3AM zd$50*z-zT2tAa-Yu^aOEV+#hevQJa`)s3dyTYf(d`lLH(kZNC(C{qEJv1FL;#^m~1 zlI3MZyR10cWG7NL$b$$MD{|t(DXE)6QbZu>aSDCtT{PCCC;_PVUD8RDDP}2k>IVa8atuWKKl*byP zT|*lKWv(sdDnx;kGQP`7X~9yaauA$3OLz#re68*9miKluF~w1E<*Xn}u%!S?Up|MH zf-C`@o~E`s4pXm&GU;x0d5#`&O41SHK>a9pN@G1$U=_tnvY6yUv)pK!)Qh2MP8^LT zN;4uT$&4gFIfRTT1y|lAhxrg2;I58u^RSlk%GF7dCRcnM?eVa+k+2nTdE%$6_2}hJ zN^F$c0#lGvm*!0xFlb;Orc52jqQ!IBwPzdWE}iE5m9v}_V4XUDf-8cpD_75Q@BVcj zJdktl^bzg}t`uM$+`U!6u$xndB^lbho|W?zz;q<9P7*1R!G!y|;O%UIhrKcOrjqoz zS*zp7S{7w6v}+5d4eQCIA>A0zsS*9VG-lk;F3ghbH6cwRnnk|Vozv2ER2Rxez@?8)O9 zGq^7Ud$gx-*Vc>~(o>K*o&_^UvUtuIRxBJZ*cwe$#S{)mB6W1vEROD;DNvik-3zOD zdaF{9wUFbx$FprsFILQH&ZJ>=>E9`gew~6D&^d&zt$iseuq7))%Kj7sl2QzbN!BMc zP7D8NsfXgFK1wwpBEy)NY%^kW%!rnHIU+-k$TS_2WIrw6lsb8aB;{z6oU2J{o^0!B zQYTlBw0im^3$oHPwaCqpda0l`Wkoe;R9chf4Rj?8Z*P$+0csvBsWhqk^)MN7O45rT!Sk232{=9q^ zEy!iX;(GGs*JH74XU)xI%;Y5cjfkYnz!2K^3#MUb4+`5jQP9Sb{5B30cXXj)H!qq9 zwptDhp#AU=I*tgX{jeZ94iBRH*bw?m2&Jz;Ymh)|_|!1Q&J1V5tOzE}iD1gSNTw}_ zX8Ph-rb$c@cukxi!|2&j444u^x6yvI8SF(_cW1I&+mh1Qn#e*0S>}Wl*a)&LCCmvg zv?aFKiMS$(0%sEQT;vOOCBBXm@rjNkCAdg|Z!ZOd;L2BC!`likD{WlNYvF3Gg|nrO zWMR74o9JR|Y=Es4s8&+=DpwF12(A=h>FElTw3OL>tD~*0jK%ujqNf5dO%1dplQqyX zz(mhX;3b8);7Y5Oo>~b1H-@V=T3R&)XUg-d%j^9IEj>AAq^m6zfHHGi0mK>awH4S6rk2|H=>@81?6Fqp+~q2)?DTJa#F?zNhK0Si=r&Xjp)yY z4J+BUb2GaR>}3D3gX})Ik3EMDu=C)4wjbCl+e7R*a*Ttg&Tv5D;3;KbyMXK56;55f ztpe-*i}zf;^MrF(?{ek#W6oR_1U-1c^6j60`TARs z_5GI$tRKIABRBsGLDrAI1y|qy=I7tP_*V}8^Yb4mXythyzVhzHTVC9g0$mF4`=@qt z`%ndU1T(kyFXzhk*_>QAn&YbnbAJ6$u528_#r1u;Skav;YrAr7eOGQ)cIMWGPCVY; zlS=}vbIY3WU|Vk|Mqd2&h_(7v8)yO#3);&$#ISkA-!OL=vog7;_E^YZ8l9`2pTqkVG)S4((% zb}4VpFXruq} zsyv)r<(Sgyp)zt*<`bP|j5&9c9`9bDXRl1Wwji8<^v2ZX;!bJw6t;_*z)wDWGyO 
zl*CIAJJwqgBo}fL{3#RUmZnEjFHQg&=uWtYgCte91iAdT z+>nQr74BA+l3Y3B@9a!;pud!BK_o?mlPD1z5kiWx=4VzKEt?B&`u9}FUyYkEl9g+g zvT@6L)~sJC$gPJ7r83?M-J}d>cvwWJ+O-vi{`MiYCRW@A7=BKcaD9p}i=lU$PI>h_(7T)cEm5cQOoFP`!4 z^)o)cea4#?54m;YoP1V6*n?~8?T>QbOQjWS*AAwS96`IXGMeS((X&M}`nPYz;4W=h zIC&&{DwlKX+)+uO?(p}Q5B&T0SN{F^O~utevacNbDd1A}|M~S@ZQ=S`(Dm!b7ruUY zBj|a_@9*E?=TH8Ttrn}^ciY*;p}P3 z!p(D8xpWCt8!FYVz;mZga_!PZ9^ARD;_CJDXDY0eeFaxHu3T2T2rr)&fSo+WiGBOn zQn8v<3+A$X-Yn)%n;^*QP2Vo9nLN6`x^T8KzHQ;GkyI?3BH$Xyig`n+TsDO5Ye%!c zY9c50%;479C1zxmnNqh* zlJ#rD>XwB_)Kl`a&#yz(4@SmHjPU(Xj-mElQO|nk&eJi!Ivh* zg}MSQ0~(eIu9{lXwuKcPT3OSjl?}by*)h1AD`WflsFSA885PZn>6vU;Si;7|jafIZ zF)OAuV8OUzCiF{ZP@4#PHTGvf^I%4`k79bS6jqF=$F}K399dGvnbi$Bxw4r3i|ev= zP8_SI`7?jABXeiiFmIkEbLSYy_p8kx+jq`q^NvZZ+&qv)m7SQst|hZqmN8>-KGPTG zGFP^XmKL*EV)^3wtX!NY`pseCoGhkIOJl^?czO>Gr(^FRns)Xgx0xMjWfs&aGb63s zjC##&DemA#X%`O~39{M@4i#WU&~aoqodsH5M~Q?-2GVnM5CbQM39zCVGb5VOGa_Xh z#&`u<^P&Y-u}oeV%_P}QP`U``L^DV*)?-|tpv{*?z1_)e??_s6J7P<%1X+TlVmpEh ztOzNzBC3G{aSa7grOrg=I}laRftXAuB9rZjh_xdw)>#T7C&~CMrLZz1(8&yM8$(4TjEkv!K4p$6TSFqGunx4<#Lv1qp>C2fdT0~iu1S25Kqtb6NI_GO zdo8KR3`tA1BQMW`(o%m)3cblrwjwz|Fy^H}mX8)W{_;Z&GpBK^4Gp5q1zCcsa07vs zA?1QAr8TQ@f+Y>&ET}KX{sWddSpkM*2bhr)Xho){oIGwu6b0JRI?D(xfp}pF(XzmoYY+c8e9UH0IwMCG%hl9tDaN^t)2 z3SPn1kKcTI_n8lpL4SI1iTAgT^8V&Q-U@u4oY}~Y{foG|b0*iePvO$0(VSg3kPGYj zb8E{G9_$*<-K_(-x~>b4b`Id))?R|H&Ri9EUESD+JKKly`uI%Von6G+^UL^pW21m- z6K^hWY?h3|3d>u(B~#maMVG zR8P)ReG~OLN=uf$07$c%GA_ypJ2OYz>^xOu>DM+?yZtSVEY&VW1z2|Gmg?GqPRi5_ zrgj9_x!@;ob2qfW$IMdpt#L6h!m73=wt9x@N({O+H6@9X_o%InMh!`vYRI`?y*f7f z%2mHo9=Y3+>}yA9q95f+5{W)^Z%~KsjnimV5KE_qX*92&NS@rE9_~t3v={jaffOVL zNd%J}6F{uD6Jf6Me=OS=A9q4so$#=*z{}c-Fn7V1w-<3Cf~&|dNu(0VN=YO=DUR&) z6mkVv#f7=FZPT2#ZCj}8fA;O)gF!?4(W7rSCQcnk#k%Ee+_Z+%rw?;R;*g;0*rDBQ zT)RRtb-91}Lbk43#rUBEq_S5s)4H7g9U9T8X&zl#)MsS>b}F#eE}z1VP0LhZ&6_=it(z-3b@CX84;|s~k>lLE z|3vK~ym9j`Z{NM=`LidyeD;VhA7AtS^%JgNIn9+zCwYATn&9dY@18y2)xA61Javi< zixx4uS5LY%ZbX+x4d~ylg?yHF%oy2^sugoMcX$W4ubk)eyH{$9)$i}0{{xjEO5I8* zty@3;4_3-;rIqXJ``3K^^p;O=-}3hPbDlqZ$jwWa*iu=^_%$vu|Nt4x9tT{8LvsqxZZSy9!Y}}x>Tq#|E%2=x_7tX7gdj9wkPafPC zZMLNx@7KnW=tH& zf|(;&zj8VoR!^sL`B+vj=*POnec88pJm(J1=JK(5Tsk_NNuW`w7b%G*L`CTm9IlC*Z#7)J|H3P{27xhJM5P*% zm}O3Ct|fJH#Rqw&q}4Yfv&>jfWkgm(!Bt~DavIB4;8eFzU!7RAPP#T389Ll-JjwL6H_^f~(R(1ztMT&(oqHS6hD98r09xrcsFrEgPBBp}7@ZTiMdLy*)#_ zyD_f64-@+PGIeM$^T#K!YJ_$;OlPiNeKWI=Z%-I{pPzSx0o4P6<~%7<}X z!*ehz9O3gi;~&1IF`)|f><`gNl<0Ol7*HmUSP!Xg?ju^wR0{T z1X#;94`6;}2WGEp#*Eda%v@2QITDLjm9TtuIV%KRD+E|e=G9~2%uJ?FN@dKbIQk8U zpi{R1T6OfHxVaOl^-YMar$@9zY@Pvi8cG4%)|H}8o|Jd@q2+*3+76AB2&es!P&y3@ zq0`VndW{KZ@Z=bVPK{;M%s9r*j%UK$cqS?_FP8CwtZ_=rk74Y*7)H*HV&If8x{nQ_ z!-xQy_VcEolM9)x9EopWLrjSsF=dW~71T4@+Y_txa*YGLS-D!bS>WQ{@UmO>B(xu~i10OQve3Z-|-T z%G6K_W<4GBm08?%q%hU1iLN>ht0so}+87z=s_XM!|~KEd^T6W>VRhncyh*d0FTZAt(&8QPv-< zL8OO1L7sZ}y2=mR-$b%UDbW4&)p1vWKDCL6(5FtC4Y~Pl6cu|?zn&C;ags#@>yst8 z%9Cwrlo^fUtY{o(Nuy|`HOrWi5Iu@R^#xrfYIk92qzO3!v%2z{*@4<5yVVk)8Bsq- zzH>hda(&EcD4BWJtO)v-q|&o6na=r1^lMh1v3=V!ck*!7tzO9X9h=y?dkfq4ZkO$L zcI?~D(bFe6Cvz?G?w9U7=DvVR z8Ed7$Ng+N;ToE)~-^?pP&x`XLc&fBWUD(XmI|91PyZLy1FFzk0A+zmgXx=kws$Odg+Ez}u_q`0;o@-|rug*W1d6Yg_qw ze=oNW%%fs_E5^1?qkXC`4T2o0?`ciGn+f$?bjh%-MXF^r>N*I@-Ha*rv!!KR0Butu z_>&s!OI~su(ShECC^L}zc@P-jLzvu`7#mG&M7Y{D=DZim5;o(tDwzGz~(35bJ8=wN>fW61jbP-aXT!-Nro=qgB42DrCvRHj}P8W-#*-?JxSzD@!>Nqpt^px~;!t}>=9 zkk&<+^lVWqNmf2Z>5;T4&ti1{4on`|gVl>Cu(fhAOXp8x*`hh@+q;8{7nS9#E^zeN zDK1~RskUG}eDsW`PoMGf<#Rp?a1>yD{Uqo9ol9IkcZ6G)Px0>QJzn0w$&(vbcyi?m z$G2@`>7Cr=wRZj0#jhH9^SvYaHx+dR|9qYMr?j+A1-sQER?d^*veEIN3?Hc_4 zZP~)AB}V+yY3PM^+{apP26&7V1w^=sC!t?EBqDFfS&A3mgZ|0%$_bnYx?P8{dv)hhx= 
z`J0|T;oYm30?JpsdH#&ok0hDAeuE3g1fn~4uzmenRxg^zys6^_S;LqzdJvOF^=IP9 z-prjgOzkFIGIunU0<0|+6RBK0koAlDvVYS!&K;P=+5OWwv}GilmUk0;G-mpU9A=Kn zVS?aeT)%kcjmu{0uvDgv$dvCam*M@==+r7yu;WT*hBBsEe&a#4aQFTb55GSNjFiMa zS)bHwGwSABQm@dG>|%537D+;0W(Y2%U@H8hGoJp*ai z){`d9ohfeUKt_Q%k#)2PNT`8-QcVKWG>FPGB(;$(*{$3t?(9P&c}~+lAvEg~Ov`@3 zf~;^l4-co;*l7AqjAeizYxwkd#>`PBD6PXJi3#%(bA+eD=(WR2*l}Z6rYD;jTHNp9Ut$aE0>w6HF<4H`0 z2ca>J*gNWBVpKy4+uFF>nBr|~g_EhE%2*di3q71Hm2N>@xnCP=V*|mKiEIT}Qec`W z@KJE3txU5}TR^3Wv9X{^wt8B%Fwm32)le6G$zC-yHK<;@wg5_tTG~2j2)Z=ox!TGM z@OqkP3eIY2R-=~IUufu6N6%QmrmumHu7)~MsWN|-(pBgrz;cn#U@y;67M>1wG9%Pp zpD+h)qCAZWbl1gCB09*7#0U$*e6%Ig{}WHQKM4-eAU4T}gfufU>e*0~?@U>a3q?s* z}2bR7SVTXL!pz1~kiIc&A3p8q<$ei>I@`Vu?DjXw}Zm>^`_h z1=i*r+c|*zklbpDCU2VNmt{_y9b>Y@Sb%49l zVx@EiD$r882o*p*ee+qp@5TEc>Q=#*G7w(rO1%H-WF;F7aE?^-DpQ?Em=ogRdXIN(ipxIsg3lz&~G}^Y5o8{QL0%KVDqr{oUid z7Wh27P$h_2$CEQ_czA3vH}}rq=I$vxJvx_vr>c{%T|ui)d&3O?Q0%$K`6Wq%j% zu5IVry}j~Y+XPti**>QSGrASfF~grCe;W$DZ76UTj5}zOX(xrEy(aZN1lj_u`rcNw zPm!c2Cz&R3A;dVC%kN1*5FSK=fGRIBRw`Z}f&#qo@o~dXjzvd?QdcVenD8)s-Q5ZF z@=;r_LVW@W@$|#Z$_YP5FT8BsvDUZ5!_G~`lbK-4#nuHcXK#U(uNq40l>)5)j(e&5 zN-LJJj*+0t5hoi5IbZ+lCRElnR9dN&@mZ$27TB2B|6kal%-pVoIz5A-iJbT5cnPq) zOdWC6x5Crd8Xr?zJPhTysf3vY_Bwj%V&;ZQOI5X+X#Q0bjcUq(dQG$>^lNHjD(AeD zu^|!ej-(5YvclXcj`OB@W*Cj@1kj-{j@~V@=+Y>S4h_?2QxK~`CMDEaZqvR#6# zlo0Ahc#$07OiGZugeMVh_W0YH6E47tQ5Ma1b0NsdkpL$LLglu*t+hH9OTCyNB!D#e z9_r`RrKEnY+KQztkJGGqLlsz^1Xr!vG^I`Z7Bp&7LMuT?lcuE%7|?@B<3}(;&^2mk ze}?qyAwU?xG{M%WfqfX*y%UY{bBGo+hx@n_72rXbw~f(;luxD(6gk7fPJIaDm4$A+~l*t}`Ix)8E5$o=%03tYH( zm8Z{M^Z3b29z1x+=g%McEZ}+b;t@Z;y%!kW<;>B&9N)iwk+$)k|_-t)hmG^UBVgIF_cYaA^b`I1xDisS@CqN8;13#fs&?;iw*RwFJ^i}Wl*a`H_m5L^|OnN!ff zg#3mE)N7z6sH#a;Sq<42AeC#8C-BNGG9o9>NDw8-TY;%021XR+Xj7P7i~M@E$jz=v z-Hhs_r~XBHa&@xm$gvD<^3t^_&eEe*xfR{p2(G$$F`|z*qXbuD26!@Ys4r7T1~Gk9 z7*j@uGiGow1AF+;qrEF#S~=2LK-I31C9O*g>D<_izHMw6+S!RI0<*cpJXti{k>#VD zSUpB!oFf&J9H^Y_%%;VzY*^|@FN zmvM_Tn7AmN$@3-Vq%nDVDpMw>h{WUN?Y9a6+-I)5p)<9MVH}`bQ={(pK;L) znv%fK>4}V-l_aQ2VeH%_*^gz!>==eAu$mmEPV>-PS@3ym8107()&>R9xL**Z-2=&O z??+m5FF}?ds(~BfCH91r*b-W7M|hzF(M4_~6#I~n?@MfkH-QljSlVa`f@(+stck0Y z6q>fSIGQMhtG;Byy4afPNP(*(g|dX9p5V$*4Kvv{(bvIPR~tiJWs*=0!Im=Px)ip$ z%8c!`{#%(|?$g%MKtoG|np(A~t|`b8Fx3`ZX&MNy4E_U_wg64J0#Q@9h8(M@hK^os z6!g za{L1ik3R?st4UO{as`Je>GiD0%eA8{*O`V{j+7=#b`h#gL8P9b>A&$-O%tqWTgQ>s z$+k37#$pM=N<#FhFR&^ImLGGZ0cEi!DzfVNDOZyjQ51gBG_E5RbU9PIHef{C z`i$yS&h#OjSvq5sy2e<=>P1wnU%`fL8`yPVH+v54W7{4nAdVej&yhpwAon9@&Z<`t z?l^RWT}PxKJAHwZmv5-JQpR81c>I!kFW&L+SD!!e>DN!b{Qkw)-@p0x>lYut3c8d9WB>V= zZ{NQ2>C+dryYRE%N?Ek^?{8oE_3Z;cKfmVZrx*PC^o)N$Kb3gIzaQ=hwl4DV-eFz| zh8~<;!;J%rxOH$LkB%?n*{NkbJ~~gZHIJ9477MIqadrDd?jM@N%d;zaEhu_-S!unh z;){Ui=cE1netJj%v|B~cmpglTEvR~RQSfzZ4`1)@fPxjyg##qFPB#G{Z<8kKdj{E<0^hV-Nmmb`{lO0_Wk|xI(vA0Y9;rNEaLR~ zF)SO>gr0d36bIN+>}yA%r@4x&H0zqORe)tofu~?R)S3SE>(IA;I{5+agjpC+6ca{4 zTm*^!o@7J@m6Q-mUUn8yp<(!WcoFU&EWq-Ygdm7uj{w}PUGTQ^ zz|+PJJ0s<4IX5})lo{3?aI<$8RQcd9k5ho<=kE9ax(Jmk2t6D<)RrhSeN(ly%gM$b zOJf0-uHk>n>vYEi|i1VpL1cYXw<4WV3}fHaUgA#S7w zDd9=1mlGjQ)`Yp*3$WaX7F310IuqpLh@Z0~4yML+pQ6%y>Sm>qS|^U;l3Y4=Xu*(yy_h&|IO9hRVeIgM%$PKq zxl<=Fc1VBPHEBQ{WpdF#Uv&&ta#SE`u|cFn_)sq~l)R(}LADF|389n=xEj`tr*T$1 zP4kjyRhCK5wxx`da$)hTk*rxZomER`vuedcwrmz)?b^n#f61U0?R4rb_ zw7~-z+owB=Cy!={0A%B`nH=6($;LIyIC@|Yr;i=t)X{_Lm4hdb9N_Y~Q(QVL37;H0 zvTqlMB@Re3xO2-!wr{Rvj{=&lTUoJau?i^#RSKRqR#eE}vrKKZQnnk`tWgoB>?^Tc zz_)toQg!T=5(oG0QCqH#9z4Lwqes-PL1iqKf-7Z!yAnz_;l0~8Rd_vp@PL<(9`p3> zT^`-I&h3jA)WxBdv2kY(?_f14t0bQc$+$xABrEUTtC$cguNr=-UDxwxqk=02|s7>8WsW0-Rt|&F3abpY08<|kp zK#zikTGVf(A=s)xZg~yrH<0?LT%WvR1M>2X*LOd{%%Yh;m4Fwfs7mG 
zE4RJq-`#~SZEa}V%$$}DjcFy(wxN+EI68E0qEElJCJgFmCV#&j^F}zcTu`-oyd$f| z*s^l81*^uHvwE@xYi3#sR&Cg_#+d_Ky*RzspFc*=>%h?2jTtbjfc`Tx891j7gXbnQ zWVS$RdJKamM$&I&2z>?z(5GJj-2_%`TDj1)ksSs3retR5Q77GixMW?z;x+M!sE$j> zpExRO4Tb-KOKc5%(&WB8bCSyJNpI{zPAe~pI`~o6IeZ6ZFQOjBf*ls zW=%ns07)7Au2GX(a=VsbskXi*HTATprLTpi#Q#6Al!5NrI#N&@C{Yt#0|A=cr>`r( zQr0H?KVUf;o8T;;!(Bd`kA)r)E~dnJSrYDUBo&z!zOI@?_#2QAW?mNxnS|vhAc07cfW5k3LqP;uu4UqvV7TWHpNy(I+WgK$bLA z#$*Yw8poN@BE_20XajPBYf~i9DvC5GCs3Kr!G`wP0gP;&#p0o@Sva&U6MHmfQomNr z8`p=`3nsFD^?WMVOQEy5LT$m?D+R)SDZrE~2=^a7%noIM`=R~pK753|M~@4z4zuUj zNsgYo%;_t))T;xW?R}Nmk_e^cIQs&1} zt}aw?wR*#LR#k3e)%vaSK1aEF|2{9je&*xvpS=6=oi_rf_us$qK~VMK%U3>p{=)N@ zucc6b$tS_p#}A)*|L!9{{}z1x{Z*hP`vR?hzQ2dRKfq6A`wG51=GWUBe0p?>7uWXi z=v)<#&u!$e-@D!lF=SuFTk#p{ceyt}%YkJq>I?T#Qw zfc5jy0fEpyiT(U~e1vaz_w(ZXMnTqQ!O}s#-`l0W*6VYt`Ff?2zi)2h=dF$WxF*n2 z2DG1D#JjT#`E+$T-|w&E??+n%ZM*sB=|OqEyx#o-e7?O?UUMBU!P2c+8!K&NImcY^w{pc-aObB?Hfm&! zi;)Q~lIYmTd1+Ex&i5KM(NkKnYW#;Q<%&i_O+9p~)x@Y)E&T26$cYW7yiSaqV}9g> zyU@5!5bf*7(y=&!E{*EYqeV7t8l=*+D2e7uwjaf)~;9~$5yd&@nRKRo7S#Xk4+jgMjhC$plkZXi2|}YY!HYk-Gl!_ zl`;lPX|+=JmF~hjf+wY0Pyv=Q(EZJe=jxz$Wz5(6m#=vD>^VJ;9gysUzo*dmgisM^H zv!|jDn-;Za z0LY5^Id%egTRFFlNKMcpJw;0rDLqOHjcD4?gqBUsX(Es*YoJegV;vecQ)XP(ptw;j ziW_QD+(4ItB7OP&No46sLZCPW$FAbQNIr?dHLdzTOP!@5%7N9*i2|&d5Qo4C&`YzwUN)Yi~*0=0@t3iH%D% zX!(?>W`;Q% zm)Nnp(t~3=1GscBoIiSuE2HC>EIN!$q2u^CI!%nC%fv9cjSHchGL^y*KRWjHq+NG+ zTDEtlyrDG(g(l?W8w;%TNJ`cwxsCxbN%}-2=n@#CiAO{YoWrZ*Byx3;%G|4tK7m=L zgy&ikTi=f4au+h1xRc$?oq9^ER!dikJ9yE!hrd88hz>);=`vEjym7Jgnm z$qbkpOP@*6^p@y3K8kLFs~!{L=sqEl_9NqHJtU6CePb!^8c9yOFhN!bsZ9eV{7G!+ zO?-(5iKX5omG}{#=S@VaEB=x8*t;5{XILFm6K!lQjIlG50@KI{M++-lY^(%W##ou^ zt6^hqETJsytSpVAKuVbzT313(4P|*86KfIktyddu z6CG*^!fFYgG-a!8pe4YP#|f~M#iF&es-vm%mmI5(p1y{<_Ls6|p}v+j##*{q85m-v zrzglVz(wiC)UQRLgOL=p76dry;^VAIfV&pafd-^RnUNA>Oj4|1xQ-#2xn^Y6Hz%uD zFjZo7OF2 z^Tt(FZLMUh0Bh^+ZS35?Tb%<-X~EjMcNcq)9cJgj!&GeE!m3T1*>m&+r!HUT%+*_5 zy#0VX&)@P$S>ERLf8(r_=@&l!_=i^?zpKDft`<}VzhAofP@S@2+uoz>JaB?7yQQGr zx|bDetJt#VAeZmm4*2c`1Fz2U%&85wohKY;;v+gH*VhH*6q7Id-j6Y zuix_V<0t<4=QsbzzhD3U=C|PNpWol%*Ea#yXa4>B9shiN!9QRAYtj1g>N;=l9OLPg z-8{aqohRqEr~~4ko~h)~i8TTmrMqv33Z{p0`=xB(Ua8{EmCbw+U@5Iu3Vy!d-=oH- zTRYWbA7%UMQYG)NDXmwlcyw$r?=MyG>GE3MpBGe}Tf*l{EBJnOHJ>jo<>iUl0=tAlR9mpV+}^{-o7?#zn7e;;v1Dm2=u;F%lW0fsz0AmVGA7IEe^(LK zBtvkONU`< zCg-aFORuID1{zW(=$lGeVu6K@0k--EM0mK96BABRf<$~E1<@YVkMW>E9eDnrX&Mk83)+(P44bo_q7b_7*`|=DrHO!`E{ZvZQqezeNQRkD3^LHo4*NtFT zdpxX6a56E(#oPoBTWd7}-CYTE6CBF_dt{&=iP7OC%I8Q+jw3BaFqD}}qlP7tY~_)b zo}^+cJtL7`y*tyfV@q1KY{=k&y`+Nb%fvCm7(TE!y}PudbL(bwXx^AMjmo5Q&7_{Z zR*n)$(eizV)2x0boto6ANxc-ZqXP+dwxDj9HwEz_|^``3JY_nas9 zZ*cL zYFW;*c~b>L^H?%}w%RhaWy4x^t6=MY;M%%zonUAs+cvFNTedEome=36OFnM}JF7Oc zf2V+O_ipy>*g?he))v_bKV?jFSOj=oGA7{{n?5p-|rP0K{NMtPnEZJN2z zxvd*LyL&QVpbvwG_%LFGH$w+IGqA5M{d?KatE(lQ+nLg)xqOBOnlve?Nwbn_v~5_E z&P{4Dpq;k-y#|cyVZh8mW-J|N%j&820xAbKNNk|E&1wnZ-NTIt6A4Za-T z9?Zpkk^IqdR52}v)S=P9NE!|dq4D5Anho}+#UNk7jt?z*dC;tr8%^3dQ_{qibP;7z zrY1=l%4*e`#3X8wAWDr*F(N$CfS@>iyrZ>ojncq5y0!pI8;=w@mS#vmhACm$mPF^< z5?^RfVzC{GMb;!0S(4t!j@(wx6nFBVaZf*5_WuuBT}Q5*a6w3FRTV_FSzN!3Z%_*yA zNrSo;l%|_X;ipATga+ja#wrgf7{Xb7w44rIxU0qUyxYgbNZQ{@siZ(gGUt7`j3 zwn+h|0Bh^+9V)PP9NfpQ!w1;D{{U6H_p)vOLA3?z$XR81oI6~9_?-JM-}B`CSJ{5} zzpDj5^Zeacb%48aTj>^5#$zeKQYIbUd*lp9&sc9etzWNpYQnh`zwBZ6lA@=!I$TkdH>)HFRvZq&2{CUd~R$g@q9v5=wk{6w$%q__}%t zvI69{>yMkQ8(xmy1bK$wXz8L3Lbo+_!c5N^2Mbr6tv&E^@*~Jga3zoNb@jv9#zjEo zhMl>c+6tur%gfnQP-Tj;w3WKNm60+Lse=kBTQhsLYf!<}|0Wu>G8S;@N>XL4%(bPw zrv>KPhBzA8;3kiEQ5J@_u)$JSALH5@7}gR{Rj-EDUw@+gS2c76SO&GUG1W4_PF~AJ zK;~#{fw#4kpW$JYq$X0F7(sDd00oim6vcVcI3tJ_c@cCdPo_t!dUR}7m$r=rC(Y{# 
zK$0lQ2&Q4ZNIEyoqFeJk+Q|QNlY$hAGh@k!2_!bajSvre{GH|h)y_gbqp=#wPzPTJ zJA&L?)K)An*$?*eAT~6J*sx$hRs?m^k||XNx;HM93N4#@IT@6d7t*SA6NU`!ONS0E zsGpz7u)%#8J8Cf9I<}@qr#5tHBe-hWm`<&ls$GO-xmjc<#gQHxL2`Hib>l+mE^z7D zx}36%c=8j%i1V^1S3YlEY!Jx-E+hpy(=a=Zq22xi)|`n0STbjvKxU=jY7<+kDkTZ4 zWX+limM&Y#Zb^nto;uH|)91K;{U*z4CKGE<5<0HF^lKTqGH7o z6;&q=@8|fTeH@XpN$Cz$TC0@UynYRPcWmL%-koZ<;;{q!*}Hu^d$w)k@ZP;@OIFqT z^{iO5NF8&fv|cIu%Dh*~99YW0cV&O!oH(xm`6mh1>;+=ZD}vg?VQNfgVQ;_WjLD_v}5tOLIG8p;Hw@R z<~EYDJdHjrTDk1Y{w)LO*3gIMd5*Lz^`%3@P#VX;R;YR^<|9=Cmj>qHU=Ot&0t5S)xPB23j<2P=iKgf6=HxO`0^;r%@wg`CS@O zm}elNv|cH#SlZ;)tu4RV>SU(ZBs*P`y2-UkiV$RlR+IPAr#Ra}on^jNQy1E`aiME> zclr$SroV!#G2RRsDxbTb0IRn(eY#uHt+Oc|+Zxceg$`|+XwacaZF;oUVnBNx#`iX1 z&M<2hj<96a1Ut4a^kM(%5RR@5h?85wxUe&t zKiZ8dq2Yj3>UWonqN`*U-MuL6?m=-EIq}=OP}tIuyoR>aE3qQIz=XI=Z6Z=@5tXVz zM4|?fi8{oknGlm|La^Y@*r)X!{UPhM*miaWT|psSZ)OQ5GghFqU>-&(9=Sgg~J1P18WE6xDo#KUu zzZKR}VHlg##@JZ!VyKC=g%rT{wvxRnlNXv`X>5SGk-i{HA8RuswH3=!fb~CEDdV!N zjCIve_ATVNxq%jzdfHM5>q0oT23~<*(Su9#-$h;cTvjpOX>Z z4%+xS)h0-Q73r%bg`FO`bu1_^a+E^Ug~lzNXwb%y(w2f+`H^R3>yn>sNaF%4`N2EU zEZ>HLBwgx;*Q9>54z21~(=pSD)`>PWkCY4~!i1)gCbUYlq-C-NMG>0R3#vv@tPZ8A z7V;Va4C|K7oG~q^nA3}mO9ruWPHz@X@5#yq!>Cv`p7pC|v2nvvHdhI-lqEQ}NI|-@ zicQ!MbwqsoFjG^usrvzWefD zOVy2sT)F*3#g#H;gMz737jLTbVJUND9X@qQ#g&3BWjvPBZFu(DZH`~P!r9w5IDh9R zr>@Dq6l&KV+~@Yw$6S!C@z|L&oDpDMy?KiZ7cZ;v}e@C*M4As`FR0k>GJoL%;mEVLQVOV3S z{PPouwlyHl+krSYYvO&KDNK!{PNXtjK#%}S&cWb7Jnijq6>x<62B;Aq7DZx2EIuw? zcshF#3^*Lc>qzrJkRdMBF`cuulJi=(W({;|R71PkUuahQ6P@Zc(66P5v4)DVYuvgia#^P(loPoZ=3 zY&tYfr%i)8Qux=UYs-Au)}wL#6dL9wQIH-@bbu@VF4hvZ5_WjmTjFMIj*p!(N0u|8 zp6-N5C|49J5#r-Xm|!K*-mS|#xP6r?=T33)#Br{kIV-?A!sb;K z>{z>ztH;jr@RES+#yuWhzQNI*d)QK;bnPu<`I32(?5$$)f*Jo0R!S&%+AZ&|v}7qW zxL3(mX{}PArL0@HXzol_EnTQS?t~F& z%O{U{e*b~miuL^7J?>t<#4TlRtW(E0ylV?PH?CsWrd4dNSVZN@*({npn)x$^GjIA( z=1lI-@>zYTTG~%gHHNGEr*VGQc=oO7!W60ayl7V(M&s;2auQs~OLS4!^exSFp;0|&TGV%Pym~L!XZN4C`*hIKkEY;kK-u?8@dDZtPv` z%dyG`fl~^14%g#>#I4)R!EC2?hU66bcra(Y`d=eNc1M~lIQ0;)u^+6Pjn zwHqnT>`7^AOP$6x)G4>7PN5|!f}n&9W8yOnh^ebfc&b#~X=xW^gbk!*x#iZLEZhIl6#;g@1cK&lx5f-T=vLwr;8@UNpQx3vh# z)FE25npk2=ody=vZE8t&3mfXUcTy)RZPv$Mz~xPOH!m9Xl-vD+X)!QdwvjaI9Z6aD zND4ZHk=a}d^M(P$7W)vL??r5m2XWaRB<1>$RuD*9o}epJ?o0L}BGwZxUt7!sCx%9~ zF%lfvSeasHV~M4iDVC;6Yn2h^f-Ey-+?A1@3N5u;P#$Y3__31sAFTdg7znuZbg|IZ zmMpinWVAKWQ@Z~IQkr@-si{+qntC;;ZBP>}r3+AwEAw6b4_G?2q|g>%85`D?&+y;- z8tQ0cs3n=Pw!VbCzrLQ}$_QHpcLFgR<65}c=;G(1Pq2r4Pu^O@_-T_Gp)VPPH7y&t z)4Gi(E!(@%u#E$Gk|AW~Ym<|wM}rbeT9n$zNnlNzQU_Yrx1&M2sh~;jlkc)cf(6av z1XM8ss2EEcg&NQ}N{%O*(I8%*`VqCr7f?6KvS(nYG^PwIXW^7KY+OE&^-Bk`Xht`d z%KR8yMAAeUAxIKDVWX*uFeRuj$OXUafwTJ?{G&lo>S~^9%4D{t;+> ze+}-@2eYpe{`O&4^H#@CO+TZ%9lIa1z#HkTw7#+8?P>|;p2@Be7;$!0_)Y8#k@E;3+yX*ML<&2nUz9H zq?~gRfocm@l)o=t$}H>-jcphVKzZfy76w*o>y^^wr~vD~u~`BxGaGeayt3f5jftfsGd9@EF-t>J!Iu#_H6>}P z@fTV(tD#Z-Pc*ClCE%)wp@tSFTKd=s)Bxf@9lsP@1vq*V=;T4TnlM2K(;}FKEw*R8Z|1VTle<#AJ~IIgL*M`>`?mm=_(aoE~SNnB|%o3 zrseeP*oNL6+tQ^~Q+l**Mdubx1X}rYY2JYGgLzj`S03 z`j8XrM|oy6og3#eymw2c3bGc=7$pc9qfXPXR$wz@`c!7mp3U+Vt68{c8LQV+vVF&1 z4jnqop@aK5et19EFP-7?xf7f|c0iD`hogJsK#V0i|>YD&wsbY$;=|_U@1*aQR|E z;5-4+T6S&O$of^wS+{bfI_7Hgx^?P!D`oJyGLT)F?@AeKHF?Zfb?lWguw6mbu>%J< zvTq+dtE$u`ag>&;bEi(Jz*44WP=KW@iStol_59HzKE8d!3jx)myLZ*&Z(ly=>&Fj# z|MZ#9uixsw0cXm9TJhJ{#sVp<;R&<9bBUqmcsxTDdT=wF_NJE$Aj# zYE{pbW*P3Z%=V#0y+E2}htM=DjPmqg$})m!k`qp=!f@IY1<|(HTVUl$*M{zNZ{SR? 
zGJCoeo6)scpH8J(v{n{z7GO0g`HLo{8Z<1?r9rWQgpvI2j43M6B|D=Ab&~!dHR(^1 z;{PBi_AgT7YO39Xu_1p6v^2>~HY7jGf(GUGlr^@ZVQWj8b+e(BpsM2_JGu?9qGum7 zdi61*;sqF&hljcB45sKh~(1FL~b0c z%d>N(yt&YTSEq`2b}XN#M{{{}G>6*= z5XByZ<+&1A*O9<9dxBGCn`lc^h6@?RL8RsT6PM;rcKry-BwLJ$^TpUyaHC%fQ^Aa- z0L#qW7-M5Y%#{`_Gh;!Hf-HfJp$^7+nwScREDW`zu+>u6+A`DqUua^c{one5=1Nzd zWU~e}YN)_6(ig-TYojHos;2WNHT3_YmSJ_Y3~Qll@Lvm-dc~oJK&+M=uk|NJdbL!{ zDTC+@wB<8uXke_RgOR2-X3Fw8h7tzK+*ld{t{T{xY2xE#CJ{oxRH!FEXhWKo7}KhOCEZ)Q)1!reEKe{cKiC!m z=Qc?KtXNZ8M4Heh-jXKKMihoy}bcv4l#2*5+*+)S0ALSFB*>>}iY{J3>x` z9(3v1oiS6UFmHw2SGAR@eTUe6{4B=>RM#H7TTewzz|r;E353)-%7^73W%_T6XCdGzX)JoY7b z1y&E`_Urc_dGz!-4<0_|<*T>6{qTja0;n%d)dCb&5xH?W%mT{ZXM$7 zjRQQtw1XGQ@;H~b%XSOzukYZ)%^f^HzmAXBtN4C*hkE7Vm+R~J`}TSs9-7AO-D9}7 zcMMMsPvZUA+5EV^l-~lZ-?vxu^UfOnzO#;>cQ^3!-WGn`-7fokT)E40J)FH{U(jXa zD4=o^C^^b=tOQtA>J^6n4_Jma0wycD?W}ecDsyAmn%ZJ-u5=eFGs2r;sa#d491~<& zD3g}z>Z2{^wUz=bIah1{CBXXY4*^y+jO6?^kwnPGz*vA~Pmr@K(O#Y;2l|m66+&KY z2pJ*nB>C8o7v)AXsgT+h#?q!Rj#fqSG?o3_v;fisSnLbWD{7;VV#@O zGCzelZ#y!=J;_T5Brh?TqVx#b%ilA4U`LkBnZS&RBN;PvAhV}TV*bn-Oq(!CprTxH zx0=Os77G?O3T`&BWAkRtoj9hBkvg<@2ba#A;{5Rw9Nf8wV|xz?q)u=_lCi6&F7W)$ zBYuATE~nmS9^JgFE@88EVoj%lr4d zfAvzm{qDtcKFI#3SFiaj@#Eb)zPx(D<6GC%F=)31ofnVq zU-M}CH4BmOr*9(<1~hV^UjtkEG_<6rB>SDpwP;yXohJ3GNkX9^zimAlOP$n6aMeJf zq(GmX%vxloRwJ`cO_JiO5f%0a2~m>h#p#e7twU0jo+KtlWMx`XzfjCjVntC?OUl{_ zw7N(g*h`Sr$C7Tn&FL=D~tBg&ooaa zC#Bewn0mE|&8bdeK~0hiYs$8U-2RKm%s+@omq`1QP!VKU!k*5uwiF;HnT$F+E5jC(6{S#Z| z%EQQNxG4kUlXVHmFeW71jIbPYBJ<3NE3_uH+>wmN?gB0^(wlk;s60t1cO$ych44Hn zOtNh8u49Q?k{K=urZ~q*#Fz`*tO!W4B_`XMxNJwF>)24QAb>_qQ;1LWQ)gb+(W!;8 zU`AhAJJ8$&ePaXk1TQAWf*5lntSybOGS?Gm395`VrSR3l%21%Ct1N+`fsMWn);c;^ z)YigGnfIo;WU|#XF{!16snU{Vpe2y1Nlo3q1XzDi-QW*u8COHgq!!x7n&>D4-F4(K zN|&ORG8U^ky0!n3{Ti}g1D)D6FqHcZ<+Y6DeJo9k1P=sFNb=n3&71fC{(Jws?pk-Tir#ki zIdjgg?x(7&t7z8Jh|ZlI=-1neq5T3F(9w(b^{i-?u1|A$h}))G(ptXLrt#)9PPCwL znhlM!?J2M0Nw4yFhV`z;)N!3yIJ-ZqmyBl9>M5*QGM*)KN3u#4a$P=6WU5$t)7iLY zCR^6e5erV$-CEApElb(5bp`9!E?~l_z6|TznZZ3f(5^*8I(2BtnDHZ7zF{?+_wQxj z>2n;vdWREN?g?c5%(?3iIdSPGN6ubh$H5a!pTCS%n|EoRt&?*6xPaEPcb|Fj;Tuoh zd=z+k%Uyx1M=yWl&d*P|a{UgMuioV9jXPYsd6!F7YM-{)0ch2~2(Zliy|k+qdt0{`!TFpFi>S>sNuQ@BH)6-y(krTz%(nfvmq($&K$S zF6*0CenUtLR4ERDu7AF~)dU4ze<=8RE8z8#4=?WV}$i%Y)6lzp+jrYX>|##H&kdxwn5Vr&f$)Y5!*Qs}W1%5C>|x zm{QBpm>erzQq3!p0De9}I61hFRF_wnbI9WOgRM}upVF`UFWs#+P7YE>|*@)O2YD`KHr1xwwk*o(pBE5=%cyB%?=(0QagWw|jT z(E?bmWQ97C8|6x^6n~l&#?Z1(GEHhGP`7$GxhVmp#d(qt>P)l%Yn-1WnNhwJCWld! 
z5>D;R7@E}1qGf}8n$)dM!(R6%pEk{E)4EA5fvjv=mKM^ZV`GN(Z_mPM!`ZTQ5*y}>VA0s_ESuPe#p8R?uXP=P zt2AmRi7_AMMt)oX84+G$0D8;c6hVW$WZE^ZL!VCN0$P0;GoT;SM+|4}tl7*QGMrh1 zhO>R%Qr1nI%j&6f*ff6$yH>5^@Qz&qR6juF=-&Mt+jD>;JNI&2+S?bd^5*d~e*g6~ zA4EQiQS{~QZ+sRQdHw7u5ANRL>cw-MQ6;)|Zf4EmIqY1wf&*LEv3JvIcFFdRHOttx zdI@_3ypHbL%(2~D*tcm7+g2@MgMikuSyPxhWt`^GTC-v)TQ{y_=hjUuow`}Cl-d&tJe2~imSP!mWuI!1OqZs)FLh`fi7Dz z(;Uglav~>Jjqzfbl)6*DsWZhb9Vl(*NO@NWI`p)qv%po?o;LIn8PMCFp#omRds;H8 zrv+05u;vW0XZct+w#^Ra;F4%AY|P-!-kLl*RF4;@1gb8y;PZv%{B}}c>u4Q*JygWq z?dhCf9mRopzHFZ2NQLIz)2Z7sko<-YR4+CoQ@|>#UM1PDN>P()6f~_yZsRKCG^$9J z&}Bvq0j`{iBxhA7Jx8C^bOYiObcl+nMznxcSiAv#ak}`X7z$vS;GHXgRbYmHo;e`` zT%qa41g9Djn5>Uqq7FWB3bLx<5?xsUt0FEjRdG+y!!y|!pEPs)vMdCcEC|jqBO=d| zxSIAP)^#DNzB>tZ-H55}Mns_#A-VPhWZB{+Q00~=7HFI?4lzd9M;qZ7V~S6z9U+;H z^8QxDRktO-z7LICCQ!3h7=Zzn*vO#aU}uJ88auaVaG;x9Y4b1OIJ8~ z^8v>$-{-)|o9sJ&nPUP`CobM#>%OCG*m;0$`;Te+`%j$L;+Kbw!&mWV9l_iM}f^YK>xd9`2e zvzagVHuLw>{k#^yy0vo#8^*L}WN8N7vO;JP?n+$&teWai8Iwq6T#^P;5oN-aH zRgMBxYL%ahz>~E00$9FY0R+fv+-;l$=v?u3^wQ$4ln2bt!UZb}CyWhj(5-B&ttfOb zcfi)z25SQgY((tzEpd|bE&^Y+I;NP3;bdH?8itiBqyJL{j4D?|zhVUpL`*7+RH}fL z7)PF#Mnt+>k``n~ofIG17pKyxB%OxYq0~Ir_NM9FvH zYl)we5g{&OxOzDd<>?@xaMid@jtoRu)X7gFKO>st7(b!| zJxB=jBgEO6Fag|fcXxbjt?{<8BGlcPC_hgDUkAJ#tqAdQA~`aE?BqzQ3t;7C#?r8E zCQTX$JeB23zfoPFDx2;d8_}n06S}k&=o0DOr7?ZGHKl)#=8PHEnZUEque4OiNE^zVqDQ;a5oq75ze}4E~Amtr@eEK9d&KLgp_?|z%e&YV^8=O3LkUQ5d zuy4y+cCJ~%!7Xbzy>Ac#BYAkejCHTyTO=FI->Tsw7$)6yQ4>$k01#PqSl z89AgsGp9~s-KyoRUa^El^JcSr$pV%wn$MC2b6L7@9?KUmVDtJl?B2eGJv+9tf6ori zNPl+o(ghygyv}ohDCr|A=Atm;627 zK78Qgn^(Mj_6si_ieYy5irnK2_bwmd)|uU$+qaVA(#M|ME3=h@}W{Cs99 zw^VY`z2msLe+*X-j^N0KPHbD$m~9Iiv3Xv7`3e@pU7wL{>oBH$Arm|0Fs)lAGkc^kxpOQ- zg*yf|^Pyi8cY2mN)1}0o4h<|RuV+HJut=MRR_SX=xrI%%Yrez%$YX8Mu6UyMI)TqFx8)(^CCFDDwV6-inzDCE>8}X z@a9Bg-km7r#ev#9*_F$q?OEJfpTOD0LF}FG%7(F4RA|^Knt~>-|--1=a-Si=`xh6_Rd5P>NVl$$I!C=;9e)4R__iivA(a0$9!pxZ?D1 zjn~IDL0=1$_fuddkQJV9Lu3tmB8nUd%eN<3U@0KWO3qv1m12oof*CHcCO8UMIYb#_ z7h!}=s3D$-)`Vs{5Rz(6ihSv!fRjtG<=&n~FuN zhnd{hTmZ<*Rv$}81B?`8$v~xJR+(ys(u&oqucwcxktr5(FEarqW1}k4>c}x2Ela$W zsWGZ1p}D>RwkGnK1h}jO?raof382{u5W3r#5bj|k9hWUp9{MB)=+UscBW)Ua)2696 zt;&6A(#DGhEuE;<%tFMNMr}=KD-T<*K~4-F;mNS!-i#X&%(y|p^l9NpD|xV6rs>fx z(}LD%*0f5qr98)lR{0(@Dex3{3uk!Wn#`Hnja5rV3s_BNc;0PM)~H-aUudwR1nacO78oj(u$3wnqc5S1*6l0PF9+ zgn@*M)b=}pwfDb$68R|G(&@buOH!=LFP}eZkoD&uUp2T=koA9pD<%KPIh03BrFGDP z;YI$D?Z1DlKK$#ebf~|-;`dii`2EF0K0klRx95-e_UsXF1-2fZJ0zfWm^Zf$@#M^U zem$##*;nxO<_i9}vy2btrtk)N1CTwn<4(edP`W@=ESJXe0s z?iyTW#3sss?}eMSy*6#Awv~s=-^BydYI@k4TH)dBDKKP%n*u3;CmTyU(GN?kEo^ad za>vunM~omRxyBY}0WBA~w>s`E$9-Hq1+3geJjDoc*D|*|*|>>e;vhQhCOYRJhK#lB z+v6y}qW7wG{=* zs^M>EL2{rgg|S}LPV}K9BbZ`=s)86#Y9{$pKU1YDh@h+>k>+*MD9()~E6$hrP$#V} zYM7@bLC$6(7Wmnl5bS16YJ?93$r0ovgpe5@On!Q_fNY|`RWdbH>W1`4lH=5BO*j0V zZ1JCq~C0QJYWZt~>LZLR(F z!F}F7`9%z@pLy}~171IV#QW#Z`1Ad5e3Sm>cLA?&vi;}pANcU<6|bK@;l(5QJMP~S zSi8vW3&*&6VmFtMZ05|K6`bC+n6tYVa%ul!eip!baeW(4FRkIW%G|zb5C_-x6zRgQ zWi8peq9q5{l(Tg~3CkuGFn2^26MH8yv|Tj)nupV)G>o2&<7i($hQ@iJlw=0eP$j|@ zFfYqaqD4U#or`M-jMZjH%bJX6UBK9m*-Y)8$&8+9OzED;h;|VSE)S(|bAP&(y3ww_ zJ*{e6(zKQdE$Um+woJyX#!fU7@G7ZeO`RfhitgS91F6GEXk{DLCr?iGUnM**4C2d9n5Im$&|Jojp^LknBLt@ z8Q9&7q1{cy7_esM5C>)svSZ#*2Ud>vWaIQe4lIe|;--9V?W)bg{q_0vcq4(VQeGXd z%dh(jdA7Scch|>resKW%r#rK8j5!r*$zWTvg){YA+fmlRmL{DnXw}`4P6Hk2Gs>Mo z6MY#vHGtvMf*Cn6fCn!d1|=5c73h(W@)IG^74VO!fUkg-hlrbil2@`J{@J25 zxi$h!xMhX9sGoV<6Y zJ&~DKq!l^SqJ07#J7%(|Mg&*%5~N%h+g)s z1+Z)duxte2Y>cYmYGFXIi=~zmE6&H51YaF$B$?B)z6YJkgK5*opN7p`sMpMqIxTD{ zZf!-=PPTODZBLH@jtm^)%7n3j%$gj{v=QM9>*ztZdX}`wGSsFCTcujjS)i(84L@2H 
z`O>9H9AgI5W9Fo`ESlYqwadq|Y0Xp->F`!f{~sQ#v8-M)St~TUbJKiwZ(YE)4YSz1 zb{cEsHH#LEV#35;4D8#Ifjy)H?NmmWW_9V&x=a9VG%HsvX5E&JY~Ht@P5Y0s>-Z%O zow>uo6Sp`f?WJ3fIC0@R8+RUH_mR`=IdX{MI4xYTo-lJz(zkQ#Ua8$)+ z?LU5w-TRNRd-oys?>nMF)#_Cn*t%tx23#kOpXI@Qu?l~Et9h%`l;HDcuX+6FIZu9h z!HegwdGku-^~?Xt``@H9|MKYvME&#E{{yW4r~LE%D}M`MY4Z0M{`p(>1+>2Z@w+B} z{qbHb?l=7L;U#|wZ1MXW{{HO+-(Ec8-NPF^zjc}qk1p`#@o65OTFo!V7xVVQ5?-B} z$;V4``FwpI?=Q~OyjPD8jp3K06L@}VI&THAKB(1&H&*fG?gnYs3T$oSpC0m&Cw7CxY6na_EDAtG0wNhzPJyrnAmM{-% zk|X>`2=ymA(1*0>NNQzeQA-BMq_7C`lG7UVB z!-~}~sHlUH=(2t#9dre*^i3_F{#Ji^ zcKjWTa51TZyQvO=4(1w&`r4Wk;qEB?l{+bZ0-({MG)YgQUG;1_U=GV?PiM@q0b0z~&h;BvyLdS>#!X`Ls*tPf?&w~Q?B1-w*Ut4TSifum8&)sVyi>|kHGJ>@Mh_pN?W<|Pl}i_CkfqM;+P0bV zr%!P4>?sYjP98nTx#LH)Qa36{U1emyb>TcW&!5wvOUa{~H#ERf(DnM4$C?*Qf!90b z&3gS>1FSc){o>Jmez|j^BJ7gL*v|2c z=n%)Cw$b!#7EJpRcbe6*rBR^;4U5cZSkr>Cdcq?O9B5e2jyg50Daw2=mc#~2csXh2+=0ZBRfWE2^YSI>kRWhT^bVMJ-U z0nOU#)2f|59Xc4$vx_nPx|uMzixFdbTQFsS4U_sv+sBI8gB(~e+?CZ6{MkJ(jw4G` zIkP&8o7)O`xVIKh_txO)o&uil&f(#fB(ALt=EOXA_Dr#%LSa)!8nkz&yr&zT`@7P6 zxI058cr$KhATt+7vS3vlOV%Z`Y<(KbS7wTZlEIWQ@iGVoQr_B$f?E1Sr&k~_Q6NjG z(kH1h-YGf+0jzWbd{XrU#`NSh26&|!X?e3e zk_~VdaTDNjP1MIVNe}lF0kAYffh-ez(oF@pjB$?F$3C(e)*%8@K>{CvRj~4}f`wl- zX$6!51UiBYa11vV=o9Ntz#t~unzRCY>Xi6VQW8vhvV(L;7I=$QZY^+PClI9)h#Kk1 z{Y(u7ri`&P7pM`jR;vXCw3MgIUX{&Ju~?>h*a%=*8~ms>rUKh-Rd9P%0WJY77kw)% ze$vIbVr47^gw*SmM@mP(qL!)M*sLmsMgm!SViBvAhC2F~8yL$VVl2n2qGwPM139m+ zudb;q$A83M8CMfnGcpjcGsaF@2NMJA4Ryt8u8O;Swh&iqBE?b+chx1@qY4G_rj$4E zrfZu}TDS70q^Ur)fOWC_hK)NpQQpH@0Lzi?{p=Yu)P-@QeV9Kzjuo@gnK3w=evR$v zSYSp+0p>1+F7yxu>s}mAw=z+ZE`>}RSI&|JeObC-Fl&~LW8>meSvpZ5Yb+}k zP7oO{kR=`8nptdJH;Z*ECb44CXy(rv#OzrEnKHGHbWH6T)T1fA+BRT7$EJ)O*j=8x zNz9o&jk${!uwwHzmT%t0x?RWFe&8Zocc0_H(W{)kc!y)>u4=0X51+oQLDaUrN41hT zYc}s@&f?W9TDg%4)8{j2UetE>(H?Mw77iuwB|7W=R zA>a7^*H=ycQgHQ;D$DbQZ{Nkj{^JARzyB`qCGzb(|HwW9UH^P~EAmdP*I#*j=OV8k zUFENLcf=~)%+vGh`112^K3rMO=W9#(a$^Z!uP@@a^RswzYyv+Y7|Y|MlX-Jd0PE%& zUR_$oiwnzSdmUfzZR6|hZ31Omcy@FNrY@L%Xs(}l{Dr951A#^waLc@p61jfIg3E;jaf3vBthd*Nc|pd}Es zw^UUZ9B|iCGC1Sw?x~5oCPeP*?<$aG??#YEfB=>|4i+lIyPXDDcGhlKTe`@-f0WI! 
z(6Pvi>mw=TE;r>p__>lsJLhWHRH!NeuHNDb$ZlIhP~*S+OA$rNz>?X0{A)=~`Kv#YTv&7)hFwf6YF?E+pqxo}+7J>1Rt6Nfl`?4Slu3Z&-Go}o<@ zu3s(iC9-*5v0|kdNFd6v*^>;Vf7l&8l??Y}t`6)_o594&`#-PyzdQ6|rM& z0qYiLF>gi^BSwW%-o=5u5&o)_s(J!Ks)nH{7AATa3Sb#&-l?jX7*@kb zUnLRM(E!U-*AP>ICPM)+Rq)xsM66`Ps^}Z2m58eDp^*ky)`kLACMGzVh&51e%lwYswG6TBQJM5~~(Z(1P37E}O#o6;rh-!YylOvUd3dmP<#h;l!xR;)kB%C!gC zy!|-451eAp!IJ`8=h(XIFiTf$X5q5+jGsD}{zJx!jG>>jZ94X(UZWPY?$DE=BPTIt z+%zUkn9h{RGnqDZHZ!KrW&XS+Y}~MoBZp6N@W3(6o26p3lt)X+sgvh9ed+>dPMzoK zW$E}-LO@j??7=-PyY%yCPx$?VSjqp!Wc|O%-+u~NebeOcfBry~nf@-2_3f|EeEsty ze}4ZY@b!s5{}jlQR>f)k_4zj$C_ZXlueYzB@b<;e{PF$~FYcV+_3iz9d31I+j z!eSoopTMh=)A{r6O5R^tz@tOsd2)Q3R_yunz0JJ4v5u!_771W&lle=Unk|b|uKmhq#b%f<63j zwRV(y1mWf2iK;Se`iF;&$Cq~ZfziSltI%Fmfd<+%!UBU!~NMR*evEZb4O)wn(y<2J1gAMDX{dd~1W^Rn;*(#R7 zRh?JKD~I%J&G-@B#Go3j>GtX+YSr)rRxTXNoXGD=K=qDK??rc1c`MP&H!q&?@WCDK z+`P*5E9bd;^9uKGT;hVj(%EDCxp?X@XOHgJ zqd_6%1=-Y&jVCuWQVfeQa>GKXAabcB7h^|UniV?Xv*5u`hky727jBHC%Gb~7wu|FnB04lZ$K@pV*2(Kz| zRRwqdpK$T2fU9psd_$`d5~D|Wq8?GHIwV%tBfW+(S+(`aty`6%lFHOAtwKrTsx)h+ zL;KbSbZci!&$dPk>TJ$v84JgBw`Y{_&B#uU3~pn?ur{`g@9M;qUQWy%=*s%>K^zpw zI=eKPtE-Z^vo4tj8w9Y{L~&_JAZHhPQ=vsiC%W{O0d-se3l_$(c3l$Nx214kUnVDy z<#FzGAs5dUaq&zIE}g8+xdXM?zqvZ=SEVp-VJt%@`cmH8hT3fn$!VcSN)vrj8=H{b z!j`;N&Qx#aL{_N-X(jd~)UzR`jy2J>tO&1RPDp_X!Fk36RW~Li&rIM@?O~n7~vRfhI5#} zQ>;0Gsa6E1ni7#_L|U#DHS4(0qFFdi8ibP?ZI7R$9?`O}Q+-yZx?(jOVQ+3Mov|J!RRw?qkZcT01+vVfqcz61ihx;FLu_?b zPAu6sHp0qS)f7}g>s7SCbyL;B7#LutXP{+(HmRns6?Qf@6YE%cxCHzRO>{9b))CM% z!P?9W6S15XU^xn4Imta;6=WId<0xS0Dv;>sU?G5IE^uW)xN{{^gLG*jVAQ*PG+jGJ z(!AV{2F+Zk)5K0&N!YlfE#=+q=p?^o&wkd76v&!0Eu7`El9)FtiqW0C=vU@M_d1Sr zsp&?SI{vg5!0Op5gX!bjv1Zk1R<0CTI!eH5;(q{Z3L91_s8WD6QChKZm6a>9Y4vnD zH$~uT6!T{eV8)c5OrF?3dPF-$2w)BC)ryIOyD_GJH-`1?!NC6g7(9F=lV>ep z#KhSQ8#|kEQ5?_1~Git7{-p7#KZ~HSh0Mq=EYLeg?n}%)IjU-p%ef0 zXsP(Db7wDc`Qjz+-My_r)vK4k^76%VtyM8u-@bm<;;{bTVzIO-#DBhP@)y#I{3-vw z{qvPC0#x7r`obUI1-3MA)+hc^(}!Q)%K#yO_4jwae*4VF53l+Bn^@?t?(+WOY2M!4 z!-rd2d4F{+&yUaH@uA85dTJ)`E-c{pD~tL4+A`i=UCwW}*7Nznc7D6Dk#CRp@#XFg zxpp4!&#&Xi?7mEEoky2+ADRSN(lo%5X8x9xxEfOIZcJTwQwp37sqJS=lcWGzWJi%7 zu#ns-WplY%P^pgerE zX+h<^Qr;~uduL6YEo`K9lGpg)EYlwp?;B^S5lBhenu?S zlOwb#Km~Jk1kg&0(kYXB)lQ3~K~B63eAVgRv^K-KHfLzhmJI0GLX(k$I%poOdPRwv zsOdp9O&AmEOj^9J23>{Okpg^inwM+Nv_S$Eli478zHP%?wyc}O(s|=pIAb(ZM)s$3 zi&A+WB8m3(!r#FjD*;wB&FiN_fEc)0?)Ef}4yH{?EFA>YduFFGu%J4FigKAKa5b!9 zeQE>>pi95(XKhHBr>*>bF|=-0!mxh**tBvDw=Q1c{p+{<`9%ztkDvJb{=JqrN@@>VIJQr;@%wNlfB%Cog#&P;7vo!`D$ zfNIA!_HEz7&JF9>wO&m*u4CPj#jKn^k5voiYweO*Gqvqq8`f*!b!690E}b~ey{nhG zbL9%xFI?d2xiefjbBgQdRAB!JZl61<0oH|s8#uFPIp_8$$ePEoP188FeFmquP37j1 z*<3#`fupOta%g3HHqCCxg3(zl7?a7&p-D{XAH%$nnId`27@W(JagCTiq7lQ|XA5v- zGq7m}gPZ5lw=AFbH8N?C6h~fI1eszm#d&xW?dC#)j}s|Dc4S6aQ;=Xu%~W%0q?k)T zU?=UxFnj|v3KI36az6dPkeTACG^*>%}638@xDB^nbd z=L5ra@ekF*D^Ld~uZq~aRlw1+A|8R&2#S)iFTsGAR5eX#LPiY>vTK`Ay^bCQ#X8h! zs4HWD9!;AX(yrW;u5B&o+0K%|0###rdosSaFB5tNFs54ogIc@MUsz^vYe$B+w`D?C z2Nn+WX4CjE_Romn#N0S8E{^B+>J)CRjOWtgFfJ|%p+bAP+2A2w%$gC#S~;18g`x{y>NR$N^h!fILwXqn=dZHQ;8t^k$}jxp7+i>QiqXjLqOs$!w03FXVS z3pTR2?PN2$M6kdZMV?6;aeJvJC6?3*U7Hic^;K}?)pt-3)kGZaZn4tickp(t-X4qB} zAQHf`){*NJV5to3#sX|AsixH1yR zGE<-@P-S7D-bX&CY}*T*Iq2)+E|BG_rV0hzT;wzQJD3sfW=xcu4sr7E)yuG^XPZcR z00Ke%zI2YEWqAPg8aq+Du^shV+ECKgie~bgwd*O@^cTPy>BiIvfh?F3#o`I^%oq^P z@NzGD6}!;2wmZEVg)yj&z}3j+tXMdRW?2$|kdRg(`G9UI0q1BAhIp>r4Tv z8JZ_+#lmr{Tr{3dYo}|mS4-y)XUe#)Oc>psDU-W0VpLm(4Qb7gJ}sCyqzmH)bfI(e zMwFM9(7bUITD56UyDq(HTHcjrZF=qUA%CWn_@jGuhrW(FSXT#AAf((Z|~k{qPA5$mIhk? 
zSyA}!>Oz&m0si^#zrQrZ`b+t+1hD?#tMX)t{GsIUFMJp1(qgn!37+pNJ;XNwukQk0 zfAYsCwOsoiFYlh@)s6kUyS{_h7uNE-K-R0%3;5;8RGy!h$(wWY`EY#&?{BW*#pPAJ zxwc-*q4meZeSE#MgEwbZa(~ARwh8D?Y?evS3_r^J%_#9Opv+U?%2}7%jv}tc+7w}N zh!ahd0%?%oPhNy4q0Xj+%V3ug7c794KvrT5sZrt7&dSz;+^Z)fX(<|_1Acg{6ksW+ z`qx{fwv{*QU+c5sB2v1^=`5)(B&vVW@nF^ zm5o4|6&^Nr+I2y09{4%A%Xtq1-26lWv;?CDRYm7aWPn#`8vYAd4yXdq7J3#UW*7-D zRjXWCpsKRKm5PfL$WoOV^z_k{bGns9Pk*X{iL~Ywt72bO2QMQd!fmZ75(~b0W-1+O zRHs#5DtV#aB>Om$9^ye(gfG?Of~Y0nQ(IssKPiH2<#7u2A(Q%S8AH0aVpxw>4D8gD-sKJG)~q%?TGY{URrP6ApZ@I|(XUgP+_$L~ z#6D$gZ_Sg{pjL`DH8^Q>PX_i9y{Vh7d9~CkL*>a*(xqK7lg9R9u1eFeU>s{zQsK4p z#Ne98h86QzJZlnT2lb_GStCk{3do6#!%coGEBP)R^$hXW*CWo_l)6Elv`mPgMNAlN zVj~%lm(7II225$*gn{*Ik?HP8kQlf{iQ$wKq{?S4m-krA+2hB#f8#d4KKzAGzo{bZ z-$X=zzJ8JRGr#}#mbb5dKH)vie1yYM$K+7Xpm{@_lI?%cqsy&JfAXe0N|?d0CsZQMD% zkvk{WaQ*O7PHvgZnXRKav#~$N*LLH=_P!ii)rRd08nSC?8LMa1VD|770j)Hq4oGKg zk5s1gD`0%jA_0yp#&oX1@YeYZZdpL@M!A&dr%)>{oQ$9Vl6?J$_HZZ6MGP7j3qsx0 z!Ye(ZysHosP?=a6r^9`92=Ub?(5o7r&OhPiSOE{$pYZXnL_k1Q!om%Si8m)UQCK0- zLd2ZtBy%F-%>=GYMJx!8GRH5}7*`)1tQ~*C#lMbdfr$c{DGcur zLGQ-ibQgB%)6ACPZJe0W)q{oo{nUQ#{c6%RcweyzNmzr&T$Se0Gqp4VY4Q+|7XGwSsQv$0S;GHgzl~4ue zxT@N!L&s=cfh{HaILVjm9&d?Pq7AgW3t zC)Rn8qcJ|VMtG{}KLY_ZgGySqb_H0*Dle6Y23ckTSpr(BptF^!0oDRNRw~Q7u0G~^ z3bxF#HnPOl&{FOr+d6Vg0Lw-I%g$JU%TS7vJ!8zO>5FBp zpsFf*CY8}Mu7bXySjdJtm>B76Q-qdsj~`Qna!-AIZ1t+)Zf1;|0x;c50&^-BOCDYa zJtAGJ5$RGvnLQnk$x34bs!|$;vLGMp1oA8Z@P4i?+0C*^chr z`Y?IY45m(*rA-}f+PGcwWNq8JoBQ`3Ymjy1@Chzoy2kCBxA^7pL(Oyb?(J(0s8sxw z3U*ifYGt7UtndHvVkx*%s|^4CO97TbEcjdGhyUsy0j$4}D-aA5`9t$)sXcY{FM+Lp zl>GfCpTBL`hdHtUrvajvcOeU zik+%b*GY$BFLUboSX0Z_hT

YDapK8RSH`s~KS)He!_qP%9@@1FU2Lto-B@a^ey+ zz={hFC(1uW^I-iOgY|DdECpD}5i$P_c30q~0Lx0>7#B-hLOi?#pd19UoCTUZ<@IhF zRC(Jw;c2VD%2sQAom>Q{T($E7F79&N4j%_sfw~~vZPbc6f86Xn(W@f5Y3is=51Ja; zimuzC5|3KwTVX23k)^)5=26zI@*lubdMP@nt5XdF0W3X%0=>%BFsZ7Gg>2jC=-_Ig zF9W|B$?i@9o{_XG$f840Hs!gg6bnSROC=F)*{F!_?HX%P)uBZ(ZJO4hb>rG}lg3Qs zfTOrDjrww|VXX`r6iHhnou>7&Xx%W6t}W||Zk5odO$mcKmNBA73mH_@s=USwk^P}v zn=!mkYeo<4%+zsxm^*CB`^QRAdc>h+6q&plqu!lnj_j2smK>@Ah0!O>K zef=^AcJE;Qs%0EIxKG=E^6(z-UOngIyH|X8^Mdyxs>t-mcdz;I`UP)(eZuo!ewOR* z^We@+e!hE411<$sDv(_rdvyN}H?Lme+^OT*RH6!US3q|B(0=x9-^$UwyR~?$%_~=E zAhmko0+!C6#k4V_nKxy!23@l!PSBuBO&uy2+aRE|cF7Vpu3F8`O&i&{aV@(yt>f^{ zjqKmNii4Y%3t(;H{`uWJy11K{w+`^?);^wJ-Ol}!D+H>>b6{yFcF%9Yp%ty!zq~nz zR=429=638`)r_qR8?s|bQ?@N^!P1EhnLVsNW4jkIpj9TLJJ(`Zn<9FZWz(Tf3Qcq4 zC`t$+EyRbU08e85-Ne{Zt5r=!7xnRVs4RL~0dD~*Um1_Q1cp3Zs^IBT8CR!@I6M4= zvy+G%cXO|duYgium@&Z-B9SJ9#tNjxN&gaSMo6@|0G5pamKDAsrnvbUVB;v`wX=%D zGQ>Yj04&;qgj74y^BiT&x2JkdM{1V1P_MBwCC%(9ZDCKdmiCmF+taa~6FoY*F`%nA z!+QiWsz;arRveT2r7^irmi(=03}_iGfaOEa#%>I1<-zE--c0Wn$npVEY#JWVuF(k` zos!O_c?H~FQI`j68wg-^wr1oAcNWYEX5*S@_U}j#8cyZ%=?t!&&)~+z3~pV{;`ZfC zZeLC3&TZKixpY09;}?_Ib2^5#2Sb>*)sG1)-RM8dmd@iX=`>E-u}-ub?m+n<2b%YH zp|p<&CB3{T?&(SW?jF?Z?oM$JPa5_1qiMe&S_}%Md|(7E`-jrJUx>8fwCEp3<6c1m zUfvY8ai@9x&eqNBxad8HZRo@|6ivWn4?FFwnjz#K4oML)pejrQ-3UW6p6vs1UeevWuvc671|qC7I>+md9cik4F#@r z{|i`JOqM{FilwqNF^~>gt?VREED=v0$GEGmD9azi56HpuuDtYm)51MFpr{)3^GzuDXLM2T6G&zySNE; zikp#_U!U}}Lef(5$w)6GC%Yz50#$y#5rl^)5*3k5U_dm1svvM!0%4&EM1&=hl#oqw zaxMvRnItNYR$4Ckc{K&J>QYbc-@3em=FJ*1cqF4oNvF1at+u*Q#bv4KK`s93^hNI6 zy3fz|AMoq*r&?gUio5z3rc_?6fA@a?)t^FBKUNhgFP7R?bfp06ANlVuC9cSG@W83J=cgGn=upIObzW6Rkyu`^>ECDA3-o$?54 zO56pgoU2gkp+iG~^+ujXG!J#8G*~(;Uuzo0`OqXSl(YbQ0R!pS+^k3yz^a**OnO`- z>9H!vJzn!*6{cs993HJr5h|!sko9kXy9#JmK=p5atbYMZL737H6^j+@E`zXun3^^W zkmKq#UiL29zPq)(HkGKZQ?F4|je!DT>ORiqVrV(J6B`sEMve`h0$09n0qCn~e1R)l zOBak)aQpuNER(9n+7zLg=&nJvs#Fum(pLIPKTxfTO8l#ffeLgN(R5KyPYfI*0<2Am zcCw-{%!d})@nVz|(z#X+#i|Zpw2HwBBrC#)v~VvHLR5+h7ZF!2m_6Lvp6sX~>SZR< zI6sq?bqi=EaMiM55shkPQIHW$_2h7JM3N$X1x9_yl>4POX|Dq(3M9PaZ>CvhdL%NnRwr@GphjnIwfYRcb!^J=n zV`%kEEp~GIhIukREaTv=6&%>PoL!q2vSsaT4Yt(wzU@odyKM=(wk&4b=EdyZv6{n& zwsPdqHj(XIID3?fXO3y5W)AM&$?nZtShs8?lSYiAMM)E~6Eg63bjQogj6_#E>O}<7 zJSC2@s4z;yLg@aWM!?%cU9M$;AEzW$Xja{i^t zC-v+x9|goz5d3G6Z=c?4qE;Y&`S=@ueEz^6a_tv6_x{aGetq(gUw*#Fv&TORY(3Oq z>Z~d}E$y#Q9{srI)5pAe{zMaX-NSpgxqIW9Hnn&5@IlTUIn3pgC$wO91yI}9uF+uX zU!YnzeHt^yjn%wc%7e9a&1$x-RUWW)8er|&vXNZ^S!&he#iP49e{d_84{zq`v5h>u zxQq8cAJ+iu)s5Xexwt|4iCJ9SHjtfj8?$0uJ}V~Vv3X8ib}wthuH~g{p5K6#Q){tc zYz<}&tI2{fWzsfgSjQrfS`2Dio35qVw5*j#N%cqyl7Fm#jSqGrGQgHVPcuB7^l-JS zf~!qMoGdEfU|s=7s|q;V{e*+9h|Nzp+E>Cs#MZVV*47m?aS&i~^{j@Mzb?MPVzfjU z5f-BW%R*W!fh-#VDRaC6jRmTVarUT&ol8YL{q*EoQzGIlNY1b$E8mfvA_od;J5#5j z8^ujsY1G1r#;qJ_UhYVFYX>^Cb)tI*H~Mz*Vn{cCM)!(hOwV|RcaCF3rxcMa2DeV7 zSCephG!CFo6F&wv^UKO>*O3YxyIC`Qlsj|h z2eEElBs+J+h*BhQ@)KK#Us9J2$uw^$ zR(7}zq3$O5+8f|xRz;*T&Xzhjs8xT41{muJVCkuRR6hVqWo0*4uw|mWSprxl>N+K+ z(n*_RE`ViiY$*U`sVy-#*EJ9rGQ-i_N^4cA8wFF!Yh`C_Asw`dc8!87TY0^;g(;?{ z@}2@o#wI#i$_7(o1$D~eHq`=s)v82mfh&7?e>*wnXlf`>rH`GF4t4^6PG-9J*i<9J zMTc0|%B1;Mr7Yi>?rjxdh10N^C&evXDQW9MgLd{b>S#@?9=3EF?8NZ#ek@v+!Lr4f z%or2J>{0Q|8=k`WE)fiB5zN5yXoh#mVfu*XES}w$r3;6#aKR8(EFH=6#lu*!Xawt4 zjAPZ3u`HcGf+cfDv3%iJv3e(Jaak$=e#IhHbF2?j#&u%inD&es)s`N;%P4D@O-apE zIyS9Oqku`&DX>zl zI{ueRs`d8|xcXQA7Eyk!KmRw#`Y~;&{{8ij;FrLZ0<7jL+q*lZqus`bJG=ONXAe(~ zEaCdbN$eQYf$;)Z-BUeiA7w)$>9~rWD+^#%qf8*Hv9AfOB3x)1;ZB24XWC{((ylO( zL{Dq{#6VGil^zpJWUh-U62yEGyJK|{T@!u&zm4MVvwp9$4NmV0EtBLU>29cfs zmR_|gns-^JQe}*)>S9zy^s{O;?2V1JU~RQ3F4D!4bUz2m(!=OnJB6Ob)oGWPLVbar 
z+OdJ;NBfZ);zn|)2PskBWW)!O5f>;RB!*@ETza=|OzZlEln9{KNRATVbSEyvgX&3P zfk4bk4XzYy zDX>!ehxe>v&sG7dEsHe3+Obi^7s>fe%h|t2;9%EAkxiUEb%ZMyl*i_PRtWjg1iFowy`0>%Z1u80#-?J6!^>E=;%OcL?mOIHfP`T8El&}nQdxJh&?`^Mv=W1+YGT=AZ9>i*6|W5DgN&`y#se<;zF@_~R4b zMAYitPoLlO?zfksx9>$yKWJXAKfbD@zsevIv*ybuEg`9rFCX9k04@bt3a(^dLD+99 z$>+=G8Z;@0QmYdcR6Ul@qo7MAGgUEPYF`D{Dw?Ne?fw413k7`qd`?jfAt~DIov4NvI*KuOc zI)STg+&HnB$Cvl;`pzL9o!`vU%Uij7d@1L*j^gT$K^zw#*f_m5>!#LV?X)76j>}@% zr0T4c?S*6WnKn3w(LFL4A$|Ucu0;%LS0M0F%%F}9=+Y#gX0?;3pBqkox<6_0uEffC z8Y0l+`SyaT{>L)A(xGV&?Y^*C`XIoWkEiEfzV)_%N zW&&FF0$;9`aQCT(f2g4bSdnp7L?ze~DH0Uv-tDX}LN)=qWP*(0l(>6}DZ0ATD8T&f6x2Ic2Cwg^qWk6?7M)VA2sEl=e zTZPiEbsPiRrPHfvA|308(!O3Go$C41wXU#DgCNE=k7ZWdR2Fp1U~TUL4vcNag}L3h zxS$sm+Nw11quiJ>*N-J@gIT{ljP3g)*?lB}U5A6&aU_u4CqmeJGL-#if;n<2oYOaB zIdfYe?0PK6uEuisd^CGcMY7{q6l?Z|v0zgmGu8$&bxjBpSB5cWSvVt>L@{`N9KB~m z(Q{e^y+pcC4x`(I5PD7yr{DBw2F*%j=$vE*OFLv{GQ(%5GGcZbgJ&eudtw}&Mnuq} zpFhQ&T`6p3M|!CR@%2oIuA{_)sJhlf)VCLLBC3Ha(IwImHS{K_%$LLxH{xpBkX+k> z%(@oj*0H3d%#%*-6DV&iN}TRRR-6hhwZ+A}s#wBR1!5{{X&AJ$2-3l-8i5M1%uN(v z8ED=tGX*^YI2IH?`3TBv@SYT?XAj=#}**6uKG84$MF*X%?7r>J9Rw@u)&MU8# zrGc?lf!|O~HArhM*QvtNW&&8ICUVTUn!K-$++Tps*bplL*Ni%TXcF^$54 z+W!Tt9^LygaKJEz4IQm*E15KLx&~Y-g~Q1cXSsg$8V~N@<=N9;G%uD4ZdZ`?RRBxD zl>#pnkM%ETskkiV+4}NX4FEz^fBl#v{6|Cqmhxmt*Z#jCOHTax&tH63!SE{WgNTaX zQssF5`dej{7m$;|;roY|e0g?)$jD8eqwGpobq$mi7Wkc484bYh`cT?OX+- zyzq7Q!b7&5Egb~BT(oUZdpEosJ!D(Kmb<){isAASW5!JM#tCOzchONhoaC8M&xHak zE7^BbX&lV#1)$Wz6MZp!^hG}vR8_%D088b*G8MowsZtd)5mhG3RrD^{!A#)5iW*UF zbf^_apGN6)ERsGZD~_gV(bS6%qHc1ifKv#`VQwUbyHm4zEbW@trboL*w5VU5f`l*< z{8VtTGbzEI+7x1}0F_G5kRIhnTBM&A440W6d6XQ zw->S2HspHx&@m^UDIGd9vQ-=UHEzo6VM942ka79MF&+x2yn6nW5AT%6>36<;`pn-d zM)8|ykpPx5ZoUXCDf#~WM{1NmzklKPkMDT%?gbw|DR1*9O?SV1`lwy6MCq62l~##^ zMU-*$#~)wh9^bTM>fC1qUJA4Xtdx9s`$_|;f4x?kr|RZ4?#XAobK|PEt)>&zxu?=T zmHtEJxcWB-mRc!z@5*HjwA5-r1zRexT}>gX)C~u=ZPfzacWqL#QJ{Ax2X<^{%bJy1 zt;C~yws2s}D$eZR#62;%o?bu1n|sH3BY^ez!WLfM*u}l$%h|fl3 zBdc4ocWJ2xSPREwuy}lR=8jaE+zXk|CzkJer@Z}vvqx~E@q?J3DnFB zCO2E4KFx`^cnd;8_3`qmgp*?h(JcWi+e+A5Ru;e#F&B|-Gt&w(##O+|vJ$pdRi&+r zwRKfYSP) zS&is8eG*fQ$jLLOMlEY<)wQ91i5+E4e}Gl<4vw_yg49*ervW5a;v z>>k-kAgd<_CiJ61i(b}rAL+=5nI24E;ltuB0j%5;#Oi&1tk~_rvOTV>JmA5a!(MDS z=EL^00UW#%!LjRcoV*donVX56zM9DK^YI)!mB6l}@od}|%kmvD%-<5tjP=n>Toc2% zRk4g%8b|**5%ilK#-MqT44e~5zZqc+mUj63ct$TuX58Xb#!5SOQ7RLcWHM=KCgT@n zFlt^pgQkmBJvNdyg9B*X)04U#UC5U&E3>%+X-(`&ZSG8JOAk_8`H)iXM@nn4CjHVO&*%2#c^8I`nVt$?2IbmVpT;z=O+xYBjSH zz_OxY2OC=Uu%}%=NBWHNrvGp+rc8@v)9Sh`pP3^Ti!Xhfy3(uElU|L37}Yh0i33X* zJFtm#xSg0ct3M0p3}ESkLChCmS|Ix>C)TI}1s&RVWx@QV9F{@i&aK-#eIgyD z$iL|s)HI-q#Zq9UVzK@UU1B}|>*4zP`3wL2`5%|nzmWAGz*4({Py$&02weRwa3wIM zrWXaM{x`t->pLnN^`9T#@aMbde0h4Cw>M7m;`|JgGv>UIJB~ z4j!6#J9`UAdE#vCEXIWc9`^3|xcJC1fiBs0u~dbcRqZ`jEzzj8p_R6Z(9zme^vMo8 zm2^~|2Nm#cX=IDD>^qn_Xn>`3QvsGq(xMHOvIOoJ3!JY)lAs zG$mP0n`ZdYt5Groo2ApOP9p8{5@@c1*`s|a6v>PCBS)luVFIn2*3hJGK>|r(9wew0 zhW@Uk30!3gIHgDUlAjb#K}sZ<0#pjB3S>VoE`;2eAZn#VQ6x~78RA1$hyZPbKv$R_ zc~SnaX!Z0?QfX3(*`9xcl9$QM{^QJSX#mx3%6hc$Pa==1c^%$Ph{ z45+DW*|3T|JGO9m-)=6QJIlS>cSL9Ia^u=9&Yig=Mv6*#a*5r$j?k;e0Aj-<@wBld z(B6h5UoVn8JczKgCCR~sx?wRiP0FNULK@u~G-CDiSzJDHlt(vi@ciM=Jb(O)fXz!j z{-(0De-%X%-4Ve0^V?TVAC(?z)AoM|6n%Nmit z(?=CM`Gr3fM9J&_{PV}uyz)H%CHnoN)~Faha$NIVe|X0qpGCI?wA2*f(?<_9uzLFF zzBXm3yjTB<=DE6kLp%5W^{@OcQ1<1`Yrcx8grW+xR82y)txD!7=u*>%N>rY#^GA

yk! zBxl)>m~KT>q7hLE1_D{t2#={uOhQ%Sld6)IsY8CD5q0XCQ@_}phNV_CZD~*QcD6L{ zWKFBiRsvWSBBr$OXiP_ut{qJ2(ZQPD9i8aY-h&?HK6Gs!KwFiotH7Ox*`74d3!#0@ z7)`GSHppN?%bG0g(uCE$+p=k3SGEo9&Gumf*fD$<6-s-W({8v8eWo}vYLOc=Hux}a zhcEMXdN6CNBQv+zGG~_~^Y^*1^q@QIPWZ6nTnGm*M{(jt0vGS5bLmzFXRoAk>Ov|f z&u4JxbT-?MWUy*avOrb>(>EkAacu&lm&Y-1ZYTrhgfe776ockR(SMcz*4$`DEKFqV z;uIz`MlBG?nvq1$3DI;I8A`JO{uKA{p=Ku!3gycv z>?j>c=Rk_Og;Cf&ih}OZ6m*Lwt91}bjoe8rwj-;tBe|v0)-t7LeFvJi45hqzEagp8 zXx%uA)F@Bvj4R<`TTSy|DZnyQfTgSV1F#g-7#oTRUI*Q{Yw1kgayv@e+tQ?)11)>m(`}eQ)DRB_5BF#4xELl431?(ie})Uy z_iOG$@1}taX`jrnuKA4U+kmO#J1}`%CuUCRCh#(CB!R~98kClB@CTFmK*C@?< zwR!b4mI`c58`n+l-I;ka`!I8A4|?<{rCxC+WsO8>i;Kuljwi~?8*dv&9L=n;v#=3( zvJ<7V#Y6zp*i?X3EL$fB89eP>1*Dt=ejTtEz_PUxpc24xwDXj0R}H*WDHOG@081s7 zba(dGyj`x&zIeI^5Ec?kLR>m|xix9gybayD_Mub9?lh=ZN^(LvvC)adh{OwsHE+_2 zIkV=A^}V0V=Pz*U=5@`Br65aru~dhvEZxul@mMLSQv2Wk_+jy?&i?x!f=|kWr2tFG zf4o?7L~ROG{VV?nTq(f%TRM0pKZ;2IrA-w|NB`G10kz-x`}dc8etws?x6knM%0Zr< z-O8=@skp5UEWv(HYg<2DHo(cB!PI6*EP9S}%M`(c6IwhD6xp8FsIS^)VL{fk&@d56{ z1$vVb5kg!@An`$g0tPVxUXdh(gc0QzEI<_~;Fa*7V0VA5jqp`dg(`J}hX!2{zQF=k z&ZzuV-U3GIcvL_*o&r{`_HKAMdf;j&zoP;yS0Am6j)Rq>=z;*2lS(q`kC%X#n*f%B z9J4dE*1TJub^=`1E|}|EVybI_jfsQkmYwLCtytDhqMJ4vfT={Ird5qFsUmu(Bf2Ia zWg&)^tD-e|#Ca@%}@>Ho{_zc-jj}XIA zz$;7I8Uk6hQlqJ<0_PQMMM~cn8zKM~O!aU-YA1%%yfB$MsUg(M2q!nzi^KpiFk?I^ zO7hn{SS^cFX;+p_k2WNVPqXRLt^s3*bz|bFo{S&agV|GuiE%Z7$z#)w~+TXu0z?)B@#N4$9Qlt=d;2(&!r^{)a=?|$Rc?;rW?&0F5Qex;>OdGX>o zFQxtMx7R#<_L$RWPjc_)JG}kv6=%<#;ke8~Iv@A*S?^{>B_ zaU;O?{ktYVbn$NiuRleGEt-ObPCRJ!xU_LK+oUr*_Ras#o_VGs}t83`^ou3?#!2G@x#=F~tpyDQ#j-^Kxriw6~;XXER!N zGoxKM6WVn#qt1S(x+tV<|i{|-}bf}d| z-;zAWwkly>*S4(c(}Oj=d$GKGcWL{vY0wBZ4jN5`hP|aT7->iMsZI=D;Ktb1o=o20 z#njCnOx@(nl+E@`6VRHy)0IX0+*o_ei|uCu*?TF1qc;+{a6gM1k8-*Fa~?PER_EHy z9L`^=&fyc8Y~Gj3%55pk-I&a@H431j7%*ImJ)Qsn|MW>jK~y7v0kc9Gv>=Y2v!dxT zHIhDaQs_G?RbVTHA#>6gHaCe8^AZ`oB#Ch=k{P=)l~K#m8NN7Ez${&^OQ!pjc-oDL zqS=s88ubgJL9akc`h-gxDQz5$g}NILN|o&-YIX=GyQx@$4V}p>)!6i}EMfUA|tg(W|(@lOIgRWUX&5Lhyl)({f|<&6?hQmq0e0VQ)) z+Qvx4*jxZeEMpT(xz<#m%n&oNe3hq3t@^XjSFuq z_fm0HYGtBI31KeKq-qi>QQKDLMgj-s0u!cMOqSYrwztGq0815xR$ebF3w>^@;~EVHL4GkQQE{o1?Izl{e2+W0Y`t^9@^c)~KgIT#^6!T^eWYJuKt)&y#xNI_8mrrBWoKegg(}Qt?+cIiUYX*zu+^u6VO=ZAo zP%}dYom7$|BJp?fz`?{8D(1C6jBC9OW8KF!q`v&tp%1A0%H!I zVlBCe5<6+Z%QoiD0-grHQMhyty@#oGS+Z)v5|)__V-OE&pi z0PAnrS01Y$ow~e6UGtZ8@+#n6e(X6(!GFFfjs6aehH0 z3P5FsMG@xeMTB<{F@X`p1xE=)DXmdYJ30V+3Z zCwv5)JOs8}Y#p(+u$JG~3Rfp*++CFi%1!{>8V5TEoE=?ob8*Aj&PBl79(P9-)b5L; zDhh3GCy?fev5uOWH^ElG%A}ep##KcJ^{qsHk_ z=gij776)SoY<0{r|EV(WdU}Lgn~>;YLVl<%o$5wwxq=3_OJ_iOh>8sx`OoF~R)Ne+!` zrBL3eI(<5pFsgrRR?Q#H-YxSvux%kH4y@wHp5^S^Fjq?yJa^h)W=`s_mBE=eeJCRb zb!2egcA~TG8Q7;C!v=O?!kE5{AKj11;|GfVkCOgCB?g`f1{N<|$fgaO zICA6|2M!$O^yzb4x_pf%PhJRUJ>d4;hupY%hZW0K(4}pA5(UWZOiXbV9rv=f$H~}? z5NCI)$0X9VSvhOxE#>U~qg+09j-PMcv+NBs8T9eV^?R;^miS%H>Ej~;R5$`t{wB6Qiaw{^k`FNO0zmu{EJ8_v=m!&%sEFcnJrSqoq}&~2I< z0~UBNe3=JhR(mjUy@1w6ccyG|W%?F3=4|(5=^hW(AMs?{nE-ZQisaz+1kOIl;`Z|* z9=)u^(-*b)<(DGv-p&&XC5MAYGuf~smHBJqnYuKFQL}>RJJE~YQ-kO>BZ}4&Wssj3 zN82fBv>l&J`>|rxO-Q2Wq0xV9S*-9%c-j~cBprW{i!Nbe3FN0+Yk<|QM-)xV-6?DBL2)ZL3d-y$ZsSU6 zCkI;gaG-NvH@f$BW5^(X=1fju$+Rpc^pBu-xd(mP_%pa`9D{mhFnVZR=FINM^cfu( zJF2N{cVW()?#x}#hxv;JF=y@omM$61y5*DDxOfuVmP}#o>@iFq+Kqu7%V^)M22Dz` zXi}U(<2q^7NRJ^QG*I(c*qhp6u9nQppl4}rgRQMCwszJ6Spt>T0$UbV0=5?N9a#!| z*IHE1%O4I0-=}Yf)PkJUh(L2SNPSIk`CE75wz=O$U zkxXqN&`>v#_Bl~BiVGyu%b6G_2clg)2(tGe#w(P(xGZv_(?|)9CCWXJc&{L;hsBZ- z5J5tqz*le#pA9gn6j|_b@^{f@C`wFFOTS0=4b|(z@epVT-G^ z9nLm3VhLNwvu}x$vmH(@c39h(V=Kmny@LR`jTMfLPB=TbXhx5djSD^={@7Z{^P!4H z3t*Y)naT5|K*<(M9aAi-h{04zjH1d$m{vBx;3r*lD^x|lq7H@vSSql+s%)F+sf8-? 
zESY)YX6%APWn+AF^og@FCegMUbtCQQQx?aV?s<&rl}De}$+W7i;x~dA(z#f4s6E9w zNo2=_QkbfWJBN@J8%S!T7cl`2S`1rqh!^Q$ek244u=+R=Es&+MyQhSCYY9nfijEXX zTQ@6#Mg{3Kt(8r&KvsTY7&X&kwCwCLT;Qm8y3S2#T@{C^|{=)HtM;OqnAK`(a(wE8aV`PT0t_-k2 zp>%B3jyY3jvTw&8E}T5ejSE+~cU|rwkoEk@bAJ0x1+9PJy#Usy&tFB?J_~TY<;m0M zJbm^nFJ8V9p!%H`uU>Qe&OM$9kbVC8jYq%yBA@NNe2$y)IiB$N@ni1Zy{mb#Uc7k0 zD^>4LK3S#n1P7cJB_q+`hr1o7Z`K>wmQhxNZw< zojiDg6Ndy=Pv7AB*=qt;7d800bKwM!t{&&NM;G|>9DUlx(5Y!Sts8~Wq;3H93cM+<8A#p2 z0P56=rlh1gjT`0*Y$Z}V!=KuzuGC7kAveL0wAd;n#8o0Ht|GxP74eIxh<9*BJOe7@ z>Qf0@*Pn3osEDI`C9Lc!U}jwb3!93V3s{+1RlwY)0=7<-wD>EpfNBIr=o6JBMog9& zN!bRZ=IW7AU?^ftTAl$Z)%E3=0hxuys*zW>3bh*PQrgOx@{UGy z>}p6S1y)_nXxGtqkSnG5lG{)F*FcU4yV3D-K9(+%A_T(1=ruorev9K6ur!r{%Q7|Tw^#scZW>*tCeeOEyg-%`fvU0T zv>mA=i*6GD6+!C03+XG?tunR{}is2=X+-(X5idl0cA&5e5QBsw{@S3P#tF4p`vI+|UwJ>7?YIIkmAKT> zL?3e#Jpl-TF>Q)aK+IeK%f?Iw4od+JeT?-h3rHB?BKwX4URGL;E?u0g4RN%T>kZ^V zHWUlnvKl_NRb?QnLUxiR?aRaI*kKG)wm`aV69$0h(!zgF>Piy z<}4V%yoE!g!yWx2{%YZbAAmJ$43h?Qpj-1gl-5q6C?}kpBtNoaeMpu;#LvkNTLD!y zRb#4asU?I|gQLBjJ(iZ{@|&oI2FBQ1+2CaBB!KBG@Z}`u>@hdBk?&0gSu?rb(ghbA zcdU&aur_tX)!r9R*I-h$V+Ti2r zkF}8nAzr>>Rfm%n9VM23I~Gly#F|BO*}i@a`*v>Q+{t5F0IOOx^zO||ZJJQ67*r`6 zet4}u{THBA=d46o!T%Gi{-^wZ1J-x`6#4$oAAI}%i7#J&?em-bdjA}+ z?w#W8y%W5?bx^_LKQAt7?Sn(h*)gRj3wo3=t4$#@nq@JnRye~8 z{TU@4>A);k`ldV5H_L%h(#cM0mdt>9qI=b2s2Al+tUP;>cD6)1xfAT*MMjtyD$yBa zM5Jhd6=*AfWambzUl^I8u>x4pB!$KcT!jxFfh|AMD8u;!9B_uGq5w`hIC=}q=77)If8Xdr!a5o5SGpvt-;jZ zEsNQ(d>T{7_F>H6PD~%ySB$AaERpkDR?lS3k_lRwt7Y@YFl~HaRxX~%p*`z3xMw}v zH!o-Nh9#_7Ial<38q1c?Wy_XT?A*19^&8f*cI|q0?AXQj?K?Ss{G_t{BT}iu^ZSeQ>#ju(_K5ky3kIk4l zWrlo?o>n?@bKv150*NvAWOm4I~7bX zprv$Gd6fSUxcc(@2R^)gBhd7M*S|jJm73yv_Eg(fprzVhpZuahmO7`d{q6Njo(V`j zym3vF`&TdX^L16=`MTz@Qh=rQZ=OHPgKJkbxKi!CE0?%=;R5H6o#oWQbDS2)I)CJx z23QZ|9*?e_S%C@cu1R-Giu8fMX0`h)tp zfz;1*qd}G<^|Guf$}}Y>(}?s;eG=03h)LBYDnW;cSRFzmtKciJ@L3(|6}NzF4MHQ#{r0waMdLy`rql5+Iq zoFUmYOvtHaLUxe=R!vgrHbtV8WadNgcqM6=e0l(#peO$Sq2w=<(v8#7w8HWj!s zptMO<${JUqw6vlCRyA6b7}2zjDW&-qlvQ`2QKkot(uEhQC(*M`U4}Jk&A28#m{C4N zz-k=JdW>UMhyDyLZAtrr8dNCmVokGtwzM1NK)1=x^qcL%kVUQV_%vHAk#z3g%i!MKJkDOoV#Bs5 zW-Rt&^h_`MO>m*Za0glrccyHpJM{*8P;Y1uwFZSycThN`L!)RiG=lO`k+c~XM%zij zbetJV_XRQZTAW1h#i{gJBo_9X3=g;rodJ0}gZdGK2AbgM zQVlx``H?O3urM>mP*1GcD$>DLu7*K1gCBrpWQnbD?J0u1=ti^nW;QFDs6*^f-ZI3QeR$ctRPFR98|&WI$CQZ zfaNHFWvhbPMV!QHcM+>ymGCh&s4AbMio8w-Crd-I^i^V0T~yLkPfI-loeYWaG9)wB zf))+^>Cq*Y)@=hQY2qvopCkDV94IPtq(&1f>b5YaT~7~s^z)-*2Pb-Uc4Ol37#2^- zWzpmU#t%rMf2T+W_DEsEnEFhe(u^TP>M(I^b5<@L!1U>znLMovtJjTZ&H70KSYueZ zXe=AW0^Yb_0!t?imcgbY13H${vLu__v_N8l>||iEB-q6Y7jqM=bd5w*Hf;r2(wYcV z%0OnOR&1IW%D`zUzmq!wUIBQy`CuoIWn(1+u8Oy@aKu3cwOhI3Dg(Tmjh76jZgR{W zcLzVb-9qqm3&!0w2xsR&oE!wYT-1ufG!o+Th>y*oZmlMi6}Q0OBT!!BDpp$!S~V&a zi>9s&Ufq~GNhST8#fBA2*}8EpM-S}P5(g^C`c*B}7E4rHktCp{f@YQ1N_F0ahR+0+yOKRH+-3s3Z=5{PkG|fe(EDLwT{D^X0cke1HFlFE4NM`ROJ8cz%iR&(HJ6 zFQ@tP^KrgDI>DF6CwY5gFL(DZ;n?C4Y#iB+S*;5hRwIJJc>xT__GCzPAEq>jWM<=7 zh8KA;DBqpo^&{y~6h+hIAo2p8NpQ9y&cz8o3md{+{b-b5OloKn-qr$K&VEF=2a@C$ zN`$i)e)1dnIe3yNfTbi*;5EoCKma3*xS$x#o26h%9aG+{n1Covd<5JqMbA_YtPmeB zJRI$D6Y=tJ(L7fc^8DL7TI1+!FTiDkiK#%Boio0kK>}i)V*FUj^J9aHK%kYG4LSl% z##KZQ^~EUB6@AjTz*->Qyt0wz&9YT>2$gqQuF?Cc8hT;~8C25|y*AQfw~Q*wb6CX! 
zPl5I@D<@K%9Z9g(r@-5kb~%1@trx=B?zxQanM;=@QHtQ1hG=|VMar~s>Z z-CX*2Y|ikW?HJp?3*!fNW9FFtjOyQk5&hd~fTf^n*2Do~6g6kY_;U zmGjxPa|83|Pi4V^>1^D%o(&r|X>he~-vJIEKFXm(N7%J%kLJNTb?OXTw{GX*!^a#r za7e&x8|&9^WY)~t%$hNa`7`ITbpB$-j~q){LJC&WucpQe*!Swkycu(tHem{5hmU5_ zoCO@+f0#ShZt>{O1Mc3uBhYqV^G+#<`WLbkT>bIqUt)wP$oitmSGBrO^hmWYU%l15 zTneg`XIbf((oLnKN>~5&IRD>|DP8`d&mZ_*d8FP8sLA%XPonF}sFHo<$x_pT?_Rys zTII=lcuxho-{H~ydpr_=divll&+e-lg8vJi{spaHZr{|rRd+54e9E~W5GHUXz;*V> z5l-$u_5-etUEsp;b6k<{_TJ?SJiBw5w~s{kUtZn6o7}-a@?{4w5ZyqT9pa+H3ZAnWqAuHXA%yb(v zvt7w845m)K7#h}zq+vmTfQT1O3fyU2=uD#;4%Dw@ORd^g6xOsLS54{V3v|i2o0V-q zVoGH#W-2zJ8i4{^?(P+Ea{5V(=!)3dR2Dt1gpJyERI3Op;TNh$RFd*uS;@F8SE00?uIPrT{2f*_$g!a$)0qZot`sMG(j+5-E=74_ zXf|SKaZ7=$F3jmTkojE)FtJrTdep8(X-XUw>a;bdQ8z1E46vd7NIQB=ac00=H-;^7 zXY?u$#;@~c$`*g->YhyVsVr3+tWvHi>*HKf4 z&ej&V+uPvfBm;~LC@!{^+BL>Hl~F~i?aT}XuuNnylItvGP_i&3Kp-sA-I9a=OVT6E zsGsjlmv&LKE)UR_u;$gY6zaDny@okC4a_Mnx2L3~4UJpb(xHPJUD~_Rr;`_B`bRN! zSTYj^B@1N4N@tzK(Ed3L=$lLDj;V|o+K@Rj1g<8xXW^1TY}+-9^&6+Me7S(uvWaY3 zHiZoWSc@kP(mYuGJCx9(I8*apMfqA1;GmC(xehKSM%Ww5cUjF;zCRi4jBL@;UYME* z^qOODWr2&G9o_<1zOLRl*}7nEVl4n>t9dLOWzcg{kY(n8t+73hrp`E+IAJH^D4^wH z>#cdRLIV?sip(G?B8#+?Iy7w9juy?kQM0Iwyj=EMD? z8eDyp_ThZCq!jwajB+elY(RX@3D_)wM0^!brKUa(8!kcX#(HprEM2 z-QC^Y9TMX1J3`zEkRTygLWn`!_V@QOD|PNZx1Ia$|JozzZR(n<)|_*U{&g9BEGi3U zt$@^=JRhcKIJ2n4PXnwKbzv-RN?>GGAT=>w)T+_}fu5xJd6Nx8d^hvub9_y?SV-2*jmmI5pVQwppUaH(|#Vd`8jI}dD( zbOOrG#7Be@?Bj)pivxZ>Zn((1t+O`6S_}>cdFLISoCLU>a8e~41n~Sk1)}A7JK4G8 zmj2J^p9OPZI(X+%>K+92p%ih#lgD}&iZ+4S*;6h56w?Jwj>0y4_pn>#ARTcVnv*B*(_l+^ zv7UM6Io^HueQn>IIrF4{IHm#C@?|SnG=C8*7poe1+t{#b9fjHXI9iKRO-f@z?>G&t zRxe%2x)p1*a}e7Yt8f2-_gr5soUHgwbD+N^wqy|7s zd9zf4``NeDB+svF;B`xY?Ba(X^43eQ^X9+a;^VhI;?ob#YRT|7Kl_BQE`7+I%kS{> zt#|qH+RNPh_yKNybc}D#o#5QR*7Nwm(OMDFW1C7jD7tgol2q2uOJK#!IC*~}nLZ{= zo^=Q#+I?ti@}#E9h2laxN=qFnDsd#g*p=K8Z%WGjsjUp8wJL}aHNlLk31CE(mq5QK zUCkb}wYbvU;z(nY9gPiEG}c>EUu{Zx=@80F^(e^GCn;etVIhME^cjq|haT=O1_D|J zILp%2-2iWYLxLhqh)lE~Hr!i7tZ`myG82wNVFWbZQ(9Cqb~(<)Y$^a{A|12>EIBUgjutwB z009X(u4d#{(4{52Tblnjz*0+RYfIeiZ3HUZ2@t??)oNf_;vxI|T%8FNOWI$SK28q! zJ3HZLZ!b#C8XqeQe04^II2sV|Yao`e1LJ$*=ouL<4aSzdA}jJr9Y~g!HC|r${8~%$ zDvYVB*U{VS&6pk!>6l%aIW~}mQ=?flEsm*u;Y{w0X702s7R)VX@%$>5E@)!I>K=CQ zoX!IeuIKc_n>ciQCC5)};Qo`FdFa??9yz>;V_TN6PAu!G-8FPnr&E#{BFe*oKu3Wq zOFi7=x4M|gy;S)y`ql!Vj-uG?aI~=%fU?EK)>_047g27GmZD&_=OE8Og=x(daN6Tw zW{0hj4b}!~@@+>vtUd8lVC5VrfaNcZIg*UzLaNK#sjum#y=^?xr!Hge?A5fl^pcX8 zOImU+$qDHs#U?VmrIRrudYCa`A{&=4)G{Cxkl^^eZ zuhqa(r5nDL&Q}4|?c2At(MbxjRA>G_fz^Qg-$H~6u6|b`LP4y*{;jO;-=)j{4gR{z zpFiK`_aDCC&%0Oo`|jubdFKLmue`zSPhQ~bPoC$y3or8R`RBR)-lKf_>;azMI*To1 zs#w*U&9>nM8epxii)Be^2rH|@SXCRwoI-akM7VB57Cm{PR7AQ{9UDlrlMSH`_S9zO zQkIratN>D6U<`>NaU=>j#Ro(Z?&d?FoeN{E)r@bQ)f&PT}dg1Bnh_|P}vzsGUHs+X#0b;I3_{w`>@8E)?fSjv? 
zAKtE^__~K_1w|e0oCU@`u+!N~n{ku2sKb1az?Qscjw)eYmX-os=7awU4~m%U_t&U1 z8LX$3Y;aWXp{X6NW)6hN^&+gTh%gySO{gQoQ{9-|7{&JKr7Rqk%Dmomwk)h?c25p% z#i6v9#8Q_VM{#l#mDwpYmE=;LmqLDW7+I?5Xks9lQND5yU5N8_B{9I0)G%K%qWm@J zQgEfbSi@?Im^!LeldhUVnoG0jZLMO(tnutvHJ=5OM=`vvyUD`P0qe+%L3-l>}K}VVFEkTS+RJM zZ1=HZ$uyA}%$+%oi2`+F1@2}{9mARx^Vz+914j-W;NYRdY}&MiUAy*Z-mJrik8=HL` zeWm4Q-TwL;KEHZR%gg%W+6^vU{#-kEzzDi{@gg^G-qhepd8ia%sdcp+5ao?l@TJ0s zzbdbmfY}fKc&fgWYpC#_@@A>|50xmmQspAlJ{2xh=iR(^m5TyT=ihsWvud*D*I(n^ zSN_F2FTbQMKREL$AH8u#6Se-v3(xWLQ%`8e6m(rZcTS+{6E1vkfeY`S6X5!o>jG@w zeeoH0ub=0;&)?zJxo7$6{Nr5v;3OAc-_575Y~roQ7x2Q#37ptg&EC~n>|2w`mcxNKQu19|6VCiEA z6BBAcSfB|3-e&lCn&IVPil>(;J^>~Kh8YtSWk_JGK7k4Pgr*o0twt+mTacJzNm_xG zHqUTTr6VN*Mx~WHDyyxisk5Y})`V(-rusTl8XGNWZMCJN(^0$FJuw50jdo)N|9f-I(2SF^aSgJoS^%x zOnDCNs^(m(KZ8n}&8ZxwqrTUgmWcvdQ-$v4+0eJrfk_)(nX%K8`3J;uJsHBLhlAPq zL;$;<^ke&@-t2tbn-l*E;kozYc=MA)&R zBex}(lxhzWE1Zcdwh_QGC$U&ZmOPC78aE1SJSndaq@z2PykcK`gA51>v&PNIP?WVk zRz^dyG>{Hfz)6MwObtviF_2E(#6}>?8jFD(EHi7Y&2-uXv6dog@@5B9ohbYNC3Yra zCCh$QwcXZOg(>PK4I!z}kgQS@%Bpp=wmUImm>a#rJ(w`kn<>44OzjO}bcYWUMuoF@ zZa&KwSFmPzgIJZFY+l>Pq5bo?|MXf89$n7CW2-oIdJ89xZQ#B`>p8J&CA(M7VCD2~ zW{s|=rzMY?f+(@j9SC+ZCcw^Al$SBKqS≤<15^K&SOT0}0e9PBV0>s=ho6DuUX_BQ5YIq?g$TmjeFd&OiBMreH-TGMKZ4u>@V58VlG~LROTm^} zS9w_qveZ&}wIY1Nv}3AhsJo3F9u9W+dAQ)`?IshX*R(i^-CGXTA zLoLbOLQfe)CbBd@r*Dj-v~hOz$}oXlu{0aFKJgPh4#HL%jd zv@#IAU6stA)=hnx7?QG2!I$z}sj%TDX_LD)F5uvfdc~lL3W?B0Y9~g= z(=%+OmX|ep_$bZG^~6JuX`ZXo$K^Xt+|LWozQAj*o#EwIUgOQT-r=*0mo+cemCwJ> zfb0B)&osDFqV`?Ba#aH?wg0{MKhz8#1zD~UOlXu?Y6M-z{p?d$dS9tqh zFY@~H&+^){Pivx}OL?%~d-Y}BeDQhhJmuZGeD)kSE?(v9t2elP6}18?0#la6Z;&Q)N8}k(UwdeWy^0L)^Xf@e0txD`*HF!GrJ!8;oy^0pTfT#AeF7kz-AIp)L94&JPBb++)70WZeUmCkY9%0MMrEam z0G0*y74}pYIZ`HoRi5rnZMr8-89p?n1W*?rOh-mCQ>v?2*xtds_7 z^?5-IDylc9xW%0EE^}&nENL8PMavXx+GkrcY>^$kt6iD4-IuxhgIIFdk5wnUSa-h{ zYfgHyQI>n3@aMsoL;2Uc(R_S9L8w2AZ*J%F^_K-)yH?2APjh(T^+XOo9LlPFeoR>F zM8{$q>KEBlHs6WDxgO-s@)6JqqG)Cq1=E5x$SR#J>$7Cvf)FZ~hYM)=Q?3z<;{})0$cWGmN;5k z3rwk`ZhXG^iLRrv;iECpCm0sh2=1Zhwe?(0K{m#5~nvNto8 z>l@*slIm4Ly(k-Jxt@!W5w3=MVsRUh=x-)O?!u&zN%ReiqO90~%nWnV(k+NfF(Eu@ z2#I;dWR+NuU1&^xkr7q$vbEHS1=8xmnBiX1m^>KO?#0*&1@4@Zt-@Bas+m~=aI`6Gw2``^ITmY+%-nIhjiekhebT&dg;f~yae>ZU7a=WU6PXpWr5a=&GBV*R|?1G*Rpv+4%=oFvu;`uy>j2$iv8#;i=aLy zl(H1*-_l|zNQ@Sk3e*6rGB<&ogh1j2h_oqmq5`z=V46sNVmN(m)y(Yc&^%Y!u|XPG zDd1A!!PRpov3P1P4Mpihc-d?%)Jnp~$SJwgisX2knZoE0esQN0!-EX$42!iCG` zjN#;g^;+_~8d*4PY%3$0i?rgQbEXgH@kjRZ;QiYu>S8k~iMst+(Ih?RVbe!;j8tprt%m7cX7W;7W}yRAEEq#ro#k@A>w- z?=>%%YLlvc4Gcn2-YKx zmp{Frg$LET=E=JKC08$;joQPcS@@4L*c@Qss!u1R92xPs@PdDG@mz!t!_TuAQefOXM)>h8_Yc+2@I-A!X zoXD9+$MEW7-8^}!nZw%)*tj&2#iD=bO$`yaj$!unNcwtx>Fjh9Sh1zJOn5<{s!$}q z(vsXt(Ixe^baZ&hdlkgQVZlt2<<#zAW{wRLPzx2KG=S+7J(xDmf$3wdm_5dvS$!sq zA3lVMBMlfg+?1|5fu9lqn><^J1ZGm>?1>20;qPmLm$w0)K6-fg4#v%I5H9{AfdX0K zdIZLs5}9sALbeSl^4XkXNBJ9_DJ^%TvdWQ~Y6og71+dD^sV%plp;|{ntw_DKfRzJ{ zO^(zzI#Ji)NVWWqidsw2|E82x7*SecOnHfH7ur#t<03lKlgczt>Qa1ZObwwSIfAx~ z6ed>JvY=}ib2?g?+EmGyx;#c!C(|y5>Y#j~wVWCwa_S5zXf>f?q$M?D1%M{o&^*hY z5lh_}w=sZ;TSPXwGi{3_vv)W%XNMz8_Pet2geM0c_2ubTBlzH>1g?LP#hownxcg11 zD3}VaTr1|CkF$9A`FOUS2w~P{SGt$m(z?`viUkhj%&;eWx;yzZ{V1FfOy1-`awY~) zI6aiSnX)}sI;17xlr8cXAoHSho;M|PyeXb7GSiom>HZW?3!+S@yJA)h6*FQfof<>w zlsHNz$5A*YnymIvG8=+Pt@b6p+?Cj3JK_bhQp@bgs&eiJtUBqis=a7vjiPHr8hHiY zL`7N==xK_J0G3J=w>8$oTyKadWm7F2XeLl)Y@pM`#83dhNIGR>0RrjZt>qk@p^=<# zBoNT=X|b05YN@8fQDUKQq=g);RBbE)AvI-=lK%5ly&Y4WbOK-kT2=x`jsjT$ZmvWJ z29OjHPJC#H23LXZYW_7fblgG%EEkO5SnaAWQq~#c_w5R7*kkaM0J@39gU7+b$BvuOgJm& z=df@_I#b6buzW!&>z6mMWyNr|uNlut0jq}|+{ED%YdG=1W=`C{iJdzZvSZ^s_N<@7 
z=7r-~Ieml%Sbc4IG!%;xh;$*+-2!(DJ?sqh1dL3v7%bp4$Q&~PEfYNxjP(pK96Cgn zLohWk5YQ6f(lf?*h#E#OOF74Iupx%Z^B~GndBAi-<=z@vVP|Y3%2X#x)k2<$l>nPg zK4Xc2h^et7mS!#(>f2yp=7furpD1rX!b4(7P0l7ZDuK$fYNkz|#+sFDSifd1dv@;R z@rNGd%)egXo!4LCjaOgNlHAqYVwW#o(B>ypRowe4xBsYFyvp)bp#{|;tIqmAVQK)d z2B7Odf$Dz}<+)N*>h#ws6+qJ6G_9{Pp`6)ktd4X@Q zyvMZe%^m#H?JIB$;0cXuwz0UtJ<>J(4NK45qa$B%4BO>3LBf_1g^qZT_48I zo^;lY$fPyJTj0uuG+#Ty94+v()e-9ML8M=(h??#ume}AJBK*SSy|gDWID$woe|+tn z@ptmpa<5!<&Kg)Lz*3?hY9M5&@~>1FalpH!M3rjrbruM7al_r-0e68cM_Vy!tSz;X zzUIG zmXl?rFHj|7sV8mEKzTupv6VLH=hz>5cNIe?z}Av*TVry(b<~Dgv!Fhj9bnnmfW}qic;bzmi2;^aFH-7vSTRE6k{XQ zo6?jR#Ixppx5FNKhrUt6Zz5k}PuKf;EZubs{dnRiI4zyx0Iz zqr6Ft_9j0qR0FEv0#7RUN+rJU-LjYi+n2FMj2)HWKBA>qJEyHCnF_?{5TKX_kiXRd-ADgc;n5tId}e3 z&bi>J_ad zg9`hpP~ZSmy(C7Pf~*0rmVzv`RLSltSL-Xe#toIvb@nV*&VI~IfvYbseZ;NL-r<`o zZ}8pcukg*qr^TQ;#mx^7a`XLNoPBW_|2j3E7moGt_~8~FIatF}C!2ZbP%S&xX0m2s zJS*nMGjB!&Qzit_H`ruCX>a5cNnm?_m@_nhv3ZN!AP$ZOkX}itoX^bij;gWtG){M=YoQM#R|E;>Gs73^7`aG^1ryiU zuyCidfRi_my%5Da?E0Ah^(S|NFF6weD4H1|&=pShj1cnYhEO7| zvLU(DP74vLQVr$x-sH)#rsiZMF7jrXox87!5Uy$XnSr)jN7~pSXM6{PFC24NV7?a7!jxdVz?F6tKNf5w_O)@2=z>eTV z10s@*$jG-KQy{CLz=YZ=8yahDX{xr7PC9@^Gt;G`&Xo?joc-HIuy<28dp3+=@5ZSd zKe&|pPOanc@iiPfy^({**06cY9Ja2X#nzPqDRX*QHnocd-q*52wWNH50-U3j1+JUmVF|ILjW-3cF^oLl;HLWo;uwkgcmXVp{cA#7N`fgRhniG}|#ufFs=ufO~P&pr7V&p-VHZ@>O3XFq(O&!xk> z^YvF6WT{bv_wK1lX8S8btL6XhvHDNIQp^7ds{T{{J7D!Ez7Ke@{{6RD(0~3Y>p%0? z@6x&7zQT9c&vWng11 zW$%W!qdP95XkF~&J-4#PQVa_VOLL4&j4?D8$QEP8#8ld+shY0J0Xv#v3$+8lVw=>YkO}@uRpiTj0q^tBH z9y*EwENP0;u}1FC<11Tuc4G&-re?FMH;KhP$;|CeWxNUzmI&nJg;SRkD}WUvZ7@NT z^yom6!#v4|@g+6FQ$Q(@30(~=pE{0-T}{$93boP7+qNCrc?%aVX4J?YYAR~9 zysM;`1cH15wD6!#`bL%8>g(>U?N3ik)pD}Br9G-p;>p9uIlT7}o7QjAVC{hiALgWd z$1~49&%a)NRSOxarOMAz9xW9TR5h?vm{7r%@@Rek!(Gkz82~7?u0*v}B?IkpK>ic7 zRDC-IR)78Wi{`b`KuZ8ijUrSYtp5xbDzDY8YhP%9^~L2&T)uFQtDm0Z`&&2oN%{)~ zPUqi!TMGrMd@BW7DqN^Ug$9+EOC`Dg*Jwkv)J7b>&-Du*Xx^(YKYN33F2Ba@&z|M_ z*$24(!BK90u#c}k-ox#WcXIWu)x7`obe=ud$?4q{oZMc@!43IrS(eJWMTx9hkieWN z;R3S3j2|1sh~WbD&CUWMc2v~bQZ726v_{_j8dHj^45_cPpu63X@gv3f?(<{Lq+sSw z31;^60A|ndVa8-vrV3z99c?Z~wHb59SO{cUFjw|XA8pIHE?fHAT^QNoNpqze4 zNfug5QlM39PF9N*xg9nXjF3O5&xzW}?ljN#rFE_!&9mHSo-UL*!-nQ*7Ie+FWZY6K zR_t`;fhVJR>+Mv&xKzNmH%s{Wn;L%lu95G*ZRX26t(?13$7>&!ar(tfRvz+a_%cf> zW*SpGLr38Z2Z1dQilzxH31H<<3ZiUwjKEYhSwg_s%Cj}okAj&3RRUspQ$;5GkUPm& zfXttqaRKCvm2<{LkTqJMsymp};lU(#1`*fnPgJEhktJS)7rGLb??8B-4KW4QBne<; zS28s{=eWH#>9eXPEuFa>(uW}Jt~aX})G}^VJQ*n_B#9T`XRE}Di zHQ54LRh2fhHo4L+#6MQ<@#2|T?Ap-6gGVRv=*gKJ*)fhI+oy4K$6SsdSjzEZt27VR zQ30$y2UoCa%@md`8q3Q0y)2tPOq+>)`p62#wdd1ar)v6yi=uJC+s;JpwHje*iis#Y zgFym9q6CfQ-WVDfpsznvWQeTmVJN>%U%*OF084L(KKeuD_+UNxj6TL%m{2Lme!#Mj zYgh{iTZ-rnHWCqqEYPJt#8N(MDWBB|xLFA>+hAets8xdx3ymc+BcIC3ItubjsIIEh z6o*n6JH$eN^pS^nS**%eUwl?9`qMn}zIT(;mp- z7|84R-#cx!{J(jz{ySi)iti!=UaY^>1h+r&$FD!|*Dv4l%Xc^U?#5?)ef1nSKYN$U z?}>#kfOTM59~&k&uz5lqi(ABcZO>(0Ylc<_Yhg(c%PYdz+Lp+!5$Wt0p3JVE4Azg# zp{FRE{4ht7ysQa#(Gl!okH5Twey;sf*SI@*;pX5$OlUL>Wp$)Q#|uQc;$bWIRe6@A z&8TN$Z(@tHg(GfO@;(b#DacZxLgnF7kmX|Oqyd=9$x{2>tkgB!#o%zj(b@r>g%ze^ zOqhxhVrXoLkpPytg(YTURG1hm&!n^gxxTY}#zDZ&!OlekEJthk8w9{?O{CpP+f^e6 zZS|#X3UDirrzkv_4Kfl_s-MOj7WC}5`7%V^s^@4&xEcdTUK{O z^TMW1UfI#jq1lD35x`o}m&%ec*^F(Br7be^}sS|2pPP9l@}sLTZYWs4hy>lG|I<)E^T&H2_m}veqn_ z%+f{U*tb{O?aC=ko7TySrIT1Rf2=^vd>+1kKZo{gVEcxptXedKjjI-Lbk7#{ZC%HK z9h*3?w;!fdGP_D-U%GU;22%>IRFb=bD%FPO&tJ&gx$~JZW0olJReQV3XH1U zD{oin^W?ozwXhUy`FZ$gO9fh?0`1Dnr9y_|M)k2^);yLhT*As#tJ%0|3v1VHVD*}H zoVxEc&%f{z4?pr4k39OgmYbzSdAJm4snDSc7k;Zkh2MUwd6!kY9006=_Nw;%Ct&?& zn^mGhgUVy2d9wbk=0NOEX#Yh3ODzXVHYl%^@?d@P;Rn3?<{O+5aC+srr!~)%0wz_e 
z;oIw~n)`Jv9HO=Hp&Ci}<>ib0MMmFyM>}7^*5{unkJd-hwq^P0TYP=xO}@K& zhTES$&DWnk%X=6he-xo~xFmGC#U8t$Gr@T^y3UvY@W)xN$QCew8OOuY?5iU&XRnZMk zrcdw^sPbmUG%u!4cW26ECnk-xW^#`OGkSE)?zLe~pChySoS7)B(c59im|?E;c6kZ# zJ5yceNQoHz*;&@4r0R%EGAAU;0PjG7D^Lx$@!LK zWm=J)X-U4oR%wwV6~!(T=Q@y+CIFUdBu2A}%2-k=3{X}f3?Lj(QR7H?oht8OAv(`Q zs|KG}XfB{;OKzqknJF$}47rftFKu58nw*FbN)r>P&&p;*Wd&nfYUyh(p{+bobXlN? zAA{0`KvSyCNUhb8(P%4xC7njI9qG+BWVYLqH^PbHUZJ;f9#o8TrFxtr^^=^mFk)4o z2@PZQn6ya8!INRU`dT_)d{)ZcuWI?@ZX3Vd?d0C~oqY3c7niR!@!^G9{`Fod2Odvg z<_>o{m)TM~-t{?rPZO2h*9F~!eHDA`gEXH$VIJrN^QZ3LkP zRi^g+B8tMoL@X`o)UEz0#h^wCDle6#0xGe<)DUmA)ESG_tVGt$<+x6c+gsXdc?PCK zjWE+Umh;TDNu3oac{w?YvJ!=2WreG32YRU~Tf7MHl8!epkjUU*ZQV^QdM9gZ?OZo! zC+r2dY%O%6j4W}t(cxllgp27A!ab!yR0OkPeib9T!pKN9qol-H0Mn7IB4-L~eMv8M zA~8=o=Qck^j*p|c!G5a#^TBC%m5id=Dr(K$CP0u^fqNOqBfPr z(pb{tL_s?%Z=QiRP(XQ_h6-Q}70}WXkQzF4D1(O#X2{S%=qb?B6R;Af8Y*x#bckL* zWQiy*mcbxBj8zqGeYyY2TOp!TqsC-uIMf6KB?cDqd2@yiw!p|hg$Et5FclHNa&_?~ zIx30eq)ck68yP;lhu&U+NR^x`iemeYonm<(~0)>RJHMX^f1xO#=# zU)|C?SU>)FR|6{5d8?&LoV|7H)_;X020G~hYgvivy#FV#8ju0N8j$|^qSZ0=IkBdH z7p3s?_u53zKYnwA+h1JJI^nCIyve!OALG>rw{v3s6t+%pVO?J(^XpSsRGZ9_syHnN zYg217YigreQ5nMe#%OkRr?Iy$n+-j=Ol?S|H8+CnP*;K-%?WU_CD7diKX+d|)jWZo zfdu-5;2$1I5}#Aj?VsTzNcIIA0)2rxM)NbV^p( zSlUTjvypeq3I{V=`3^fAOl)v4RG?*vjR3fot#- z%EMAHrE;-UjV!gT084qXlqg{9sLf~d%9&dID-{YrGJ*i8p z?5@sJFs7=uKlQ{RmM$AlM`tk$7WOf5d<)a2bg_TW8ussA%legbS+Q^$^Jk1>-O{=8 z+2!oqu#yu8cCl~QPHlAH4uPx<8#W1`&0x}`Da@Wdmu0FH!=fdO9Xp<`uHh6Fm5`8- zM1Ec&X=xdR289wE9!*?C48gvE8eByN3uwk9X!{j>MFfRu+tsC&nzt)2BZtQ7dYbB+ z1On>l9w`tyVG^TydYL;cW$Jb?$zCxN@2=KiiuUi3@E0g*ZOgS)lf+Lfr2tZA7V%&HEs2(fE4!2~|FdL?haA4|4cP5YYl<#q7 zWUIZrn=bTpxrq+8qpr%1nkom1i*3lww;?S{M{J@Af#G`i1P;bikdtXkezp@?DRv~rm=YT?g!t$oq$V2)!0E`#cc4)8 zUrB`vCDl$8R9I_M?qnC3lbvfNux3wcqCIhuwnPVs_}dd7=&I#M=EO!&ot;i+RSDho zB8@q8R433_5=>KBAcL}-tVnNAVn=3^BNO7E@XGQlGQ28E@$$F32cpY zrhK&gA>HyvcNmh_GKjA6hHTpD!?VvMartZ+cfV}n?;l6-_s>22{!=eM{n*E?ue!K! zsh)SwRrA=HJT{$*E)ZUp8! z6PRNsfMqR!B@eCCo`NbD3d&~$drXa_Q0+b$(P-$^8}5 zkIk|rsZd_19199+To^Mog$dJAXzTQ4_^<$$FDc^q!CtY5$Mfj%={$LIE=RWuU~QWq zfVF^!4zJ|&(bXI~vYI0&*Kt??Yxg0sD!0s)&yJH0yo-gCT3I;0i8;MBjA_lLy&{gL z@>p_G0`PH_`y_v>ncP2vp~egzWFk;yB2Z<>;GshpBycrYKzFEt9{L7`=<6Gyr)Ma~ z1+qj81iJKPsW)h-mXD>tN|n!0Qy8g{hDLg(0#WMO30#Q+HPE-fP+upX(}_~nVPoxz zw?{C6evw3k$CH+tBZ^?8Sldfjw0Jp(4jMYf7XzGwGto_*?Z4X_kksSx42Z=T_z zSfMIJc>Bwne0}>C*Q6u7bLWoMS*sCAYWfY8?5?(z40P726Ia^OTQ% zz>}pN`%60gUw_gd>zBJ~zO&D`bnbmFe*88coOzB{9y`R-M+JV@Ol0GfM%MIIu%a`U z`BjOmYDi&gXBPW<^4T>kleM)GtZ#~DZ%+pM#$>a!Es>GM;k4vMQXKC`QlL925u(_G zg79?mK-DDk_X#DyCs-?o5aA!ju*OzGz5N7`1Y(S>1g`8fa8e;c1zBn|p@J;stx|xc zLW63b0xY$y66e+Ye~xCZa=w>b!%^CjgFH`pW&%J40^|BBCr{-o3SgOh znB1Pk@XBcFazZIcP@bk>0U|%5{REKGVwpXDn84K-hSleb<*TZ-CsHWzl@}|p5g_0s z5T@WtRcTKN^CdaNTLY>v4>5{k0<=+v3aV1WeY89*RVQm&UmN!y+^Bi1R2Z-@L*P!} zN`(TI*Gh#2N4Ax)deKDoY+lI571OmMqYoV2!o&CNWZ9Alw09PP**vz_BE0{WEng&*rCr_okyps6% zB+}C}X>4q!t*t{_FDt8{u&|i8xCBzu(nXo&ke-~T)x1&#MoV%FDagtrB`%3#S#PYa z)1XSNtNjgCbsAhLDU|K3^el=?%BZNUrcA!GwyuGu=2j-l-=bhoLDo}GKdZr&8a1d! z5uW|zyq5f~3XLkb>d&eAMT07}9008WukwF-t<)4bs-6BRqTosk52|@YWnJZ6X`m&5 zrNV+rR8{vIS3lPh-OqmTz6M#JeE2@^zy2DpKJ%0YSSloV_MZ`g$~&b%>h~YM)4W>B zyQMr>YR*Enef#PqZeBXawa-4}tIy90a0zH#d6(Ol&+zrd7x?1C2l({Oy?pWhUar5t zoljm`%n>2=ZrmofBP31gzRcS{prR zY48w;^`x%GRX|oHhTBkGV@`8}CEXnYStG0&*P~;?7=fv=mh_I((L3Ink-e4-?^5L% zEa+{qVQiZ-W7@nJC;DV;m#+Yp3nSZ|KATicT0LhZbP-8;mqGDQF9y2y99l2_(TPO-31fi;!BwvxnLUA|30x($`;pQaOngfa(E}qT+0OW9+7X;(MYKRxT9F+&B@W~YY~|-zQC;Fnot)p-8pHO*?M&-Try#

3$e^?aJF;AMdeai3$$672`pLSU?UyB0F=G0wdSF6 zcTwU@fS)g6!9nUOzQ_=U7!5(s zNFd74NEE98mO$1J0jr_<0$8$bAfFwoyjVj7utY)Hi=tOU^9|H>h8W=>>sD3{0%s-y zTJj7moi$ISu_#zik05DyImE|h5EGqDNl6vMNAxgz&H@%LTE?2S8`-jThXCU?fvl}O z`p^Sffzel9e4dxY`uy;{x4EQ73W=3^{n{6N_2n(TkWTgb^&5Qq-FMnVv}zdmf5Mbn z{w|%eT34O={|&JEM-l#?0qgI-#j^hGXZ{qx`uX0s-1z)cK0W&`7e0E6cVB&$7a!iw zvjSMBHcVx~umYx5$FpWcAq#7hSl^b#+QuYy49nuc=mK^QOJi$C5<5qvYDw-(S_Bpf zLa9m-<0iy~gaB7^5~8%pxg!F@@K=RjU3>-Zoe1^|!PC*5xX=jckU|A$Y_;TZYq{qN zoRk-90J4-l2rwInQKF|W zfMu#YSXP+HJ8vM{M)Dq*OPd_1U8g2tw=uUBvC-;Txmx#o)LhK%ahC6M6sU4EQ=T;k z`CDb5h?~4SZf2G^3BU!|=t%T+BgJ3(z(5-+B5j!65XJGirM$4Ak+*hr@!YyvPA({9 z->iI=bjLBdDT0xeA+(o-QJE1)R+Jc#31K7#`;ZvoBUXGHE9XrVWmZULtRJ}vYLuWC z>7o7e5f-P$Ftw+hF&(u6TERs5I1}UVCP3vX;-X1fgs(tWkXApdHeVFw!fEVYw}{>j zfsw38(&PL{jq%aos;xGM=Bg~ZTT3;#I+=8M`A(}DY9N(UZI5t3kr&9Y-pmdr;nPlDlPOks;iss z_Th9iwKJ@>OBC=3hPMyX3XYB+JysLtxvD7>n5w7|0BB}Z&uCg&+coJMGmehVVcK%d z+VyPR)}MI)(!XBOz)FoARJm9xRH$;Xz7Yr-C@89Az{5O{BiY}!{ugAa+^hb!tGrkJ zIagY7Q2{FjR0^(s{plXxe*INHTz!5;nM8`|>>BU;lvH zmtN(|PoL)6NB42=%nsgpW;yRZwSbRbT*!yd%;aCEM)JhoChp%{&5rrmtehCn@(Hml z?2BS*S0H^&o{XqIV;R6Z}~)mby5&RLeqdlkm`?l49PYjia`(kU#` z(yXJt!Hnu!W6CN`DXX%gRK6=mN)2m>?A*{6~U<25Ax^ z5EU9cRCJ@Eh>3_9VS%Ou`V1w|cL)(7Cd5Wt%X?@~Qi?E7h9haY!a)TNr03a^o?}N^ zrV~lYu3~6A;qPsYuZI~vZt~r3riA+15*zAHep&=A)tL-$&Jwsvly((MUq=k1Tf-St zFhVS#4uPdMYqHuMNNaHE1ZLzSG0yYwf&qUU{){*!FM~ zvvzvYwaAXT>Go7laiescCk1^zP1YG4{=RFMAiBeTINkii5Edd?)c`3HJl}YmFz9 zR=B%6;P2x>WN-kY~rXW@Z7opx z3GsHs&fFMt14E4d$*&qZ#Gqe^%Dy26Lq!C#^hNaa7&26k!9(>KEXs6%4==_2>tO&VVW@iA#4siddn z(pcY0_lRC*&6>}$6{}gbW<5K1?_tZ%o$T4SmuH`TnomCdnD^g(k9XdFoAc+-asJ$S z&YnHTrOTJO_0I&eYEs#+zy6jV?*61r)p1Ycr=J9Xeo;`R@;qew$NnMTzy10ffBo^7 z7{dZo|0(|f)?W&)L{w?f-_`P;?LYoY0jOLq0k>c7aqoLk9Jj7=_2OARdh2E0`qvYj zdHfJ(iG`|1Wg+jlfRBx|D+db)-0a-seXzq?V9d@`o|BmimWH-i zh;d^oux6wuZAYHD0xDB6Y^*Khd5TyXi-Ds&$wsQZN*gk<#8%}xDleL>E6`B^GNQb=iUB#ki9i>`YdqFGb0s%z8T5xMVh+ zwM7)BMp2X=MOk(NnK7ZHh6icW=qLbNF?$kqMVVv>aH*WFP!A^(1kkc#LuA`e+n*L0 zKxa)chqkR_+lu*g)fSVP5JG`mGe13oiu^=vy|OTg38R|XuzVKlm(66sjL~dcJD*2R z?&0{p4PpR|XHs7)Th`BG^Tzp1nbgVL*`wLFdo@Q7ZeqrikxUyuf-P$oX&$Wk)5o%8 z)+Cn9oXnaJP*$>UYd=5qPInM|EHjcFp2#!hD9xXCP8u#`2+*Ro>CN*2sn z$fCK6m^NuT)27Z~@sedMU%rwRD^`g?w1j!{7qV{sMt1DjrR7f^IeJ`6dRMtu1BFJl zat+^nhZ>0TpC0A^?zQUAxl;QFyjs7?xvJg%EbUnBzxTrr{QToRF>Ze4M}e(7x4+`s zJ706_#&yl$`ux%*zPfdjufMv*X97rXzxgI_y!I+@zVb33zVjBJef$xhy!S32zV)V7 z8|&gfqXku1@VlGWxck)&eza!QP z`q2YieEWa^)>htoekrd!IF;8Pp3IA|0jK=2^$?PzYWqPa;& zLxTl1_2&IWLhEcr94W7GprFu}%q(jX;>?K%H6$cJkB~rp0V#cgg7pXt8A3p?h`?1~ zu)vor1N>#%SC1ecBZ7TQ1eA>M^D>nE<^%^@5f*MkSd&4kXvBaG^8l4NjytI*`?7LspjsDNV-2mKc&;Y)OhL*qJ_9 zo5yG0o;V@$3T|9&xJ0+-@iGdVDAtC!NbU<+bvoa8fV@y+P!S5bLg6I+YQgq;|QG(&0)% zi!0$Z&P3Gu5K|XKM1?;A1s?e4x{A2qo8^jIio3v75TThtgrx|Gr3jd%d6Jp#L3V}< z`8h5$)dn%8FN2kHD%ifXiNmWpSUEP2`XnbZ{p3Md>EUTI7*`VmoQ%zI7A0V7CY}C2 zfTXNr3)P96S_))Y$uVmUrc{oIs=cE`jXeCXr3zW7&&atJvQH&&n;42^FBZOojR1zu zRtu3Ci_$PxQ|nmjaI&)}$j={dcMo}C{P1#fC&0&-VE+KTJv{LB^v2iAi$H%LqE&~R z8ckYaH1T0UMEiN-XJ?MDtv)#djT1YQnbnsn#O)=G$C;8mbBgnIq$lf>k)%gKrU|uW z_R?_N8QvPef|aOWD6|jwpaR9N0924U2l1Ij)+K9RgTQskGK)Qj?!XT2v^0 zPWA$+Dk)u+vrv=m_J>Z5|52oR0#!qX2w-XXSPHcC1+I)W87iWuL=>+QeSueJ2Y2G4 zlcYna5evARj^-|M)AHmV`e1A6fQ_kxC~X^D1h9g^pRb2OfD;tGV;(XII1`{*-g)FY@Wd&w2Ch_htXP+Ws52q=WtH z8@{^pJ$HZnjURuOj#=R8r{DgN?S9a@_v6p}@WVa6|Mmyzq<`d(Uw-G`%0=}jXafWP z5$WssOXN=x1ysK&k@a8Y*spT_Z=wYLP@xMIn)p@aV2LIE%`LHTF9=+{!ZQz_;<4j< zc;)m?-aWmMw@<9%mHi9ZJF|s#ql?)(u7bs_X)I_=WNCXk%R4h!H!`0?vzs_Dvw@wH zt69}sL3e2id67QEdf5`;WLEFzbbx@<>^` z2q4*FV`QbZAy+FWoaFwis{59P7Gh`^3lv$(`){YU14lI}vz;?KX&0s{EN`Id&zWPQ zFYQG{jVv{jXKg0$hpCW;h5~yU6w2 
zrTuxTawa+}`MXTC853M=Eb$XVCd}E1C?{v)9GysZwkOTSnsirFiu^2SO>kgzt`lpz zBl*|PHeTD`$}9Wod31fD0Ba;`CIqvn*N^F4KFk`K$msfH3gSG;2z90?!Jn$kC`L7u zuwddyMm1LoNQIFa8Ax(OAhpH0WGBRs5E?{lRT(ShOlM?s9r?*|l;@<8pAtuUbeOiC z9Un<%Tm&VV$)cP*WJphg)_8c^QQLEBiCxKE@V_k9epEOXlp2!{-m7x$`YFEtLbcQ zq_V7-(W6GPe*Ibjt0h`)*4nijShIQq8#ioY+qT`TS+h}qX*DZXu4DPKH3Cj+*|KFP z`}ZG_bGEWw&N+4B0k&=3&APQ4wc1<9kDt`)Y^f^ps)GE34?fI;4?LvR+fp^T-h1x@ zt$O>dTLM7es=9QlXsCiDHKJD;F=Dy=^G|DYV$u95jqWdLd}>Oa-+%8PjPk1(LTZ9& z<>?xjYgBol)%v}=ceQHps(9(OFRsb@4S}m$vi=31Ubw(nG0HwUdzK65&vQL#dQNU?;J!_b99&V! zwt2+@S6R&KNn}D(IK#{Q=_(7LyEcqrwZXJk`_WeCOIxGpibiMZ>#V7%HKVrLg2r-d z8p?Du2{^S16t%UQ)6!-_eXBk-%|oef97H z=0rwW5E5yMU${QLVM7Us6p0YH3RPY$0W6=P_t>9%r#bHKrntD7;-=QUOmX)y zz(3T8=tQ|@nkh+H!a_OLB&OLACE(>BWQnt@5%#u2u(uwBqs<^ZoDB$;z925bPW~QO zDhj=6s|jMX((gm^-n?)K0vKoZm6kw@vq5QZc`O(P@ zrlhnOl2oq%%ap`I`2(^oh>9Idb&Z+4+~K_Ne4)HN)qHoSjX!@G#b3YniA<7?ehPQ* zP2|?sBe`(3fmhzk=E3Kq*!4gFvo<);HPe!+KDlw7wgP&tWOhquG|H2lG2U9zd;X{Z zvOB!UXm=;2)rok4!1yM6Vw;=@t#Tl=!i~sEKf+3U@y~I?FUy4h5&sM~eA2uL$qFMp zErjqmKjOvmPEGVBE5(nZoM3762}~VV&X!f9d0_uso;kLd7mqCB@Zu5lm5EguZcntc zP_Lyv9;U{)O2_ReV5LSNsR^Ey=gM55OVw#n`BXZAC*`qHUabMR`k%zq*a9;-SFNiA zTq~96E{a9f>d~3YIbwO6s&WseX7W&NaCdegB1Gl3_!1TrjH`6~YCS3}T!XH-sA#g& z(n*MoA}T6S$le_fM^(Jk7&l9Kv2?0Rx)GIWUMw76#Ppsd8cOV`F3?euZAxjLCE3Y( zWF-!fPQJe()EC)lR>r(ZSuCAd%(j(nJb7w9Z@;jWcVFDWlP8z(*s+DY^vF8y+dGH- z0!|Mc-N60FH*)OQT8R3<=$9e3jr+N0pmwD#-mwEN|clhX&&$w{$Dj%P_ z$UEMC>ACRRw`vJI8UaMdKfh)Nrzx?qxzx~tk|M|yn{PNRXe)|3n_rAHw*|%Tg z(Gv$beRz*R@McbLUC1LFr*L{j56^6y#!GwVaCA`@2j{l2NwtLul`N6xvY<7U*$oM7 z9$&%Z8z*vN$w*fB)iS0=ARx?(AbV4S9HbBMawgixjR0o{ttMN1M5NriG-9IS1^R<9 zH@Cw~6?Qcfn3Z;8ZK#Uw%DXABrIu=+nZB9k#Z{hNWoX!1s}Q-IuSN^Xw#u0^)HlIE z&ln?l2h_ZvCbBdcYKWPDmDvz|%!deI$$o7Dbz_yBE-+QEiy3#1J- z1+u<7lvj5*^YQ7Cym7Rd{qtg3DSg(O$-!)y7tgBc@l5ZIXKYIr!zKn>&?tOXsk1 z!3@^Qa{IcaESfoiuEq)`_ja*$&0@B#T_Uj6KviLeoIiypA2=!o)>fvE>t<3R< zs}@XCIhErWKeA1Ot=W@Cvts^KfwvjV9N)#Lwo0at?PAHC3Cx@{im@Zx>1eECLSHv? zrcYw_bT#lr+T)@HY*@cetDU6=kQ_UHA4iU=+E~Zfv-c1?cO78I&V6j(u~*x7-~A8s z&?8UCxku$(RgU2$PCxJnr%pY<zP^Ts_jIyjQ4yy$w{T>29h+v#`!GI}IU^DT zt|A1k0%*?jr8(c9j`A?NY9bj{7fxrRfL5b7O%0CJ*DC-rrJ+iIsl<}%A~ULr)YLL! 
zl*|6=dLt?Xs>hT=tjhs4umnsZ`*q)IpZEqGPN?$5>EZXeNMSLWOWe zMVUF(RaP`LI?>+dPD_)sRxGqqzN@^@T=bL$#o1Qmr`wR3VoPd@@JWgz(Mh(1#0q3Z z>ERnWL||(Wejy_MgYokoil3VyKCYs_oyCx{H^Row5CtqUOr5F(zXGBD_IYGhFFL)S>u{Tt7^dM}l2I1m37=Ld=qQfmnPqvl6%bB)nU&eNY zF{3wxf7b@MR0~c5t*TcCj}B2BY+j_Pg-06 zImu!2z%!WG)5Pip6F9heA&(#4#4Gph;_dtQ^3=BF%x*0vC&-Ry7ZU<(jPWuz#ZzZ3 zR@~B$l*OqnKYe|qk5S*X$l999A??7 zl_IMIvQF}dbl{IY`5Z63@&+%z_Lc@(XU|{K&p-acuRs0DU%v@4$^ZWeSgND{Prw?0EG5c|^}7Ju zpMNO}{4Z&5KXdQ9ule=IZ@GKtCgS~HH_b4D<>HkZs`ccR>_iEyGrR}-9;qKswzn3q0*#%-@CSYN1kEy_wrCI+($y(_Q)qacxo<)==SmhrokCbXV zmS)ykyBIJ$ETvrx0G1L3*-HL*z%m;wkTqCO)`wuNZ-}e3AulU=kJLo#@($?)!0hBZ z0&ScLcXTH}44MENdjcID@v^tZ&)H7i0omtdLyWr&jIPgP z=BRp>Oc}xO+I+HN{K-uSqa;0EfGM72fvj*p4>A*D7(2X+*%QW#wO>TO3a=%^kSEKM z%v24m@>3Hun3^$mG>5itX35N{0%#FL_;`{KDt(=wCy5b()R*Ow8WT)qK{|_PO=Rhu z$(pQOFkJ&J<$)U2UdPli!!@{4K-E@TLPL3;wr}l{**tOj2opxN(_UAqf!6$KW3|s6 z*uGAKE(KWXn)9ZP(Y#ly7foZy>@fm}0#~!fvu4Rm`R?(|nKFiPqdJ)}X$)&uE@8)( zjVxQTkXbXQvv=*%rjxc~Gc96xcIUAqrxaP`QePx0ha&ugMU zOKrdK;;TIS+`j~%-r@~ufB*W|E4=*jtGxdDo18iG25-Opt^m!a8pwR{#r6LNR{h>6 zRhzDV`F{c|<=Il+Dg{|TOIuZ5uY31?LKP@gd6g<>O97SwEd^Ezrc_y$fn<3FSy!%H z(e^7(m&(IbkahO#Sw0o8x^(dh7gau(0M_NtF7lNC+3g$Gxqi9dgZ0(bOMH3dQ?7mb zF_%AjhtJ-Blgl5z%~vW!c>V*wV@@D#B@uK~Mlq~5lHpBZ z0>*wc*Sk_*XCshhL7nK}N`b4=d?SkU^eHJaptRhWl1f90Dhw#BkbS}mW#yt{1e)r~ zt!b{Xr@hjNPLcL9(M83U)a9E|n=f!yWJ*=BDP^)OFOl=B1;%Qf#Q3(SN-?ZqbPk()n+QKKx`yeW{!A5G@t z_siww=;Ehu#|UIi<@cW_^W)tfzP{5Y)_e^gozLUtx089~#RRq;4Q9ql7rLg{Q$E6m zoK}IVdK;oE%m|l<6IE?VY^@DZRXSO>BC^Vo5Fz5w3L(o<3w-m;@z1p;w9tjn9Cw0K z-3U$fBr?;NsLTK&(}IXf2_-5nh}dYc?4rZTh>4;oIax@$j42~Wv2OkX_ODyVBYXGq z%JCDtee!-@+`XIC(!msjdl2blhQGB5?qSXygvWvjFLFAtUiEPrnw4Xk`TJT*^N zd{m6KuRmnygO8`HkhVDvwq|&{ioy`35$s}1q=zlh9_F+ZgtBNtk-X@h##9uerQBZyuqL7?4fTx#vh)R{^tF&- z|A;Xo^aZ2_L?yH^4qrauvp9gdi71-c>4nlu++Np zUMVk@f~)@otgr6;z&GFj#81Ee!A}BF3b1~XPFv0HujIF1{@`~x_V@2XRR0&iQirt! 
zcLiH&zorEK=HGv-YSO>)+s{96@B1&gbNwP$K7NZc&pgDbJ)7CKbOy`DG%~Lxoeg73 z**Ue2?UQRbJimkSB_XUHRlt(YOl`SmMk6Pec60xV9u36qTi(OdyBD*#x0#~YKob3& ziEy*UOJ|I~gEi6qKH4-+&epc_tmOUk@Fg-V9$x`joz6uY7-A}LWir?ZRY_fi1yw$y zoeH^|tGp@;foPF`Lg@b$Mi3)Y z7#qd%DaA}5Q9`E}5M!DOSv;|eo`wSQ;sdG3ilsC?mYjre4X|nov&l?|rm85174zmW zanuNkGUPpsixgN2)*vfe;Hxk_iOz;<7EGJO?u~0%w`73;S0W)|TqQ?_$TRRJ$kUmk z%p@ZG-G~qK(|~Bx$^{x6b+^`N9;#Urdl=DDtpU>RO=2_%T-6k3QJRy?nBmRZzP(#k zYx~u6L zc~XO`2OoNz*Ur4nOaD5fL6wqMRhxVHb>4XMJuY3o#`$xfX(7OmK03>V3!m}HC+D^0 zKsZoAk%B4Zky1d_ANuR}Q2jpvR{uHu-YX^Q8VbLx#ke12ZQ>g@a6 zxcCV-1hPIq`ySWMeZZZ|=ee#%2fz3TZ$5pL4_`UPXKx+l?5jI?`L>ErO)dX|n$X5}R5!^h_`t23QRjY*6ui=-tdi24kF8nZ)aDU6`GD4h0+2!=I8 z%KH|e|8CC5}Z>=-kmG;suZ0RhwqrJ+WhB7P41h&crx~fa8sVcD%h*Ja51nSBh zs4sV1vS^VD zM~_5s=GARMBP>BhFrKb27Nrzrwf=`YyZYc(MrO19Y zLNHA_$^@}a6TFB?2_il%l*IIC5|d*{jExoxFM;gn6tcp_3XIL8A-h(r%FzNGi`X!C zEl1bwwfE8jXad=s7a+&c+kq+LW3;j!HS89 z)L_cR!BGnrs&%zrg$fm5dAqygFBYt~r-Oi&C=eev{G4oY7X|38Ga$}e$H?jg=Jm;M zFLWX+ZV+WTrj+KIk)L5qO^E|7m2Py^`LJVc3va);o;RLb%YFMMuxw^IQ+v_{tg2bR zpq0f_s#q|wlBLt6!A`Ab+wxvc>{-G?r?znN*m~}Na2qF1Z_xm2-NtEbSUZJP3wqhG zbR4_a&0x=lnbH}LXZqA8hIbXvT$dw_D2EaOtRztqVe&hCU7T^YvKA#Q_v9ZBma(dj zr2&>el_*g)V$e_&t(NRORPLY1z)}+hQ=-((4Fp;Ru$(L%aj|m7$-)5}F||Y`uqmhZhp;ocYopD&jL}>S^p+-_ov_ZQ2^_Y zzy20L5>OIS`a{6$uV4S<-@gki$-e(7JUHOJQl2Yyo!{hh3eZ$|;;-L-l@ovE@89lm z`|1VGzWzM#y(Eh1$@_Wu;8u=pTEd~_6WKVvN&~DT3pzD#m4Yb+TC?ioS=64+s*(B3 zZ%O&bd)3CNWg~fL?KoaOyqbMWr&69Aj*q-k{sQs|{vM=-2NL4$qJ;?k-BqDc7Yy}` zv6Xx6Tm77-1L_HJzLHgQ!k!q4|HF{RTm92#>=Bh2Jxg`ax6iBIXpn@zjX*0@$ zrFkib%JUZBR$`=Nuz>)Z9;V83HCP|BK|=+!ln2WIcWF}!u$1`NI^t{Rg0Hm`o&t5g zCRT*oIuYUEOq8oD(PEf{ib0a#?@msHKUuOY@E54cNMLG3CSwZ18J^}&PnIjYCl_$z z+2!1PZXOpN9LuH0CvbRPBD-hBv1egATV|)Sa7+Y~I)a$c7OyF*)}jQKPU@1rri0p? z7>ZNED9ebZB0G`n_%PCi$xb6S#Gkm(08(SZNeB-jFEvg+6G(KBr$A7%mNTV1Qp%&H!htGps-wP4 z1FbEq7iqASpB5`nmca1lDh-xSAK9&q5>y^7<=xu9Z7nm#kJP|QLDqohYrr$MeBM+o zM7Vk7T(+&5C+&SQD+REYi>R7e+tx2*UHF)vzrMjwx36>k{D)jS`wq81d!MgAdz+h|yvW6O z?&sYXw)67COL^wxbRIc4nqym<*f>9z(>uF3y<<45rew3QFNImd(wN+o&X}rX+H)hQ zPV}WT){C-4e=5>~sLTkUu`ra5>TofB{b{IhqrS>P;L3`sGBc_~Hy0Edk(sSWTDCr! 
z!UefSIuUEK@^oYibQBd>QB!KCCAN3hxzN|>%7i8lCN+66p~0Oob*}W)OS=+j6VR#< zeN>(!@RqL=-6Q~)X(~$#F+OamR#n;Mx+S^h6v{a{nPy~VS&^1uO+u1>XhqZy92MmSg-;B03oqK}ikK$zzsLc{ckjvY#>=)&xL zOEPnGq^4OB7pEgM)C|8sBRu_vYQ;`f?X94Y!9+(5CM{_wg;@sDE{z%4WX1Fz7Zy$Q zU{K*mOA1Gtlho(DDQTsqT5B*9Vi)YOGcA9NmV8!l$a5g zrble1Aqkm!BxMewym}BbX4^|63FTie=JMJ3CT`yv#ZTW);MaS7{Cu~ayWiLI?VTE~ zT`%Fnr2^jlIG;zKOJdK_NEWW~p?!>w{8~NY^5n;+40c~q3B z>iMx`B}Ne!7C^YaSmE9Ri%!ls=&ZH;HZxJSYCwQeyh_okBBKhjR8Edk)}}+1w@XBp zYMLD-YTMF4?&nZ5OokZA(gaIYwOdtpA7Um-*+ihriEy7V!hORCa1SIhD2fb~e^pjP zSNm{AcK5Pi{$gg%nk!(ngtZ$tbKuBPj-5Qs<4--W0haP$UA_L5=AlwRrE;;VI-Y>+`{eU-~d5Guk-_IjzYLodBm@~4T6}>gA>nmf^xH66{8m4)% z6i}&rtfgI9EE|@sf!6xI5;m)RtjqnbBT6@&f9b7idm zkB3Uyf##7EQQOLZQTx=$P*r|Ie~_Lgs+g=2Ljfvtc^9nJ+@1!;0#`=Z$kIum+|$a2 zASV|QF*XFS0!6|_V(eUrcXB7u)twYyFKOc*M7r6M?C(r*v_Iuhe$+(xF)A~bx%Jsh ztB7H8X%L(HvUzN68=pThpKo4S#(PJH^Zd3d0o7EFEzM!~{7lwNO=9KLG?q-vq(_BV zDiWDJx}I61>S-=aq#$0dujV8a3ro0h5yfqn~|bf);I zBzplT1zCBi35@IR)WE8}u2S>JM+f?nn-Z_(Zz&I0vcOtObO;{ymbBKCXrQDZO66cx z7iBWBr-R<6IuV~B5 z(zec>za*fgyi}^xLVx&A0hJOJ(o@Ol3ZB%uYGdl3S`Oq?4S1;jZ-Axbk3af7R|>BB zM;@xm@4xfici(g2!iE2;ai_pZg#<5Mx}!+ zxAeVrDIpYuxsw&>LRP3N*%4x3#CcJj9ZX|MD7A&&ROCBSEk<~a0--WXO3KUyr1VKl z8BARAP!iLPNy*ZYmSwBekjl%mmww%urYbkO8@w3b=FPNDA7*v=GQGu%X^kFCZt!5d zNN=qhon`ja=30v3VIu9qoYG7S3Q~;8OEM5+!-%3ZQ;IW0vW-Nq>ywpXKzf?7K#(a3 zDJDe38xRydSRiW<{^5i04G{^@!^hhgFAod6-E??3nd4$-gtLu4uJ%UKPZ{FwIJ7_9 zr|M=U7?P1`LT;V~dHL4l$ZyWbbd>$JgvVIoA7L!l9ZEphV8R6CVq*r8o-&ByoWV4d z88E8FoM}C_EEw;|p#0$$7&Jg{mHV zvYSmvuQMT|)`FxGBl4=PC~MGBTBlD<(@>_*v6Ds;%1bW^U|ne7tDD35@%ui0|G7^r z>f!wPOB=u3YZl0=p!X;r4PO-8cFC-Z^6jvcg?-;qU@#cia*bpA>LY#CI>G28VC+AR?-#~qC zGc{>V)F!o29??LWTM0?d3bg8|Na|)x!+d58Tgr;@YuPzxEBj_`VAqtT?47$v9)2}( zUXCJaWRVVUE4jh;RybLxxdK&a$xa~4MuQ|vH9TA=kYp)J+)S1NSPHHNM3rdxuXQ=! zMyz(VPbnEyvq=kMDQFW|)0qmW30x^rUaWx-q4HEIkO~b9($*DV`FeV3%MkwnqQXP) z_jAL=$r1-EQ(OhG+yx9h%=O5MaFdrjlitQ?&4X26Voztin^+jO@`9Mr)$Gr(#sKPy zT&OE>X8f=?md`3@?Scl5Y#Yz99g|r&xsv5G8ac3W5{I`=`u#_HLZZ=4E2p&h2CNqz*=RSJ2v=M|F7` zh1qdrr$mty9Yl19AK{`n{5?H!vay%@)DNWto()ymy&tgpm9+6-V@w5>2E=5Lu_kI= zO?jfA%GTIM8^NZ)%2bY-4;J{+6RXC?l~C^xLcD?r_X{UAE={1kKstv?I@^ZPJz_MY zd&V+n&H~nL+{%tU`+4%&7kK*lmw5iA*EH{y3JbpZ&WD=!Y9O&)fz_v%zW6V|QaM<+ zzq!i~KmNj<@9*;U_xHG~>R>6rlFnR(`4nLNt`x!VfBm1mS8}czL8y%!RNgB!vgx;9 z`Rn&z`1|*tH1E|f-(2VXTQBj%u^nt(JVlI%Hr7n*X7j8b=CtQAqb`9pJ*r+)GpARM z=IFvMp5G}TJidZ?&B-imOVf@kz*2cw3bxh@l&Ku7jWc?f*eds?EQ=OV*adOngt$3q z9;`4QZ|p2A%Sgm{u%1A)F%Hsh9K<+rR0B~=%&?Pv&Zg#?2P;q@%g0v! zMtc{MJ^aY^4JOmwi!5n71t9_C278m>X+w&)^n($eG$jPmkr+g`D4wZhu`Fy(Wc!3d zUfwx|_m55H+SBv-{>(BSUYXDKiJ@$n7|x#AsqCJY!@|+gES-?X@+tXD>_}tTlvcJZ z9M8DsVjA+|=%~n`tt>{Y+`jlq--pxn#~nfw}o?Uo)R=B3osB)2jJeS%U%+))L5Reh>IG!RlCr94&&j+EC)!It{h?~VHJfThkI5LFIE{Tl!*m4h|lxl*MW zzWYwW(hpp_c0)kwss>0Zr|N|lUeLmXDj!REvfh6CZOxmdlHOH)R^= zSs%a5^^czChpVsg=MV4l+qbU>cpc&OCl~YNu?Za9(8$qEZERm$$%-l2Y*|pr>S-lR z>kwT~ox!l;R4S7~$O?87W5Yp=4JT5AoXL)M7b75m>byWoGTkW1u%Rs9PMgZ6qTGhO zB4d&>h7u7sh)@BW@HhjalPri&u^}ndj?4@PO7ffqJX{%3=fQ+lfvPS~0W6VLPi8iG z3S@aQvB8tx8aIZ?b(;%psmw5!?=`0~$C}a%OA3+=$xqU!MCA3q#y3y^%hwohFAKcfbpli-I9d%A8H|gqKJNDV zcsL6%`RWTS3ERl`W@edak}a^8FM2Y+z>TzYCt~C`g(d38ls4R)+!2=K4znh&%bwzHSIT=l1*kj)t~_WS=T7T* zH>yY3lGkQTL7N#lO(x{X#VR_iY3#P8ahM4s#+fl^i9P!chw}1kd3^e*K-BeCzW;WF zSmdMSO;ys(pTBkqbhYvIS4~|0yo&ceEaHi0k~nlSf-Sp4nY-MFo{2)qt)^s`$`8mN zLTr&f;e`eS=NJ%}X@q~O2|kI2ctj7uD{?4-3b4ZEc(B~yU`Mg`!l=k7q@%cvo~p6* zRZd}a@f5l;##0wLihQp&QXK1ucdjPcr=Hx%7Alh4X~=4&HM^F!v{IUrvKUug&6Ku! 
za>BfcbTubZDA?E5Kmf}WZ#Ck`+7>5Ed#prpo2lto%mt9FEwHdO62LMMfU-ts++Rpk z6&2M+9jZd3vhARA&_W|-2Id%xHE*h~3XRG CL%$=X~eKw+$TsZ=hO%DIXTkI+UB zs-*V;PnNTT1L46Tn)nKg`1yF?;bD)Xy%`SHX1H3J;bfwZpRGAn8KF#Q&!)Z9pMnHq zO2ir{N;jZ9-$EOmQJP~Vmb14|csNsg(wH?imx;rZ>1mE-((nvsk11xMDBJBTMssNM zRE}(!$s>nWa^G%&rwudMw`Cruj&9`8fz|BXvy77uZ0F!H1z3yOzi%ancdy{!wk7OZ zH;2tDr?Y6*Xhw9E(cF|LkX1xgStezLY2;+Y5f$o7c#t0v!2*)5?$}$|Yf4!S*H=@Z zD9AFwy-#(tB} z{LIha-sHDC*ZAc1XT)-y!Q%1lVyzZ4sX3qZQ#;uTAs#K^@g$bYB zJemEoTeR?@%EMBkMiuU!-oT!j&Fq=etpQeFeK7^GfmCP2)6-B%LqRslVZj7@cxXxP zf!==7q4~)3G1CCc$=M$Z3kN&}ZrohGwA?Byot?B9D|wdvAfUjSu6@*5&UXikZwEYgc?KU+#d9chg5pfkan(W(9{Yfb0D<(&doW4ZL` zLcV)#E!Ur!E9;ZkKRuc~)1n1LrT?0ez=kPl%o`cY$|(iR?aN{M$b9y!p3Jr-6B*Z1 zEWlO4!~m*g zRA&>jr;KCtuvRjYVn~zEWh6$6fs{r{Y`A8asP%-1Kmnp*oIbKg^GL;pc+*l{qyd$J zBn3^%bEN<(IVwmy9vAAPd8AY*P=yIqE|!9<9qX4dc0`K?X|*NUnkd*(9xb(B<#j2* zQlk-9FPg!+B?9UzXR~wtB7v+0n)hnow$<$0zMh?%S8ITE>gWOPJAQ<1n>Mh1-5O4w zJkFC(KE=}lSg*hFjs{oCLv`l$cXV7e0xbnqKmPPPU)}yrTmSxtpSdky z^`)$T_wC*PmcXuHX#k)GN-+FSqK*xCy42_U;p-D9ynOkp0Ml&^oId&F6E0r7r~#GoQYr6M|46}0T6j>+=%64=iJF?{+O_L)&J{U! zi?46Xwh9$q_>?Q3e#-SLm$`Z664yRE&(~jE;m>>D^7A*hG{E}u(mB4oc$V+3e8gAh zU*n7SpW@4NPjmN+=lSc~m$`fG5do}4oY*mp?aL}zIU|?VGYXj1lgyF{()Py}GQK5) z;pM5+XU0$%8$eo+8*x7N#EOv-@9#izs1q5{E@UN$BnmJk>c~#DAYV-}lWR_z=;Ns5 zevgq~=pX{Z^$3YJ(L6oT@m2yK*5sz!QdQtUXO#}tOJ_r|^A$U0G;prm3 z!F!14S3^=$Of+E2mFwjc=*SevOUtk&In9pv411!p?1)OYBt~E^G0lLqOg(`-0j$Cy zbTpdKH%!Omkygy?vu03QvyfVwT(HZM>`rU4+ifZ6a;9{+I~60`s2$}>Q=f~719hXV zsT^fR#YmkTv!uFP;HuY_HX*f+v6hUPVa>c1PV7Du!i%rwiBf6g_U%r7{%MqS`hB7y zdPREp_isJ?ey^J!zZ=2rFWb0uxt6!yDd4%6GI;QbWcD16V#S64rp<9@E%76zg~jIjNG04#d}GX+>` z1eFqXOo=+ClDRF#@>ZTJ1yOcZ)|v-PL6j=VpmMX6$4cd5Sy@<+k&;S$bPU#(ma@-Q zl$brP&N`fIEpXPEijo;Bf42?oB}t5`OQI&*h1xtjn#-Li&JfVbF_#WIl%CEA#t)BY z(bQsA&Z%M6m^?-{Ml-xFiV0mAESXv>AT?2|*uH7eDAvpw#-6oPSTUo6jf?s?v415G z-M5_s`&Y4X>pb=xTE&k2%h`Qkm2{w6II?#&2e&Wd@OIg^VLmGsjAz2w7HVphCgew`YQ|hPwC|U7NYw5AMe!wVEra5 zzlk%rB1THclNYAMf^3LEa8UeDnLUF@4b zirtGQF|W6sqWBOJ{amH_Y;6Zx*MP&u1l&4Aom72`kK-N{k*&&1UwWXGSB_O6o5o$HC zRCv%xI!|kRJnb9_aB(BT+m}EmSDa1EaFO;BD27R_{9Wn(fuy>7Q5h0OeRvpk0)@5V z{){O~W<*XT1pzvW11xBYc3?!JJF`k7**~R{GkYg-@v%kRdut1Sezcv74^QOm$x%GJ zxttfa*9o9zv2kJot0$zgs4tEA0#|c;3)nDk7`s5z9vV5+sISi44UO0))DYTvM?N)5UcU@6#AvS#s24X`$@n8oJR zbJ@OTA$vBj;J}Wx8ekpRwTZ_bJjt;Q_!S5Q|e#;h(YB6`&A3yefQm6&C8^&tH4byzyJQOmfxjvuzm&xjGFdyTk_BV)m^M69(*>WK2SA;#Z^D1RMc{sK(_2J&th5FceoLaYIC@p?oj4k9FG5dO+r6fTewA-XZbRFm*% zbK>JH$V{=MOdzGT%#qQx&P;4`Wk#1fbGy8m-R{G5fvXAi9*nMbWmvg0ZN-k%$@vPh zs&jPIU*pOuyoXpwxVjJP^7H4Ot7bIl06-h ztQj%Yib)F`nX|%~O}l(~;<((L|s-4BTCAdP(6Y<%`=$aypY*7i)&%I&-;99z2CR~thLu1yY1x6>}$*H@vun2#XJ-@a}Rth>}Zd}!RX2y=#n~GT2Vr^-HsmWl>jQ_s@%i6?R zgDkBzv&Gh2UMt%w6sC%58tNKofTaqIsw8*iwNgbzRX9)qmO7Rg8>e})+?>VAb$7?f z&JJ5kGf`${qTB@H%+wl$ssg>hr?(@W)tU6xCsUQ_L~)8GZ8cu>jR+O6@@LlMbOD}H zmd&aZxT<2__!6e|ipA5DtAz!Z&uC!nJh6rrce7*VIM&Y}DUdai&5OpeV0;TR$27=5 zI*UUGH?nQVB37)O#=5QZ}JEJoow2+hdukBWdDJuS+{W;+jj2drB~n5AW9{< zE3ef@D)CmV@P{?96nH(HMW}%5iYSE3H*Ry``VB7KyulR#tm|53_x~*K|CJA-084qj z?#RX+IiNgPY8K&Lv9KSMX1LGIt76?=yTH}6C;0V?_jvp1JsjM;goEqnv3>3+=5>@Y zxju_gr7?^y4%g~jDR^2uEK3uWi?woO0V}%lHMmme)C|JM=Cx}cEH#U8{g`q#O&-dQ z1ru31p^NsiY?1@qHSegugEcwvG1L|pQIwsFkGnUj(5}6Wi}VMzERh-J=8hPfSYj?9 zywY7r)mzg$0P)9&ZgRB3-f-2u?fB@GZfhzTXQ=))NL6+vZQgEfC3XSS%N$%#- zUtF#1@U(Np-CE5EbRo*ypFl?!d~9q9a&#cn(Vlc)U$VV@DE0H9HrQXrgFtEB8J!nJ zcWx*p0VY%io6wo!!uT9-77vN#v1u|s9hk(8&o*%Pn@wE(a53LLI*hOO4dvrU8hCwU zB~LCXWc!S4)=tV51rp2X#yFNv9>Oj$Kvqs4!Pw>^>3>-augnmo5lwAQoG6eO@{=M+ zjSSFou(}1D+J;mS?Bz;)Xs`f|2W4X94INTRXNOL3+gY<@7MoVgWrM(#@?h;1uzGCg 
zI`(Z}%cDCt^3-Fyc=pLhwH&OqE0?it^G2RN^rXmBJp1fnUU~HmfvS%+kCh4uDi79M zDh&Uh2TMWLL%_OvP5O|Um3!+iu3cBqb5jg@Rs8dT$LXQRO8x!s_Wvg8+6QY4KFH0w zcI`oK)xZD#qj{>-EJRhBL4^X9x9I_JJy?xFg#(qxN_nKzj6nrkN)&9VitlO$p}IzS zxE^}16kw^GELGy+!i9_S`ad{+^f>3voaKrD*jY8Z@W&tc<@;|mz)~T?(?@>P0PF6h zvtmRY;o|WhxpedkF8=a1myf)`)nl)5{->w;?Xw*m{bCE}e%{M(pKs*TR~Pcc))B0n zQNsGUby^+7u`Q_#uTG>qGlcATFOtHYiSn0z;$eors}X_jCh{Db;OC}?r*nV2T>FXs z=}$=DK*EAm`GWqMr$`IOg$=+zd@z0jU%sLG0$X|ngz6Ch=Oi9fZz{;`Gme5JcbRaI}QSOKzSH#f5{YDjQ5=wQxhdA#KgZ^ouANL}aPXgW6VQ2E@Yx)89!O#1BZ;iqoXnEdY0O)a#`HOHjGGk9=&|1P_Bhiu%#IQA zPwO5M$k_UHCO1?wy`i0X&Er_mG?h6uGZ>#ciPq>I3Ot4qYh8_>aRH77$v7CtVrSxy zm7Xg#Ksc3%M)nK@}e|ONY-m05f1$cwG%2fGseT%26s~z+$>yi zwp7d9m|$)?7z-19ZCMU;J##G-rPg1wG8E+^fTi|r1+J6_MJv20=hTcuwXM#pqMAmc zFjN(H1yU->U4fPYEakaU+bYRDHZn@9i=_&Z+S}OREs$j;-^tF_1Q$nh0W5tibO+*O zq9+!LFQc0Z7+IGj7J?I{Y1WixnKOD=5Pc&fX>0Hk&$s=o7Hg`0;wl8A+##yZ1Fq2I? z7qMZ-LN;z&$X0=?Z5tM`bG>Y@oWqK_lbAna4AZCfGPbXsVJZQ*F`u%EbOGjgQd6Ty z6u^p$jUp-{5)U^|`HfCEJGg5~+g3o!*3uCRV}VjtdSRe~DV4;omPV2LDECr@4t4tJ zVyI(`nS!ywa$iO1Tgtt+v2w)P%0a6d?(X0sfaRr0WMHVkRXT+^h16Em(b3k$lu0vL zv3wm17A|MT?1gOJzL$fCUS#{ON7=Y#r)(eA085o}M_uKcxYSwZ*{_|w1@P-=rMFFVLpn|L0GT`48MWN+v zX-j#C5)r_0;5S2DZ)`$(HHEn9-QYxQbY2HKuCb zszvRYn)hlp?9nUGxX)$p8F71$UK$<)l?+w;;gD zm6Xs3vC4yp@Ng%>&4m~b7s_KIsR#?9*xQY|ATLIz$1*Y{gx<_xx-)%giWMt4){1et z9;|AK=dp>!ytk>7i|?=C?;o~u`GZCL{_-S#d}0J2Y^~;*MHxJ~Ad^E&OV~ZPm<>~l z7*Xy|uiW1?Glwy6Y=iVU0jr8M8uQ~Qlh4YJ_oqb0maJ%hlEZw+5O9eO@MQGx7HUfJ zhz(J|2;t3{)OeDHFyBgD^xFn>>) zYRfpdZ@Yj`r{oY;ESCN_x*uiX=HIP=h9w zSM_jTU8|rqXz)J|@c$p8AWOaOVc%0QrNV$})}Tt3 zSCDn?+yyP)>bviL&}Jzr@09|hGXg~lmLARwR4}EyR4R0@wv`u4L6#EbwNf4}B?`zC zuqkhtN|^uS^dFoRxh%kSOg(DyD<@&9HcUJP%TTA)kjm3Qa#u8rGH=cQYIZWzEX5N@$(UXY` zDGDP$)r+(^7lCqnB7>|5^b!yesPc8!!{1Y2$wNmg)amCvhyZ_)fPsXB=nx*EOIV~1 zAtHehI{1tDhU?%HDs9Lh{6Ys28a{~FsDWf8>58t=XGnnwttDoRsIXvUl?`2$*0h&c zO206rHp`frEHmnItf|kpp{dYLfXtrG8b=MRCbg;Ba^B49_F`tQ8`FDTn9w2M)nrS1 ztu<}JElm}c)RY=irPbat(n?5Vq^d=o4T%iX7yUU1Ur&KWw|)YR{p5EJBtW1`tyLHq zZcKc<8L^2*#3btxE3b=9GbA?MjQDgL;?fmZIT4j5_bJngs0>p%W=Kkw9@)9NwJO6Bl_hd4Vglm%4~z4djJalKASILV0NFxqhXS zyVu6@&&{d)dwVAT+?vk48!Mk?Jb-2MeFY3cSTie~O*3lPG-%y>p&T24t6@J9 zJ#CrSSkwdWgtrPanY!X(;f{-i1NP=7qO^5H46zZw zvJk*B7obuF6;;)0RiISuJDAyFqmttX8Dlg+U-P<{>8QFurvH=VuJW%eO#V-q1{ESy zt@2{2Z3R^-(cRukfWg92T5Bv!jj*>j#ns6IYg2vs+(G!*np2b*E}be@9_C<5l5A)w za-vWkpsIXx8Psh_h#aiV5*af*fhpZN0!%r~l#ZwX>&YE6c;)fsy!yl{p4+{c4GVgh z*ilMPLoQu)IgD#9W%{T_0jyCPU~SwqhmBk3v1!+0)^458+D-FVwtOlpmrN7ub`EP7 z&(LNTtynaT@#99&+Ey)aRmg~uH8csVRaWPcm!C{hVl+`v;rRLb;q_lmkE#sr>ga{D zoeD#_3q(3-%NAJ~S_z<9YLgCZOzpH9WvV=bS}@v3pw-se5o3WYJzZl=O|9e}x#I5T zBOvM~%F17qM=(L2KE#MpR;%@w=9f}kUQ2U*8{@`KV&Q@n%$Po(iBo1Xci{>)Y<`6G zn|88#-DbA#*u(w3@M@!{pDLHlK0ypm5=l8Q0ICbGqF5SAx zZB-mpfJs4=Dmto`&QXK9mX9R>r4rj!cu)f@x%Q3*Sjys73(=}@;$1HMagt*{e#Pf+ zy~tBL*RWzrH&fcmnK`VCDNVU78d1uc(N)ZBNM%~>gCus9gQdJv3aS>jXK5ipRhmJ0 zuvGHEpnXKK3u4&SPs92tbUZCOeTp2TLua=;#`1kflm7C~wxF{{P2= zr7QnK&5NbZ31n&Yul^?wYoHh`Vu%Q|nG0lD>KWqe;6zGT6lvj6#Q6pg?d?sZz*S0s z2MuZQ)JBI=>g7U1pgYqFlbK$WKwowcV{^p_$#G{yk`)tjoq1w%F<EzE-`Zvj$2`Cj1yL22?GI@`!iiPJn-N#S8MRF-IPm604v=dfnl znraCVfO4|45KCMYb9EH!+l{DDe{$sYF%d!JW+bt9=Vo?qUr%PD0;3?!7>t@3E8 zdRo218<@~DR6wpz1FSupm$P-vLbiw;*u7Z;tX*5zv;UE8Jb7Th0M-_E?%2jlFTKd? 
zufM@tZ@(u{^#HEatU(nLRB)wc3!eP_tXAtvd9V~*-Mam^7$njk?*9GX_I>`k_kVJ& z9{xS>P(4Ui|9_MFV#TOFr=aRVBKsYIpz|73sW9P<8#krB!TIwKAnRcz_%mnDYoWpi zo~_fGmr98OEF~)EN%b@5NlBX<7Wc}^TV&hIL`c<}&M^a+HbG%)B8L5==mjl^uj`3KRB7E zwvXVAr>64ylT&!@;563Htz}$W5`EI|n~TF_{P!R;)rI6l2NGiKh>o-(IM4`RZ#{gx zbnx<2Gw=jTdc3EV`Vde-A7L4*{ai13pCwMToPk^giAgkR;^J?`rInrKZNqw0C^%aKHRTxoGY(PPd z0a@wBBqyk)b@T}rI1C9KM4<0Lk--G}>k}F*za`3qm;@6dX4T=kiw$D6qV{xP@+e_pj-u3LUGvx z@z0bGR7q>{g<|pzNhmNNwOE&|ia{d%1-1r~C$N=UuTQ=}R!O@7wLNAujTOimXG8A{ z7e-CDr*FD7a~3Du_w9rBg#&|)WR4) z0W5E07d*|}a5lBY&cp~i3q9=2jj=ZRe+Mi(d7auf9c-+@m8x#70LxSXfWDeNC|16S zTqn@fjLTA^ zTi$ zo$OjQksT|>vuIKaZPyakZJWoEwbNL+ zb|woJO<>Xd39MQ)mAR8gi85)XcSIwdBN`aq(LhI69eopqQd^%Z7J&@hS@9%`vWbp~ zAUG%pKc4`+J^jRz3KpmoFtYc=&A|%?D;K%njyPF4<6`ZCi>)j6mJXW7%19NmG`7Od z+6gB|cT7#JFf=sBNWjFbm4K7Tn=rq5%+;x%mBxnF~<^_#Y{YtLhvC+nHRFKbKZs3dm+FP&dPY5cWzbG0JF}2@fh>2oKrP8#PXJ5L$do}k`j{&(mWzi5 zS_X!u3>csm_I?0T%3C$4pNGWNCmUz^2MHSQ{7-?(Iun zVv0aaBpDG=WJX46b*_^9-Kb88pfc2#`Vb%5!o8T36V3GE7^ankGrc^7X=Q#)E%9Vc zTNEFzAI|A#XLIU>861CMDyLqZ!Y|ME^1-$mUSCtl(+g90W^p=)6kyHDWLb9{vxg-z zyEBg+OGYujuaVZWB(cV0sZ0wYKgL_cM=R5i94rtO>?UyKLwt}Y1*vg#G}I96=|ZTF zJ9Q;R)Rq&Ck0RnloT*2&y*@QstSmzQVS{^sLDAg&y{*@pohH%U=N|{Az-QZ zP#!JSDo<8xT^V)qx*=sb^bV_K{%|NmF#5S z?p-|d^i#b1?%TZi=3Bh|&ifi*J@jBHxcWum--^7sr{iguy0Yp7WV*kI1I;Q|jfs^u9DacZuB^3@-kfi`fEyk|OGiWd*P^Ait zK1_Ch=)F>XQnkuc^}mza6?Cc64eHpr^XL8_Af`$@T)!q`!KKUmApmyj#7QomIU{f- z-{blfZeISAYv=zE{dtB+0 zzaCx!R&E~s@$edeU!b0dE?$1Z3PHL=M41vP*G5VkCXf{<@ZuLbP|L^iQwioGDufsn z-H)V%eqsO&q%>QX+C1Tgd;`%x1F6i?p;~yOJWHSAR2>SFb)_|wW9Bp!TQjV}RshS6 zNo|hI8R^E7v0kj0;Ki~@o-Cf|$-L3-Oda9Ogf?f!4t1ofUS3mcMn|J1!$g{7yQbWn ziV|yz@@>h^uqG|VT)vM9aWTe3g&PqWCT*lCF>z)@Cz=Rc=@J|}fPm=!1jY>_EXjzd zG#g?Ct|BuW2~D>pB-MgY0k0@kkTg}7)C^tGS2`jC$<7;0c7ZNw`MUJ;&lHl&8ccwk z49HNTM`*SY(L$B+LYFB*fLZ1GB7;dU89-X;Akr%blTtBI+Cdby2qg};q^d)1NRN$H z$7*P|0o{EDOqpTKsx=Ng`D7R$zn{;q-!=2cu^#?B)5n$bWPmm%J3#J%hXD~b zMg*9e<6~rthgj$?Mpif&8RKMbAfgtWHP&V}St=35Vr^vp0I+n_YW|j@3@pUrHq*q) zK#3`)Vu2g!sM4t((PDdYCX`2opP-0W5ebJELl2*IWu}$G;1t#CiMtlH8N&o6O$&4U}RSVP0b|? 
zZ7Zd)IEk#BSn>wL|Z4F!@H@kZ=tnqgcc@TuxL4R<}GH~igherwwmp`_Gw^s=-C%F$a?MV z5BN*~>&tI{u(wdUZ}OZ@iZcf9lBGd#9+HM2*x z(Nmkp*xGEmieu@kN@QGREaS@~Su#9_WgR)psEuL1ShDk5Qdr)V$0IWvcx*vC+osgA zVq`wguIgpYsQ+qZ&1uyZdfqW<2#+r8IEbTh@{K+tWJ>UJjRdFSy4>PjbK`SDAS9AS=JE8s-bbLXo=*Jz6?It*v{$0Gdc6p zOn!fH3co!+j-Q_E<;%y~cx7!d@9(JN?XA@uT$IK7v5{<_p2LR81x#&CV&RxdmQQJA zL`^1TX`$rB`3mHDk{;?#e1Ib{zV^fgxR4U&L%s^Xh53;h6+&lIEkOcUO;u&|w6zKV zg%arHDt)t#p5a4DiVhQJZ z-^YoP>|`SRJw#N9FrGL8RCPQm(2JtXB+7ErF)Z5K%EVuD(Wa9SUG zx3AR_+*dE1$D_Nq@zmq{cx>+@JoUt*yzB0 z*>F?(-JcgPbN-xKSMZJoQ>RXy;o=2VtLhe){=BLgKk8alZ%aXzN`U|4^jVG{Kf%>2 zSGgiEr9ezU*26HMf-L3XQlcPB9aHC3$WVF3Zd|>^)jzLsSw82oyr)`+@b{y~ID7Jh zRtxK<0Ng(}#pt?lj*BOba`woN{PFYG{CVtKZk+m@OGjSi><@=H_W2$@d1(o+9hk&R zkB;T3?ZbI;+i-TTX=3HfV)m@-VB3lg#tqM)CSQz@3}>>k97xZwBQ4#Ilr%e{W6cN- z(<2~6mw;daD&GOPdG*65K)@$CwJBh>nalCq`Zq8Es5xqz*xm0|<`l zuSr;BKY71?Bq#PKJ8clf0#cQEhE$1^We=t@Ur&sD6RHbLD9aN0z`=N}>gcapuIwTM(CIPIQ71fl&g9Vg2z` zb+p0+v|@#cl4MLtwId|Oh5*?PNHirN-jEQ1v53UM#HI`;F?}$p!crM3ESYaeO0EI@ zLb44B%{3(?M_!O_h_4V?P^u9T+47ylhFh{VEy#1{^h^Liu| z8s zICHi^;A$wBuGMq>R*NW;9_c9Kxp!}_bd1$vRcsN9WD9@YTg6{@mhji@W!%5BlKZ#U zh^*$Xo2$8VWhs}=%;Eg;S@OUw;QX&kSJ$egsq8*0FMdgx+a(o z6!6k9$4=i8TU~S6Ho{tzg`Jt{gB&hZq}0evz{vr37cYS;TPy{rLW9Eb_41eF_Sjh1 zYUiEoU2t-6(H6*6$6W2@HTG7x*&1VGG)O*sFfoCSRObs|)>ENLRG?Va3~(`0#>d*_;>{ha774k z1>)l9Bljd2e;>6FwyF&#fF<|E#?p=e?;!bKx(kRp;$Y{j9e0!S3c9S#?F2$y2oMkm z6L<^r^CLbiLM-@vDvOHAOV6TQ+N!d0YAULzt*&Ln@DWU&IGM$ZmoRneG{%e_&y?x2 z*u3Kro<95%PaJxVeNQ~eYwv!*R{~WZfBiile(^1zeEkC-eEuyzi-Nc=*78|ZaP-Dq zuH3oHSs9f7ym^bu0$69nf;}$-_SIVoxMYAA$hs|nbz2n0ZGo&?|H*YR*DqeV%=H`B zxN!CiXMaD&nWIN|cJD4WE||sgNxh6~C}Dh^{J+W)8B-R+*s^f?%7U3uAI+lHWaib! zF{3JyS#_~2k^y(k*dlg`b^FM?W)_UdWbgbIc1$0_n%)BT&lU@KT0IA5HuJ=SE}mL8 zf#)~Q<&_oPUb@#)> z%m#BSdyGsiF_1pvAb{oJ<1hWsMhhJp%CUiibkNmNWjIugxWTC9rwmnomH?IlD+>d; zPRgsm*z92y`HM%2yy~l7@85v*pg_bR*2!$ z5X=0kK<1Qqva>sdXJ?e~%Ay)R+up&6=caJ!t+||kYdS|?9K(lu>Um>ZC2#Gh;jxA3 zJioq@w|BR*V|Ff!dg53zUq{c>S z`B{k(VWh{$kQ@~*&#yPl)fGIta~o5~jw0OOTl0L4AJxf>Nn=?&Z#wx|DMSjOr6xp@ zlb%3&Vl=~t4q?UOInwvb2@CY5uCjAihh z%^+0g{}SN2!=+2tq`l2W0jlFC|KLyAR|~0My(S=}W+}>hUA}ysbLSN>sluV>IjYt< zJbsdMXU=Jhw*M(mbnUA2D!1fwZeJ0j>yJxZ zIU{f;pmpw~0G7PpsiP-3^1Ui9suJ68azoYp`t2x}PaNUqA4xEbMAdE9X+hg z9{l0Gb$s;v9Nu_h9EW#z^5oW`>|0;Y3VBYaj!tCG6nS1}mD5xgKz61T1p*v-xpo3s zR%B*blaORgMARUGr9p&*4J0t6zkpRg0>f0uuRkFX0|<}PAvVs4_!u)%6O~uUS|G}d zxL5nfeMBc|P5!D0HSU+fJTyE7H>}NJ=&)DprTU za5W>cA0B}ZmfP`(5TJ}V6|ulC&J@3RV*(P4@sBsaH)asN(E|vIA4o*94l!x+yVLba z%rqh~!;pSqStf)DOoe1y5SVU;pL|GAs?ez1=-6Bv;&ZJ<1T=Ex3ltcUP%xOdyg|ff z>l2w_M5tO6G2NKN0!x7}W0LX*kX_!N^2UKQ4jasfUPHP^8#88t35!>_v2#x-Pd}f) z>u)9V{zoZ%_hT-9oU7s7#UavJI;BHQ<@T+)+`GM&f9~z({@us<`|d&6-p4zZ|CN@ZCpRIgIj0!aP`D)&VRp)pWj}~>$|73efBV> zwU*M5ok(hE5Fskm=IM=tn{QitYg4Q&1iCD21QP5Y%$&1!!rMbYBParUTW9T< zqn!&@0txmu&H`cHa;<|vm<{fZ&UlHEb5TV`1+LtzbV!eJr=ux@viv}N-3E!}D*rS2 zU7z1%<%QDujm?4AgIDxAX%$?rJ z;<-J{p4!fs?h3kw6)|F12`x=Uban{H46CK2B!kGv0766j2@eYv$O^+>Kqw?6lJJN) zX=Cv82*$@JoT$h|0j^M7ojq`Ma@SgWTPHjOrd;e@aTCZ=+m1F)TKLde?wOMc8(KT! 
z>EuR$yQlUnLNgkYE=_{9WXpiXq@&?}3?x3%W)Q=$X3UADlo~SORXY zz8I_JapeA+Ti9V{DaL}c8|Kz_7@AmUd0K-8>7p}O4?PtI6o@hts8S(8Ljf%1>9tiQ zJH#+ClVcVFV%E9_0#_z@S(+2!Dpq;054i$09VMAeYpbF$J%XAzU&fS1GffPMS+&6| zZ4O~^trx4Ce0hFWA)jw-ERHXk*#JxjeFM6Khv2Vb#)w z?A*Mb=bn0;SDt@{H(ol-hwr?}=bwJYr=NeVdZSN`=~0i4(Qtlqz>} z{nmXk%6U-2L=2IuH|}sn07xah-@fzDf1a&>1c($=Jpiz4*9F29Olij+fR;M0YH2-8 zoWCRnkP=k}LM7CnIeU@w7cOhSrHYy=QRN|&sC=+fT7lEcoH(W`u^;E`ALj(L&U5|h z4eko?-MoHFgDwSGs#W`!l&9?M1+L5I+`V#_Tjy?a^ZX62oxQ|a`MnpF=j`+ae*57l z$G$(#`D5p~eDVTUPMqf2sbkze^9#5C_?o{ieaYPm?{WFqVb1;ZIH$he#gFf<jmbj6hRDf`L3ER^(+0pk>*Sm2OE|l8K0s95)uIG9n{c zpR7~^@`M!%vrSM7HLJA+D~rsiDKn)~0IRG}pVEA31+vNtj490%&=T0HEi|U7*oclw zWBQsbnAv5^qA`vv8Ry7?adylbXDi~soUsl}AL+=X;SK^=jsjRNA|CXKjBasfRI{6a zmMfjjE{te$rM1S5#!64>%G?FA94HpX$&-7Ko@Pu;>>z?dAHbxqfTeqY0<8gfhw0!I zri-U)BX#hO*1E6r7|-xcuITBz>Yq;zas|rWp~WBweh&OiRLqXd|+% ziOI1Qh_WFz%ZhlBq#P>}a!iQIlAEoTuSqu|B-xArA-dp1bE30sh>;r@l`)Xyf&t`J z4Whh3haqi-G`5Sh>CiRWkZE&lSh>z!K+Bh>p9>W$C5@B6R|(-a30U>=_x%|HSM#}l zXC43k^@xDf0RgM0`1kKa{PXvd(jF9fT;ws4N8#`N@Xw?CbAKOy-`T^zcOT{ct-aj4 zzL)zq9~J6+Q1weeKYI~EJQJwU?$4MLSG+S3sV6l0WdRD zfhb!I5-iOHnq0k!iipL_!xu9XEA6-v3o~oYE930whP9PF=5o%(#twIDOY97FaWIhw z#?6e{q8J&xb4g2Z#?7HW(IFPZN0|{5X-Z2&B9q2fGqJClVXe6o6+}vhiy~7j{o=xS zD$6rTO%5j`J%*afVzF4_hzbjnK~o?ozm&KrwJ5T5)}m@r961zcr80ixFm`TOp(V|3 zTse<*%Vx56*>sjK7|)D}?Mxlt!nodQdPkIMGI?wr^JkA_+}IWx8wx2YOd%~bg0K*8 zv4q4@PmC4F3L_{ugt)jQkrV<0BL%KPHH94$olJmV$o~MAr`#_ERJNkL6ctiHj#UJyU*X0kxHtGB~v} zXVxr6_x94;-NVwQ%d};4W-nO6qUEdDckoGGeDiJI_~0X6e)~Nhd-^c@4;|+DH{O-~ z4|(_VZ~5`}a~wJICuc=T{Bh|D$NxCX_eYL!;_Ml22xMKlaf6H3t_w_w0#VSV!iTqS zb4ATWx+fO|jo?vjeVTWH{M4;^OFq zsaoXO$Pr@;56msy@p2C)GB^%5XJ2{F?Jza5(U#J&vU0>uj0O{9OY~Lg4Yi!qU_(q~ z+eVe-7$k7jUl)^sde{hnxhfB&nShyGXDI+?tD}pTxjEtT`H9{xBzW1A6(ECZaSA;Z zY1Ae7(OVSD+~#zF%4ilhhp}>K5F3XF^URD)zS%yEA0FxC#8VTv^7eerzBY~D4v*$n z0jzJI9?tvwhVcIWCf?lL#NLHDY?+bH=2`iyn3Th`5y>*Rr_fdr&75)dG?yij5#ucY z;!A2|pyu^SkBcNzT2*nqEH{&?!W^wCyYd1(JU3=U8#Bg_)+({9s_az-In13rk;$Wa zD9_ENG&_@mv=o7w2u%_~gNX?W&`LTe_*po8DvM@JBOxqA^K3QNRx)McSk|suN{c{S zaX~g?Ms+c9+$fqGYP55!S1w_sd~RKJ8RN(HFn88edIj=Ew6}=S)hJ3x#*H}n{sMKm z$zqtw=Z1KB5Gta+Lv~_3*+~f`Murg){K%v9OTyz*7N?Msl2kcn^*Ub)BtPk>?}4c zDq>n+ENzV*jOvME_N+X*yQ0L1w4kKGnyPXqaxzWG7P!jKvmjR_D^ox#Rga`(U6Q5E z$TA{3&y1`bGl4M+@^ft|%(Ij0?ZgnbBqPmKMBXFQRAAmr-cPlr0x+f&^>6a@<~g+{9Uu>smrJFY$=zWs2K)-9kv9tyldt6#5d7=0iZHw3*)cB)j03=!Ac)z-F8YsY2!1DQ5HxjSx~! 
z)@s~LZiu2S!-euNd5F9XNOYE)YBi8Rv%vyaMtGSR;A&`qrbI5kqd)1CLH&Za#@INn){P zQdik1ouW*v-B=?0y&2ot%8vER*|v76Ru^Z{?D1?^F^hG}r?X&YH{*MU&^@e#DdQTL zFuGPCtAu$1Sbbxf#j*~gq$pXwSB89Eq`;kgW==X$GJyN}`w|T|0sf)*`-cf& z#S$D8NuU5$XkY|h0#~jMZiFb{5{2z75-QgQ`UGhnEC&HnFBdO-Tzv#K-Efv;9`?>e z%l(ZDjl$2}oA97eqQfHPUWs*{oF>0BUIvCDN{dRFJA0nUTp6TBGk5L+)^Fa*_TBs0 zw0#$wcI;yRQ&02KTkr7PtFNz=qU`QuRN6Tm7z?l31@a~6iZsw zOu_`_wC%g2qNC@zdpS0`+Z#F#KP7s&O(($X0lJ1<<^LNPQHBf!9pV9y9* zLzD4z_Q%x7217jyEKJ2vu=Bvm#1^B$(l<=(v6SPs2GS3#TrnMFh-rU)%`kB?mSdKV zcw0H)D!}ZlZ;Yq(r$8|*5J_3`ryJ-oT2mZw(a^VpJH z_Ae@6^YkoccL=Z!i(z7GoG5`s zqE*89gLMNHNU89kTCjP^>}d=W@KJ!Js<|s~@WvI(G|$wKQt87LWz-fIYEYAv5T|*y zlqX9`Q)M~pmoBESvxC}_BI+y4=(u2vtsE2W=@+(Yx58>CYl&Itd+j*5lrYENlSGlIr6zh=>muXdJ68;V$>yB8AJv6 z5#;4gcz`bv!TzL)L6j07O;l(gIqAua8#R*E%N8+f>O@8kZ>6_$7;~mi){br4xR$35 z?BmrJp4C9>m6u=U?RP)q{f|D^yjWj;^W*F?If)3Ny;}>X>q1yiA zjFO8SJMo9so)x()@N@g-ZIL_Ny78b@V5PuIfz>?$yc^eUYT%_r9Y3!==g296s3RQt z`WFqbRCw^KPe0?E&%WZ^2^lAT`$P2CFPxX_?#q~P@A4V0p8S@pCqCxJsdojgUgwqo z*5zNH2$XTP*dSZXGa)Q#;4NJ;!k6Z zyOyI=R$wKNV=2%jdM(38_O7MLSeoDQjuI!Y)eUrEk%Vk6c*Z&pKnE0 zmZ|3bN>4Wu<6K>Drh(f3mI8sR0(t*}d;zX}JqikRDJnA5)-^0GHWTAs!I+_ZzCc%= zp_ZdnSES3(N9wCW3geI$y zpBb^)s=k9*fhi_Lr}Mk#AWM|R%AkEiJ1VJ5O8)sa!UGBRX>p7UAoMkZ_N`=Mf3d+HDU=2msj+DA0ONgRFlAvi|-@VCwHZ{QK{I z?%&@no#_$&`Dd?KExWjVdzU=ykMQTYZT#@nQl5Eo6bl!W(B2tKX+tn+)&7JQxDt@> zN>G6t!TF+)^PCAvvm`3rjASA4)D$B|3=LtbT7i669E(P#Gr2m9p-B!D2N;p!JcuZp z{)F1d4Yo7H!&C=HeLZcd4hKV1taS~s(AC94PZujAQ4%JG*qRz)V`hZ8>>J5BGgY8(%c$L6Zt#?U4bhj zLOt!tOAeuJNH)2tegYN)2@f~kwMY8+DfO(Oc5)*m!)&22w*K? z^Xi4HTr`!;u%>JQv;-!MZD43)At{MLV$q}s?B$Y` z9!pYuIBCgoM1}_w92h`ExPVVY41ob51j*-zhs6*iFr|PhHarf054l%Pp7MPnwbs?n zReo;>-fliP**a@&SYSAQo^qY5m)sL?JnY;E^9>;xuHp3b~QOIf#R8>`lDV)@$jZ1@kdo_OvB&4cyKD{u1F z$6xTCDw_E9k9_#ePrU!tkG%ECmwYXj>ha%CbM=NO3>i2TXkEB`nHzT=B)#9b{a^_l zZHXMw4yskN3hxSR-M)2;U%vZ}*AE}&-4|bE!-55jX>4F_cPFE(O6e)dWpP&%OS)

VJiTEF<63G+3k$^G$pJSTJ3O4+ zwR&YPF21Zz^4#qA7x`dGwkxqzjlt7guhJyvB%$%?{cfr!Y1{VuA98Dag z4>{s%@*jFrJO z!;j&ae)Qx9vSer)2WHgr`l?nAi3R`m$_maLp1`G7r*Zq;1)P0;GG|_xs(G+Zyf~gO z9vjMMk2dk%?jamnS-{q*39KC#%ewK2%o`CQ_t~4dBh#2OGKaR(Fd6)FnKH79qLg5A z;sZ6nN>sU5vEf=MP(fB|Ot=PEDkP}-iV_7|DlE8i!EA<SNrP zZq0+G!0L&|_ONx+S`E4u%$>pN6^k|S>h5f(wW*HkvLZU=eU{Ikqj|27JFwYHk9q<8`5ATiuj-}2SGSPHTdV`g)!iEa64n47-XAV8htFOPq2hwNMiU`VsrNV=!mA6P>L>28-VM67}Qle_g-Maga z)+)gI-=3-e4Oq&9^#G#o30&R$pOD}~5L4GExKg6ZKB)5=IGqy+x^RUH0(J_@9)t)l z%WI^M3T*v;>a3QFbyGf9h5eLQN_nRgOev^RK&3oZ_wU{3j(W|1-mHfqLgm3ad;F{b z*022d`S+aq^*EQ$p6B$j6a4(`kDNSmk}Ib#@y9QxIP>#yuADrhKD92Qw%aXD}OG*kXD9SaZFwczALMtlD>?o@c zFe$U4pxBaJIi4vXmXT#lrbxEDHYeBof#jK!CxDeF0GBV4mv1KLb;!;gEU(k2P&lEu z*j!q30WlK_a*f1*H>5aMkBWl9VqECZQl&>ntv;OsTAeKx^4)D2*=k2;lMTabtZ1*a zVpzE~!z=6=QR$?~@M>o{Cty|UN_(BF=s^K21z8Plv^2$ti7Q_iuMJ35OxkjzS6P|2K zaGY3>5q5Zn+u{>#k7ty?R+ui{v3djvh(!p^#$=lln`I{NuSZPkAd<2Ml2Oo)(yD%p z9A(UwZJ~VlVL8A5K0+SY$^84*Z2r14n}7aV2>+}UxLPOeE&;4(xc~1<|H<=+qrXZr&t+l`S`;r?0RGfQ|G49 z))z(baDU?KT!^f45^*6)9J>$7$+T~-Hh?K6}U1nz}eIWM=Ote-(mD*O`D=Q1N zjEumXykCH;Eoo5!6lX?Jo*zwOq$oXmA#SGugasK3!~{^3??*zMoxp<+nQ2}EE_S4* z`f6}BQXr_jESY2ho}`3eGSZ^unt0-)Bk=X~6!8|Ii6vY>COs)zV8%;+mpQ$|TId=& zL@e+!)-IXF_Vr8IuyT$TB3z^;xOXyXY@-HPO|_}gi39)}GMLoYL|s(|Ri$YZ z3%v+Z3Gg0%c)Fll7lemDY%Ysg*%RCA!yA zTRB84*D!6$OzLVI8Pz+EaTBI6R{(3->UB(?GoKmr7qW5ZBOH41WuAHYH4YwriI?B` zkWaq-iBCmQy!Gi61y!QSleEh{%eEIb^oIG_(tnq7HymXnX*ROL^EzzN7 z4N0r2wcoohgR}Br-TO-he^CemWT%cF=h?>|WA^w7OzG}se&1N;^>#C_vx9{rhtXS> zLr+mEYsR*)eta{NYLb{(6VJTX3}!aOGpjzDc|+rwFK{)dHG%0Zs>DDNTW8mDaBUY$ z$L6taQVCD3=wR=h2KLWw;E4sTJU)LokInDm=@k=rclUDM-@leOcCF&|JsVj)Z7em} zDWrx8410J8Fj-<@WGT`G?}(MY1O9Gd0{*3Qq=|2KkK&_^4Sc`5m0R!5=Fe9rasI`L{C2RL z?;q{tr-QwGzGo<}Z!F{4l{xI2o5Ic+scfH`&Z;qT&xVIFxgmhDHGwRjP{QKAa#~9w znAKa$=%K}wrbUnzDWDPIM_P0U$&n!%R4H$j3I{50RY7`^wy#2j%7djAaDJG7rMz4v z*=fw2Fop>N%nGJd2vFr#sW4qmQUZA?Ng7zGat>oUhOu?^N}hk>02`Jq(Y#o_!`nEp zcNd2aKC0za^>hu>S|tjo_U+!z%P%~`&TSht&()^&t5~&skp@_!x;tnZQcY7`6(ieP zm^`MJWpie+YQa2ajqlUoYJArSri~e;!Q50iKeVozBh%vwKKv_?b(7P7 zT+}>PDjfK5?L*Z*fUT-XW503x z^soGN?g#Gv@flZ-zRKC}p5XLXy9KVc^V@rC`Rv(gyt;D)&uwnz!0IYC&dOrR#AK#+ zN7CCGK%4Q#nx06TTxXc zQfkeRN=NEzovE&Kq@qS3tJ0dnGJ&ijGqMDbGI9(_%QPf4(}=We+0PaD%CjP;z=pg+ zTXF@^vT{`YC|xq8&B`|t_%frQ(1QFzfw6qGwxA*T0$9cQI#iYDQeUAj-_L-yS`*P1 zRHOu#FEw-br*q-(hN7~DrXe)Q7t?EJURco#CUb)CK=`6;) zlRV@0G}MVmTU%vIMX4pl0$D1gnVhIgOyoeKBXo(56z&PpB{X257!dtL-}EOebRgjo zg9(q;B`ijd5Rss$!2(@_@C_J%myZBeiu}N&OnqVnu)^a96B0X!NCB+)baR0UOJb8v ziB2#iI^KY&1bxC1MUqSevIMZ=9r22^$6Wx+Gg_?5Xe*q;^l%OzjGIaVi_yhD-iV+? zV*=w1<+X0M=x=j)`IUB7Y{;T#Mi|w-USth(CZ$yd>}G+hT05dE?FcWjAYA_0L0P&)W$95Z z)Y~W|JiOA4g}q6v8I#HMnjkt-g?__@*nM_Ow@FDV>4}AftR_dIR=9V<7#C=Vpss#iQ&|hrpv>bLa3h=E)D|)Don%z z@skb`O=(dOIRaOCd4Xi4`;wICA=_cv;>(rtnw;ziq54Eh3R8)X4I;o-(V?}>zB`F`GScWU`?OU&ZIE{D4pfB zHD=KGbyWF|vCM zvllF8)r&5QNo+aGf1l{b0wldpJN zv(Ufhn1GThK6vizSy2ji_)A82HHBME=e~0FL0uhH5n5H0R?BOsVuZ(kJIcpzzs-il zi|KA^WLQN7qwDLKC5m>%*imemHje2{6*MP@GQF`t1FYF{pC?o&GP5azSmh$?}mAv=F4zZTkFukjdhQeHO;$pEeQWHBYaJ6^UmMC(w^TOTM7i$A2Y)k~G z1f;?P6G@5BBfvWnduva*))#N50C|Sp@s@2b8*dy8?C`d9#m&%)KuZS#!~ltPa;GRL zoLnD2@_f9g3iYQf(3Q#{N4hct8DA1kcaAqBbKIHN6vejDxxBHwjW2if@cT29IP?5? 
zPCq+}8}Clz%nM^U_LRWY-eG*Wxt@17)$`V-T3%gW&QnXX**!B=AgfUBLnRw0WwUZr z8tW$&u%Ii0>BBOZ(40M9826$^m$?Qpen)j-;u9AfCAkDKit*@7={5);pXC>jj-b4qAK^PjWLDtZkD!N*l zG|$quHLG~_nM0ZftD(G<(IbZO?2`xBv2_FOLmM<1-qxgfr<5ma*Y-`C2W#2ldD{Nc zMRPSz*9iG8)fL6mRFp_xY-MWSX!^Q37$-nAf9hoBO`gciabuZ2R{FAt%KK7Yw%uC< zu$L|pLur%-XDZpfAUj^^WNl&B z&Ye8-+)El@sime=EjhJ@r-G@6A;L=nQVOzEsPG|RT^E4*-yy<>Q1!pPSr1`Kd8L$x zN=(9-_P^=Z@+Wm=i{6{c9P$Z9pmza3*5bNi!%aM zN51))ODE58Q(klZ_v73;^^?HW*W5YvA=i(+#F=mQbL`_Socwe%r$67!uWv5m{R5*o zys3!;tE$*GFOON>;mqufX55Hy0TU1ED_!MzbEK)vo=(~CZt`Ymg@BWQRzry`jpcSU zl-UWu+fXNfRaat7Lxnv})sD2(xX@7RKxMTJWz|*`R|p)Hnvz{;OnR<9Nf|oCr#_HG zRcBF!4|4^o3Iu&wWQPC%|MW>jK~%1a#Rw{~BU^wfLm(_PB_cW8|REdCAu7wx~Ch{Dc zl96OidV&QhvBo3|P$fp`kRTE-P!$(vKwP{r(Q(EEtEGYh2jJ=54_9{qtjs(;(sJa2 zRH3l=0R+eNCn7;lZj71cxk^Ye)k??53LHfz8we!n5*VkaCAa&eK> zl{pUb0aikImj3;)3+jhU_#iwY_3#YW$1_YHudqS*MfN8+wjU`u185sz&gN~2eDHBI zM~{z^hj{`wZcG;?GhYB}fmjiXxqE-P$T|U~ZQTENJNN!p9x2)WX9s`(tz;+vNc%u` z^4HyM@}O@Ot3mB=;pUy?T)(?WI?GaSU0)*B#cV$RqMN-33YoMZh#_N~DCn>uW0)-o zO*Vv=n-W=OPHd$uk>ytSWDh1dXD~U{wnEtE43UR#W>*ZW#$~XiBaXgOFB;-4C=D|r z-A``1_h5n?2jXV&U~M#&i0fveN;jC{X=a3*nJ&&2y4c(3VQpuKfte2arUqEqiG^?B zfUQy_4&I`00tpX|z|r1Gz{vu4R}T%WtSxOY7R9AANEagmW9^)wz7fW%^1B>2*3&1* z(~Zoy2rWEOo|iynQJg3)1Dx&k$w+abtvQ*>@-PzP9mvY`5}-;VJ1c;gC`X!`a%mk} zNU^*oCdyr|381(zMIa{#Cwoi$eLRVaib54SwYPTA0L#w8QJYEF-QKP#5jR_NQ5+sL z*A%g4$xMN)`2tuISg~+CGbatBcSHppE%_Q;jTu=cQci1qHZ`S5G*)L(SCK9cv!6WV z@ua9cmS};+R0UWO1o?Ro9~DkvP8O-6WFkTYuH-(Y#UzL%3SdQu5(*Rihvu^V?R&Cg8n>|K)%848!DV21MwvlYB(d- z@MY--_qi|7b@lQUjvqb7M<0C1i_bpGlY4fvK$K~1b{4($wG7S6rado{1)Z(zo;#Tx zGsX%~WizTIi6vc?tn95}VofSz%43<{ATZUQ%&eAp<_$|`<=6t|3{PQhXFBtHGC8!N zN0arF%6N218&9qn!M^!TJiW4u7uNOh%ElSIx_KTiZJE!j+m`V5zO}sn#CAS>dLM_j zt`o=_Mz<*a#Nc3T4NdW|bH>%i33odWJe~Y;vi1_Va>Lrp1rtL%{Cpw^4UQ87!Vw2s z4}s_y0=+}CbRg z4x_n<^7Kf`(qd)MPar2DQu?8iXmXO{GcV_#iVCR6%h5bsDulOu?rbgbys98i`r=TwZd|KDRbF-) zh4O5a$Y<16m1_A`4?}@9l_gpZmP&9}Ia&<@cx^2WT7vvgd9GA0S8H806S}(?CJ^7* z)WFU@5S=aa)WK zfgrW5W)mvFQVH+c%tE#QA6TjFhu$nDYF|N-s->koQ7RuxU8f*Pd88iZT0Qh+sc@k> zu8uuCu7K-du~P+P%7dlCgx6(T0h9{;sqo!_*x{j+=2%i) z;voN5(Ot!Mbk@2vu04p(As#ddP&JB>KX1mTGI{zN4XBsN8tgbV?bY;#ictjR32C#Tq6V9SyW zfvD6hBT}*jv~mSz3e3qYupmR=D_wvqL%>TxR-Q;fwvNbP8I$!yAIayce6Ac*3Nno- zPB*3^!<70gOB%DSq(55FAls^~%d(I$Qh-dPGRKVi3Tv9{?F6uFX>GKpwaK2AdV#E3 z8-`R_iIHzE_eOc^Y$z32E6cT~JlBb`Y)1;ytjJ9^B`e8<%tV1nX;Y+)6SxYGG$b%s z2Oqxycz6o?xC&t96zB>$>XDE(h;X?fp>awEYUSc%1ghebj5Se!6`7<*aKb?RVnyN& z2uQTTJK9>{$`tz`Lma~m1+WHV8{A)mE60%jxP%SFB~%BO5K-=dA_2PiMCcHtrhKOK zBe!G#lV^JK%2e*$U&!ry3#47dEdi~YcNcK`uf^Q?YbiHxFXH;m z#nOqEap(3*Zr@rXN@+cR-`&LhJDY_1H*)*>dj7h#g@6Cr%xwXy+ked$Sd;gBqb{Ncu=wg^bp8534&NL0DNWsRfk+Y?x7Oi-ad$rTo47MU^qcWnQ#oI*=VC50giK!kq`;V`m_W#TpL*B0mdDf~?F5v@pTP zLLWCvJ?yOYu&~uf$7B$CCI$jq4w#$S<7(?oxL>4zM6kC0o`NbB`cq(~!hy=0r8`)c z!2@+Ps8WEX!h;5S0$BoF?oQ4`1^AN?8bD!s0(GSsB*%E-<0k*ZP#dz+JSZy;COy@S zlCmSGI<>S5Zf zxvUnrTCsKmD>rUt>z@4@WIg%(%RKkm+Z=xV9p3oxGd`Dr@`n>=`0m(gzWwzCCr+N` z=C$kGx^hi}t6MVgUOan_s~7(ixx^nQf9H3#y8drR`TXNgdGVR&*t2sNi)YVe>5S>D znl_mRQDAMwxeOQO*^!sd#JUm|bvCen{seYR@20yjjtMnsY@X7}f{s!FtT?6!V9jk$ zWoAnP)0G!%R36g>wq^=+%@N4jyR@C>wv1)xyhipfZsp0|M7FYA$wh{dH)O7BDvWdSw zTFL!Sm+|j6%eeRX9RB@Q#*_D^^ZN_qIrZ{1et&5uUq9B%8yg#WK@`(di;LMcEtl2Z zX)GR|%nFsO)|$%H`b2t4BIqiOr6o6#aV^C%kjH4=s{;8xC8-Hy#DtL*A5F1*M+Hpf z0y!!?sNhQVlh_b{Eume72G#b%BzUz{PD@Q0vnKWla7AgQ7*t|=Qbd>*CRDjs3cS?J zzQXiWEx}!d?-tLTE=ER_cCK@1la|+{;HkVMUxTY0fw|J6TqgAOYMv_vSn8aDFclV5 z*Q)%iVXcj#Y|7{v(Jpfn=F9ju>^oY3Iv;mkqh#Zudsuies;+7(bKz)~J8wXIr}@UFsps>1t2z}t9Rb5^!*`r|VL9+V=&Z_p* z83C;a`Bw_2RH#tFl>#YsPIu;Psahyx%&uB~K 
z+`25+34~ofd5UYlALsIK-*NTi*W9`A4L47{$IX*3bLaH)T>kY@etmx>U%oh#Z(o_g zCr^&$z=|?YM!f+a<04Cm)1%rh7?)T zB5hl(t6XFMz4<((3G`NCI0$5AnoycyPK^LnQ;s!5b8KkIwiFS_$~0q0rsyF7u<|qm zO4IbI5jblQUEWk}Nt=MyFa=pdY-tg|YN@po1K&o@+lk&3uq<)ZN;WQTtf%p5PKXZ{?^i8G@(`gq#;hk*EkVX=Ro)n8^Ws1iK{dtvDlD`dI!2X{OE1-X71QnR!k79zdMo9)&3NQ z8<6PHk7%LSAZNMRj%IjUnG#?rgllC&sD%;!76y3R2#neoU}_68SjjP0 z!h8fMLgVrG4#2_AUVzG6;7Wm&f+&4$-^jpF0Bex6YVl$tEi|ZF3uz<7q7U`)Ce&N3 zkE%kNYVwH=aUwC=h0Ih>lH#36NfwJZ-htE<7jkp_iHUI_KR=YIqJ$gjvj_J z9Ep)Wl;$OppAjpc<4-|WA}w`gax9&Ms1V}9f=CwSlNA$7TwoyH0#2TGwm4as<7{n3 zxW6Bsj`ld)TI1>BgtxmWd1nU!SSS2_eFYNTwa{U7SPaqOu|$MMNoURw2+h*ARe{sA zv@EgMDv6AWlY3Rn$nMd!v~_5J)!92n%f(u`eiN%UZejiQovhoolRXEY){3s{{z@*5|9`GvD5PV&PSU-HHa zFY?AqFY)NEM_4w00jm}-VdBUhMh>ZC=c0K`ZW%&RWB_eBDU8TVWo%g{y+ui^8`~@p zG>_>Gxr{7~V&#}RR*kJ^c54CCn$nou7|*2oSf;fkvSf6Dz*P!!Ix?BtnaS=&0$7{J z@yNn9_AMR8!PPzNUowoR*NowX4O4h=<4g{(o5ibJ7xCeN4SaB53vch=%$F}5;D@(f z;QO~;=J8Eyn9)6gp{2zHI67c2&#af7leV0ZlZ^{54(?hJT}MYZ^!1IfvbH5YK8eDj z5)uTKgZu*UareUB%nC1i7d)+ONc8j|#@?C~S5X47k#q?#%&skAg1~n}peTt1Zzkjf z(3xmUPr5A|dz1L$Kp&^xSj084?(clGo*VBi=C98MuD+1*;{B*UyqP@r^b@@F{L=vTKncIBT((dvJAZ)M~1rJC&Cyn)A6{*(Zc@=B>1R|>K||L8s5d-GN8eN@O$0oJIl z;aa#*f!4w~GuXRxtLDK{?K`i(!iOJx$QNIIr-cSp{VV0YQXxT=gQXzr+RZ!M|M$VH zK@}oYUMmGx>Y8h^{UF!s!Q#)VRr?B*6kPqF)&%|LM-7nFz6$p#Pt^Gfmr#Eyab1Cv zN?L#T9tzM@u9gZHswDYq*B_K^P@*7Bm1{VA?oUn}Kc$8IE{S2JW(b}+eU_8Q<-G)= zPX8_dC87XJ9sBY7pZN9XBOE_^LX7=m{PN>bj{bU5&YjW>s*@*<36!1W#^pb`f9n>1 zi4k^F#)osi{Ky~QeawyDzvTY;FF5o4vz+*HHy3_+jB`Ic!gne(_|$klKRk)gpPRt5 zo9bCNHJOEDA~nG3taB4ca-p)wh9ZHiiUMnfl-SWy;mq)QPdb}?>8SS-m~auGa**dz z!PEn(7YM5_9l>%Vp#g^pc3S4CkCQ;ZSI$nS(M!-rWTqGh+d9rkfN-`iO*@&1_ z6JpZT0?FniW?Bf8SqfYUpa=}5jOCyOMBB*qUSUfx%= z>2f|Z%YrNctK4)`4X}pD`wY#qVR*hRBMNLZ=@4nlx1w3VZAi9>w&r1FwgKfidQ|4? z%WoIhDl@0P)Qoy*n<@n?tL1)FJ80oh6+&&U_GDPSzeoVhRi4xqIS8N%+{y3AOf`Mr z!HPB_Ji>sW5Pf_D^ziV~!Np||{mQBgC@eE1t3bdYQw^d5{upblP9x`d_Z5GF*LoIikJV;oqvDV{C+(mC*a5pRAzly|;qn=Y(V314@Qp;W!98L7EDfI?x+N&w1&%I>rQQ=HR--O zgvt#La21%c)*;ApFp(Degq!GSwX*!IjB$4~$K2Wo18ZaSEKD#Ga8gyrV*;Xx35>+s z%@Z4IYfOz*GP^luCZ_1==wf1ItO1tVHa1kDK?PZcSXo%&>m^|2?~ji_M7;7~WyF&b z8$eQw7ioz;ga=#71L~;dU8SeF6BlPsbhNDimJ{vmg$x~wwR)lnLQA#;6WF-d+ zOeM&Joj`U@^TBPsjj2Etdh)(Y^tj2sBdT@ zPvEMiwt?{zrZB2+0%Ipmp?l0YEzy0~zQ@?UcR#xyf06@FALj7uZ)*wezsMkbOswN0 ze_Z6ZvzIt^=>`|CKbUoN^TMB8{^JanPM_w7&%fZ~x8CNJXP)J?=MMAIp{IFV!0M@e z`&c7jHEv`#3#QLt-lRzkuPkNFq%mxoK91`6P^x2t7@8JNpWL(Y6=}?GE#kn!G3=h* z#p00_OmEI%X>SedCbckkL;(}(6Bu6~&&*+&EbA+0{>Xe5k1A$SZy`J8wFzKN;;|Jy z0$80KTr*nRetP|Q4sV#s;q@~_1hBR(;@y2~d3*0VUVmgQ??1ksuU~$OpWl6jUq5_{ zS03BVK~cWVMFn`6nG)*ehPQ(Yt~RQ`s|!{pW>}kB;O68c&%T2;c_KG6gVOw5f_=R3 zb8*Ji(gHhqz8wwp2zLHi-ip(Ojot85`3AKAHm%6aOPG8vu;EJ z2j`aY!=XuB{X`6{Z?^I0+e`W5rRmbn;>_#g`SsZjKHOQ&iz^FwN-X{7mR9llx^`Y& zH;kti4Pp1RBG&b$v9Kk9IrWLmZA@coO%lBY;WVfC(k0JqWvn-Sbp>=+=aU;1NJ(-G zX`w-+hKEq1l{CoEW)P}Gb~STQ?lhIJb3j2hOaCBM&^GLc@UCXFAh!IT0h zm6P@Sv+{iIe}u&gW^1`uDj#dh#x+`!yDI6h=aFq3dg4(I9oWy_ZJSxWc!8E{wP4y5 z=1-Z#in+5j*m~lTogCV?N4sY2(nW0ButxJpJ+^13R&qgkvEF+1MV>kMn3ij$^0U-z z!pf2Yax+r2kf8#uu3n8>cS6(A{5#}rg457oz?e96xOHVUAWXG(<)l{f3gO%-Ci^FJOh z_4<4AVQS`~0xktuYG1*Y+E;m4M~|zu1y%C;MGcq~SgG)zl83VeA9}Ks{PycnKL7L! 
zO%!N71gv96PV&d;Go1SUB*%{aBJwjA&Ya@bmCM|{e1)6m&T{+QDQ=wnfy=*qz_nlB z;Oeh0aN*|z9Q|T5U%$18k6)P1yHAeg%|}P_^6qvXUsK7dsfkSO@MmI2AWh|VR2EoK zUFtxdz>Uf?sw=Y>uyEGEtEJ9^VU0et4)LU>)Q*xoa}B7f%Iv5rx2L#3peoadf?P97 ziey`b_EL33kMt)zydNQ9BGQIMDcBOgiWjCxG$1<3kVq8;6Bvm~HX&A}AW54ffRdV_ zV9Sd1TpNwR7E@E1_`*y`>V92I2~f52M`-Qhy;1hBzb*mvN5>=Zsh`4 zD(|YJ(2h|8MdRf6jIVTIe6tDWeoP>JsrH0GO7qn6`IR|`w)QkkVsl}JUFE*06j z)D#%eSZ*cv!&Y>U@?zOCq|{NiU8pM&z$~$+qEN=5Y%{XcP2{&5lcauAv;o1PYIz|8 ze1vs8Jaln#8ce^6YJG~z4ag}nAT?KyxC|jq1wQeE1+oSb7&U+(Ro@|6h4pmslOO35 zCzKXzif5!Lj)4Z)`sv{iq>odGylKz?oP+w|9x?#GNL_*=jqwW=m4B|6I& zQGgXLgc_bUh`5{qls1{rHN}mIi~Lx)C7R7oWwHI)9QM3Y#QxU`*mXF8orfaXd?1vS zJN;R<*_Y*;{Y3&~To~SxL<4NDxI9Lrc0N86k#*2~Y+&^e4i0APLsG zM4JpI#8{6Y8*{vzY_PJmz+75$M_VkcZSZyVCdMz6V0T|!9n{h;HX1-F&y@lzLwy5H z)V_i$1zk#X2kYSG;wtbIkFSS^23Q&KF{DI=5a#Dha-5HVg{n?(K&~pwAaE5GZB2eb zAo=-$Bqe#$(NRo#x*u)=g$l6J(}IYL^~BmzS0GP^h)^G5<^8P8jWIFMN3D~XmyxR_ zgS$FsN3rNO=Q;?BBMp7h+F~MY{M3bMBL_vNk<>k2oQ27+*=TTHp zNM4R8uZ%3pO3E1CKAbUQ`b5SuVZua4jUGcsXD8zXZbpwA&+Pg0nK^Gh^A;~BEbp%1;IgqiGN+R{?fER}sbu|>p)BYwWnxn*6B?75 zJtCiFV=7tHQ_6~Q)hroR#+I3_Jhy2Iht^GG?~-l~t{tbz(;Fu7^xDZhyLJYzY+cCP zyI1he-Zi|te*^Cy*up!HZQ;YGcJtNCPx9Rx&vWLRPdWC<`#dg+w=O%Kc!7i)>l;MwpWw-e&p)S?N&_>PEJ& zJ&o~!OfJi0^{{HzwU@EFIhTFoYI$;69fub*^8S`CKHbyHiC1QG>&tcA{bnl{-dW1& zS7vectr?trbsWDu-^~Y)4B_Q<RcKFRX0i)%7FUJFSXEt?^8+4rM}d2xIa? z=_`t6d}#us3uCF1zR{Et!jPm8fz?vl3$y5`D55ewnRo&Htmr5zbF!(-%hqNL=BFiU z9xRn(rIwvhl3MRf8)P7E~d^hKgbh#8iTNp+JbrsVYiO(|}5qXHbx(M9m^p zVM7%nR9>rfOBS(Y)(kCIYOcVQ@+ckLzefYBd9$Z!tpX|qQ5)B-)Dqg&@eONNXfp~G zSS^}AOM|d|0$2wh+spo)+j;f5r}^x?w>6Mb`Bod3En%&+iv+lq&zYrt?urF-nKx}R z%cWI`>r3X(X4Bf0JS~v+{L=?{^OYC)@SQg_@79dT<28?#3L7f0QZp4*G159Q7^X}Z z!{UXD*tPpnF_^#6JXQ*v9*Qc_a7`fSp$ALB)q{%ee`%2Qz=QRmmes>#bOlz*i={$` z3bYhtDKFNM-%e^dSgL%3x=wkd9)Q;E|8leL2}s@NANj2NfB&mNm3qB8r)C(cR)rMR z`iQFd=vDa~1yTyA)Qmxu)UFcUl_-yv0xlIAR0-~?;=6(?B}ab~C_8n5KhIy$fb7Eg zvz$Efn+90t|2W0f3uifZ;uzOXpXC0PbKJjlf?Fp)+9ILq<|fZ^Vu>#i$&w3=xuPOv));r8zUKu4XLhhk}=MKvLbtG%blnbsVotQ zDs!N*)`O;cFDhkQ&8ExCF{4=JBMEF2*KMOw~(<)D<8OI0mM)F(Vf z-dDIIK0%)Z;f^?YeXK}ayzD0$k(y*I{mPWuTq}kbInXCiHMP#2IgMV-Yw~7pt2eV- zy_nSK!l-InhL>6}R3Nb-%bXg4xT;ir%9C{{P8>jCVt-0g2McH!h~Z)`P$iJ1LWSjy z0-=r?Se52m$Zs~)>VjovnrNOhwZu+jj6MOOD#2Y3PhUM;Ty=185Wp&{(xsq6m#kua zQu7T+R0;1wcro%%h!lcUl}Ur;2L{QHQi~q?#u?%nWq^CQF|MJeI0OhZ`5WOZ0Ob~D zq=f^W0{Y_?H~{}Jd3}VL5UmA%;g)!cLibdO@o^RcX$FL47!jT+H!ELgtSuo-UhT$&<&JXBni11Y7&h66jwz;ePBW)#x&^(ntmvC( z%Y;RCBDPFgYQv0`_RQbt!saKVc;?+gKKgMeUmqF9=RY>{-j}r;d@Yw{d&22n>PGQ6 z3*v{F2uzvbU15cPr8}WD{zSI~5ZdH|N2xIpjkcr?7l>)HCcW93l3`9XcX=~vY&bK< zCo!v6tXu)C#T}{il?G6mY(q@&Ai`Yx5$7;~6q~`s8q2<++!s+Uo(^`{IM`z8>WHJ^k^ucfrNh0UImp{{gCpo+~Blm;x{5wNl4ba(ifSD6ugy_!Xy(lbHMO7jGmN<@)3 z<6&=xr;`~zu4d$Cgfe+-3kzp;i*&JUK@T$~HfxFQqdQ9IX(?jtuyRI=!mKY%roAqQ zwpx)PxeTq%)xw3TvHoNv1XGe5uZ0Z@GGj&g#8Q$PD^^Jyr6O4|ArvG>QJR%Nc9JNl z>_n=HGo*7QY6<(*Rk<{`lnYeW)7Vf+b8`cudU|MTXr!dLm|<;g%$PQv88c=uOJHWr znl&sGMKWjJ95$({>8n?=aqDK*Z{Ea)t=rhTYY)%A@;a}*^FD8U@FAanF9Z0gGaNa6 zPQd;mXT(ZYfc5KtaCP$14SqfO2j74D9p8TP36E^t#QdphHr7fFs3!IF(o$6|mQ)pE zIz}*6;BIp7C`L3k(O6N*?D2hUUo?m5Lu*(!v6E#zjSNi-rXxF?&YTcBGW{7-9L~J9 zTsBQ=WaH#UxvqgV6B=1QwvHvEtC=}GpGiYAnLDzW#k~~*SOO{IhiI_1VQLG9H&5sJ ztuqC%MhRR^}~5&+vS*KHO^ zQr@Y<2M=i8s>cPm4j*`wLwg=!+3cCL)m4iDBydz(q?Lu3H)D#Hz^=Sh(%%RFP5T zu~O>|sswlC!MZJAq!QiL@jC)fD$!l7NqGNn*_LZmev}H~DR@%Qq#)~IqPse#&MPmJ zD&3&AA0AVnrMy=UAxeb=6={4_W3-EfOIT7AbHg$AY8{2p@=V=m1UpLI)BQG=T6> z9in3lNlY;(CBs^qSQnFGOsoQ|1Yrb$v+yXj?xDbyK$$9;5D`9@$S7T6V)aRpYYNg$ zsL!{dyTX|%Lp+$@;>*g8Kvs7Kv!*A66}`dC>+oT6vlF9g1hNWkXq3;XNie1?R`@5n zKRJ>8$cgGlUaUw$e*sYg>PoB_QemToN9$BE(h_Tm^2{`^U1p{s>6yl)s&J+NR=9kZ 
z0QoJRzJqagABerPwH$Mz(@hCaG!)2^ zHxRh;i#Ne7!Vs4bV_ZTla11oZ-tPfuIR`7i(i6BEj8~8j{=o)>2!w@&TM-y(g;%r{ zeyR2ZrQ6|`WP(};F(OxPZmAI&weq>ugDGq`p=GigLnheMFxj3F0$W38IMOu5lBUTP z)Q&Nvv~v(e9Rn!u(xJLnpW0D+)Q!<)$e6(l89jj7(fz6K>qqO9{&ddPVd^?()*g)I z(7UC)`l$@aA69VatwJ_GmB{o>{GYkZ#G*6YW z33`KdwARwhTp&vydmCG=br(2GPE5qx!vjA7tmu#+0=-=c5A+~D!UrEWQ~bP5DJ%$) zjub*#iYwV!KJwt&6CNQCXLkiv)k$J?IO6YbO+tbXAtBCKm=D6;Ri1^oApCrs<+JrL z&>4h35hYPXT3k5x=6cwh>EdKMN^ZLNPS@%g(;DgWyR4@mPJiTIu%8!46QGsrZP(gkTmK9 z);dPi(%aiiOLG+?MA1!}Fpl1loeUq=%2-iYQ>IK5nZ%++^I5%SCEK=dVekIkJpSb4 z?3VUvvFx9D{y7dEex8?Je~S-4|B6q){*E7iJI+ZNSdYp8`t$Kq{H7pF2KsM)IjV&V zzxw%@K>lyM^5(ldx_b|smoH;fTPw}wWsDoq!F*AWi>6JLV?(JgDWpPb5&fW*%!MFCV=hH)v@Wpfc`24y3eE-@r{PfoIocQEzuKfBP z_f8+<$}ivX_G5ebK4_tP7`2Gh#=A1O(4pZ`jlAOvXZHd z4WlYDn9*f9bd@Agm*U6hiX=9TZ|1f2Gx_!D&76K=2S*O9;Md0%aq94Ljz70Rj?dta z*B5f@%k^CUaxF(*oW-9Xt>Nr@i}>_fv5NHW&gG3A1NT zVa=+g+LAacmoH-L=CwS!Zx@g3+`@i=-lv~@j3*B4)#R~#kFa<54gs%iJSyiN*(%Vr zc>{ap*?)BVR-SouuLf93R7i5_n9=fGhtee9si9J!Z-hKo^XF(~7goz>tysK(r3>dW zW72pQ3*bE~|NAdL`G7A!exIMd`-*SB_=GQH`;C{M7ub57ryhTl=bn9*mtTF8SKoM- z=U#Y~M;|}L3opN+Kr94`yy?pVq=AC*FI((4e{vZin46dWUbL{9TPRr+> zKYvla`=0_>=LEoxbM)8mIr9D2oD|49cjQOT|N1q*fBiP6zj~1i-#y2P&-d`@tBZN} z#p!(d#$4WeaWV%s*Ry6;8q24~vvhJSlSlZ`E?`lSXGFC?LWx#QUCjV=5@52SQ1ebX zis3ClCBT%Mql$srQd;gT;zXWYlbLHFU?s-|!g6IlOSW_KY~;P{DbBGYKhv75RC8@D z!lVR465>S?jKq*PAvV#Nh&VCQBXtC<2H+Fi55J)P_y_bSBxs<3n7~tl=o5jg*km&z z6BUX$ww=Mhp_5($hR%!GQw_@as=t-~eHZ0mMcPmVRMCZH@(+3|l)OAcva$v2(sfBl(IGBb<)^FqSGxEF4aUWD0CrCO zu(Ihdy0bt18tMg18jPr{GoiT3jNDRlGK(xom48B_iif5PWF-kCC72KxXN*s*F|OeT zIENVG7;J`}zbV$tISinl(W}s{U&2 z!#q*O#TLX?T9Q<6O?;gxX#y^V-Ol8UupzHg{>fupC>m)?;Rp+gM#v9rGbE!~M`RE= zZMx(S7lQ53BY&8Ju0a$JA3#xCKZ-?ay86>P!GPX*j?CB)%K9g=c;xvq9ywgj#wYVx zup^mK%R;D{A!rDeKp?ogI|O%kFYZ!Y3WW+TMN5r(-}kv| zPhY-!?;q>8$Ii?-b7uB>mh81D?BhwdL7~(Sj}_${Ky7Dl#??kKvU?EKrH+)!xpHD# zkSGsDx_b)}Y?~5lB7cmd1-=fJ0z`IL+gW2JfaK}vL!h@mF3NjlZjGIt0FALBI=Zbe zGStUVR|lP@O)%Bh!@|%2W1Uu58yln7LjGg{s_5_#?5xbR{Gz1zVEp~;aC0#h;4;L` zQ6Skv=sUrk+!Sw;q8;UBa3&(mmegc#x>w~B8fK4^tDyjwE1{uYI67KlW@do9n|wD9 zXDrQ4(A8;4%jV5+uy-IOF&RJkzAc&v0P30G>g0}>o2Psie}a5{a1?+vH!{H0-WpG5 zTY)@Fd_8Ol_K|xRA0&`fz~FwJ=+~z^y-QpA_328l>PiOmt!C7)0SxTln~_87MM+H& z;T)hY~lEkBYb}D9H&m5=HlgR-1y=yx9;5I{-dY7|5-X}>0n>I{hqJB zdBC+Bx43lW3I{$r#QyyUI4PgmxM?%vM~`M?T_1*YucSy|qF1T>*76c*3z#{40MiEd zrF(G>mDwrUzM<8f7}cvQHF8X4Rw84&m$P7S6*Ftg*)XX;le!mCn-?Y$LZ2Kpv2%o2 zbn#5@Q^@3=`O?X>VcDoER*mn?ig7&!uv#;^Dx2B$Lrw<0+Lh9?yo~bP9Q>Uf@o{h8MlFm>u==FOYK{CRVkJ7+cx4GUPh zWHHN^EoJ?>wX9yXl67lWvw717fvvS#9@gF++t|KwJv%mTVDGlgtXZ;%c~hq_YvKeZ z$+^c48O)HrwIVePlk1KgG>}>QZ@Ba2*Bm@@ob5aJa`N;!E?m6IT`^RC zP+lQ{oOjA2^xI$B6gYnfkUwkjy9zXq#hmW80Tov_{^eDsge$=Zq{Gg z=YRh22S5Gt8}EPoz>jiH1x!Eu_=}dbuF5a`^5NflTFR64Z_!be-2RHU0!IqE)c&8O z{Z-EY&)@&$SG7$mIQoY`)KC8#S@=?Gm3K>R`(DobgFv4u>!7wN$a?qcy~qz*(b4CR zpYrDUGv0jnn0GH8^7iRhe0}{Kk8hvn?fpwUxp|Q5$5wFZz+BGmnZ$uj{n@#unmrqO zuxDcr)-P(u^pS}SuL)#GAAf4PdWex}LVA2t3UkfK6G%!+Hz7xWDl5;NG|~5&1r}uG zTa%t`MRJA(DFRPfxi;j99#mkZCVx&&H4vzh^QeNM8D?ZgX-rGn#x$iYt}&GM~=xv^AoAYhy|ZjWxi^$Y?=IYGZ-HPYGA0DAa(!Pz8=HaQ0}9wS8kOt(#(Q-V_tV zPifS(vbiW8eL7Sc(x%Lif;J`sStcZ8s4DLIgbP)M#TpS5Wr(-Hm1|fl977afwZhU{ z2U8DSOa-n?1+t8t|4n4K^VPxGza_2#P4Nu=lz@n)1PW#P%T4!7G8Vux!8_FmU-{#L z3M~jIHYcRal86pAnnViBL|0l7+tG%&PBtWTv?8g48Hp7}#I@5Sy0jHhr7ehV*MgYx z7R0u1F58=uRNjQt_Kis`6__dglr~*;s2X5P&tcB=o#4yJ`Eg8Ip2@`J*$kPRM2`s( zw5gYlt&=r@t&MQa*2N)HA7^<8JqvB|ZsUkssWqSXX00 zoz3xcvBAmK3Ku6wf&)Ud!kvyb&e+>J;UFEfiG>OJhPpz-dKl{oXtipNg`O@ZE#%KM z5YQ6%vNqNyS}c5TS6e(>thG^p**ReZhuPrZ&>S1f#`wC)1M6>2a<~(jaUR43%6%3| zigu?>aV)tx;W)eKV{30fSh&Byo;xNc##mch$+ChiM@&u4Ffua6!orfkK(V%?V{vtH 
zr$zHt=;#PQd-&qz2xVfpiOcBh4DUgElHqf`z$)ONuf)-OuBc>rK`YO4*{z7?b0bMh!KgRbH_|NSEkdo zLl!mN+t9O1i9lbWKwlTe4Xb5n{~pYoFp}k}!nG)?4a=8v;=pI@-n^BIr_OTw@^!wv zahtEdy3e-{o@ljoo;-aa7Oz;QuikL&$~6w`-78l4CN_(*YM4EXWeeuAY4vg@j2uR< z%68NWNc1bsrXo3v?zsunx67wzK?-98o@UioQ&*Zp@1j%&l;vv24eMB1rb_;`}CSNOz56Z!_ZD_m{P~;iG64o z(TN!X67vREFtdLt6MGe~c2bSNRcDrr?7;y6syz$G3TTbz$ch;pUM^rI?I8iHLu(rZ zj+S$3>uSz!Tg&$$LV0~dF1;>N+9+&Q+N2j@=k=GJ9Cd~=)silX@Mi%0x)?>6th zzK3WT`0*|8?%(Cg@dKhHRH(L$AGovq?#`k1?zqX9;oXODA1Oe4} zdgUh3F*TVE$thIj=2DrSPC--@ecF{VvQIB6ap5IWwj*pnp9B2MuNT z@KFpNJdAM@CNX8|41uf$mM&Y#%2jJwxpIv_)kd~%-NA;9TiCLBJKMHyXVZoa?A*SU zJ-c?WO`hT10$Tfb?_|yLrOcW#nK@IZGJpDXW=xvMwDIFvAj>NkE@VZ6oV#HmE9JP= zE0;5K`c!7ln8wyE8#Q=aEKqmq_)(4?ImFu4E7`SUD+fQ@C+#*aeSV&=zy5|xm#=D& z_3+UXzP$St*RJ2<>u>IJ z?tb$vPo96r8+jjpR#Q&>{SO}mu73LUH%-)ZR6h%F{qV~NF^YZ>X#1eS+V2X!R7G|< z|Ifets)Yyt_)~=nRn4pK`R=6}b@+}SfBJ>@0&j2BKotS4KmYiLpMUy|AAb=2*+I#>6MIBq^yC=~_;fz*(jh`3k_)Xv7>-ac~P>0(TIg)wc)<%YGEo1U#Ixf=*z84?m>B#@+!myo3bEGGq60WAfv5CX* z;-ZI{tBzKV!CobM`3rGs^Yb*uKdPAkmH~k&rue6r;gf2DuTZjIo&{d=C;Jqb<6mq^ zKx=b>9dm-)2z0fvAXFqmmZRD#F(JIQe4k=H0);aD3!CFx&=mh7A>iVsgcN>CSivVm z6?{rk>*f@7Hl=f&6CLXu=sd`k-eUvkJ2`^h<0I%W*pIwkt|WD`BdVPhK_%vRPqw!nDeeM`9#(|7+TdX)zuVJ;*sv%< zyn}GFbHmlq4F`c5BU3{R4CTI>7-FoigJJWgSW0Wqv*UEpMELUg!~ zJn&J(C%WVArHiFyBdpB@a=qjw3b&Vz*+V*Y7eYKti1ZbUCC-Pk)``T&dgA1)hliI9 z{sGe@GgoKD<_}})HEp-IIoaM!lZQcO{2S?*0 zzr)JX9zA_yoE%+nb9TkeQC02sCM>|0Pz726UWEC&X$48MlOkxH8z;ZRL4&N4+$h== z#nYucg8@C-YPncF+GkTOz%;nJlrgm(8Q)J}tEQ6r&P7zWPNGwOB;88l=u??Vb^CNG z+a%MuTp+4_ra)u@-MSRet!qB*N|PyXn@CO1B6?RB(5HJLb-hXjhD*f)?<(+9%kTl+ z8Cut!#j_`~Y2{)T&zjD*wQD)LXDPNo&?lt#B zA)GyZnq`X?F=FsQk$NVN8OfBwSICxR*de+ zfHneYrHKq`mrU=1NV;bQ%eiux(YK7z(vIzxM}vH3>#Tw7Sv;I&W2=}wu#BZ+s+e9^ z%Ayh7SUJ8%q?Y|lCbFktya3ijj;x$1a5Yof*&JE3kfR#}Hn*+g?5<6m-?N3!_iW|T z-fdjjw}YF9_VVTNgFHNcidQ!-@%`;<{Qlr80j$UT@#H>#ym-n#?_TrI_aZ;M=GQl( zOdj6h%gZOYa&ixkzc|aAdl%(ATe!Ms4X-aB;loWi*Xiv%IIx&6HcjN=f%*J;V;ety zzLsBaZso`88+m_e6+c~F!`si7@bu(dZtWbyv8BB@vZM#sw~yfKeNzRn25@LrZ+1`V z#-6F&**LnAnevTN}x&sRIrN+$)O>1E-9vM zW(MK%-iNxok*>-v$o3!?XQF+5=uzH|PI63)EayZ;(mEwYgRFvt1ajhHDb2~EvZP2W z)1ao#>Cn1}F+&E@wlJ5{l6>j~u$C;G&xqk87&B%94U3jCe}032)k2mmSuP;8Sp%wF zyZ5ql*IqUYRIOXLk=3i$vvK2AS>MX`ZQI$tb+c?202A4{b2|t2?P1&IjjUX{goZhD zSTJiAD;Fdj!fhOS^EvJeDq5#7+UJU2?9y zdvh)V5Jbc9U8@IH5w{L%` zt*h+{e0~ua`s<(n@ZFoY+`RK8x9@$$vzM)4vH0V;bw3L_X4{iP3_djTGr9e!rzkT;!1F|1~{Gd(v{O0w04Y2>Q1fnPwvk(>7jfzEG>&c`%<2VY%$=OYvbkzBX*she<%*#g zA%>*~1g7|n{(G3Qqitbe-_hQm?iOy(6OopCTpDHfhlK3 zt&}v;RdLNpOl+Y|;GC0XPJWIJ1$j2)=BW_l{|i`I0$5qe`T|#aWF;Ah7?KvRN0J)# zCorO>o(TyQxDp8tYb1d336YV_w2^oT@g^iCnvOoyxNk!odw3qtVJi8 zlAW$ApxRvYWmBSsX;l7LP_Q1peg?R@>T7UiW2uXcnJ(5wIyCCv!+^Tpru3{9n(J&v zc_(WM%PdLFQS~ScwD|~wqV(|-xbg^Xg_{7Db4W|<{99lpfMq6tW#XnMVjzGepyi^A zxvN~nqXkaB&2bB8ibqgmJcV9;gfji*2KptN<0GKuBNXi`f0jpv32qt2cw`#knPrG~ zwjn+uz9RlYjY|Afn`exFzDS`lzWH)oo*tgLtpto(;GW%7KKm)YIr3)|$OF?_pS1R7 z6m+wpNN!T=-ZoV9ccF5S7v%%IDD3S_Y8P8#$}I)XcBE;7TFIP)>@MDFr0|k@}u(kgb zJG;g>+BU|=r4{iZHe|(lkQCxTu$!>}mK8}+^1bt-@t0*6HzTnid<2qQ1x%=Sh6^NsQ8#dOCvd<7RQwtny z?B)9jO!@oa=i!E5C5)*+kx^bjfpoazO9x|F6- znjJ-lf_MgWEn#@Cawhig%G^)pReRoAO|F z%A;@ZGJ5xHO}9=t0#!wf7}SwLb?pR(i^Sru&_*}TnL3myV+SyDU{5BE8o;u7)7h}R zfi0_+Yn9Uvh~oHc`%casKEdhFj&keLb?ysX-Tm?&XV0GF@S!7YSht?pGo~|U_%MOE z?)0kaqSX?r>rq9ms+irY8@)Qpbvu<(RgfZ}R6xD*#O20Ql@%>emCLl+P8wt>z#3as zszFwbfS5XVT=z1@RJD<|nCSve4a2&zc%;C>w0b!%U6YYra<#TkVYJpx>C;xumrGsS z1UX*;YbV!oaQS#vPwLIg0cEV3*qa7{s|CZluzXw%t0(s3z|zU=UO1jz3&zW_(>b(! z7Drdj(Qzbm27b zZeQo0=MVYoxtiufK|2N&0m;NqH*Tv$1rt>ZguaJ9I9F*AE)GpDAIY29-fQ<=)B%5(D#q4{kvC*;ZdrUYUt9gRG!)X^y}4~iDO1+BltF~T}8u! 
zh0L8ZkKMcW2q5iZ|%#J%lr23)0Wq+S;x|)%UH8|t$@)M0j#ZT z6);-9Yzgz`&eq^Zd9>E8UP;5;IV_$#kJZbTv47`I&L2C$h2!#096HRY!-qL}^e9IU zALP)X1DrX1QqHx7F{4Lnkahgn5k5Pxk5ea)vtNL%rlvQ=MFsS)>&xufGg-NEg$95} z<($fEr2y;M^Y3{1N>x*TDNyxTgDkcG^qF(q`|2BQ`;P)d9|WZCKYqgbOP9HN^A=CP zd#S;d@?I&3Qr;>BSV}$umRkNrpz0@qF9k{prW9Z)h*F}K6-cRlNa|H z42Yq9nIrkR=Cm$xAV1HR*f?DxV{}LmfQd;^`n?6ANdgK`) zJ9r znqHQ4?Pf_uXDfTZL?xO<>fj@A8gMK8JyZqVCiH5i& z>EkBiE;rUg#4|-7ZxPkH2obx;O?S_b&*WH$$meoQa1|hP%GAXnO}5pxTDQ9`@v_{He~i#J9I3Qq{LAFvKlg2U`Iw+aw(vjE zF}KCi%tGKw9|K(lSaN^$T4B(vF~%*LVA@jJ=AU5FstJDXR(Lwg?+tdMRDdKg(F;cx z`7QRJ;O5#KccFbxN1^TzTe9OkiS@St*pb8tH}bQ>adR=k%S&FW_#m8J>@~o0 zadE@b(;IVhOIoyOrM1d?rMyUY58p{Q+RyE9F z@0JbPD5}r4Z{x)N10374pU<}K=J@_YoR?1i@WDgW52&YF6z=E|!x=KLUar-X`r6*q z_3Wl4Gq)?qrfqH-o!jKmtyC0B*K)e%DQGIxU}|8y9J*#i(XVy7fKz8icWon`L6!zo z%A2K@m3M2ZzyL}?wZ7Mcp6z6N2_rk_YT>*EL%XqY`amWMKo0GY$)w(@+(H?{1ty19 zW-+N(8>ZC!yFQ|G4%7OzW3@om_65UOGNwDT23D|gVh!sB(iX}2)=aMF@XDF&X^;+F zofKfix3?f3lk>q~xm`z^nU{Q2%1 z{(N~?fb2Fu-aXIzTPJyNY!lxcT*4psKI50G+xY#~PX4^Jn>XiH@%-dc-dY#P^zJ=1!!a%el|)fO_VcOFx^WihlYmJ#if znbNa}k)5*XR}x1}Q5-$R_~?`xM(0d{qWnDC2wbHE1dtmQMela)sBY7Sq@W;qr~HTs z@Fg=YhMdHBEj$?R=S`5O8v(A)nx`tj$x-uSRmgcd6y$53E48kGN&%LFtN6eGQp3Zj zZdXQmUasb`DoIMBG(DYC0j+KVS2Z0g>D8e^n{c^SKuZ-F?cA=dJo5vXJa(iud9o^% zuz0~-RxDY<^r=(Wwq=Vz)=oBW+{l(qn^?DIjlkCeX3dzv+}X2Oy>g{?%*OTWSt-!8 zdc_J>uUf&1&TwH9NV{_ z^8#9TZrozaGd&T=7e$qsR=afh5)akQq-Xibg$LWBM&{C}eDg{^{evxabyepM|CF|O;Ds(8vy?*hQS5IH@^y^1F z5nz1%=mD=D-s9E%FL?LxIuCCi=F!aq{Ql+=f4)7>i`!edcYY~1PtM`Uw!y4hTq%83 zDRZafGon6*fwd9T_lu&ky}Ookm6vBtmH<(Fk{Ho3x`aiGZjaU>IHoy4u}!r$Sgl8j z?u;-ZB;1hj2m_*H^@)mUNqG3DL>6yl)W*L#DM%SeaB&8Z? zfTclJikikoM0vHOO;>qb2?BRw~puvJ^Y-71#AUmY+7P%(+n%iPXuT_!CxRN%Ka0v1Dn$!&WLXEJNt>%J}M}1pc2DGzbP&;czRM<1An+x;%2v`jYVAH4|c1#Ro$CN16 zj}K$v&=98f4`X;$2vx}$rbIveV0Z0X+Jit-L-cVeGy%pmB<6&B4Fn(;vvNC8motWw208Dvp|_^nx%*Zj;W^DB^zNce~`UE zm~CP!?1aRf(*)8o47KoMWLs+z$}LE4Z%k6TG4W+aM7J>{v{;{@d_4m54G1n0$SO3) zC(j($3}YPRd=5h1E}0fMX|QF4hYDpDSqW&_;NRAP(DtrGck&=!U@KmrE2Gkt!mb_^ zb#SCe{=ClJ+~pw;W<>8``GewEF)E+AJrfv~>q)<4XF5mNQXqd!j;|GQE@nhKTM2>t zkmlz{h=7W#sU?=iYHA!~ObraRWN`~4ear>2j9WIru-PZ*HED#Cr2w#}1)i>kq$GG# zQW!-_il^qXbaHNtyIV5>tY&yQx0KG`o#M0rqP)$;S~e#;-dFQr73D_ZCBPNnZ%0Uo z2S#!&6BGGg1hAC1%FxgleSJfLCnucMlrsWU3b53&@@#o~`~DBS9PsrG#@xaIomNJe z7+YdsV4~$<`FMD1MI?Pxg?TSGf<#n^Fv8!1Ko5HXJwMvyCD2BIs$Fpcg#vvl7pr|q zB9$dcbZeJES5>Y-0IQowr~CxE6(!TNZ3ex{G8t4|#Dt-h%oyLD>Eo-JHlY{OC)P4~ zTn$qv)G={%9}TjG4^;V8)l3~*!}PIzSv+$X%jS+^#li_9lbJDbu6 zbCSqpmd>52g$cK;TFE|9MDwOhW&V`utXsB%d9!9QZuDr{6ctmDlg)@BgXLOvRCn&k z(7GD>Rd=RSn*utw&Zm33Vyemt=}?#f+D? 
zU;%UMvYFI9fiaz;1e{Ws+c%%7-7}b5Qy^_76FR3eyj?saD^eKWHCw99^iWzyds7_gLs?QZT?C9uQ&NfX@}eLvj@pU}+N7qEDu9#}5==&Pq!uDna|Q;w zsS&{*L})MW*ft}B(yUAhQ{2s?(=VaOJY>zk7!}w{K}4u6=uV3yfXi+i$+&%9Se|K5~pro40C^ zrMy(%eEUH2VySST0v*-f`r;19Pn^;K>*A#=8gxB=`kbRDPqAs+cJ?1S%#GW3c=7s; z=Dqp|SRXxEzy2v;B;cj;u++Z4{-dVX`4^s4I8cGp$8e#NkKQZ=UTRq#qX6pn{beUJ6`2yZ@AL zzqrdcw{8h!-Qn5SxA^Xx8@zdVh5Oe&&) zRKSGcNlX}#!sx+?4D1_8-`-(#>+DZSi6a?VmL!Y5OiVE)D&CO5@RkHdwh(ECf4Hi1 zDTaDT3;cp~@C($%FF+Unz*YnVHYF(dQ_%~J1hPIMHl~?CmcD?J0SPMHn4+UeLW(ZQ z0#KwmBtPW)!3w z3S_k+GpRXgiA_k1`IK1sZc*}GLOc!du{RW*r;oMhJqxQAn3;ZxwMiq~tQrwu+lU1B z#uSFMq%6DzUE}rXE#Ov{ZOLE(t6{Aj8PwX2!R_oA(!rLIm3B<%;>_$`UM%Y$z{X*r z>>L-#foXAkHY-^SvIJHPk6~VYg7n8R^vw68D9%F+AWt!bMF0Ca65(S{sHZIfE|&7$ zObK?iCtcp-%A7*#JM^T{h#FG{*ILo9mkr&zSkR`en$22==)`71v`w@Gb^i#t@!_rT zl0U%NzbTGBO|bE3jG1dwjGdYbShW<1E3|4S4~(M_t6hQ|llZUL$)E3>Vu){!sWz`*m{uUP6+yWz3CM1VUsfyp zvgJX_F~C31gn)c=dQT*<@enPsJ|yUGLLmTM_uj*C19?)g^ul-Lu{)``$E zXQIoUh%0v_smzYlQfqBe=n^60u3g;e+r@|RH8Ct4oIyihQHEV&8Q(U9x^#C+gUreE zGbhna;LqEdyl@}V{iU%s`KGkl4Ub08og40fP2JCcNO zSBkQt2=K8cJj_*K$zGsHPs_DZfTg@u%1fm{ON9h2Ev?bjl?U9~R@-K0=O8a$q~`H* zmgB?1W3;*D4D`)#uy>Ugz!C>LM_e47u{Kw809xVe?o5!kJ5fR2a{U0}!~Dof3>UCU z(ekg97fa<}buL$VR4G&xB~VqKNtZSPS8dbjTAWPxHfap#RHOk`yNn32>=PK!JD-t* z+6hc`73ESZU{xcEqZf-7jAhZhv7&qiF>m@%=1m<$!;E3fpFD`g(}&R@GI#P&W=$F@ za5jiZqxv&>bUo9@4P(maA&ef>kG|czP~WSEK-m~(O_<2=x&aL8Q^TTJvl%^f2wf`M z)46>)<)ZZZ_3j~HRZC5`&J3>UDW9vNuBszF%3IU5O&+~Fw4t_RTY9w3p>Jsx^#W8X zJgD-l6jUj1*2vB!)C+j2ZK_ppHL+*={{yWFJ4cm^q*$Q~I`NM*m9Y4(Z11L7kc2zk=CAJF{wP9Xl6~Wk-W3;YoEg zjPA|01>@KEXBh{`4t7ie>)m%jf+5{Y!p&{e++1J>|ns zFZk<^xBUILz}BDN@%yh&`TeH{{PoKte*W$Y-aoj^o4coZbMp}IukPmO8@qUUZk@<_ zo}OI6cW2k~^wcUo-!y^EldA--N|;cUO#jxwRA;Gb>t15DhBCHGDq}jP3Q#36pd^yf z9n-0A6G!j72nLs@QCFHuWm+($@qXk-c#;|9LP3-d?NY*NBY>3~9Yv}tyWsDy0gnpX zMSFW|aHYIZc?t2vs$_Kms&HR#&5IQm5=4lAl>#UQSjw_(ot~zFlnM{3^~B&HV*Gry z94^fhCd&h=y3tNxJwHC4oaktZlaeSB$&>ZMz;yXZjT8&78)P1#?+2XNKnOQpc+*>H}(fYvIM^4fEN! zb`?tn$`oMDnJvbJz?K?ms1nz=Y*^3k?b}4Q^4ad)+B8mQj~&xyAh>+`3>VLcoIA@! 
z0V?IGIwH@73JEIEx_0$4x4*cl!PR4V7VdxhwFX&A)V7;9uXFY4H7&>L?70iv;f5A2R?BU}lIC18z23QZCJmuxvcUnkL zrJQWuFn3MG}2kUoL zqf6lG*AIVau=Ts_fBD@z9^8MT$c&}~-95|8uTJse?jfFjv5N;+*Yn+-t-Seat5%|6=W2QHrxr7OKs18| ztcKLZP}?(vs?NTm(*tPN&V$TsG2|1Bh!KfOFeW5QUvzs*e1aR}8PEu?fKTue-R|wz z9B<#2c>Bmc?`EP8KEYr1hY4IoMt(|6bW;-J1*k;7Mn|_GLe?W=n-UY>TuX#cPSg=F z(hMwfR-4IV(W9-chML+3dX4wi8qsEvUG{VVDeuG6LA{{;I975x+*O-f({3BEyE#3{zpi!gbYL!BMT3=OAC zZV5v=)iSgHWE$018Pd1PRIF`ND$0#0D%K}W;3F!bDS=T!w&9|r!^p&VEgB z@Dgft{RDI8#uzy?Mc<|wde+U+wQhl-tu7`G`XT~ab^=oNEwOfNg{3HXQ>PY~y0*m3 zttI9nmR_pVgC4g2s>-_nmcM{hppsT10$U#?NY+F21hNcpj1u@!Uaxo&X`SLsM2v8X z7x)s;a!NGDDM3IaRu`KX9oZ&uktUMf4A0aic%(MPGfjXoOP|m}b7IOINa)~BVh6D@ z%cWy%<4SOmqd=EHPPzbBhBt6q*uK zVos6(R%&Z=a^?ITOPv|qJ&bYHk@9E8GOuSMGrPtxxWJ2!F%}dA8I$H~NP3_Z`7vIk z1vnFGZ;q#hDYk~jIG9^vZDg#?@n>bEhn<-|1}z$EfMuflDPGP-B*(eYrYM5;r7pk^otLqCXV{@x+I?kew7vXrKdeF}}FF zTg$^MztP@7#7WD$QeG;RbLHgZqJfw4U@5Rt`B}=VrNW5T*4DVWdJ5QuVqxhZaOFl| zKp0k*b{HC(;4H^^ySd|RYlD-GrF?IDA_KjM7RUlvqz|Uq=R&9?Q2K8$r3Z@_PW(^h# zy}wxN<5;tNCaafDW9^b@te8KZ)e9!Fa?V(mO&=kkHI%8N`%4E^&9MGe(n)n?Xus}a z5e|^HCj0oJTxtQByaHAvucP#4+Oj;Z}Ruy90A77KW(?bG^q zWX_N(<__!5f{{H~HnA^DC-i0g>|yL#Hi^x1N6WSWY?Ax3P43U(bqm&z+cUO3NPQLuN;p5?30&-3`& zHQwJBz!K&6^OHxsfBKN0Uq9oQchC9t=a-_GUh~&q-}Bes??hzz&li07;alDdT>bLy z9>0k^zjcgvUmoX|FAwvR0M>UGHt@r>9lX1|jfclqa(U}?_RJf=>akr}G^CvIRp|_r z_o08AC~Aws=#djZue@Lew22jCrxS~ZcVmo}rT zm^5Z2gX(HD89QQ#=J^^kY>?*tQq$)woHvIB^XJmAaK6^6JglXQ7qNczDz zIC=Om2lwpZvzD6WmGV-ZKX-I@WSVpICAv3R^mi?wN$uJ(%%2d){jvEEO*N|ALkB zV13NNQsKfkvi#liH@tfGlGl%)@!kCgBH!@(;a37yDl~YOC$~@W^4@XY-9OHgo7=f_ zdMVFtZQ#SR{XDv{f|GkjuyI*8=1wVOH@S;e@aA@>{oeOam`2(*vb~bD#|dVBukhgQ=ftiUGhW^=cb7sN^dFaIurc!--zBFvilt5OYf%OpKdg zrr!uh<3{*e%J+0`MqZ#k6|rV?Ni?M@#hg9@mqXe*Gq#g26T1CdbaZ?V7bf>~V`eWe z4X{=X3TMm67CBkXHfe-Ozks{r6ZQmsHW18stQ9Y+UipxaFtgefR(97Y_blavC4(m z9G}o;cmy`VJ)kLW{>=oi8e`?s2s5WoFtTrquGOcsF#l8_t0jhZx)|E%h_u37K+DF( z5GyAGOa-(IYz4+_n_%qN97}gyEIo9waBqc`rw+EhdN>6c;SwT%6l5S^r7K`1+XDp9 z0-9qRET9#tgF~1uP7!)IN9yAeZHQ}(AucfnI7jOVeCgs4t&5|;m4k@A0G54B3!LMd zN&5-*(L%cMO>hx<_Da_$s;vV#)xnh2CsH-Gfa>ujln>4zzqj19j===wJK~dLi)*?$ z&LXZkR@i2kVwqxueYORTIp)|1fjej+Ndqk@KU{+>k$gkqt0h4`y3)xTlAq*7 zS!M`Hfp(Nu}v)R0Q0c!;qR?VBt+WAx1&@i3V z(oPvQKme_iHga(Muzrji+K;hA`Z2h+yFiwj|8FpJCQo4I_;C#E-AnUijTtnMQS}4l zx_#t(cG4#19i(8aPj|)+>c{B*y&2S_6C-M>m@#4ilLqysS6Lo4?F$&yyOREuMbx(W z*F&WsO7l$hDA&AJ3b+(pDVS1;?MlXUYs;Ma?iysNb7J3KP6=RLIC7XPCr)tn z#BnYw$U1w5uPJFZqUdQ88 z>-hHQN{%ca&EjE|%pFk1w3-s?+QiT!HBQ>EZZL6q`TDUX)grr=6#SD>ZfOSKBNs!H3+ zdIYJVp`?ps2xO&(glN}N`@5(__ikOZD(JkzF#QO8}qa)pNvA8Q^e1yq->T;tmHn_8>#t`u}Buaz3j zrvOKJl+<)K%A0ld#x0J?_t>rSB&9w3`9%%5?tJw%_rATa!InyVSCIAYhaa^P4j<*U zK-=>duSC8RkW%v*{-}YRx{d-bRjNUOmI6DqP3>3LR&aOk-Zxsg7Uii@fc4=QmDK*T zK;sJyt{y*nu7wFdh6oj8sSu$8vKP-@^X|oKet!8@p7)m`&-vlG{JuwD^7`AWyuJT9 zAD&<2$44jm_VPwOmveKkpfvEku3=b5wP-YBygn)cs9k?w}prRjbC$uf|?Q@ z-c$pu=$KChu$q#bs3(x6Pi$-}qGFqCkQJ^*1BQPh`lk^|(T&Lwekjfqz{=1QLqL}{ zS<+_f3T(BcIJ*^PdHS@?(WiB`fowOXIM)yW-`k(%{XcRx9j$1(JeH za1JrSQ2@a{SRgA@AKMTCvA`Bs$@Oi6ZiY>&v4|1Y$p#{N*vZ3WpU@nqgr<0>$U`X)QAk<~ zLKEeF%00|ax1ddm8MVc(Gz>^#L2WFPy9NnS2hua&fr<

BJpK3suSP0z~$$1h5Qo zGnac|Di4&gK9>3|G1F;+sh-?-lcoYJpW@+aKyFqb?aN}N;}okl+mF^Vtc;9Iv|KE;O~IEc)1X3yYQMRKB@Xt^*xEYLvZbMjKBfX%HmVkuxwIp zOXje7Rdui>ohITN_ndkV5#t+%EeNUrCJ483anI4mXg^6s+c>l znyK>n=>xhlrGF>J_AY0|4w^;7W zGC6Loz}2RCW7)BEDw`IJXU~dR>|H)fAZr@Cmd@hP`XwA(w}kyG8w9kLa(L}>j%--X z38ftOs>!JLaD4wh&Iw>$6=ing+&ONFd~yB)_by%L{`KoTzIlsBB2QGJ@?BNW=e_{e z6TTM(^3z)ZtX~DN{{9G9@A%`dSN!qE3;y}*Ie-23m|x%B<>wbS`1QLR{Qcc^{&;Yj zKfXD}ulEk|^z241Zk^4c6=T>wXCU(iRmlCy5MWKFTeiSVr~}23u3GhX<)P}56;5%K zJMEK$=vyksRpir2J#z`c6vPCHfujo5$}s{}ZPF9yke5kiK@P>KiDHO^kR>owlqMZ% zS_=8ei5g&~goSE#t&+k*{tvJcWm#2S{|H@?zCId=DVS0t2o+GN&ni$-P^F+rCAg~` zECpRE)TjWfM6Q)D%lYy-RR$u?&riFaf;*Mu-mTrgBM4O;tn~O8G81Dd%E{2?_fyc6 zo*Yl>!dz`)Wd&Ml`WgjK-8#0Xe{D}j%QHQszAt@ySJS6QH~RMKPK~r(Wq*0wB05yG zqj&F~T7tXE!BVYCL|5U#H7l00dg)Rv$$itBHSF9duqCo}%~~z#{lwuz+`aP!w{BeH z-2XgR3aV7%yz*+PWcQ65)@fnGM-T67-m9li9%;+p-v5?o0#~Zm)txWzX<&8b>UFJj z!=b}R*}Lz6wys7PDlgafs(QLgRR8cBKm7DFU*CVgnG2tD@aQou4@)JxA3uFY%gs7@ z_8jK~{#6Nw*WbVAn}?71N{)a0{DtP_QsAbpsq($F(SdSam2aiIWU5tg^~;BU;Y)=n z6<{d{d-&+7z~NWiym3c>@sT#=&36K8U)_7a!v{|VzW!|$a4FDI-Yg|g9zW-sd-r+# z^?lwxek#VqGu}SB&zpz$WceQN9^c^A*Jt@&p8Hq#4)E=z_1rnVh>Hg%bM?>^?w*~) ztur&&x2YcsrWQ$G6fXv;pEhB0Wtj^_c{UW~+fyKbm7Qy+K~|dRo8)wB5>hRQNz^4= z04YcyD=4@rA!=k?ur9$Op&>d%h+d72X-Ry%(q+v^Ol(O?iUBD}MzT$hsF)T4R{}Sp zUn4^s5hIc;P?VdbCQ#OuXSkKTOFFd6)}t&(mr?~+xvgklAb^#tD`KF92HO-_Q&?!J zCCo=BHzz!?F=2^K2##xtPe>D-+*JY3CStg^5YffdOh?+5SepwEn2X-EYeka3B_(mr zbVzccYpN4HvK;B1=SXddGxhB~7}C*G3@R7uJJ>R~lMO>V*)gobo>3J}OzPsv>>hqB z=pDqOKH)6t8O{7|2~6*h%6NgRp#oQZ6AGw`?o35^4edf}DGTmNiGL>wyvr!^E2Ujn z2YROUVPyMp%wVt;y~ZK~Ro8t_j9?rdw*T<(^@QU4jAT za(-)hz-$HJY{EaqHn&XM}Lsjc|tr%xkqz8!s z&ICA_$v&}elEMUlOfb}IimS7uyjW41$4Ysy+}%CJVzALh5~}tpVf8YQU0 zgda;YsO=v^gERVdVOIZc0unu#J+M2ohxB0Dz-~DVv0}<#*31~r`q`scGjoK%)kxOQ8N-Hog#UTRe|gV;|MP~w z{&*_z^@#t7{8!0O-|(N;xA^(%<9vT>A3uNj880vI;LOGuY?xlh$_c%hGq58A%2KG# zix#Mg(-PEEylrR~A0*onDT(zXJJglROnHW~qUoxpP)ZCTJHn6L=pc%dW9V3zBhOg6 zR&_loG?=Wo81j>nv})>k0y4!JX=KI6YQ;mN{Czc`itzQ;Bsn5Xn-WKfmh3M3RH!gk zph*MIbkAS7NDKD2AX{e4(6BtVh2qZTqMjJ_3A#kP6lMwo^ zhfGcSoR^YBRFFTe@?Lp5+e?3tr+Kmpb2I7Gp^U07VsRH_Q&HZA9^E@r*}g43r5!zD zur~SgRC%V=bUO9@Yv|pxo3^j2YbEW=%P1)>q*u>sMvoey)v8iq!u4xb(J*(eRtIa% zvSn;pw~iefH?nEvDwfQdD+bC87S5T?#`Wv8G7LMmZ`F#59y@wi8%d~^l_zWEie=im z8cC?~uU>xlobTVg;r;vfT7HuX!6`46@?NPNt21ZMYdKl__J5{%wiF1deQE^ZzXeBs z;m^PS(Y#X%nl4?tuEEri6DQfJN>PX?;M%fdC);-I*2*|2&(_C$t;cdLl}xYlzf?Zg z$x~;v^DBr_aHXoZE8zOie^qYQ-e3bvGd`Q_JKyK;lO*T3M!x8L$g;PUYu`AuJ3=DV-2^6blV+`oE&XSYA& z>8+hwc<}OpX`I?Nl9OA9a(weZ&h8n`g+rrRyRd?RJ%i}p-i2=M-RRWLm9~X;v?+9; zFwc(sd^?K7$j>iu5zrFY5?z*_X+m;J3$4aglIX_dBr}o{%t?qdBQf5D1krOzN&2Lu z>XDizlBz}vnvj-iN>ZXBQBf@j3l+c;y%#O|FIL)A(S7*|EoqY?V3nauWsV`;icIKU zVnVkPBf7UX5(C0S`gCK;3yf)#V@g4`8Ch9!?KArWhMF#mGRGO`GCq)e3J1eWJWA$d7TQW0t^Wwhx`toT$onqI-@5-Q{=pEODSm zYdg9Xo71zcnHc9*)U~&!f0+#f%NzuX+?dcwbhGH=NfrK#FAZd5VJL$#qv)TQOy8Jn zdPSDfEvyF>K?5lDA3$5bzLa?Nq`<8Ug&v({y*r&_>ZmWAz@+X=Su$)Fjnqwv5}=BT zYC=R*GXbn-_=o;GF{4*tV|+v8hlc48ET9t-VMIW%5xxNiS`kYpk5<^b>tOGxkG+>M z4t{1h`dMJ-C5qKu;KDWSvqE)pkw|C2128THjOcoo2GKJG%uFF zA+C{@I7XUd9VQA}u4f%;_fz%eYu5g z=N2<--7tC$FDAXzm(Wxzg43)C&alNd)e@&@JX1*bu7z5OhT?3o?i1wqiL&g|xj+M~%A!O8 zsR9OcDW*=qs(W#&yu7jU;-)gFs)Wfys+c;gnu$ZJnKYt?5ejYw)i86?SXM7-V4Za6 z8&<7kr&!oKH>_vfk|oTWEZ{VLG>c|U7UeL7xl_i7jAiNEX<7{|wOx%QTry|6K-X|N zXRTH&R6*9LfpsjLHi_9|hcmFc6Lnq78ChG!(4L(b(WeVDhV^6ez#i0h5?CzHV`R^A zCim~k=pOAD)xDJZ@*DwzfAgypP$^H90xShmN)%iv$Wnl%yjE(P@?5Df;k>~;m^-XD z6YDxNbzl_@n_m__3Uux!c@R?i&ChPh+e+AxWoOQ#D=O=I)I z$pTm7SwDBYfYmIKIqY06aJ6E-$N~-vSe@Cmi4)t_b9lolj%{Ag=^b16eE(i99X-nV zqsKUN^f>2^pX9PAz8e=W^TnlW+!DCDE$vs=RFdvjJpS@4p4|P0XQEtQiE?@O_z6Ee 
zd93AO{q*t)zrKIL?;l?A$L|7Ff2&0IH~jtAOaA%m1^)I!ot&5p7VLX$@jiqnzUW^$wjLBoiGGo#t8fMRC zhX7x&~29_F4co;%K^0X9m?fvW^R|KXMR4IT{VaT1k_ON}& zE-fsnz)O7}tt7)Ae`=vZ1zzg=DX*9MUdnTI;k-)Xzb7zxiZiD#@WrjW8eDyT;fe-a z3ci$=OSSjEeXPOOO@XUx=Pz>i>J=W{xy7@uZp-^}jR!YQbNk$0t{vaN*B7_(^yW^! zII&2eYJ|MUz1X{=H~Uuhl6SrXd)8F5drfzy3`?SSg*)BKoM~TVL+fmFN(HitHBXiu zrET5ie^jL#9LdYG7cnCB{9k71c{Dy^J|QQdn26O8sqBP3MU60Y%H2# zVf-l$*3I#AHxUEImi%}RI^~ASwW8@;8bR-pKoK9h7dX*5*M?5{)^x}>r!3cy$^rwb zTAPaAFq8deR2NuKQ)EkBkt22aPV~=qp(fRp>R2zjiZ1LD7)Q6DT&jaQ(AmEarC#-v zxYtwcR!5;zEjji*MS4)+(wnltda6?IreK4Jp?MV|zO7WJ5SPcdBw8h&- zA7`_sxLCKu$JK~H*_R{_l$WQPn!rZ@D~2Fn6N3E=i4l+r^wB5CUq_@RA%VI?_{+l^ zYDxRtV0x7$(>Y%(^}IMrlEVmdv!+zwrZ_7RYhzs;Y|RDCym5AM{r{C>P%x#uRBD?F z3F_&oQVwQXh*0fQfE5@JqCu4cEHhIZe0_rO_Yc9-!-r5+`8q9~(xL*|6y(Z1N+L5+ zz%VtA);TG3Y@0_}VG_wP|0cPsQG`_;vZ<*m)~38EN|TpIl;@05HH_=ug`TCETA2ow zgQW_Nj_=orv3)x-bx1X{N7u1<`WWU+9K{TQp@lQ1v3B_~mM>bwswGR=wP~vW)@D{L zXkfv#=>k%-S+{hdD2Q2d>_p~G9j8H+f-AK=Z|ZnipTfAI^^6havuM^-=1d;TkiNZ` zGIB7JhxKRT;Jys6=_Zg@%d{bVnONV0=|gMeJ9VZ;fmCG?BfFO|bwC$pYRT!X8QdX{ z;hhS#(4c}S<+)OTrMy%s6sSB}YM+9t1w(piV5JI>E*#pMMI&mNH>?j6>IBAQeZklP zteP=W;AuKbClAqGU~1KYgCd=Pq(x zfa;5@H~8Z6b#91)QK0peD3J$WD8LenT;%D!2fV!hQ~>Ll23S8l5rw7lu--i7=l9R} z@be3StXKT`=Sz_n{Qc*1{`&PXzX@1M)`yt>8TZ*TL*qjP+BWhb`|F6F}ZIb7MZ zh|9Z|uw%g}8bS-82C$6&~!;zKkASJ5rF7!GPLc^b@cuD=nd> zrY9XL+c8j}Y0~8J3?Dv-2@}UKQ=a2#)5Sm#<6-fV1uR?Ez}$JWm@;)DlN4-?8ZNLk zkUqV8ibBpNEj5{5J*pYdx0b%WdNFRq2u2MV!m#>!Cdl&KDO1HTna2iI8*AeR&4Z;v ziYh^V+}P1f7(Z6?fGLlbs^qRhhAR18!IesG|N5)Dyngi^uV26B2Z0xr%&v-veyo9| zAWC_QRPy<==P$Yc;E@Jh%9EvFOIyD8HQzjXtmRv&dRSX`?$UrtL6!;=Dv(;SW-ap< zEn)Tg4eUE8&-2YMw0c=LZ-1$cR@}39Kc5{E5WI9n^I)w~Ft%Zn26GDb6v(Nfrz%9L zMm8$Xnwr|@(#2~WJ#vy8*YB`x>n;JvLmWDIOj|y3_=L7!d9IdX6d9nW{pT<{k&2(FREZzuONX4D>@WfiLqcRMye$xIhGU&XtXJI7IC34-|>H; zLsN3|T9IF%Cjf3iiJIU!*Opx6gvhcYJHt#XE}D^PKxUQ#EfZ4Ie1nOmB*q&PCHgN? z_#s*#D=oEN@o>(Q5*O30}0z z4WnnfL(pT`l^wOANK`eCbl;Aos$WvOH65!t0W(mb4Y9Y@4a0dD+?~ z>CryPhzbF`_A$m($aUKVThqqdk=AbBv~`W4!Xux~zMZM`tfjT{01E9z>;{l;)1PdM zzGPU`l4aYMBG18ejG9PI?p#K7UP~ii?@#gbYl?qR3;aV`;V)p~FVGSwU=k!y6&htk zNQ41Fp?U&VqPT;M2@E#HH_#Y&UjZuxS>F0M`5Fmenc^B`E@FnWEV~7o;}&3sqo)DZ zE;^Xm>u8W=AaG@9DUfB+2t5HTBl{*=vb(LnDDZGIY@*GuiWB9XEO3=-g+-brrfKGw z3Qe0O3wQ}ES;QJ*8Eqh-B@(75K&6L$q%rnUCRl|VU>2f_g|t@k{gnsIRvrc?IiG8q zHU4?dWLC#fKes!}56|Z4m)p7d`V@8H3jK7p*R31;KqIF{8#Go^D7v->17zGEzf%bkC&?gD7m+CPmA(HXh?Lv6`U^&-GqRhMl^oZQERXmg?h4TuXg zp|$MmSD8tlb{VuzQXRJ!X<^<}h!r0n;v?389d1rGIN3Yk^uMwUA47v`IvkbU{&5N& zRlQy1VW|;>I=Xt=h{EKgG#s7OXhToDy#jD^^}@$nl!+**%+w5$Vq*mWL**rkA}v0e z?BrMhyJUf_SW;s|VMTb65b36sYUt25MXTsu*Rw6%E3)X>A(xqB6vR|eDR5ZbHeE|@ zSAbF9xtLx8Ttli#rK9P~lF1`jIB^6c`}JaQO)sWO=e}U(EauLb!{Ygi*tBLNo7Ze& z?XuOaVVp|GXEoNA_p_n0gw<4Ps=^3i`IoW`aP{xZV{^t?SIXsY4mpqm)tI zikZ@qoqK@Pr>8KN5MskIx?P!*>t){^fmsc=wQB ze|gS7f4||Me_rwHhev$);T!(^;cNbS|24l0VEy{^GJkz{nR_SKaADgV&Tg5-)_Fsi zR#(ojE_n>9%%x{>3e|-v(z%uj&~~72S&>+&S+q`wBvybb(!-XtP#>~mf=LbYCo3kD ztoTUslHxT$3ib9TKurbZ?MsY+RZ?`kKz{=1s$4=!q9&;^|I4isz)_%;tfsjUQQoXz zPk|Z7+SQP$H!LbOs1 z8Ib~90$OUjI!0BES3p>lnJ({92E{pIZT^6nc1^uP(Pqf3pY-k zGKpbB2QzZSFl|z4mF#}-v;A5KQPs_Q{`@)b-o4i#M!}WxR;l*i{HNcvx>N5}g1W$# zT30zx3VhUY-wTL6c>F|5ZdV~fB_G3rJNE2l^Y$IgS+J0)(jTl^w_by-4O_ObYu|ni zzLd90g&{|d8pFg%Q~uA3r2tE9SM|132vGr*$^-lIt^m`u8|>b_k4+o5X&|MbYU~&_ zIAAGzb|2vIq2roIOKn%nsy%x2q~_JScJ&rlmFM@;CGK9i$d>|GUtK=SgPZ3BvM%!D zlWY9?;tH?7I?Rm|OXZ!M#P1^J<1&E z+Qxx$0j<{A0v6d86lGabAW|p*RZ^e;OJFMBh{D1av@X%7yqy*0^10RmBn5f40$Mg? 
z3!tT`@L;O0z?Gpa8<7yNPh5-v@iAi13t&YHV8sPDCMCQnS<%f!x9HMIuGzcTn!y#$ z(uesnYd|pbhlH?TXfSgJ1u(VNlM$UA7*J+UwOp%RsttJpRw)s-M2A=r5o|$(utc!G zAztpyMZbN5yWOYwi_VMmGZPqd6JQFbPsa>K)U;vh;7Yj%U6?vzMAlGalna2G|GwMsanQU_!N@>t|Iwww}Z|epc zc?)29`!>Pbzd7DPsxC$=yn|cf8Q2^zfi9JI6%Z;4TYx1XSU@Y-M8p)oAX7Yj4RQ9+ z$HBt@M=t>^Uz2~KN^Xo>hy`9@mUx8g=nG)!nKcr~`c$?x z$4bs)<0o(wDDV_+jBTO?4w*LCWC~!Vn`0#phP4p2b*8jw%Bv*+Ce&*kZz`ZApNllY zF2-C$_D7n^IgGH3G{H936#E3(uSORpnh;pvLPTqCQaVP^XHrKN?Viek%Nw}$;v~<0 zy}`@hZ}C#vXFp%z8`<{FHwXCg>J~1aTFLSKb6LM)DD!8EVx8KJdDE&y`SxUOLk(M( z_h;XldQNT~!L`pObMM3)Zk<@jsXenews|5a*N))x)dTrreLd&qRd8%-A=`&1v2S7y z%lkz$t6K>3dnYio*qctVmUKz+5DPqz%rFl!B0R~7^CK$A7I$Z%cMl`nUG;EwY(Y$v z14)T)c)J^7Z`lktq5EJzBcdbBNRGE9E5(VDY+pK-M2J-rPWzlNp>}~8KL_G`9Vkwb z?ZugRI#>`L<|A-rfs=#1mgMf~;jN7TY$<@Hyj9ARrLSk8g$mU+<=y%?(og|bKtQkt zRh}Mxn%Bx*mc2cF3G@#lz{eLiM|&KsEyMz`!QaD)aDNYNE<#nNAw>YIohZh3g^Bd& zm`80@Ygta9rb`ihIu!~mb!Pma?)2+YOxKcB>N*uMTmWQPk8;M;c4CG=#=;3hm^*qf zGe-_$?)Xtm6$qL~XtHwYN>(isOMCrhZK{tQ8`iKwAZppX znev}b(>zuy7S3kzoN3IOI9A$OvOZ7XYLV=p!PGGV-2!f_8|Jcb>R86q^X zrgLE3A~r6V!iM>i+1wyNwPdD%*KBqypTqX$(yowp#a#AnSj54tD><@#4TrX^;qbP# zTsX8_+I1Y?xrs~1J`)k8a^xVFj~(XH(L-E3dW1`-PVxEK3!FQ1o(tzb7i;_~w{PCz zt2xaDf<}q&`z2JwZB99*P{?S8zeENtVU&yjR z*8A5F`Q?YF{Pp`w{`=2s{`1dE{`vhWfBg70zrMZ0hgUcG^}DP5^xzCHZ|>vkQyVzB zaV8BTs-zRopgK2NB#w@$QFIhwAJD!vy-M?|0;XVjCjIai$O`ihBq1u6l(+;k5)-r#T&RyHDNzv=Wu*OYgkXRM zW@_Z13KPZ%P$g)U-Tg`UUwtbDQ7Q*3OMpm?9#r+OBIH;FR%(i@Sh;qbTsvMqn;7Iz z>$GIDB*?^qZz6~eAbWC~(!F$+8uTS-N}?YuB&h;NkroKXI6? z+cvUk!#WLsl($KR@lYw}pP*oC^7I)@oHC6Wv*)mK)f%lNM8l#b3>rL?frEx<)Ap#M zr?Y0yW%ZhM+Ws9ocWd)FD&RVI{&QBWT+Jf+%>MnKu}aP{cFcIDO;eLc&+uL~ion)+L^Nc}bfh=lh43cy{|R zk8bSZ{*{f~yRd}2=jL&7-zb5sTK2815`(IYrBkvPRU5$Ax)2sk&0$=9r1be#|>*7|lD>bF8jSa;G0z283vhjJrM}FX@l`I&?;psD z5s|DN6T|v(ajY5{O~Zf?ruFb;WP5jN@*Js1auCR|CneBc-Whvh100F)x53xd052yU z0^N;?53-l%-MGa}aCi3}(%&{w$ePErxbGrVJ@yXrBan zcMPCI8%O!Q#-zkIBUMfLlh9HOrIsXyHzy%PBtUejXHz1bJ|Wtn88J3G#9N3CHFG1& zGKONiBFdcvupDbCwXUbcvYsN#ffQH{BFB6H=_Ylgnh9vx4JOBHG;Ja#(KTxxjr`;W z`N;)TAyL0bJ^Z8e@d?+(Eua~$eoY0mS`rv;NN}XFhzY?F0#_kMc={RQ?rn&xD0@d& zBkZ03ZC$)gaSyP-JIo5-7#qB!%y9^8g}GM~EWDax<|cq;-nQm>$woxBb|bkwh}^_n+e>`^vpm>8U6mL7bKX2T!`la^dHu~X9$eqW7iZUTsi+OrvGMCr% z;Y346wvNkXd3_?Qhh;FXgAZL2jp>#l)GdIO8|_PCpp$gomiV~IJ$KQ^RShVxY>cC!AUG{7xGhl$V>DlJwle_d_;W76lhBbauG{7 zg4`740W-!=tb1E4Lwu$45A^ZJ!Nvh2eIs<5>tJeZhMlcFCPt>TP~k$^t`gmqM@xCO z!ongnZ*Q+@0$4DokY72N&Fh!3MQFO?n@ zMfB=WNT1Fn4D8WflfIo>GoY%B34?kGT(zO9G>1|Bx-v;XV~n&@ht{xo%1D;Yn!vn? zqnN0|c!TPgpgL`VpxKirvS_Y=(t>#`TeO%Z(!nm0PIZNJuxnPW)?}^73fZ?{#tatC znZv5Zi&(vQF)JDxG;4geSmx6ws8NVZSSiO%8#i7%Zt47atXZ^>#nUG;c~D;#j2q6H zIa66UemL{T4P(jVF-(>1BYJig3rl&{Mlo;9Am)s$XV&ok%pO)JmR5HfM)hN6eKiY) z*J|x_0jzoQyQbGwF-72Nx*Ri0e(%)2U70W2*G-e(J8lpQM)YUdgb_5179}uil327; zSSRg)wTs!hNQD9CvTez1HZ{y(^Ws?oR`c1nekljHt>QC*s(qVRad`Je&K=v!kv*F^ zAYgUw$UaUAWF0@SlQTyTaN*<;K0kGg^T&^H=I}v|e|CT~r%s9jyT;`kH@I~DI#;B< zdF$WFckkZ0$5(g1=KfdT@<_ny>HSB1r-rUSQbW&Q^W*bZynFCO0PBeW(@Xw%`&Izz zl~xx^g$sZG`2~Of^@e}`ddZ)^KjHTe_XWBh^2dj7`04F+fwQyRIkAqTYo~E)<7~Fh z9?692Qu^d3(kU^Vj`0Fm0#y|W@f3uGksc68PIx3miHQO%u_OiuNayWKsHYoI{@!8@ zduvJP5q>_J#DoMA8R$oJa3HA(F~ke#L@3A+m`V&6pi<#O<%J3lCrOn~kZlU65&{D? 
zsEQRBitzB%B+AQ63+t%~n-k=`iD5wmd$>~$&b_!f06>cNDouf z@Pv>a>`R&eV2XT~6ghvk09K}ahx~+S+GfPlIz5Jxj2PNx$5WCSE7p7p?F+N1D9YF7 zC+t$zj-mbQ89!+}!^e$a$mpRA9W#_c!v_g)jby=+`7B+zh@XjALVnI(`lX)=??3rtDdFmJvX zF3VUX;4*dcBu0%G#xQvg$I5%Cyk9Crs6a}2wNwGq0sZ??-mX-Gt-*upwJ@RzA#UBW ziOT|W@4tV?KY#zly}Mruh#ceT(j!@M=_jrI=byj%;fMD;fA&lO>lsfTKNfi+ z%ddF){5vgCUFA}J4CC$Dw_hM~Bg+MbRtN+wTfS10r6RNE&S(0JSxgluTDxuop9uu5 zlVjETy!i{YT?|xP*R|!fNaj3xvUca9Y1ay^XJaf?$0Cv&PCFX ztY5p9y}NgF=(B?yJ$!^?0)EGj9+mrfjFZQYb5ZW?Z2`-B*Dnibea_Rbukqu{FXer? z#>=mdiR|aetxeoHyMW6F$8mIXtr%3D*t4<|o0qg@!IUH>4hvyaeSjF1(aao|NWbpx z6lSy(0B=HZmZ2;QTnS(m3piyb8;EXbCBW5$!rVr*EpATh0$mF8j43LxqA1^5^IoN= z>ywtILsC+6Vq+Q;8TJX`!JiN*P!%l_8}um|(K_;68q!AeYlln|dgPnXuf&K^l@`qD z}0pJ>Ka1W}vqNoBk{c|ooudpeWk?Lo2_8?n9~ zgt$2nA@54Eyg#bwZpYk0>MA=kuC5<*#|&rbwDGK(Eq(L639N0H!1~3bS+#Jm7{R@n zJ+YEegR<${J%Y~VUbN1&A}c{(j3oiBSW}`y3`sQc7%#R0Shk&LZ{Cwi)BaRi)Kg~Bmtw0L3hn!mEs|tYOQOR7Qrw4=A3TXp z=}Tx76xE!d=vD;9=@S%hKtQYk{xJes5xRJX=-?ZwLWM>eU@6e@4Ku(aP!|`kRyeuq z;OMN2or50Mc6!)38p<{kTzt*6(4a?{DV|ZLxP}|x5ZvnDx>kNVSh%&s(54AG#-E^P z+!SL=HPX-!QwKvV1lX(sO|Xp-z)Cg6Dcb_KJWIUuZ1Br-z&F<(4}mP_EE~|)vfjHo>|I<@p(+H3Z!$gJgfqU*val7)+>#i7Yq5^ym|!PSG(ogU3m0oc zja;%LBgUV!*Z{m;1xl^;aJDxmJt+YncQ5o>>SEAJU%*Hp$KUNhqNRMcrL--iZH|?hvF5pQu##uN&H{fAM|rpf93um@3hw1a znN(Gj(5tcy{j199-Jyia!gOjow`J0hS|$$@NU7~6pjFQBnl8*4H<;N1DwBrRYRmH{ z4rlDZS_by2W_X<(D;@X9`u$wSu}{unl^>i%N0~fhq`1j8#b(GsPK~&9W6Nmriru%-Ld74(2<$Buz?wI97?bL2nLfOphDl?XHA-H7*{(W<*`o$AcSIfY zNAzR%kUlIPJCIp}do#Vh2Mb5lF>iQZW(io%9af_qqxLNku-Y_xJgWsNmkPwKoH$x! zEE{G{X3N|eY+X2;y`nhxtXwRhwUnJJ77AR=X3Nrf>{+*zL)+GJM3m@}-J3YHYcogo zZR7Njy&MyuI(A?e=T9Bw{OO~dK7Nq1Cy#RR+$k=9eumG_pXS`@lbk+zjL$#+oa?vm zaP7{QTBYn80#;w#xy#*qU-Pwq)%|ZD^623co&nS@ggeZ7ata^d8*>WLr9DYBQ7$8_{dOl&aMAlSNjivYqN)~#8~?wz~jIXJ~m0j+=JvU|r)frew8IekXtlr~yWdB4<1K{cnLnk-sDn+gZ2$)8oy`xG&N6nKpo zK2-B|shlshuEK^7AABotbz7_au8Na>b?+`e3Q+z1_n&2p z7B-^1bu(#oD9SYyxU!&7AS*M|n8c)3#3wY>ii}2v zHzG7h#J>^Y0iO^haFrO=l$>}yic^d!PdBBiz(O0DH=@#-sXgskHpq+3V?x+5Hj-VV zkVGN7L?Z$PP68tI@C(()H&}oxsFfBrboFhHqkB{Aotj~1*Bon`7TUZ( zb^-y;0$8rz0!H43II2YWU<2F*&O8LtTtkg;4m85f(*QI3mgt)_M%SPzdIl{pG}FV_ z)&LVneayV{u?#W9I@S<7`Qx1BkMj_a^2xHtJI6r)%MO<;JDj9-6miaX#7V%*I@uJf zc$IHuf_;oAbp=;(0)%O*#DW2Vc~-=idy-KpfYl|I&UJ+hpVEs(t0%By?;?(!TF3Pp zyLtNLG;dyA5Wu?3ukUa0_s@6u&oB4*^ZhM8ytyKBiQh#2cz21v1)To*@pF+2{P(8| zpyX$fUu6Bm6@Gg!$GyKS>(}`GX7Ewf>b5 zXL8a~ak6zlr&%kE_2mDuv=vCQ)WAxKo{qi-SjvN?yjN;uVL(8Tmi%s@tET~$si6UO zR+drNRYC*>>2Y*vS4atpMC zGi&S+2KDSjr;=<&_wUJq$)lJ$d;pV&_G6;J*O18! ziY2Nn^A(Gius}Ll1zpNYU$Sr!JGX9S?b0QzobcXHHFBU9;+v~VNeDPCn>2(u!)lqQ^2o*uhzW41 zeG5j`$+D_%)t}{)hOuGxI95*^&FZORSvPHxfYnSk&6&Zj#S8ds?Fx1;Z{W!0wOSt4 z-gV2_zM_F0s}{3=^C}Ilj_ujPv3=V)wtpvw_wL~Ep6wi0mD!IU;=-BZ(%GNl!r7Bt zIxn66r3+lYa#5>eq(VeWZrr%Z7k9tn3jwMx?taakyWa?0-RJ9XRAI@-JiPye#{y8_ zJ$uEQ7jJp{{4KAaz2@cPm%I`A;i-r~mjbNsA3x*WqsP2^`cN$PC;a^OF~9%x40%xm zv|jT2uaEfU=WqD_``f&Faalfpp66d2n*6nVri{24(xpukQ8`p9 zuPQesQ99*hvJ(?X5s*q0ph}I5Bq>C|D^N{yBVZ+x5D+K^ObDsL0$d`AegP!-%DW|? zl@bz4iU3@whpPrvIkAx>$h#2c;Y75LO15_)E!2mhPq5^rM@-OCuHy$4kc?$d+eed`$9tFI{78io!Yz=TPoMaD2{ z>~O7}IeV&r!X#!2ls7cYVWG&P#dF!bbuHVruV=%?RRRu&IHl$cymXl(hYkxA?BuL~ z(}s2HSuF;L^1z%qb(-^M|DEVu$vIiRdie@>1-c(Ue4y2~Qjm1_%iG#qglfJ)Rcus+ z8`Zi3CpDUI%jS)m=StPuQgGF;Z;dwpp~~A*>uQ=EHAkT;b4|KTTT|M>k^ z{`u<8IplQv4TU6sXaMDp6e}%d4C! 
z1zt*y9Y4vIt=l!|QaM;EEU3UswF<7rji1P<(PJ4tLf&yPw*CdR0|KF&wS@lhpZyo1y8^If`@l6@#WRS+`hbr zudnas>nj_%dVDU&w-4v|wt*u3*t@C|n--R^c0mCv=4LT>LYx?y0SxTv!q6H|F*JSY zQffk*tmc$tHYYdfQ*shMr7*P_1tP7pThXqtITa<1sc7AtwgoLH6cEc7u*%6aB{kWA zxY*`IN2`gRKM{S?h!6p+-~ed@1-KMosl2N=9SY>ww&^BR6!G*Ei-I&(Loh5_( z*)TeijU%I3KRA|+15?>tpTp|fJZ5)EWn%jj`lUzHP7IXR{?W7z$)GqWn;icPl06fM zb&Vv=UyP-cw)8FS!`NP<#o(O9^2rTsn75Q|OIET~jO2|=#Yk8-i?z$9uyXNe7B3jc z>}fq1GrSFTeNw3I6fVEflY$HzG7`r!fw0 zpJ3{{XMYJ{7BfupBBww|qU@YBI9)DZVDV_bs-c>Dx>l$Xj* z2P4zw=;}8^r&Tlb4O(Gfrh|c1D-0b)vCC)81?sG#^s$Q*SW7U&HN{Fm%MOoR2iyd- zoCT_!@&vT1S`0XnEF6)2D{@-r%j|hL<7D&6oe}B0MA1?6E z+oSyPd>=o5yM?!RR&njKv8YdecV`DeM5%-Y`4QmlMu@Km38FaC;={>Gj-pNO zzat1M1gsQj4d~I4Q36sE1fWI_=%snJDh0f1I+rnNcz>phtY^ZIzDyr8SUav?xAxTc z5XD+k&5*u*7%gz6!h%bsgIgkCwX9)2Th^~*$Cl07WVajFZ_vVmvu21DKWDC3nTs`9 zqQZd+q?Rnz0BY_`u~ete(4cGMTIpbA+tNjg*dP}Ca)GU_V!7|%y_ad@Cop%?ICigH zu0htq$zxe60JU)P7-o+f&dkw6nKoh|3#W`>-h`11uj$UBspDARAPPkwY{{e%EEIU! zFlPeG1h(dn9>DS`BcuZv!6JdI1+u<){190mE&w*1H8TXNW{sD29P0$+)=Znsj)r;i z`I+oqCPu>M_3T)>kYif~q_%D3(B^gQ-?*B+8&6n&duurR=2+3&YgRF`PH}Fx%aOq zxccfVQDEAzb2VIDAnU@@$IdHe0hE=*AK1c%;x!Q zpEHvA_1&4Iyqo0$S4Fw>&dZ@&W+v?tlE@8@BFR69C{J$z86N?i5DF6#$c&02)XkL$ zPkA?@qbN#E(MAb|ddhnttt#HB63A7)mCChB3=0!uC7kTII5A3Mv~XawudfDIDlC{5 z7B26YCtJMpKcO$p1suTgO$keDU8m43O?P z#G$*pySo)JKv7Xz6hXj3L_kqd5xcv)vAesw3tMk|@8?--yx!mMd7eMkYi7@mv(Gtu z&%9U6nm)-P$mJ~b1Y7+w;!x1H2L|`bM8EV5hyJw^!H6=+{w%F>$xw812SFc{++t<$o-S6?|AK&m70TzQ6 z3xC$rr$4}oXB=|SJJ)&Xn;)qazKaB*oLYg?DsagG@|>mp|4z5S=WtpGo;Ao8DzmZ*EnbTHiH+#fHwV3X_r-+z20Cs(4jwpw@wH=7T2dq|R_q4-30Z8h+q7}L z2;~2{@HNkzyhM;Gevj){@bJzJJiUJt4{n~v&5K8H>*9VqxV9HhZtlXv%j>ESZsjspI@mU*U@CA}fMzQ;aWn#E5(|F7g!^^u9 z+&y{G=ikKAHO>y5;YN_<>-HeCE+xQPfTi?1FK(Mca8*yr?lf&1{0c+$PB7Q#1}9xD z`01M=M33HCi8s=uQRr!sfn1yZDD)`AQ2$bthn8bR|~iWkmujRXHfA%ZPOim3iGlYdL5;D?raf8HPR@1WUTGh%kg@tP#xOqy$*{ zFpSlOK^#epHgv-Y-Xhds9;*$@Xf0TTYrvAI+A%>3ZfSb(%QQtqjx!Pmk%l-VlmIIZ z14`3SRNe~}HT_XrUw~O{)mXo7GTQgeBTbAnCwjOa+&h4`ug~Gj2ZF0F*9n|%(g1J^ ze}BJ*Kfe-eeZ7dkzg@)tNdEbL3HXEL&rA608$sCD>$H!K`$B+4$NfVf_Rp7-_+Q5% zAzzJ01AB853ZclSXb$nz{`_-AobUY=RI1D}P&dIT|D0*O|0o8US2u zkVuf@VPgyneRW8+l;LP;3TtCis4J>KQAQr!I@A9{rLw#-i!L5;r zm{G|2vK$?qA<;8{x(WlCDhzaW2(F|sF(e>1kqXH2bhd|&s{_H62Wb(WNQw$1?I{|) z2(AVaNL3L~)e%6AA2}4GhLQGNH~@wHvQUws3Ksty@zY8!&HLGgi-Q#k?sE zm@~OvlrtxeBd8jS7Iq5~X#LzbZg?@KjUR>OZ7oPR#e_rkFB zC=3kuMV5~%dJ?>J_x3~z!A_Ku3j%HI;BIOGXL?rsZS6(60~S{k6S$e0BEZ1`vA(|a zp18u(%1Y#>;zXdFDAeEi|0EOjvA2UC9UI~0E!=``78ar{+S^C0O&ISVK=9*_2v>Ik zB`5e=*&x8ip6U)_shco20xTO#`V4afIor`a*dfB*nUo!ZIWHGFkK?VZ5asTGIByr> zChQp=h~Cj*RQEt69n0$>riS<+IfxX*V0WZOc%pYwFacH!K~)@;3FsaZf%M1-4D8+m zc|Ef*Z14aK%O61RRsm|O$}z5L1jba3zyyM+`f+1WUt3FqQ8T8~v)o7uMN7*Jf`N6| zPC&JN+csgTVyo1JbLRzIF=R1xweQDgA7#+X?zWat`8sRd3*Kp-{o!I320*XAZKCc*@i4Xmb&?oiJgVpcnKVKC;rUV z&gXDDgBhoZ;F*P-x`AV}*owuVHGM`a%7&FAB{d!0d-SCCML2f!s0h?&YaCmv*!nhg z%4DoqzVyFkf`ql|!a0$eg29u23%2w(nb=s$i;R7^K*hdXHsme%;QbcSB0agG3mLKO^ z>_#vm1219(Pwy^paq9v{r%rGnz+x+wpU3YA^_M|ZkRsy4R0!ZC7~I1QWjR);>}QEl z{jD%A*9PPJIiexQ3zPeWp`m94Mkj=zA}Sb#egPQZ9*%+TY3SqF8yVL9(8InE8TR=| zu+Bx2T@Erm2clnS0Sc4KF*dgzGb>36p0E&0rYy#a)}>fBb3PVMpNV-hCS%t0Isy)3ekW&eJ2Z>UtwTG4SJ@sFtn0~iM+IDM~@kkR)Ueh{b@x38);|2hEdWdYj}H{vbH}hhXcU&li9% z=LxQkdVa^fB}YrdyI0tPFdCRukplAXWMWhRVM{LU5%` zqOb8gq}rVk-5AZW2iWRN`q+)d$arw@MzL+bo3NMq;D*%`r(W@4&( zq=!9Xyq)1=Yet&9vj`+KmuSGz+z7_nngm-$&{orgd{;T)CS<^33l_TvJOADV-3YQ6 zu-GNYOXqMZ2N%-XIitJ2M310K3QJQH*z$}i(%J*OJrENfjF_+>ga>#dgf#QSs37DM zAPq0bMe)Gi0+{NmhhcmbJ+vcAFmmV++8&O^aa9;sSw;gyK5B*+iR|hliUy&Cplf95 z5R9TW&P_FmK!)S3S|&FVSk++$cWk6Nu3xr?;2iE{SjiI_Tm3|bo}ppELy<7zNv{Aes|n~p`(Clg>*W7Vu? 
z5s=%L;v1#5s zY+f)ATNW)u`}(!mwQ41S)jG73#(a3k7988X0|&Nm#;%QPuyZ593Dpnn-A%F&hxfPR z1kVU;KP2Sri8Er_IOpcMaPG1It8-_mJbwk3FEd=-qk-WO$rDmOo)B35DDT~Vj{EmW z^MCLPj~>0nlP3gJ&p+VV3tlqhJsuNWJ*BpnPdm^-u+{P8l?Zx&_v8g$KYBv)h}xgv z>$?~D>+>5@V&3BKKi-hM!uKza@%RCO-m%R%xo-*fubqNfwL?&qn}EUbzQ~R6LVwPA z73hV&fxbxh@<1FxRHUN|!U(bg>>LSftm)aeg@>sb0hbxP36KKq>`39T7fC#wq*6E= z8;ev5kzSr+h99R)2qy6Ib94~1|6I)I{h{N3CIn?G7=snh@QZNwK$t7F6I}V*5R_S3 z3CIevqj%DcI%gj*1Q4vbnHs~#+Cl&pLsPJ$Edp$*6L+#kw1*=S{oE1b=|tOB0=lBy z9EF=O$li*;%axQP7y3MVdY@g85$TPdvHk>Ep~xmhB_}nO;3^E6u`wv^-=6?06BR=S zqH;(s$_8emG&dWS1%okWSQ))jBTzqj6edrg0c+}XQaqMn^{Vw)zI+XqFI$6kYd2!e z>a`-yirs{lFJ2OsD~7B~7cPqO2Y{c&@uSCZ>cmL_Sr6{r$K!{O=s$lV;DM#1gV)}B zA>7Lxb9CeSRRL2B806r6O!_A zY{lYppFDmjK#KwED}mP6uir&xbp|Z9VsShcTcsGheu5T5)gOQU{cqw>PW$jd02R+n z{0UwRW9(jJHzKXEA#zMs|J(r>MZe{o*|SA*RR$_{19GN%zGs%r zn>K*gVC3({aLM1D!HR!}=g%JF>BDXUjx^)KzVSG}w+4Gx4J2hN1~bP6VCpCzG*!Bzw$u)lc_zpwz$)pZf#w<)G*#H4 zxR(lTD`QNt0mc;RV{EZLCJZ;i#Bvi%C^g2&fdwg59eg%_XgvQ8Nd#>*rxX^8(Cko=tu0G_+1>#Iz~n(A->wCIYP5y1^(P zk%=Msap>1C1era&kPvT;pfD{sdv=A1^{>#C5?ty21~rXNP*<0MwuU?mh)YZ~Rp4l- z2Y+)*1RC2CEIA^^#2dZblTaF)huYqim^x@YT8A`X#(;X%rH{qXpb^M+C`F2CF(UPf z5u{UsU##5};oz?Xw{T^6$Ev_TNgaVHS_n+lMR2kq!jkFbPqaZ`j0HRbbz$$W04qls zm|D}|Z%$&$F+p8mK!ByK^&50`2(a|JKv%CTbPWi+2(qLW@{pRzLtjdwrvN=&1?XzZ zLrYZwY6MnFva(R>Mj|H*Ir+}$rq~H`YMr1=*U&UmfW$@(`p#O=_t1g9w=VSks0`MI zK?K27yaXo6`Y=tFKpLk3V|qX=60~7MkY%5sOK?Sle~K==dq@$|*B0@4)G3ulqF;Fm za?3K%CqD_@v!jra6@s)Lq%rmm!@vPCC@x4wRY^{U4UkH}OZuHGKcTaCH;kUta<~-6XIg(E4x*f4w<{KVKZh z->;71>*Kw6dvgHEH*17ASyJatzlDhe0jw#Lmh9(XNrQwN z?4)|oS5btwgDoVgDlpO2hn|+Ma1Uy$X^B#?o1%!llIhw_1iD*UTEo`XUceRS%L)kz zgPolNY-nKNlnib(IQe+E!PnCrApt&!3JpYLXaHiv0?<7v8iTSkQI_AI0I2{?<0{ZF zdIYM<3Q=8Ng7GyK7+p!QRb7F)QI({1v*taVH1A;~1Y5&Mdl+7fi3BTkqeo)m*lNs~ zIgPZJsc4u`i)jQ|%N8xb%4JKbtp%g2t1z^<1f?Vu<&|itZ$w+`YyzkS1XKi23zyIU zvy^0sh%sTP;yPQpSU9kkw>f@m*34GYP^SqXV>jc{1q(2L`ZToEkH_Lx8U*GOs5MQ* z%%*y=J-?+H(+T*R39@DqWX+j6iMILJ7R=+65EI8@!-7_ z-N4l=w?tOzix)^P-Ne;vcX9jnV*;rcxPSL0?%#Qddn8fI^Kkgvy{{x;r{fKAJ zKH@3KW7>X1?GH(K`z1lwn`iIv{y7bHuioS9+YeN}L&vk{B+u~e{agI+>qq?gPro4uAdm1}~moC&1c@t*fSB{rs_*H?bI_^HMNoV5-O^R?{aH1#w}>4)#aikRbF7 z4nl$_!Mvk0{A?XWj8q`Oj+dDQeCZjDa&w2jwT%F+U`I!>kg})czX9oXw(z#K7J$V| z*Kir)?kbkEakn%V%h$yD`w7bxTflg>pfdrQtC10WE$AAw9YF6upsgK3>2t$foDu8k zL8TMzvn24cAgHoLn6tf*P$yd|8MMscWo{^h>%P{;1cV0gFfl-|gB4;)847kZhmVyZ zDM_|S4f8}g!DVuYC(>xp%FReb|MVpEO^QcJzZ?wC?2gjxo*3RQ3&p)NFsOS9hGb@7 z_`m@KS=7&r9*rq=^`x9Mpt)%pDGp1paKQ?+&RT#)ikx{dTcEEAB$f$lt;@W`RV z0;<@O#g;3EERMrsVF>#_R;+shgcyJrhIpMpcJXn{6}t<0o8f|m_kaA*A%KbH@uLUA zWyrG+`}fNhS>QQ#Y2BJtA~2jGg|EeyEOs$IeZubOM?WlC-w2)vlHPUj^Y=o46$94K zTv{(gveGvMP+tT<{RA$~vBfULFJHeAeEp5jUw$mr^V6ls=d$~7fBQkKS-V~Y!1D}3 zb{$TgHXY4VrilJv+LUGybH#2=&Qi}-J9d4tYw*JPv*NxPD*5+$@%-7pZq%1}`HZ0S z;VnG9e@y_^`$OBlxj1~nc#>ywT_rt zVT-YJPDz#&ed8oZ4AMe`j|PIg)v5nhg^!Oa`~x)L7pMVme|5O~ka(%X+gll-!HS5D zBDjjxMN*tT(&LSgm28T>sip+{rs&TSZ-VSFWAqEMLM|!$*&eRwVdID%R&MBH7lLfN z6!f*tK|k9v^s%l%y3t6aNGp&a9g0}#5Tsb=qmNe!28UIkB5gb-44R5b!)9XI=vip3 zr~bBKCT28EA?2Fm_QnwOR-nFdD8|(0VHg2cUO^nP`-P%MPani3l2R2efwQ+9Ol^OM z#P~O8(KxH2B?m1{Wk|HuVW_1J6D>{H>+8c^Y6?$1QpOA&k>n7HL6N;s)teOhf-z_t zM*Z~gX3Q&TMsuGDsEn*af9Eo!o0cF-Qj9>&Vg@XAIP#AcsR=)VlHhcG1ZR+>OA(e~ zfrxZFgr`sklSrCgq&Zyuv|;O_2ve)BFgEQ1Qxh2&NxMQ{=XV(DGH6kqZkRz!N74zp z1Ydf_vM?}`Bk<}5J#9JYXwsKam4&*ZEL3G>px8wQDspmAmY0K^Y!}FO>jWA3Um>UZ zJJjeLZ8KG9S*byTKug0x4Vtc;*Ge6dU@hoH5opC~!!S-0#tB-mNYaLNk}d(34(yZk z;GAj*5Be_tS>_1u=Y)hjKV+6fA-gOYeT!3(krRW2j36YX2O~1Z9nsOQNKN)dQoJY1 ziZZZtUL6kZT!iaqw&U6DBl!H{68`3O(cY7MxJ@8+Pn3VYzK(Ctui(qGOZfcc0zN%H zk9YS@F40S7-3&(x`Lbn?4=W8|yJ^aue1rCa9ZF?UNeN 
z*4%_eq;)TzH3M^}HKV0r0v60@!5RX;xl^0zm`2Q*(uBFq4Ola8CYHBO!;%@3v2u0` z=1-l7xsxYg(KLds>65U$ZK^nC+lqzQv}7)pOrMNRi{|10!PII3y%nu(*uI?Zm$d5b zt5#$8dV;L2TXAIfZXDmg562I*4=S^g zP?Qjf+=yW0goU6_coW=`=C=1xk9JmM&eUc@pC{FUgPsQ6r#^!0ErbZ* zG9x9404&mt?%UoJ0d__RbG1aQuLBYTT@mN!jJNSGJLVHHS6Z}>;GA%o)G-7JCI@M{{6cmXq{bU zKjDho+1?&j{)|#~&uxia3EL*lr%#>tVvwEelVlh}TQ1bnAP(R08vGDz}Rgb~)9RXIy>t}fL z{2rd%yNt(o&*M3b7Y}Y6z@5umar@$0oIf-RmyfjIT>E%>?@O>_SwC!9l!dizDFj#% zm_Ei26NfuvbiO63^GqJ0;~S$XFVLf%&L*1SAlq)VTjf$K!nZ!#G4F6cc&r9@hL%JR23?F)?sX3111cc zgr@3dOddziE%oiwCfB2>u?l0y7h%-6Jd7NbjnWYr$SaIRpZ+1p$n-&cGAT@SPtHDy zA_bL!SyyOFxUKRAc$`GwjfTc}P~)4T{}l(XDGI$jT64 z$^Hsi`Cp){PIPIY2z6s+sF*54)lwO1_R0iVD$w>N&7<{V6ZcS0v(YM z>q|uLjk3Z_ENZL8fz9)A{rDC{oA~?f9jf2Lmq%C7apxRf zUOR!u7Y^h8>HWBIXa}zD+k(4Ccj3v|gLrl27&>mB#@GAj@#o`<`0MEf{PXHE{`c%G z{TRxx7*8uC``aNYzz#j4ypY>H0)3N% z5l{CW>FTE-UfGM0!C2+Sgf{%kGTr6oourx)mr>mGH#X-Do zHa2iFH;1{d9s!mLBq|!vCSkA=DH=2hs&w^XWM~Yjfsp_#dpk!F*v@MdvTHClHXa@x zUa+>XhMSW!JVIo7IW>I^hBOh)VUHY}XCgg|LNX3cD)eX~h> zUqEoR1ZxOVIA6`KojV0oHBXvMgAHF}8m2T)Aq|yaYvCd>__R)M!8`(|c`XD%ix&!j zS~PQp*q%3i8Wzo(iG{5*u#jMD1>M`Kh4Zm=4t*ZaDk3ObFntP^@eCMY{hEa}^Jio3 zltu!tM&WK`SK{*7GiV<{)T*V}vV5^<+qPl}4s9g>oWBrj=X0Xab+m6Kc2K#OwCaOA zt84FG9BXgK@q;u#9XX8CCywL9u_HJ@&~;#cI}L`seTIPQBCZIF)oon5cpc|BEBnCy!eV2 zuf7R+{_-oH5o|qs^%XDQ{Ye`8A9(lj6W%`WAbE>-Bpol`kx>1`Ybsyi^ZR%B>-%T? z^Vb*r^@ZT--AjCX{}O+G;xrE@@$%L_Tsp7c)i!awt96u%?g5io?Vr;Qt_Z`E~&meEMR566H<>~*VTzD?l zBV-_BAYqFSAIHbA#fssCfs4;)mm3Gj^YuA4OIXd=D)jC>UcGuN;EKVBVTvJ%g+Ysj z+u4oCF;^@MTMSd2A)W)`*;UBU#j_9@tQgF$Ub`V?Au?pK1&f!^VQUu8JlsqQ&E`#; zuye;YVP#|Cm@x)LhDk0tzU=3B;@^h9mgn~xq*jw&fxC#!?<~A zC+=U{j;FV`;=WmAcqBcy!A1xSH9>ToEs|4gkVs$^AEQH%B|%(_l*UdI z+BQaflp*57^$_K!fk+Qk#JbV=k38Lu27Kaf^J=Yha8D~x6Y84?}Tphzd=E{6I8Ui zi4+d1QUWYfC8${|LBmNITAnJ<@mGa}XAMSaz${)D)=37iO_0KYXxfQL+AZ0P0LvT! 
zy+{M>?~a7LAY>Fo(|1ciQr{RvrUW4<(i?#x?(p?@hM%t^;-dTs3gR%PatKz;ZNl;G zOL6=3PCUDI1fL#Vz#lKJQq=*@Y9hd%PVN_iVsF4Mk;ClBDw)l>L7(wd$15n!W5ovZjz?i6AS4ETlLp>Pj zNq?khP}hboL6$^EUs$jtI(o3Sv=M04TKbFp+HU=zb8f?7i`mv-<_v_PB zEIwOSG#JB*@=#u!N1&BQpj9Y>+Q(IvV03vAMvojOmg3;$G-}6=#`tmc(oC$!ECLu_ zZi6jSlL)3}5WMg-&&%da!~EF`F{NoLffL7GEyChOi?EacYt^b%q=Bx%iWMs`ckVp2wzmEs zq3dzEh>qplKXcmJux!CR0aGif+_-W%)-GLyB?M6m39c5k&BT0$E8530hwgnPLDsUl zvoXD)URbr3&1uCF0<6`G=3zZS+Ps!2)IN=1Z9Y~nr1R#{OFVxLwyh?pSi1`A=$P$m zR$||#&DgYb1$M97g!U~vv32EI?Af>lhj;D8(Y*(7j5_`kM~>j+F^-Wrfiq`L zG{7Cg!Gniz?AR%sKE*kPI6>$wT)#ns$kp4pcq9F#GiG9ln41L~!*Pe}4XmKR>?5|Gs_#zH@*)y=&KZBw|(}XIghPH-jI+6lZAnBVntRpS?BwcqX8Ooe+krV1lz44|fC-JcUr5 zAS)VZkDW zD8khi(eAbcSjKdHQzZF0BaNUc&f8H;R!Ix-M1nu3Z}<^cnG)=S)KEVp26FNT*4MgE|C6bvaQrDPIXrnL%7?+j8* z*n-8NRZ%_yBP%NDooo=$^%JmI*uu4io=1+;V!+z6c@qvDY8S!n3>FL(Y;9r?VkqKx zECwld?Xe|_>uk|sz+zBhHz1$O=doq#?c3KP9Rouax3NWtgT^^noVWR!9NYEa0l^A^ z(@(eG%U5syi@#!sVzBxNSKP)iRt!<>3gq^m+dm-dZ*dK7XR8+nyo*Jh@7~9SiU}K zo;^H|dp8deU~R|MGplj!%u?JsJ0E8cHsbisY8=>5h&`)wv2#Tqte=~TB~zm@v(_IC zBb-n-)D{!U?5MQC*aB0G8f1XFLQ}MjaYJ*t6{c6(Va{l0EUfjw0+J;Yf-$!y6qAYr zF{-~GiqgH174A&WiXEbT><}4XkEl?3wxaA28D)d8Fmr?vY{e%yA}!ScX-URNjMYa> zB*9dKJ|e>m5J3_iB0*??1_C@35$GgOiia{H>@*Q(u8l|&LnK((Aj8fBy&P$La7jb9 zLvN%T^+uvjU!)ieM4B`Yi3T}{)z3t{X-_0u_d=ROA7pvvVqio8iqlI`omYj%%JFEZ zrm>=GIBG`~V_fZERE_G5(h*508Xkv1CE@5h$Pa0~ToIc>?|rN({3CSW7D(^An*xk1 zWT36r1*&SDp{mpw>Po~hn#!=$*M)_)7OXTS^o}|rn(i~*Cl$F-*(lE{LSyl0`W+`@ z$$0ADt6MN3cN~U96d~0q2a%>Z2s0jtaPva^;zU5<5$1-VqyR+r3`cx+48cb-a>{xl zx4buUhGwGspajJ92!?;83tT+SVee=N3kyvc87e|ot1BcLT_DvVc+%(uiTdv_)aoLD zODd81;V#tvosOe@e5|Gn4cJ7Ds$Gc|yAot|CCHM2yet7$=ieaLwKJ3zWT8R;rl{Bj ziVC!?N-(Ca_%CEBLd{$e8g@$1a#e!1w=yIFYA_7bgjqBV`~+6EvBt2ACJ>A@gKMHC zyfdhC%c4%M4|PP@K1k>vhLrxXNbDVph@?>X1i8cA%Mo7Q1RP%UaK(h6BtHku6Kb$} z!Bl#vSK#L9U3hf)5MJLojd%CXu3>qqwC?9MINziKH~PoIL8no5iv zJP0FtWudZn7AEB9Vb-XTVmkfN4Xbhc=svu>ata-{&*A&i>-g*WRs8kjJlD^u9pSP%#qQ)vZX4^MbFxe$E0!P3we<_1R4 zRnsOA)}{Z?kU-f0x*9rAS5_y&R~5pLWnpG1l8o}A&kR-(5mCaTWo=;vAG&4;=j{m( zLuPsk@(1=8i_aG2<)Ua%4#Cx6jIJm_UCnSz99>Q?$54zLS&FgMBT!RQg@%R(f|!}q z$xjpI%o#H=dh{rg(U?7FHs;TphcN_C6%`ejJb5yvw-A7F-W<-Q(>eo_rZl5=LLH`X z61t_!v3&VTx<(s;Su3VZnNHexGp0>y!Hg-bXl|T}nbTVdu9jfUnl;!&TIl-q>j|vp zqJ^%@AjP0Hk3N&jrAwD$8eMl5XL_eL;aa3)md%@kb<3Ax^O{uzSu3z&;Q}nBwiN_Z zO9-ZT-v)xOxdepkS1iXmjt`^zWiVT@U@n%^F)OKDw{-E3`uw@rM8~tN{ns+Kecc-D z*}PGdyEksc{>|I4dHEVDcjCmpL)g7xJN9haj>CJ}aq{qSoH}|Er%#;5nNw$R?(BJ7 zxNs5Y&YdUipCIeVF&sb6fW=wZIZ5apT)%k_SFRCYUA{q3^#pewKF7^_&v5tAD?E7m zmg>)Oa`yH?Ul~->Fv zk!?MQz>C-R3$V9`t-c<0qS~;b($+v9jwVKMrgN>iZfcAm4_A>Ni*sUm+E~JuCv4ag zKv`K3q*)`1KrYJN6~2~ig)$+?vL?79fFh_0<=KbqVl**^uZan~rAF{IHi16@Rxs_4 zbfiJc#Svk=JddR*ftVRW9qBwLYl1dQAt6o#XRg+yP`RRSOdxti`XMvi2Yq9M&@Yh& zj<^t{hf;s$=Ze%2Z)7J%Vn~l<fOc0hpaMnFC06k+v(LI(1 zjMxbDNQ^^%Zhx_8b4h+7h71}kmU)>nc`7L-vxG~WCw_4J6l*tXAd@PSSwt5Y-wVv6I*i5pF1PKi7ip=9{dSV3?K{~Y~f-c zW7i;;3|(ybdPiWz5XQ$coN#U}wp?)x6GMw|!Ck(9yLayjixsa)$Nc2`sq2idSWUr9j?RCZKVWQ{RyyoW5c`@ES(mGd6UA>I>8@PMtNX7!PUq-Ba9qq zh|xogQCDPvmXWTQHo_6phTEfcqzl@r+%U7k6>TH^Fk^TqCgz7=WZyvKC;Ov!gfERP zUWgCyLIOQsiBaAJQ3Uld&H}VzV;qs1;(;FN1nh}sNRE|~B28cwYJkunJp}q|!_SA~ zw-n*!+!fyTT@dUjk4Oh~L|SVj*4hwhj&{g$_d+kXVDxo~K_BN7^sr1vvXrF<(oA|H z$tVr6(gehrB_iG`8S&PsNO9_cEdRb3l#q)N{fbdrT7hxH%1~2A3VTIA)KvFEc||hv zhlU}qgx-^UUu5-nMM_UnRM`1PFCBQ9I3U6y6e(UQ$cgBUq3L-TlUqiRRgKAIV=%F>8pC=PBP*m2fmj9t zEz%KSmPJZcfBfPf8G`VXI3#EHM9(3)=u?u10p(nH?F2owPB7FYz|!eTkkwVV4)rwu1uX5ZR1#om z$U;k91{wres>&ovvQU)o3fZo|p_|O_P*RkInsPU&D0Zdx&a_PsrX~j^EqN$OOrEf14BbYgoj5UEGP_)*7mR`twKsbWkg`5t*Qx4f-1IRnHrkV|E3FN zMHLa?&aOgpGYjbH{YcfoDH_7UA`luH0e2TycyKPD;2`wONW;K>eKBM}Ka>{aVdSt9 
z5wJeKrW|7`OHoT;)i7p+kf!m~Xq-@s$<33+ES=W2Hnh$nD4NMD{I_5xb@pwu=ZLkv z8XB9>*ffdYs}1w0^XL5>WIA*9EXnx=E zZ6nB9vxN4OEM*W|wnSLLShlWRjqU5{zE-cqrd2DjW5YV^qXBFG_N_Ryb0-e%*o)08 z))Q>)#QCFVaCrAY?B22q2X-G2vjNW$Jn?M6i|4P9hJO`TuHL|<%hzz~^m!a3xZ)*5 zPM^6zZPy8|?&A7w0<7xT>MsbeUVp}`ci-@)<2wzC-|_Y>$6bBJn>Szaq2o_{e9u;_Z+J_9 z#q#d$XMA|~86QbLydmIv`4%5vci@jt-|*MBKk&b=e-K>#fxkGHQO67X{pCJh-#viS zyBDHud;x~{iAG*RAo_%QAl=&u@lLjgBoGR5h%G@ThhB@CYH|mnS9iXuIXwbg{QeO0UEpL+(nv&5Jy{t z6DTEkdmzfyi2%zKzUBm51Yzz5dT^1DwoCx$V`Pj_Ya4{y*&&PIEQT&VmaSnw0gEkKyn4YE`p#FbUd6K~PelGNcKtE*arz0)&BelTW_-^KRQ!Dz zzW94QxPOO!hby>o^&GBVJc_I558&dNT{wMo9WI~QfEyRq(YrkxXWE--tQm{bdq?5O z))H)5*aNHE60mkoB9_gF#{A|`v^54|+Bi>)A7=Xlum%}nOo1h83#`#l;(#f`Tu?XI z5|fJ^(NtuQ#sUXS9O91pA%O%8At>t+ggk!MqQa0x<3f6PxJdiaogh6u!5^s!K1fOM zKw7dF_3i%1N~d}p0alC@aS=v{3^hVJWLPC2)iMSNX5mP%ibR4F$FSm53@wR8UO@nI^4!rk&l$Z3I-q+ty-PjJ z5R)uLc)SFm@tO#TR)Kp!H`uy#hMCnbFft*L{sI%}udp)yo%#WJI2fzI#Y7)|)~pBx z(zu(5evt%NX}K8LYcK)nU{v-Sj1m2ZprGdf^o+?uv`-5B9TMSVn}lHJO#BiWACK4` zndm-X2>O){!=Q>P6w=F8GHw*gYDbccKtXjrdgf&yGBE`1)B%{97(icF9a23Sv~^S< zQR_xv)fEN=O@``SV5rfVAd5t+vq&evu0wsDE`Wc9 zvx5UYED>S>)6pZ!Xdte|=Cy5T-@P1%_N~Uz0~>H??^^8IvJ4wn&BIcjUf(z#<12=v zC?}g9$QbI--QY;zVWFuBb5&*7Xww5{YKmCutP6AcU|QWctXVV%?OWI4(uuvecj+*m z-#U(`*V_rY*5lyDnOHu3Bu!dh8es z9Wfj=W5x(D<7~_0$2XvPa*Keg1@jgQu$nYsGL|h|jx{USVfCuj)VXgGR;lLZW`d_h z*tTsOcJJOzgTgjpy<)&xLNLY4>#SY3R=^eq!>^>z-o$Yz8#ZF=`V9i4)~{SeP_=^U zn+T{@V-11Uz8yQn{tc^EVGCVr6rVt&U-9<+AL!^H zA;^07<{LT)us#u7eg612K7M3N)^{3ozSAJ|9iKjY$7hmH@98|cE@ypz_4Fm)JbQ(2 z?>+&4{)2!1{2%^!|C+$-F229LfCm@0W8>WMXd73ENkjXiC?x_h4rYk5H5V?z5P}$X z+Xd5m?QUfacY>Q>P8RCsDrN;TSXmSJFjQF*T<9Q&8A93H{R| zkdsRNT230u3$lgA&sO7cRb?1MGOlKXm{2l&Xdz092BV^!H1vt}Vokj^dWU$T2fG3} zFBa#(Vv82nf6gF0cI1f2mBo3n?%ckOdw1{P)~)LTpg16%L5abG7jtIe8Fi;m9T#B4 zz{Mq}GvGv>oRWdt&Yt<;G`TqS|VafVw&APw~B@?W?e*FsX-haRw zj>95wVo3VGgWMUq7?6HK7(*79Y~5mrVha{qtyp-Q!HXe_%Z`6tjC>6)8MfFmcI(bv zT)M(}y>8R9%*XMBj~}_f7^D~izkL2E+87!cu-;I+uwGrej7Jad2p8e)>nCyN`Y~L; zv>!+IFT;_23vmAU5}Y|S11I)0;NX@j9Nb)nLz@e6bXz_SZOX-_MX6XiGZNk=4h;YR z|MW>jK~%FE{7^sA0b_?*Vf=7g)DE>p^9Xo0+sOf7# zr6npdZK%x+!+Q8*2tgFP*!xGv640lhS4jv&jHAj=j#``IF+k0nyF%#hGS50Pma z2#!^PcW^hjdv}GKdl$Hqc)E6__q`L`ZDrtPr%Ly2P70JOg6({e=oyE+n7$}aABYj% z`=cZ+3wfy-$W6~c@04@`%_M{e#lg=b8oq8Z2=h(FFM;%9h9!4LQm=u?8d64pH5Nl_ z8&TQNgvyC^D6boZqUsX#8r%y}$&qk%cZIRB5%hR%IX!LYYpX(6r5p6sWMQl&2NMD( zDM6Nj+V2EhzeCFILINwPo-7P>WeK!o>D+DvTHTKn^ zaH4Cf(zY6bma0-WYVQhp5(Sm6P}C;yGL(UuIRTn2ftsTnbleq1GE!52J(&3!!rWI1 zD}Pg12bse%$Pm_HQrJb9!y()T&S3;q0Zy=Vv4II`tLD~ruqH~iG_!@3sWl>lLeVd) z7fJ{Y#@3W#CNCnnbSg;;mM@)wMGK~4){Kd0tgpt1(!m&%lZ75h@d)?xgR6}#%=8SP ztE>iH1tsVy$U{q37CQ3sB5*FkpC06#9E=}55{u@v&;Ys;rw?rvS$pqYZpXC~n{jyS zT&$Wk28|W{P}D0NISF1!4X_~%z=$+oeMC7LBi!B?u7+x)?V2Jj%$tBt3l{onaJH7h zR8O4%mmo?<9eSFo&`?!?CT&afwV=ylD1n8I6^zWx5E2%SwDj(9uy=x)u{n$lOkiec z0%Lt6=o4766-!Bu*B6u%ncNlR6<|(-g|D|Sft5RggF+A%8jh%_Xr!g2AR#7}UcwXs zR|9hTqO2&NzH2q=3D`$d2R^Ez47D{Cs2g30iQ}pcY8B)FPCYZ~Uv;6kg4AZzXtESk>=M3-RMl2uquu(pDrXl83G zX3=1>cI{dMsts68;IxszYxCwU*t2)904#xjEV1t!?fXiA^^LZ_<0F@}{hlE1J$=qwy7sdNPw?{5 zGkos&gulQ4h5vp1fNvk~;*Sqk@b$%699lOMGirulTE$?}$or!>H3muUj)-!wLA<9M z!d;!=Zf*__&Vl7f$`vV4oHW$W*%3ZY4g#*M4E111AY>}hfvpKCDR$QIu(zf6*b26! zgc$4Tz|24*0E^v&LBd5y&%c!!b-WIU=NxYyF7Tvt9dxzfXK5zfe*PT8WoCjP4r(Xm z%FDnI-XuPTMhK#Nigt8B0`;YF9H&UT3HR8PN&@V0! 
z{S$)GGu#L1!JLfPmjEk*H29tvK^pwv?n&qq8;&e7lQ0rF)K_Pd^h=LIes(g3^iIa$ zzR4&Vn2BNe*{CcTASRd8R+nLH)lk%sqBFLJXWI@X&?>|9DNUF$Z3ElN@N5FLG z;C`Gqc9_8Ihug576b8-{#W9y0e>tUjlGxt4eVdq&apm$Q0Vllt&W&qV1Z1&Q=;4F= z1Xw?o<6+R^^#~cT-hcREZDJ^T_wI*_?*Rc7Tdp4c3r7q__a8nM8QY&SXb~)NycW9# ze*)J39sJJM`Z?o}ul-)Y7bzD6ZVX)aA5i(=0X`61u{)EkZ5%W9>g6*$``2B_R;{~t zZqj`|7VyRQed7jO>F(m;y=%C1?f|Y`Y{!FJM{(usR$M&27B?@h#l_=uarWR;TsS<5 z#)nC`ba)(2?;VZ<8wXXjsrWhD)gF!KF!U~*8u-`W#nx3~*^o~hEW^|meVx`6e zAT5rby+j{mBzqw}$sK9&4oHbMMMAg{qJpIe4KP5EpC0^uwF$D+;NjMdAnP}HIsJ}E zZv~_W>Yz74*nkLI6vTN_>52aIJP#tM8xk3b!J$zY5*mY2`rNYkR1`$UV_*n9%Ry1d z@{2;Mdn6LwqLJ>Ki0p_Y3{FW#VJ1D>J!9zE4nu~1W7$K5ud7#gk%k*CTk%zQ4`UjiU{?VN3@?ddc<&? zqYu5Cp2!G_MPYI;jOfu9BQkrTC_PoU5G*nOBOzBgdm+;#7>8G81EHFdEf33B$*XLBE0m#H6Od+0z?R>L7IWXz@xLtX`HGD=XDA#jwT@6t^LT8f%5H>8d^AQ%Jt_7VA>=FMot*46WH za^HF!*}<;HshHbPgW8f@6!eHiZZg4MVi0wxo``nm8Bh{98*0K%q6$kbWzxdwfe7#< z;x>aW{f_!t1X3ExbPx1U&)fc+<=52IjQYkY zm_3hRWA#QXU9k?U)@;IxRqHX0plAv~(lTn>ymhx|Q)}Uq9T#T)6#HhLk zf~!fGw`3*eFI|Z-v|ZOQ1ud;}F_o^-JdHqW#vCkNz7~s?tj6>i^Ra%zHf-6po8YYt z6Y1E+bnd$KTd;U3Tg_+?*}ez6_Z-09eTT4;K6lyjHCV7{ITkEjhBa$9Vbl7}*syvX zHj?bwzKcNWAhvDTj8#k6vPGb@iNJ_J?BK55*iEptdG#9X+rAxJ2(YG2nk<&*S+Qif zScYda4O+Xl@4yZkxOQ#Zj(t0KAg9f>)xOC|zfz<=tyiJ-jmACIb6>{;)ErP4FIB@tRjvPOWvlp+4 zwzC(glfQNc*KXebukFsm7kKjGJsv)LN5J(O_ny#T^_=PiRJWhL!5xwZFG#b0^O@l4 z8^P5dc=z!y0<6E#@t%aBs)OBvwEf}3-z0wuu=@DnD?WYv0a@<|s$Rc(kJm40;ClBN z-@g2bzrOvAFLcbyXRq*>*DQSb7XSS9760HBzI?obKRd4AcSVof}O680|trkQL=hVCd#Zz~+uvUUQH@G0w{k zX@P#o2=OBY%L8da-XiTn77a*!<04ViyE{e|=AkNo00w3xAvHVznQ;*)$mRu~`=F>_ zCd!8N#mLeDsI4f%r12GKo=}PUQ3O>LJd5{7iWGL$k0=?0^3p-1Fb=}d!hWbMD@1XA z9)^{apr)z{!%9m@Y05(Pv^12^STJ?+WXzv4hm@jOqFl9XIaV!ShGmNvi*nD-Z8&-S zFhRp<+`oNYthIOY*dg4wdJ%VSUBkK4$HjV=y#LP4t2laSKkCPiMOkqnYDQL~xv2>o zNDm#*OMt=qVD{RW;td5ZgY?%~P9NA!%m#+z5K=sDut zQt$BP^A~!)*y{D1;O;4b-ZN1?qw+C<)ooHJZqs$1(luTaT)lqvnw~{Yc=`?BzJ4c2 zqGys6i4O!?pExFq+8MAq=$f3&^y5dK75M;9=z5<&e#7`VTbYA33%ZL1pQqG$VtlnVs)rh)vOq#& zi1IusO8ZKY-@_1t(+rUlt&4t9dgvV{L1v&Ky8Bz8hrd06ixblQJdhOma~K3w~Z|@Nrjy zkEVQhHCVFgV5$g$bS*9OsSxk)9YB<43R;j*=vj#4uE5C16ayR8;jz z#L$dL3`q(>ZcGq*)AQXU*q^QwfUKAhK%$f*?|}`zz_L(9>^c!jQm1- z6qeYcWVk(sj&ww6l|70_(sq>%3Pze?aJdwD!wd<$NDAq986w5t!N$lNV1(?Rdgzs= zkG|c_>DsOskm-ZoNnYre5rl$X(HNE;hl)Xos2CEDp?TpL(mxQnIRVJ-8;G7+0VE;F z&W^&s+ywmM<{t{5h!}*YWg>0B5DXeogW=;FQQg#nF_Q?g8d@-7)I{_fJPgsvnQ-v( zg`v3>wDk0$tg0m3cbWuPS_D2i3Y{TU=>k&?8JII@sS{wSc7c&v7f3k;z1r_I=y!rX z)pb-lLsOv>)a5%vO+f~ls=R=(3e?q<39P6jNKj+oB4NnVP$$q*?na;2i6HBDsA>>k z>2`&N9*L0*w9UIg(_97`mOp@tGr()vDMQCz1*Y!$Fn5!}%$Xp|$pn_pRs>fLFtc!h zp`J6{zdhZ*10>3h1cQ#SF?4{t6=}K7ypp~b0{sIJ9vXpYBKxSYNQC$Y!{5sf?oMv7 zHnW1MK0%2VgOxU2hoDnVg`kS=TUHgSa+*+*)r6|NF4Pqy1fv`$WsX1(FAVIRNgZ4n zX&-f1H@^idTM0bs#$ZzUFjNifkCI;L$V(@1i3*|zlml}u#R9R;riOHnx-iyKgPxiS zJZRAHqfXgaUjkiCbx5={prft^ZB-TMQYq1-Z4F)&+5|QtyB(lvCTLf=DEU6=k79ca_GzOEk3jg8?;P?Zo9iMXgRc+eo`?dpIK zUk`$-B$N#5M^H*yNOlIslow*+sB%mmUxUWl8nHA-x7bbP~RnvEb{|(ruHE37fG=_9{tMi^^F)+)AG{!Nx`V^V&g1yW zOH`i2-uB}-f9W;>)=iwdcnkL*y~3624{-d{C0xJxNW@eKJg;Gf6^fMhk&iO zZ+MFSJG|i;h3{xUBl!B``~UErK#TXieZ$k|KjH1i+xYzbCjR_(7w;dP#fvM4uzKn^ zWQTbp!PN#a?)C_@H;1dK9vlsI;BG2~kA)drrH1gZFoh36m%F7ITuhDNL?GpCVj|+P z+$}BP>%bY?tzc(p1WS6}tt9%ik7pn{(0R5ZeFNvmawR3h&DaDU^gj5Qm=iEr!H*p`< zmSE}a%U1%t9^ZdJ?N9LV-hBeRYq(8dcK`Mr0-1YwO8Z~&@pNp*o452V(lbU-_3`~D zeC+sy4;>%z4$--9_X9sir#77$j%DHfSeHI4-7#;UJwfNd{H#mogm!>l_T6R zqS66XV;nJRf&*&mZBSKjf$ByJRMZ=xyv_g>b;hU~Z;Gn1W*9lj9F-$2F=DtSozG&6 zVMR_TD{?_up$91)UMMag$@iwZA1a0iW7NnHR8{z4Xo)L|3Y<|`=!SxPPZSsVp{y(z zLx%?A7hBS{Tmk|R5CAek&AuOxxU62t95fgMh7Ct?^=J$qU5oNjbtoEEjjWtJghnU9 
z#?>7L)Tw9_V5zC8LPbFq+5|q@id~>B{~PoaJ3*@28OCZ1S6yMCOkkzl2?naa!%&U( zY0$O^i0=v=rLNFck|n^AgEoPcmIe*#B*ydLybbzr2e4MXY}4fPCQW^4)zGjmu_X{>JuJ#AeA z7oKgWLlC0{O(nW#IVGrdQy{RChf-HLD0fpPxKcy6PD)Vdq6QUNEhx*VK|@{@Qf;Dq z7kgx9#9?GnKTIOfnn^G+udxmbnkHg0b&x~*_CQutFgVMzyO{x;j3sa|)`NqIz6i=S z)uqoTz_O4Uz{SoE&NK*ElV)w8Lr|nja3?PhHAMyJ5MW7kv|(mOaA{=@Q!{!ok`ho= zRe|2UdLoHBZGY-C%?u0#V3`>5k~fAhByg1I7{FLcU}kAeLV)LJFEX?HxVylb1}{%1 zTSSwFo!uh^gL5)P2R)87!9Tuz(005jW`5PWUNjy*@Pb;kiLBgk1mfU|;thwFSy z zeSlzV5BBZci+wu?x=4-^7#-Y6Z3JKK1YA2dZpM=N3$bj;ax7dxnm<9-hIJdUV;c=D zbS#6_sS~96A3r7PJo}I9$B&*MxVnG~7p@CPIzwP|;mTc{B{;fDKy>ZaW06vU(W0*!_~cquc`i;;OQZb(Rl}toW+jahlO0YeCI#FIzzC=fW^?ouy*g^D{(&O!+P-e zH9^%UJbd~d_a42)qh}xR^38VwtH1G%fa=2+;QeR(zrpJN76Pny1W+CCJ_@j6$YO~4 zOu+RMvYtJCg~yLy&;a%ke{uZP{~*fme|{yndO{#~5#PVu!M9h}@a543yuW!0dlpPb zA%Q`#tqELBbYZKf4l5lsI7{_JT$Hnc1TMw|S@irfV7YNj6#*8{9yF(S(URVCCjusC zQ!_Xin!ujk0ar6i_}DtY+s2O0vw|DFTbzv4!O##cQX>K=Q+OE>v>BTr+Q}86Hl%P! zX`A}TFb5}uaXvT`Q&_2Mz*%1pA@+7c+>MRkZX^|e#oG*5T<4Syah@K?3=2h{s3>H| z#-M*fJo-dOp?gR$dWMBza9S!V2K2{h2AYB)q($cuT;*YOSqTAFF};6%kk>bhfT@5$ zs$5LeX&N^YlWIqb_pyG|@c&?nOOc0ecmc`_b1|q-I{Ku=ipAG^rzapeE}DSK6S0vI zh=~Y?3*9HL(BMVj=V)sqa^3N4VSjHgQanSEmXw5^8R_Vo)dPe2^~LCs<(M(0iIj-x zShsSiC>PIb!=7!M1xy`3v>)eB9mBa3M{t_0TbIuhNG(ERT^%;A-9T-p2*QpC&|+D) zaxK;@UyV&`=^Y{1y3G07E?mKj$Io&5>Mc6|I&NLLApq*tv*&n1V0HcS72LdX6?bmj z#0@GhvIXqoB>`OwRUhAV;Pd+rqI~!I4LaU(;!kS(_(eb#*STb{>L8%vlJ~uQ{z}OA zuYU;Gx^?3gUJ=xN`t%7;pFYLat5?O0#&_@D;pNMhqJHZZ-Q$fL0<5lFxgw7F0a@2@ z|L#>hxO*OtAD+RRR~PAC&fwAgy|{O4hXAcR*SF%*nUy$qd?Bu!oloC)9`0Oe#kI4O zaPDv|+P9Qq&B7j-(-MR!4UVWAZG@`f+89=>fs!Fw7@Vz*zNwnX&5)v3oCHaMsz~%# zLxQh1i4o#`OsTX$N`NggLS2vj{k7+n&9s)A6I<@#etmOFC0J7ZwB zHwp#^p|qI5tSk(bWg(~>=8uuXy)e4U19juQP&>gH6DPT#sl^42)9p|{-3AS<)~KIl zi3zjJF|pMOjWcY}*kX%@W;--AIiQ|kZ0u+|jID9Nm>Oq{s&YfkNDow1dSPUxH>yVZ zqO#HlBS+HbjP}OZT5nX3a>eiw4j5YIfMLVjP*LfNs_Hv`#x|C)c6EYhXaGVI zViA#&gd`etvjz@8&X6JKKO`T$`sLD(nF4SBAXwTuLSJeOU5Oqv2(VNY2(Ac_bd_YF ztI$aRmLUNU$6-m;39tyH^l95rt&51yGS!iTiH-uniy{m)6bZJJVW6o734xWinwqe* zFl1>kU=cj15nyT1F&h5@ma=jesL(alHD#f$BMW710xBH_ERM_S3Kgjgl#K~)OckJF zszjir05yUvEh`1+TB|@}r2$SQCI-+k%G)5HRXgd|#;&`?n(IMjvI zkYJd;gB}esZk}!kjR-}L%nTG1<-?yeWN(72r1)5ZNGU8xqqniLC8#nZ$dU@c;+QKt z8yn#sxKOOy)b4(F{ad2iG`VY`J1VY<1nL{G}_4% zg=RZt@?^|vZNrkKE3tONR&3a^3v1SI7htt$*?Iw0T+Ugz3bPli#8LvN^;`B}GXc&P zlFi%Ov3BDwtlqFgz)%x`)l>qj1xwcns}w`j>_w|EZ|Pc0X8^4rqu*;>o@KGuiU-w81@}FiOt*gV;y}iThJItX3txW z6|1%oTph>31IKZwonuLk;4qaZkDQ_bW-R(F|TKCBVheeL6g9Ki? 
zx9p$+XCW37T=9B{+X=jO5r{Eh?b)?gz|^@j=W+Vv839;ao+q$6d+MxMBk^FIQz{=Y(FldxdDC2)H8_5(h= z{~|0{3|I_WY_;Mx2CXLqSPWVn1Y@5+eG~gWeR+?se?G(OcbDpvlG50)F;R2qC31#LZ2tPv|I>iiD&LSpHVl2(~6bBCtyK@wCpH?rVxmykyC;VB&qhvK5_-f%Au}-+sWIW`-!mQKDu)RxR@KlU0-h>M z2*NnmTzR2bRGn8);PfaZgZiLfW|Dv{hOe?AIT+9@4SCr;ME<&%hzO(*=Z(0?Pz)K6gE3Vj&@#CJbz`bUx)z44)yo!< z^0Ae`X(LV?ZWk6U2ChwOS7P0&l{i2O#<`Pcapw4G+E2>Gp<~#xelwOYT86bN)(F5l zdE|ruth+bv(ED~+q$Rm|^*YHlJbCa?z|{o;sw)>Riu$c<*KzmeEj+q+UjW#f7cT{5 zy?^r-pE};-1HlzrqZpVNoEWSaqJ9Dwx3hJNh1;J#W>@1AJbLh$V2GEHVyo3VeE$3y zj~+exkuu`(W8AxUPgt*57_ivF#c=ggI8DRNYv*w5`bj*vcapAs4sTwb# zriJoi9TW}HL2gfN>b-epomrRYwbH z7#WGW5n&ip7L4lR0E`&ogVJ0N6y>_1G~Wy5L;X=*8ALD?g7KpQQ9GK>ALD`g@ouEV zlT3ERlolr%6Pz$@jxCz!SYyh3D@= zoal%N>GG)(ry1p2I+ads%LvL>i8kL-Y_|#-1rllZ;UNFDF05~{0!_<@p zZyI#E>M(^=*vLSKpu)xB`6swL&;bXN+z;U5$;2IXj`g4+e{PM1S`76<^+ol zFwl1=aB_pXvOR&48I-!|Lq%Q-s>-TRB8XB_k|Vg|I4}iKm*>1)GM%BoBHKme?vn4) z2?`=3y$pd>H-aoWN1pCMRvC&iY{^oBVi!5c{niQU-DF{`qapHBdD@r~;k#o{&t&Q> zdx^M;{HzT0PfI{XL?FWGe{rY($I(bagMkDVx|+~amWPq1I$c8xQY|g$5SVHxDO0Hd zb7K=Y+1bO2Iy$LTA0{S7BHl`BB89Vu3xXm-5Ec`G#o)1pfQ~I$>>}K= z|G20hIDAHwvkBTZZQhGRhfd=NwH?@h0*463j#K>zl?V49BMqG3g#hgEzQZ`yegtQZ zoyMudCvb4r0qotemj;t{SiO?Kj)dWgU4sl-JGbw`iDM@PP%&7s)r#|3T{w3UXV09Y z@)C8Iw(~2dMjuN;rq;Wcir!PO^*{e@@@#c#VwruhC+Yf)@!xx4uhOPgPeEJjb zh_={z#a1f@C$9elt#4od67BChIEBN<|14V1p1i{ImrwEH?QMMh;~~C&pz_6ae132V zf84)}dxv&m!T2haWTl{QLO9YR`~_qMb8aa|8-f`pM6q(=?jit-Em)jTlmW}pND4bc z1K1iE2*7ePw}PF%5w)8NzzTJ97c&n%t!bZ`*^h;x4W;m6OO>6Y0ID!s2LuzCu_Y_n z+ZVBJ9t2Ld0SeFh6#tzz8W5aH}3;EJtRTxY0aVJjAw3{^RCu>!EzipAEf z{)q`l4+#>BN2iB}pa;9_Nzq9N2}DkJQgpJjFuZUOMh!1PVQydK_sc?F-|iTa(-TDl zdW&tYm*(Z5qBu`1w*I3&2op&mnlr5lt&=B+^bIBXgM_=Td}t{~jjTj6-3Lp*KE2^X zFleOD3%eT#;PR&LBh}+PdzJ{H`*C%!g@d&j`t{BffHkOJA2c`AiDj@@*s?XZwFSox z?jw*oDlA&;N^Iwa$@lERo*lb{_39$05jlHN2!qzv4O@h;W%$s(gTl&n{?s`vTewuX z4e#H+D*)=!x$~s#9Kg9#rv+58ux0DPox8#<`0Nou5go^F!WU1T5?sB*mw(-WuU@DY~-n}@6j^_vQ=IKs6f4GJ~ zYat%pY{P@AEx3BJ7Dsj#V*8rzShY9|vu8S^vEGQ_Lk~404NzXHkNg3;C?0Hz0eub8 zBUJ+#$r?zD*Fbu#4$`6}$cQmO&jb_nNwG%1G<)>Ra6nF`Bl`5TN1t9c=-ba0eFs{j z*FZD$9B7Q51Ek2z)<^eVx=1H@OUu+jY7cEBXJ{fZRUOH6{gh-)`W!7}rRWma8Dc

FhO2-bChJ;qm-nuj}?aJ*<*Z#A0~|r#q^2Mn9&$ZV@Mb&3Vx^==85W|?if9s z;HAO`)g#9U;W-hS6^aZAv zzStBqmKkH_3M0%~VS={hW|&2=HH-GQ&b318Y+KBn<$!4|_Gp}BhY1sGP&?j^z{&x& z6WJo>K|tk>+6E^YUz|x6~Aao)F3g^g@v6t zoV}dj?&l8gARqVz`x8|8!Jov}&j*f94zMt@fRs8Oc8eKk>j)^)ROm(kB}XtNOOVwS z+KRtJQ|?y+j9;Ls_#3oTIzd~RW3IXoP{|Nv$&n}!R4LK6GW69|3AQw$qe_6KqC$YB z0c~}4VSQ0ml82g#BGlEDpvHNy2(A|fN>>I-5?Lte%R$+I zz{XG&%2G8b8>>LkxEmBr2+Yi6ppm~Wk)E;Swdb` zN~JErm8J;rR#4yodj%*`DJLfjS(&Z`RbA*f2Cc5>)`cLeGeK5Yx)<4QP?e+mmM0+N z)CMvN==^IZ$ad-iO}d7Gx(bZ7)nKWo343D+{7B1=@FqilUA|@*+C;(25PWTB}Y{Al(7|?G+|F=XR zp02L+vLuP6Z(_oO5gz1+?4D_;99lqdRg9`(g_t;|0xgYWggasagBA^XP4#0idC~;5 zv^0wZp37IQ#=1>gv3kQ+0;P=vR$H)s+dgdCc>rs-?!kr~?bvtpEDoGFk1cx+VdKtr z>^Xc2dykwE``PNWeEl|T*uI~(_hQ9{?F36Z1c1$7_QR63nt+Vmfs0pd5&*`>GH9_i zOPoi?vioov0oQ_M>#>}GiQSTHrP{Q0p8%{4TlNar;<&Iw1Rre8Vh}rV<|_6dJcW6S zR*5*Qqesu<#PLfw*nSEJNRAvjiz5fm;243{DFT?2^j(e+Y#rHm1n22Hog+E8`=GFJ zZCJYrE0(RorVX31hG2`~YB#$P_v{z$LC#~vkadOz5wT_!woaOL`a z;R<966@$}Hh+?pMPO$Tmz=zA9pvBg#Yd0SWci_#6vxhXic*=~@p6;+}AjJHZy03|JiK&M?*S>F@u)eEfs7d>Y8!e-ZNb?FYPm z)qyu}KH$R#-X^G`ZQl3!^Y{PS7`8e-yhF#AC-~#fXZZZ?Hva5*fWO|{$G7`e@a5)3 zT-mb)(?$(Puhc4WqUKs@GxtvIST^zYTL{wqVjkQi>WI zuy*ANQD@i^aaK)@!m4FXkjik?JF^E;3I5n+7#ipgFIOj+lVX>V9EVBuwaCgy5iUc9 zs!^50QByGt6UNpEu;RF^Z5!8;tihh`n+dMAlj8BiO2t+whAfV^Vv82XU$M1{+t`A2 zo!$esZZUKnJ9v~pjNX+KCvp1t37kE73fC@O78Wgb3o>+ZouP_dg!gXU7U0G8j#sbo z@ofiz{Br?MKdn?x9z7N1n^$i|JHr(p!@~Pm?sEP(x{u5BS?u~_aAJ2LL)FEL7yq+V zUAn|d(NzIg9FxWHb^G>hkh8t%m;M9QeKa*ooRsFUZxn5ZG(bbdlU?CK*1mf3>oZz`~rIn zDYnJnq1G5U)Eonbnjp8-7`cUp=%25ToWXQHN#8s@^ckp!UbzzV&e1{dY#sFNql=tA z5)8~XAhlg(`qP)Noqstx8RO^K)6a6t`QZQyr4kQ?7m!AVF zhgp-@5Ue{3E7jOh4j4Du4h?lK1n%yb*5rz5lblHGFnx+GX0%vi#&lE6nr((T3(e8C z$P}%M2&$Hntdyc{wEk%NmJ+`L@i;q4ArH)l9F+QFVQ5qs(Y%uP*6LotA%wjLyEnviN~6Cm--I07rx zZa;EismnrJr886su2kfHff@srY8U9J%Rqh_ljzp0zmvmMtVE?l3U&ClK+2s;WEW?4Dy9N{lBgOUI>l}M|eUs0tl3%;$sj&z~xMxqJx72+(;w0vY_8a#{jBy zJq~iWB2Ap*G18L~(6?uI#7Bi8fzBxykb}BW6{xK)$Jmi&^nz4ja(y+%*OZZlQHXku z{b-&*8bK50%$+52N3B}B4lCDh#OjRXSi=aa zTIVkp$Lt{3Vapb~3Ky^3i0uRyY^h?n;&SJnBLslEg{zWdyco8QpT2@!1Xyzxti;K) z*KqXYB>`Ytb{xc!qnB{%6m|3jS;vp_Y`&{Fe)Iy49zIWybP1!9B0Om z#lXez$biMiv9<2d(Q_jHis6do?t@pjdH0!!^Wr6R9zE+2)+@GZy?pysSg`)zP{r|C zKZGq1cIcOgR+y9C)i$neF-u?vwUEieFmW1@u>E0e%0JD&4ir6M~yUI13#ltlDNNkDHB zb`SDold%c%SZvMW#o8Ojj}dwC7`WIac=5~$5d^<-$$SDO2Ca>B?N!*k zZWFyH?ILL|TdCNh#Sq2Nb?qWS%LR_Fy73=q@&4W0cZnIeCyyQzK*i zxCz;P$dJV`S$qt;4t-3@f9^E^O2X{~6 z;k}~-euoLJj^pi%Gx+r8B7xO8e0h5se|$KBKR&kO)9a0R^=J`Z+-t-At4+9ksuHIT z52C)N2X<_Yz^dgQSTN54Ez_(}KhYdj)dX5&Z85CE0s{;6kejcI+&paz%99{(fCNJZ z>7#J43CfDBFnp*jhL_o+e7FNDDjZQ!<%secJCxN}qjZ!7N=BKXxY`7T6$DBn43R(F z0QuzvP$LX5c(@_*h8ZBQR1bN@B!zVC0zDKK7@#!Y5T!#5P(mP9NOooH28% zBU-20p^c!YZMqd&rL zyz;M5pld5@bb_jGSE%XHK0SgbozB9Fr6`euih(jz39wWQ)rA#H$yf=BCh|}+q3fE_ zXIbh)mo!IRQ+w!{coKvJLt+pDO-+9&E4x8X-WsyqOdu;GLAP!iP>?4`Q&cC|QX{A$ z&=R5o1qLV?CCGJEM7PfJko}$D>vuWGbygtIQi6hvBEgj$l)7~zaOCgYjiBgvXe<6I zV$#fYlwl{;g|D3rQo}>hH!U9NqRpsP+$ zOhBqdV5LJ)r6R9Hq6|$HO=zjGx z2v3YgLiZG8^y-eR>|SstsJ13;+tt~ffZ9p~xwD&)U4+gwn0UFlAu=?Ww9!;y#meoQ zg~6mX7w7d8QeHF&)#U}K8j(*@fVy$zq)AM~?Ag;ulW4)*1#<{kR$$GB4Op{j3qjEi zY~6PVdybyM-eYI5_xM=>QM(SG!hWhBJ%0^nuiwR~E4Q)d*ja2MDA|4F6!siBja6F+ zfYxrt+UarZ&2*sw!@6}tfspSmPmf^5BF;M%_LsIXYE1&gg@Y}sN<7lRiA7X#Lw z0|ayg68i}#w$L$4R&K&lI-lK^*P_M~+>_ z@e|i@;w0Vgxm%>Q-o@E7H*oamMFK4XB7(0I1YAex*bAqqQzt;%zxyx_?mL3b8@6Np zn$0+R=mga{?+?!uJc6y8cZ%}DIT}PbkJYt11Xv7N*9CChzWIPUX`Y?*4EG+t5rD=xy1hA9r_XSX58UNKa$i;&wnE{o6OHny1E zefaXBpAM*YK=O5zbN zhwpc;760SvNJNFV@%*e%7urGjQ}i9>NkGQBJ?5{;~1<+f~*L8CsDGS 
z(9euMm&y=(2L#&M30GklDK6{=WO!nW)lb(Tm)y<}l@S~y>OX;ttyt_P(0iL08i?+3QRu-jk@QXvpm&*_^e$>L+SY*<5|y9TS5trUR8Za;>r3#ZQucOb(QTd)|kIH;X1TU_T@ zEe?cVyJ9un-+JubxeM)k_G10&HCVfH72O+w+R0P6c=nvIVzFh5tz0LL9Q_x@&WTxu z5AHpr_w5#L-MB643{YG$MExvz9}C0R)yvmN3A%(E*KZ1_I&k0s_Uze%Lx&Ct*kTJ7 zLlu`S3|nmBV(S&hX>mZj$WBks@yn;T2;wf{>7(;__UIG=*Kxdhat3dopGU`w3;6#2 zD*pO-naUF+`$@Lp^V`*U|6&oI-JOP8muhh7Og;f@lj*2Gvd0sGekrsz!5E)Ec3Di~-6k76jkV;WP}b% z%OxlqF2Qh;isAb79O%>YK=onT7)AFnalAPuH(Fy7U320@BQ!ReV$x&_Olr13<0K1A zr0u$TW7JJFrMd-ympPiLoH50QfXW_oX4ql&3>zvfsWd}tixFndFu<%fL$uA4qII4D zX3o>c%msRwxkMK;mTF_>G9AobL6Eh=7;_ezVAeciOm8zrGo4%CWJoZogR)9h6qT#e zchE#xl?25jNjSEx)&MmVrKp^sOTejuqLB);PX(n_+9<20?>bT+!>aV~i;liFG_=*A ztEUB1(&)@cj7gg_F`_zkBBoL!NG19rv$s@21HF#680-zS2#(a{2%HpX5avwb@{mXf zvIux|^<T2N8aq5IN?qMQcgWz`@j!)`zo0x4z4{HB1ezseD4F<_|@ z^bnZI%0of6Gc=VuK}YphXes^zbNZe9os1Cc?@C}5jI@Xd#0K~y*wYz4E>>`{Gk~qR zE^JKoVPj$_5*}L8fM7_!ySA#baHVQ0s}Nji5Rhs?OO@S&68a5w={M97?mekg3QHSH z7+aXY#M&GVo-T0kBJp;Gv!4gt0=<#aD+2|?OOVnd4Iz=C2#XAZIYG6A|7!yi*jhWl z-PIFLG;mncAmi@rf=Je~qr#D%6o=k)US?_n`u3m>y-yDe%Faaoz${c%bnn}6oKr!5B?|_u=X7x za3eTk_+q$XS7h75l_LI%(%&$MLgQv32iZ0<)6@Cp??)I!>Irj&m38;_}r;xOnLvPM*Aq69ivp2&&GWzJ}8$ zuaI0K;JQpebsBrx^d?T9zJ`6$1mRfCr@5>;Q5=61Xy1Puo$v_$p3fy z%?E}hhAJ-q|MC;GehOQ%{4^QrUSs_p3oi`K7Al6P^q@c_czX#e7pG)xS39OnhuVpIQnkR`x$mdR2!f|X-+xQFOZUT{5W>)+67(#qP@KxCr+HeIRZC!8?x1k-G)D7 zueg5yJ|{kXBrI6Z9^Vy!_58^ds$al!PC@ec3<1_Tym@i~A6{L>AMbDC&kvXJ_1$rN z|Im)VKJBFUU<3a6v>G2?&%@)p<8kd`5rJrT9NHU!om+jedZhyv%(ujx+18jg!wU72 z%`m#&2$h_sV1fZgQaNHYfmNj@MpWydW{e@mj5EgAaV8jFXO4zRwrHB_fW~Pg(;d*z zYLA9CJ4~Eyhx#^ajGt7?!(@_X zYM*L?sZ-3*GSwWd)2%RPrVZxKw8C75tLY|aBS31KZiLnuQp{{M#Ee-6Xlc_Y(ZjUa zx|lp$9SyUT(A1`kmif9g9uSPpkz(p}efn$zOq^suAWGk-hM;q}ItG`hkf>rvnHC1I z5EK>?WR;HAA(+%aeuW~n%VAKd0)`AzM?UQzQmRFwiC=ULwV|b}4qb^Bq(=HMlIlaM zFM$ztJjRBGRM&@|jt)VQ4h(d(V5Fx(prl4^3eeV;C7_arzElBvhH{V?$wA*l9)>0g z1T6$ohTWi}$3h@xKp>{C1YKP@k-=Suz)VX+9vTE`+FDA`)KC+H?p+D4 zIzgGhih)W&WMyFyhe3pM(Dbz{1g zu@ck>u+&Udp=PE|aHR!pb3N#on?Pb}3q2Dz=o$GDU0xV?#S>3uSqwDYT=<;i~{{Tx(Q5|YZ%1~F5 zg}!E280r2FGriy7X{&~WAO~bb`Xh_9&h*Gggp+pacSK z=>gww1hOYYUwO8%(AmV!d@C|+(J{UvCkHW-RQ}Hzq z#+|?Ff^YhL4Obrc?bvd6clCgqiwCDIIdy{*Pbl3wbtbBYVdRj(=-0QG>LwgE=!gFT zYtGd1Y?;i$y4Caf<5|I0lNTZ)b|NY!1kthK*tI*3XyJjH2j_Gih|*7;!Qt}+(Y4#S zc$Wv{`#CsIXq~;8foqw0>hT5QN~YJd9;uM^PF%T#gGAN=;_4{Db?F{kquB+xKqLu- z1Xt1xC^$NOiGU!g1V=L8n8^}U$@5Y`f9WP$(6_VxizfkA<7;F)0x!vW$ufbMZ2w+% zk$Ublp>*qBzKZL&h$zWmJuJn20_*mjJlwgHi@S__cXN3V&d0T@_i%;a%jN4&9>0L( zq%@p7c2;5P?yU#7Bal3G5yy|5R@1EexedU|W6L?WSVd`Z1rN%VC@Yh~@4t%7pS@9_ zs+WO0Rkz&lYAUy^5mY^S+EAM%A=xe|T$fChbP-C;nbexeW65O6&=gXaCG%43-gt$f zWVIxVm6KnNV!2(;6S?7Yjl!0MAWINc_e!!?gx>!ZvivRT7|;KI;#s{6`@ro8tfa8` z8Q~`NFH#dzTZ@df)e+^;kRF6lzt6&0t(%93S=(+(|P3U}fXaw;A~G;yhkf???U91iY+`Lv2+s z9v5#!Ue+99@fZG`dh;`O#-a4qJjQrnPhv~N?(#rTxGTH^9pDvUhfM*t@bb6C#vL|T zyV(jGwpwG0w=H(~+GB^01AP3PurtsV{=sh8NyO|7Zvp=(5BSHngnwKshC6(toUkLz z4qJkC*c_e?7MB)WgeHN8srRCkF%uJM;Wt4_{)&&(|73 z1W{PHJwn5Yop2k3N82GZQU|{v3wQ^Z!6(pyVFjNcYxsxg5EN{OkPruihBzW3)CrLi zoR8%4@E|?mC86W<1QwAMNMr>t{D~~@P*ZFTYL0Ee2K-DG2#Vx$p*F;oHMS9ooA^Df z-C&I+D+#HECYUzc02604$M_ir7(dGd6X%*^$|4I)U1G+m0Vd4*0^?_Yj`1^_V8Tp8 zOqyki$urF{eU2qQv34*gFiZ&|;>q3$_7ZwaIM{07z!sk^TYOq;3s@6iT5A(%wT7^< zF;H+a)BGEjy3b+lKwvq10SgDtJADDYOEcKI8bI%C09!|?10uMHFe@8k%KCGdS$+;v zi!WeeP6)|x3+7+K*z^k+8h=LMdgV!IvhVpqcq6 zFtqxFxcUU9wtt7I-9J>A*!_#R`X{IVZW!g=sVPxKz&ROl-WZn7X3)4;!rEC6t+Ok% zj-If#>i{cVCs=4Y!PvYtLFYoy*`kSo6~1U@OmG>&fS55dHdA59`DV=w(WFUpeEE4( z6<>VT1fTx%Gkp5@Px1LbKUY@`8c5e&Gk$KfCa~w{ZsGJ9y0tOD*PS%@wv#;u^k|2n zeS2eIuRiG6u00R<&cu}!932dZ978zTS`kZDu+dm>s%aSC%ES~V28OUUmvOkP6s}AS 
z%!oNNSQ=}HFng|V3yp^0VhdeouY*V1R%qS1J>1*1Li-+_@ypoZ7&&-JxAaI61hefkurCOkr(hjkc}Zpx@Wu5Ldro#4mEsrLPpQh7RnH zp+9|xF+&Dm>4F(pzGNnrFQ17mTUH_<&;`rq& zICkYaPOuevA>$#gXXoR}gIru9sIFx-%-oSwOITLDSXB#@G9lynbDwo0;C0wU=WY`ij2){)w;TMr&7WXZfBOlr3T zY7(;S=KVaxC8ev^%XThY&rl$fZC}2{R`dfILy8c&m5m#MsmvliE>Kyb`ZKHz6Ic=gEEM4wl;9lX;l`>-OiTK;?&j-Egc+Hw6L>h*um4m9&H6zPEP3PjPb1T^KVdxP5msv!&c)0LV=;Wd z_gKt^)7r%guw>RuES@<78nyfMB9$9?Z{v zq`CtoTP1Z?vRt4gR~5?Oz5+2BxG$D#MM1i zmS>>4{62w|iP|R*@wz4lf4t7af8IU7`{x%>SCNd$(kNmq5alJ?cwAUZ*v=(@hY+4U zaP~w;oH*>s@2C}_)ds04Z4tk_6~bd(5fJ5wol*Aii?D|`A+p6+gY7$sogg~|1luDZ zs3H6%h?c-GM+8Q?AUMViA@LpvNoa|%-R=lYbV6X9E&QT&@QJd4ca#=82$pRTT5JpB ze7Fw25qw^{-a_^8549zn91#`mg79z$!b*!^qA5H=hiIZID%u8-(ON`Hy;Q80AT&o% zge5|`OuG33BQ)F>Yi>i2h)6p&;2A_&L>PnHjttQwj3M2F62T#S4e{hJxMJ)K*TOf< z0^5U)u_MR?eqmM!o!kEvU_8CS@{2ar^eSs0g-^83Mnr}owDyM3 z+Z)5)(Uh=a5MGXsmIRkMZ1jYc&WKa4XJ-H%0idyO0!urB%DD;j9$e-@WVsVpt(wEJ zjWL|sn8KxvIow)Xz{%YNdM5)|6IxcbO<|$?5~kWOU~2s(%&eQhOxqNumQ7%6L2#MN z;GzG9iRGs-vHBNGt^NrkOG3-yZ)k4u37T7e0s{?Ur6sKNpTNTYZ$#5SVCD8tXc$&5 z|Ae{Y-x>el@_%vJ=UnzB=lR|)&0*zAoN#}1ZdTB{*};Y_Oe;HgSm`~8Mi1i81xDt2 z7!ohdOia+!xEY!mHzoQEU}R>fZj@|f(hSWFzC_cepX19WpW@3eKgAbc{2QNr`VV~i zufO4QPMb9$91RU%Zo>DoXaZ-uFVMbK6ZGt8gzvj+F}RPLy2HJUXEU%zUvzHisk#H5 z_}$w(G$(e6Q(Ftz5Lj9bL8!5UmAN_0jZL{tb6A>La{Hz*HMD?*sWnWE6vRwu02Hydq?kRpR!e5?sj?d}ZSrk#(n_RH5sJLf0eJrFfP=Id$Qt z0+c{WGE$OllE+e)C57&ixw?~8ppYb3x^}nWb&~zMawkjGfE_z`6-NlLQ_yQ$*EeoIi6FXSjd&h#9#8@#4Ab$jrz^ zUUngYRmcPAQxxQta$13^r}cRJ_!%mz{#Q}EWU&NOf-31Il#pB22)gQD{iYBmz>=#0 zWmpI4224M6S{3&WHcg6P*(TqHSzd@zsH?JWBj@TVq(u2)!@^0JKD-}>^lpn@t!&|}F@$;Z zFBnZ=&EK%90X1xc)p-zXEgj6dcs)fCS3e`<0;p~i#jCQh2r$+}uOp4oO za8W^3W3hW{3k^Ej+N&Ga9Q?Y=*-FcJ8WBb}wSUz`#%3iHox&U6Q zR$%+OweZ=v34WWs2(0ah*truiK|zT058%8%f_HdhSAr|Slt4?eRuAspQCThtc`p0@J!Ia!rN*}sR0*DBq;|oT zfJ&|!6imrT?~=Kak=*a!zOL4}ap^oRoH+%#-Bd&OV5S=9sxY_UzjauW$&%uBfz~PF zMzAHwk{YZ_e4W%_MTLbUGBgZ(6L;g(vEypU20_$;)cpk9A%!i;T1oeyWU?eKpTDRO zCBvmiSD=hRCs*@cJby`LtK{n5{V8cGWF5(9sqyM$sEB*_agSJ&Vs?R*U`lW$-GhRw z#)5VU!BxYu5;e+uO-&`9Rm+I-1*opbMHMksU6HBcS;a%tJ;}xE>H_@nsuX{|&d0|W zck$uHMSOU946kaE2&^C!JlcS~oW;nzKbfDYKhB-(j7z6G;Nq!vICHEuP914WShmOh z)YeEya#L5$h3&FONE`u00Qm-Mi3}|7g zm*Rn#WG>rdM{wy86i>`B{C5#Daa#C9Yv3DWjeuAk0%I7&S1|Dv8Do#QT~3IKvO{Qy zhGE5dEn>M|bc_a3F;<9;*CHlfgNSHzM8;_ln`n#J-2@cj6&_=Qs5m_lX3NIAt^s9{ zT4IY4sv)qr&0q~bg9bsNe1C#WV6~Irlc`_01#xAJZ2?B`2{Ok{uIsZ?gRMKPuxYau zR<1U~;$^0oOI*#EXM)MIh$;eW*u*a}e2O7P%`nH<*_IeTm(ZGPgwZpbV(7$AF=+HZ z@zc+$aG6+tMtpq=bKU2#(0|7GH`n_YOlFA zZT|)fhrhwf>F?0G{v9^%|A4OLKVa+mF9PqM1k~RNs!xcizj6Lw(7JyLYqu{r{Twze zh(OurmL{-oX~q50LE~Tx4O^;KI(t}XbwnTWVr@j|HG{Dw_sNP7)HZ{;4fmB(Gs`bv zZ2Bn-js67#Bk9Kc7n&RXli>Or!SxB6Hv1clj6a1=YYrznEgbd6=+xE{y}FvAV~bDG zvrTgh@7EGP_iB#;-MizP_T6{@_kbIL<>F)xXIEp`Imy7Frm&Q{D+{7Z1`Z|oj15g- zW?})2r8SHMSVmIRZUbWjOF~Nzt(60VClkW4Wh-ve-39he_HZM#I``;`?tOZqYu_ID zamYZ*d^%>$pN)}YN233a{qbGDZ_ur4SF~=`8cq&waI|+Jw%Wjht#c=PM|AJp89#mh z9lCaC$Is)5ZXKm2t2<^-9)}Gp7h}!x`B=4LF4nH&!Cf*~L0b?OxsxrXV8rc-#lFM) zk$&33D4yU!^X%ORID6+o1F#z%JmDY}8#MLAw1O z5DFPN#p?Mha@!dKOA6>uOD5^sU0f%IWLez41GpeJQZ6XR!Badr+;3PeS2y0~w(b%@ z4+|c1y9LCS%$IXMUjZwZ^Myo64l#1=<~=;jCcxM-zH#d=?%vD9#S7PvviBf?bp_Yu z3ck}%i7UxksVp3^DBXwGZe|f%kNDam zJmB9Ur=SFdWfclm>c-IJQgC01ikf;N>ouzDU;SSpnW%=0)c-rGYGsVAdQ{iFP$Bab z)wPT|K39)d@896<+ZT9OUyI+K)!_Gvr+8mfgcn&4P;u)Tif&xMgA2!SFg^r3R?Swx z`mu}LpxGK8dUJKrh@%Y~ENn1ojGM#Gip%s`IA|@QGdF>=&IX(HaeBn zYUMywIXR)7lLK1GGII;Gw6a168(VbL+QQw)6z#2bYPbf$Rxh^}=+(jl-Cf;?tcI%x z1yzD5ft7R-3alE7*yVW%sl$>Ggvs-g$r5}CvgVH-h2@hcVdb=`SUqz(*3FxZ#cUkS zlKZ7j7>9+^Ct>OAX)1dqBe}1T`}!?fL|{pFYMYw+Zt_y73ib9SsJ3vqWVQSd9T<$H 
zm^dUw$6yz6l1^kDNKRHa8y8GThDvI!&YTde5J|+8082vZvIJg@U59sWTqCrutL&9@ z4a#Wk0;m&*(;+X%q$Rf<{aXZ&96KrgF39uxCCAY$nES6k3 zC?Suf+fb&`O(?@R$S@9P2p=hI-xHspx(B5WOJF6LE6G|(_DV8WjkvmS=B#=w^Ea+s zQ@5&$jgC`yRFJMf$y^DtjvP9wmdW#S#|0U;PL@e_OS%)KCMzd92O0Mpt{xO*sWGl{ za#WV8rbg~g@Z`S(kV;movC#dAx*2vcs-y<%G1o88!qc(`sAN=^5nH9%cwX@cf4+Uf zD8l=?+x-7Ok2m#)QT;duMfp1jxs`bEU^edF8H;O|`r-1~F1U8K11_IxgA2z!asEVG zLaRL+bFGn*;!b2aA)083O0-wd3X0SrID&YIvPDFcWQTMF3#Z|X2s@(45iz@55VzX{ zyY_h^A+;?M_qW2X6mBO;kMKk-LKBFZ1ZzTzsM%!&|2Rto?vkQ%JwoFdL|1sM4dG;u zgxyX=mK~xZtQi`_5m)iVSsY&%9c#fxxej~w>Jgh@$@Q)HzFO?s!*!DEkg(65Q$5cq zTD4pt79DGe$S4bhg_|KXOu_;o()|~1sp_tFMiNDNoOfY}3A!f`qz~q_DFkyOgjG5X@0c*&l<`_EF6vL;RV)QH%jG0Av zP5A-?$NU5RhWriR4*Ue)5Bvvy9@Ye7el^7>de1MQ<4IrV@-Nsr{~HcOlY{dYaCG?+ zj&4n0^WXRx;a6k10@OhD#NT)BM-OJYUawi)a@8#9dH*v$kUeKhdwqlIS=4chjw zL5H5UXx+scj-JK@PICgskT4@s>|2OPfaev$V8&2*14cAVe6Lw8hICmlx+x~;`FF3aQ3=VBS zgMFLN;n?Q5d z06RA#(v6?f)d)IAqSIE;$?rwFNcG%L0iot!Ft_|CEUf+wE%E5+XaQFzEj(N;(6g%z z`hRVWzFiIQT~|{K>FbJb+dHE#TerPDJF#`#67Gbavy&O@xP6@+L8`al_RL^nW(+e6 zb68qh5?U6pknTY%8#Hfb45Q}eFgMYnc@r6+wIPG0w{d_2L1b&I<32dRvrQ{>=-LrI z`u4;(KYWKDejb1y8ulJLh7`6U1z7vfoX3HS*Kmx;Izf1xA+oOJ7UIUEBHZLj zJfrjp?v|9}c0nob^MsyRUdd$@YW^OdyT+6Kr3|8l@VT9tj~f}estfN9=cW5kvQ@{= z^Po={WaO48XbG?cTGzR*OmA|%bJrW7B_VjanU$}W#ity?wTv9KT!xXjF7HD;39_!< z%jRqIkW=(T;p%>F5ps&TzgdrPH!~M`MP&*gf~;Ev8K5hV-}~1Z-H0*<*gf_nn;4+1V1U9oXP=u|q3YdpPR}Ev*$?`5S3r zZ>x~yq_c*b+zQ9ZSs_ch3hnuSFXQw%YpmgEC-*Mo>uqe%g3DW3S)&abI-dMJ%Cr+< z)y38x-5i`${Z%(rf90Z}C8(0jlz>X=ts0BjrHEZ1C1YYqA-i-5_HES)oe4Stm(*ZM z*P%@1HaQz+&%)M43*ohR0k*AJiVX|rVa?pxSTlbPW=$A_nT!?lXJPrgnX1le`J8$1 zTDb=PUfZ#A^Hz9o*aW{#TL>&a1a0$1TyPjpq#eZJeW{2KjX-=@B+}ydAT2Qod*kEP z)q#?g5=056q_|y>C0VPijC=pB#S&b}^D@10*!Wz>+ROLDrQE{J%PSP(e#- zu}&OLQ^0Dh(YkTzJaQh~Bcw8ks~akVCG}SVD}mHwHmJ&Y43LbLEPKNLyO(t@`F=N4 zc1voq4yLB#_~FBdJ+S>PkZCBD{V5x_T^kQHYL= zL6CnCl6LP^xH@z4EcWi%hq#zs3RnA+(-g2I+aT`edX zEJ2iv<}R5k3Bi^;Zd_JfU4_cZ$3#`bFb$7OvQS=pACHUf;R&On_yL|4Kg2UW_qMhW zAD=(MySh7gQFRJcWqVQbD1`67neVw6w{B0x?OUUW-yis0_E5k&f7Bh9j<>?qQ|)o- zR7adX))og3w8Y+IN5t>3#jYer#O`q3uGC}Iy^znj}iut9W!PQB-z zWEbq_`^LxWu`9*~yAy1&hx?Pr_2c>aq+}iTrP?8Jua+3p5?ov^Q=%(rKarJU!+o$q z{66Ark0l{yMg$rYRfY%&Gh)Nu6rmAj2#z#I5aAUNMewO0`eHcca-U#h_y(FXEa2^D zfvr0%v3aWoYd4r<;c_EPo6`gnr+&)^`~%X^%P2kj~ zDI8iig{~DLhX2OL%m%f^$0yIJV&dw~aNN2refzZw~ubMtm<5wCHRO zk1iH!+M=Tw+}auQ`!Rr1OYRTDiJ!%u`)ud*DeM`}u6(an#%SHz67AY*(Yccqx_2?e zw|$H-Y>*D42D{*wuifxd_qO=H%U9^traRg?w}uN3=nl4K(AzeHjotv(HWn~8HGzSV z5ll=ai)9WI6EheYnZVG%1WlV5sqR4wGXkrrjDD|C-GmnAL?eOK%F`3=dC>MG;@mx3 zqD#+i7&2-Y29Fq`t{j{`cLwG!nUC#0Td>J%Jtj^Xhk*ls#MgcMqD$w_XwBEl4Wjit z@p!oNB-Nock<}7kb#9B{KljJ1DKbREc+8$M8uO-&#ma@#uyO4IY~H*C-rlR>AFvsL z!CMg&>Vv3VVT4vJV)i7k)suqwv~0%FM&f zJf7II^KqTnx?5O|>?bwId|ZW$k|(%RScaRAN^pbg+$|`>y+_0s@syR%^$MRLx3o&( z>6U=&ejd&eN_TULa3k}P!c%79W0j#gcY_B*u7B$XcPf^D&Wj^A5rC2oJLP z`FKK*uEPBUT=L!n3Rq_exRWQ@8YkwmvkC~TJUozYLL#TQsG+#Ml4*-IDG7^x_!~n6X(@Uq-C9ZlC`^fS3&Et)V}eAmdO)Jc7CCX;>S-F zhNO$|aaApGRj*)FUinOcOO^?`6u5X&l&DfuVoQZ&uLM*AtOk@xC}44$f;Gu-y?FT= zZ(hE_$5(IgpO<2Mva?4UYa6&3o2vT{c5`w?N4};FpKHVI$h`{t5K9BQc2$|HVZD3e=dZfq zTQ+#UZ{L>7zQT{4IuKgj@#_!$FsNG>eC;Wjt&YT2JB2E#yP7k6I2Mc^g=G_d#YO^a z=c<+PU$Y7u7tF;*0&DYy-f6<+9A9%*#Z}IqJaUM@IZ3D;!@1KZxSfkCE}fI2 z^Sf%Ss*KzGEwaItl_5p*GB{`{YWcq;H*^+g<@5I?H+inEsN(avD$6CWxgq!OynYim zuU+T%F5uj$GX&BpoIi6GH!fcz0&lA;2rr$#zF0YJ6wK+ufKtV`wt@6 zKNvf`cOoG+foMB`LqupYktH`~PmD`MQbLltvHR`ox4F%`xWnIdR+fy`p2y$xLu3+I z1qFrbX3m)eqeM>5L*zXo77Jw@tU^3~Qi}4>cf5f75v=FXiVfZWwU1tVjyoGx3xm zS*+)kk5E^UiTaAWoL)tB`6LqfZ!M1#vvKdvcz$oc;NFe?xOcq|uAlFS zt7qEb>bVZMa;_83o@j@|2U{YQFxs=%fekB10>vH?aXLgYq}xrp-y)-J5D}$Cc%%)& 
zqxf8`9iroju|yYw%nfn-+zBjq?B4Ip<2j!rvZD6bATo)sOR`4z9u30wSRp(~gUEea zMDDfbat+tha(z8v#`oS!C=p%p@mlQOt;3!q9nodYsWp;Qbx2LKMaq6Xk43gf=5!A+ zmB?-EmT8J5!;E2$#AI_~(Ht?mOc54gh)^OcJko^oCPbDgLLvoPj3{#iN1Gue)*OLR zrtk|j#?By41I-n%wrw+q*A`1`^s>bAHKv%exEZF*`2yo-e2K9$nqtJ%W*9Qr7=tI8 zVZeApMpF#@^>h3*=3nSP{O|bg=f9!fz)#SB;NJ+We_{BTCiuj@3z5{}Q`oipHyql1 z276-5q2rfu?9>$Y4BIZvpkrtV8q0Q{!m`b$(6lGCI(`A$u1(?8(->~wn4{(Qmhk+6 zp!i9PPCx6>b%-sx{-Q_60UCIIZw8k>#<2a$5H?*6pzYEe+Rnrlai;6Y?RRKGd~thi zzJO!C-IxKBo$H$&&2&A3k{X!irR%l&H2eRu!L1n%9< z(6Xn7ShFJt9nq$@9o)NGp>=N!Iunc?zt^JOHx_71M7Hm1hIYNVjh;qm+uZ=}Z3$D4 ze-bgDqHPE6TUS$j#clQKqebt&8hkA`!t7^+{{0Oxe25O?M|fh?piUUrr#t#~_zIof zyTQ}39b3`P&|8~BW7z~&R?SuSps85{sw4~yWVncCYQWPkzG#AG&5XF77UpJFFf+B} zybY{1I@minqgCs6=-910+H~lIHXS>nbB`YA+UqOy{;n^Ej~j_)YnNg5rZrgWwFW^E z0a&qaDTa(3jDf%WfbaYFLFX=Q;o?g0Iq>i2>5N{z+T-g!9nt^0ZWupm049tcfDwcH zV)o==ShHjb)-IibjVoqg)9TsSws|1}{njERa1(a=ZGgAW1_T8AAT%!2~*VVh(D&r+klc`)WDA_LQa?Bxaq$^Rn0;O9~GG$i@tXss@J#JUJ#~$&(pI20l zqB8EwBk4xVSIZ=;Bf!YaEx;unkZ<3AsGd82={gU*c{oX6#>el)g$vhk=FBCWI&~iR zALOX+#BAwOBn0o?%|b3Q`S@`)%E~I$Xy-3qzC&HzYt+|E*Wg=%>OG#n`GDu|enZ{c z52%&#r8xhBz^a#mbFNd%^&bE3*SrxMjN%mgu2MVN8=-{-0&^Cq|ad5&s6u4TMn zyyIv2ouBoOSMTtikb70}6t%@AD1VrVhu3)mJ(`AnkwMtLav^4q`57bn_r}*<+M$J` z4i46ASlQdDu0O$*gM}4Z5LK;o1eT>1t!(VjmN@d%>Cw_!%U}b;lIxgRz?Hv4H%{Ae z{ccV!YU;*i-JE6MP#1J@bXGTbZbOW9u(wCQ_U$peZ*O9%J254-STYut+?&vi*y@b2 z{p3o)J{a7+69M)WrVRQ4QwRTqxg&>S@%Yi0HS`zE{P|}r|MgdR&6$I(3+7|XLN;RN z&BdCTGgLR>%GqovO&X6y({xf5EwW4KXUtq+?@P62h_S6rDYu=HD+A?fL0PxqP6>C_5D-@`u%UP02=)+BAv=TMzr#;mrFbOm z5RM-_+JG$r?DWx-xP11a>c+fB2ucC|{ftchMhkF9GS!?fC}QJ}-^n$8U-zUgEh~eK zl?TYl%jA>@mAemcnwfP659MBj1z9L6dWfQe2aF7q774WO5|VdN{Wyam)0?O&zk8^Aw;?k`Vmtuu@qAOuHgQyZX36c`_ zSRi%34UQaj#IX}DIC9JhhYs7~zyU3>Wrcmj)ZS!M>`gVro)i=8W+bIpaG5o6ra?@s zDZ(R+5gz_mgc+%6M5Hm%Xo9F%6NJYZBP7O%=rTcYII$IM1wTJ4Y~5mpO&d+HcD)f+ ztvAMs4W^jAv>C?E{0yUKG{MlxP4V+YQw*G7f&Qa;J|Ec>KaBVSKMemjej4^K3>yAV z3>@+|4EW^}0_zhD8SyuK;?$!F9J_rE*RPtuqc^e8lhMlnZp4gZ&t|aaGKao~u!{u8OxN++K4TIbFV0eB*NO5`R@6GWQvDV{94SI6hy$4w0o1d-l z?GPers5!nPoW37rhW=yB@x!m?_-V2Q2F=jompOX;IF;-EYK(r9&C!3F2H#D#MBiUc z@y$dF&TH`P1S|9!%>&|469Ui_T?Uz<(@zo>=r+g(y@oiU?@%}N8q9;`4?KttkY#%G z9;!o+!B*%wi2L)427S3deFs^f_mA90-=^r)>kIV!+7RFUXoc^8v{v!cU^|8m{kiQC zqwO$uoHIs`a>1ApZ7^&=NAx9PI=Ht+D?3lP>OA0}bx=bwXeeRgl`hMS!xcY|i zHD=742`?WnY}&CA0pb3H)(VW9%7gi$=@>C?2zvDCL?pICr_L?Wld$UFzX!hit_#|= zk(;{OW5|G>m^k_eOd8W4^QH~M>O~WmeZ}HB%1?%NXUjl~afZB^7v3 zTA{iWZ?W~8S5uEn;^*@H99+D~gKSoT%796ip@ck^?naqDDydSa5_}1;WO|z@ldeE1 z_LdBlAWJYIsE}?msZo;bl|(@)@yEw^9%QS?$}3dZx_a$4u3Wvv14lX!Bqwm@>?K^f zd;@pyWg+7}Tf~H*#O*sgA#hqvSXEZlq52u2L1fhb1+02vs}WfB?>^%Bdm@X|XEH!1 zm%V=fJ7oMRftlRfuekV$Dz-m+?lN1{W$L{W68EWnIe+0QE?m5Z6DQB%=&{pkSvJ43 zNBR8h-1nP2A>GQzz*U}HFY)AYlk+)+Mf{vk_&FaFnWZYDC0Qp4$!f_|GFDYGR#u%1 zDe;zodWCZS%}UB;B>Gx~EXlG}RK39Cr*({aJmtJ(-(*=uMI9=iJVQO#t0%l(Fg_Ak zf4+H-mzCA1$S*)i7J+r+8ZI9@NMHrSYw3JU9x?y}dw0WE?OMXa(N1*fz~^tNhVx(3@4aDCgf#xE*U)lQ-6o7OE<^mKRPvNpujkC^b|cle=W8)B<3 z7K|B=IU|0-mIbp_EEqikYo<)WrdhKTur|+~gS9iJW7V{&*hplpnDroz{giX#3#3kgRyT5@kO zuA{i%2CARjLv_U+)IMcY-omqr254239LLk57?kC0MoIQE6g`}WoQw&?)er*sN20Sg zZd~og@4quHU+92yr#x}|PzxMNbH#yF7wk=vLUD71MiV2^ridW4LZU4Y7(sZ1TOc5e z^O05v+oeNff-S=1bqFKKB6jN)s^SQ&hHzx?ak8FagV+=zDwSwTV;s<6*8zg+kOq5> z>X39yhdl?akwj?ix{D(-Eii-3r?Q0!?9yp9v>}` ze!vuI=_c5JzziwrrbtecsRpTOHavz(_aZ?@NQH$NB7(4rCZb~bxpu{wAt9cCOEg97 zZc{`hn($m?fyh|mF+ztxe;asjvxL_s6KvRMg!P+^vChjJ^H(;)+Z$?zHUy35&nEC3Y=U+_o1)_ouKNpvk2?=GM)$#{ z=sm<7-;A)L5ViPej1B|F=rM4t4ui&9Q;r%8nrMl^lg;qUG;<7{VS!<@iI@477`If1 zi7V|fW{DQV=9y!}A}z)&(_sm4EWv^KlCA9IoJ+p!!$0UHs&D~J(}$e2+6bH`)XzGOrvC9}1Yj>H2e 
zk#gc9j$Xctb9Wx%!rg3@y^;`U38*Afb+6zt?iW=cpP(w@i8+@iWO*!+^Y|Hxcrq^L zNjabB5?nQAuLM_F#68XxScUbkkjIn0)OOt@9;Bv9x)vXmRw+D5rb-^G zx-Eh#xA>_liWg-4&*w7piixQTm7!9F-VDjsG@?qTQmZ6gh0;YP$hyoHZ^pws+|Pcb zAoCzISK&&Kb@9@5wtzF)0)D8bdBmFl=;ke+93DPW_wlQ%e~qf@dcx{8o)cKlpG!C4 z{|>AdAAZMk#xnw>{`Gsj{qU!{Ma?t9tVXgzPihp}1aqZjPm#}kOi4Y+gV}kN)e?Nk zzBFP>9t*N^2*b>rJY?od=I$YGKgc99vv5Br2U$F+=JLcPz$)a)u)MONYf!ofKQI_T7f{^JilAxZxQ1O)s=<;R06~nn7>V zFrcWNBiyw1a5S@lyGAlsF6iXwhSoMj7y%_<5;(cBp(B~94*cDH)2fXEmJ=H|9t7GC zojR-G7+P9sROU*q928{9{R-tq&tJEeQQTW%K$rID*RBfBCYYT{4*Fuwmc3R&|< z4^zvA_UeXN!v^EmAHKz~-rX@};Ez}|W;9k$o{S}9$70>|>F`^zT-9RD7&Zixe;$Nc zqeo)VMt(RP(C|!k; z$&vv~C2N(oZ@+phS1rmwr!soH3S#RPp>>;}y2an9boplxl6Scu_cHF|62Gs@S1%*; z;R6(v_T6i^ckep#^0;0RF;<*~it=n!RY+v=fBrs^ zb(gTZjrz(vcwQxgkY2-60;{ql4wXe)@uXlC%JUbYFlP#KGe_|6GzfQZ_2u{6jmYYN z%a>Z?{233NI_8F>hg@(l&5^*eLCh{Q;>rkNQ6`9p=6n>v6=}(^LWtlhMvJI;Tg2?P zYbcE0qa&_t*-&<1I3i)6Ozn}7Y=`(1eM5azsuqd+wMaT>gT04!3>zdL*CG9kJr16= zNBT)U4j$Ly;8ANFBAyPP(Bi;RE6!WtQ9dp10^IDX0kM~<1| z@KIA7I%-LXS>eE84W~9Zc-R(c2lPnVXUzt^naX7CA{-OrEwMY%3Q4;OxILWT!)1HT z5uap%I4+OgC08Hn5ge$4ueTMpZ8d|JmoYYPHNmEB=2)`E08{6GfywilW86$b44K#r z1I9MR52L?C|52Zz|FD1JharE*FC+fN^ZXYWH{o-P9seoD{rYcAobow7={AA@B68Xf zG)Bik#^^TG96d)`qsJHhFhu)KH(RYeH`cAV$pUHakoM_GGEYWqW8M=-$<O@M z9dC+$YLnx)6++4dMa#{pyK+F|TGEk@7NVC;M=j9qAn@rx}nak(WXud>4Q zb=H`>#SRO1IAV^Mo{zPdz1a>6wmD;-mm{BZ!u+i+eC&$Zn;iKX9mXxO!pMaL+FXKY z2EjJd0>kH8WAs8hj9cV{(ev%`%M2|BP38W~&|v5+YYdyE!N@rpj9*}bUl-~yex5bP z%rwWyDW(`PNx}@HCRt$IR4t~>bHz#YK+2M>XnO)mGFWD277AOoL{+O+ zt>Mwq6E=Ko#S=s$u)6fRa*&UW!0yykB&Mb#VgEs-pT3Aw*E1BZ zWP0rKZJfAz7w7p{vQ~LKnF^*J2yhr#1dwE|1XYFAFHu_m4rR4(P+au_g*^G@$@2tN zRuK^-aHt@5crq@ket{gKBCnDV;z_xH(2!#J>?hUS{xgL#K~rNiUSZf+*VWiHC{f7w zmd64tslAfHHf0Q`#tfE#Lb6!WH7Ls@TP45}{1ox|b8IC`7Avo~Tp>of2_;*UmVQK` zO1cLHKN2~)g$h_VZr)SqdRF^VA?qpMPi~O>g1CDASNGtH*B|iWZA12|o*~m05(L-F zw~hAzfQ^3z& zTtZM1t7ZJI^Lc{G<;hVp1B7J}Pp*yGyCQzi4N=AKrsKZYTV6ttoPC0iAO zL;K=!`cMi^q$guvTqL4`1YFW(=&uGBm4Qb0#zZ46Apr*os)KuzaPibBq^Bg~$boc~ z!ICQp8|$v5JFpQ~B?WTBU}Xr{*OqC9jignPkW0DnNM;$!y8hfT|#! z+Z4FTdkDbf{bk?ewXzRVJpY)#Rk^pJbpOe?TQabzKufv^Wk?9QT2aR5k|86cu>KJn zO4(fhfn43l-%$zw{~vK*Z{5Cwa~ICzI{zOZWZg$r_I>0&lFZdZJSu#MqS9OitfG=! 
zL&e(n%t-TxN1Z*}E&(gqhVxZ~_;7o0uej1z|mo&7rO z+iQjR1ha;&vnXRk5LuBjG(wCOv86#6p%oErjhF;I8^?@9;%c`RaY;G_s)Q6fBqrNp zcd{P4rM4>7hEr=!HAqU=Ao;KsDMxLPLPQ-nrNgm{jyQ423CGXd;>0-{oH%cdQx~;3 zb4ibr=QTLRxNy}L*Y7yt<~7KV;nzef|I8;IB`Ztbm?&7 zv>n3%M~~YpWbI3~!rpz-ov1}B_a}weOW{84udEzf&w?+*M5gx8ZNRSNz zcUr^G*Mi70#+L2I@Y-R973-Q~=EBb~eL*u!nB5#BrZmCNzsgmEpW^57pJT*?FEHxY z&oO%ZXZV%#Q>Qh@^cl@DV`g*AnQKH~jWI>HQ6~6mj5&IZw?H4pH~4Kn7G6ila?};=sDG4(n@QE zsVOThFm;Uu5oL~98!a){%L)s(X|Qsq4(oy(IJL#n9oAUA(+(ShU9djL1#1Icu_2@d zHiouP^Gm($2`?MW*`&pcbykF$2_~*E!_?Ip%-o>I?2YzZuH!N*j9)6x+hFoa#&T;+ z<$laqV}n_1Z7_X>1tu&o!lXr}n7NFoTdu>5MOywbY%q7Z6PB)PhefNqVB+*P7&_bo zBSv(_xRKxB+a9f9Z)E^yohcl(Mue5zE~pu-Hi}J^ME{l$|OveD~>j=MPNjr z%3etXMfqdH_O;l!V;y$v+=NYAR$%VDi5NDb9|jNZgI~uF!lDJgV)@d^Sh{cm7R?=v zr3=Pk{mQA>vTi1UH6L5nOvCym+Oh_r z5i(>$B9hY5ktUZ^pE!rTM@}K-xO5R-#=&#f6t1LuP_kCp;g-5_v?_;}`Ex{ozj(*1kY4Ptpa|^(d@) zfvl1WWDryWsl1X(1*pbu!ZM!FB?MdoDtW$?`yw@2S$wb3r*(Kz^ITm)C#Y!bB9v~i z#sY7t!;<95V5g>Ic z8zoqJ!O!)70!!iQ!*6)|$A9ojvRQoo4UzTn4WF-ngU7_B;7Pg(1z0uQp3J8o zIQn0B39tl!G8N?AxyKXXBf(WE3d+h*R#~a8`jcYxk|&jfm*A^Jedfm6D;eQk?pdft zeScC%Sn+Xry^6;bgj4l9R56}bz9poHs>(O0e)b+!HSg8@V`5B}*F1a26YLv2t9q$2 zS>^n@Rg^wuRG@?>&Fp&_xJLk7Idu}pQj-xM9DvPB=VRKaUoeD#>Dj3r+P5Ztx^}{k zeZR)H-FxDzwjI%4uHdzCga@Ogo*}o1veKcojU775fTQwzW5dVE*qFdFCB8K1U}vvD z)mEoRD-9c>&Ms=1Ouug3MupsmuurR&=+m+Vy1O{wYfpDoW3@qsQW!rFmM+GMnbXu*SgYpF zgx|)s*cTIlV<|}lQ#kgPzkaGSdzVx$ExW4#tp)yp_@+t zB^fEfmSm{}T+&_F*lj0SEkTZitSjrta>0?{N@}kJM3QN0%wjdvV^t_vedPc8yEpuQ z;d7Fm62M6}q>ND|+mUSwwxruo9?N!QzhplIS+Z~PJ_VeX74mn$-{><&1)(IhRZ?>$ znJXDpUO*)U^|GE^;V4~-f-ON?0e`1?dHikUOV+s*d5`jms)xwVWg|sKwZC?e*t$$K zT*0loS8+eJ~T{UDwT}>|Dz9=NJvWcrqys68? z`xg)K;bk_bxA3n1AYN35pzi55?)!TFE*2126A7&0L}NePyWO3@YK;r$iK{bCIDgs+ zr;j=Eb7_&h#|(+_CWwn8uELumEX)9r5vGU|WJO!3h}}hu$Q6EjZIO_qXV_p5LA7_k zEs|1o3>$_Pdl^Z@i*)<#-EW2D0~(|qA*`f(?}Qe|&)MSaRVQ4y;evBl9B}@c9_O#? 
zaOsvUuH3f6r5l`QT)$_JyIIb-m+6c<84kE{M~|yFtl1bf$Jz77ICI_%=P&7S{<1yJ zU6SBuy5x#8=bUl$ggw#^Y6&eHPW3o&$dag2WQ(Co?eWVJ zI}BQ+!;kZ=@y!fVLW%26GehrbrYibQH^bL_-FI^|_r~z#9LuTBwSgLJ3e{s>pcZR28*{jV9_>vEcDW0!DbsQl4V=0F^~H*bEOextu(>HwHhqlV24F(?J$?0Y0fgj za5k|x!x1y)bjIj0oiJ=j4}9OZ3);9kqJ_N;fn@?4xk0wc-(hL?DXh#5i7YFI9f74M ztYp-9V}&TmR0*yc=LKH!SU@G2Eh&1J%$7!@g`U{*Xwy#BU$tuA3BAAj9s`C9!BFW+ zSg{mAk--F3fU3WWPK-kI?ntchTB(rb<+C0eH!s7|Wiv2q#wf-JEM7bTD_8KKPE;+N zJrc_nj>W3Qr-LhY?e)$9fY%(^jou#r^K>?c) z6%~M8iP4DK6@^f?mLuYKBO-AxqLNbB!a0MJ*Y4un?JQh*@CcVOauu|sn@}=YlD!ga z6-cJ4nqaD^Q;#LPr2t0gJmLwwfN(0Rsz(XoR8B-ZAwHhI`oQUX6meND*Ln1mtzDu^ zfc3EKDIOFNR;86F;2xaQS=*OM;EC(mA?`Z?c+ z;E{}#WNZW#as^yt{gEt}ES8`{GFnofmBEAbnTuDEK}<;tCae(0ciQ0PCd`wG&hlFMs<3uRi{Xm+ybaE5hsDhd=P!Z@`E5 zf2ypPY^S*7iGq}LMas4%YbJGIvaFEbi#(R@Ntu@#uv>RBagY0y%@(>KOS%FdS3g7T z%QvdVO0rjyt&`81&uy!W6@k@oI~}R3;@{=TOU5fadHRO9dapt-RaNtWu=;?S+TZZ3 zPU3epUsL;$@jE~7@2IVLiyHp@%gcyoLb|NvDXJ@KP)gY4Kg>aXRu(dE5~nAR6AZEN z*{}w4Cyv9|p9f&zcVDAJ>y~iR*}%=7zYzy#v~+MqTLP<%LkqO9=I_$l9<6K~h$uZn zM?l%ZgTQhzGl#pCl`435x3GkRp%EMjt~PqT%2LS{gOb@2R0*#7wDMH(Q96gFS8>hy@ zT0~?mojeiCrcT1r=~ED}X+2VRMI$9H3dymNICF46QWK&O8L&e^OS%X*uUe{bwKp~b zd!oXS7!js`C8(0jRdQUc%4o^0a!wsNtgaT6%vE%Fn95{H=1QQ|*i|U?Sb{FO5217o zO4p&h=GqnhZxc#_9qEP>fC!2NHv%WYjVzaS1X_)?P=YNfP?t=Xz^F0fC2*2#maH!z zdL;wW67>=x^r-e(NGGT|I+qH_jqE=MEcCk5EyOkH_T?`5u|5 zsgx{M9_p&I@w_GrubyQoV7+~wg?Ee(FYe;ESEq@r1iY*XMqT9&Vs|-;^QUt^$MUlb zz7-vZYzw#3C#Za8*8kJP>9NZM_R#9c;+3jY$}Azvao!Vt01W{4rO z;&y4UC&?Bmsg6iZb3)2~2c)Dq5GqbYiUU&9bx0* zXNf7I>iQiw+`iWWH}AOM#y#Tco*ixzQg0z- zj(m+R8+ckCE3F7s4ZkmYqRSaqt~+zymGkbnc*O;$&e`MWNj;98swIjOhuun2>i9}Yco}bs2pIb(R*CHs;5`jS$2nw-+*LEW;Tipc9RvBR7 zG6T$5_$4OI{S3d(_$Q{$A-onEVE!UQ%$d^+vu8HJg1H7*w7?jP7aC*PQZsxqn8+Et z+!{j|!&mDudc6aF-Rz7BUQQUd(H^7M*<#ch9iP`=;36~pxX=VYNGveLj|@aqd1IBE0z;Ghz7cT#0xdn!=u*8TJ<`})o5);-DPMhp8Yl|c1 zY;z=-959Qond?Q!Y;`1RoUmlOGnR6?c)L9o`PyNjpAJj>ZLlmrhgHEktPQonhA>wzA9zeWE+KV!nQX_&isA(pRQiS=7IU}xw~ghU4;I4X$1ia<0x@VAa3=sIHKcE9eAQ0xPL|s(bbJzgeqBcnGd!Ub0t5Pn}bU5?~2xjvYUv zLU1Ff`k%lO2nn(TLQ;bz1?y7W{y#HVFW-FpFR)$_SCYMwE<#!Mj?ntRc=zsi{Kn~T zzYDV7piH{%iXW@)K>?P;x${?4woJMqrCU?1+Q6`y% zzlLd$??51#SH$OvC9BuaJyaCiut^ZSDsmCgFDU3(SMT`1`4-?0<=weP@XZSZ}28APDyNv8U zXVeJH9Wx4R=FCD|fFF+S-HrGVHex~pa40DOiQ&O2dlePvjW9p{?tHgmS6CoY;-eL? zV!5sWOEOo|J(xg5#f1bDSRqJ?iNYRYOUA*Hdl2s1ov5xRlpz|V=v~Ib5@ZRm8f&iv zTn)F>IgcwBFDpO^S{l0t1vCN@!AfJ6O0r6VDp}Up#V7Srl5r9kNft^9)dfbfjmD_} z>;2mgs>@JtBq8(CWmsENrwZZay<{8m9ybT>e1T&2YfoyY{$q$DR#8 zXJXX_*Kay8h_zd;xOUwIXQf{2ydBP8cEOnojyQ4Fj^GktNsU)Sq;ua=)1=GL21&$Y zVxm1_WAum&w?=4~B_blN5gBET2!@}(2{vqMhIJc_v0{xO7OrfHxywGsti}Jv+$Eo5 z(XwV(yu<)Y78_#uQWLCPYKj$0I9+CfHLENLtd*wtd6gN4t+&9K&00*}VUL*sE||@j z?qiQh+jN+?MT=iIX^1Uzg2{woh7oHl2q{a9T5p9>>#PVfD~#Ww!>^19TlJVs6iwb{ zhw)xIjN7Co*mRh@Nr&-k1Xmi&*kXryI~*~8hZC0hyJIG zC2QMb^4!)KILsM6d)uQ+M|ZU5f!9fE3wtZJf~`$pr)>_M^%t|z#OVHK2ZF_X@-4|aGS3eFOf?=b_V$8$|Sh##C zHt*Pm9RWTFjR_@i!VnZ0s9+TyAA+#BU<8Hxz(0T(+P)NCn-^g1n(0`(dMZ{epNK{C zhGY4{QP{Y8B7rpl8{HwymBF|E=>7=)DBqUJI~w z(;@6?!P4v`}_H*Q3hWVfW-Ofp(BjKi_hQlBBW*T_~`BI%Zsu0pwLPrBnIdn8#S0g!~? zN}!`^uDIXNRrmHi-v0R?y!iuo^ZS4Bj`3dNLj$lra{4m)7pRuvn{}I=f z;`PE(er|qurTlvdu*(0+U`dAR`AfmnZ+Kim=slJ?t9L}zZwgj|EeV-#49RZQNoGsB z538i;ov1CVL9yUBx0olBLS){3z!S+8TswCTCl92n+9Ln0UYIjwBEIk23q8AbW^_TD z79P-EE@rJc%RwXn8>o0SeOY_K@U6@+Hya5OW6-p~Mc#zt@! 
zU}-d5&sq&U+L>UJ8$dVKV13`ABYx`A72mXOjqlpFR=|=jLb+P7F*_xw5?sl0xvEg= zuI7#$f@Kp0P$SfO6Z(IL>4OGg+Q5OBKVpO$3v0>vaSB*V_?uogcQ$<2tVE>Gc0~Jm zBf@t(kraUV&_IOj*n*fKUj%z^hS%!F>WaejJ;W4oCD4iq_Q&qXaHJDY0xY>tp$yls zi^xih;5v~JNZzxXC*4q0izTqyzi%%RVq=iHH%Z}3t|n|8szIPeQU#k~xx0l7L9+uq3l3C=&3< zazT~=OXelB)R^&V#9d?dO12|75?l$mWVvipGE}mjbQj8U*@k4^s-N;0!uOHa$@c$O z7v(d)XB{3tW<%mHVBKfq=^lSKx9{A=Eg6XQ{ypU7XQQxK3fi-Xtb2^R$jZ6P=^fmA za2>brUSymnKrZt)cAE`|Y}D5l;bpx<0bUSUFKTk|sy15z>(6(E_|J!8{OA26;6n!f z^X?*ke|41TO2OOb(Wrg84UdZ!p&SD+`H?J%m*!ylkI^=d7k_(Jds<}7CD7& z2`o=MDr|?sl8(scygXM>+#Zh#+T!6ug6zJVip&SZ)_r$=zivD(IkRzU$K$50g4S(r z|H?HdT)N_bt2fLUi%5w-u)OSgV-2LqkwmVY;6ULx&kV^#qy&X8YS?y04Dw*kGo&EoKS4 z)>~rg1}n_lZij^goWyc}SB4W7@36(9ZF(&CbHJJ)SF8_l!^Ti|Yz%kD+6Wh{4tL&R()zNV$hf(0jd zASBTPfxA4gGui{b;hxwY+#1^gJ77m(4{Z1Eg$-N3!ki`TFm#+V`VZtmx3?48wswMx zy&YjCWASKVYiSA_%Vw}L{~Tr}pTWeq8BC1K(5#6WzWl-j2F)9QCEbCInJdASJeEwA z;7Ym+rHfG36I``u(GuVG>yLgv4n&u)dZ{edcbuO#XEt{D??fWUh}bi>jL=tEP=n@Qf%Kc7aP}3!RB>Suw~t3Y+5y*uo^*N zje_@vNrcr5_->qxt*ggl3$f+1VJZ*oGHk;FY~M5oK3kU}G-xxTBK!~;9mId+a6~7> zB9;gDUHkSCTKkc7@F-G`U%-L0S8@2l4OQ$eU4$|$gVbY5fxC1MO6`@PN^te$Hme##NJ;C$z9Dn!-=P&XzA33S&umo6A3nf!QjSR~mh3=BglE+fy zE)c4zX{f=f=k}lTv%MyscXbv4(oe2FKIrCaDJit_pXazVkv99+M23F&+HVpn)5c5GOO z(LWDFudbcYy-O!_Y1bZZ4o=XRn8St*8igx!9rVW5&>31mXJ83?V=FkDSrb-Pur)G< zrfCz{5>{?n4IIqPU}IzeS1T*DwYP_-UJn=IOYTqD*R!>{s<3xUPjq*2MZb1(%bSjz zdaABJ0hVN``m}UaMel+r$zlnzrvEYkvxonzmPx_8tT&=h4M*tB#J{5Gybgx?MXcx^Sg9_- zOEOUCSvAu8@gs+E?#x-$^(QEjsbr}HPJ$PiO14O{N^-jyDKr;Y39{sQ0ho-aE<-s; z$n9waTCxpUPaxEojS}=U!b!j+=#tEofJ?Ge0xN+~V|Sp0WV>XUfKDJL;F4M_*|u!! zAyM^&uYV$F;`U{oS|X~3jR%31DzLAqRb7(r*qC`E4HZJGShCZq_Ns`#lPvyTuHiDj zp9fiNC_Uom&C5bTQ65Ul3six-4AIaqR0H8zn8DZb^Az7lNy!70l|H~@##6b`b5%Cp zy(}i23i0kmJ`q)jKi(D*Sw;BI+kE`_<^evwxQh36XYk?05yoD;s1Cx@vb88Hn1j5` zk+^&PTim|V6Ay0nM#i=7#8rDnJ6t;Lh7(6^aD-4glxB&v6ia^R7D!IAMDkwZD%A!D z57^`A5l0+7?#v)ojyvG!Nj^SFWF4_W`eC{HPm5D$9C-Y4#l?&6ICstk7dU_QvK#*g z+zH(l{F}Grcib8!1XWpS2Naie;_;#jk=35Zi}rZ@v3!1^FHM z_wXdhTQEHMclIQFTk>!3ikn2$wd?i_2OggYxceRoT9>bJ*)6{RRYwBL0jDlF;P5FM zq#fhFDq!hU7v}Cg7D!06LR`ESQE}FYP0(R?iUX38?GeAn8d1B<5gucXkZ|cTw1lsp zIlR0Lv3`3~Ml)>KVT8>)Ow}!Rwr$hEd%F%>w^(E226L=iXN-00jq%AuZ&OV6HN&(3 z3(O9+#-eCjtk~s%6>)Z07HNYeVOCfiY=-$ejWOHTkZ>}?f)EXsMiD~M_M8$$VLHqY z=4&}!6sE`GaCxm0mPR;YQ7FL{tjD4tJ1ii!W_ej+_Et+Q^3!AKPFpPDvgHAGSWc8J z_0eHffIT*ZyI>PzW2h6n2&-*`%r@d?OQJix2&=7$&hXyjjGg;j5S;3cko~R*V?-Tj zfv{vJ1nss%SdtUMh^>G)J^bVKh)i=s{L$8kKG+grseIjDNBAe$VrQ(}oKlB?c%m%P z9+Al|*p=QAiHF)S+G6+NPMmf{Tv|ths|})4TXVh*V)wU2;m7= z9`Fw83ooC(Sh21`HF8@Fx4rtMp?&TA9A0|ZvS@DKCFsxiK9BY=2 z=WAzSyVnwx#oDoBJzE7^;Ty<(2nj+E58A;|(TGgkhr|OXu=m(Gq@KK}kR{!Pjm7R# zcU4mR29IBSP_U|g_a}Jn1<$ppc>Np7UJzWbdBT445tRf_1*cD5zDGF`BDGjj>|Rt| zk0%0@H#~8_`VCKsD#2AFoFqdfs1jUBm!M>+8esK7)n5s;1Xogm)Ai?xqp;f%oc7oZtp@9>8Xod~H`=!?5_!!^higi-`A%NsLVQwRNo8G{F7?yzAhX8iICA@(!o$;j?gCSm3Dso1=D z0oE;;i$opq}W|xCD|=mPvCVh zZ9h&OKZ#2hi7%o^@FLJ^thtgdyT*)`;7KxAk|~m9k~NadSK}C2jraYN43-ff%TlfD}hwS6S+b2Gd!<+=<`KD;cz?~TZMTZrFZKE%7)8+h~V0^Zl3B(~D=>RB|NRcu9N*>Y5u&qi_XDB|in z+_};nw=QA+3<6vY zw(j0`<^O^cZr*Uj&0CJRdyl|+&;r*vf9a|%E?%?4>5Dp?IH$vr(>jH#eTU4Ee8>{} z4wxe)oq$Y}TklvSdY2^|`9Ob{hQKEzugGD?HMKyz&IX$mj@ zX7CL+hhLZmK0#LS3$Vsce;xe%blARKgUw!M*s#e68@vqg$y@?zeuNbk#aLrmybV_E z)??j1du-fmiw%i3*sx234Y8J36JdtsVa8YdW0`|AY&i$?6o8k_iz0UCCdk3YvBJzj_Vve>z^bt429&tx_ zsy%}C>JZA$5RzmMKg!f^w+3Mc9T9iD1=n>)#6jXK)d9i#>=3+1GFM!GkDegYBUT1q zJKP4zN84im$&QRJNIu?~C;d*?bEG}7)fx#0JhA&wJ4RH z7ppgZg&FhOV#r80bn9ya*H(tm+kXiMdqddUn!(!A7#7Bcu(7a&){JN}G=X8W<}fmp zF|JHtY$91Jsl93dmPBLCRb#QcAWJe=0xii}d3dx^1C6%iw1tN!Jll1|H~r;)gHy0z 
z>2fSzyGC^puHQmHg$3{*Ly+aC3f;qZg(7aR6rG3hxorpv-GK1$^@s=~gtpJay44e~ zPPza$Pe+j70_@y26FW9e#CD=;``U5Xw49(?^s}07SveBxIKOm8e=L~N4~u6G!tw

rUEP(2TQsG z8?PD^TvfjL166M&e#cYds)ERQBA_B@p1l2t3JE^0c>RITe?)Q33zQI8Rj)pv?)@LA ze#PgAqbkCyUb0xPf5($&ZxpT`l~yZUN&QtLup|@ojIWjML4j8N+dmbwo)S3ExDDwh zl?Z5xvEgQ2nARIEy0u&kPBV}GlD9qdlGmx4&flrOV^y> zO0rjyZIXdTUlNaX#Fb>Tp4Ib&@~Yu|p42`^?VI;1qy|g&L5kc3R06Db{JbAO{(+DD zOmb!6TUjRG31KB&kTR9}E6HLBvLve}bz*|9yWF=c*YDuw-3KZIm(7!j;7W~PPoNb) zu2$Krhk3*nk@cvk5{0GDQ1ZAQrB8^e%2%kVC8!uxHT+xh@sp=Avimzce<_HP;`R@y zeJZ-=g>h9vEa5bP~7xZy&L14KPR~^y6 zLu>SKb5wwm!gi^#lI)e>N($c_>#`)9CBrr>8aD#de;$DG{l3MhK7BCun{O~{$PlcW zG8yY<&%){%)3IjO46K|v4KqfMz=Db65xQe5q62*4w`m=A@wX+w3iaKh#=??pmJBp1 zxZ0l>i_}D^y^2y-5K8x;AWLwyecf6%p4P)>ix<4NY(acXwCWOEzIYLQxlTxcKZ14= zZm}^qcIcoA!It1FA|x1x($aAD)M=bQcTu58kkwdgCBPC;$yBmf0w;lzz)Btqm;_PM zb$IB&VH`|9gac^@)p|12f^0{y*0{~a44G_GGGC2olA#-3^jF6I4$MlN5n-ws%vFCL>8m= zSpoMyAN3M7xu~toR5e(yYIE@J`6K*BVEyM^3GlHL|9LOq%EZT)xA4cCOZfBMaeR1@ zi09S5c=mKPo>whG-P7qP$@v-gu71Vnirl+Bk#&>6B7`nobYg>!AUSP|lPC1VlpaTq z>T%?#Ee;*A#o=R8I8IQVk#NLu!s_@rN1QxwkJA_IaPopZ&RllJr5mkq{Z4x}z&hf_ zjrO>GyFC%zk$>BEct{xK5?DnAZBSAu(^mYh+M&3-BO6_9k@JX;OFOA7R!LbG0=E-N ziaYb~*#X&^t#Dtu7c)EvEMhIA6*BI)6U8q4+dJa=HG5pY>8QF4ugH)Nm$f*3NsD7= zEpYgh6%HKNC|vDMH^rWGGbE%KVPCoh()ihSr`Qlu#MB;Z#P7Ff1K%DA={AVjXNK@Z z14JbnBX&2RkFi8RkTHSv1-6GYM^LN=K~kd?Vu_tW8a`(O{{S0!`)aUthdDNHGs0$X z0&6)@wUWSCLr|?xw#DW&2Y4TLg5M!Wc<9LmZ+7M@pby0k;U`wnH)nHSMEjC3< zp}G~;@$tHFD{P3=5LdSF+r#(YBVB*C*qP*n;52t)%7egigKsL|Gue*NvO@s(HSCBJ zB9A&F`j|6fPdFp&fDQrsGzjJw7m`9%?zVt;k{JThH3;Vy8hO+nVTWxInod}8oxmin zyGMh_WSzQ?QsMzu95~eh=_lIY$k~oKc%}nVPqjnZnfBPvX%az~aL^q|hdqg}u1G)i z4N{Kw$FAh>39N6hc}EY-U(yyM#yFtwcY3t#q=loa5p49I!&>`qSXzDxGvm)-ZrBVO zQ!8kUZD4Akfl)Jam>8K7P$n=jYX(E(ri4|)@C<^f#;!uiS_!fwYbD^4!gd*+p)p#w zZi~)cx}kfoz8LV!Fif2_7t`jg9py&8f3UI-2ILrh!*689w_68_DX;yxT=&cLB?~!;^l`w6{4~WD^ylWYOQ1{xT>swgIc~ua3%Ayo)o%E z4VD0_g21UJZr*(SQ^87*)rc!8E`QB!zuB-8+-wFI63OR`xn-zmhE*VN$|-$#J;QVQM)tQY)V zUJ;v(z<@}oovKZwR^{A|Qtp*U4=@V7_PAJwBQVd!C zKj#|;CVj^f(g##jze9P|J06JnH?JhHp1$HfKj%rY2BrMpD0ozg+qdqi6ZFYr$B~wt zf=z4IVo?9@@l}^DXxX9#bXHnu%{8zzvQ(inwSk^tZD0k9FWCTUW&sB?8^THpJ7aU` z_`B4bn!wh=40;PwXiW^EGckfQf#qp$i?)sq=-}e4x(j7khaPO`^m6w=Up91p5@>bm zh=yC_xT(yPbo~joB$Fl3l7U5s_Ufv}$daoI1zu8%HD|=nm^|a31X(kuWBIhn>c-EBA%Tbs^2fF{EBKpSrf!uJ;^W2N>Ow4>F_FmfQ{!MA zN!f$L$-8kRIZ0)(1XnW9sB{yCc<;bAHpV1_wR6XI?BrvCQ?UO|%pvI3u3Q0sA8!Qu z`KnBoWUQpyP%>JPGTZ~_52U83@BTFZ22!jpMd}g)q&xiEG!8No$8coIZaR8%Tp39clYrLtEQlF@pK+S+=Rxe|=YYYGaA39LFcPU_h> zdB#Ri70Sv9IRdM;z7{WD)wA(HXw_Hof4PG3L;>qreFYm66?pmjDPF$e@B3vbUPy8K z>vFt(Q;yd!it(Z@5A{{q3Ro|n$gj6Z;zKpmcvOW~z&$rq5k5~hz=xNK zcv0hn+NY~fUpXI@W#dtt{S$KU^~S?nU6FOY12V3(#;t1}1d1yzUvXmN&XK=!2OfhQ zaOxBhby~Us9dPWF6HcCW!6_M4{k$VioOe)h@{%J?^L3{#6KGf6arIVP+{);T+xG~! 
z+norWj{IA8__g%&G$fINm~>UTm=Pf zQOL;4@g%NXaOb82?%j6cyc-@6TlWd9jN5LwdCigk13Fx~WW&#($N5XvICIewr!Hu4 zt*msc24%w(oRcM?k!ilKZ{W`=S&?E7%4Pv=Y zhF>%@Gu01n{mih{ z*AQEFHp3_D_vx`Q)ef7}?XiV=^FHE$pi^!LKSR_Uv4?N6C4Ban!gseRcI@U0xkKAh zwD3vO!#ADKVR-Mi!}b(gY}>1c7m>1sSlhnOfjD!3*KVRMQI9R0ZcWe=RvKc>27Y|r zXRjT$?y|;~I1P3XX4~ThSXS^$(jhR#9>J;h2qEG^4!96g4Y1m_m+S4bCa|mtE-n1i ztofWJp`}IiNqZuSXiBw2@P5LH&-o>DzmxgJq#9x80aFAWwnFG(Yi`qu?_o~J86$AF z38GWARI3eAj=15}#m+c%vJH+ARmUz6TW4G2z*$d5Yowm^K+;iHBp+{y1E)LV;ORa{ zKJpD>_Vk5MP!FtF-yRdDIiuf?=4ju^6wdAz(AgWnO8YOEYd#^4K7qN(zo0QQg2u!e z8Y6pH7&^k#$PN~4QCn)vVP@GB21Ho%=1mo<1X)tx-Y}B8k;-66=1Pj(rMTU}!3mz8 zZT=e_J9R~`ufM|&gMLB3p9Z1-z`>Y4cMdjg+k&O5SE!pjuiChZ{~Q4biN@ zDqsbKZbN9u280H$L{Q*T`1#C7Xy6h=h4H}ZJqO!1PR8~P6X3n!SGJNTz<1+#Y+XGP zTUHE%_uA3$UOft1mJMSJ$M)6Zuw~^qY+5m%2kbdS+Ddi%oB;n#M4S(z`QGunW0AxI ze)54d>^*n{`;MJeHCX9quBdF*$!qu2Epi%%WKg%qd0MZ!2_+<}Rm>A`8L?FU>K%c_ z~Hph0;a%`1w0LBP=Q<%T@PA!K&uXZ>pQH{_P(MPm-~cp%}`mU!ac5r7KYC zuxh!zTE0%e)zBqKAn`r3h(1A9dG&Kt)Vxrjs^j}h_n;KG3#ue@)tJGOD+;CP{nh*5 zR5xN755j`19JX}jo`a3uZ-Okz7B$Y_e~_!PNzzp)tzyYw36vzu^z7M7Jm=?;X(Oi`B(@sH$tr*HLIJCgf0Nvt zBA$G6apM}Xdg2sM_WSvN>x)rChoNt;Ug+4q16&OQhNkf5RFJi4 z#UiYjKLgv>ufV~*yHtUD`kq}ldn6tE6XOsowN`}D-q;v5RD%G^Z}TPud2dGuQ6&ZL z+qZaO)v~3SHFXLW@;KtVZL7*;39yn95)`<0a2?5PNmgs`?nERf?Nym4>FN_0NeGk# zNXL#GSJ;xP^rV|m9t)#wn!P_j{12`E97tS7IN5RA!GUMJ5rqDmkpk1w9Pq^1I= z#%z^z6E@adNft{oW%9frOqNLE=o^ZS61XdYdzbwJa`a-;{%g3AN1**vXMGb+)`9I$YuFCQIn?ihi znZspw@vi;~KE67~Wry(YMJ%3IZ$n+>asq2M%JPRIH=_@7?sY@vjrO>E(E}M*J#hba zE8MV&fd z)@1_gW=0nRs~e9KopIx4dp363@$cH2uy*I))t%peD`Hlz474Yz9N8Fa!Q({>Jj`<< zu$)m??7?GQ3m$u1k)7?z*EkW>TK+Fs;`&u<+`6u3*x~L?XU6{m*2N22TqLs2ULdZ{ zTM&X)IDATgWlmsOA>}B8M%aH`kJO`Dg3Ahf(yb7^*9_r%2q>B+E?tlK!#X7J`CSLi z5wqVA(X>Nck{M#-Ef5(aH;Xnw&@OXC?6E;qk}bk_=@A^MLrA0^p;7h-2(y8AfH}7M z8)9n^f#pTDZB4hujzh%NVS?$X9YW8zAm)NQ!jJO>X%_G&QUZvPorH^bs)k_=pZyXx z@J-XfkFS^T-ERx;6g_sNa69`Q;G0Th?GtR-!Y9Q6engWG0p^#?ojK@?-~$fWnatNG z5lU*xZ71@52&hoPE&PxJA`Uwu?2se;x$SLSZ!4uKSv5cIhaVB;oovp>#)OtRB2U;M z;4r&o}K#Rb1S;q|CX~x)|ZV2xK#t1lUj!=HtArx#tiV1vnH;4ZoBSfcbm0(kk zx4`MEU2ynxYaBh>7ALO|TNm5nz&V2ITq|O$B~nj$AmvmmVygr89_fmN)E)?n>4`1g zT`+&CCx(x<#aF!y;nB(v_D<%+Ra2trA27H48!S1sGXG2g%aR9n3qu0SzzJrYnzI#c zX=x4<^JZvf@F~9h@=F*RO4drQC^Y8d=4jHy0LE-lYuF06)jPn-l2coIv~S-Lty;B# zo!t7vy(PN!?2WJc|A5}#^;6TalO|%p@+FutZx(?y9qYENQ`xJS-I0h&3`ba8psK-& zNeDu8%vMB3tVI+fEMx`3f|jubyAXccXTfLdbolR>fv}x(5x9N&U$s{g;JN(&5TAa8u)2tY=dP-3*6EuWxSIJ0cVzI;5*f+8lEA7%S-lLm@D3%luT&ws3?eG^ zSJFi&-GfgVm5eHx5=fOqNyT%%ruH=|>R#iijFD9@^-AvuCFvThRUut|(rqZ~JSLn3 zP-RsFf{fu*^Gao~vbZmIGV}3FWsF`y#(8>6$Ox(gQm=V{mF$&dv}AeRO9Jp|jjE4o z1Xm?NB!iPm-BV#j09VPE1AE+&wei>3nir$DtjeUL6AVHs;Umph%CusNw!Hs z3f)x(tE!gJdyD7XpEuGC_>ud@&o3~O+N(DNrGToT%a9o4d=0B5o_|ZeM^WKZ7iwvyT6)WOsgu2z<4XKPD1wZ@>X-SA7-?)b4?NBq#PEq>_G4&SzsdMsBp zCf3)U4Ru+AyLDE}J3H87c%L2^^Iczj)4C<54Ej-xfi-2&04$q00ZYb@$AZzLu$;eR zpJmGsvS9;(wFEm>EJI>&0OA66V#A{O*t~2Jwy#-!2L{EG) zQg+4Q)WQ8o+Z~Vaojyp22*vUJsW`N6FZM)7AzsGC3JXCbu@n{-@JAm_wQyP zxg5=k_`?t|9YbJ!Mpj@V;2r}2lZ8J5^}z?2>^Ky0!B;^>R` zB)Pqg70;Iz2v0O4%rr>YZ-<0r2Sn}C5m#CofY9P}XBf>9Vg#S?=J;f5nl*MDB$SR2 zNJsS8Nx_AmaYQuL8g@dDAii0^0iq|(j99UN-vJx=5FVuvUq*uoCz5q^-k zI$(=X;w>ay!j`zxV@skHHpQ89YJnZZ*mi#4ZTmRgX9b^B?gNn+c-S7nN1YJF?fMc- ze(40=K|+h4!~duTLHv?~DA|AmmfR*kQ!+)m-x5)W^oZvECmwf3`i0g=JMW1@m)hgd ztVZjo%T8BEJQzs+X z+I;~V8$-CbSi#=j1ZJlHhLHiGWzqy13o}@mTEo)B7G?%I7=B>|gD=ft++4Z}&4?%y z7#f&x%4Lk^&5Y5ksgb&}(8SmRmgd$lH?!t8t)SD{!_BoNT%23L&ejQTZmrO+Q)hJO z))jrf{}uy=493qRhG6QPX;>?@PXXKE6YPy`JG~GXxs&@Ifza5U2#WH&9cdTvfP!5;m=wgpF&aVB@-3*s^H>wr^VrZ=bdB_1mP76%xkx4h=wHR0N{-r6Td* 
zQS3c>5(m!mpvl&1I$OJkFJ8sv2W(-MRp1d@llhg;P*TSe^}F||;6d;)@$&T5J5(~N z2$*VOp@zVCM(ETsYKWa0J}2W{RS_Q*1W7r8^7t7MRa1{AJXk)HRwps^M2chUUlCL? z;Aj;J8C3+`3u2^<2z!uSjLe*3p5R}gmM4D6V7+=rxP1JBc=|wC$=FlUMJPix{E2#^ zOGYxU5gc*ZGrp#>_6@;72ys7Zh$6WI|Lr^X)fIbkskhW|$SCe|^H^CXU3ZlNr&_)z zUn7O?(j6hdl45tsXuXmy$`^0&xRUQB;NoYkXS|XwKjp@UNVlbbZ zJa`aROr462^XFs7ayC*{@^`d)75q1>$5x_i`Sj^nK#WZrIUK)o-T9Lzuz|B4aYR(= z?gZ?Ki^iqXCvob~0mKqfyJMnoENwrIr>Emkax#wW-;e#fcOy153{inWhz$=z`rah0 zC7!(2uI6ugEkb;K5Fm9}oW@5)U{6dmg1mRI5w!x_Wk0yiX6{o&pg&R);*rKi({92o zf>?`>j6fQ}cInhfq$DO_S4226Wt4ci^6)?!E^>LsjcX{#ey9d5mGQN*@83sm<^#w; zrujKJY6yrU+~2f)`*8pEZ4~D}QlrRc-n)mS_;{Q>eF~Q^HVkx{^&pdoDnSjgRQF77 z68!{a1XM{;G2~u}<)!6nS;b=+Zh`C6)~mX!$4_Lql6p2q-V;+&uhfEJ>E-}>qRAMt4mP-tQhq*g{Z4}#AzPh)EDBz>r(vorWC)w zE@b53w->kZ+sljipbHAKzry3fukk3W zE0OAnJ2yRW>t+l7j@<|=XPmj{Kv+59H0O_9u*XrxaYE|YWd|I;%z4I<^Ew_?wZ?Y>k(eBe_YJ!yzb*LB>MEw`b^ zwHtPvx5Jg2c6?u3T)1Y#eX+)g3syLG-Vz5-nIiSLF>z&vv=f#{J84B=S>wQQYosyu zAGc9it(2o$B+&?o=_c5fYKXX$=EPM~#O-TJL>VB7R!BZzg}wXDv1_jp684#3pWF(F zK1tYXL1dXAI^GPCyUY+2X{aJB&IF(A;7UFRweUSmh#c0!pZEwrZI8&ajtJqK2Pu>g zQHLzxn{EN01J)|M(}?43u1V2{rqV&Ae?Gbv?4bf-Z z5hrol4Kb%&5qGu);?J}|^fAIqK*fmo3s+(3dIS?qL8;aVB$WJ9^w^dlwOeM`5N(PL zoNkOY$F?MbFV&uyvM1{7up`-yfO03Y+|^^BG{TX9+$mj$#|TD706*(amEE#XS-D_- z{>USCh@fC&jyYiOc@LyqBC;;GLmH8_?{rIo$`!khxgmkmxP#8vb|_Nyy&23+o59?;IV?<#VPa^E=3g2p zP+1shi7acFG`EInb1lq`bTBoxCakO&7BDh2C#*~fE`gVYf|aF(janvCCr3Bvbv!xP zI-_OFwrJC~BU-j;ht?h2qsQ01FmUJ~jF~hB3zjdyR=+LS5x5;&cW#Dv&{p_{Y)AO6 zKtv`4At=fR!QopG9=;Bdp{o!Uv|Qmzrcq(55fQRdflH?TJ`3QxeJ*x-&xiNcS@7C0 zl|e*Fx8U0GY|T!>=5-UWY28F@+dLCK+ZV#udntDMu7IEKTI}#%gYDbbV5h$q!Xo?; z5fjSqFc#6tDTqo=MGRZK$tO<{Sc0qzIC1R;u4m^ESmnsAc#51S0<9_(*Vdtw2f1=d z^NOo4+77q&}&d7!gQS)W1aK%U5{DgJaDbp6J=~ ze9D7o2@jm5Qcoe6kz#hDq?}MHD5^ltqjD6LRjU(!1(8rmB*@i-Zv-;$|G()bDym$-8j$Odz%Xc|{8)wd3#rbnL*ou@J z=|04R`*MX=F{2a@vrCZ8C?>KV6I8`zH7H?}KCV@DR|2fZ@}7j1x_L8EC*YEsKFec? 
z26Qz%-&lXuINty){%vHPY8lp{0aqnu&rwuTkK9L<$aql9?=cTIZ)fuWe;Y~32jCYF zf+fpWW8|1|`1+gg@a@;%qf^^1Xye`%?OfZSqg#7)bZ&!ob{_B`w(N~HM3y1UrE9Qh zGiVGApffdv-du)tFo({}9F|0u;g_Gota($|Yb?1PXLN9JLOTMhhpP*IY}*z;wDiQ# zj-4^QTMzX0XoXIWj%ti78Oq_CHf=G02piPBJHB#r!;hW2U|_fI=;q>rA-#HI^mpH3 z_Q;W#IA{>Q>(CJ+`}M=jQKK+p#0bVntS7Rfe0&hOV+XvJEWyHw6R~i@L@Xu1=8YeZ ztxK08cP6{J2G9014-tZyhd^T;s`ejS7Y|dyRkCe;;xVsAwT$*-w@6+z5-Cb~ZhY$!5+zAj!h`YPHhXe^O zP0RR?QDOJ#_kQo6=b2B{sx43bT6cFh2P=f9OBV=P zmvHT>1~3Yc?>41ryLJT^E@+TM=sJ6v5XEkX47v=bG``)lU3zy$gT-+Dfphz~!DaT?LBOdT?cKjWB(9dXE3(y9b{}f>$?tGV;}b`SU#F8 zv@hBkdkA6&*tVZgwqp;0YsVmL-DN}AvO&bAfmk0k1ewy(VoUoj>Ce{^eW8DZQPCU*I^J$9(REM0gnr$l%*kQ?D>%MrW?N*q--5QH_ zJ7D1+C(PYVAlvWB&j%qYWf-=l(Nc$DYsye;PY=V+v@mQ-3Pt2GKdg=R!1CQrShmvv zEB81Xz*@oWB|99jc)JT0Y<0%mC`Zg7VEqz42$R%W5(Jc zD9{RCoEm=APtJW;Qy7kMkZsQt6vh&@%e?KtEojXN1_I}*ugcN=s`OnODPIY5wv;xFkG|Yjy%z{`=dR-H#p?tug4`uS9@{Qn{S)LoT)6Uwfdc_n55cVML@z3;>e123 z^LBLq1ydF^ODdK?OG|GwHnkbJQk=)R^MsAl1SBa{Hv--TR3^Lo*-Hknv`=dlYM&+Z zyGpey?L&L}DYQyiqP4a0JOnI(mS*FXmp7ugxE4i4H3qC=W7CkGU5eynxeK!mTxDmL z7{DqkQIK#GD$4k`C~ZJlSpymxx(ujw@_Sk!N?@f#qpGV-&rD#YcC}f+O8eAqIYyqS@*vUPF92A>*|bQK3*6~xEkT^j)}g0xGyLOj}0A;e~q1hXUC4moq@r) zJs|L}Z0`asZ7T!3)-Zhikw@|J0}qv!J4qF;W&(y3E1{mU=$hbF;EpB6NBSNj^GI2x#+!n40Nqsv7!&KR<1N4rMRqB3+7`B+f-N9 z_x)S9Aa?&gQy$=X6vL(bSqr93#k|RrG5!1R3{b6Ew1E3oVf*@ZX8YqeUpFwNC5+Ur z_^(Z?SDO91Hg3SaEt_$iz*U}~L%^yx;8a^)g0m-2;1Xe^wxR@Og?Y$HCqTu<7|1Hl z%O)t~6TT`?Sz3r=hog~9xXMh=Kq>nQK~z(NmctRCoW!|a_7Q|Fxf=@$3UKj)lJ%Z5 z$4GgSi%+h;^NO#MYmlX&%F?Ft+OOA*igo2G`xqW$lq|y0$&-X5!c-4oP(XG4`ZWVv zrwLH9bA+k$=g*q@DT0(hOVD-s@C%b9@X zf|P8nLFhv$B8ah`sRf=(gOFM1$2Kp-r#TaZ2r|b8VSmg(>^m~RY$p<&1X7s=UdS!> zLqS=fv0MULdLGY}S(m|%4z(VoEQgd<5#h)Hn4(F8Z%vpZsryWwyw zA(D`_pTM*?+P2RQ5zJ@@jyU2_tTT=f#&mpKk~@wbcSdxKJq{n^z8J#U0bA_aXG`#6 zoh-F&mn~t7z_!l?H!axjgn5LFSv#yTW7}ZN+hc1Ko|Q*ju{7Ed%V^!hmJzUKZ*|19O*U9epj&Yy0P}adVb*pB%-PLz?6=2~XhIgh+y4fv z6$hNL_K+La9&{z>`Cu3CZ71(_djjt{#v5CX`eJ8%Ft!r3w#SBGf64?zr`?Nv@edBQU@YzP?WI@N|Z|hbtT%m0rObb{hTKJF%qMI1##B zOzf5Q5N8b0Y{G#~7}(#zBpSW#)*%L@1X+JKDOb1LJiq{#i?b(&hK@wo@G<6Gwmk2s zF%xjl{SV;I`|iP`Pd$dGUVH}6z49DB`tl?EFy%XZ`Qzti?Lnzm0<3wAH9n75aCgu^o7SI0z^XGhlnbSX^eU5nqtYyn*VomrW!qqaPV69xsa?=Jx z9XNn(hmRoYV6?F<(TA}8NDL08q#~uL*nm|&N2OU6l?Je+V0CaL*})O#33hgxJ!jzR z+ON34j_{%}LY8LJ5wb4-@gKnIz4EJ3yUucydin}~tj}LCfYrqgx20Q|+k4T;&a<`q zG_>4HO9#QF=Nu}kn^9Pztjuj@ww%C0EF+n|&Pw zh?ANnclHWu6ewQbY~V?yU`miBz*1dp6_vFnd7)-0UAU;(fLA#JCWM^m11YIj3QU($ zC5RGWnN$vhF14#o;MJ#eoiY2{+D{s=lG-G-N?FkbS#@;!Ij!o zT0qJIV-{sgrKK*&vM5^JS~92mGCH~hSLX>={E=*tiq(aJqIzUxmEiEvMC{tj|3f== zm;F(0r`j;APEsc8I+JESDi1AXyDD zE&ddcB^q2y4l&li|@9NgFOPA95L3{7k3h#?(z@71B9xFgF|sYO?j~% z9y8Wtc$ab|$QtJ(ci~XH@#tgt>h(A9>O&73W$W!Hp2RbE+=)9wLJV9zeb-%he9|Pm z``mN*;>|Y=P{}1Ip!)Kyw=ntBPci4a@BVTVe*DTS`10+yv6uk1eBJ`gnlc5uwrnw= z^$U;NvSEYS-oJAv(h?Goa{M^<%9TgJQVf+8ul1|6#LjY~VC~xyg{|wt%{qlzekeZr zAd+K_nD!kT)*I6-L;;pkJSg3Rz)1kLZs}qi*tP}B=FGxJ1gzbgB8_sje`}Q4zkc~r z16WEIp*E>lMVaXatg6e3jrDX8FbExuHI-(&hWma~W{L4{ORc!!DfOUa`C;?VsUIDVRve12kFm$FDT^yucy?PZF2~&M>REnYE zV9TUmkjgB#-c_?t>Xk~1vUQpRyUwmIba%_u*UiC2JOBR^h`PJb(a~n=GS$`A)zx8+ z(XoQ99+oG2x_NKB#>I2EaQ?KhQ>Qx6)?9)$R7|Ef4iT^pB)J$ffJNAdjuW8p_~Qf*0!VBE z0WHZB@n*Ri!c&$nQZqb|K(I+jazJXj3kN_R$jX&l&)Yy%hH_UCh_VD&C4tBiU~xM! 
z-Ie#_fa8RuxD-c%oUMti(jw7W1>VRlB3zXQqNqF=g|q_Y!7B7O=gcbbC%pL~DU;tP z#f6~dhU2`CxO7^I2aXFQlf4K`QsCTiB#z)n$U1Pu0S99oaESFIaV|J!psMd!eI_Z5 z5Si$T!!dSyDUcX;@iCqc^050KJPI5a9pG_2~any9-+m+=pfB?xzdw5y zKh5|7Yc{UH!j*F{eg4l_x^^DIH!sJ!O^Xo`F&mpBXJO0cx!4df9V?grfF+B*#)@Tx ztF;P#pN6%of5Gyla`_RexL>w-!RJ^!@8cW6@E>6j>&q8?Mfm!j`@X@vxnE)7!XL3{ z@nkF^)Gc2%7pvAR#F}+W5gxe;8@98(|js zC_BSZsQ|0vbgywEcAvY5+LjKJdO*R}0<7GE3U&}>sH|y4Z9}I4hfc!5Db0L4cMaXA zFB^~&M4i3(JK^h3!WPhb;Wq1 zqy$KE6Y4u_cA*8BbFw;4;V&?i}Da-DU%ue++=l z&HdpxV33Kw@+4e&J2=AA-VsiNtYCfHZLsav-?$CE2v#1pHt?{vMu@8`ZVw8?9RWeO z-P;e7ynHax!xMKBwv-p^=}C9sg?sMBi}&7V+<^~_7-^Q%dG^k`3}gwqq-bfs;7e;7 zzIfk#cZ3enFM?_%l^5s~zV1WUw zLpyhvMW6*!atj{ayN3`IjojoU16x|ZP|B9pFkCo&no+y@j@^XFmCLy=90#{=#jKxx zz_%ZNXxxQb|M2jx9Y(bha0#w-Ov=$C$cT?cURnxDaSAettLFrIx;xR{)`+goX7&ln zG+$4ssv}(0p{KJJr@Cu#?sUD$?A`}ijrjdaHGaF0gI{~&ajo|NPPMK=L)o{q_s~)I zCOYe1KwZf_s4JU>3H&}uSw2YQ_lQrsagTy5-e+tAzaLrV&{2oKlqqd%Us9Sc zLD7%qi$nrwie^qG5HJs0<0t_u+3fdcKjUE*mR4$uXtL9RBOWY$aML0Mx$kkr+?|9N z+R}s0SQqDu$OHmWEP*Q43maMAteJ)Jo>(8}gtalYh&X144TP*U2dxQ2L$Hy=x0MuY zAZwQm%?``t-VwIi$vR<`!eu%->MabTMR261110n}jDt(-AnkOTS;5M0^afGZmVRu*@mU`sJq z+9$AbbM=O;jT1scMqtdCiSYIbz=XT*#54bT0e3%mFYbBhUcCL`+qAdv?90#K({Db) z?8P(i^UUutdG_~M7Czs^V6BVdh;qv^tX(?;5#iGiwP_AEMb0KnO*YHrtXTE~R-hhUt6GmAPTxp#>t-U9ON^U~A4Fy?J zvCd!q!_?&(JlT8A9MgI7JZkG(3`7aAaP?DqJEo zNr_VM`x&+E8EHfrffQ7LQR3m(C@4l0v1@nCdhE zl|{X3Zc_k!pNeH^lbM(;+TXVYW_^M1Qm_>0ZUL-w#ywbD--C*(4wRO+AUD5?Bh5_g zJ9rdP+jrxq$~cZ0W!4?;YH5$qlaA3G1YC?6dGYsmlH3fo)z z!(}iBO9U)mM;Cb7IT5S|!ur;Ja3*MZ+SN@Qq-05{Qv8+Tt^{2c_u)u>?!K@vJUVV1-XJu|H7I3D zuE0-Udkz17;ROR%Yv#`Clf{4nFgQ)R~DeEEGa~7MJZYv2=(P9W}iyA4Fy`|#rY`8%Rv^w>d2wPNZ{bi z66oI6qAc!8FxqP3vg9&UyWs0I2WVHXa1hqpYrsknb&>tR_1}KSpMYsQt;O8g&vc#W zG0-K2OS1+0j=h3QSF|Rmvbu9{)7-#!CKo9wNk~XI&UYxofR=!(va%diRTUhlRPwk+ zqiVIawV+kN)zya1juyU0Roqug$YL7-tFx^JEe)mUXsN)-6V>QFS&vJ-jrjFqEB?IJ zjz6x};@a74S^|E*cm!8_qi~^TF)nsbrhS1k9dDqu<`LAF---Il+xZN~qrQ3~s>{b9 zhj5inu*zVWnC62c@h&)=NF!7Uqz=#oT5=Hzumo0zQ@PE$VzlA}RVhAZVdW$xEG2{_ zrFbEkmPRPa%<@N8CeKe0k!5EF66y#)1h1?d9|KOZEJBu`DYqzq16V&(&$&@bnNlfN zV`8c^GI)G`VE_t>0#R5JL<=Tt1*3pvUQrOw!Lm4nkQG8e^Fu*V z7b1>2WBpM(!j&ze5}dJ>e~1W@+=jz8*qP{wy?nr%V_gw`z!7T?xM0mb*S?sl-GVGv ztUBasQYl1|^41ev)*mOl#q%@AeXu3bpXc<$h8Pd5+-r};+lOG$w!v7c6b!ttrGzY{ zaab(1YL^RE9VDqO9;!L_bWA8jR^%24m)q!I-aE zgHo~fJN3a8ZOI;f=e>^n9t13!RICkz!A(cJu`@mp+Y@+yd|*+0SQ`%qV%@%AEZ-7} zg%Kk$W64d+kh@%ijt*`n=v~T{ zzh8&}Ee{W0Oqh5lo_O|I+@eK>$laJa=NC+w^E1Ai@)c$+ znu^HnYq5UoDy)iJf=%03W9zmh*tlU1)`ttUren?ODOkPo7lf~w%J$jDRA08_d(55n z8D`D+7z^irWuR;IitiAy`g^Qf{R5UQ`U3N3e}Z{)KF6Yk-(kVR?=gMGSF~?1Z{cLD zSUn$WBbFgz;~H$-7Ky06JF)ZVQ4?<^m!Om@#bCv-!&Lm$k+gKvCfF)!Xf$e;AgiRY z8I?--$Ii1(`ECeVO19alS%zl`5rid~+=mv>GEj9x!BPyC084H{0ah=|UbYFYS_p0( zC(oiwsUW(}psKcoU{%i-uN--WRVL??VyXmBf+jO-j?mC^<`Mx*Y8IjA+*RWqlyaqG z`_!!KW@ce^Ek~;-P8qPu&M7czmXxcUT)E@Q|H=%lHpOa5x$290%I1a z`a59h7(te?t_x`IJa3lVX>8WSgVU&MIB66txd?LzSg9HL*tT;&7A;$apQlX6Q%^t3 z0ZEgRS6hWnmV;tNFzCI_EP2+I~wr?Z!95!)Ras#FwJ8ELDO0%RWi6aO)hJnlDgjq|+%Xhoq!HHk zAul!A#9);Wlmu1Hb=3x<^0U$rb8w$=4W4LgHbB)+=dE(m$G%`veGe}$7S{p0w*a{GSwAt_3L#GkO*R} zZ5@1{G%N6eaT#7#tFg~I>u~BsJjK~$il;VIgKXsEh_ zU^N!awFIroF(@EBI3Lm>YqfyhZG{A7jFhO!R{;PW7W z5td|vD#21gaUk*uSrye`e3wU}xHP0srK08J`|>`$k;h||wLLH2pM7L72cMzF1X;QH zybdkDgx4Wtr4i&3d9O+71T4Z;3ZW^DrIfWkC{WN5WMfU~P+c!meaD9LVs+{&YWV zjdMrjVS*T8YV|(CmDDFf*3!KShIho87+1oTCpH}KvrP$vD?-eU)F5moc&*$gm*5~{ zi?Qf$_e9XuCp^+6BbUx3NrzKBpYCyMt@uv)Um8S{78@w?bzM${1e9B~__ zY#e~8n+ISfVQv0y8!Y0vmhN}NiW|AIRuZ;W69zXN@uGPmBHA6B4ttw)4Z9P9v7HZT zM`9r1aRh;D3?ie)VtLePOkX+z-%J^S*WL@n_&coN<#RLKJqTDHj&SA3)ycsfR@QD9 zIK&+eE`f0M427MO4?)VFU}cSd?4WNQI0S=i>(|Z1hV`=% 
z!BQ4U@QNUKEnoH%7B2V(t5#0Nwk?aWY2!T1B}grp^RZE}=Fa>#W=)rUfZ4M?#hf`` z7{FS*lwh`E7S^m=gowzM*ci1AyZ7(Jp`(Wo&5`l`gaqu1PcQ-QDrJHy1-HvG%F2<; z&QYd#ETd#K5VGnTP*Pius-{*nbeup-H^J!iji7gSf>N}kNXaa9wVQ3Z5(!mu52}>n zrT8tm7mVI{W(TACp!_n_jkln3g>smsQ7DA*EE z>6nHV9d{L7CoiC}O^QpM0Vly!R(8ILwUSzunU%+mPwNzF4ZFR%K;_0V=6iXU<&Zh?)0N(ZtSQ%Y?KWxYB+p zS_*ns{8d6imI1A*%9j5FteP4KWeS$3$weqQtK~DPtl@FhEhbh?pw*Y%{e%H7JvTi^ zwQJIVAj$$y0x64I@b7`{GO1UlP0QqTUF=KI&>_2cBgnlIC1w2o!23(jD#D)qN3db@ zHY{Df3Lkv%G48zME`){*H}Q{y`dPt+@2R(|FI;R~VAIbUHn&+3tcJjz0Ortt5S#}N zGLYqL0^Y3%SA??uLwL+USpCl}u)p~>cn-0Fznufdc>5yQ-Vq@VPDX{gGbjWPgpI@l zBSzuA;bBId(n8PzEt%9SDOUn51-EN<;hn)DX8Wm$lMIMGclX_Rx)SaJz!R-t0C1X^*2598-=zs2kS`WMzLTZU-1CmucosZxS0sZ~<0stWQAU?~nu z&?PsbVzdry+lqsPGPxA>nL|6bn^>&HGpCy%cR|(C*)z?yz)FCnO!E1JDgjnbVggDD zPz9{#(c~hOVkPD3c+4TJU$YX~Y01W2D5$cy4mE@DWM>=l2w_RbW09GfhJswJeVA_m ztGcqvC|3%2uPm=HP^EY*i|g$C~`9hSE(V$CTwN!yqVeDm*v<6{bt}lv;f>1)RDy9_{2C)zIK{|mjE5{${tf%n) zlL=UDaMTXl5}dJ{4|NyGbx*Q4wiB?n#JVE#h$~j@ zcfbmQ)yn-YJhy{M2CBST;YVE%5$jIS^1wzyRupe&Te2S_V?78vyoZAh1UeUlA95sI z*%GpBSvqrHUyiJZBi?3dob^WpWJ+AhYjEH4{jN6v%NDb@SYgWgewe(WAEs^YkJ*IC z1w7AU!5Y8Y+87V4Q)-4o9!9-V?A1ovhC{rc7$3q^5cVX7Vn;$CAuAZWlSW}r(p}hi z@D9vgIRc;l7=X7v3c}Mb`eO8WJ9zsHf~%_);Yxtz0XxFgph1KrTPat5u(tOmIJp?b zYLJZstQ{O-MYC}tMAf|I*DhK?G6;bTU?-OmFfCyc}^Z@-LxfA(*DL%^D~h@;;H zQ;f}A_=^E7saR_w7i0Z~1%#@(*t}^0wnQz$`iMDLxneS*ipS9wFZuyXm;8to%O_)9 z_$+hms+By?j8Cv^$=8Tj^CKeG{>;zyt!KDq^)#$fcKGnw2#?@(B9|d*YdE5|t;d#~ zn{hB^KaM5F;y~0CQMFooP8)^G;zpF6VrOZAEV&BLDz@TUpUS1ApxtK(Di;V~ zXRn~4`GkQ6fq)d4@~UQ((WKDGY6uSs=9W89+gk8gt}(@B$(q~G5UvP9J!er~S!aUT zHFHpD8U$2=EU8&(>DflPlDkjvll4>Q`l5!>0N>HU)g@P`@R&`aY32JX^A;c;(dwrLQ z!>S>W3DByl2vxKSJ`+6~nLw+O&#atfQE44VhV`aRprvOhuo5^4tSs(8)h&Rfy4w1n zi_m-O>W!I%*9coz2wQ!#2^*UDylcBS`fWp2ZYg4wwD!m`tXLIp0Bh{%vGDTng|Bx2 z++Dok=IjLrTNl_{Il^`j-_4r`q5pr~i~;}kPgwo)R@mRx4-WnM8%4`ufD(&3!Hw^t z6YDP89&8IA4w6C~T`}C%6Mj~<81CYR`w3R}3>}VphmSDjcwavQSg$dSuL4bBx-gXbG;~di)8z{^+9yt{xaY8g~pGiVt6U2~$4%%qUsU-FF`z8aEF2 zjvR^iUwqM|XOMeP0q?Iq{uu5VJqq^|)<%Vf;t?ME0zYF3be}tYI->XN`D-R&!qKA$ zU$Fw8zW=_7*OI$%-_|WgosuiCAT8CXQ&OyCf-6CkNm5F{(s5F>)F#lXH2fa#KY0ZOGz>+R(WnV%JZ_35*vdQZkHD18px7*wSU)ktXaMU z@kb9ExDrrFz3T1h!s!zo1dndxQj9+qgQG`cjCv*aU>yfxf-42FD<11`^dX~eNxf1I ztc&L_;8(r#XU?FzyN8f;2AUZtp!(y_KKG#9fdVO&+NW47nNm4uTkgcG*RBz?e#IYu z{AS=vvjNYY>&4Zpmko3&xV@yL$hZupW*y{rPfJTNDp#{+Og7f?U1~&YOFbGJz7$xsmGNI1-N`Zo$YbB)Vm88Plw~u zsf9Rq;s=~+`!_-B1vFMZNVuAas^T%IEFOufGQvaY2m(h42O|LlkU*ppKvD=I@dS){5(INkAO!2N=@ax(NYOnnfxxfTEDP( zDEm?+SLJzVrKQ7+sb0YSg)ECph7zj6czW>)4gh09$iO@Rvtgs zr)~{JT2?TUQu)l{S%joYoZ;o?yxg~gK!h=gf#~o5l$G{aKyv7?$&bu zs)II!EC)m$bHYa2mN;i@jdes6!E5_*1%rEHXQCH29U}}KQV_h>|MSGkgHBkXYaDeX zSP{VD-4U7KflY~?*rMa(c^tuOeT*}<@EqHdeX)tht=xAb+xjj$!k!Qe0Xn4 zt3V(-=uWsI5Guw?iB9)8niLW9w%cIZ#{R};Z5>3ovN3U2f~z%02zRmGMx`@>^#Uv@ zSb{B{Ya=0Rdt874tW7cgh=?Zq9UhL2hbLmy_S-Ra(I|ZSeHdPPD+ssWw$9FQ_Vj?GyBq9WoG{qV4z~@q z;(ihL)TUpE_@qZVW9wq@85IUg%mO~>jrGq85;EMrkymmq4(QmkQpDWPjM0cz8xg@_7QiZF$63JO2l!o|(6r)}VvZPq0rezuElKW7gC0C(bhU(Zg zik2I&v8mmtSb{5olQO}VS7_E@lWDhrl^{!InQ7Q_lFz7<*C$l<^jt)5FTZE^1p`|HR*-3LRR+%VeH2X_;y zZVw2;c)tML6*3f)f`W{DQ1B%6O2O{$KJ_%-fA%?}VoBNh=!F;Y&XZ3W^-KFdc7?vM~@&g zGo9~i2^t#(RB{{Ea=_D!lPBBI*2=b~N}TQ{ND{Cvo+VgaJb_;?b>ml-*IE8?^#uO- zwFTF&meca_+ciN}Gyy9T=eif6r{!C8HNK0M>gP~hbPvk&$DzJr5}K-SC%_IxUUtx5 zaad_LAS*e`zfYmc@L{RESYC)pbi<)oCmfA;HNoKt1gh9r7aWgsLu!&Ik_cY0gsHe= z*4%bP4xuSM#Tf~)Hb{zdKu#I~Dl-V#iGKWh6H?NGkdf+%lw@aPSvk6#6U;FB6$DqD$4EGDNh8d+ya92POl#4RI zAb@={p{p=+eChT})I{|6yaaU|S;(#s3Tudz1t`uKH9hV!=4cn3x z&lP}h*4HYAE8Y#8_y>tf_BOVK^t+vOyCumB>yJ4T%ItV;2W&X1z;k=7+HZ{&d#p@s 
z)b5nw*qc5Qkug5J9~Z1UL^wL)O)zvPV0ju_w$B+_pKy_8Mn(-XHfP%qZrk-GMU5sr z68IwHe6cZ3i$oK&V!W~Gs1G6uY!Ukje=N7h`WwL7emo3YVn-7q@4%+R_hCuoUHEd! zaQy3SU)=LBJJzrv2n->_1QC4PoXkSZ4(x30>|HTvuoDJZxf8IwO#GF#gNsqFY@8ej zJ`S*Vb%eE}E&2}`Y}|!Xw+3@G-JhSk`L_RWng-1tv?Ll8Y{_LPxRR@IMA%r93(K1$ zbN_%4__A~N4)BA2Xds4<2{Q_o0PD4PU&TvrzKA#8e-+gpe@9PjSI1M-5e}mF%2tLPDgmeT(&L1`iPm> z5HS_22v_sxeuhPhzQMZinOL`;$F7=*Wh)6}tL9><;;$lCVEyKGW z`=2JTT{8q_XD+iOTv^nsv+P*UvGY{!EGby}%-PG=aq8R^v~-*#01%uA05U;=(lrR6 zq)^E%D7ca<&;ncCr!N}UVrFg`ipuJZn@mf}s9xQm#bA4kB4u#{rf1}Eq*P?;mN+aW zAMI1PN(^ABlW%NNw(=_D9u!#AKhfc5v34Khm{mS9PBi?Stvl7c1opx~;yvV}iZeX|I)PXMORl<|8?rIHD%^u6_5 z{_Y~QfRBUzuar~Xe&F(@#lFNPYKwI4MkN&v%zi&mq|G5=b zxAY@W4TNL=0kFTdpII`;iSM7ME#b=A86JZj;KMS=-VFhK9|Jfr3gfxL+`KT>CjfVb z4#PPA0F3hX#dwyt2L>95dg1PS3}k)z+Uq7YgK}R(!&p%*ek(Tpra%1A2SBeJ@^2g=efWA{BxsP z39uf$|9;~pRLqrJf;njf`umpxaaoGHvbYPCC+otQljvw^!m%SquzTk&qgV;9 zG;>gKR*J)_s;I>A*f;}N+OH&|%8jL5SbzTcCqYQ79h^6DSHIr?tl$6mlTg(+W6-kw z8~YKt2o;aj(n=U=Z|6I93KuW(v%g+7Q1y4XQaT2ySSmGpP|B48-{n5c%gf@Ab~fLc zTvS#T@;%gghyufUoIJtrcK(EM5h_)KAnVHcPW*bI%K+BzS5Dx!t1Y;4xdK;c}aNG@X{5?;Ib4Lod6OTI)YOVQftqE3+$WtnX zG$*7c*da5;1%=suD9Q>#ZZhF2iC~fyilV$gf*2tv%?&wOUMMdOMGZkmKqXbmfEK|< z+a&~Y5uw90wT znred84XdvmO&dcPA7QRn&Fj~)UPq{EBwRJtk3|EIQF{g9u8h|%HsnV{^O{HW0Md9k#`q1A-YltUYLl6}tvw>DK;Ox@`cK zZ6Amgdu>qwVPIB!`2Omh+2=G2ln;>R!WMQJ*WU( zL888RE9JdXtd-O%!PWl;ESUwcOxAS*m0(J)L5o>jgepzWtG@-e%LHEC9JNZRGH}I? z^#VK73mmapW*c5q_V>%bp|eN%nzYnQUxsnPfS^IDi~vhB2vsWXs*3O+)k9?kQYl|mJk}C>B@;|3W=+s(*_Qp?MW| zP1pxm3V5%n>*BTA%w&X&>_Y6=y&sdO&BPmTyp4PBz7O}_^B^97>}iZAWO@4pVu+PJ z`uFGicJl!I z_Fy|#*1ZW>ftU~&f-xNAjP?)2SOV4rnsQ#r9Vlh$mk&O~kMI54fYeX_ejnez^R96d zD$rf3)|-z#j*nmbm&xS*_R~)rh>}7j#p=ZeA2hB)waHZ|;L;32#bMnwe7K3#GOofO zf54nyCS%6WKbzPpiK);48i4d=NQ#WN|gXiE<`C;3UpVgjO+?>mx85q3<`GFtU|$+)GMWNkbAJIAP+57 z73ip|F|ef|_q?=ZR&UElQoy6tyT?Vkuni#Bh0#-NvxN;hQTs?)~f9=4vt2Oxb zS{dtkxOyQL=T2_H$+pFWtMAcX`yslU-$irfQ+&3gk&_aN+OqNJY`PCkHIq=3Kb)Y` zH@h#Fkd>YpfTT3VFA=g>YG$BvWF_)WDUeNdR^XW&XuQ$?sM zCse64b%9L@fs1t=Q(ZBXU=@sp>QFS+3`1Qdk1Y)#UOw=c?g7)Q?6}!x%I-jzLS)I5e|f&tq%&`O1n= zTCgdrDu<$&=QV&;KmaTsfr`qp?61eMA6Gd6H|*AzfxnLt9OSU0_z(p$F6z77?32sl8WlM z^)cRvBwVdOOsG0U;F5cAzbn@6al*R2gida6IpRmq8iOrI@4~u0cVl71Bz!eB49~pk zhOu`JA;1iTo5vtHxmd&5*%3}oP6R9m*xNh6+L~}R#1^*pget<2wXJ3o+S~xEfw0no z&O>gc-G+XH`r+0A18~~_EA$)afZJ}f!>zX}B?FHithEU34P*(nWELe$u0q9L1^9=W zQMmG9g#?Ge)!mCQ$9r*cLfF`mxck9-FlN#iOt^aj?tSDQJow}T_~@(mF>l!{ELk%j z8@8=M)b0oqhqYk&bSzyVc(SNhgd;-KlBH9yblFtG)@-w_Qpd=&y*7LX*#@u{uO!5+UW~=7WJ|Crave7A*@eyQ@V6g1 zYJ%M5BD5%2G9?(bWN-gJa$&LaY*OZRtzLhsZ}y;Ef_+L>pNeGy*)Lr+pe3kM-2zZ1 zMFUNMbzX4w+aI_{`wOzJ{ASh=)bcmyE?ze-E5!-vhhA`?7%RnH37n)}X;~b_S_!Tc znc@Wm2uAW-0!vg2!6owCV^?nnfspvY4Kcte)T|#Yzg`-{Y;c5VY!l zcM<;GJ=k}QVzmfhrwLtWer3sL-Q5RQO)Y1Qi?F)3&qbKaXRMioN;W!w;Zl73@n^W_ zu6r>wG|Vh1HEh@@j2b-wo?iYKFvte~{AWM>QKh%G!MvY;!s1!8@WqE8;;mO-F~RHeX3Q|l=FFTj#i&(F z=FKySNWc2z6a4t~*T!`y08=LS9g!Q1TBQK@4J(#o?cznoWw>G<-|aQ4%|69qDezse zrDL|Qk1#Gpsa;a6EE(J78kBwh-aDB2|rB?vPC?u0D`xK~yc@m(t8b}1?<^7!r* z^Bf9tZ^F6L?FO(eo$bVhv#hhcexb(z)}PmDzxVJyn(*88D*SQ17{6Xl;xT)1@zgr> zv`j}`>AR>ddL9+I52848A}R|epsD&UwAS5&#;V&_F?yg0;wR&C`7v^0)KW33=-xB;qax%0{hHl;(& z{)U=S1S^73)ljt74@YZ57~4X4TmYeq=cs1AGT4Atc`2c;#GhrL0iwEEe!gLpfu}}7 zR#WXLv^0!0)=)DN^*S%xS{uh1Yik;h_U7@dk4016C<9qll~U2<%It%zlG0$Kjw#5# zqGBYfs>TuI#uKzAurHs4oA&4UV0VU>nGv`#mVgvPxH)8v6}$QquKs}qwB_6TW9>d0 zw!30O9N{KmC^p3h8wF^S@>(5p<38oYvN5*#xGQ$^53)BW2zzouus4rDmhOWH!j+(E z;|<$z+=;;EX!2=oBmu8yxsH&x;V8Fbop^31MDe&C$v)Vd7J~ihVc3^847-v;uqED~ zKt}}n1C*lSKVYSWo(J~-&r&%GdKX|R(Wpw3qJf>elao7~T|D6A<_FN_#J 
z8lxwUMbPkI3>`g`fORk4eE)S!n?D&VB9>sm3PILfEL=GYQ|A1D`OBsueEkBfi+`T|`7}b-R0C2QH!U&c;w4i|($H0_XAz_p5~Q?_;S}1>2wyvy$4teV@ab5! zW;#}f&!Wx43SMhDp>AdP5-c@DHQ;2) z?k*)uU?uQUDG<|sSwU$n(y|KKnPnPfN&uCRm}=_9C4EUo1y?euS&Gq;qNMP2t%M!0;$%WP0#vhnA0ve%4V5L$nL#b9$t5lj) z3pH)VHE4k@wJ9A#Yx7C|Se@Z0uiMNhEG(!-S!un2G99n7nkE&?qE-p6>YBO;R*Jcz zwej!MMxdj$YU!Si|DPNaV(@?8jKTlt51U&FSc3#uuJE&VL7}t{jkg`{1>QAI3Ltyot|WeYG#%>ZzxVJ%7)=c$<(k{j<;T(ep3hi&tO6 z*DYQ07eFnWGus5WD<&&DF~KNY+NV?vnoX#fthI|5npi94!CEqFhEcJk zV14+;YbI@ja$X6n6o)1EU{7Y_U0+L*Xi?-f^g`022ik$-yzTXz%(GG|>N;r!1M5JZ`9(5*M zIh$bj^+)UpS60T&xHZuod$Rnok2JiWq`Hr`E!7v1JnzN?H*Dtq&GD=scQh6i?}+V5 zQqk;qp20kyH8#iDVN}m-dr|_hm0%Zfz!^)nSz%$+V5}q< zDGR)oyje}yTDDh9>kPo0sD7A3$WjUlsatXruHxq+j=1yQJqTHDgi2RL#<*isj5pRF zi#80yR z2p3mdV@@tMu(P)&U=fCF9BG0r8E5eszP2?(U+P(=>MQIC4w~QOo39|C3O44IQTy41W>b7cLX7e#QB}xd)|eo#F9kF0uXEAGq=x zaN*kTW&vlVV(955j9vJH^($t^pi&JK6xR@%iVbMVB`DAmG)T!3XlUkKeo?hCK~_2e zD={gZFJ}=)KxN3xQgXr!f>sK$v-9|}D)y=bDXAF*v2^6*@D<`fnwolv&CLU0cbgR)w00=pO{XCa@Z{(G?cES$=YrAh zz6f)4!zgD@jB@p4n*)ZqxM75sCx*Mb;lWWO@afAh;`7&D!mE!ygjXJU7%x6}KVEzM zF}(9^A3VLI0QI}?!Q_uV!7m?vj89&C8DGEg7C-ZvSz}OZ4+^fHzyE%G@X|~8nc(!x z=bxE!+Sgy=moGlUm;ZhjbAJ92YZuMO!WmPsVEPO~(>%=nWimFdS&PMU=HRoz+80clVp1|_!Dj)LW)v#!>W$~0HM0p73}3FCRf+K^&P+#ZbtRfA z%1w%f%wz%q8JxV#3_?~4f7J8%qgu)zK0-*NX3?F)`7@_* z_OupI*8gd#L|Pk=5Ko}$=;$;6b(-MR($Y%M>BS#^{DE_4wFJ%;{7S&mvDf+TUm`U9 zXLg{HiC*UCrT(^=*@G4rp#V!0PI?JnI_`Hu*40ZFapv?XoaodfkT$+o9q8^NQ1x`1 znTkE#9GtYa81<@$ZCYOE#EEuvbhhySa1#d+EjV?i3%%!hSXVfJg5H~WY%?wpu&!L{ z!KDi)aFO+^7rXJBT!g=M6R?_b=~6k5$;SD!iG;DkxO8?K&YfIA*jm72XW(@Ar>H4? z3^}QzkdrRuXq-trT2(R{1^kW9%MM0vRvyoNkJgN zmt}4Mfvc~*yp#YW5UNrPlU!(0k9=AB5p)zQP_qM8~BXbq#; z=M6(yIe}06bWB4S2X`Y;-#iixEF0=a^Z}P(t9dLxH-_7zP1({so&(2m+$LBN!s-cM zwN)e89}h)YSqMtX6x1HbzPT^8L|#E(($wr6U-r-ZZsj9*T^?UK8aEvxc^L&O#g!1` zhMno&*h+epi*D)80a(1PpNYj zCb(cvhOe=GSpkUVA8tRXcUzJdfyRZ$3#8;0ltSge;~lUw#R+>eoUtvD=Q=hJ+v2TQ zw?b6hAnZu6MRbNc4k?o?|KR&lG{cW@7VU)a-F8^IWiS>*4#cvpwkE~G`on#RN|){$ zf<@a2R|LelTLxnBZe?tD!Lr?sSh2^EAm+^bcQugMx6F^D33d3J-5P!qs57d04~A zc?iL2h*=^>9k{I^%i12+R&x3E0agEggslF9Fi-*Rgr|PD4I*F-V*6kW8fb+Wl%_LkL*@ zc<70T@brsM;k}RF#*|q<;j14$!Ph^2j31|ciP?)MW5v38ShbFw>H68&uyHO&{d4%k zB$euWEL!p%R^ zfANo)yYMHLQ?P6muMxS-1i7zTzns@yhLsyuVa?`^SWm#(xO=a$orJ9Yaq&2mn2h6@ z*+|OGM_OSqvdb!vTT#Wb($sTU&#kCsy$0E(ED1L`<#i~nZ$|^+r#p*XD!?X=#@e;PY? z`498s-$sxUWGSFqX$a(=QZiD-XUUDIeTqSnlA|_df-fwoMqXYq3JQw&@>UU`@{q{y znn0LLOUvNC0%O_Pxya=A&nHlpmX@QWq|B5xHT9^fB;1r%u@h)!C$5$G&vWEVcp*qN zHEI;!ZOYo3MgwDNudN~cR5ln8l4@lEDnXW%D~qb7c0rep5olFbHXC)Swe>WO-?#n* zuiZ#EtTRfNC1$I>dru9Ji;LSkzNn%LIZ z)`fZkm)3o>lr0^n&1_6rEAOw9_tnuY1@RpBY005pW3Aj)du!J@RP$adYWSQhn~

zfZ$_fjNL$al?v(_^a-}DG0^LVH-r8JsFA>M0x*;Y3}j)rD;t9yc^K+0C7`N6XJaAS zYV*-vT_~iprWhTyC1|ZGLQ5q{RSBK1z(89wdYT#tylU|;nd!TrFlQSUE#HBKi`Eg0 zEWq5E^RZ;kVzRInV2Q#q9NKymmwvd0o|zhSjMZ_Atd`}}VWk!365K4N z_gRlw3wB_^ii23O{y63=IzYg)1#=c3#_oL=aPsRX`0C4tSi9;pW+@!P-04R!OZEU1 zqzSYnHel=r3m_}G5^|DDF;{LmKG{kZ^Zn-pR$gR5I740K1+HKC8Q&b;hmA{TWA@Yu zm?J+0vuBXfk!P@)i7B!(g$HXI0hWTSv;eHB(gZ{j5*R;zJVBErL6a0g(kJBSNc|G+cF zN6;}+hmDgdY+X!5Sdih$(m_uovpW(zalRFUmB_auh;k%x^QL1yT=R_Hn4JWfsMN-oP7Mng3eVubLRD1 zvQXZkbMUnQEN*+~7`M&cWRa1j${sAfK8%!PXmlciLgV4)84O(mOQ@;Sz1q6M#LyB} z7PbiR3V@k`2`PPEsMs1#W|nZZu!6Ix1uS%QV6IQ!!_owNKDp1@T78SD*J;cl%@5JqrnWdL`P=xzyLR~vY{*ucw~gaFHj;L6Y2 z4Zc3^!h_}PWDiFNTljjqA<)M|lxqkN2}DeElvtCH^R6Pvf{%%f7bP3G%6b|p_HoYazZ3YH1c!OMUs0-DN6}~7C{z4Xd_QgZ|Ooy*MP{usv?-GZ{zI&0~8!6IQR$PQkg1IO^Z#Z)iQI(f4efj`qkMBV3@;L!o`Va5G*-#6v=KAn- zv><>bkaV#}FacI1SN?W#MUay_qPzn{nTE)qNE)a(sW=I-ap?q2nIi8hHMIa41V_=) zX$TIELuhEC04zU04h^LW_zDZ7|7}nlS)y4Y=PDty2%+>I!7-^K={+;Q5=m)=h#?RQ z3Q2&8i6btazm2`Sk7CZu<=}$B(>Mo9Vyf^yabmh889E~5_whtbr9L`k+$4glk&q#S z)!6^_A;uDnv5X_gVi`XQS=3L(LebQJB?+z=vSbO+WF$zhklvUjqky$bR$|%QMNlAJ zvYY^kbFkP0B{xwDGo)llcgaC!{3Ovb9I+>gbF&znc$;Brmb9#B8Oqor#d%un?UEWh zUceLs7K0K)7<;xjWXPYvo-z*q?ccrwyEd`0yav0sZ^o|8o3LZU1_HkoVg}>4M~~s# z&psz(bs07wxt@ay+q3FUb~7@`}PUI`uX5N zT;iOp6JO%$w_oG-uV?Y%_7!NnybnVSMKPJg!_E;dmUd#cpudX;!o2-O?iB+Td#-r) zpf&YL&dcI+iD6MFOwSS((Ruk9&Ozi^gq+yUP{lb{mX_AAvvUxM?hII*o5jiRoQK6V zq}V`Uj}2F9=S25{!ctMyeY9KyC%!Xu`EW`BeZIF(00QZr7_`_c79JWZ$~0J6nj<7Q zP=pG(b#rw_SZJ_-EuK}#xmm3(oTy$SU@I*(bp-Orm|=Kg4;IfH%%{)F%FHEL%R^~N z6+sw5Ai)*`7=u-EQaVCIBE{m;oV3p?LGX9x@5TFeHuhq%X2rU5TPtDqX&1dXJ2yozeT6WV_s-iQ}5tx$^Z!Sm>LJP4!xxK2EZ zY{Ikn7HH=U;dV#`E_&pN`dE4egLod-jAyZpcoiZ^ zD8|>&`<1~czZ139pj`=e`BS1PxkhLFE z#&3euxHXWOxEj-?R^!0VlQ{axH#qtEX}o%-0uy61Jbw59CypJ)#+8dm*-RsAZZc-b zOolAo^OUJ%70S)PRQXwuCM$2Mj11(bku^yoIg!9-3@Jr|Ea@pzAT2co65|fDkyk`H3m|&Tc_)boPV3 zz9ozetw}6lYGe&J2M?GVn2|!aARuyty|D#s^o`+YOyAMW6n5q&u(vWJxUzt=EzbZX z(6XlErn;~;97$}q)KP|+)=Lq|XuxrCfEyVM3|ZbDE(q{;N07faDfb{Tizz-n9&zyr0&rO|! 
zs3*v(ZRtZ*V~>#PrXEortGJ>TWtF73$P(vi=$Xk0NC*vtyO|;MxK`MObI`nc9tO8B zL!AI$_2y;hJ$nFqBP|gk4EA#sAwm!Oe+Ia?iX5yMf-A0r<>Tl3FlNX zR53JhIB)_37J=0mf+`lSmo=6l>)!-fd>wn4{{k!lSp-$m1Zgv+kk-oH_XwPJezBC-?8it)I>c z!20#bQIY6=@%VAv`2Jhm{rL>;{_!&&Ui(AjVCgD9gSnnI+^p?{r-}iK6WlrfDmj7x zB`{O~7ef|<6??Jx94EYICL|*(H3KPsCZD>E{*^a=2E^^62;vB%8c*O&UGg9r)w z`Fe{4_TZoZQLvaxHY6t{2v0IYRaa*_eP)}0tn@TC7ScudkG)c(vjrm~Vqr-Jp1GMd zL7N>JRW9^?oNJjS!iDK+*<#H*3=j|}M-Y{qlkgnw9_98^Yr6Rkm4z+!6k|AJ=N zXVgMFstihDmC#CPgKBgWo&=WTX;3AehE(HaR3o%gyP%fPiN|3CQ;}_W64{E!)G8$n z;6ZpZ?u6DrHLD*_<63YhtQLx?-FTTq@D<&NXK`(K5#Ns60hPGrS1vqW%E{eOOYMbr zb{|xe>+viiAI~F;U{l%y$BG^((Y8re1ALlZ!!)Y``pH@Fsjo*+YaJX)(h0iKkkDO& z@_{bYb$6hty9GT%EqME`32z2V(A^S;-qr~8wgjRm%@BD>I;bhML{GC1UiXBfuPp>U zZIS5jOeQ$Z!fDbxq@cAZn%|S(a0UC>H38p&GRoP7E zD$!O%07c;5Lprm&ss=sP)#$6MMt4ma+A9muQB#cG<_i34iu77co4OPWW-r6S8FMj1 zQVL6^$zc1!h1fh}7S>3~;>4P5czWhMG#)&Hp@9aTzIuRL_iy6Y3+Exj)zW7zgN(v* zlGTu#L5gJICs3HZ8*;PuVA-0JSi1Tv$j;bZzNf<+L$MSE2tAGFJzrvd)E2jWC`RSNO_dkX1Q-Wtc()XA(e-36Zn2VL` zR^rEBPUAKyv8Rdzx&{PU=3F&h8xHO~JJ3vcuWVfOVdJa^OM8MEUi!wBJy$g7`&x?D z-OC(azE-5PNa&ci56NijV+|iaQZSx21W^`naU&q1ZFf&=0aiA498R>L>+K1!3}IxV z22Jf3c>3%XG_;??#7YNRhDy*j8WCOE)*;9;Fj9q;wLbKXH1JaSDS?+3oIG6NH2(CCp7#$TZmd@eCUJeoR zOhOihEiN?56{X|Ix{i+uLq<}(sDqW67LSb7coY}ppt`Dz29R=8R#l>=t`_ypt!N-i zx4OO+RSj*zgVoagTI68ylcsNXp3*Z{zc3poIiIJ=FVDy z$r7@dGFA%HsZUS*@5jPh#hxtAwcpws|KV66T#%F@i-pe# zFBZX)SS*@^Jzxt+e=Z{>$_UXSIA=0lnc&6!hWuxi0#0?1%^v zUOD$Oem?OPKHI(>XO0}kNxJ6T;m>g4i{rTZ^+`Pb^%p4LxC*7)*YNVrRZ*rvSLK-~ z(_pS^AmEDOiAymsRE?Hs;O)`+R~$lQ$l^S$)aW?GMMM)&hY5&cFBJn60~U*$n}+}_ zhAs{pW@KauZx)v|XlQ5>-l3-Ec9DCD&yj% zYown{1t29QB*4YlQFy7?W5vRGSGG1*qG+)<>8se7sK4{DT3eej(BFr`{Cw)q-U6sN z^cNTqio}Fe;hj=^rHb=^T*l8o{YGQhMcn%H9-co{gr0=BmVTO#KZ6gXy^1pC$}9hVvF!Ht`s%|SB_QP&`zm^X?7Ew zX3I+jE5 zoJXLPTaKpm64a-bpe45k{goXg&FCtwKxbhowIn5_Xe%s4bAB$`OY#Z03ej6#gx4)q zc-v8fe|`M%T!Iwpi1XHA)+Bk%Ck3=)?tE;Km&1C=2{^fY5uTp-99pDQO=*BJCx!a# z?hRZ&|2r=Jat^DOti}x41(26r0y#NSWKv5ZE58Zz7VX2DP2XVsj?++>{|Thzw_@*+ zOSp000;kWZVB4Pa1Yt)oaq>>c$bJe*sa*sfn=wgp6QrkZ#`f)}@a6GKxOo0KJ&2aL zb>|Uw?A(E6OBZ7PtZA4&bv!AwDVR4y2C~zpV5+RF0IaE`h^9~33w#NO9;4P$YQvfKuU51vZU!8wG$b(rv1M{gxvBzgVa9(i_iTtY&f3Y zZ`}Ba0RvNACDi02?U2S3bI(R zbOFBl_Dj4}d5#-2$lrZ@Lo7*SVb3sS2q#Z7;k~kS)P|L#4jf#JMDb8ZR}(n9n}HW} zcJm@w^0FkjvW9Pf9SwFP;_qiq9oSBYA8q^jJHpqW*Yj7@iouFOial5?qkz@__C5OEy#G5dON0x_GEL1Y65b03CqL-u zS>e?yUFhpt!rj#$LH;xVJ9xp(&J6*sz6f^rgEwuvnv-R1?I;wyvn@d+!H>JM16&+z z;Al;t#bH7-F6p371I9C`J-H(QOY!zas6V_4W90{MFjR#HPcpF76NN_YO$l}hu$(v{ z-pYvH#{}+91ZxCWo-FRp^m}`WnMxtS0r2&77dcqT2{EEfLuhaa0)v7?aZ!e=G_u;c zO-V^c3R%yIvC&A1jYL*TB8u}fk)M-_tn@?_=4GO+v_N>Us;etS9jq3z=&MONl~t1! 
zUq#k@0O3F$zjGIwI)_HQR*l`LYa2ja#{fZA56WwrL_&OhBefOfsHMRpjX=)J(gY6L zs?h%P8lIf}9{0Zc5_eCXz;n*Id3;9{m$V=NwWsgu zDj5+8Sp?)6h)&KSxGq6_YCd8TvJo1QLeFa?EG%4c<G` z<0gu_S~CcYrcafJ0)f!H8FR3Lz-Jjj6Nd~HB&DH1(8cY1xtRi(*ki?^z5QEvi4Y>g z)#&jpE7yU`J8)QVzT6Cv)XwKP|4NoXZ54qWd#yMu$dJW=#d%(w_|EUiki|0wS1z27 zt?Sod#M|bbRp0#U9*5IpsdvNyHVf=F97=Ai= zTmTlYmH5qp1NiaqA$-4oAI|PSK=LWh5pdo6=?paP+=Tv%M`BTQE?r_s{o7PW3&t8c zFxTPrI|&f}D#gIF3E5M{+YDJ8GUViU_Ga-l?A?lsjDfqGr+}%zK!TjG2+{IBdwCoj zoM3HjOQ7U0yj)ztoxN9_AYMfV*FU{hqxn}1TU@GvL5pV)T3OpeRZR=$FI>Tyv**Yt zvk>o{oR}m$S6s!N^R4X3@Z#il8*5AG(swd3CIgnX`8a#F^7FEhmzyneCm99<>3hXQ zv6m~2`ga5z>G?M?w!pKeuW4ZeMxsZ+si$n$K#^Sspk%!gZ)Fw(c4EGM=?Eu%qdV|;0@{&0N!$asB z96%3&UvFyWE^N>2+ z_A0^ium&hbx8S;aF|N23<4Jfc?gcgCrcW)N#rBQ>R!|+3GGF6*a2L8 z#ErmOJdJOMQffc$hBe_{cr%{I5{UWN;Hp;{?uXanR%jk>1f{~PvJd(NZS=mhUDyVv z%3c_!*1|Zk9JX22@GPl^dp?0N0ZmL}17d4x5RqMo3^LFfI(o(TeLLI-47LOP4Isk7 zyS5^{Y04wWDZ`uQVhmO1;Y~fkSA9A9%kt4p|F`a(Y;?a2gLiK%FcOGQI; z3ObVWFql_`H$@H9)}lA33O#w%=*}-C@FJL^|9Mwo4tk2{y^8YCRg{a~@&X~Eg?=Kyo>zkEPB%aSB!niDTOo zNnBVyAL?iJz*Fuj>>i$k>4S5yc=HSfaxd`q)@?jKcL}HeJcLD47hwhg)>QGu5EWVo zafy|XnZ6P8mhOkl>>Uu6T91WmPvG#yH#mPs6H7K+#?*z!FlF{(Ob}zo_hOvjR*adv z5tBqVK~Q`n4xf0A*NQffdt;7!kCd=$-x183GaECfOOe8y1Ze`S1vA7keF`Z`Q2~e& z5DK$AEapZhMJFsO3JC%$2_XSUh!9{+BDfhh9)i>|nM80idE6ujvalQ}tjQAye8x<~ z1cD+#Hd~OC??ko^Alv>2q((!5qcAnPZ!~21pZnQ1o0a$*3lGxwj-l`SouG-eV<$|+ zIKC$0B>YTp^~>*LAs{47P$)|0PN02bFlIa{ZXrPkhzdb!x-@p}--U-yAK;nXW4uz3 zg_eN|tQ}d-l_{(o^x5at%f?_@0anZAG{fIC|Q$3hqX{UKUGo5AvdW?_m!Q zM}iZ33%EPk!rR>mfqtF@v3_*F18KkrCShw71|mKxoQM5VQxbWIFd>oJ?hu;EDkjPjJsl<0TsMa#B!TQH-|cIyBT(p{%TkC%Jd^^za1tYLBF_Oxqk%IcJQhj85j2>_A|{M2dYy{G z!cyesm(Xv^LL3c5QPBidvB`Xi93}|~thi({!Xx4k9?ptP5_Co<(K#uIBS>QlJ2TG} z3lp*&tR#Y~(Ms-V86`A$X2Q)a2p^Pmap=HV?!gijoJIgN9WsK_keVP2QRMNRjl>;YBb6$>tK^8A2 z>gLJAfef~oH;VyQ3_&=9ESCIUmR-P;-Pt)Q1Xzv^&b;0g11knTp`jx!%f(_IEhi@z zf*dz~JU_pHS3YND5{in;{>yvC@~;?FjfMspTrpw!R%`~L4qe;HQ|EB+{$qF&+_|_g z9T>_1i$RpDixU?^13kP`dVFnYY8J7t_XeF4W=IgJBHasJ$8ynHSX1qCH2E2~07LyP}5=Dp)# zPy$Flwsxa$Aj%1@>gcz%(r;mK#k^RM!XlDI@LP9?t1UKMK3bi2(xJD56Jf;&*BU*7Upce1b z2BA~%g8-`&&%#>pA^9VL*&yU1+aViTjjLWsxbB|>i^gH-m2}~TYdj2cTj1636-J4* zFpaN-KV55Rc_)0bD-l{yjV#h*MQy#vuWv&&wSDy47*u{A9>QNAyYaoJ3E#Wx@ppF} zhAIotRgj6E!aM>a=9MZVu&Tfy>7V|LJai`!P{pO9BQ_0vq)Ud<37ANq^rqyaKQRxV z(u(mlrxKrY%JGqat3R^{{dsxlE6hY+Q3gJi<>E_K5x&-z;9Ff8el%6#TVoZ5Ys>Jt zt{h+LEAf-K;6h9nUX0n3=3((f>d@mPv4X6)ON$o3?D7ftJ->{=S7+e)_#m7goq*QG zqcFI09hx_9;PSSexVq;!wl3a?8NzcQF?klGB$h%_awS2|GEAAakpN*8B&0WC`+*18 zxaS^bE;)v&i;qEM<~~TyJq*!l2OuoHA3_ql@%yCpkeaa%Gv^(|x=ojG>YN-dT~ox- z6SuK;{T9raJ`K}k#Ccf;X|V|ebdw=2GyzhQA`p|1pzjpNWU|bJL?t0EJ_VAZ;-pwb zAx7{rnZRYj7y_Az6CuL9S(5}HM9NTjvJgZ_`3X%F!UTe($)tEi1;psJ2qupoDH=MO zQ}us(tJv$&eWQ>yDkJ$?<2XG1HkKfZ9OM{(s8ns$4!I)OSGqb zztU@V-UQ~k5*Nj+d9$#Al-}`EM{(xjDO|jM7RHvkFtah>AvoqGGPc$txFSX7V+OYX z3wQ;wnSyo%Rd(rTfl>p*c$J#tGbk(OP|lc0k_6QN^Z4O4R`xVZR)&G<8=iY* ztEUV*eP!6{e}Iji5^wE|*qViEaHQYJ3XQV02-(8VabaYE`+LBbK8t-`fDc*nfn
t4q~v4{TZtsGG0_MktC|5BTUv+ZU}dK# zAt#d+8_hyZbs3-4R9#bz+J*)+wRfPpu^AQh&1mcSh?ed_)DUP@Hg%%Bp@Vy^+6k`O z`aY8t_z4YUqA{_E#ZzI_dEFPy^jWBYM$|DSkxf(F*x zm!bXkIWNj;$4XZ?Si+6K)YH}$QC_}C4T(T@Od<*ioRZ^M8HOnC!73;$Lv~Ifve+P$ znuS!d_KQjJ6_KzccvhDvAu)}&Y(Z!SVR7*(h)>Eu0$Vz#fMANCD!qV&+N8{4?!k&r z&Zl#7>3b4kpl^fgS07>Jvdx$@fpo$|Rzg7r(vu}HM|?UY#*snuE9rvYC-DWIg{jZ7 zTr6Q`bP+U7`i1LyK!kLV_=HJhj7nmr#1t%!78rCgYiuFsElPu%y zq8YO=Lqd{3YbKpL7i$(TC8%0L@U;McEMLi!*>@0}oj-a4H_wx?d+-Rs)jr(5cm?v0 zpW-nY!8a~lz^S8$ac~a-(XnIL`Nu}glqAq3@MB)F^-Gpx-{vjYPqK5(TI^oG7Du*i z!i9tTaO>XCc^l8YRIfI8MPvG(Svru?&8@fvJ z1XQZL%n2*z>tb!o;fmG6aAX1XLjeH!L)m9-D~d=r|!OqT+dxavSV(A%Rz#WBX)<#70P@1m~8A$|O2?;4=SX82;qniU1tIA$cRm;6t ztQJ-s!4`uqb}TQym?yYfTGKSn5WukH`S=(XrjI1U%oNT9Sw3ViXs9UT z(Y?EP^58z+%D;lbYdJi9{1Er=+=2pIkI~8k#s&sZQ&EMdhc_8^7BJE`fs3OX99fdT zhY#$m?ciwdOrL8FRb_R&dHog_&RxQReTQ-R(ltDJB#XDN74Sy>EmT$1;XuFL-!}lh z)Q^$~IHSp6NlQ#cIrSap!D2;5Ka+v^ZSXUJ#aCWr^lw&Vl=S6yGC;o#5915z$**+I zck1U|_1$Q%C28nKd096ya+={8mIouhTmqRk=*G6=ePA{86T6`k-;L*9W%v-?0pq+M zP>Jt_YEmx@^SVtaLFccGeAs^k2w@Dp%99M>$VHr@&X@F*a8x&&8V4T|l=h6Wf#Z|yA zwFZ$@1BkEcMSN*9GOAip**1W-!LR5gs2m*r&XfCx2R`C!Umw18ci>}f1qMos@STj! z;Sw@D$v|x*!>%9IvdTwNoWa9L{C&EJ|z<@WmVvNb~y&) za?ll?g5KzK3?$Hd608h?LGW%p5C*72`#*i4??n0vUsYyAUCJ712*G!2kAscwgTM+w+_7Vb2n% z9^DF!bH{Lvl*6U%hp~6<9?Y7u6cS?dAu(kMghUqb;-OQgvbt7VF=sjf)7qN^PA3VV z4rAj^S&}FCW9tLVoO2wL1UExw+CEI2xEgb2@5BDR_pxouCG6dG4R>$8!@XN_*tvB# zW=Kn8x|9T_ONwEN@I*`zoeXISVaQAogE(IfhoDH1;7CXkVj?nRxk?c{i9?X!Nq`ib z&}0HC!I30)!AXK7BP}~7K-T3XvNQ!4bdiD-7G$AB5eQ8B@9OOgvKUw~`A^6ief^)p zkBuep8asiY>UaE3&@`45^=|}NYC`yE-_IW0O-3WvI^7ey4W=m6pD+{f3jjJ zPMkc7z5D*e$umds=G`mk8L2}_O%CdMiZHX&hLtm0z}Xz`0akGJXRRdxmMzb>@@08d zp-u>na79prI|9R8;YYCL7wm!n=DmvbMRZ~?A`=1;66K9xf~~OFKm8Ldw>Q&ZlGE;eiNEBE^}JjL7s9M5HAnDm@MP)ir3N4%14~P70!jltnKCtZ(1Z zNx;?E-HYblepI%!p}4jl)vfJl?CM2HO(Rlsir^Cx4NXIHSUY&YKQso8uDC#k^F-1-U3KE<|Qp3gTiSN%2P_Dl8Zw z?7YwbQo2#drhAr0fSQxT0Huh7RSOL=b*-H!t!+ekV=HPqdr;Bbj+%A?C%SLd?Omv9 z?cn)qZ9Rj0jlxb+){SkQJSo1jhb^5`3AUhgkQ+g&i2>}?-$VcDL&(u{cyHG>-1&10 z9v|5ag`4M~_38mkH5A}zq00-h+8OJ@g9e-s7gt34_#uYX7^3^0k(x;sc_g16pP8A5 zoZKQ*RMhc>ansXsP+rc$HqWn6-_9p zB&ldbc2Nz|aw~YT(fFi18bISwgnq{hN%n zpGT^?i;NwqgT)|=!I_MRIOfRAz$~e$kfv=Z>dVteKTQ!3z*G@of+=atpDK+tixy$` zri}zrGq7sGd~95?9Lwjhyz=<;_A`kxJw}Q===pdyL<(Y&YXkX zwd;6#;Ub=2ybNXfOa=N*xm!2z>gIL4fASE9%I{#Rr3OoVeK=cL!^7SYE;iKfEk_{B z-QEFyZtjQ=k3e*A2pJ+F2=?}Yzq<#*{R5B?8O@Wq8Sr=zc=;0yh0!w;9h<;IdvOF) z48FbTxIgvTxFiDiuxO+Zq_bIrxp_s%py#WgfYpU-LJa}d=sJL`!h3ppHhjFE%j9&m}qO^-OCqvN|1H-=nGo%0D2^3zug5pa#7-(t3#?%}JnmVvGvxKva9W0DY z@lNgyZd|yE>ld%$34xy6vsX}7d=Cu*Fhd>E>2^+VB`8abj_0K-vZxQ0cL+yR&AV_I0X+&pb zJF?>Q;Tw{RfXr?B7CxsJ5_pwhI5Qt# z@^kR9C=I=ZDfrSOR>jw3j_ z^(Yq4Sq~|hWe}aR7-CXOAuh2T5~6EK(d@;d=_fEv;xJ@nj$^|XS)9F~hjSNnaQKJ< z7A`!63FB4~AZ;KEa6KuKbC7?ckC%_MAooZE1}e5tk<-C}9s4nV+6>H_HVsn3!ek{* z!n~PNF;hko62gKIXE|2{Sgg!~n6Na-bchO(2uvkllB8oSxt--y3H?{z6`MIYlH5L$ z=+2YfC$StXf*d+eNI-~(2>(yOVgNPjsTzeTcI-brU2NY-{i`wf`R89bTuq?w8ADbv zORg8-pDzqa@hOlHksvFXdCw+eir5rPBg=BSv6)*r#Keh5nlLUdXrV$)+0ml21!^f<(%#n3t72#N`UUxYvW zBLff^9mrpYC9oW-NP2$=+ygz~73f3X7Xgx$7Zx1+lyTj4PgW$>w&IAc= zAp!7;B&dmvKv-HLQcChsP+Np+V6^`udcS@{=ci$^ScWjjW)PABscLOQc~c8Y z8k*?5P6DtmsPF7TLUt~~5>wzu3fkPs1vai8@CXWpt6w0&BPr$73`CHE4rhthp;7Q6 zCF@NR;1`PEfCvNyN0L$xf)`sX+Q|V%D`ywgQ|9x}43m=zS`1v(`s|ixvDFEfDN(kCbqNqR>D@)9;TYpo${M zN)8Vpg&xJ>%FC7hAG$}(o0S+7fh@WwCHVwSd0D(911m_LL8a!-`5#(xz_yDrH0|Su~9EOCzP(+i&gvTJ7U@0YyRdUZnVnP~<3d{NQ z_4v3H4qa*KIS8Tuh6|fn6%>wO8axwIGLV^Dh%^G|r0gQ@!OADNDz0lM&}!sfthBre zWaL%Sxdrg_iGYrt6&^o%i-QNxV&2@95D}E&r5eO0kiM88foZ~1F@vCMCdst%Vvzi8 zGG?+xo7qf2GQ6b7Fq%SOB{gw8B*u+{$Zz8y{0jlpzb9bw|038T$P)ZF!Pjq-AU0MI 
zqAZ{5H#(Pv2&rF-lMZ2^HB(XwGbAK1S6T|QB*nR`oIeL^7caoNC5s5EmJw(zz?Ri3 zv0$1Efz=Wm*uIUpM{s!GZoD9{ zG1Aq7+IvMB;NS4pSWlY`)-1^wb;(8 z%x~O^HA`1w^^z4>y>tckZrP4g1YM`LlAb5fVsLe3-vQh@dJK=woyViIq^ED(gyxGE z(0ug@ZwWr$-n$2#w{KyrsscS_Wtb6UIa*u6!O9ArPELpj3WAZ24m=zk5f%^tKMxNw zF#M4g9}johXQHb|eIbyZjbwz<{~r(>0T1e&kpOq$pllZ^`v7l@yP_$ z>4>9&H6tsZB#-Am6%kC0t_#Sjs29>}HfxW~-g9zv;|b8WbL!NLYUE*=PFzn`uzgA9!_0?1|ptls7h3{&6!@sXfvXc*rH2Joey zpz9M`r|>g=d?g6^!ep5C_o9pRLkH=Jj?xNr(!J=XzE>8Rgh;<6BxJN9BC8*+(XFtL z?tocjJ9NXEp+k_Q64ijGzJ+)lSwnzDKonSw$9}a0SYM!;{2B5QeRvoD5wD`U@hYZ= zfa(*TCwAdcY%5-8_CqOm5VCQNcpOoM7tytN5?l&}xCR)O58-`5J08UsL${;@*5zGL z3`mE0LJ0y3o8TOujkvM~)D3(=H33vR8P+WH`GbJ*GZ~0q8%S!}@r8i#Yh?=t$-rug zNJEW(Eb9H@Q0o?fa=Sn@xkjSZH42S3!DzG&MuS}hD(z!X>=2IEerU^rL=L*E$K8}3Z^GLgS0HLS; zfEU4)<+d4kzkCvu)`>!8=K?6~SdA-WQQg>i80UAL#)joPFmK*^h)r1tnOUnLA+rh+ z;_I+v{xK|?bqZ4@4q@f0J2-J#3&&5YVe{6{ zLQ_d*(Dn=tT*88)1Wke*o<`-r%QK9?)&DFbmE8X)z#1*vz+SWCY{nrgO3DC>?f?1T zzmxn*+rQKCksSt0qQYWyekXwtkZ222Kba<#AiexB_|dMSy2d23PN~tFu@f84?$COYBcRnB*=;8RkXu_#R!S#oSZJ%Q1MMF_ajRGu2S(!*k&frQp zI3yAQf#C=Y4CA2X?BWSqTPHY>b?IbfNf4z6OI>By>V1HN{(D#wSm`M}hW5M1WZf#m zkY4L+yhfmx4IIohU{Cw(ja6Z5s0<$mBltO)!Nb}JfzH+l_i{q0y90b3tPtelfM|j% zCJ}Tm5+Xu5K(U#Dagm`Mt{6nIZMGy%ab70M3Uhhh72D5Rc5X7=x2*IOloqqao$FE4 z&`6NgjGC4X)RCoH(bR=fl1kdnt7%4hX$|t&5+dDwX#eyDO&xt`ZR^Fr$FFGZ>_G|L z!`j9sbai*}#N8wUKYuop($*4I9~7W)=^XCu-hs0#m*V`Ir36@ep!wh`3|~KjnW{Yf zc6E5$nEMr(=b5bgj2$mA!Ss}+{q@`vfBR!YHRywuJ z^T51}K{S{I(f<v_ z7#qOS#1K}dMzAq6;b0TS`hlk}ffgAzcFqJcu6)}-OOJa^&YwKX;cCO`_1L+2JHgdT zESj?bJO9{>6Wjm9xt+UkaW?@3LDqvaXL0S&VcaChy7!M~OPv7A`2Bl=Y(=Qjw#xJ8 z&{ZIVgaAuVT?2XqC;D2N9HxTknK01Og0rnH7xp^X*B9}Tk#vpJ|IE!fV5KCdaYF}+$YHZ~_}7a^}DRoZPz? z_by$+@!h)#uohq!>F4u@4&l)D?bxw)EiRupj<-*rK_ljM7XlLc;T+us%ZPSZCUnC*tp^H$C6M*ZhH`u} z-p6%7)~^OneCzNmpbf7=yP%cz4Qd%fcoEeB`J^7aPU*#~lrFr_{RsJ#cH9jrfqZ-e zv~qf(K#-*vT}|6Zq4V!$!>h`q`psAq?{jGiY z+C~sr-HL(SO7x|aqAMm3wLS@G@J~X6Zvtuvq&kAq(Gie_3cC>G8GE7KD-oR}4UQ4$ z@?_h|1YJp}vX4W#O$=(B6H)CPk5c;x)Of_AJ31R5lL|1LUWAXC1?VLm)kDU4Z+ZrL z($mpShHrOXD!THMF<6mFpqGcQbs6~4l#Z{}vFOk9MN7ObDueV66Nmb80Vu5!g5vsVc)5N7ZjchVyK_IT z>^X@;TMuLN`rQyBpb-?C1Iei?Atkc`(o#DhBefSRmS4lx?a#1k?QP6kKvw0#)0jQy z2oDPi39P}?DVs5S>Q*eB_a_$3+>E8OH{G~=maVvrFR zg|wIm!~`c}3MsECv`>laL5c~|t^zYu_p`E?BUTuq_tn#jTvWI-=lun?O!ZNj!~oAKw)t=PVO6L$W& z88>cS!n+SIpl!&O$b+7zG?GX@0gJt*# zV1>qdBQ8CRESCtxrc&pP^(8p+KuCNbK~+3r)8jZ;C1xj*B$H4_%t=NHJy`!KvFY&y zS798mq6xN=a#E0zoq?#tMEHhCB_l0Lf7=jWB-ZIjW zTvUMkx*AlpwxO=Gi-!hTh>&@zTKWgk|NRH;??z5}CGx9k_;yKsBNB7+5Sx*O^r8|J z*3==dvKq;G1q6QS1dJ&>rz<_bh_7uJo1BIyvVvK-i3ziinbpbw%frJDj`pswv9g1i zkr7PE`nNUKhJ&FxOjKUMMEM!)^%P;LB@btO*s;oz0%WI=wMo#$+8jDJJvo6a^#Y#UTi4i(as3fRr=pI5#*GF{FIrWY0C@HTdxN1TF5Z(8-R)U@s`hEf(PZxO8 z_3FKn#e+lpaAngUIJS5`jxL&m+kbAvt8+)7aPuq;s<&aJ`kDp`O@c}j_}W?_(cd4* z!J&xq@`E1@=zd_@6NRs52)Q=P#P05E}Knzq=4RPquIn185 zoO`s{Y-eeLt7)P$2!y6#nxGVBkjxjGf!P95m`Q*&lZ>R9WQ@%u-7rm1hyZIcq$d$X zQNI`dc@lxu1PJ`^aa{fju*S35fuazj{y#%hk}tG8Q$m{OI*k~GGW1%E_DgZFV(_$W z?P?s@z7@MRvOKC0(0X#~2JTJJB(YG&g072)3|&63gn(W!ObfdasSqJ zd{B7JA&Wtmvf>-=!7?+{gNwZlyxpAP>EZ}SYik60`yr8_HI?o?11)b?PsD^pA=Ez@ zPBsqQqjlxnC2S?2+O&29Z#S;_11sk*#?FKCCJ~t zjVA;f&#zsBmb^S12}&$9HSq57W4wR*6vi5wurfB`K&ABhHO%R_y`?483C7fvl(;90 zdDtwCjX8W}Bqedk3iS5jiQUn$i5#w3Uu8hWJXA~=aIt41x1f}_47_p(rcw#27>F@I zXI?7?RH-SM9IjYBDpr>&iQdm@PO%Unt4+mb_=VDQ7Q~kD37}^+JPIDRc2Ixw2FG^o z!jYXjIYezybge#U3W2em`irw;F8ShbBzRpDJ) zClr(0@X()mz$&4e-VRLyEw!jBsKi!4J~kh3lJa3u(gKs@5_o1;p}glSn)?0%*@0+l z`GoeGK74NdjPG>=7|5zce^Ld8ldADGz8Zu6IjArSK($3E8XclhYZHz(wSUl>2 zB2et@i+mcdsv?33NYn78su*8uitweb2!o}m7$}IsP+=$rGdwVuVvGI+GxS6opf}b4 
z14$NWk2b+iYY9*`P52o%W(mOk%pb^qatH;tw3GvMYJL@4Oz8>O2%Q0u_A6T=5KJ)Zr=&4#m?X^D6 z9=eE)%hz*-B~HptT0{hrJWMD62_b?eYNf;_AWZ-^MO+4=LXr@iBu+8~!U9tW;6?z8 z2@A_HuwtOaLWZOJ{}Zqn{4l^8`};V8CRS|pe+H~k83ir2j^OBG&TJoB0_XQ%#}Y*S z!as|BK5H3dv6g)&`)+nEEL6x^c5O4K&%*Mh%dmht@cj96uyNBm?AW;($4?xD?6Z4# z_2v;isIgouWf)ng!^}<#mX6vmv)6#5w-ExuS$NQnEIc-w&uWuG)U<~ zUya~ZO%Z15Z(&R@t4C0!r|)jh&lZEP@}fNCq$hLl6-#ta;q`b5(A?6BmNtT_rZzMXK(+P_qqYAl%9^^7QBsHe z>NXVBb)dMm6P3-q1Yo_WCabW8tis0TPLx&Ep}x5dJp%)%Z>UFRMk)umP;Ym5S(rfQ z#bew%um=~`6BNvyiG%Z|Sfg?O32Hw6w@be8uEWr}Ttwg6|hR9Sbk(rN$Q)XkX=v2%Tmcl#{Da;m;B=8c$Q~_2uYcj;g zPlO2dXCdaj`rqR*`G1Y&!o66(PUPhtBqj($n%={Tfl4yZIQ4I7>d%X2&LNq}Jy$IJ z$7J8u%^aY1{;`g?rw;7H?oAtbc`1!-p_eXQ6Z*XuOt(^2--ceft(_@04I|YRaMN1%a23n;SHhmGO|^ z>@~grm8>kROiZ}via}Ona4IQyZ zS!Eq>8Jx3xE9S*wv-ns|Dh69DOvqYRI)Qnvme4Jeo;O@l}|GgYt2%y3dN4h8`B!Yf_IR8I92)fvR70$|6kU_%M zW-Kcp7%D79RY?V!Y8uel+)nV+jsEs7d?bVOYkwd9Vl(N!43m7q_s>Jzi`CQGgs%EJ zbl23Phje5;eO6^eJUUZz&={SL0>4z`2A3k)yA457w#Oce&ZEdjOeNoXXgw+cmzc`ycP`&&p3zJz9=+b<5yo}s9A^+UeBGjbdqk?-M* z$}nGaWhY>`CLiA#3h|{j5Bg%*=cu;5i(0$8XmFK9Pq+q# z)9mom(%=4#i}S@`e{M6P39M4CZ9vKG&B!>p5P_RS;kaS~j28R~-G#qHXW3+^FBid^ zg~E8bWGbGoTZZR5wv%PM1=sc;#_2tWv1$o{lEhq$7nlV>u@w-KSP!wOJ1}G35lBwm zi@6I=V8xnCShM~zj-8Oh%GIYKBD|7dXb~38-G+TT&SBs7GdRBQ5?KQeaQ?_OQbfD4 zY|dg#As`YVWj8~TKuTB?lgEz36cJIfOeOf#=qaS!B*di1S|wN!VxBB%682h>hX@(a zjCiId{Fg^*6tG4iYZS0p7;hY}^!|SbtkLcN1S|$uW5%%K6Y$Hwe#O6j`WN?7O{VXg z^v~yy!WCQ8T1-S7zp#2(BvJ&C%LuL(En0*H3+CgG4QsJs<68W=Ya4Ffx{SxNcX_&;R2f!|I(+7!FF_Sca(8Cy5PBNH-P;5{0hS!Lf+L&|5=GWhG)rvv;()~x z+hdYL5S<)G&_!)34QRPZ$Sxt}l*=k+C(s~910jJ`PH7&s+5A{(eFY8D)hMYe=f{$= zQ;6ww;Yw+Kxz0K`=pAU~~)usSQa;L}V&iq*>WW%*#gx0akHC z0~&k#cxZ4Gq8Lz(h6{VYenr>t7u0w4aG**pDB?g>*4WHLgv^VTSwfa;c_sH|WzlvF zL0(*XCUOa~vPn6mkkV!O8_eJ=SDqgJaB*^jlf46M%*|kFqz4OKb?7U-BFp>% zjFq0kR$CtCYR{nm?mlev-@@8J5t;;A>Pkf+3f1c1XtQvoeFfmj?k<7!A&89H<||q4MH6R7sSdKZ64E zTD^D%0~KY0Y%O{Y)SyEk_5Sr64p(*-7I-HwkC#uL(6jE1tLM*i$hv;AkBd7(eObvEKZN=PA~h}vQ9HKK`=?;b3<^2DUuQOJ*^IpLq|d;YC{rG9gvC61kxQL4ajgVMS^1)A|1;SS3Wil7!>1H-fy z=p{73Ag&JP)S5+A!XmsB{yEKvs_a8_eFrkS`cV6cz-sUZ+FHJ#wY(3*^`G%q<2Qo9 zM$`o6quxIsEk5~Zcg#SiRWjQ3qtT`oj2_J}^y)^U-!K*}8X+iG^gyFt7#fWtP)y*J zq~L%kd21wma7MI}J;D|3kfrC1dW#_RIYj^wIY3e=dOX9Cq@<0ISBgl`)IpkwF>)NN zP#NTouCxeztIEKS#yotkPRFOx1Po*cp(nu!9T6sI@>fBdw>*Xd-{5=H2YiW9L5Hs# z${b|T7@&clc1Vli<<1rGzke7p_jV!X!Ybrn{{!)d=fGvvcvvkR2g60G z#!?ZyUrO&;D1oQTXF;AU*;@nw*LLi{)q_W{d*h#2Fn2YCMCW6I@Ip+PwFOdhc0xex z4=h=E7B}u1;QnJ1oI3j!$4FlL^`3V^jF-#Yq0vS?Xto#BK8F8{UrKUkfLYn7ai3o`kR7pZaKoSBIMKNJ4OKKlU zC}%R7KTG%p+w!FMz%VC-MtLAS z&JRJXEJL&(2dwyXvKn&|ky)6|JyDrO1XP7NNX^ScDh=qFMY+hYC`M^rB`O+fP}N+A z;+k@#DP%RE+%J$*bJ$lCs|-_bEN%;AgeW5-z@7K5pb;!=>ShsX7*N=FgMVJo~jabl;mLe;U$dTKP7n# zOO5An)PD^FrTZ|YZ9ivySQ#tPpdbrX#fPxaQ{-^v>u3ZkJtbJ`zJrOz8NLGHf#f`-l^f-449eW-5!h~la) zR5yJ@Yxj3Fwhf`axfeCHZ3J1Z=*|qTP=L~sLV~&sgm}BbK~EE!Pai<` z_+gw_z8JfvNn_vasW`rT9xm@#hx;dXa=h?!gN8 z4MJR096=KSdvZFGk}{B+UC6yxr6pCgU4){d3i|F+{=a0=|C5(r!e$>3W)j|S!$v#@ab46Iwc2z$3|!k*0=v3Bu7 z?y@{N5c7U<|fiKY7G+d6a38Rv^&QAh^;{R^nkmw$gyLnK9osrRP;w zLlyd3>ICSP2=etJuyTj1BlUG}Pa3HGkV8gBFx`Kadu6I`424&3@ZqfzY|X8Ch|tm6 zp0`#e7SPks!L2Jduw~swY+AJz+g7f^#sv#-V*7SHBp7-`!a{)BZ{9$IfJy7^8>qdO zgNnQyRH-%AVBT9(GC=fUq^Zf1+_lxzco>j*s%~Ds%tM3BbH#v5pU!#y=n>D;V&P2Y z?ecJPLQr4`pDoBj!_0%lyj85{;%4p+>3#X^KUEEpNb0n6Ub39nzu@iG>(B_z1_ig~JpfBzlRMMSvw zig~NHtXzqm>(=4y!GpL)(8;`AH_n{p$?UpnDs+!^IOH-3^6}+i&!~`KKZKtw_-0#iF+7E}q__%=AD_d}fkOFO&) zS`qbl?U|1c!R4^Z>4Po-m4Z(W^djlp)FxQR)xe6t$}OfA+0|drF$~mplA+i44THl! 
z&{W@x?%IBQuK9$4oObji*P|h*5Owq!9YLjN^D9A(O$sV?B2c3dMlcmYa216i-B^4x zO~qG|UhOC}sRW^2FA}}xi6~R?MS-$6stm(XX%dMli)b{vWueC}4?}*b=y3@_ow+N@ zjO>uFXNF8|17ztN5*XQHAT}I7a+C0Pc_v+GHb7wYr7Qux1Xdk!_Gk{)Lwm3`YTe%; z)8Hx!O|PTX{TVv^USl9e3ty9s(G{kJ?pRa&bY{akD4g64yIY6ge|rx?F0Mn!@x^f8 zCIze2f-qh&nZRm16z7h`yZIBLxk3WE>!w3tsRW)cm4e*nC3v`LIc{uOk9&s?zKFr0JiS9f*G?mLsWDz0mod3 z39;HSOR#y}F5I|q7iW*0$LfWvF?-5PUJZGsqzq zD8Ut*MaUpan3N)$0mv${PhtiB2xJ&sjg(&)0j*J}`p?kdsAr3{yxRMC22TH-6wDpx>&Z?zbszKlM zJq#^Az}86zu3pA)_cnwFi911;t0$}MZp@S3gCoc)it{1yCka4Y3ekI3Jd!eLFiVR= z90{vW6`l}_xbzgHN>hn+up@JSIuOVF_>cQ z$o8jylHS=OreFCnHq(%Kw?<)%g$S8fYczDoLWG6YwJ2br!}2Py1y2)6v9VAC^PKu|Z=}U@lS14a5kn9a7vPJG$Z*6^ z>qCI$?qtWqgDmvN7Jg=s6%!uJfhsm4gnO_$TN+SPPN2$iu+o!IMgUw!Ak@;*gw8Iq z(&>H|lvENxed3kc8#_M{T=ftL51_2Bhkkzt8rum_+diSBg5b2ali;cy6_sS+k~Q4f zJ3x?Di;~h3l+k}%lu!4{#{;Iy?;v;OJnrn28`Td?!`(;&P04{ z3X%!BSdu&QK(G=H`Try?^YBkq9kOWv%^?WOD<`lgFdUV_>J}7}6qGljh$O$bjt1l+ zUUb&V+MRo_PM)}m70Wha*3<3$c+vW#5*~9I#m8`jK5bar+|O(@Pg{@5&|asd{kj3a*_wg=cqf^Tg=4 zvX41n>C>QX!NS&>BM`*^%EQ?Kt~3zXS(@?Lf~;j>K?YyGbnjV6Il$Y424*(%G!0$^ z7GVJ)+#_<~)Hxj4cZkm5MEc6J;_JX~Dh>+Q$;DDzseoGS)a%xlHMfDEu0aMd-o z@n<8S;Hjto6Nm7uykc~A51_5J3ylq}+;hcpuGk6!47fbpeYgjU0Tn9->gDOjUt3w) z@$F6Yn^{SY#nY#AkJZY#b1_v&hyxef-m`HdE*v?+6W!ksXlf}c5@5b2z?6rfx;p(P zR$9RZVL<_8B(d4R(MU~5Amb<-MY(wdQ5hVbO7e?Q$wGZ(5Y^Fdsw}QRDH#+Dpi1(J z__1mNDF#(6jLSTb@noQ+C#BKnrI0?z=e4jpTRPF*+JTRx1BZKi@VUR27u@{u`4dlU z@2smuTV*A>Ne8wN%vD6iqbw*A#U6pEa0^17g$I&U?UAENM)pf5c-%5as%9+839fSO z=o&-XP?OY;_<(ZwdgZ~%D;KuDMet4SfmiAPOhW46klF?3tUky)B;&1X8XPj)VH{Nf zV*)Ds_*z(n7Q;3$A5j@CsA~L%j!vMv9q4KNf&PXee5&n1Pi_N-vKrA7RgSKpe02Kd zqQ)T+we*>F4k;)$ibTFf5K1*dP^lV<%J)I2cX*F}SV1U@?F;ImgD5Z#0}$7B@fIwDQO06tG%!2H~0xZZz&EF%N7_<5o~ zCInwIV)1uwJW!m3AI0(bk{5x_1a~w=*q|rU2%iX|{z}xt_e28>$LnD@(HI@!TFAG3 zjsm+^Xpb<)r%V_8RpN)A9$&l)mAltq@#GTRpPYjK-Tm-7_Xq6u&WGMwal9e0lAZA@ zp3j>Ixdj4{pFJM0N#qwy#>-`5c(P&&9;>;6AfF(0^I%K9%2Vb%nTX$~9vD3$J{Pa=m+_N2f_U(Y& zo2PiI^c+emFL@4@zOgcl%vA`oXt1}{goCR-Ts#P>JoMp4;_9vsXE!~#xEsRL*Bk+% zj);gM>m}A75hRfbA&5?lKvaALqT?eGn-Yt}%rqqDWFx&Gp9Zut8mQ|DpjuGTNY-Et zgDmF3YD8UoE8kz))WDBbx3qBYQ5FGHT45;xRvFTYN(p$#8e=oWNYRCpl87LQq3xvX z0)n(k`pl7Om>tr=IH(_A#BZ|u_kw^ENtvOgw&i;>X0uNGq13s7n(chzBTos zsJH33R~AlkzNaee`aVc`(E&n)EO;pqeaz)&P6rqSS6 z!U2oTBxGe8;z)97KxA;0NrNb>_RhjXY%t}?`_**KG{{yow9~WKgNmkZH1!Olq4Ohu zT}i*UytW;Mr42~SEJb8=I^5iYp{-?xE0-UP0M_({kP)8=k%?junLv7Bj36XgXi!KR z(*>qr%2;7Yj1j_2AsI-~IpX74C~+#J1xTO#CIHECA`qii=;sLpT*46jm2}6y#t?u> za=;QEGm%$pXGK9-cyR5K<=DFJ5A5B#jl&h2Dabrn=Z+lWiO>wP7+h(;Cy0IYfWJ0W zSLNYBUFG*M(bj+&fs3`NF+qtM&#z(t#gf?lJl)`6ZNbBX46K;>Y)tHq@7o($Z}*z?B* z>|C=N2MG?@dW2VxAHzPn{yTJ?*7~}zH8!L^V+==gwwx4g6JR-6n9((BLq*{YebzJj z%)30_>dC!(kbC+RS}H0$>79iH8Dx1nJ9F^TQB&hyEH@`7`1<&cgpvuMmWXSKkj`sqXhUmb zJK9KE>YF)Kl~W%sCOySKiWL^kA;^lQ|IMF3IGK#Xob+sz7L@QMbC`6Hary1jF#i6` zYGSc)VgEnv06zEj;Zs)^zLFjtu4+JMMjnAeCTe{mP~jASN_&5#zPCpBLj#1|HA46U zYh2NMW8Q1mU|W=lDiO;+zW?@M%c&H!9TALI^NkN znea$&fmL`3Onfup5?+j`v}P2R_oJoxGd_0yjbXNuO&##H{u{m*bz(5S6uqH&=<&%! 
zgJmRIZQ{{v8H;M;aMW4Fq0&4WWhPOmvq(UrWg_YfV^Oagg)T#!cO^4veAR09) z0Vq}QKm$QoEA1`_>f@Zzo9Tt0v@{H%uJ9IWPi{fy&M6pO*$>sd%kgUU6g*ojg!^;G z^uQ_H zy?!6}Z{EYrOV_Yx%TCOZo{nh*AgrzoOB|g-@Wi}UQ%R}Gh%vwthd3!YQ9%jr!4eW6 zOH*K^pePdtQWFSnM#6p*IXp3l8ueI>ZjXiyM?G5;$4?}n`tQ)+{{&e7=j&1L7L)M= zS`&Yrz?a|!~-D_eh7^UMsP$RLL!3@5f_Q1j0~jZvwW9A zq~%eYUx?h&G89%;p`fCYwj1YO*ftX|Qf3rAMHYQ$dy{3;k5qqm-0L zaUIK(szrKXIiiv?5f-0{lw2CjsnfFov!J|=fUXF6m9=R4Cl8Ck6E8GMP}NFp|F<9b z_~QrnUUh!@%t4F66$=?Mz+$0AR?>lavskDwg|0a^JrfBT1d91Z$R)_iC&(%yCB~Gs ze?TbgN!eOj*ul)u2o`#pu+mkAshT{DK0Jll2U(aX-h-|B6S(NUfbqLquuy$S#~+d9 zdI#3Vig4zTr3ZI=J$Tyb!O2VmZdN+nljUx$2QOP1a0uEwZA=j9;|f1V8bHiVcz7_t z$sX}6;XTk7tPWO)zc-gr$ofyvV%{w#EWy2{zM4Z;VNNC*>*;`#*_QFyloTq&Yq4#^Jd}rl6g3`bPg_WUyH}5_v6{MQ&4(-2eyXlJlD+E z#uCw<9z4mNd9Z@W0(Z2b=Po!5@kyzOjZNaS2s26HGY=Mx%V?#NE$nb&_q(hejh<)aaki$(~Idpj)A$k6P`SJ zgFn{o#8m0I1X$A`PQWzbH!_C)_ZWx}P>B;b3H*Bk#A#b*q68$zi$dggK}b!O#7r?d zf4m4LGpM5TrcRQebHpJ<=dh9uEJQe8dKM26PWpK)&%yd*=}N3zuoz3{EWm~ptFUAJ zAH0Yt11y$jb^hpK9u{Ql{K-Fl2-P?8+=Io!gyy>1Jank9@`3KZB`gedVXULYbFOSH z2+#?z*iudmvY1DUd9c`i=Ghv}!AehxM_mOOHwg&@RnaKSrstjh1LnbE9uXTeE7Grb zas1#>9NfJh_io(bp!NE>93I_yfHyDXATRqIx6Ylzk?q?!WSu{B5Rb24#l?R@n(qh# zjWyNz_1Y8Ix!YUA&eRCD1X?b(Rs?K1yyzzLV6g=E(VQzbgYfOkmmIJdR57m=YZ-L0 zTrE!*7r485@Y$y98DI(RY&IdQi^c$|ysDmu3mI6kZI<}nLNL|bM!-$d)jfd0{!i2| z+E7mCvr-Lg;bm4xl(h`7OpGmf%gQw{uzLGO3Ho}*xPIk2wys&ry;lsXnAeJh1X)5m zOL~{Rc@tK8dc00n09_|*nfN+5lA+{JMo$WIal%9`l;>Z5ZFuC`)Tkq$tQHvv1aOr48U?SbCI5!feezkn0Q2nMQl;0rk;rn&L`8Hhf}K;~YMY42$a>hj z=EKT84_k!4-_f7AYI-L+3($u`py{%ijGL9{ka;x$W`+}fvOM6 z=z2@leUTwgdi1dw(q7mgN8S-FMv;J5F}}GM0>L%tF;7Ct8&_mLvOwTTB?Mejg5MJ* z*vZPm>D4pD8S9}rJOJPF)9|Ap1%s&(7)TAp*W4(4Oz}sJpBb8htkD{3hvr~YbOfrQ z$z2weP7hJ(`36;iIw%dbKxvFC>arrxUXp_PtSA&ld7w7I13$e{H-Ws8D&Da~O0q%~ zE+56sjSFyU)?{3qJsEeFh~dT}VVs>l7MEql;K9t_@pSfYP*_1=weWX5TQL#JyXWKO z)`hsWZW*2)IE1sC_F#e7d`Jq+f{gSsOr5y~64O>gde%nl+;u+TJPCpVERlUQm22(V0;7t5R=%lrdOE#Jf3 zN)hIk3e>(Mz*2=P0hSK|R$zz&d;=Wd>S;}43ol<6_y&5yJHV4#Zv=)0^Ey@u8Cgip zEkJU1E|Rix2(${2Q$p4iSz;wMb=)h&z@?Bndu?kw>e@O`-q3^s=4q&HL1}X*s@i)| z(b9#&nkMAa;FwuliI~(JUM#Y(su4w11YR`QXXKY7hX(xm_C7Rp^`eNtjn%wj>kfAO zjK~$7rNT8*!_5?Pj?_p=I3JZc<;4V9MFfy_9R%3jG?=oo z6WzmTY#T&PLpRDQTab}ehTxC{*w}bL?v*lj|9J$nXDq}NW{{EY5Ex7S_2=;%uoz6S z3hfeOMIbeS476V+(|%!06PAGtn=C`;h>jJ86oHlW1ThX*QwX-07i$)M-nvCAv1I0a zf-4!UUa*8Ow#>qVOxW@^%gJD3Wf-oWKEadOnKz3;io!ElD9gXXyBE(mR2itLK<({o zUarA{0L6h|+|ob~`WmV{-^!h!JkZCJhXYxdkhRQf#bysO@Jfh|KzNWp_hywAf z;>q_cg!zadizVSdzI7dMo;~4SHU?H+bZxG7*3>5qVQX$oMvDQ@v0@;_JXs8)?%lY- z6WrNr*5}x9GXn!2J~Sb4VcQI_0{w#!5IkCBcBFP43l}pFR!w~i_d>JWtfJB?zRduu zt+NlMm37>M_4(5`f|(9JbC5|iD=r)qj*#F8K68+T2wC-ZO%3Yzx<+^=`wGe*)N%9L zO&tF7PYzbA=FQ_RE7x%S^l7>ex2fN&BErW9c}Ypgh>1a3R1`8}W04*mjV!kMLS8QQ zt5Vd~)}pbI`esuTT3cJuNjj>F^jSCQvA(`R3=ja0$S_~(<;&+E_%i$r!@NrQ5J6WT z8Ba|FXl2ME12HWn9YqDj$R~ZwUY8V=qPw$)e&-i_CjHpo)rH=+Rt$D^@S>uB53}_N zhw#0p7oQ32K33IXpr`_0imK5OnTi&lX!Q9fpxZ4HpS+XNYZrrBZGRLfx+C$iE#e+K zAmO2-%f=D5)Q& zaiSk3h`vrq7-|@NO8$K2iRHe3|JGus~0Yzwb&qSSN1R9Nl&|w;ecKZC%*LJ9S z=Ym>QFH~##qR})0HD=+cFbPGiMHHPMg>u6XR2l}Oo`9@C!xNzo^$_{U0A*S}Xf=;Q zw@o~T98)pul#W4%RHPDoMO{%x+I=HLUQ|WY9Suah)k3V1AX7L z#9&r5T4TIW6Ks#lKr2)STcI_|5yhScNV0g1B3DJ!1%5znusW(Ej8UKLiiWIE6eI>B zJ<1;qr8(%XD@J2sGTIB{@YC)~k8ti0b#iT8m|2*Rb^a1^myhH6mSs3LR~W}G-abx}@+?zcHkLUf4x2q=O>Ed4quzrW??iqN!Z7v>fT!trq?#7L62e5wH3QQNA zgO!W7V$JH^keRUtE7l*zsY}nWW8Y=0-*OBawjGDmv?W-)Yy)oId<-i~2gF4sAR;7; ztTJ=zkk_zw*%Hi>nu2L!;sk7hB*G+OWQB@j3MoHHK?1Bv;t(Oo5}ZVq=EQ%(eiJ7V zY)v4@ng}6!-)J~+6re`)uts0AV+^v`>;FuK|9=Zu|9O1$oJnIRLukw-h|+6O`kYC> zkH^@5kF3x?kzh}h;7UkH1VYqGPo0L@EV+HrVk}s)1k2W}!kUfiv1r9&Z2ogIuHL>v 
zKzbb4ZePUn*N-5l@K31*LnWA6sKLTYorGFzHCS4!z}iL$wsuOecTj0$__WSCYk5&q8%Q{b*-iq@F=k(O_QQ){8QND+XKHG|1d>uIkS!7J!8h+#Kv-qNWHvMOo+(VClcU z1B>?$V5xi$wyO7FuXZ1LuP#9U%~g0=D#6K2fdiJM@jI9sD8NKZp66hN610YRSi#d) zA5Ny4aI-WZu(BlBGA78+qeg+lKzjXh8ENi+?G@~qO7)w?k)Xq<<$gO z3_L24lbeG|YD>#1$jWAm-&Uirq=MkA8X384ntL#U0{svl9S$2KT^PStz`NVmaORIS z*fVPy4$PUs>tG#WA;Rrzq59|=EH&T4O7{bSr8b<+^xB*9iByf@(D+ZBY1u^+w6M40F89^E9zd{iFT?kWYd#b<`h|@NMD;82@ zxm!!7&BNLS%dvRIJj|Ig9cvdY!D}c?%b#AdWsLP zUf}uN+qiw{0`6YEh@0on;`#kMP*r%12iLFgmgg^8vIKQ|?rCK?VdgY|vAS0bq*$1c zd9qk2kcA4F2aC-v^dguVEj-F*6S8y6jg1fz6v9J8*7$Nb1pBNG7V}`q%PHW+b9ua1Qsv3- z7tUVb-o`EelwsJnc{85gx<$s<2bgPXQ@=N({_Vk`sxUQ`gH}mK2I@!$*RkK?0}Fj$X9VwX$pLB|SMb_!UDRzo5Twh%f&#H28_bRT~*g z%#&4HUB|suWhLbtuGn@B>0tJ{g7k4a%fBL>_o;sXpL%;R)ZK~U-fn#B@5A382l3ay z0RHNs?ap5ORo9BI#kHt%4?(s8!Gnb-IvhjL;~aqwn=llAa7O5LO(Z|DL6M>#5+2(l z_6doc6C7{r!1AsRg7p01ZyF9yi&(@3)xh7k2mxUw2#qDfERDv7ssZE`b|Npg4RtIm zSk{M*f-ZEVHKIPG2qkv$$k6dcu9h$A%p*}p;8dyQjVd)yR4coo@P!p>-n*hv!w1Dm zPAE`zMu9r**YZHQfgj5Be30|r0hvm6$k%j3hKfCM)Sc068;ySVRQfH6s5bCN#v3bS z%3C7*wi>)o%OU)#3W+ij@9CmS#{>1QfoP75#K+uBd@jgBPjW1}Q)AGZ5rgJTJa4&U$?7uES3Y4_v~A^S5w@DE_hHJ3N2+6!H&m;r-nUcyo3SF0Y%8y)wVzaPZ)>JxU5oNRfC$UrPhmFPAw0&BOd$G)Tcu>aV8>_55(vacTEzU)mr zd2x$}2$=`Vz*q@JCLds8_5l`_DrB{*!rq>gvZD%|oz>vtqQwEr)5jcsfp!E{t_Tcv zBgi5N^n|a!7reaP;o;>5ce?j}!C?rGPexK!E;8x=$SbSjaK(gyRt|OW0_yC=qyR>M zs~NS_*_*nlQw@AZP1hhwY49s=>q8|8D{z@f5S2+Cyt1hW^)wI`R}y#?)gZH=ng-7< zboPIx&!G1-wW4cy7(;*mjamXX7Aoxj_8o(N{mng8Y(^mq2QrTpOMqu^#m-^5TMVvP zq0%a{+DhsgkXJEstt`nsH7gHMamm~g9;t&Bjo^?dgom;uQV-Y}sKHeEIZQu1f%*G~ zuvU2pM~%mD(RvJbJz08v8|u$a!%CC(5oB2!%G02r1S=yYI9qAL*6;)ToJ>gZxFFoy zc4UbiJyis`+H$ebR)&qfCeOiQHLwzb{1HMx#lk=goLJd~5zmA@_gb-mYSeSZ+F(C# z4q2?4dqQ*=N(=K)UssFHu3j{^(S4%-ps}?R^(2+`t;k@rkt(Z^TUv^;sw&hsvAnB1 zPore0Lku)?Wi{g*c*JEwvM)kr8QYUAkP^H{cc?LUAeg&9&aF->$T zBv~^1B+@10*i1p{tN*6{|BE1`CrUwLyadMmYXXGE3S-`sIhZFg3)3b~!5ookn8VgH z6qdmpacae7c&^r(h0CyE*($7Cyc`==tj31rtFUY?D=fMk$9C`HiS7)vSTWI85AWmI zo!dN2c=7mA?zLh|t+4znA14PGsjETdjT}C_eg#7@kVVi5$($*~DM!QI!L`n87-pOMBOi|u1G3t6JOuDUjdEEYC= zc>6wXU%iRDS8w3ug9ms|5XfdSzL9;*uZ>-!?46rXfAF=4f@SfV>SkL_n-sH+1zTL*;wQ)DzMmh=!k z2ZaPn%zMR(iZbA0;X(#mZ1y1YY_TvhFX2GY#mY1^H?{K|ECyGs_7$sl6;6Gg%^qZ7 zL2D~}C@Orw)hjo7h)_#QA9rrv!IjgeasSdKJiL09=UeF#2)bKa!`IOf-t3vCHa0L2 zxrvF$Pfq5bRa;ny#>xuRR#u^w0E^Xtf!u9tK!L zU-5D93&GY$?$H_^`hq?(R9l-{(L#E#j?D}vJzHH_gTnkGRFWQJo~sV(?`(fP>E3=a zKtF%{h%bZv`1Y|MKM1Ut_v-J#L45D-!B;XsKi4&(JCh(FDVHx|+#8gPZudxZI)tFc z$PIPIUMNu~y>&|yX-}OpfdjS*hy$g>DXzCj@JG}w1}qlEyg$2k@C z46+EQ8npb;pz4io!%%dQlqk$ zIR@Xu3(@8fg=`f&M9S*G_QYf8@4O82gLmM0UJilR-ofY68|1&U#h^z#zQ$#tKOqSn zG12HEgQPn#2CZ>ns0{H$uD1bvH32xhguYLGw?t#l#bnG%#pS*xAr_NyCsbjc$ z`4p6&-Gr$;4dPdJUS+TKjJTtnpCTDg%X` zEAVvdMm*Vd2xr&r!D7+*keNK40BH&4&s&Gt3pZlk(rx&2{{`$ia31To9KeGoZ)kuI zCri45Iz$zc6B6O=?gmZO54d>xIM$J3UodkjU*K7c75$qeL|`S(Jy_xdSR)=RN$$ZC zVPQhSk^Czm!O0x31SXFsSYn>55f2rECI(ca9x5hmfn~PPG7Ay18Ha+C|4E7;H;I7h z{|;FH(>|Vm#k^M&g&|JDax=#K^b00ZD@>oy65ZJ%)9iX!h)_aOihHppOcsEcloSEz zGOSp?7OOY?fjvj|W5+%Qq)V~u;Gej9`yw7bzYTfCCs6$G49aRRp{Dr?n%Z*E(|Zk5 zf-PHXRRUHO*xA2_t=)S9Ep@oM>BG~@6kcBDBvu4ewggl(0Qxw?+t-bbxpBbq^7ln> zWE8^U5)qM*jF^-R9x7y>pp1fIUKO2%+>)qGXKw2XmVebmkkx|Hx@OdL^rO1-BPu%v zP}ehzdXh>S1dD3gc;a<)_h$mB0hHEsps1n+Wz}t{s_R5G9jj>UKs_mtu1{YHo(8%1 zip?No;lS?CU(oiEloUZ0^Imlhf99aoMwTH9D^`=zVZs2dvY9@U2IPV&0yw&ERtGCB zJ0C$|k?`~lfDc*lUf%w2b@hV1og)o0hOpIFfvxsyn5#U2wVEt!)gQs0#KYh@JdK{g zLHi*cy91*Sw_&dJ6n16`ur+-LdkbZF+UdgCTn(PKdWiJ3Bfz4;&RQQ%CfdBnsK28n z_v|%!g248d-WhzlTKiVlZ24T>SYUPz6PrUB6p-mVS^^l?WJNr0CN5@W*&vZ8qi zk!_dcXCWpm80pE0q+sjNOZTaX;I^D#x3RU0tjIbP5D=&477|!i5D1r{rjaby+H&OP 
zXP~e!12r`zsI6tyt(ixwmz!GzAZp(d@AAyuHut2ke*$DN*W|8 z8`@A$&sRlrFa5X8yl84oOCPETtSTG23Dmn$+t`CTlIq${u_>Kaxo zB^@+<9%fISiB-WN5s-oK7%@!v7wMb{k{qz+ zO3cJ;0;@&Rb2wbFa3Kp3&Ldb`K4&rZY}!FEwHm7zF5wF(Z(6+$OXtkPss$r;uMY0q zhTE6U<0Zk>)4R8D_3SB}Jh-1^AMRYaga_BJa_^P1r8(X`e}=bDWg$;s!|Exqrms!)0(4?QwM-3hw<35Z!6-pW0r@3)30J-ZIJ7SuY?|LsiIY|Cc{>T0Ot?WOp%Faky8K<7;pgncCCtkU=`k@V$;d=rQX=w_lh8zf#SEUB z+FAmv2GrHp({?=p7YpOJqOH9P9i61lNV>@o?d|)B{=v@}Bw>)nLWSSH{NP?J22gD+ z9RyajC}gt=*(_`7-!%kUJ)|G|dj`-$I&Xl$jd`#>_Vi$=r;E=V{L)8IHPDAI-QD=u z+=4zbvO39VX-~;UZ(7h(;@4GF$!$aF2rE$$DOld)8S_2tvO(@E1MOJDxD)LGHW;Y|z zCl&F|(a3gG3kpCv>ADg>50nObqAE53 z%~=r`tjolYo)UcRD!|{v75M9O8NO5d4P6-g!E%EKFi7(K`yl@M(nDa|hVT6?1XisC zx9#{o(1E|deZ)^Q_ngDLgJ-bp=t-ta_D^=h z?8X|{-dO{SD+{1?OdRibkAuRtUm&;PSG?LNfHynm;OVxdc=YFHT-dZ3E2LR5KS|7= zJ|7F`EytX>%dm3o7Cd_P8miicP*B$37HwHY6-rCXkU~~LkgpF{CQ1r&IC6L|7A=|$ zX=yQtiwZ)RKuKI!)}Rob%e-97tHl6I1d|DzCedqF?tz2Tm`VQ#Tilt!)qg|P_;C!- z2&_oPjU72pZ~_Yr5=i}f3?`5WvSfDxEa8d&y$+(7s00LsL@<%TmE;smoiz)yNFgs= zwiK(@t-&9gH&DA8%hoQ#_PyJ2_1;z7e|{G)6`tUo%1gXgeS!BXvQSlh0z(6NSeuhl zu>JsR+xM`rQh=4UBKKsuxoE@PMVG8xf&qJNIM|Ry;$#dj4=ecjk@5&|BhV$l3h+b# z4d8yEA@Cwg%_k%TK~d2NV}oxrS%Ps1yv9`qflEGFibb_-(cNa0k!4oh)`O&d_M*0}4-IVTmL7s38X&9K8hH%_1+@f1Z26Y@cAk{YJP|GZU(nb?5Jr&J zJ^USm-$Cs+o{QB!IDr0dUorI8ck~RCA|lJJn_#T9_Y)5lRnzyB(B~G{x6$=>uEW}Z_L~u4nbQ3=Q-T|TmW$~Jc+q_gbTUGin-#om zjA5^@4nKQygn8H#eCQEq=^)D23E^IL2=TN>96?pMwy_>k4_ zVQB?N6LY>`EgR(ZbPQo`W`nSh82I~!!PkpGAT*w=;8difcyzYi64Jp_~ebRT+AQPoC(RYOpk zMR4Jc`!{8=dEGWFp0gB-W-r5nS&Ip@=3}PJJV=m{Ht9F&k7Gq4KAHNU$P7qKoC2XS zqL3DxhB;C!ziI}gg{EM-s0?OH&Vb}(c1!|GW-sKje8Cb9REy`#r|req`o|_LC(v5H zU?CZa%dvjtGMqhm6c6v;#+57Qaq;q595}QW+jnfmMS`xo1YGwBXiW5Vp{4Wz@(&*4 z^}{E4_3$z5Z3v!hZJ?#42_0Q+XlSTG;q4o!D1RVewIe7Fgqy1iEG@`*^7KGdR0O;| zJ>l!?3nvD+1h*FE*08a0fEz0^>fsA7cha-=uFxmAQc+ZalDraLKaew&0uS6 z0#_>=_`108S%Sf&JABA+U;yk-`_f{gkroq$`z?*Zy7%b+n zEh@todjHp=N_@|+z)(^yhU0V4<{N`@dta2;`k>T?pvgKA={j!6)bT*Rksq@4y-{Qt zj2z1VglpO(*(MMbQ3a?@s6cC88#?O-QCi$YAX|f+EV|z@Imk&YKypwryiEw~32Nd@ zeUNNO2Bfwta<$x0q~VUd_YTNVutu7?Gm_LDk+1KCVv-Cc8UB%)k>xdalS& zwMMFn1&WPb(c~C{HkUB8x`d*^B>=g`w(x(U2-|B9V0HB#OwV73`K6n1qvzM_xjaIZ zRO#AnQQ_f@?wB}y$;`yZ^fYv*C8Ikl9qs8UXh@1hLs|k_a%g{P9zHfyV6c(yQCkfL zTdN6L8}K&))R&K~_%zstPXtZ<-@c&t%P_hI`|Rv~x<8u_ayg;$jV^n)TLVeJE z6gr(lg5GZU$}OiKBn6fIlkj%OB)r-#iicaK;P&=;IQ0i98W|BRl$nCXv*(Z^n~Fcy zuEo`>m!WT{hv3K%BxNKcyC4S{S**%#II`&nCq{-tMd3AB6nn8|-Ez#BHyz?r#ULa` zP$W1RLc#N2}~3rC=;V|39Lxi_DD{bFh6e$TUYScaa_i+kRR>)ulJL%?a|NV zLZBr`;3de4kp8dVAVe)obe}wC0Y{gO!aUoSjwS;lgITslv@x86I|O@OILrfzN>U8F5wws(X(egw>O&37m!RV{%>-5rt*CBpM}1p2?dwKk_YeWvHv+V8Xzu@u zAPQVMKYru+U2Oy17@!CIK9fJ@8$AADIQna=(G1MSs{{gz1iqKPg1v|sHu+Wl)j=~Mt=st%%0TgRp z%@yE4kY%m&6pqFOu)5Ent8@$2`Y+&a^&U2Q&j}*r;AKhH69Jd2i4r_5H4#DJ8bYAt zW}=G#2Ma{`I3v)7 zYLu3hppXWDtn3Wrx~#AjrQ7DF@y_S$Ym_?OyP}9^Tqe=yDm6V{YrUolJ0`&+2Dt~_%o14MK z!vhgynLD|;A|NCTKEa`I_4b85L9Vue2@K7wNGCYJ+S-92#s+H2noyK~2Zfi41XcHN z?aWm?zxxcY?mvUv{YOxG`V{X-G!+zJsiOluf;vq_1t=3>>C<&MSy(_{T@~8O?_p`k z>ciQL+B@11u<7%KuoVdq_io*allu-pn+Dl}!@_`-pcq-w#+4v9knXK7L7FxD9hPPY^YcbrXb|GVLy(=8LZHElptho+ zrJYY2X(FI%@9skf3;T6+^F`HLSiUH2w{`WRv!@R|q}Lc|4GexJV~LDNGBO*;;B0H@ zK?mv8%93i7=anHlISVD(g=ipfY$RA}tE$7t_AY$xroq3r8$Z}OghT!K+S5sJ(1Kwm z1X=^66F$|{;xp;%!IClpi#)U@q+%eu0Np9ssEbTMb$9}5!je!Mk%A(h2o!inpgtx8 z-RUJ5Dq`=eM_YC&dWviC7a6|a=^6(}w+?jlp@noggUBj2yOV|ZlhTnM9*+bvp3*$S zkmDYPT!$d!Tl=HP!Ve|pekeB~s3Y*oHSU@IV>rt_g}3q>MANgA z<6(;$mUx~NhVFty3|D8PzcLd&HM!_+C`MO(33{6=>Hf9i+dvlv+MChUSVzCBoxrLW zjg>UUkuGYkYQ~qoFX(P1-9q=ij6k}P?r&RXCptPi=>HoeK>mz@KDsZxBePfs`aYwd z=R2qmWAk$&!5?$oi4JcN;smZN%cG^DuqhG>A_VhnTcD1cWAI 
z-1xB=KVd8;Po4+?mR}_#01*+vkyem}-zE`YP5ckQVj;qjysJ^K(SL#vgR2Q+1PH1K zeh92s<@br>gbBFBcqMoSPh;4E&Hu>Q-^bIwkx(M@ezEOQz+wsZ4Bog*WVNmYAUu`; zkd99#Kw~0A`@{qZ5QUjS7sj};;~**~F*1u#S_ac*&&J{vD=>HALP$wTK}uR0^A|0` z<{jH`_|$P+x^o@pZe7G}*;{z{f~?Fp_n@i$ij>D&*qAB6+T=CC!*c?x7ce2fvaozj z%IqCH990Rb2()ZJz{5tF26!z}B3f{<(}aVa7Mxs-NI6@;!OaqOuGR!tF7OTUg@1Se z!BsFqX<(1fNJnN#F)6Pa6x7zExUm&goqeby#Zc2TgmSXJYTA2IL!GX+wF}KX18D2} z$aAx5+qzIk!nPS`)f0HtwRdxA=?L_uW}a!5Iq);5uan+i{|klj69 z$s(qKC%_$gy6>UK&{N2w>^n zI*^!a%Mv`iAy|43E8V9AU@r-xR1o5#4R338xSG>-j)ambU|3SKVo7d zkxrIaT6#8e$cpE)2+3+LEh|L@K~q6tA??rQOWssfRq>?Tyu3W5q^2Mxg}^L_B?6bD zl-0b-$s@&`is&E$Xc{(wo!O=)d%R_obKH?J6kwRdVTUdsK)J*sWM<66N5n07$NX;+ez+7C@ zgS@hC6jt@3jNq!YzMuAWqoSc7^{pRia2rHzT{kK#TTxlofFc?gLVd!Z`#}d+Ph7#q z#Tzh-E&41pm3ywl1g4Ol5a)fKEvqFsQ3AjJdpySdG7(dSr(^NdMOY}k05e(fQ35RH zfu2ZuKzyKMblzf2laRr>rK_=O;WEsUBG{U@2wT=}#I`>+WBbNUICl6T z_Uzt)wd+@5+n?LWU|NUO8`fd(;RCpE?J~|?z67~fukq^13tT>N0djYq;px=}FwruE zi>o^{b@d3U-b3;I2YgUd$2(;eYSjq5OyTL{i=eP@f-QU4*gL|P{?CAr5IB2yAs{lE z45SDIM#jK`pg~Q?0QyGeFfz7)fxaoU)OGRZr2+@6*Rrn(I!@r|uEThA{XQPwxQ&O` zu2OpgN-tl)TvrFCY~f-8EA0>Op{}F|Edncj4K;#iO}=!Gt%Vta{e9r(>;QGPq>r`+ z?Cq@SyXd_phI|5ro2wJ)Br7;L*z#kJ)cVtV!a{@L;!MB6+z?hYK-yax&}X?JB_R?i zNim3xi9kkX8V9W8v@|57W$;u6XJ0>f1ci|CmjU01ctoV+pn?oHwvs~x9ZyV6gNLU( z4E1ziYiY{Et_QYnBcM7*zwH(d?bt=|y%RUiU8Z{=2bDJ;;ACYFT{UeOYU;vF&j=dt zRbZ&417{l>__@0w*vI=H@j@a2TzPR3irHkKqEZf5%>+@^4b2>~>It|iNncjfHt<}@ z76Pp{f-bh7NjvGoj?Mw}_IyG&0Zc6!hxIkBXlv?1eIX1&4DOzA~2tWD;@fWrD_zBMQdEbMtSY*{f1$Oy3vn^Yh>VOvVKMza zy7v{Vkab}R>Kj_o)AJFX^gp%{fc6nwb(69G)2bu8vF_j&96Y}ZkDs50rs`F)RIb5A z{}LRP55wZ=23X6kh3A{i@O!@*vBtYlAAA#C(T~tfuvPDM6}jd|;r(JYw9ih%`xCQp z_fHwJc4uJM3K>XF`~_3Rg>dx1Zalnu9rCZp8q!vUxs548X+Vijk0Wa&6D7rYaCNZZ z37v-8>bQ07B37?m2ASE?5SbzjQ3(+aSQA;;?;p5gGLpS>(6dJ+onWub4V0BsF6 z&H5hQzsImJc|}(0TbNqNlTv#D8*1I`KXAbEA`oyVFmSg~fscz0Je~F6?4Uyyo*o>W zjA8F=1_yUrID0w5!`~f#1X$q-F-XixLq<_P3Tw!sq5-|6u@Qy!jU2Md+h{=R>_>I? zAe#F=5h#5^+rTgZ5vx=5kszvv0IQq8s-M4ZU{FT;SX)n>m;qJ`y{CQXJDLZ+p{bvM zYWQ#Tf2Y3v4e0&+4LzTR(Eqs~L*EB6_@$5fd>@8Bf5x}(e-mKQxkChG!+)WrZy5C~ zR7lDqB`Y5p`NfEei-(WDAKVEZ3=MT)Wo1HEv;iqxH5louKue8aRQDxpOyp@0c}kY+ za~Nwsff2!$gT-4oS}4$Kx~~Q=;6{LIXZo7XeFYbSFGo6VPupI0AK>q#O)#ZFqD%MK zfS|<&5&j+ovQ}`hHAO5L6Dcu4@bj>Nhl>fd)(9kP-`mas5u~h>!-EkU?1i{64rKrsL}6te4N@Jv5`8Jl-D@DQs_!C*ZbwaX4-H&{Xz3V0BTIg- zZX&>{MR9Hk0c13+^epi5_EQ|*v=1w0EXAU!3$Q?jz)5lzq=lyvFiAp!M2J9Z;;)k+ z%4P}*%3zM@3@n;57jwjCK!kau$4taD5lKi1h(TIV9E+yUKL4ol|G!~a9nTgJC_bp6^r=eh4fn5ikZDKj&rkW$J`F>TYPNyE&{ zXeV);*l`>%Gcz+Yvn-1xOEOcE>s?!YdfpG~w^3VLTQh52|C!lq9(!mGFFf-U&p!1e zue|UApU#`d58p1}=kLB}{=AP_{nv69fBzdleDpPIeqF}e-@j~wH`!9yICpnu__C$aH~1O^6k(epA_MBOJOByrkBU?(X}>TfoFp^-!; zqzH6{;BwA`6HYGd-*<%ldk(X0;||s?U(ePxTUokr37^gVgkQe-iNAjR%`v~;uit;i z$|Z|Ayl*#WP8@T9l(PWUITvRR?%%`iUEBU?9TIpsEBh{8bSF^ClAHYJ2L!~#lkC%Xyl29}Ve}R<%u}~?4fdT&bdV4#%?T;PWgZnurfpLGbGn2{6N_Di} zmXs-zK5C;5JZP*+6)dz*$bAqpY1cdOFon1$bd!?&M)%*P^L|6KSJ9|?3RfP2OL3g=Iqi4PtWyEZt;$lw6|yp?OJ-9Rdm-k(J64J$t%Jre!5G* zuD7n4VU>=-CV@TK9%yPq9-=60xjzMZI^t~}IKW-_oL!2`j*mIN^*vnIzQVpA?_uZHGdT3)T{!)54}p7Mq{8n9 z+9DRw9Q+H~h(FPU{YkMvmd}Rg+4S*LetBsOKfQSepT2w-Pv1A0ryjbC58r;3&p&#P zg+DLA&BIlKpHd0xX)~%Qt|}r!Dv|QWRCs_NhxY7n0M@&6-{9T{?_$bKfuSh^QUX!a zrcGhy%;`*>I@tkRlR#HH{#^9lC!7R;&pMS0G7bO`K?@bkwECaTY~>F!uuRZJX`!;U%>h_dLhe;+JyjhBhqVc0bqGYM z%w4oerES(J^9G6%6SCLZl{{75G|6_8)*}DCfR>m_z)LSMW>f&Fdq^N_2=m|#y01&! 
z5y-L)4hdw5QW_D}biIqgYc~3a>A}AJyVx)0>2Xe!vj=BR9p%WuO`JKo(ed2QIh$lPMuhVo6AN~ zsKC=+2W(w(-OYKY9r#{!BJk>YfmaVlKC2i} z=U$iG@$tGyq5x}3awK8lUPOfZ5EUHArE_lbAIZ59p=2hTpgi?X5?7VzRDytnS7HXSY9qPGI;Hp#~e^=`B z(|q^JBP{voW!8T6CTqWbjZHti%f2OF;Jk4m=XWdF?l;Kiw&Q*J0Kp!oiS)iGWle(X ziK9}^+z9s4LoQ*P|sx)Z09&bYgKuzAx~wr<(Rp@WAxdgM6nZXS3EP{j*i#Yv#|@%H1Q z$0af|vgG`WBAEP%j8Dd204qwC%JehJ;PK-pPP1qC0lD^GHm}>ts=wE;@cUo+cK&z# z_~noM@YN6evG7+G|MrXc?C)9h`!8%*x0)k|_HkSU@Ui2E@VI!MbLU;yDS&nK#38PF zd*Uf|>XZPI=cS8;1P624`80>+UQeDl$-X^%admN(dv+txKadcCBITZAV`8OlcuSoW z;o%n`=bT~p&aLd(vyF4-PD}Yuq@=uv%-k#~%NeBQWD9IJ5|x~SlbbtNgG2EOjle%L zmW+ZjVg#^CDjLboEhi)-j->buB0^$tK6(n*6D~L%IKq!#ea%l_f63SLKjx+9o{)I+ z6TkfU9UIoH6G3_z_p=_HJmSpe_1jqT$1;blS+RiwyN_`0w7b;(%lKRkBrGtBsL(j# z;{^Uh%qeeI*SCuJlx3?xLAzdjU55_kE$b#JugbA< z>Q#>Xbjm!PxdMh6saYh%CXg&*qfml*i-?VO@ohR0x^|tG9-{(SdTa)b0D(pTOKHh6 z%YGeQvOOTcFw$ce;L>7gX%dHO7vSg>II#$Hbjx-(BUU?u0)#_Wfvnzs#IbT+#OqLB z4+H(Z^!9a&zwKx6`Y=61eOUXu=o#s!e^~YnicjvhF(Bf1cvwJeq=)O*`nYkepBp26 zTp#LTP>%QYb~;QscSz11>9sN=7aKkPTWE1!?8U^s=zO0JLzAm{}v1_t^&1RMR zmiv|OzcFBAuuDaswT=OM0|VAZfz4{Hsxk&mO^kH5V{NOUN87?kw*XbQnPIz0z{)D0 zwQ{{*_Vo*_^>r{jpy&FqGR=@YAB{k(Q7dq0p;P`dqd>2rMT=c)rM0q+rt%g{N;hGH z2)CLB`OjL($t}XiHw0hba1!FP@birl5wG+-d;)8tr-m5J6{U^sv?*&CUv~)C6R9!?_L%9^j zN>SLUlo#ha@&T<~v6L@A|CpCwevW${xRaT8h~t|gkTPWwGiFR<`t&JGm@t-6qizwH z86^NUj;Tt%D}k%20#noe*QQStXDQ|wmo-rtgY^##jUV@KYm>6Aq-2;T$EGo1%w)#i zCXgh6H9-Jtw36-lHl?*n=@R@0%oMm9b*nOA^BBi9{+)_vvRq#|M}aHH6gy(d)Hsur z&)zzg83J>+Po3$Qi%`jfH9?fH(nUC0t~YVwWRx@wGX%88%lb4~-X+`j-*+D`zw{E{ zeDe)&zWo-n@4kZ%#UXt08He|;erco;7bHYcoQBk z&Mzf~`1E*Dz=)~fM_!c#zq&dKYU?O!Xre;ZL5(P|Dp4n;tsOMzI%yK}Z&0Q$ zk;15q(Q44jK3Q*8-Y_)SW&c0NT20*oSG@vc1CG`!eeV!zx~?fLSl6&f zMeppf&^;)slUt!*rWnWB_6uQpg>udO)9;js740R8agN|7YHdUB0N$GzrPn3 z1gH)l-Y?L3jFV1BaB@DzWeEcA?&kz91&q$^babVjbKUH)iyk{=-xkiD-Q-xFIKEPp z^?JfWowy>fdgSomqRcl-KsYM!w4bY<`^D+*lk@k<_89_vJ*Bc;At)$-u&^+E1uQOH z@*pTYn1s}LLZX8R7l4h3j1&bQgx8gSlzl>6B*n$q~|J2&IJYa{3PZ(-%PAF=tDFIf8N8~pUflPrAuNtS;s zfc52Tto`OycKrGQ$5wsEDFH0!jlXkl*9v@3?IF_h6u}pq@pE$~$n%_(HCHK@=Loub zNg&IIh(LeGVEU}oWXF^|nd!+AXro9J*viVzlE5fHSR?^8CzphjR0_)~sTZ|a;Yc}A zPlvW$AWAL2S!o66z#tCTXcM^V>~ypm7$jII2`5#pTIy?B$;&Jv)F+H%+Yj;W`=9aD zT@Ufhy^rwx!%y@0Jr6qut=~8EZtf6BnmJDD={WH*0$R$Og0DRCEDz7ThspmP&0P}( zxNg6f2WQ>GV|UHrnFk)@xd)%%jtSEpkoENakMi+qj^Iv|8uV0(XlKBfbvQ{AH$3=Yj+-rOzz_nKbyW5GgBACvz zXZvoB3KX0^af;pBcCl~QK297tA$42InE;gXhGukFBxy;hvWz0oHvnbcy5PVNDeEqT z2zV$N@s%8KN=EcVS$cW-bNJ8+)~?+*0Q>hIWaydz6uk~p1abGUMFg^(-)oTT=*0|J zd-VcdCiy;#m;ybWT<uabJE zjO3&uoSm-Xe9{YVuLN#7aOg*_oLj<`qYH3Z_XcN|zrdM4o@D38Gg$H3tt=C`TJpv% zEP3-bR=jsB8|II}eeI*9xy+-|>uYL#zM{eJYnlQVkaPA!yf#0}zJ>R*?6bRA_~9Hr zd;NYs5y!mp&mV9-d6?+%AW|j3lvWp0-71d0p^U7eECH+(BIVfWlSf2RtzyA9pYz6> zFEi)i`Wiyol z@B%*yU`e%eD+6Mcfw9vkPL=yn=1rW=6xlcK zf4dr|PMyvqxz??>j$)kL-|ctY$^8$^;fco{cL3I#Z@tBR58Tg_&ppkjUw_WGzkbg* zzkJ0XfB(#${p&?hZpP`@20YGec%-|YY_&-42QdXI`DbR{(OG9lq!1cY8CFv5@E$UuUkf(VKUCNd?Kl>BURE6N12 zssylVDQ;|{thJp=aU3-|6LtDdG(CfKbPv?91+oHNla0?UEBx`cgD-_5H12)7cT80G~yKgLC3h_a{%7}fh*r5 zcnDm%DbMoqG5iA0%l?b_21*4}T7<58bJ5EW9|;_O0#kvpv4jdp1SUk|8y!J_1ckVy zbb`a8Id{PWPp`|e9Y=uxR8eUz@re=m_<0Z&<4=B}IPju0a^-gw7G=sg1&-WRm30kt z7>oi@b(G8h(xg&TQdLd5IOGgRD_0f8%AjD`&Mzr*ykVH1lSNWg7)fD)B!>DsrnvEU zli;y!wZPS8j&EGTpP#pu{Zo@_k; z^W;(Ecxuk0yztmF+&%Sn9=t;Vs(T&Xh0n`(Jb33la_u{L?%_vyUe0^_iD&ubh1YrI z?uU6_0B-l7X?pFeqq zt8Nzw6}XBB2*u~(6?`vVB`GqVlxTtGI04tR95Ry9@bL+h@*VETF{WhkPf5=vE-r=G zm?S(dTw&Q#fsKWW*}QQ(YgcY?wAB2y=x=^n@C!c*g#5j93EQ@8W$l{PELyaPjhi-b 
z;NX6a9zQCeb(o!dcHyMF9q8)9c3D4i@)+KJQtmx3bLp}OweVQZoN;5%o#c}BI3Dv)t@ux+&Jxao_)uTa^U0%!lk}smzEGMU=+W3#>8ec-4!venl;TsnG@I5P5tl^-5=ka4_IC|tX`}YZiuGzuX&4)OC z)SY8TF0g;kSy{itWzQJAE+@!$NL}_$AS^0_u(&i5vkGYuQK>d|Q(mufj2%=e~zyRx0ABj^eT!5mBX%G!#lcuSO9{1^Fcu78KDUzSgAG(j!8` zW-?;ds_E2eFsoH`)YoBYYolLpptH3VV^b3YMgbnP1#?Raohp^UmWWTK%g$tG#4cc^ zv{u;!z>Kop*#$iUS6%;f3HHe`yNH$kegUy-B6NrP&~=&7DPv%73}PJap+#UtD^O+a z)nOAr=@xLhexsY-K@&YgCVH|hD3$ivW}COs;NRQZ2vs+$Q6j1rN320yn`Ekbk*%IE*<$Aw~cS%w)}Zcet#dkKb$N!jcxBwVd*Qk^6T?A^UI6><>#0F zhdBbq4H+oXhz>Q%3;d7!)}v({}mt;8!y-ScP_$zpl+fZA0yY9%!CP( z7&lHCQ>Jtcih7?!Jbu z4>Y~kXtQ?HX1B>_x&^v=F!%S-+24(&+kjnENB>Z}sH9eITyGP=QgNeS4gCffuy8{v z>b2fxt`D_vZLkHsx{#doAd=!l!G>KXJ2Qs7yc7}=qbMxSqg-i;D=VW~oKtzN1fTj; znl)K8wr5b=l0u8DH>lD?fyPr*89_q&C6cqQQcxa7PDzj`Raq7V5gzA8NbChXd`@t| z%ZcDvUjm~8MNJ3cD?oKAI2;fEP%Z>T;478&YFr|okud~kW#X5df_G$ssP8ncipqAE zVCe4Si+4z{1d0?2D~jbivWScoz>NtYJ68fnS*BPz<(0B5&m}E8RluqOy~Q9)W#lApTb>Aa7VJCv@z zjqF{vfL{fycK!JozrFW3KZ;QL`{Sntu%2S=moKqn;av9q@i9kNek*YG3uiVj!eifR zf?W;};eLYPbI0&KbIdW|-T(X2-xzWt^9{@|L8Re19;B%B0s(@_fazQc|fY zucSl*OqMvcl%!0810%`HEyZN&rL4S`s`?r#8p^3_Q-&9G&}P=ss8^%1n9)e!&`OZd zcMIfO`e@U1(b(EaVMz;7k(pfa@M8HNiyhh3pL^sn$DD!BKKdAs+;boID`OyM-p-xV zrgMkbgLmD{6C!eyX}abJc-=W|mO~)jF||a~_uM`+4?Z`A*rM`^-yx{QPTt@c0Y-{=w%QUb%^lKmEz4FTTP1 zFTBbta?ZAmo4I=V3R{%PmpAVeQFIBHLnk@8=P({8&l2GkNSc(Ll<+u#;~)`ESBUWo zBUQG`G7G55E2X-yoESMLPJqiXB~f%D#YGilW#*$~&{w7?QkLu2ZQ-MN^Z9$pO15v? z zkF49U73b6EIql*hAnDBBea@UZeFbM{FPxox#Qbn|36=jQ1-J8Y96I2O%jqa0!b`d0 zm&h590K9_YiBB&jHno_{!g^|3%~Us=XwmhcHVufMu}c}&QPpIiUCOml1gKu%t6gKE zO>LB=37fr-!J!*i#V2-m_XzCtGcq(R{%y!H7HUA5&{^ravsy3-MA=#eG+NcP7M0SF zQ-Ha?32Q?u?ZxHTn^X*G1nOjer9ih`_V>4T&{@+=cY})ST7fM2&b~GcLk0`i>^%(G z1ju@O>Fw^KPeg3Dn5ox_^;$ocYke37?Xup5O<>AsS7GXEr%#~D(x<`FtD#%Ks^_`_ zQbw$UdO8OTSO%0N(ptxMm+bGmCXh6!qto6hkk-b~kdXoLV_<@ zW8<1Ehi+H|%#7HT&-Jxq>r>M|Y@nw{g+Wt+p`!$=z8phqE~=VDj4c_M+cMBLCSh(* zrN@{@r#cHwLn<}}vP@N2R7KdeRT4k6^ce(5+Z#o&cREsCbZIoG%ByHCt)#82mi&xD zva?I2On9>BcV+gO021O0*s=Yj)SF#g@`%OhL;wrEUMDQ$!%hBYf8+A5d7NGHG|mE7 z2S1v^&Ns)f^X-YOeRUj5UL3<8FHhv#r*7q|C;!8D&;1|%o;!*Yf80aJo);*){26tA zpVQ#~Ir;8$i97KI=hi;KiqB^7^{Z3(UI1&u;x9S4btRiuEMe!S^#lfpa}@`#09Iyc zHtB^K{gwuBJ>Dn3}3&XCKF;@uQeJ zX{>;hGH_ggXTlh!P8`GRX_L8U<}~gQz?wFGj03PHj~(miHk>_enmEJh0#yQ1j&V}| znDTbtBxUT?t>YPY%Q(k!>@5n!37|Q;|CF&+;~ZUoZ~o8Cj+QH>J5d?SHE!$#X3e&FI=dZr zr}dmYwnEhI8ZMpLz&Ps&Mv*UjI@#p3hP@bZd8#`QBRSQ8bRMf zqmoS2HX?vEM62~W&DLww8waR24bt9?fLK4pEf(2UnwypLh8-|gZ?K{5>qGPunl3vU zt63_w1%pM@*NiWYQErDP|Q zkP<;!W)uZQiIkTo3v7s0WRR7UAh4K^(b6u^mFwt6EG^3>uP~MT!Zc#zL*#RDl*qn< z;tUZoc@&pslA0JyN|YDL5dzj%oVen$O8{#tr*{2?+x}&2Su~HoK6{3}f6e2Ux9?%0 z*k2#aVfiPIvF*p#IQZ8`oLKcWr`CPXnN7cNe%BJN9$t^X(=Gx|?_=1A9L=yg9q=vlLzm)lZWq9-VVHvCm))_ zlaD;ay?4wOQ8ZTk%VZu?;OT*fWuMg5S$E35DGr-0z%^d{h_e3ZoJSnzy!gxuy!PU& zJoV_4JoeBdy!!l$%zy7gK7Z$Z-g)AA=05%$tH1q)ZND#L`F9KX=Isyp=#6*y?EQJH zU%Qrl`}PWGZNl~NNqL4B*u8Qi$F}Yz&L@QAfJhSk!^sygQ7qmklnjBHoXA9SqLXRJ zFDE}y{%0v~dD+E8N5m5%ke`v30$T0jo7@HuCr1 zt5~vl8SB?^2~W$ zFZysv?$IYK9&h;%y+h(S?cvMDJxAGl+!Ys3f5(2mh$KAyBk>T(a`Osizteek3qT%_ z>+Tfr+3RwSOCm@T1RTOMbMXj^#5XaSD^W27CZ!UZmO)5rD(Mvhj!G($>OATiQmC(s zr@l6cy6R+_>$2%+E~Blkl=6aX@%(=6kcwF$~_=)pu*|DD;dyfzx z^>OFEll<`Wa`xUTJsdq8jf+bXzc1O(+AU7v zJJUq~yYT(58*n-oETC0|`&9wWfHd+-J7_m5X=TL6S*{C=Ab@g%c0(^_dp})$*BBUv zp=&TWe1o38VeDOf0ua3n3t;sMYz>O=RJ!r54-ErBMgjc|0wH3BLD3{#@TR-G343WhP4QV+@+ugtY@w^T4xNBQS7jsD1oE!wEL<13 zyRLK_3e*h`4KO$|h^@a1wM8!?#*BHO6WfRdb!RiWZh;YjAG^SgS;Vk?Ku_<88LQGl z)vcyaKx}ZNlb!)1{bIucV+t${T{AjbpazC@0#asrx?1VBHFIr5g!6TQtP#B|joiS< z(1?Zsv0>Tn7GN`21*$Bq49N9{`UUKCmGlTOUF)tBa4JOK5JP86Ec&`oRF&RXRbdQw 
zrqHQMMAw*%y(5oaT@kwabUJF%88p^2Vs4gwO(NU{?rh52m?pYZa(_BK%_2_DN%_3& z86@?mkX<{SM1btz(#1#-A?|$h<#M)dcIB{u)puWQl)8I?o4gNvf!nH=IQ8Rw9RF-O zN8X>vzPHD*{q+f~d3gehpP#^QFHGgzrzbLR&VTvf!JGKviJSQQol&^1c!1cWFH!0B zF%1FpDfWDyG?#aATmLwJeK>(npB=-eFU{urd7{+5n#U@Erc39}l98T7N@fxT)rABn zgprh&LQ!QQF-jm04`ScWt$gy~T%LGboZG!hs)8BZF?%|*XHDgHvDvpz6Cj!FfU7A2 zR5Pb2;H0!n3494;&73q|6z62_m@%7*K-NUDNwTiwqZ-GAQ36Z?L5|j} zF#=tqlmY1iX5;>8)tWR`0jn{LRcy2ZPGYjI>{D#~*njqq9;HCmt=uN2EFCRb%0PIf zi%^uU;*4iapXGRa@aF&CBG(w_fGcGT*R(0qa$)&~1mGVYLQr%Rq47%4&lSL`AR(uSH0AZ-hIVqQn@KJax^L7_W9Xw?(?hdu zn2!D%RB3xDk|0;!(MOf8Uyct@E3j2(>Z8Rr=&(B3Zt5ID(?5)5WCXoHmcF+KbDy2g zUXv)gN;HNd%mPkkV-hw~yi}wFy0mdpX=1UqDi%R!t3R3=PgE6`M14fkYf8ap$e^QD zprbGpwYrE_RXHUBB(1s@ifhUQ^7ABES5ncILuy$t*)FIyy;jGZUCyj(2K4QN{Jd7S|G-mPc4>HgQ?G4#-N+&m>2HDYr0D z04q-5DxRvUBuYwSDJYDlTAV|3i~Nr2G>VF2$zaUDE$CH^ALT;Wit}Bkh!Wb$l zrLM_mlA=9Hi@8Wf>;=NE9^}ID)wmv9B7pTP2iJVTitnD6!2bqY7rxBr&yM5wxwo_U z{X1Ck>3!__^<|E&_!!r%-{Z0SSKN0j<4snLC~T$7v`Y|DNr2KxfYSF~!_ae`4$BaAtu~6v)#Me_ zk`R}~)(vZT_l=i$_1PyKQ*h0kIF>ng%;JS79_6{m9+rKxdF0+ZdGf*gc;vpjxLbV9 zG&yI2)b)GrxSOXRe~Jh1ox=n7-0y&_yJp?t7;^CBBab_V9Nd4`eZ2q9dp!NvQ@r)c z8@&F~Ydrbr6FmRS^L+8i=lt@`4}9_NJm$ao9>353hBZGgV%fJp^TUUq@tcxO{LOdx z>C10expXP()~@3C{sUZbcIDz>XIB642iyNzO_Zk}X#y|d?p}^%UStxnR|3frh)MJg zCG`AN$`b{sV+B~Vim8x#nx9ojdP+8F$yuZb$Y*8bIw0%H6<I)~FmU(N|QT@FveD>4P&*bF>HNbEb~$u1{%PFxfr9hQW@1bLsBbjNY;=ro)z z2XVy18~2D2P@-z1wX2I}i&>z$ z8jU`WuC5}wx{I)wW!+lFKz9RnQ#B?z-fPp+Z8g$qG*ew(C-F$2KEH}>JC5-64~y7) z^ppTm5PvM&#LvI4WB*Z4cI>&pSKn=9-9~r3d`sE0H3lDXyp?WRY6I$9bp+dgd>`@{j_TB zbQpUuT6<~JSmbxu>FOQ9YU`s{#KVvPmNIAIVBesaGESsE^IWrnIDmrI70pgf_7NdsY=Se(^N=$I+cyf+;SCM!#5$Nd*iG zaP?F)(Ouihb#oUZ0$MhW_W!kLH8wYhNY&CQ5MmuR(_(I+>izb#hgE&;3_9sR== zx@2iIG-J1^xpvLYjbR&u-3GaTg8-9BgtHENcPoQ~8g2+|^;nzeG7IR)_Vr%9z_u42D*$W`V4(b_T7D0#LOKiP-MfXVTjq2c7Bkc0`~q zcgI-oNxwD}OOtF@MWC-w#L|+1rYe#4ibQ%k%H-TifwFRj%_?r#4D@LQ99x>WCS|Kf zEk4{goE=+_;_BkZzCEri{COQtC%n1n9?PGu zdexwBdD_JjQR;md6M`*#97&J!t$H7X*Q z^xRZ};=_nak0q--m-zHJ;uE6@4)hl1@+a@U^(s$4^(gn>e{8IFXaOfR&PqO986eZW+a`H!EYg{+X&rY0*+3 z>)$yNXH1zUj{H6zeefa2Sghw?c!9Uxd5ZvyKy$zO-+aN6m4C5(?NU~+UCib!f3tn- zUu@g3knL-KVE^WY9NV#o6MGips-zZhUeDPRs~zbEV#8dBi*&`$>zIJ7hbY-_fjEKzG<04^Jpk&y>$KR1sWA@HXdT8V zs;$k|MX{3EzrCFneLD@RYMR>eQK>~`bVOs&1!L^+!KU@4N9Qkq^^`SL$$e*&R30u)Jdz@H zk^oX9@nr#&YEmiJXOQ2TAp4U^EDt29B8YNbI>iEQnF3$=ZK>p`awySPQL3*ePhC$+ zLp5>LwZsT`#y98$sM_%>ZXvF%i-<-up$$f&+H8b18Awsv$P@LNtLrAM#YnEIliX$_ z8P%;6H7d_otXUa*R7_P}4z=|e)YmHtJ&u~HNJ@$VDJcx3r74MycDb*D0CKXfQdt&8 zL7p!esh;FyUL`NbhkUV$@-T|>eMyWuLt3m0nXzYx@ZOEv(Zw9w_6<(kzhUF=FR}Ei zhd8+EZB~8#AYZ;Pishf)&azKuvhuUp99;S`=eB)@%a+eLweeE{tuMKB@HhOOmJ{H- z8vj%4CHU_m%xw=>j&CB&<1nEfN4a`(7g3i_5g+JAjNdt;eccGXe3l?j7oz+w5vxpg zEufW^q(D|0Vg4b62Sx~BWs{RuNSVN=&ZOkO>Yz}9ZKKwJMgokfe^3;3KUxVgMggo& z0efBNpg?(_K)JwIdl$J`r9_8?vv1o*=DqncFFZDfmmYtRxi3A(+!vnVxjFapD{NCBzfBRG(z4vyWdhlKzz3)DrdH4yQ7Q1`K9Xv4m9_A=186J9E&byl@ z<~%CrJ;pl%RdZi`i^uMJn73Yhodxs1=Hs~^@#zPj^7rpc_*J0g!`J5W%NGmSwQ_^_ z#nmkQ^eg5*`w|OK zkSKzJ!X1FMd*5LWA9rTsmYsb5&G#($X%Sz4|2tnUSi~QHZ(_-+t?WK{ik)K1*6-p^ z+1_*XEJs~Dad!9S?BzgC%5v|?^XzeQww}Q}|3jEWH@J`AlD6<4V z`9H%8s)-PbE~+I>;5k>ye`;+z(Pi}{)U=RPuM%q`M@hM$@1)#lqgmjh!QMmrz%XqB z8KvqD@iTVu9V4{bmAOnSX;o)nv*%;6=Ah|FqSKT`ue}gMM;c~b0e#)gBGejWe>>ea zo2*;Ws?8+Iy&V!zbaKDSqU9TTW$x!JUb&m2r~Fy5?v%jRL0m6nbMbN+pUmIOi?6I^ z-TE*AvqXU}XYt8bh)U|<#Mu#+CMp{=vCHc z**Dxh$PL*yZ0(~@(*s9aHz#>Xv1o3XIOx2$Zn)d{Q7W@iIE|bK$04}T>xu9 z!?i)J09FItB34KGRdRl_d{?>bD-+g1}V~_O>Ea zrKuRI^BC#u5MUFq>=3{+>A`A8+o&ZpAeGCW(HuN*fhB+L;{4eNg8cJXv%-mOn=W#} zHBA7^mCrxg%uT`D-r?fnhdH}oCeHK6bL^wh?0;_zn_s()Rj-Wa_h%=v;K?a`^@IS{ zowthJ%m?@1%)58q#1BvXA1m>@7V 
zNdd0OVpEttUTLiw#}omZ>EkCmk_b+`ZM?%KjS~2}ZJZ;uf&x!UDu{ng0jqx?YP18c zlssJj?pK!o?wd7ZhGQb(n1CgNCb{?9FrR;}p*T}pO$WvZXq zGj8X;JMR^Rd?ydycR#PZ^b+sB`z~+3^(ODldyg-^`-0#8_=z>^{$ky_KluBvuUWhN z8xHSUjMKiq9ht_Jgqc^*ZznF)g|dPWv~4+bbyj20mQq`jEBf%Lpq*Q7JN}37DC?+CTz@wy1mQ4hvRpXslMo>mA(FHA{N;_$^3{ljq zrl_`=MvV@wO}?itpVsy~box{*rYKRPL83e_(q(X`Pt4rxES2OWeQFnmw9fRak7I8+ zj=J(7s>&1STRi2r2cXkLQ(YTINmUGOhH^Tr&D6A4QQTNcaa*A{_hjNK0!eF#q{fy{ zetRMbl|dA0Qm8cNIQ9o-Uv!*PYRI5mn@O4gRzh_Yc~UWRbyehQ>d4Wykl7)Dp{1Re z1~mcYZCuW8BBb6TK-Gn3u^R6hBUfwmc$T(vrKFiuT_3sTVWMiaq%@hyR|^nGuqjdL zscKhK-YEZ3eKAd~#b^y>n9U{fEOO~Iq|;F8FTwMwDDo`&?AfyIM^kkGn$}1vi!YO% zdWO6VS4wg{WOzPVuE%P>9>tQ&o!Jo`ZIghyv>1i@3Q{a$Jo8>1$Hle zmhWC4#m7%bU4QLnmdqQ+mLKlq=*nkt-uOC4R=>pIRWEUN#|L3ZK04Jo8_BiLc&xna^K-fp;ExkXP@&n|B|3 zh_?i=p1ErlPv1R@*B*U<7azKxC+@n7$7c(K-8si$FF*7YFFp7q&)oeG&))k8FV1xSJC^;tm@nsk%EB+cXZ7z(W#89q6~Nl? zr+}5f*uJ$J`17mp`01lhIkIsJ2W5NdmkaRRdxBu63!GfD11GWg3w{)bO8Jy!l zB2&rnk0$L(2=z&Mq+bms=A0J|Qs=VcGvzrZ3rweyt)wr=D|58kBY-s| zur(}op1F5o>jjBMHzVbruTG+<+BxD$J~O2AQoYfvY! z)FD2siC(J!R$mi?-8E8wbLBI+4EB}?fRzhel`}M0C%;`Iplp`&jf`{|uxsj3S4)f& zK)Plu$JUrkn+VLFrdUQaiP#&0vDf?4*A|AgDT4O$AlgcUBzDAM6tSwQjHO$ZE62o# zW=EkZO{ZVPy|JnQYhyKZcQR-+U=XHH_~G$rTND5g#r#q>#I95!RpXl6|w>#*4ZTX#*H=&;$7mDVgd zK4B~~1b(KA%@p{VEr4~$BOoPkqbz3%V9gjmg(=D;%}V|%$5c1UVDzyL!1~t| zs8Zl+)NKMxx835{{&)Ys>;FR5)G1RODHz6#9*dI7VaBwX4zT*~|J>|otx{U5CXAiv z81O!0(li0ZnGU;a)}0Q(df@JRcy!J~JpSmT%$YNXM;?2Gx8}aZ$De=1{I5RZt8eD< z;}0LRY_T}%wLh_M)2|%Zv=FDgOC)F>ATPz6_J%YET(vZ0pjH)%5^55#Qeo_DrcF~z zb4LSODfBHGaawKdv>VLm%r=MVmHbo^xOAQ3z&fRJTb04X12k#7Xwu0(aRS=zYqXk& zsMifr(%MCKeJAmy9UQw7%bJ51*nZlN4aY9CeD8U7x&(6AGfI{Lj(uwnUu4G_e~w&= z=2A>P=@spih(oULP;z8R(CiXtT#-&wTLvB4SXu>^+FMW4(Q*i5+X1ZY2QfD8!rZWn z?v~x6P}XCtUPEWYc2Oal(A6J7-QY}l;ZYjvuTWOuEr1m!*UzO!T}Yj_iqh6nQYsS3 zY)YWSm_b%sJeh5=WVFVRDo|C?nIqeA1ms*KwK0-Xa~6p;QlVr!x*~w&#suOUGKm%S zl-^NGhPIXHrUv}WstKxUBfQCiZpUa@|?UXH3T;p2&~Z( zCFf>z^pUOZrm&-nl6Es?t!jZy6&=P_wE7x4G=-wdGtp~Pv73@Gw26~Y1u@c-La#YS z9A6ZDopGWPqv$ck$a4z7&?2f$9fGR%GBri6REt#=xKfelLP_RHiqeje9=(fD&n1Li zT1>FpLSioef$Off+3@3?Z2IvoHhn*f<0~J<>%d$19(|9?hu-1xk$1Ur^nI=zn}@ga z=K@q;;dSC`qAvbLYQP5KuCBrV?4N{stR_WZE;(Q~xzWcdOmU$w^_&F%)5HZI7O*-= zUh*Xh(|jq+2%y?pfO!+i9_W4!;U92dBH<=%UE^`1GrH2Xnby5k|0w;w*?vA6}t;Srt5<-~k~vMTUODZ*F2 zBSZpsQf&u$nl6XM)u@RqZOhw4rng(5&& z1-2TjRtj61P+4?z_v^hh4egm&oqp{=D(wGQR!gFwW=lS+dfX7hl`TU(3TdaHN6{ zKR(B+Zyw^{@d7UUweZ^Mg26W8cuAw6M48`Ez)Ae9$09~c z$#Hx*XPm&*7+#(+hF7NlhY#=j4=d(Q#e389BprW^OCj-LL}tVhpO->@RUyHV0qohkorC*#@xwQt z^Yr5nIRNYSnNzrZ#zgLzHQBM8E+92+;%I>q0W4Y0l%)bzGbfH=me`$BCvf-lNwQSt z|5K*55YQ6fnI`Zvb*ysEBmt}`OdmI%iMQUyplLV~(fvf4`Cd>Jg9f0*u z*WYadO1Cn4)GcB+J8aBoWh~Wg|9ec?f6M>wyH!AIG^0k{%Gj~vxbx1tx&8J#967j@ zEiw`*KWPJ%u}_&*b6z=kV~H2f6p2dwKZbhj>X; z_uKEk#Rng~!w)}w&YypL!>>QiW6_WAvt#x5xExr4pZjhJm>v?SLMbnZAU`vZ(!wZF zKV{gvno$c_bQl|?LYM`d1g>-)G-1^C2ie)X<+GTV#tz}5Jm(Hf`7#sGXZ#hYM(S920FNg#6 z6z3HsYO9EHfzBpN9kl{2SqRbE&-e7Vsr3f&forQI82>s5iPOS0}ozbTYnLy(*sM85=?9zz)2 z#xVNKk=V6C7+btB%lYO;Pi!q0vDBTVK4T}93F~P}-$HrJa$3?iq0QZaCV#uA!%e7) zw$Logs?^m~B(I?^b1Q8{2QXBh!dUByzUDkNnMbHdJ&3993c6}f>hjOfBL9O$6^5xb zl#Y5)%xzJ0b|ecdCSlQJV>1+?ZpuW}l!Kw8oGwcfc5^fB&DCgI+p%h`a$JM1RY0q; z4Sib&16I2LrGNc*M^o(fL=#@F_!ovtYc7%g}{?4k8rOv$eI?LX9 zmp@;5l^>pbia%a{h2O;%zxgJAy!i&-zwjKNKlwPHKK2-2Kl_qwOPzahE2($arvk-XSVES^_Sn{zG)|!?pMjZ;7zV)ASv#C6#B=~l3YlW zz)nMaE}G0TRB0ttgr!gtlz=L=7=4ZihLA)wS>+BZjZUK`yBvM94sBxxs(O`(Eu9Dy zEzMG{YHOOPs%~&3pe-z|A~8Liq^w->$}7nbfsi6#lhk4)wo#97Q62HJo@MBfrGbF5 zCV{0+QnYqrm2(7`l4U(!L_t_x2N42WX{KJ1_1#=4uEVFIi7e{?ku7>I32ddv{`d|n z-ldHqF4{;CaEO%ahSaO2{%A>-W6C(i3u(oKSGQ1NA0SKWR9sCnY0d2-gw*6qkgqhF 
zQ4b7KV=_^uYQxaejkQb4hq_+sN|``OJ~nGQR%@idxHp}hega&fm`uR}S*Z;47NJw; zVA7Riw5ZYAyJ+j|p;CTFWt)ZMf@W53IV*s*oaGxXbM#a;Uwn6(=U&;y(iPF{+E>PB zUtHyjZ!Y5#tmU{%8PC4DneTt|z%R_q=L^nq?<0%ZusxoOK5BmW{SvReyPHirlQ`^L z%BSC)=GAvLv25L0+`VE1!lDR^D-i!YK&z&gypm=Clzv1rpqFL4s#AW?2tD@e0*m64 zMT}}JcFaRV*oKF(bc@L7>ZjM%hf#!xtzOIZHY)>lMyh=h$#?e06p~Br*)XD49i{$= z4|ca0$~K)A$U2VtOfa=4ya-#lmz4cyX$wuJ$SagsS5NBGa_LoR#pkw(-_Z#Gbqlmg z4AJWZlsf6^?G{mPLah_P>Jktf>_czwq}8CMLqN=WZGg@ZWt^l9Q@;)CfQWfZE7o2O zL)T0U^mi~csI+D^G1ym4Uw5he&I0;-^Xco$#@?MGFqTUHP_bAmJv~~wyR?i9nHiF$ zRe)&7sAP9ni{LInBe7#ZQzcf3szB;bUbH||w)}>4^wnYFqr&M|r(&rW@m>%pF(i#4 zO);jLbXszwMXcu{jtX;i0s7J$`c%#0XN~CE2e6t!-P+Gp&lHv~Im#cu9^}hUH#2|U zCU$KJV)Ghbe)#ejH@W>h1Gk?hbME``IDdT`hd%u;+u!>y8{Qqo;+JmWho^4k>nFzY z-o3Z+#_Z7oSfhDv(*Nb#rzdfG>GK5bdYyEa_b9mdAqh?|IX%jAr`yQI3|YyQfd)uBnr_ zUBF5i>^@trp#attfhA=S{9RM!T1o=a(PNl2YBbZ7LFQwX?!hrk6yQ?24JVJ0ZO1_N ze*sJ2Y4mL<$xX+PRmNcbJ0|P@2e5xlY2_L{dK6>E{F6vjc~j8w#-LnttbmlGYf!c) z2+&QCZ3VE#-6qf_uyy;C83K$m9j|`hGy85HnezxwKJg@TL}kyu=XRcY;Yr?_`zrIl zn9Jfnzh&v4U$Xd@kJ!BYTLG+<1bFNtKG=m+fvntAUjd9L8tc+gwdK>IDn`{&>BwJV zAGFbIP*W=^uuYljMigUJtARREkFBB@+w^uZaX4ZsajfkER?VWSnl;^Yn1*Hl5H0#a z8jVAgw1`@5=%k>nkEBXbrf(J8G${OQ5bH5LN3X39=`#bQ~6#*oCoi9TqWb z(;7@wOR!Wg#@?`ue${&HjqA}=uEpMdl%|Tk)K{ILvGEdRH9qn?)2Y`Ni9;@-SXDu} zzM9nfRDucu2rmyJz9F0xRg|dc2vXJ26qr&4vZ4ts@g}Y|lwtv~bX7D_HU4A>Xr&tq zh>(gK(^f)?u9*yz!2wti%_f5CJGok6#;wr6`4SUe&3(AnSUFo|;bc)eK24njHkwsQ=3 zdNE|W#0}e32DNADYja|#dv4M>(=>-@A5|1=q3J8fKMNXT@z2YJnKGD ztev_v^zA9wjfD(!)d^tLU{KeHqSeu5mH^tI5=AXpf$*Ih2-?0DpUo?| zykP~tyEYSk^Z)_-_v5$cFd+xdaAnI8&Me!?kzd!aZr%d6fAuR)f3D;3&r3PAcr80+ zd+X;va$)UGu5LYm=Y~C;U$u*)i`H{^@g`2J+s&EXNAYw%$K_LQTse7;#EX8E_(f6R z5k%fOsjnA1! zS44?d9OiU+7BNLs2c@Gc5Y;V^-;^u+D%$9g`fF*o2&n4OOK{eUh^lK+i;z*?q-~YD zrzNkpo=gF-^mY*mwHiDN>IkTACq``}yv4+oiWYoo)r1IC`3VFm79qkYMy?$u$3vP7 zgbH*;s5=RhGVD{+PLu#sg0YvYm938RlPyEU>3i^!?a)>e$>u>qhXtVaa>S{OGwuz{|JH@~K0U&o!`bXSkjXpq4l-~4LG~SuV*7#1eEj9#tlfHw z(pr-U+8dPC7-($o77!VsS;TCOh&GFDSU}@C=FWZ$a*vk&K@5F;B6Q8-qb=CF6#(d# zI?^uw&%$-{06h&x3^{e^lFKn9mXUo%?sxTJ>Q0AX^+=$2qYGu*#8-GG(Rd<&sHOYK z+3SwhKa~oPFao!qAn!^T76EO2X&nQGF7Y=~|BRg?+>O|6-2yy)=nO_Wv^t3|0$+XI zs6^njnzZQpt!U)gwsaX#^_tKRTQLsEx~Tz6ubRQ@273Ec0$6H>hg9_U)zi~kC16#~ z;7}>O{W&0fLv1{!zzKZ+(Zll zSc95Y+C`A))nxJ?B@c3xvKX5DW%=!;+-hL~ui@iMkCf0sDhsZ;p z6Mc9t+3uf^e`P*_hhD^a^+W9a^I;aeHj(!pnZ)itzu@AY4Xpj;JO29lM-K1b%hf9v ziHVLND<^}Z(n5#DCB)$EbA^*9j+7Nx(H_oTupYQW|*#w zw-TUIx(jDb7gLtg#yYwRXG|O;KsAB8r%iD{)a*%0g20IaQ{$L1UI1*Ia?V)U7D$t0 z)0KG($Bbs``0@XA2i|s@0M95Uj~nl>2?Ai_WP6;z(s(&<;#fKVf2ZG>BsNjjl>zOe zXZi4>mze+Q%PjnEE=RU6 z<+9Uut~hVU-)%23ekVzbzCcA`I1N>aloUo%U!U!0k~MW{1T@+Ntcl^m z=kQOgrpapHY0zhbRl zDvD&Sz`!!>O)Jnr?*t2rbl>BoL)L zNN?LNd7gXek!5$wcG-VOKx!WY9XkZBb_z&sXF$D$5$#T{8}^C=+AVOqgF*Ekx|?^& zu|sm6lR)oYtPT6*+NZEJAC&*@2wmz^*wq4otsZo?cu61-1*wwYCrib;^kEEG6GSma zV`xdB$6SWhP(!=A9-TPsw$>Ipnp){V!gSLz2mL55RxhVf?oqB+JEc~ur1+FSC zMA3Xj0^R8d8qUUG^)EzyDV;j^1T-N<=whm94KG0zR)RLP3}c|&t8Xb0y>RtCbG*LC>EG6Xwwo^QYk{BnN*F9D5>k0%iHj8 z)Dv$R7C~UZO~5Ba{DOabCr){_xRtdM*k;GCK?FpVR)EP)jA4*)O)p;c2K+=oBv^+D z5NLAAuP37w3ed#MJ%tHO_%}#BQ1=Lk4G-WX*^xNzBbDbYf=PP;t<6}Ja>?SsBPG|3dQeJv(KX1KvloO|`*mE$8 z*WTX7d!OvX#l4)PPWin3!5+R`;Do1789NUL^86bM`R>FMZIQKv1#(9^=u4S}?-X1eUn0#sUoT?0M+ZP=Br!jW3~hYGQF#bN6T6R?UG zE0psq>Fus#q_2fRTNC}}X0CS|80qRn-`pg^x>!V9qlk)nH03F1N~7p&O{J$X7G0r_ zKvWPTt+901govN;#4h5xw>kn%rU=l=cy6c)8ElaHn45s5G>c)iz+h{=h(Beb=q@xW z9nDe(_4+}IinXlYaGp2cT*Ar~7umj5%E@aRx#|4RQ*iofEKc7}=IEDG*!|H&w#=Qx z`nRU==X2xu_Muz(=+6K0=G2>b_x784_wL(xRRC-A=MRzY@;zDSzaZ`0hXfvcp7UFt 
zz-hy?to`u;zI|&Ze|`Qu2bV8k{r8{p*(=ZU)7M|%>~w;_K_>$R7OeyKnR46OVKMefMzZ>{;9~YX&o?OmZX^ojC4RW=vHeYXZ|Ij}p)t z>j16klLeqA$o7P>4zRj&$`paCNz9rsp4%NzCFjXf=`vKtWBt>jHCg~?6jPJ|>`Kao z+iqq2sN4RTBwFB0$$v9p)M$aM(T-QOCkb>pvcW5+WQL!lv`&rVRwYG)z}YyZ+i~Iq zCJ1~@mSajcpfXvsV&ldrNl68=ZXGMv6+oOk!vR%FZY*VDXeCXA(mgqA;&kqxbuahK zxQ|K7oQR{wam%fvm~s1bUU>C+9(?>>QAzjn`a4hZ);mw|+RG2{%XjZ`YX2HMPHg4u z{#BemvWh_WJtPF4A}{F*g_(Y2qXnt~uP1_o}qDl7f~dvg;ekZ)_yFGLOK#5CThm zNYh1<+Zjc4;}zl?d@0l=klq+YLU{;z>O^vN0%HO=;mubGX$v6Em`JoHjnKwSBAY6R zX;H~%jRaMi@e;7|u9RR?H;89l59jN;I8$fEv%QD2Whzc)RpDOJ=CJcctpqCps-u_i z1_ROc8q!(>Hg!6=-#YRuvgy#*iW4iQzVwvlTV1&X#~X;_WD zWj+0xt)d3E)7QS9-quwDQfnF3tmB$)v#f8x-nf#UrnU4mub2Np;7qk$j_+kawVNT; zUJ2~m5aff5e} zOFZexx=3f@1$xqa7|9ZtOSp(RMj$oQjqXfWtYX~-Ui216&{dpBPelPEb>h4mbX=>~ zF;LMepjJt3U<}D8+=x4PiiCY9$q>k@z2HsxSr0M}9H;cud0H;{)9MjSXJ|G<1#R?H zi<+-(r@6LO0zeb$f_iHFQfP9EWhAzNe(xM~=f&xV6`%@ABlomGnqLlP{{m``g`;wg z$Kswzt8*Nx^C?(@D`*dtvLJ!c7+Hros2Y`PHhq^XC0JHr4XLK`LJIXhx!5z?u@~r2 zX9=7ZcSx}4#-#11p~Xz81d8%@Gr27SEj3M~w;M?}^by)5<+rMXV3mjkT`!m98C+`8 z6Ji?TYHKGa^BZul)Z!;T<5H#a;)aGW@deR(0jjoct_XAm3M9pJj^HKNa;xYdT;MC# zdJVre8=kcWqRjnru8|Yj)%dhnND%mnHVhD7wMtt^ zw2lyK9>QPlC0gz`&pb?pzLz#zCx$^arr~N-=j;VKnE{+9IIPaZ!S^Cj&iF z7PRN+()lygl}Tq;Av$XbL)Q%gEov$ns;Fz%i4YwiQ-bK`ecrtN**bn-eU3e+6Zz=p zgFOB2a+YlNVd>f+-u>V(@60{Q=54twUK+;}PjBbLPh4<1Q^U61$vpS+YTo{EJI=0! z96Xl6yC3f5=SAlUh-hHP!9bpS{SSWn>ljypOW3f>ldT6XQPLpb)Gb0*eq&jSffjQ& z+FoVGj6M;^0$H|xjHX`4_^eKAKLZ2Tv5FWr= zf|UVdA68>Gb`j~<2CvcG)g!DPcV!gA%^BE=V+6S5oa!8gS}R1&*+#FH-<(DrXi5WPLM*gP%=g_dEft_a?LU%_;o;%ox6T=w?2- z`@g(9>;LfX?ElM~V(;E@6WhO@L(I`nNphY?;+gmG+xG&e);-RirH`}p+xx|yWy9|u z@ze&Lx&D<>C#2~{CtRyk0moRom7FW=;%nIq9Sm0bz$McpLy-I zS9s)+hj{402e|*u(V@v{CeU@2fVY0@NF{*!(|nN<4U38Tl18t+&iee)P5 z2w07~bsUqFiJ-^IdABL!zNT{5v^%(K>K$^=Gng`ZI^%Cs=0}wKl50Kq@El%!>qUX9 z2YBw4hj@GLQ@rrP-Ms$ty=++dEuLq#30SS=$d=zYwDD&=j;$fu>j){~E+j{tBUJ)P zUUmqTRq0gK6;LMuTW_<|ps`S`wo~2FPfC%Bj50NqZC%u;d#M!XR;{#JiKA1CwY7Jl z7RTA%+CxpXiNZ2HMfCz^>Ot~U1Ekb+5}vCfGOvTEd<`k(otz6Q;kOMJ`1IGKeD>Q( zKKkhd-z~ksqV)lM^urnEe(l7H9kCoZm&N{y++mHQJR z0lTiNfTZRid`i!fs1G5%D~Tvgw5a_=V%2#9SS3U@G?3C^B~8>#P_>myC7pQJ4d7O3 z$F;_eQqT*pF^1hzB*h}4>Ta_g!nsx78b zRZg2KN1p#B^c}94+fSma+b6)}#0}$7fuGF+TRZ4C3LL4{2(YYUz_i@~Q#XtPQM$bh zcWh%wyOj~sPHuGW0jmI)d6xjyR(jPNv9_$?x^2JXcu)HVhAq1pG;9|rT1Ssuvsbl> zo|feTva7hJ-zb2!4qL-A0p2zA3!DvVcVMeuM|a~!fwn^owC%)NyOFMD0pj*8qAK@@ z(>p|;#zi3O0;YNwObxEs)gIUcy6oDE^ccJZ4E-^;hG11E&|@i~RUBn)V<|?1njW(_ z(Yi)drPXu^gbrv8QZDMK&q<;#T~uj7Fsd{UR0&RW<({J_|17pFXRK+bF~^^wGtQl! 
zq)YT9c+j16o`DQEtVvE75>L=uAb?jKj6OX8OMW5)HI-tG3<}6yZ_;o>f|WhLic0TD zGMz4we#DKUlb+=5cOh-lA!=m1a^GoEmTxD0`Ce-GUPA4efGMF&)Nm`Bs#Y3{YH7}` zqB*vZ+RKS(e6r|pPsHY*i!G>t`U`PX3wRj<3TgMupv@%_`{e@kXH#i88H>%g2wO-M zH5XE;yOf1JR)X23LTZjCFz8dqwSWdXJoBhLpG2EqA)N_Lv_(}?6<&m=euIDrz@LdG94n-GyI;nut~_aRNMUrOmjOH507sBUrXC$mbM+a7DzB zS3{?W0~H>{YQkFk2vhfQv07lPPEVw1kO1);rwSUmDBq{R)zwC`V?C&&hqQhOYPWE< zxJAzECdvU-U4+W9Sj&i`rOQ(w@3MSfxIkC5aS)$I0k<|QDcvLZN*(f+ZKcI5r}H{x z`hF@j0!2M)+Iyuu81m?_meV!RLZ`igRuO7Coi_u0-VF6!aR64g-d6&18s<(Ba^_41 zuF1JQEi|-Mq0;NI2$-nkdoTKB@Y@RUS6i;Ibh8&vzq6bdK3L1912O!)E|{mD+r+$2 zT{w88nqL=1@W4Zx_~L6%Trae-a$N*3yta;?7M;WOVkLjC_Tz=uH?nqf7*_+E_+!O+ zUVCp9D>iy^)FpwpKUprel*>w2Zx0m9Z!B)mqqYs8w)cr|);Z>NHQEQ!>h1EIYyu#| z^vE+Y8hQksxt!ZC!)yy z`wV4kq)r}*Mt?AZ($#JhtZ_r{nuNwZg~au)G+s!guRwrZ?rFq2EPyI-C->4Nz}IO} z<|yo;tKUwCxgDKNLzmJe*rye--iWSG%HB0Ss_sVW3?-QRTNu2q#bOh{Qd+Qvjf^+| zOOLf*P4D$~0jwIjdb8>8OJ;a5jjrx=fy#8ctOZ=_ujX1$4I^DmFeLY*q;9ZT3fcjXKv0OL-szO;L0USQ!c~(N`59fE9{a#FZ}V3Vk&qbgJUf z=Y+|6I(_wdXiCyhRb+{Ht)oY-ZPDrJGMsAU#WkZ$K&QH+k{QbANJ^%MX)r z{%#T{zM9OT&nB_&<0)+WU@EKLoW#PXZsV&t|KXE+{)hMO_&?0O^Z)Sn?Kkns12?hz z+j|M!JC`(<&nUREfF!psxU}zGw*2`dzkTr_KY#W#KYj8Fi@u!4o>j{^f9wdmw{79K z-xjj}z+S>agNccWA}uY2jEpoA6XOX94r2H2oqYG*H@x%CTfF$<^E~zRlRP-*e(t(` zmSc`V1+Zp`O_`vyGTqF~sbjc90Be>kXHNQO4AwM(rs-ox3#15iDY>u4D6Lwf9b>a* zO&aR}u*suuWvZO_Pp0>O0BgpS$qt(~X`%zJ6v$Gh*O@kclB4BH0W4)XNtt(0fJ@2d z{_j*d3Sf;MJ(`IEbc&5p5{Zsdl8Fj@2~a6;HCABi)|*Fj+kZwgUP%KndXhkvTu+uW zCe9SFx|6%6+`+5~Gnp}VhS==_fzz2WX%>$@^eE3h_Y99bb{~&Ceiw5;c#e19f11}{ zyN{n1yuq1ctMT*LgX_WN>{{_PoBo)`k7PQ_seu;e5aREMNTL%$I+h<@>)q_+^zh zA1rX@^ItEpbZZnJ{BVr93wCnKC!5mt0V-8CvdhZJEmE5G3(%+%=xFkxz1kUDo0A0o z!>9_EqA6bNXgM)fEW%v*JN?an(^>Tk#?l|KRQ)DuU94g?rj{dA7ayg#*qz#zL`oZT z$f+q3aH)1!d`TveB{3v6#SvI>h06sl0yse=H3gF-a95yCB&#KcjFw1Z8~sS=@F%A` zo~Vvsd}_Q%GNqAjE+(Wlhv=p{@;bZ75eFAsYvDq^4zHSCE>w4Oq(H;*QX|flCPyp4 z-ozpfB$wmUEQ-J0#<`*nu2va|l>Z=EAUCc~f?Q!4`OOV9=voC13u$T3$7GD9q2@3R zWjnC89ivZmkRkP6fux=EcWj_nyH;Rjg}~JYu61seed`5&)^kI*of`sRgPP3@=(cd( zy2oKX?d!3(u4UM~lR<$q9Y)`rEsE$01Ch;tNxgHpk578036Jz3GdeU44uH5O2I!kYYI|Bk@_Jq@PC7r@9 z0A|f_#gujdL+WKrSs@s5W}LvUc8Tnjpry_e}?j7zF1SM=&#nG%B@C~UyHp$LsN7i#TTM!6cucb zs-pE`Djk=_k%yGgekF@K34Ru(%TKJ$IUen~bb5j+F$iQ;oK2v`E1#|?30S97s686T za8MKdK2+T5rX{P1GPxIZoe`^fgk~v=<#k$`481g% zhe&QP5?Izwrg4}=fvUstdAOG}5hnrFUxe3%YAL4@V1qS%jB1&Fsj6wo2MH33OT`=yT zNPf9SV5kT;uR=EL4(9o{SM$~gnmc2)^@Ck0`mp@&3^TQ2n*y+z{_e@@$ zyOihO`i*m5DN^r;#b0&GQoyP2x`0YAMGYO)YpodMzS`9mv;sMO{nxPB2GQ%g1a$f_ zn|kSxV-`~%eF9&d#%?riCh?_a+G;z{WY&>;B9PeCr>Qv>N~-`?&ayKUE|>CuI2z-j zC`wkGqjrlIy%)3T5b#S}bB6je(U@W?sEW)HA*E+X$wc33q|0EXzjuJHo^DKCX4IV> z7?i2b1h5R<8q}6n%zZj6gHp~qtEe>;V;*RtcUXnqT!qD|jGNQbr{vV?mGgSFbdTuh z9&E-Db{mqwr{q>i;{q~zYEs*v2BM%BpO%~`G%e{Ba;_lf~xqa$5 z?w&PCz-p8L%5BV=I9hBBGX$Cxz?wdG?El(m2Vg136abq%=2l1R)jw|+3N$HQgh~pB zsZ#{rrV7X@U6Be*DG5krDX^rpU@4{mmePVXL6!<&DO2T)y6rZBsZop*m{Z_t{P^*X z)~uUvzLlH*Gm2aPJBA5kr!aZKbjFSn_!6j^GJYDfr`^ef+mz&{6S;H7UEDL{ZYJM0 ziLw6?2plb7HD)$ez zA-v9Q;n?byeW%B%l7`%EDCveDd=- zUjOm{Uo3WJ`R=O(CD&1`q%A0}bmUA^sf+1QCDSY*(^T$^q49(S<0G^eucp25FS_bi zW3B!RW7#jFP=2GQ?l-z?e#KJpGaZFLp(|fbOUVYxa<TN#I((^e?Y6i)>Z*H0X6jsx|#)STbD4TSxs--3QX01I3UYf zzf3@Dt)nHYuXU3+HRYv)4FZ2V9IG;dYu4Pg&c3frs+W4P)~iOIMLR6teSdM<*C#p z`_K}9QDDgnbAk(+=-p`Jx1&#x=aYP#-V|4Xt8;WlIAIJqf;r*{{b?=?=el8xKTb#V zaco(h7}GCN8|h9{axj*%Y`UsT>8Yt>phnbor5dVDn4(LG*>H-MQ{i;F#!mQS*DS7wHe+_r zr~YIdgVBxjMm135mQ2m%3<1mrbjghr2jx;6S%jh5NM~C&P1QPD8qJuLw}J(lvg%AE z)S1aQju2gM;$(aQVJ&77%>7bETXCyU;V-ZguJ6Z3K6jy18AoU$qP>qm5oSsYmeN9% z(De^sU2U`wDuCo#+Q#{EH6dClb@jTBmVjSFaJs zscG&N(CT9A-cX)+p>85 zoqfFb$srCq<#E=diuvE3#WML={l 
zmxBtp;FpF*o@KiLRlR^#omjIxzYc+?W|csb-cEPlh(Kbm0G6yPW3OyOXgdUMbat){ zA&@mlhX9tc-P<`N<=9BwkfbV+%jp{ zA4JOX)6^diN9UJI+9`i3Lo&E706w7ap+{$BpsSDWUS*C&3msOSJa-GWAsc#|fU2dP z?tU|!eOelfWz-pq=^SpMO8}}vUy9jUPoFYZ;egaB0W)*2iLMcgY-KPoUVMP8H zdv_*AV;o)96t4B>3tSa3(p`x-8E(k=Hu+wQ*}(PdBiJoQv`r#p)D2jhiX|2#qA7{M zUY96hJPB)6D5g?B1{%WXtx*8WT||!`H`?Pd6nfE^>?ZLfMq)`6&FQ|VbE5>d3Pea& z&{0-^y0QXuyPgj5(~Tk&^oAkw3w3PR>d8$fzL|*Ag6RTRli2(5M0R~Vg^hD3^4CkF z`TC*%%SU(L#0QGq^MCp7sWB{{JA?h-&mmyPT%rzrNc_pq2tWD>Cs)6~nx7uytM~8b z#iwTQ$OAKY{;9|LbnZMpc4Qhu`qw7R+hGSOyqN+G51$#G^`dt_*)?uqzMSs&uH2L4s zlsXS{$#>W)7GNy-M%2m=m@5CKIe$6T1zTyZbt5Cwh3o=9>QwoZHxyAQ(2`k`Pi$Ed z(N(d;)JGB~Y9^%2o0w{U(pw@a(k7B16)>(&Ak~sUnkkGJjUQ1OfBfsN5Lo9=L{kjO zntTeZD$=!TVp}@MF!d9!9mF+HjdQM=W4RiR8xKuUz_|V z^|G&SIlTf|*G&89Y1x3j@-O*&+2MOVETOXFJk{(V?mwqt7CMvvwIy}Cn~TKAx9 z-YY>*oSpt0s;VQh9?3{w0ot|eQmisk1BzzN*aoiY0MDE znG=Q~{tDV~XRN8m=*--QCUzT|@LlwzoZ?#M1q{Igsh786k333mqLYA^6P7puuEgWm zGtQ!kKS^!)Nm2U&bXTQgEK0>xT)=RXitEir`b#x*rZmtIT7u25h?c{_yv)G2Txuw*Wf zAYMdYUjkN(KRuoPT!GrOVEdyZ8j!{R;TA$li0E%!7fnmKy2Qcxci%@L4^y zwzT2!dCl3j{K}ER6>4+K zZQl)G-Pe6t{%KFKz9VMO_nck& z21n+<%(j`&F?ZB(CVo1cH=gg$%P$P)?YCd$tvBE1?GYdF{8O(o{QgIH=e_s%{g3$^ zK5>Ky0abQE4zUTb#3tS%GcSuZ8`m;>?re7K*}>cebNO+`kNolHAO6~`C!c(b`-cqT zkq3uyzd&PPuWmeeUtfj~?8V@of~($LxxaUJSqiLrbYqYJX^3Fwfr0%Q)?ev3u5@r$ z`eiAd-Fq{*PcK21(q~IBChPrWsnl30sdx8oeqaqA&`+Kl@c*XJQO2oKhEh=G7wmd( zcmH%Z%D|z@-GRM(D>YYr1y{ZO&4Wtn-LoIPy7#Ah7eQK2fndKO{$;PO0?W*H5ir_nOJ9obxHmW_`@;F)#7``wudI%3GY-JCCr78wtC-gF+d@@! z3#ElF3iIvc7dj=HbCF)^l8Dz!N~MR)x@NK@%1bD93aqr839k@j88{bS&o9d_^4b^6 zc}+5=FDC8blX1Hl@x=x{op6BhbIq(=+vqN%Tz zLQ@lfuHq1lMVqkIY{62#QZAB3G}p{Wm-#)~jM3N&$DvF60(bcoxp=10kUl{Q{y(wQ zZlSh#H)X{q$jduVZea-JbywUM|8DHC{H$cU=)x%Kicy`vk6g()FAwba1i6&J`)Zg9=o{mNk%8 z-5}sCCtHqPR2hcZ5{RnmAX%{jBJD0bbt|dQ7%!OmRlp>e(*A(H>>F(L6KFTi#$B!a z9uoyvztOCqNHv!x{X#lyO6}C2vcGI=n1fpY*6vtIvwjhl>R<4v=hLP3j}C(XOwi?1{f51ArYz^n?>!fD>1^7}Yp~ZW#9aP6&Bo>O9`i6)PM7~q zKqhdr)y%^p5NkH9q0_Y;mu?NZ$|X3p8|n1?Ez#jNbXDsmgS?E#6o#oW3cD^9pE8Yx zLm;6kM%7q~T31bFbs?4V+Zbx{1=Z;o({7@>c@b;mXpR0Bhhz^w(G8j@wCV<{?3r>>sukQ{o}K`PVV0U#BMK3c8GV+Ux5DS$aHz ziRK~;)V1M~b3JYODN^R{LBHiXEeCH>v0x9$lb2IDZ#TL>kB~FvFXFyiOtoOkaXtZC zxL_)^n#L3faN^5Rr#4_rP@@)L5Oc|aVCexeYbF&l}P()3EC5>R33=Ma;*SoL={EHEc*MMHm{WiknTt1drA)sm?KxOx*bLCnt z36z4>9+D21J0*b7a3H5nwl{G_<&br)081y2UH%U7 z7pl$vS}UawmeSu!!PLp(M#79fL6_GLETwrcR*rQ_UaQn#1?${gs5EghtCt+JjY_u@O^XASvz`V^KJI4a*8en|&YRdQ*J*Z#($ONoa?815J1g6-<7UiO` zjeI-yBAd2XGimw_o_^_1KKW)hYqq4Zcy%=If4+$?zT3;{O)31gAcz-VUBsjxj&tfl zy@2c{uf4N~MJund=Wq%ik6g^#pUq+0fpF3!m@TYrqSn-b!SfGV3E))Pf9EbVS$xzt zn*B-X@b2?B-jb z2J+WeeOUP3z5Mp}z04fZg$d8y#q_s&^VgIY*uU@-u521l#O^5sZXU(ozrV_#-wt5) z_~HEY!9c!xsT&`^GLToFyPpr=AHh#EXYlhczwzFCU-HxwuQKBGcNsTv5^FZCL&py@Paw8(?J72I-^{47qj>bOhk5A1`+5A~ z;oLt^;3JS4*1so16kPS{!tlP`c|d6f6nG8l(UpOMq#=C-V1ot-!jwA+mD;PWvQ6pm zuG~fVza8CsDx<>pl=N?>_I|zot-VsxAZ5tfldUfyTf870^o&qcdiG%1Z`09RF5BhYG_mX{w z^&86L_dm{4!=K^NK~Ko{8%lq9&BKES^XUEk`1+HVId^IkHv~)PPcG-;iKSdRzLc{E z=COC(WLEt8F>@!q&WusdGvSj*_-*3r>{>mUOGlOx6S#}S$Ri}goFp?Xko??RloV%5 zRHl-`$soY0A-}kRk{SmYr4}+P9Hf=oNG`RKU)MyTq@qe6b@d%ol=i2LU^WykTf%aNoJPsjOMdR+Zp-8em)$tl_%a<$a~*z zpva`=q z)sP|?Ru=iHLelH9xtbQlwVYtC4LbBcfb#1_BDSq^n&dRjLV3&!MQUiW*HCRW(g1PJ`!P!zg_45T!zhNqwB@muNyLpA|_lrE9LW}M<$>e^;SoR}6^#blV z*3fKPfkUbN(g^NM8}aDZV{KS1%guB+kKoju^N(Eb)JleLNR&*o7>7ma^k0C+SV=`q z0hJYbIE@WB>x!w13q>6vg=KOOP3dPb#cjnDvx%1U{j{d+#~QL8N7!a=Cm*Ku)?PFh zm!ZD6l&1JybY$+w87GLpwUO5BqxdpTqmMa?Chk1O%t)H5B>Jwb##g1Gv(7)zLl&6+i3jj7?rcPk~?Y%HB;78Iejy!Q`Vx}AId)h?1r29 z)Js8SE3!(2?4l;Lg8Y*yr~?WGVdYeuOrY+ppdhRQ)75+$-DJX!@;ErAtijwWF` 
zok82pIvNh%qCs%xx+s{qSU}Cb7}Up7{1~$a6j638p0ZPk7;lzS6Oc>X{t$| zW0h;zEMT;#+oX_GCL->T0Lo036c||wigIc>lU+q%t(ltwo@=taP^sreqg{aY?{E$0 zD~((i1O;flen9QZs^)Zskq}dhEalu53eV&dPA_KW9-CB z0<0B$KlKc2w`TCetV_K7)(XD){xGXIW-w{md7gf5A#>&har%OmA7-B8g%Jzcup^O8 zyW;uai=~X6x|yx}qqrb>M^uWM8hr<90dS+K6}4WOdBca<-Yobt(WtfJYv~jmwxiQI zXlyjm-gXC{w+&Uj$=__~b9Kly>Z7gN3Dq9#wh+xEs#y67ZXKTEK% z_6COafuzgh(#5A~KbMPXUlgee4^gu1D!PlwXoE9x=BsFy^T1ZZ9xf|@1?u3FM*ZA+uADVZi`JRRP2?z9%uR^o7MUcN(B3F?v(j4B;Ejh)IG3;)?MZXjF64B)RXda?AQZp?c} zaP|6snek>9=6^Pr%|E}&iB;clcKsMm%5v}TZ?Wa4=UMjMFn)TcE8o9-FJC|3owpvn zhgTl!!&|RD&Aact!zW*SBQTo6`yYP8Yp=e~=)(9SbWH`eH3ql6<^G}8Iu%w6j z_VA|%1yI8UO~ZN#q^U3<~Hn}DiEUwZfG;~#cGnewKO086R8QfjTb zb?@n4_UR)q6JYi1-pAie_%E{h&{gU1E|2^55Rl#5n;v)f5M=dXV9!B3(Eon_9fZUB z-Ov5Khcd9+Ao|_ii^1Ld`fKFgc=0jL9A3|rGaEU5croXWE#l(QMV#FGD_d8LVf)Il zY+pWxr9Z#V%rP(V3_5CX>o%IQaw#+2_>K39y_7M|)n2gjLWM@Q>ofSt>Q7)=R zH7d1%%-l+HN_3JL<0<5%B z7h!3QB$V2SFSZkuswPMf5tHvCBFo_i*0vLw{JJ`T@w1Qf$|oy%`t8MxoOVR8wTH*v zUe1&y!OU47%?IN)@#ppsqB0waNh>5RCsQ(wVl=7*%JR>mDmy`A$zD|X8!?t{K%4iw zTny9j6#pO>`2=h^qtK^(j6sqkZxq_Jkyr#^jRLW}TeGPv+Cf$EVM+>4Q&k&9PFXzp z^?Br}iv&^;gcin-(2z+`K`1wJuMtxbL~27Q1=eVyYA!bEJ49iO^gVT%DH; zJ(OFz2(IK`${KQsPnO@IApoB>i1O6s zSV~vWB9OF{OhI2XT7dN}&ib)<8pdHO8-=%K5}n##a8ypkP&P@xG>aDV0zBF|cr|lq zGcBY|)}4*BvD8kd)AomdT~{^*QCjXz;1`5u!T&nK^I z6R2tP<*{bI{|>{3%!zm#76`tUNVF*X*Uyp+={J0;pK#SqljnZMtDc2hu;r?k?b;>s zzU%SnS7WVLu(g6#S#L6L#@w(AkMTUM_E1`^QG%^Vtol?;rb4Rf1)ddIR8<$EZ>+-B zP=Pi(1y#&V+^JDGk}sl--c57%aoTf^VT;*6_K_3 z3OASTA%4|<%66S0f5|Q)znV?<*Yk1yvJ2zXO~kzYGX*oZ(t24U>97L4a&9OIKV2MJMDOJraX4q!iutd@7E_VYrY@M`$fg*Ge%SPsM&N3tvDnRr?~a zUdW+6tQPN;VgXkio*QL!MoTbtI^ExI%OG%5UCAcvz)e#2hM);Ap*7EfIYEFHE5Bcj z53lnd3JQ&673k2qJIHKsayh+%OL=wNGJ1(I`8X><*~Lm7(bi_drJ%oDW#Ce+g+QIp z-^qP_Ob%`}b3}-!!-V+h0qLe^nqDtZL?3g^R1jR^l~HCBp^?7Ntm z^G+LitSo4{1`YLFW z|E>MD{C`cU_?&Tcv}E#6rvS@SjMG&m-`^s@QiiS&)h4^<19Wcz}kbY-~^7s)3~zF;!VFmQ}%UQ@`JIY zUB{duI4@1bSeS;oAPcKXh0S24x=P1?){pGX%FlbS! z{_-kUw~Xb=*759K_#Vq9J;~B>PcZkZ`}ygEeoT3*C!arg4{r>=ih^DQUf&-5{C%ee_7V8>89<+2{pq7L0}8mh3%r!nLm4bonKZi3K!Mi) z|L_d=+||Wj+ofPjX{oa1%LhaDGR5)#q@8V zWAc|z^3!)OGJE3dtXVXQYiIr?2XBM{_{zm zeD4nyYz*Xlcp=I8b)@Cylarq$?;C|)6^X9q66IMtsK{84rf4JD-1+FUrr;@=ialox zru5G-r+-94%DimEBG7n;gYuhg$S|=YNyFZXHg=V^vLeyQ`ZNdY zldLQYuVqV|niDxTF6LXfQD7i6zmX{UJyOeR$*RaBF6|n%b&;6WA*4qvL0`BUU;PR} z%~;H3;{+@du~Z4N>c-Nf67%&opU%lw(hk^{KKH+{$rU1!R`0>GJ;5 zXw@!|rBbu?2kqwNm`kVA)bOhxShn)<^7*FFZki>2O`dsW~VA;W?b9L>$(9fmJGXjYa4TwbV*9Syx(sxx5rh zUM>d7XBr|ep}Kj5mfXv<<(#Ja$`Z^GYXwjH@kXx2d|lozconUJD_iI)YR> zzKxco?Wis;k@XF97G1}ZdV%tw!_+6Qa4DLI?Wf2( ze3i0eSBaXlir}wip!#Jq?peF2`gA@OU;TmU_x-4r9j9phIlM6y^7)L^$5c~)tClvo z&e|oAF-6xv^bxiliV+lw-zWy;ev|Pz9zFlEEKKrah<}&$R-IcLt+9o$9aM zvYgGPV#iH9L6x-M(o%CIo|>Zx0?9IJWuMe-H^@5_i8>&k8Ua^<6qeON1z3yhc=a8W z$~B#vtH$Q-B(2OqNOmpJH6{{`9zyHQ{?Xa5*P948xVa(Fyj)}O*Eodm? 
zBc;p_s>1~;Keo;Yij?uNlyS0@+N)q=6W7#kKf*3cI$N$2JSlyxw0=-29o|otXgHWv z>A$1!Y`H<;rS=0$c|O{Hn^1#~V-kQUxQnvf=6s2j!xFqiHF(H%wv((jlBrQq>oTKq z8mKeYU~tvYe5YOtYY8&77cd)+)6sfW3hgU`t&;+*5Zc<31qQK_ltG#Wtj#8jRtsvK zT_Dp&b-kCvCu0RwEBRvlN+$lYpW#nW=k@p3GwqikMo+xJGcW(etM6=N+RSTA|22RY zUthtiZ?9$fx(s$6sAJ@Jr+DGzbxipoK+-i{e04QnjXJ=t{iRHpcAPifU&i88FD<4P{xP&k08&ih;hWSSiKcp65V;GFQuXI~T)U!i?~I~s!3jzP zTpg#gsa|%T)E{@Ek$hUeU-G|YM~L5i8hcumToW4fWsP`DE`d)cPNy5aL15F=LR&|N zf4;&7dn4K=4emPZFpMj*qiHd zca+m|yOidZEJ1M+?EGDvz3~w*Y0KAxJNm%;2qAb z{(@7hKj*;G_gO#tC1#Ht#|tJeLkUs24p@IWpET_qwh zhMR({_>5#yax*w~;S|f(t>VwWRxonZH~v0Y_YdmNW5b5`1J4wuiaB}9Og6nBAyxa;KRBv9W_Np)=vszxnEWp(6Arj}Bu zCOWH@xB@MSWmaPI^(5pQ$tiV{U*x7xa8)YHq5_9Ra0XJcbYzy;$f|OYQsE@7$R@aw zj4$8eZz8-L(?~>?lf>e7wjRsot*_Vc(nre}HT@{>j@-)q&;Q2rpKayCX=ixs!*#s; zNpE6Geg}8{Na}8ViXmqj?wXaDDmG!RJxG1&-(+VUqqr)T?5cF4a^gs= z$|Js}kYE8_U~V+wMK?*Vk0LPV7$@U46IOYG?50RUG#5Bma+t7&Yb0A^xF*rg#oXhh zo8!o_rxRQn%$2-gL01Zwi?g|2UB^v>mm8W^&eb$?KqAnUNoo$%HM6O}&Dv}ioAX;) zmE>S?gqDSY)oh73NCDQy)eu=~QCStA;oGLY2s-Lh5 zlsaq+1U7RqR!zZFJz4NI%m18K&}ONbNV{C^Qy7CDwzfYxDN zC}0|oscb48jup5Ye#22STafoVzM7em0Z+zKGDcn_ATv%!U-S){f>Hh^#@ps)v}pgt zC&2O#6JcF}Pd^`H%?!+{g_1dLk_dDc`pWHi&DSO4%n)Ryq1R+#FjitRH_}j7Nkw5k zmih+yEH?B4^a2U$%&8GL(gHBY9YGhdnTG3&XiC~ZOTs$b;VbD(*h)+6CR7*Zp}I6* zp4&`w@+K_7%W#A$HCuad$L_@(djS2dV`vi3W6q6~OgRx_K{mdI2D~+DbXjGBuLAM| z;tAb$m}`sI61{LOdGqDGTe=_HtgRG%AdsE1j?QH#i2Zanf#1wWdn6Kz;Kq}tqpj3S zTd|jRIX5(sRa6BQppBL5G@_iM^Qlx{NXHpcj{9m64f~{!IGZKFsz7rhiM*YGlnI)Y zzFF#{$V*;gEPbU#l;2$n%;AE+mo8}gOCwKqdiNzXD6gB#5 zs@6wlus5}Y5UtDKx&2^PmA`iDvY<+7Iy@nt=S-E_KWxOAA_o_Xyu@faNinsNCIHOP z8pv#Hq|{`h(d9(#H2OQf8@<^W9N~CeSLtk%g4J;zo#r?W$8~|pEvz>AU+f9^TFYqe z(D;WCFxt%ik<-;iufNv+Oh6(_H=p6Jok2YJ=C3^U>H?<-xo&l-p8AHi!T0xf0)s9sVr8mp}XYtD87Jt*G z(jUv|Y@w;?wtsvq1zJi?mQ(((PD=-!Dj$Z(VhZ*Kk^R>>a+jW@WT^xrf-KEn*ZfJn zHh`u>66`KIP2G}n^4wW+791vh$x%w>btXZYIlUTdp^7&7ZTut2+dT3cx&%G{&X-tk z(9z(~V`#NtZq{LN*2-_HqUp8~gS(!3YaJS&0PA)umdN^1mUM>s=iKV!wuXeXYiJuqq9!FbNL1Ii3f0{3ouIp@n&7bn|q!1 zns^E9W6`IFW5`XEATkegRTZkzTK==(-Tz_U+y95(-@1#Luinkdk@s_D&6nIdG@TpU zzh~Q=msvXLK~_wEl6e!JVC;K?`R4V3O!?$7Mm*S^cODeAgm z!n$(Dp8~6HcL|vO+l3y=;GY64B`KZRm71&nn|k(8mc3*tcr%nt2wx7 zHXD|VX2~z_@cYyknLq6%{`m1V7EFDWxf5Py&bU|jW$erRH1<_~9P=_0zkY@>pFPHO z0oT@bKXT>#2CiP*M*OYwloTa!D=wJSw0Lq0@+c{nOuSxCjuhU}=@kSf6cHtvXG>>xYOMqaLsno6Z6%R`Z1Dp48xDn~<|^&f$ihLCg($>j>RS_E5OLes30 z>_q3ZaNuG&Q|6v#^iPNR?&ssY{`opS9J`Nk^MZN)t9|r)aWNlFJ;Iki9bm$o-9)5R zQ&`nVN@faWWyz@P<0;8GMOEHmDP)hMF5ZbQe<`ZuaWo3B-1%cMB!7rGayhf#*?Y4AY0c!SYZZ9szQn# zWyDm3a3XpuK?Q%4Xt+wW{t{Pfj&oCgodi=P*NV<@A^RvnrRRyLz9uPvFu7QxYGR1f z++oZ7SX`Qf_AnPwzD|O$RD9<*55L- zH`&Tj$w;ph*odlhlUU>8W=<`^DcLkw%;Xj(lNfgjk1>+cl)orXm`#g%IqrG^Q?1gR zHwHu580?i3F&2K0E^o}g2(tZ-v++lC<=+dQew6fsU~9YpOHgE+C#aesna>yU8&9Cq zy+}Yhh5CY#*y|_Jteb|bVI0=#F*IqXN=7;ZXWb+$vahG%C)&;P(UnZVR`mn7-7C=+ zO&0vgW6Ph|OMgU__BGC`N&I8|Mc_AHGNqC7d8cvP`UkDLKLlg*1YgVf$04XuI@;Gx z$12D5SQp@tW2npL3HDae>AZx?5Qw%i5Su;|r@aQ9P7tE1L#K2)RU1%O)Kit7M@3YE zWSAwGlf%h7vyrCU(>PMLqq^}s&8h2XOI*i4DO>SGtfBG3d`v;hXiMFW`_?*)HCbXRVub!!*o5De*{+@xek1pR_y5} z9644@8G0(hiYU00Mngazu26~k&u5`N6z?Aw%XTq`!kxj?OJJhEmV@SA;G=WONl}29U3a!K}&sQExknfv?U5?doK7;aO@z|qkxh>bOHcL;P1W%@V4~-38 z|6rS0HCED84pNOCt`yaBzOaS}gM(nTi6c2>oRR$FdZVmMp{;ai-&dYA4 zd9mK%?`w4^uig(W1zqQ=tppjGIazMt@4PyHEtN79gECtC)p`q8YE6Dj{S}tVw)i{} z?LLm?)vzW!)ekJCd40}$TB+1Fu65Y z-F8$)69v@`WLK)GFt%_uG>50(n9jRjuHpM1PBCT1HC`C8mZx4^&bQ-F^W7w+f7Mz( z`uY&_mZtFilmK3SeFqaKhp~8RE+2gKH!r@tk?$v5z}39x+FydBhOt<;=( z+S~u}-(_eKY$>l(;N=z6wj0}Mm4I4zBa5sJ7fAc#DAjAPP_^|0hUp~WNFjk zy`#a`RxZbu;G#8$&h{L6oF?Bl3wKL7K4l1qmU>!c-_AB8PICZf>`_v1_VO 
zm!(RcmL`E%js&GQG3H<7jy96^h9C)^j^Qhl^@d>V`KM`!-GwXj6t~L)FeMyCck2l5 zya0?TSE!4<#l}q_*t~NcX$7tfy%(4y}BuVvHq>GQc@4urqpD0>ngbF-c$BbhHc)gNOO* zgW-(&;tA&c`Z*_$Ea%$g9o!5*Nl|eEX&JF(W~Y*toar!}Z>F~VG-Wl>0<28ZDzizdEF!M5lwdh0 z<7@u~R&dTG&L{4s$Q?

MX}HHgmoHI2kRsh)@M`Od^{&br`wML@s9?<7DD~BC7*Q z(g3GGSidI{Ru#wT>?i`NONo+*H`vg`fgCNHQ`H=*>tLziX=S#XB`J2+X172CFlq!_Hb1Syep}7gy*V=EvO^2qL$c{TU1vj(_~E{|JGkrCC^8h zqtRUUqrhe?_FBP>AWB;}22-&hsAQ@jXeK`WFPLj4Vy&NoQ!UV{_#SoXcX)I^<7iZ{ zB^XmW#hYi->i8X_>}#o=h(|LOTkUr^>&FPRCi}5vulXKxg|hyWM1r%hS5LuOIh9VO z39?2RO<$=so9iDQ!dmzPy3EnEXl6*JHWNqXx3~prcg(ZsG%dhgqck5X!$7PC*Lqwk zrHNABSN{_&uHR^OFGQ`}si>h4cpF$fpA4A9-+LHdFJzj}AebmYjerN&|DzhL@tfm__yeC^UZy4gyN?UMr;b_${oVMd$)@DLxQE-SH%h z7jyB2S5tQ)iPZI%X*{1vM}ijB*>uVd#bCKnjOJ1f`Fn3tdnyG>Xc^9QBULfg6vdPY zWdA{3=b@z1O1Fh6Vg>7){NrF95r7@cl0Z*wCCuPqYg`^D z3RQ&aW#8Ote-ohsD`iLrrK9_S%nDAFRB^VphKmBSW4Tr?7Q2a1cMwo!;Zj~TaSaCY ztUd}Y?Gzf?sQ0vC|EHBYcewzECL2Kbw(|m-%5l`QkEOdTRp{f4a(; zDHnM8?M+Pn`3BqeH8ADpo4oqwUPga+gDKM@c>aYgy!^^m<}XZQ(UK${equfor=DZp zA7Q-r$vR#fv5+5Uon+U+ELN@$X7{0B3d$R?xLaw^+i4JF`I`uR9sU|DyQ`JvHo=xW zHd(y>@v(f~+c=zU=$%cd9A0#eCM*I@i~R2Gg6YnDLF4WqqJG_tW>+}ABgv%C+DG#A zofx);;yaW`;oKv{e6y08WtXt+izo5t9i%KggyBLe#m8=veLNgvdKIoZ3s(6LJ@Q+- zS~^iX-Kaet>g+Z=cWw)s1n(AQ(&t8+ZyT{Tsi?PC)7Wgl^p6*Isi=s?tU~@Xd&GZO_`$vGobniF7rn#jKi=oi z+&9?q%S)`B^azW`Kfte}`Y}@W`+P(XK6rI7PY&tI8&BQOJI@Z|)yIbM^su2kcK<^R z>i+b)-Yz=7+!keS%wc)hBfHnAClprK0SG)S2v#M z+mlCS{SkrIg94+$f~|i?Zx=){;k2f7iGd{1y@S)LrX!{paBdWI6!dKA7vtE zWzNCAl9V|I`zd(pB8ckxFS73IJD5R|diUt>AK&WUySmdu>7UhSkUuHSgWYAlTUVt& zRu6f658b-lO+Wd}_X!633jBI1;OeH-U@0|Z^88)@L%09_ANt5+Wqhu|y?XLMKc(N* zaK3osWmf$@ozr_(v1!R9R{#DLb0)mVxQ_<$)3*=v!&eV7?*03i^6A6O8ucP`$G^es z@ozHa+gJGJ<0pCljiLXW2J_j65AoNEDMW@JB`Wd^vA3=g867~PV78*Nf`p`0lGF0Y zlBl$x+DJ;Nnuu)4l!~=v)_O=UcamG-rbd06oLmcOsTyi(TFA=PkyB`-xW+|Np^3~yk{C*9~w_X&8XOUZ6M7n^mxG;kHiV(_jj+2wHne5ml6vxj*l{y|x z(wCT0zrd36A&oIFVHaGPlU@^OeMEENXpCu6n3v4LRXquKO}fM*K&(yy^rkD}EujV7&Ns%k85)nsm4 zex+466MgX*LEO)DnC4NJ`7MsxpJ=x(#!xT?d%+L*Draz8I}1LLHW%>8Y4<5JrPIM-!V7@N4B$>{&BeK zPbHweo{cNEnyM4=0h}fGJmTBEOQWbp~Rb zt^T_Rl`*bvTAKXhU!AM5`cb6}G^+HyQtmEX6`n3j6+wcgZHYx3%4;CR*vgf9yFXoR zaQN>kyeyBEK3ErPOxzMMY>Lff)2%##T$_I$LZw;pY=xd1dJ8v9M!{Mw$MP)1ns1X} zzQg5W3t>{wr(2uI6#V7NcgQn#Qse!HIw|b4G*0C zs%)NqVFi{tgQ{&+QBLyt}-Y!crJ{-RxGmaH8pYD)J}V;4Xr@FWKS?! z0k~;@3@NkrQuN0O?7O3=S#gPspLSF7+Yu~+JpH;rvgaJ4aQPVwr;_|ljLEw$V@NEc zS^ghmt-#6b#^!4i$U0HE1x~(ZtnF>+To$wrHLf-jwkEZ#*HSAGYVc|>+;-B~WTnoj zN8>SJYO?qTE;Y2cFm-tG;icstqX1G6)mh4&+l2zTRD8|Zv~`wYu%u%52)zDjq@_u4 zWfvH`H26GnJqfU!CKW!(^PQ?ZtQ858_k`oA3a7O$9DCs<9EIoUtPQ5Q{2K1U3;2qa zX><-_Og)S#{eLH7-3=zYWZ@ZI-VxNts4Pn{$M^RN3aq`^dpdc>XxWVC5r&;#bDyB@I!dq{@%7er2WAK1~M`Tx; z2_G5Ii--Gk=W)TygMz3bUGL_;?)NaPUr+ztgAWYo$1s7@;NE>CDIMMi`_o`$9>E^H z7}!&po3J1E_rFi@G=M%`drA^q^%9iz8A!kG1L$>cUwYo#kG}GFK%c?%m!trz>%G0? 
z`zke6${1MkS^-wio&#lFkR{ucfk=DadpF(h{;$7=N~y8xt&I3CNg2RYX&&r$*WL8( z+KoQ)xbMC9GDMl$r(0JB2)dq>@Al&t?{RwTa!zhu#M(I{*)Z=Lejoo56Fsv@Jb&*`|A+gL$V5**wObhWPEkxxsbNEUX z<7Xb{jgMFJ?f8Sd_0a|f4Eu@a-(JVaX{Q+Ue~ z%rsQ>>C{&RP+qi`tmJv5#g3;Ydm{Rrv1k%M#+mhrfa)`vOFqGx{SMls*RiC1BH$W@ zF@6l{*oo99{z!f90xAkO5}&+{^up`ZnTkm&$RIi=k9?gVv%Z>}rO5=yMHE_jolE(D zv;M|H0;|uE(~`oG^aGqLJVm-Yk?@A=oX$GP^)h8VtSbanT_K<%fD@S)1Y|df(N%FS zKab#gHMguSoGLP~B~8uxOf&1U?Chzy&DQ)@)@J(HTF}nAbT8}EJnYJD=4eSfNAq0l zOO~iT*GOoMhpUqLf5lfm346sjeCi+lC^8g}qF%Wfpl-52WVT>uwqR@uoyOVtv@_A<3X-a)(yEy$ znb25?^hV;XokW}FM||~Da4Q3us(ui>{UqC`;;NdAr}hUyfnY4}I|0*=c!upe1n)tx10gp#Bm}Ey5JKT)?##Tg*19E-n^iEulGS2kzL7=&$`j zbNpt=IfyTD7wW+ESfZ6V4$omv3c!{WDG_ci$ob=zV8s}jO-EWS|D@JXuIGAk`=YU5E21S*_CGBF>t7dXkJVuhucLHd46fiR zTBFn$FBVWH>y?M%aa=FOcs@@GR4F_UC*caMLLX8}%&r^cTuP=assvq`4ZXUFT1^v` zrdHys3`Ex%$@O%~xo={3Od2P%N{KRf2-8V0P^6JyLPv5_2O+X;Uq-b6$wq+M%cWWy zhjQxJno!7z5}kkF?EeHDa#L@zkycpCk~IgJFnuixm!ISPFIMpCyG!_UZ5(qJNAlDwt9bqWU5uF)#G4-< z<&kIB^Y%xFn7b&EDL(}<{PD$%nQ)PVM^(&O5Y59+{lOQbj<9@fIv;VerA+Q@_M$Sn(ONt>n%b#QsUUCpu z3ZiT_9IfpF#a6+WGMqt^1XP_gYuyqAndH2yq2gRDY3t7tK4UYbD+LsPCsMgUg3?J_ zXY&c%zO)M&b6P0hI!*QheSDnp$-@d`9`-boiVWZo6y}ps6L; zE5TkY*IY%sNf_SjAewT* zxLuk^Q*IpUTcK2hhwz`>GoIzbst>uc{$tLpe49OUo@K?jApby<;O(LNxPMR|K~Ya0xvwunl{*Og3c9-8&7iJ#F+|qy z@7LWwx$^_c5Dk6%3#R%Cumo@M_qnC#A_OnBI>)D4G@yuhq`1IpE z_xv+F{oGSLCF@Vhah`c%2yeam2&2Ayl|L6vCg|#ZVk1s-E9N{&$u~&Lju7~Ula`-E znV=$D{)>pXbmCG9iAyafImu z9Te3z6PIr$y~;^}u7!{+9XHan#1y%SD{dnw(a!RX(GrpE=ezF@^89lPc>37|%$|FL z&%W8p15Zw2+HZd|b7ifJJLCpd4Myyhlth%6QhsdRQ7RBIr4ZPaZ(~VR zEvu3YEQ>b_u$tMN?_q6*hiwIIY|rzuJ=?|}iOLVi?|P!p#FYXamy(JJ&nO}(Cx?WT za4O2eQB_`{C}Sff8GoQDU5G002UHo~;VzyisF+4a@i@HsBXO3Fp-n%7Hr)&w(?7u` z*l7|RH7U(|MPq1hoI$f7$yW3|?()g_s(z5yOh%XW4f^bFXs($?i#%`6AC04QyktHz z1W4aYrZk!kfmo9~r^);ZPvsOj&X3sSecct41yuqw)l6FIf5uii0e96@ZYyJH$mPL<4VCK!LGQ$H79-3;tiO2_!$FxUM+b^iC*)Jp|iI|Y(Q(dJ*m zRi8zN%|v5G4Y_%Rl-4&;-k_!^znZGdYFu>^xt0{-%8NuFa|l=5PHt!HrZsT`w$Me` zZ~jS3(pH*N_n-~mNZs|nut#j7U8%haS%L1_BAk(HXinUYJ$f_xuuXU*nr+Uygdye( zjW^F@iN8rxL5@H_2S;KaZ5a);36wM!Vo6%DhoprYsb6=9?CF0J^Vu{SX04%p(<$;M zEFy07ZxqbiO2LW)O%76J&aTx zOQv|+P1=L&@m(yX@o*~Tdt!0js6u<8K!6vH_EZ+FVfDwd7>NJ?U7|@uK~&f-6;rWK z3WF1w_=2iYAJ3pn_SK!vL4PiX@*{DSpG`&=Q-LO_hUye42#d7h#ukcIat_y;Nz*#X zb+i-HXya^F83Cn@#0sXaR+%}HTkoH=IojUA#cB(u6dXxm8_?)sZ(0SbLsB?hrYBh2 z%$YI+#|j#`sVs!;)3${I$72fVEHdIl*a&_RR1vDVl`QJsy#8aXF4R$T2JXzVuBa$P8+qt_bi{K+9GY*IR&JAE{2 zO@c=c-j+`6%G`@;vs_bp8WIYK-h7hK-*-^7;tVYZZc+F95eW|VZs5(;^Q`^ zRu6S<6B+@O<+c|?i=Apyy?;(aSF0VH$Ar<4g|3lOBmF zA(Hu6M~5$4DA1qzge(%uc16J>`BIqp32u>jpN1V-}d9`owvVW>eOG^xBmnY zw-QN|3@0!$n#jatG7F2jm6}ePk_px}kXu$Rg?u(qNhw6eClL@H$+>F*{C)BSzc2ia z*Is{K-eVBMhbo=Z`}s#@e?$QFpwiUWy$eITb>X2ty?Cg99|rgA%HUqz7(PINr3@e{ zfEggD8PhBxuJarFi078p-WHM z){Wlxb{B*RzLeMZR+mEdY{4>|r_|HGqwx-o0y2Lv5jPspJ)te-WKIip`;_NeEX z^2x(Y{p>NufBY!ly!{X#zw!XDJ~^1@9~r{4j|xB^9LBKW!x$>*z9Ib?cK-mLetZbe zKY1UoK0BDH<38Z@k@ZA{9wRd1Br$SgB&G%ulYE_<@qxspCy-H4B>2rIGrvk;RZVn~ z;5ScCmH;ay*Ft8Ai;PkiNd*?ND?Maadq|TAx3I=Xt^N+hbxszxN`CJOZ@8trSLuJMfWT(v}H-8;PWydKf4H8@?lUh(nd_e{A<<*1~ zWDu*$BEy-*!K6JzsBidzbu{H57mFma(*zQ24&_4GNsi|qBuI6M2tyE8sxHX33nW<5 zi8tkPA~TGzx?Ga!W{^1Gd`Z)SN+Eo)MYERNOkXS|U$nQlQ=GrP;$ z*^p^xQ>KN3Wo|*1i38ah0xMlams<%+k?SZkpVX{m3i6{UFS6g1uy{M6Q!C=2i&!j%-W68XJ=8mk} zipToF)-1a)+UAKCYUd zX*2zXNj`5y?noS_CAf^6Q59@NoqG&db(}n3i>bbb;-Vsn>g%X67|6+~ASMGJPBKI#|Wgt7UPXtMRWW{fz2j#A!{*5Y^F{A_qMpL*n*a04qS#S zVlAH7P3VJHV~^QROZssCHRsnu}0=$3C+NKEgse3 ztK==+M&jg!RB|cT1!6Id+y4GkCyF$L8=DEyH~X=*F*=7s z*>(Q)zjJEkaV;SRW#VTG`*RyOUT)%|%FWqYJ4Z|P0woKimoi&K{zeqep@Ty_fw3wXu>II?YYRR7( 
z1W?6z>eJB{24X4+kRUPwccC&IgK{Tfump(#m~zfz&b&x#MJ%1=ahPK-q6F|_4o@SiIiKjz5N5p0_A3@ax+!5^a^<(H2iVEh|{`TT_ey#82EUU_;TUw!&2 zqegztCm(&ud+&V8{evH-S5IXi&4KjqJCy$Y?&q;bp5wJw-r=byUgV+SPYT9fk=Kl4 z`<8u#-Heq&I+=*1L}F4?NyyCNR(d*FB_(8+mXRTHOv%e5H9w!^++2ysa|yk5i;F?Z zxLAky@uwen@r9>(Wcd9&`oK`0kmW-Ilrf@u^Kjq3JSH%DLh$ss;OSv`JgiqQ?w2%7 zaHUNC++XlDp!>fQJS+XL2K7|H)%AbxHk8N8;H15K`lrxQ1|RL)P3f=IgJFFIYrO

87R=|DQHv1!0O6_$Dy@x*k{Xe|+$Y9pYnMla7O50oarhX?Z5(EdC!^gbRKavy^R4WPf^bLjm8 zxqnDsL1|B35MVtyybrHGH;i4I=Mi*mKS4oSGtFSvNh!9&80Yf8mjb(7}CE* zll&33(r-}Zen@WAOEji^hBak0=9n=!Q>SCgn@vgT6f%;hQ&zf;{K7pXrbuK~kxZq! zp3L$((reTt)Kn5%mrF!_9A|S+3$8*5th&UGn;QtKxkQZM>T<;?&XpV`(ilXXJzO&I zQ&PwrBU~FQk#ZVWO2avt7D$S%kyMEy!>SvIlF0J36dZ?&%p5Iuvof}pUBzwzRTC?c ztgJ|JvM$@px-2)Vl1*&PG_$wZ!Ok2VyE7W)m`)Pv+KI@~5|>j=PH_n-sW-_;Jx@yf zHu5soP+z)|@{H+d3Z~LrJsnH>=a`b-r?u>RIvV9SkfpUi0HvNGk=u9-Ip5G)_k+N6 zq~Ks2YA z(yq*jX!sSgyiZg0G=bXBSc=B^G1VrxQj%4<<51H7MVCXC+U#%T|C>mMdA=W9rh;*F zXy?;zn2%RJufB2|HH9N_TNYu{t`ICQ#87+?pDGf2Z7S;WEL62+{wX?2>gz~OFQGWU z5q+JRCao5=WP()@C-7#S!;`!heb7>Y(@HuLHwv=;!WOauYv?N6ksIkq*hy2=1{}dF zX}z`CUz^nwzZHGpN}3Y);!8S+DsT(6fxFR0oR)|-6mwiC_Oy7~3d?Y(mr8V-hbz8- zj?60BZV7nS9Uywj0RE+D*Boog8BmnNp~yn_Ebg$FUkWm&^4W7VsP|RQt!oQh>EDt&C((yB}1S1wmKp z>>Mko_YcRQjCXY~tA=X=s!R1w_T|(Ij4XuO+67N4*2d-wxQqmv+Wp(NB$vq2K%}da zla(g6r&n@a-^4XZy9H;v1cFCp|D$CF4i{_Km0r%NavcF`2ZypM{J;urbO^4Dq&S<& zbl;|;?GBC2K8n>ZL zJx-@iP*p)iWex78PHF_2zc1RxORrC0=A479*&4$KUrCF2|&+8D&F`A1l>;R4xZdepYt6xDjr$nU2UMCO#&Q=>Ow_IYu(HOW#SW7P_% zoV3dKcDkC;n;qD^O@f$aGzJTLtz0Kop9DMt9y!-akA~8)Ka83+7l<9Zn%eotVP6vJ zd8f!6vkAkZi*)XY$N1+}s^%P{arq_m+rubacZuY6XHf-b)7j{yrQS`8)`QR9gjebN z)_YQcTGQ-Eco(Q8H5F6e5rQ|HoAZEnEm6X>-nQ}4)iYf+*139@dx1Z5uoU4=~@ zK77ig(yj(tTWjE-N`Yf0ZkGfbEopSN<@$l;cIVUDQBCWe2JFsiY{mxK1(h9UFP>Th zZCVrUMis7xTr@>t7>fh(m51Oh44}0*h>n_@wAO}U&bdTG>1Uj2CZ!N+**r4bApGMtBoJ;I|8JudiCYEd5Ji%-91 z>xOMaghmn(8AI5uSQ2uxh)qu+AzLD((jqcTibyTUBe@`#=(Hr_vNELL&6Z_4fiaO> zj|k)MQ^%S5<5XUI^(CHq{1F}*Hk79ye2}Lf7|s(zhVs;4N%#G`e5iLnhV>A1^&iMX z{RVJfcja!v?t+8nhEBcSTlg?>Hm@h9c+puU2fp7+x8-v1FC-NgXee~|32 z%yT%n=iLnM{%_x|AwBN(H$mRlLjjwBtLwdjqkHN5f0ID2e>dfhK&2m7Z~FJ_=kF-5 zjHT79OK-Y$>E)mJdGMfN^bve@7g%-u?_K_;!~xv}Uw8ixg9LY9zV#d%7EEE^%AeUd zZ!AlG_<%X%US+|Qw;1=~LwxhL{73IS%;#@E%G)mv=g9|@&io481cU{U8GFX`k=q`recQ3;S+{0t{cVWTokpx`aNkrH&Vq(s6`I=<*aaXvN5-PY#CsBU? z*pxIuaVE*x6(nWWkdR(WPKjMWELh4jlU3%Xq_KlsK~-j@iwb=w73y|kQ&sXG>Z!A~ z3$T3TYuYFCR@kC56+l1X3Se8~|4YR-`Ajv~Px z=3l>9dV+XcGzqo@&dUXQJR^WeRT0s0>}wKG#ObXhd2e%CV7kA+%<8yW_Lq6tRpMqz zoSxO`Zb>fx<8@g!KfZRz@4j0Ad$`!f)$(Qn^SzvotRyl+MZLjIN=6c~u@|VT4x+Sh zANg5JsViQHrTRC)%4F<$BXQ<@AraXq0oPdDX4M^x*7yeN==pmETslZd9AN%3U~CsVwZJW;TWuv+LhrIWV=<~ zM}e5P>IbwrqcG%sN1J*U&8nH`b4UA|2Nhf?&{6@I z8K&}FECn$%7YE}MSl3_s6MOh-{z>0KYwTJafr}+A#V6ZZA_Z4B1YFmb(H64_pJ2;* zb1nLS6||@9qdDaOWoMVu7_?gee+FaJc{GyIS_D~5Ia#pXBfJ>>tQko6DH>yyd%AjDkfa-6-YKDf4lQD$vzd>V;hFS>-3JbJUHo7TL zxyY21q4SVo@R4n8BVGYksgA447*-`ZKb({rtCK|?*;d($8wQyymDJL#eSkUanqPj;Fmj)8D_xZm*NPqlkKW zeN*ed1AJb%c8g!9CL<@cqr%v}=0>+f&ia<{_AN*j^Dt0 zpDkqC?A@HdA^)uey*Xuq6-ygBM>|@(Pl5~$bw)Gp_BQHGT1grlJ|E4komd@S3|1%3 zrZ%kZCK`2S%mS`|Z(^mK7qL`qyhP-rHPkFVg?B?3^}n5)7Q?pet%Hy?SJXCDwONqX?1hZs6^C_@I_$B+R7 z8Ft@bh7KCUhwr@0q;cbU4H2#&r< zN?|rpsR>fJrjlP>Mru(mN%@(wp2n@L6f#N*q;Sq4I4)9B1UI6?*m-C#Q>RbkgZJLy zg=e4P=_elJm1m#jm8YNO)u*4~#YZ3KsiDJoWZ-@NQO_Uh*Pq7)Tn`Tz#GoEM7}CF| zzhnHMeqH}f{kt(xmi>F*L;qfP3qAx<1G@Xydv*OE`t`c!-~DC#fFA#cf!+TPgM0iB zLweuMP}w%5cNYeBlh<~+TQH>{OR%QEs=FMshu}(5e*spXZawI!%sbe%pR5lQ;Qc!{ zVIN7o1zp{`_VACBb?<-wOV_*pE60-ezw19dF|Ff9L$%tXR{lZXQdSWn-3>8@3*Wdr243__B=+OHZAmHk&e7AvJ7%J)h zes}ZYlrABWKTV=gftjghXE8R%#U2!fy}~D_F?PAu+R9 zpj9MG0amG%G{JxZtjsbO#VSFRaxqtXDG^*{mMQ?VQ`*o%m9d>vxybVxTPQLJyo#+v z7uZQEYat@TNm!zTpcoq)cct?EB7YeBIjuKjPo>0M6VC^}=)mI3rJ4-BNmk9m{VXBLQs|Z5t0|}}Q zBhHY;^^ycGWF-<_RYqikK-AO9f!uly6zVx80Ns&q)z4)#M+5^g^bWGq-d^WLqh$b8#yzi-hExRM*5(kbjh{jMdbXE=N=T zJG!DNemr?g1U|XnV9Nd04=G=@>zbT&GZM_;&Eupzr$D~SXNC! 
zr!+5C{f@6;GuHCGsB({Bt&gS2S%F4VOkQyg`ITzQ8{CxDSO^b`ra}JGrp6-d1!1V8 z577{~4p-cEZl~|X9l8w1^#!yk9pL4$^ZF7jmlk8bv;;=VH&vlSsIf)`D{E(J2(|Jw@WejT9`{fo0tp za>g&GeDYeF*IuN4`W8~ZTtNJ|rNqtMO3s!u)SVR!rqs}!qr#Lfg-%X`{PqUwq~I$J z%On3%DrtwJFor1OTa}TuDVX}BY4X`CXcS~AHCD>fE$a%f6nyDV=lH>8y;$Ub>-`fthw454=D|Iw6$CUox!Pc3Z)P>S zC7}Di&4MQ?OnzJ`O@Zg@B^b$TU{7`p0aEavuC{Wppporq6`ZZJaYoWE!RO&JJ=YAa z>=LN0jm_m~u`>Cznxh5voGw;zz1GYv0np|0Ms8H=$!WSnQBw!mS_kDOH+6Ow8bOxX zS%}OA6PHGxtgiJTxZE&X}tO2 zcAk4}EnD|iv3y-NPrR^Pw(VfPU~I}S*Ldc&<$OKv5UV!cV*C$#c=g>m%$#?GYhhJ# zt~XIy-%6dn4U6{8441il?5MvTE(WUW0V=j2X`mY$$)`9;cpJ4%hSM9gUP;B0P$|Ij(dll-YOtYh&|wwWI-2EutwyRXI`pkh^leVc ztqqhKYf<}*XgqqVO|_^TjTn^aj#_Qh3Ah?kLMZ8O0Z0e$Ir6GitRi1hDQHBdHo)8Sa{M1uC_27g4y9e*@-m`s9Q1$I9 zuTus!?bXwt`b+B9Q<(-waMis#${?jZ@99nNE=mp700#C}rurF7FaHn^|4ta)ua7dI z>D~0W=Ps1F5(jksKk}PD%AWP}2@~|3+P#R)OUCop?;~0L``64E^DkyPJ2)S3f{Q^H2ovBYMXsTSMFDtA-d*h*HVi^!CEY7H$^Xj=Vq7iLwt{mq2wRW9OXUu6R5lxB{lAZlE>ZAOgxyaG~IDuFCRq$=_TQpj|FK#Fer`L})G&XAI^_$r-|GLn&&h zAharx(`h#)%C9E2QAL>6#OZPs2c-}?RqJMJmW~DCrEJP`u%*z;AMrXC-I55tz{A!8 zCrhF$SsGK$-ay z^4Vx}1tfy0rmAW3dweHQn&5A;t4;VAi=d@lFk~zF-hT(6I#a2^`W{E=SlMqhuJW-o z*G-{aX<8Iu83kw-fs+C&ZRS`0nyselseZI69p8C2_g_>#8z>9jZeEZZje zYpWDMdE|2_O9fa;&6R>LrGJ(~nb)x9M_L6}-s&IxBfqyb&KA5a#9BHHqtaYiJq}&f zD0G#hFjgwl0?oov{RhUvmFNn0;nRiF;>e-4DuMFqJPN8CNhwm3Roci+Ij4*B1U-%A zSW4olzjX>j%psid`)H2eiRtP>EEng{9{wk7Axm*wT8Q)VG6BdMnnKs1zqkMlr!a>r^BrEqo)U&RBOa#&lN}AUG?$lSjZedJGg$z1 zgN%hc$(_BG!f6{Qnz)X(%|X;p-9qw*bEufLor1*&iJZNeq~(8Oyq=0TsS;gWG3LyA znyXz{^7Z6iOC#w-G-cOvsl8f2#i=y%_C%pOn@elBiiX1}6l@9eHy0|v(w)lo_rY?> zq_&Wg01pB(P#x&p3rhCkH>N2&%Y@=pG5iv2@Z}&kra|cX`Ysg*V-oy4a!FmB2^o_~D?zb?AMk~MLB^vzygdV2$tX9lonWfJdyv5VK< z+rS^Il3B7kjhElr$SZGek;j=#oPM20o>|JM31`{9zlv$I0(kPJC4BPjZssiwV&wPR zWdGk-x+Z{-Xbpi;wWQ_Qs8Y9}wY5=PW2WBd!qagFoy{YU&8W;~j1ITI_DXFKuvndV z+B&d$o6wkTxV_C1pfuxB+wj#{xFi2tb5;XcJ1-G5XA9-qucO)#O59I7DO_|M=iXQ} z^8QJaw~#esF9rdA?b>U^&)rYf`tvlLOQQU2JoyqRc#8G383cO`1}s`LEzNCsTU+I` zn$fv!7z9`9W(yTIl_Wt_laU&`iUIpI1j9}L1xlM&$f?=mi&i~e8`R{UYIx_Gn!)r99)9Op5qbZ*|E%mg^>s&@1cKHn)Ru@{0 zhRU)$G}T!+>rydgU&WMggqEz+G!>l3nSBOJ+6f#P=P}2fMi+hO!+7b5hj?^oe;&H8AJ05;zo2F?uRr%VBR_eUKj!?*kv&_veCjZv zSI-d@8AxQpO~F+Z(P=Rx=A}phol1If1{oz;q!ncdw$ez^abXCt|mu)`^!d7n;r+Yt?MJV2V9O%TA6miQbu-wyY!cg+ zPiEzB-!SpZr}_E&*BSrC^StszUmhFUl}Cp5=Aj`2cwop7hTJ!letrAVx3A!FV0Z2x z(w(Os?a#Q8Z?SXzZ=5@{nllHMv2W8XcC7n}HOr^4Zq1ME+BuKyyB4zl?=_sec33XZ ztK5jZLFlb0ZbU?LD=m-o!UjQC5ix1CWR}{9NpIvIfjXwS+&F#l#*3jB*D~KlDR70XL9jHD`sB|AN~7q| z&!%1Ti@!!nlkqujWn}lFZ*f(OrA;-3rke4xO=%AN#t$r|Nl>Z9s!jaJ-}g$XrP618 z?Qb$vI>-B)5F6zG5Ma5>C;A_o3%~a_0V*|9EvlJlvPPoI8RhS1r6lFKfA1#zflkeA zKfW5$zNAAlhks0q(dLc!1It!B5kuu@^pzvARFClwZfYo)h9>)0S+2#cI)_6OPGRDPXN&YZL7^H)Fs0C%SXw+=??|JMO5x*u(c?4pxqJjQWt{ z6a}0_n;Jz+bup%#3>sqN@n)0=&ax@pEcxT4zsR1tiJY-((Eoanl5ba#{?YGf<{qH( zkHg%YyqcRcHc+_z64uZhZ1H9Cd8@JJYH$^ssfZ{f@uZ+AwibP44H^5RC_9>rBcKAy zg+dySrjffb7|q!{Tmq{4BdKUlN?{}T@&wgTb1)H=?4vX#Dm7WfJ8n{Q=oU>Og0b_G zAOCfliVJBJU(Fe(f&KPpy2Fik(R#;HUF=J`?l0_ftHq=&JIpg z=mc1m{toRIRW5%m*T#e*f8VSNvj0wbul@4=7X)or1$!qd%^b*Y5MbE|)OqDS8aa?r zMu31JTw~{akpe6|36>TT4b7x9`Y15AQRnWIJk5d1Vx_6WNpri>XD1o0_A(~ZS$s`O z4b~+*?#r~bg-aoxh{Y;M)fUj!Zt>55S5#6+O@jt^GbBo$IeOeuM!Y+Vb=za4JWJxe z&$saQCtCzd*BCS9G>^ZqSb(+3UwbugX@cNt6Q6u@n8m9y_k=Nf4l^ zB6B6+vf)vgX|pwPr|mWly92$&gr&)at;0*bOHZM$ifWe@qku{+$f~l{)8N*kZ?R%- zchTU`O48tv$7;ct(WRzIet)a029vFXrsgWzJ8Ez`v$0xIXz}IH*_KY5H-R=!I-R~! 
zfmS1JO;)@vCkBI=8f7B=(gNzrv#{4?Vk-#2m2sA~tW(&N_M?y6i8ucY9pwQSq7PDY zbrX)5!!#uv$9{7!x`6H2!Vh5$Im&;2_-Nq2wO6m+&&U^t^WN|QyeCk4@6iW&X3zkJ z_wD9yUK%j)9v&Xvi&vf=#)xNz^VNGV^ZDB^`X_jPWKefm_TZrb-Ff$=Czv+oGnUW$ zi6gt#5pwY;Q8&&J7IdDF$ZN!;Mv|P9KuTVs6xb=`lx31zmPL9&3aR-?QZOWuS)5K% zc0A$nAzTl?!r5!5IDY9k`%diVm@Lm+JHxKSyV$UE1M37{Gk>4K_^D(0Zrn(|`069x zeQyNMKmR0;KJow$K5(A^t3MA6=);gcJsH%i8-sc(oz%NCq;F6B^Ah?yzW=)%+(%#{ z&ks=gSoKhv5heBMDu5DP$v*x2_Vr^*nS)Rn|4JE>p_{Vo*`GfB2GLud>n7;xBEah1 zqc=nQ$ouyjBFF4c->%A?gv!Lxz5K&7^zPP`o?Y%1tlc93`?vY=^=BU7*C}7KXYE`z zE}6{2pFZW!*lYdS+0%Ug*)x3b(omj%xHnG>>&3%E`!H-!Klz^q`jOYa zpHh3J)X;SoXb$ANFW+R>hWT7Mv5_ms))IVv6UTPVW9#avY+e5oe=VQL2Eo?)4ZpDW z;0n%M*~htnqnx>Zf~#TI35|^*IysYD=|w~(7ZaCRN0LAwOfuNyLNjF=WfE9Bxiv0o zO>LCcdjw!saw@G_la$1y?@uRjtJ2SqV=!6Pf8CBF)LsE2Yf&^Ag{T-^$Bx z|IVY&O=H~jef&80I1jxzoe`g}VCK@RygOXfl}>SGDL0c+i7zT4Pgl?R#6ZGIB1lk2b2a}2XS4Qmt@sFm z6~{T2x|uWC+lf-2A-w)LXLGi3xoAHznrnnsUFL#BJ~sq{VU@RtYsewIDv#qzUpJ?l z0IikX*%eYa=r~y6WOb^BWl3t*=GfU(SLvQ5SIVh5XZENo7-vp2nkyfrjcPaX2x@5Ic??QxY>XGsd#al4X)bmMmK=iRPDl{z9<0mrX>3^3>>! zn4Ik%wEHCEOl_bmw}sBac1-fyYD%f2C^(0zkP>QwOGrEwhRUyy;beoW6hrYb>Fh3~ z(&Al+KCsf2m(?NI>WOT^8eHSTm6Cs@M6asd6R38J#o(QbGpq)qe?FBLVrldhltosP z>6J)sP#%@Z^{7faWE-|o-Dsy;(@RE;k)&E9=^XxCyuPN*Ah>9IO9EvXoK{ zF{UBcm4&ASNJ=CtpuxnI@;0tl8(hFrqEZ1Z4p$WGKlxX7E|s-#zF5E{>y^`k?)fTL zbSqdo|FcDE4yBZHUj8@I+E19tiD#wqJSKu#?F0+R0vkKHTv$((;40PHhj(!U!F5_< z+wCL>%5n^YI%kU^>wx0zA(DGW6|kS z(CUg97}V2gZ=LcP8v)#&@3VKatDV^R6=P z)inaF`T&xNqrapvg*SQ4^|&Y})Q*p}Uz*+)8&c)?cFC zEgH4JF6q0Y)Nb~s_d*)FBViP6@FH#1Ig0oBQFA7O>?8g(2Bt9}`_)ma1?vC;o2b76Fz`Mel%yU(pJPE3w&f>FX^JOroy4#+i{U;2aj%NOJ_G&zcFa~2s^(@I z)dIWfB6Kxb7>mN_FAw2T=w_i$k{_|Kd^;OyVkO4`)U?0ox@j?C-C7%@9_3pf8pg9#`5gw zQM@$vNnVupi~#BxrQ-YO#~Jsez(mmW@>8Q-?fEAk=V^i0*hiEk_CN6Ss1dHorN@qW zT$+NaQG%EMRBV52A*d3(39ghm5+D2B1H3f)VP=2%9;ap71$rLFZO>2aSTUXb z>%U>`_n$KRv%j!-`a4XU@Mr$<;z<7S%9H%{dHEb;M)3R}N6UL1>pDI7%+pWF`xHcu z{sSMr_ZK#=o`;XeLBf1aaM$}F;l79Py|fvx)2s1xU&FBji`ljPTXyW2$MI8Z@$f#% z1@9C1hhHZ;B~qZ9KuByn5s6u(=GO_Z)FjA=EIPe`j54Dfga%0}YNtfqh0zHGRc4B6 zJ5lL}D5$lNUu&hT)k%p9ScBx&_YzlNA}n1-rFnv^JUNI4R2!N4c9XOE{j;dPvu6By3;l>XF@=r8z+&eDmr7koy0=@g909C_vIX{tC* zefbUYa)QV&PM6P9L26kUSq;^Mkd+%w}OhKS%R1)^r=@0u%^*bBv@)%=DJc)lRFu6 z*-ZJ{0s+>1+VejX6isBH#Zoa#{=gO7X{{M5c90D)p>cZZ(A_=q`ugbnu@#B zsEa6Ss3S%xa9mYIyU|2?x(xP`;xIQ>3G~uY$NQlUJ4aLC0XhUx&agcgZ>+*_bqzzI zC+H16LRa8k`UGIT;rr-|*h6pRZcMi}2voLmKj|d>amNK*yRe2R6G*$G4|AtA{1PVR zYNPyEnloZ($;`l*TSkun%M@13wdq?46jWIcg_Ad9A30xare(ni%&R zWtf!wD!H#0Xm!f%|DwtwOC@@xoI)%;5lz#jH2Ut=&~v8(?NxcNe)%+brPAP?McIu^ zYD3FuN^PbpO-*rDGb&XVb@Cg}sxpwP%u{F?B3j?imC{yMIfjt7E+TY&9JrV3iduzg z{+o-Xy&5M6s z&ll4@T)=w$y@UMewVeX76m}gfzuwJN*>9@WVVc^9Y1R$V+R;x*jgD4h5Bkml0hfbzb0^l` z0UU$Fvd-a(U^(yqLRa4)29v-{XQ4y3!=Pi3AyY410<_`wE^Mha)SrtcZkZcpyL>RY z$5ONDI=SDUqGjVv?1yBV><*@8l^4ys0x_PJ{(rkSMLTcMd?lXtsC=|CV8__Ukg*$w zPH<$h(AC`~sO_P#qm^1?lR!$g|Dcrydn--dTG|Cs8m0KC(}b>1-rJB3tp|c%rM-x4LMuvaY)7Mu^ud|qb*98e_blYO+bENRgUE zgUM&;jy}x;yB7S5eM>*V``}N+c*sNJ^FoepUBupX^Kjq4imPthxO4p&vA1Q^e%F)q$m`_A`B9p7hw8$3 z8te0EZYrZtI;o8GC<^}nGBZ7z+?+TH3KA$&3SCy`ke`=8YH}Fag0kHFc(M!PNy)uO zY+9&v`gggP79zkp$MJLfarZdN(NnwFy?;Gx)-7cI{3%SH^gf?__&V>u^H*Mf;sVqYt_Yl)fnIUVU1CHTE%Hd_v&$q%>JRH|jw~KlEFkQRXrnJDMj( zJ;vzAM!O1)KKR>58S%(iMn5r5kTr%!9)6NhkB{Yv(PJ6)m>}!Xhj>hhAPMZoJp3S| ze)}7N+XzA47*~mhCm;R;;{P68y9lz>`o&6PZH^Ogdne-(smGdbqD@ewsYmeMh@&*z@|+z*tSdd>*;m4 zU)@hY#8pD$gSZFPEkKDR$?&a#ao7zfLj&yQWgG6OpS+UcXm)=~!#5o68 zzV#-rzV{RFezBEl-~Y^Wf1S%)pRH%k@-s|Xc#ti}1G$~tKtxsxv2s8zsjsJ|GM@7E zi&Vtyq4@TES`xp-R5X|7tdD3)e~*r=_i-eTrzhc`45WQTSJubqb3UXc=K~C7lU&u# zTa?p;bw|;vZjhDcNkLI8^=%ELmK2fOP(yx84Ur`Y_-EeYPH`X+)whUg48XtWi~#Dm 
zjDo$1R(ob@tD$Ja1=;Ksv9|ueAY)Nltb6Oh*3eD^jHO=)ilz#XCc8jttNsS3YN6}Yp8~9b=B0Gi&3BzP zRDh-AVL9aQy^V`pR~sr(v0>c`*%x2ZoGn-rIN1eO3bOut%1|lWpxoBvPjgj%SFRki z3HX$f4Z{{?jzT5beu3OyCl`cGy&odI0avhipBJ)Hp%ntpf+f$qK8tzb37EM}yG8L*SN~+7LtgR(2J%jk@XgakTtc^8P#oneO_#(}rXD~?ijyBA3%3wIo6={^hNDrDE0u3u$^dq)}!;=OjpEV z?2)H1MV`l&5`ZZ)3{7T?Z0|zM#kKOCtj8dOh4Rx;BrQKr@d{6}W*s7P;&z%ByV1WR z0OPWY6n`zanz@%GK~~I?LsXp!p)a?cp-LNV3DvZvHZa)Qk11bIk$(;~cS@)aC`98^ z;EGfg9|@;j@YEC2M!gcr^2+|-`dCWtl~Ep}M6r~s43)y9+Ut1?gw|p5mUV7X492Kw z7qI0V3!(h7KZGuIm|1PIh_3c4-}#x9j>xG6Xb6=3WguTQgWZ!D`f1@ZC3UCdkT&biC!ghbU)E(5k!!!QkPJybW^PziWD z9K#swy|f9U432JW{e$Rbsq3`UDYqScLl~5tB%O&4gN+_rFLs@cPK|{DlM`o28`bXN z#Qyj*4F`fS{~S%F3<#>0UZ8227uwY~=-A*#yDU5RhB9zAmBy{Ul&$kZBQPsEcZc%8 z1o~_A(9y@gl}V>twU`VB`i6RGHEX2bXhzp>K|5%sxkq5wXP|w+;sTb+rlq+{k8xNA z8C@E|mJx$I*4%Fw6wCIu8W_6YL%%DcSSkCZT<}}OV1EXV&KP>^i468;%QBatzB<{j zIy%hqdWJTPdMyJzcAUypjPU`qr}{BeafhyK4_akVr3%_6zuP19W?o`2+mqhpvp8ao zxk|bWrn$?&<|yXd`+4Bhx``ZK{t2hoO~PmY0;12XCGp}`GOq0-{_-xOy>=7ha}ckS zE7-nbGP_pIkj{S(WvPJ_Ciqa95sWH7g8KY$sxt4;UYUrYMmA(o6wNZyZ?8(EIxB>v zuq%WIdJ-Dsh3_?Y&YaxM@q?Qld+Y%>uTp zn8%9w)0j8uLni<0bv}CIW!`*gG%r2zATNyitt=nng~uM`r6(WcPh$itPb&9+$Fn1T z!!wWm596K~!84CP#)#klUSKqW#~&QY69OtFuj-GFKg;NmV|nCvk23P%M|o~OxnF<4p&dVP>BI(tukGhvz)^y)?j+>KZi23C z)o`nHOLCo@w)svu3wHA57iMlm!Pl zbuEoMi7lj++Nf){Qm-l`KlK_F@h4Fy?w~bo8RqgIQ0GjfD(M|;#b45!^Esxdx9CZo zfG+V}Y7^eTSoQ^u>aS^+j=d^*I;z|?G?W}CSDD{MydOg+stX%PRFVo>?^jhr@+F2l1^!|ow77+Om1YG zAnQ~`7bgU%2V$DJmfJ;a{jbDU_7a%VOj>mZ*>!57Gh->Nils#pLq+i=iqdwYE!$3i z(`NbHKVm9gfWBZ3wux z4r2>iCt&#jOVCC-{5N2}wH3$RgLDKQ#T0P~Q?fj_3>=Mx1$5Q4(wmH^1Gw7EOJBj^+D=p9Heo{8HyI2Bx=rX(zaZp**^ngLM`nHH5js+ zaaP-DNopkPdMb5c<(Lzi(cda{p-G8a=>$zh0? zG5eL!8Bl^FsGO>!p_u#&vU*h5~0kt{(@ zVW$GDZdZO)h!TOS*17Vpl)7){bL+X+G2pst@KS-wm5UWDAX1`SN)DDX<&6@lQgC!I zsRZ}D1_4%=Yu)L*dhX~P0xi9(RD)9OU0EMu=*L@7cCNIIFjF7C&1T&4;656dSuy*#~GaSZR?C z*J{@YrW)z*m2G29z-o@5rzcuk6c%$h-Hr^L-6h!VB^V6_I2?@(N$0OzRj3v?D8SNo z4B{J%HoR&DZO?&6EQ`L7*J`0^C%x2Ljbc{tC$vXOs%;?CAR1 za{<5GIp{3Il-5|NsJF`P`&ckm+>6%BJLBbvw~^IhRJFa&IuLChu|xVU{PhnJ0K*ZkKx`op_; zZup#lJ>N+CmWykraBAgb&TXAT!0B})+&W2+$8LPjZ6iJG5(Tl>smcgMT@*n{vJYh` z{xlYZ(~uYHT3?#%OI+Y-&hGz-4NE5R!`v@e_T6MQF8h`}8F3eq!U&=`0p_%>3jvK7I3f-g)H- z-g%$sfJMx{+7p;8d@WN!`M*|@YIM01Z=F_i+NEm;T5r zV;|%BCr9$?A4l=>6A$yk$cLEy(OaC|wHl9o>)F3*9@~GI$*Q@ZGke0{nfBhFm_Gh5 zeDv3+`Nwl3dHtD@y#3Nx-hSm-{`}07JpK41j2R_(d+G^Zefe2e<@R^q7{|OB6WG4? zTMlhqz;(9`0<0s1Uf)Hi_bvg~HZGoAijUV8u6b-?@2s7 z@8jH!b6mOQLqK>mw;~d`8<$69N+lsl72Hj%BtEx|gggzI6(-V4bkwyC&}#a(D}q&6 zD?ey~RYkLtoN77ns=6s}8z#4*m)If`_X^A;l=l#lVZbA}l6_~AnZD>8kBytfi?1(Z z_L9rI@xf+Z``0?YS#p+%a}M&{-@akOlx-Z7k$6~A2kFIqf%U6CtI_$>?yMGbNwI(1e4pc+Sr=W%$|HLhf9s@Oj2<+&&uT-2bbavM3oN` zBWO)1w-T9CLwH&m`IVV+pe&%c;3oMgN3l1#(N(9EvzU#sV7@Dg)si`lp1Q>XEoEw) zZ|QEBE9m@G;G-m}&&FIS5UrcSVCw>$DnVJ{G*_*x&dNEi@(W6yl@igaPx%;!+_%Yn zmE2ZJH8`7>y7IIXbPZ_!Q&?2Nl+u*w)qhJhDEU{)6@&_+>Qg_%AkZ>bev7VTmS9Sm zYjUbQ{u}P=H(;wih&1nm1&D}tA;Xc%5=TKD! 
zk(U)jRYeLNrbf~-lE})*MXhclIWCP%X*F5dG^EC%ioHcupgW~kccBeF!Y}cc=np!M z_Uu}0H+N#bwgJ13Y`+VF!pjS>_$_BBdIwDxzeRm<9>a0_aYpPycYP%$@68x)>_q2( zjP~%$R0Ll~osulut4+3f9jyr}oLMIN67&@A2qa?a9@4%!K*{`L6i(bm^=I2rP2Pw8 zhl^A$Iz`x&&4eyEK+(xyTJ9ELNUq0LprgCoj6SoC;-CTwe6ujdt1;fLAZt$$ZQez6 zMyM&24p(VPxdtW4U1`xhv_{IyI;fUGR<&`6eCf=UI#{;^H(6Z+1U4865O^u41<&Qw z3$~QoT^vg+ab0B?u9fd@zKV+lYJBQC@F;8LSbC+a{*?kOC7Pw28dR|eq@N2f=RjI+Vv{^t`k9S22H_MxGOUx&@ zK}Tli0Nw>P_?FZX)2=5`-$91aMxLPym16)^M<)$!?N}Wh3=bQ*Kh!{PcP?F>3D~XC z*ln_HoXK?A(*#+00+9kt9R*meHPWG5vD)-BwYFij^djRYisXGRUHLOJ=N-abK)ZTN zAb#-dzrf^ zkagRV_;T6>-X6b?WvfG2za@$fKHJaxpX^}iiYu&Kca6^`Zj#$4$t%%QqwXTFxE-?t zwB|t?G*;LAi55YPxwl_#D-$m}1XuU5I{Rq1*l07Fv2^udvUQSbTl3bOKX_NB|Z1X-rY{>;MZNTk=E z!7oF#3_6@AISRObmMOR*M)p-;SoRZ!J0(CSO|qCe#deaV8vTl;Cc zxgC3i45Whg&~kZ$44BsQz)xR|W#jA@aa%JUpM8@FJ~We%gLClNG7XPaUvYEiclaIr zi7R`T;JNh#-GXWbU?`I8xOnWQ`CNVt8Dm;g5c;~vf*T+YD_bJ(y*-s_Bane+Mo<*Rqc@%~?* z;O|e5;I*em^X?1J^YX}1Jp14yJSWI{MPT*92!YLmkMi8Z0;Ew7^U{+K^Verb@W#ud zc=MGLqOY;X78$oywfg{>jwwud#XI z47^Y5lC}en{Tn#HXB}G?&tTD4n@oQDPmF(Q3~xO*ir1eV$v<8g%`-|O{zJbL zWR2!e&yD5XH~+$SvnR7?-V~P3pU9dYzGn0C8EpP(67G8!6Lx(scf7U{a(z27frq$y zcD3MjJN`HKxPZ0)!1pZQFon5-tc9zmv*YMGPJ5l=!VNDxZu;WkbBDmFOcHV%2#P6| zR!T;>NjmWck_vSc*E^_f9r)h}R;3(p%NiZ7T&#-LKB~3DE?^Zj_K{d>CBDQ)e3^s0 z0<6#s3qM~i=GA|$ufOBq$Uw<$|HAyC>xS^6RxO6Jp8Jd^6=52=fM7tOu*F{gitxo8@7 znIBP;_AiWuUt!3dOiR`b8nWbn3b&!II!|SVd}j(mNG(hvx3*lsl0kTGDoHhh%+dsI zBwrKM93!CcG^u)jLTb*sfE7`HhVTY=0!xo^J#!y7bB__N^5#~dCtk^C$kZj1uFd9r z(k-qPr4rg&Pe^+!o>g_6DHB{(=s2(L<#4%;^(jppEVJR>G{6=mdX?VH(W*{P)OWHY zw~>u86k0sn*w?&X$ISW`xILlyV|^n8j_Jp*&~mol0gPjAy)SGfiSQ%ZRT z1yIVog9^BmDR%Tl)BltBp8gq4nP0dHl@97wqRAKhC}kUTYg|#OrtC?sTr34tO74|% zicrbN8fshaD&NqU@ueX76W82@LzYdhNTjJ$iFPd!Q2l_uY%!Jb(=k=A6TodlpFIy< z+Dv+Dl~bMTXh@h#MeKYu#d|Q+UZAbUkEYs4S{hTaSsN&?EFdQ@kESLax!GkTM5mAw zpNc9gi^ik~if^5z;M#s_ZtTJ6cZe?UgXqp~pwoXJzeFBm;O=fJk4&Y*`$w$4%dq(^ zqxr%&Xs<40C~7CYq1(`1`H7BeYw3~y=?QVi5avns?W?q=r^@GUrY&EMwZuYKju}(9 zipXW>x$@yE>Na1aRa)`XU6g;Yfx1rxZ?lh+{^eF~f4q{&`3ERHaR<$vd<=3 zX3`~(8;H|j6=YQ%2*ziJ4CbwAZx5}~IsgvhTBJV;X8CPVR-6F0QRo<1;gICI0Tyqs(5J;WPts}V2 z&gIHB_Qn_DA&*h6CRA!)otD2V$Wo@zxhmi~nO!S?Zza)*fa^c?uo1DrEMccI`_cxO48dp zXzl;E9IuR&H?*KP>FMs#($}lPVa>&6O2*O=i`5h@;7So>WiilSNN-OGR!adr&IY== zThQs%bXZJi41yJd6TM}S(0j#f+v_Jl^5LtQhh4y$`ptFLZcPzj-Qe%<9AMJS>r9&A z&De44dE>nk?ATw-jsu0RsdJR-?n(jDO}o+ECv6a5t~Nv!2Dv zudrhM4aQGcCE!}dInOvst2?Qvw_~*Z8&lV>H0e80>69z?`dv}1`gR>{W}9p7LW84= z76F%XwV}<~?}}!r1fY%5?A?8sT1+^qO>`D$FhrH2x|T@TPsb_Qb(5aU8Ll!7MawVJ zv@-ywe6Q5|gDF_;LDP}Dw75r+w#$>aT^A_v48@#W!Em#MLA?#TMvuNtFlEr-80eIGQsX!<%_z*0GysWvrW7X1bj9 z3=LP&+n-BMUk28$M9j8my1V1(>q=m-JB{JqV)`A`^xB#k?6S~nmd~eYL0g_J$H8>@ z&EAp$x;MRP*Re*O!xC`;d$foAuFjzgl);nVezg93Pv%ozKjke>-0SaF`pL83Sl&XaFA6 z3vj*xfY{s#3J(gq6BI0>C@Tk0(>JpO0s;R^=W_sq|9Jl&(bCe<{zpJMI$An0#AB=A$;b;0(NDlB+Lbr=Z87n|yv7Yew=X9C1dAKmD=AM< z#R$*<{`VpO_bGt?!~m$TO>I=6j^{rD(9_b>0jZ;*HUc>5M7iXFdi304ww^o+`XLFp zat3HKuU+5P__m_gPrmb6zy&HNEhil(01h}Kmbg| zz;u;=zxLh@`FAp04$Yv%Fyaa7WA)&laF3U58J`CiEkm}qQ$*0;alZ;% zShDdaa2OumzA+>Hy>2q%a#=Zz`=~~wyUq7{F3asqT_GZVbq9Idi-Uf=@WMqz{6{Ss zGUafUvp$$%%?EKq^c4-Ap!|?PdXeH1l&y)7oIpCgu$Ro;sos?bOlCnp#Kd^V?720C zXV98z?j~X2?Piysgn6yG+!SAI%Ls*A|64uE7v>H0WRe=|*&R!uzN+Bk%=f{?;v=ows-XoTa9UYYOVoePNlbP z_pFur_Sta}`MF7GxpeR&4^BaYI(3mKDE4SSCR+DaTBxv{hk0c*vjG1`)zuZqhcZGn zxwrCDe?ZotN^t_Ebamo!GfyM`l?(W}kL+~V!#zDAGPmz<+r;OOWrTa-bJe~gA6{=7 zIwa(HL<--*UpE|}*n54t*ytQnIjMe)5z zg9kq9Tj?Oq!=ALo^FaWQ-eNaOu!{UnOMlAWHe1DDv18Wm78ZAvKa_b2n3IA-Nc)$4 z(`@8K-smj}6(wLw-vkbqvFRO>n>>vzgx@Bbddl$PZsGcb*ewaim(ZsB3LMb*hO8`% zRyc1x(-Yp&fv`+?khXNdD2_v1+yyK#!Q$6s>syhs=i5)q#94otl(0J;&HJU80O(Me 
zNZxP{I(;t)RW!@;sx(%by`=_9INAQv`Y`#5Sbx%l7T~%pbq!5EmoMy`P;?FO@p3NJ z@@r~#RBPlfYv3ckzNljy8Zu$ESXW>DB{W|odQfq!_cWq+Q_#_Jt7v#fk4|gpXuaUi z6y2^U=esdG6>oJB@BJ^Fp$aXueJpYa;=y{^k^~)r$e9O{OkWfX-=w|s%g6IfzY@bE zRNQ);>VQnt9FDb6%sNJ$9j-p*oM%Nxat|r3kz`DXyGJWALprpU`Y&LEP22U53-mi- zrP_+_wM#C!jGlXP6#^z&TWlynb=Au^HY6oP-07mUmQ>`B@uZ%Ste5pZHIGy}MdTq% z;Rd)mj)=}Y`8x3Z&Up?R{J+%>=5%EjXzDw%=}ba*Fr7BwXSk=0*;N1623`BjOX9Kf zS3J)Qii0(rb{vH36@E$Y>(6?2ieCos(?Q}s)^}feooZQx>v?)1;a6HSu7%AxLwyN5 zwC29)v^=OR*mp2;^^t68iYL_t09 zSnoODy`ZfPcSnulm2$E?dQA}M@Q9P~>7E%pua^NyUG84j!&xclJ#t;>#LW+s5s z!=u&jMy9X#B#iq~X2)xxLwMnK)AqHy1K9aIskv|Tc6#vkQw<-~+ahm~eMN3{uta#<*J1VD0r0jla zq1RtXwfSh$-b;qJUsuoOR+-gxWdDHP>R2y#tkc-M15;4b)rr~Oo|MX(a^&%k=`JAH z6zM`vaVO!Q%J@~3D^$M^FLA}k#FSK9|9hqg$@#6I#r;vWqk=~>#UzJFiumTNg+ODf zB_7G>4@ohJi8Nf<`gjuYi6+@w?5_DEb50XhsaI1?Z&QEimYZoUvFU_O1~V&3N&EO} zCQAc*Wlc#Xa6w7I`FkRPzCL!<0iAt=prO@24put96Sg8%nD3LYTl)&rYaz5vCa?m} z4i1zgdHaisrB&hMaJ$O*_gPrF1_fLz!?7ZR(f7A>9a}U<_%XZRk^=i8d>$rybeA*G zIiymxF$eyYcm$$*eu1QKs_XcGxJ^l_jD)S2r@Z^UvM%ziUUyIng9=1vyFS zYX0dU$w6VwNfBY8-|gFH!;(r_ChLUnGzE1wRPfW*=MqjS8oyt8(jFJ)>jrqgW6;G5 z!u>MLFHY}Zv_GTm=PZ!l6#B)oGq z2e2j(6lU;I*uygepaYkz9Ev!4#Hgvre-wpti2D(>i??!hH2y7>ob+H>s$;h}i%2(h*>wW(cjkv4J;($n9j;18@|O%)@ajTi=%Eer$9^HxpJ6t`)=9f3 z5z4oUjZ6#WoO2fwW8yTdG&`2loeR}w9jCbB;P0&(?Lt*pFC|+Jg!Br9O>>&guU4>* zzHE;(NuoS68)6Xkx@-x7iRa2CA~Ojs#8Y@*e@E}XW!K6Sv&b1-@|410oi{Tl+P6KO&}dPDmS zKEH&?N!6*!xfh1AbgqqL8H`dN3>mle3l0H=xd2Vm(WVRvw1=zCMai<8K91T(vk8Op z!%GQAYlK8aP6uDgQ*~HsPD3 zd(j26F;d8s-I?dk;THWIrFPmyq3W<;V+FvfL&&9UCqc_k_uk?%U8RBo;zFZw>vN`8AX5BNRR#l$|w*kDkFHq?Z_Z}n?R ze^~$1%y^Ye(eIA&^d@6gL7iMPof_T3zAC#zW2JL~e)fmhUEuiqL)AANnedL))aDQt zflkPbzmsF8*{>9UDg$myaW@lLK;Q}Z(0h^=7>NqJJN~%)<(=v8sf^yQ(gfguEb_l~ zS!*~W{FcGa?G;A$b3p9^Ze9Gr<^s#~(9eJ?L%Z=NB{}b)N#n)AaK}pqKiuki_};Zw z(ZiZqF4H_Y2Q;CU$KBza%Aqe4j?-`4oP6fcSRb0Av(Q4zt^KG>NKh$Gpq-7&21|0zzsA*r!lDA=^UmZ1leIwmDZEmd2= zJ`V4B_lMmZFxI5KCDhT1+;QbOK+JOOZ}2AV6(JMX!-Rzo^q1?3nRtqQtkeQgfAQv) zgZ9QGyWXa!@08CBGt}|A3F@hmw!U>ne_urWnhLzz?d`NLoiiUY%BBZ~Ysj`kFaYeHT(I2FFHy6-6p}FB z&Z0b>sr;fNl-oGg(0&(dbhF3oYF7cZkFwhCXwJlh_W&N_YiRO#?==HjjgC@P> z09Cho>k4=6^k)yOZBDjC!Ck1%bwY6nDISl>QEY!*&Toego}QGEo9{3kRm=s!=l-ps zXF8ZSt4l|GJRF35^+uy0r+fB2ABKDQ1-|6Ke1_>VE6I4`^TF3^AzqeDG<9BT368+x zB?u?p=*E1366~7Y#1(eBI!PU^u*tku`%b6+MDu?N27`ohYt1lRDJBp6l^E2aX^{%a z360Z!hayBseA26!su_`-zv1UvuG{`1?}qg9J;Fy}68mvP-nHy{r2GutD_r*O^;;7W zbmxGoZk}DpIlv+j12_lF_6ikfCEsa$gs(@FMgqNhGrjfqgI9Kiahi;KU^CZ#*G)1Fu*-^>++h%Wmr^iQuHkR8iG~2Kb zq9-S>qR#=c$RNpKpIa_5o`F?RyW__?PXw-F#y4Y)zy1bSI1pM89|H-!&_R^&``d_^ zTVmL67M}phE^m(>s#x0LnMzAJUKmpzqv(>E8UEzPJ0{zxuwJT2V z+We=5276?S=7Kc?1*CrB;Abx~bfWnW*HOcxPV?u0zk6u|ppERngFnKx8dFSHG5hWR zDk9&o=oBJ^;~h{h|L#~jD@GM#r+k_hTkBYTmAfm$ z!dc+OD|jqO2w7q++}GdtE#BmJ zVWn~hk%kep#UE2<9Qi^g44}C3vR`0?@LqQP@7L;x?RdLEWNzcjF2*lyS>7k9Fm&8P ztQo0v(vHG~uhk)}Px0 zc9Vu>rxh$fOxyGm4A^}{MSU?puAGC85frhRHdL#O9}nwzkwOH-q~ z|1wC=G1eFjROM67C`Kg=m!ZD#{HZXQ%CbArz;W4pOX1zZ=efbdiR$US=BOqjP5wpC zBasN~eyYF|g(RT7=!=1>#LKo|ri=c>2XV+2Day}_WThLj36(n4$H}$tA4;Vb9H{&j z#;U2Uwj8P*g%Uf~vvI9VQ%cC2#aC`lJrHlIgoqbF=S|SrQ21QmTJ$N%#$5>cK%#XL=qTTvHp7-1ky{ z=`0S#V|6OswaLLNnYVEWr*ps`{j#Gqx{rf{bD<*zNX(bFKCcE4VDQkO4ydG!cMo<* z^GX$ZjT2{&)Jon?S+-unq$PxVaXggsY3GKvJ@6{JD`mAEj{;&rbqt`ok&4bI8-&_c z6B7H@Zpn+=x%jXgJ!M-4-27}+L8$$TpECLEd^_!PF0GSLlMD6HrF`se` zK3^l&@`=|)Tn~|IZvbP}@g*7)q4%;P6fZoiN;IjD{iJXD!a&QrSjn;j61Q{?h|_Ql zY*Mdl*2KA2j-}Urx&zVl*z7}W!q=w@=vm^I;Za&Yo|QS>+$WtfxZ)_W*GB9D%ieS+ z{k2G)3b0YLUD`*u8d-%stYd$hun8fWU;x8zbP#AvqE z=}x;I6{r%-<|UY$BqB2jxg6h(oIh-U2f;hHKyXaG+@#y!Zzx}ys@Dz1N3Rw&-=j9B z-??NT 
zw~nkWz9Xa{9TC3Zh3<+?T`nmu!KZiJuK?F%#d=Atzm|-1JAj&fnR#Lj%$8m`8SjZ$t+{<~G@-6@R#RW6y03BUUe-tqY!$``MFKJ;tkHs%Y(e>TZOeo?(dUs=T>F?nRQm80bCu@S%eWG5sef& z;_EOwR9ykw&2le+`f(3K&H)!5D4RuKer^1q{5`V|@NnL=TI;UOaS%_e#ceS-zmZS0 zb{g0!<>s0|&{3_;rw4}^y{4Lm6O&cYW0v@D z`y!A9H%o$oMxr+zfaXYj3+ePc0KcTys^o@w;g#r;ap9->+|j63cvW8QFxji-v(haW zLa#6hL>OA>UV9$vBe8j4rIcUk0WCeif41sxq6 z^Xmg#7kYTScKB(tG9nRB;%V0};umxQKtVMVCjF|vY=@DA-CslQOMF+Y+f~0%nH$Y! z)(eSSP)J$}T?q_g3hjL%lN^!~$W9hspBhLKC+u{-2(@Tvu5hwA8dapY$*Xqie-)KM zI4)0@f?xpk){}Pjp9%J^M{|M4C^zUh2}Rvr5mM~$HTxcX~Js~ zo-eIeTlr*`ewt}f`^Rr;l3=EWJ0Og)2Si2hYB4JBXb)X~sb8e^j-~bkkE-vs>S|C> zBw3K{rx%}*Oy;L%nWhAj;;6GO-Acpp+hw<(@>gDicI>vF!YHWK$%Ov#HQRLScR!RY zUf76H0TF=5%VShreyk?c&QI}yWZ#gkZN3U%D~h_|$qv$&7T7f3u9JC@319@4gi&I3 z`r@xzs^HqPVHkBayyG=#y+=bl^{Sn}MB>!R|5o_Ek_dn)j9bgK^h!3kI^H0DAHwpz zFcA^KR*nf)-yIC2ip6MUt8!VCe-_5Q{irTPg0n@NuEM>ub< zZ)f>gQ`X5n2_1{|9@0}YPG2l%?HyzFS5ovI(^~MMkKBF)Q)2~EctMPE#1gW&^YD-P z$b+Va=PMCy?*{Kn>N|WeVoAB#G{km&cr%inuZ>QiNK4&b~vAgx|aDoJwnMw3IFzoC7*O4h<>~-n!?wN$^U% za?R0)ta;E%vM^_hy&5KgL zJGYuyU)zZ*Pn!le(>BGi{fgP!{KHtMVxy6wr*)#(#rR!7J>cZMNQ8aCXz~bwyLOi6 zB=xJEm|YnDXwUl_(`adrW}Ng_rD)|1US)8ZqS=Mq+-nvLHf+B?JbVh!p3gdgSWEW5 zx>Dvd$*me%{xWhV!pwa5-|eCr|E}bq$9H66CqKw%e5%*a2xS~JDW+)jodawFN+}BP zgX7QO5Fp%nYUv;X+k|}GDqHcx4PS2R9d7FX@_89m7r)&g3b93LM;`@~gMJJm0l#S1 zZLXTEmb_p1muI&*`fKK;x&7k0Szk<2@94#N?T)!|_s86CR3*ZsR>e#5CX6N>fj57Y zJI2dA&;$si*8nJskl>i8HR4B?&!nVpxy)w;ap*_&2gVJM#X4Zq=cL_CGeJi=r|f~F z)mxXI(OsYHu0jYkHQx4v-M=cprvn$Ft^ys6?^uC!ZrJAI@@rdDGop!NJ9y~QrchCU zaE3|gVxy-}n4S(-^;U`BH}1a-;u)DZ69aF)J~q-n#&i)1|B|>|CRG*7xEUrt{M> zcLK_l4kjDN;suT?0?mf#U&S$?PWOeUM75mkuwa6L;}e|kODR7o$Ag#{;t6$sdZT=S z(DU{AL4$U3kKk2vbbIUx|2o&}p;5Y84^v1KRQ+(x-S*P`VP4+*bkpUV;*;D*JcS%z zRhkaEf$x$ocoIyD9>i?$}SJ4J|Zso)vfWV*1%}_Fj07{G1SNljWM&4!bYG^gP47E(LHL&^ed2 z6C5_9;gZ}+=XKZJxK7VAt&;b7hPSXW$J62?JQi<|?q8AXb+wxQzBTNIX^P&^gWa(~ z4Uj;jMCjE!`gZ9pPa&!GfLRy!yP(}tIC+{^*;rKC$(SU+I*ymYzV62+PB09GsvX3g zmUJ&OGQkKl*Pmqc%ca)4=u)FN_AzoD`qsRRb%)YajV*^9UL!MX6H1506l5>{kVzd_ zjW;IcNvi3<4+XC;pH=Y=*jtvB!2B-Q*|72JE+NRmi%mxrwexpuih5?(1S`>6Nop4= zH_19kkhoC!5rA#U9XYOZlRoWgXHX#Hm@etW%3`b>sv`!3FIf&tBWjViI>ifc1j`P`XGH5f~jS@ZbXvA2&jw@XlhU>AIa!lOaGC#OWUqqDB%NQuki$uI20ltOB%--Y0X z_o>O8cjOP^H|TS2i+l%-f*kxnomRYGO6k@#^LcG!O0?MLn*161OEfwKyfYd3?G%$R zVHk{p_NZ^Gh-`gtOHiP-kOqAMI=?pHQ$0WL8{!=zvGng`Fc)%PMbtT~oqzQ2afCwe zz7##PqA@58Q1zG$Kfu)YIx;ac1R!)9Gw0c{B^s&rE5lnr|2uQBTF<(TwW74;5L3dk zj|drDR)%QBwwSQP^+bVML1mC!SIcpFhsP2EAXHDUUzz?yp!XB)WdIhB(O3~oShVBQ zcO20mW?Gxh{6pJkG%In+b1pkxOz&lfFaBFq5p!B06pD7bJds9|!b;*b$~AOJiP|z6 zyI2DMs$OUBjGbcUMdqf%?Jf4=@Y4&c&qWkPH_@hvq4d3|UW4WW^)gmCWwa7nJUnW70IxzK7cp@`_yY%LWQF6kMtNlfk= ziI6%QSa{QBiXNYAe&9g=#)vaARJ|7{G6v4@}-rT2;7XuJ05@DM-CLZ`#iU- zHd*r(>2i4B1n2kr*mP1fuh)S zCL+N;3G0tOguf>pt>%5&Nu4=0)RBcRYx)JmB95Ec0_Mk0^rxLXm>aoynrce;o#UgH z5b%;Wzjod}n&UjH9EBkCks!0byDuD1R+R|rfdJD?lqC`VE(*Sg>2Vc`rTo-7&1Y0S ztI8F;W!ZVjV$a{==PTy;CHX@T4!5H*gH=&crmbK*#&~uEb*1`K>)9MVnR#1_GeD!L zpVRk|3F(*Qb5fTI6|R1Lc^BsPO}>F$4Uf^$ipaqqq)1`gHJ$n=6#soKne^uCy^<^Z zYnF|1dkib+Yh>S5>_}C@8yi-0dCY}A&S0IsR~e{RRCoGLqwGU+1n0BSN_;z}1jqc~ zs(yJWCk!S>B&d%a?O91p3_1nHnu*d^M!^#I0w^C4y^KF}VoG8n5E1$zR${+Bx~uzY z-70%Ny1eXfkhaS=b~O%G<+XpyzKD0ZF+3=Mg*TiG4zgUusg`!O`Ew1q?Vs_=WP%L? 
zj0Cu%={y6_Bo9tvJ$ou~`Aur@rZC@IKReM(0p?qh3#A|sjuf^1^<^>JS84Sgv-*4T z)-nmOeN-YOjQoH9x``JPF(sXJ^qw8B%+mJHOSQf_VCTsG{u!wjo9RXXXwX+F|=&(znB({Wl-p8_9kiXB&@?1e%z&W zg_a2#0u$cXR*|{pf zpgSH{dPG59MHei}xtSJOznA!(-F>Z?oU;9e*%%v~$(F1kb&U@w-DMTZLJyxSDQ?;q zYx8aS_;&;%cLs&B9*tSm2NGM@-+nRX*qXDaQzvUfvmPcDJpu6xP(zFFW9F>- z^$!)DLOOGB2{3X3OW0$zVKfH*C!FuB*Y1r3Yt(|OVjE2>ekpFT zz2s@2Sf;g|V$Ec(_Tc^-eK~zo2J5ji4Uz~Rw5=K|+s61D)~w(sl1CP%8eI98UVx=b zv@zrCUmxXjRIpi>3TbHFXh@||M77DJr^u9TfZ+!x`*^Nr;BV6j`B7!|olF-hg%oky zlv3?$!>TrjAV;w@nCz&FjmQ1IQFc>?wXj-2i=X2>v}-!iB<$artK9Za&uaQ}puj6E zR8R)lgM6xEE0Z?YBT}X+a&#J=RyYsi3NS65aQnRa1iYlGHng(E{rF*_T%s}VK^Fmj zfT-BpXS)9_qfJ>?Auz%ngWmM@6szwDk3{b-QC?Dz#<%X|b!C~pSciFYX>sppM14i# zl7m75Ai?mU#im4-oOD%nHgzH?+^n%+3dwy{IYb~Olu?Uvc-B>Icesce)VvUp(C(@~rf;+~J*lG!{L5hk(S(XUD zeQiV5VaSE(u~T))CL5{Vc%fTd5si93?;H%)3M4JR57`8Ulu|S%@;lSgLbt~goyV0} zG6xhJ%-4(!Lu%pb=YVqJ$^OTWv^2Q*HUBoH{N$Vl+eZ~K7w73V`H$Apo=zL8yQFr$ zbx;Ba7$PQD`_ygX>He~me%l|yf|VG4(*NBQ^;~DS_CZ;?&eAOK3TC zg~4j;qa#}ZlK;{QFMblaQ8%s>*SB$>uhAL0amLlZ^&yAydm*CLbKICF*N=XlxDywr z@%?eX#XVh%&&yo?ZtL3qsbBhdoxN;3On5^c6Ehe9Ak528>~@A+K~uQE8I1iSFZ1*c zn-@!A7!W4vl&$nPJs+?htiPu3Y6?sP&?msbr*S4{$R&J70r)t}#8<7CR zB4eSOv>0mG!Q?ivX`i9XPsLPsCqhLw`EEUCs+6WvG=H~Mq2)(8T3b{&Ne#)r=~`f2 z-N+v-XQn1uqM)JG54mup?6PB0o4f-OGWmmaLbXA8p6oXO(0_z+@AryPi^ptvWPw^* z3_3^oOSJ?&KoaPefd5PBu`m6lZ8MbaixRtwO23J2?e;4`7%(T9U)x{6}Zlw?+ z460=9j!#?B+;{Y!eY zDA(P-UqXq99p@4aBLo#S>h&pUq5Yt|bRW8VsLj<6c1;X0dZ{E*0GLBFh&R7N;Y)BO zXI4_igTJ1vFJ#x2*B;AsdNeVq9i9VPr-b>n!K)c;zjH1}vgBSGeea}tD6}KOG1|vV4~)h_ zh`9b z$W+@w&D7U*{EX*PkSNe~K%>c-p>MY|>6f#fg!|P>4fU7oqGmZN@ zhD1uI4=D>4U%sSSrpfu4(aDQ<5U430L50eTxAOH67aBFob-;Uzvd40>I4@71K;GbC z1t!qg-x;@i9k!2~p?f7B-2}%WH3+dT81_9OzR|k!k+?DXGGm-Bb*2^10YYsd)5bnp zzE7+w%QgufF_h>vVHrVw`Gcg-)s7j$#sdVG4iN|{XlKoYgTbb!(b8)Y@#Dv{x>6m3 zQb}ro5!~9ZBaNwu*}NcitaD3`xO=$%jIUwd=@t$qb3IviWVj@1E| zF94Cr&P;VU#WQQ1JeDGk(Ht~{v9V1J8r|hxQPHKS@H3BJm=&)aq&m9NkQcM6Jn%mX{WzwU3^ounjU3zjt-)TwOZ#$jI zggVHdf4MH1ddy{eM2Y!3$(!T{%V3c}x*_7}aW!NwlC3G3e*)+t_(y*d8)B>Cj3_43 zL4}t~+OSuHd_lkfCWwLoeY9_`^S$0qr<@`h>oLjgqwVQZHM&s$s%|d-uSyG+FYwo0 zD?+9X3lfqrx_v;@52UMX`YRvWZFFJgpdd^!sc!@zk5PcXL6&=tXZ>ccV@xwG$hkM7 zRT`tm#rn^s{P1+@AorJGP7#nTwv)r_M$I{OO2{j!_ zI;5Pba!#=r1drbaPvzy%g)%?F3u~30kZYd_nuxuzNHMSp6fJOlm=J(|_Li0b^>cEk z$6@)QvdSLsMWjrNp$aiD8d^fF?gl~F^e!r&guW{WJgeakl7x3szO8p8%ok=@TJ&cr zCBL|ys7TJxiWWXdm;v@owPYYO>kWUD#RCL%4*03+m11|refVPix&4 zi$k)4oWtVXXJ&nXXuP&&Eho%&@cI>$OMK9sMReTaT-{7(rAmc!PCjlW#o|JuNw=`t zTH43?%uLOq!sQ~F`oAX$8E|+fxv5Gr)F?q!hhpjWd&Jt8an#|DhaSB66ykS=EHK(v zRwz9mBBI%OPngiAF}du#dgWf zs)~Z7&pOuQr8S;B{ry%LK9Mf?c$H~H^)zfLLgnE;I~|?v0rSO)TnRP)!055kb3mje ze+D)*Y6ravF)e9ZF(kV{L#Q>dBhn>~a5y>~tr3aI&It`DEe0{$f& z;$7u!S`gkeT}c&VoOv@5-!V+^74{>xthE!XZ`{7`xEV}XIR$ig^meKKsXp|K)WNNM zw*trFS)|p>975iu^oYS_lLOL#i7pFz7`1jyE@P8YDz7Z|6J4_=_@r+^GNVOo#yW#h zD^Be+jQG6f3;OG%&tsdE0N8Q3ZHeABGB%$2p)DRId+|iF$zSI*&P49iLB*|1?T6ON z(;3U#^%*8I@W&y~$#g-4vtwgBP4tpO?<99s((FZ6GNkmW2SqtP1Ek#(*^v? 
z$w_}hU={(a&?~>ZWoEleRoNFhCIi8;1@GG;wp{QNB98AWA3$C=e5+eHtGjAu>yu{v z&U&rlLdxzAy8-*)+t4>?DfWq1ldL0%JF2RKS67e`okar4t$~a*y&rjo@L(YOU;M3S z&JSoZtp0hI4hKBB^#?5ZdUB#-Frj*t7XdmVRh;0}e zf_bySe@>8=p3jEzX$X$qy}ouZan*RNswy%tsj=Qb6nCX}0-X4)CJgPtMx%BlEEtX2 z7lvSKC=&Cxb*u<^jjCPU=KVnj*J@UqTC$%;tfYM^clfK%ruz|~2Otv#Z+tk!Nbnpd zocc2Kb$ZWL=LEH3s_J-q((Hp_r*%TnNBp~A|9NYeU4n+(?l`V~W@Yev<+}2(KAsQ5 z>27bOYqOFt3_n_r)V{Ho8=rkh&a$ij>M@PSg5R?kl0epc^{ooOEgQE`L4TbJ%UNw=gG zwJ{`ovlq3&-?cF%g^iI=hpUe#D>kqA?)lanrI46p;_}pFLYs^k)V8BsY!H`-6~|J*3f#`J#Q>Td0ci8>9T^k{hhAoDa9C@261m7aRq^^JHY z;0dSEWNPIJ8>J8Pf_5mm*=$YW7`?)vzomeJm(EoMft${HeRokZe z^U|HO;~QRn2E;^?&!v7Jj>~p6tXX88DB3a(H2@$!p|r?6%t_KF5=hh ztd$m(AAHJ8lQ|bXNX_N2v z?I)@=>%SP$p9c+S+DqaXN(5xVJH1D^%U_p&Xv4+oU#8|EC$hs5L+TD4SNChN+-&$5^3C zd_%oqV`j-0MoDULGjf8czFN=W!hpUwW%`3Y!{WM6Fb?8(60c9*3D&UEGBT!SS-EUH zX&}v9njCd)C$He(v&Y((J*TX5^*TAJHLRSuR#w&tL^YU4_|9oWqKJ`y67$lzu`xG)WsQunjReOM=bkP z7BC7p5WP{;xm;WcUDDhDmV#mDfRA$H7(nx3p5{ ziH5n?`uB|+AL0hZeICr#*CY%P)rH`3t%%);!>o=mqTHoNS^+JFKwn3ew=Do`C*+UNwLEbB!n7mtJRGGIN6!G7xkoH@3G-PLZ}G z+ws=R^1Sn0SM8n!+1+$V9|@E9HQx~WgPpQ+_T%SQC)U*z@_d=E-5%oeJvhlsXR#gc zRA!z{ezNuv(nQyC+Eu18(O@S^C(AK!1lvp#6c(x z_I4;U*MOdY%90-){N$!A4p6j3)G zolz0&n2})~vbkfWuC{)8D~sv()AxkOWbPJ!8ez{f^_IjtC!p8L@hHmk#;9LU-qUcS zUhDSKVF1M1)3*r9;vZO8G|kN?ic_;D5tEJJ2FHNe27c!+ObSoOL?AoH)<=mt8prRa zf>zf&TxC$%cijJVTxVcIv&^npHa{vkoeq(lcf^gF6l>AS?8?VaF1uDkSqmxx`}6<$Az_Rafy zPC`GiKJ1`OOBL-^_(n8{`+tfjL=K&W;sx^W!*y;4B{4u)?wx+Vx6SpwIYP5$+`4>W zW^cr_^?FBljq%^Q4O5B2_34u^NGiYmq~#b%9+VLi8ZuMy^N{J$ouH#noo^q<{d#xw zhwb!b!wAy4Uc2G>KlHeB<*ff*gL~z6j*cxq_Psut-VUz$7HS3gA!;n+ZnCX>Ew@U) zWweNDcGFWCjhjA}{IXX`%>lnM8m%hvk2|Nc><~oqB z8m{x!Ovwx>wDGJtJBYiXA5|Sha}vcvFnZ?z(V40w&DRS|zx|Bd$uh47YcBX3{cW-**vfkvR z7NmOBGkTT;n~Wt)=jOXgUO5>;<=$O0FacxPA4w%6uu%Nb{wE$2-)a7wNhl78!^7lv zUT%EHode+K0PV{}NypA|&T=_JbJk%!^}#SAZY(KllFxo#ie}p4{xvbrZ&##x`KYD4 zp<1UQ6S4ByfP0;L*}rnqhP|KYb0?6f5C;6y_Lt_Hk42}ZR&$6xOyafwc5Y4g9k9XQ zLaVIGwm)tLr9+IPO-UM;Eo@g{csGY#J$XY3XXni!9w&JKAF0#vYO@a?b%TVjeuOxJ$2G zpK{AYRzAn}i-=va7cHs&$iT4XvM#Q0hZXJ z4?)6$;SnCjm3O@yMScU_ffu3=N8@7l$8RAsdcKqprMYOBX+}e^rJ(2E=~)LIen~M& zw7#t$Y|hW#eZ)!z3U5&JAraj9uAbh;g+kXxk zrS)o_!TOos*N!NRWp#_x+9W*4(383D*DGkLppH5RIFrHU!Q217^s~He>P}3VXLEpL zhN86MQ0+IL3O%-qd7>mi+;FY)Kgq+TAI(vxuK-@USeERDW6nJQ6@n|rYCY!5Zwwl< zpT5@)16V{%Id=v6DdasJ$FFdnw3GM!$R`MLl>^{!?e(Gx$t$~%sF6tFw zAiz!l7XJ|=7rj0@9A6@GW4AEm=_DAnb7mZk*bA_4phXjkV%<)+_=|Ac#0E(WDx!DH zZ7|d+M=Z#nAC$>vjCxNV6AU==U@93pYnG_czx=1C;YQ{2oR5StPeQt*vDFgu7HEjD z4&6TM9}%B^m?eH^IjyJ5Zl?LB(LR^mQe~R49xxh`REoc8uw&&PpnI4;&ZXR0 z{lLkK_83C9W2Oe*Tf3g2+KK1bDD#o}`UIgT-Khn9D$rt0MODYLFi7=cgaDrh5t);& z>HMZN{CFm4apRf6Lva1suyf0HBCq#lv<>8xZg(aNtH~nUeQJ3%9iH^^lcepmQr9c( zXU5KLX}o~f+C_s3QkTyG+)%y)kN*MrKnA~OoMXv7SIzT6CBBE|R}YQK0G-vQ>Ji48 zLQknk=^T(v989se+;~; zjyL(aRLNUAjkIWWtv;}){MO}~ev-r{oW{j~<8TTCWwDaXPD$e*Iy_GtsL`<}mBY&0 zC!;Uk>3UAJeZ59C;Jkc(k-^D5XUl{+{!+((4dB} zmSU!B1(r73hIJnx<#y!pj1GG98^m#tbM^fH0Q#GqhaF1=YPt*)>K8QH_w<>hg<}>e z*JM)|V!;=9+Juvnfs#)rt*07UWl{>DvfwUT6#L8z6FP#0m6U8XqWi287uxxi?cg2d$K860`}=^1UTHsRzt zA}yob^Vig3(WSSi-P9ygB%g1lzof?`U`R@?<|BnAkMGpZcSftf=^e1CTM$}Wexx-U zI+RJLQ<=`$p^BuEFmHz}@(>6B`)~(2{kl}Z%+_dsAt9G1)5HzOv3B*z5H#J#EYf_k zm4u(}h?xHXt})h!at(7o;$ys@VIJxFB$nKYwIPZrrY|Ev$PNZhNXNf80Qn^2+^taBy*x@zzK3u?w>-xLk{=2mb)7-%MeJn@#S1+`Tl5B#{KS z2x}NDGZK@QP*jZYKMliSSK@fbM&rMT^=Cp@G@kO(HSg-sxR5GDra>&@Y&r3kQ?Z6J zP8jje$R|qqfy7tBK%Z;K~`-b>^>L&!E-jby;MMWynU`lN)0hBR)tO z9Q^e}@~m=gMQ1o-@M zgZuPfCHEDVc44y492P3@#1HBV#C&i;JfFu)^9P|F7Nku=HhqmHdlLl=R6n>6hT1%5 zoPVEBV2Thnx`rB%0u^eoU;$j2hBQpdtXmDVv0bWv zNhjbQIsN|t%c91hD#k*Sqyh$D5=PjY^OXnv(Ttpv^Z4p%nDtYgDu|n^yH6v_4%mS5 
zK@LD-1`vVg2ge>c&mBK838S+KY3R~~Jc<@9Z_3BHf%x(9&&cPFpC^zMM#MF#h_AUx z)qPfQ*+9c+Q|ESb^PK+uO>*4h$eLN2mf~gmg@t`KY-A_uCpZc*6 zq(!=~cy^wibm6P(I!zzBooiPz*iy4fLG}U(0f#)C0l+6Db%FjVfk*`G%MTUknuAZO zQFoRYVZJsw5;;~-&&k>ufX)Y!ybOGt9;|$uY8^>&{6SV}W|9|Fw}7la-30#tiy$5k zj1oT|8S03)u>jGd41-M8r3uV&!ZJ`ELCNw!!9N`SM^$WX+~UIMM}tL3;XlkIq9Q)$ zNi)D7Bk}Nh96%eKHfT?)(9wNHj;yq-u)xHANIpE{&Hy9xo{uG6!;xE|BvjMYO2-G= zuH(x5AJ};K@(0IO`HDt_N3bW>No0B0tN08LxOvAI#((9~HPl=xN$v|JjiA8-$G|&7 z>Vl;MUKB8;2u;H zmN_Kz)I3ifk0O7hcW}d~PuSI&;Vlo}IvhvsBMqKTPBMAO!2ETjC_kA#U^et1R%g}V z17@2AiNGaeOA4h(zy*QNxRM4zz~jl|tjqI8%%LcMX@}CUZ4xh0PVtS)zXQi40rB8t zkIz>?RW}e4cM$3oY3cQ(hPqUzUo9zTR9NFBxyc~yB$9U!bJX)=0Bcgp*SIxXky?06 zb%eJ-+5lqFWBueH1>_v$c+NgJ7)sT*_c0t6HE}Gx&D(KnR{L!CMLPP+BSmKOTSAtfS1txp=YeQ25VRQ+-PIpDkMvHLA0yjf;B8>X*O-{{T$}=Rf;KIX}Nt z&*tOeK{4dvKP3Zrf}dTgVEw~rYZK05O}e7H+6QT`<|hiV2hv7u|3B7$rp#d$_|W zIRx-j6P|O9qPbPOI{ZsAS&H>)+T74Bb!GczEKFi@lFDo!72rENtT zJ#GPGQChQ&HeGC8)lhHrEXbaswljc01o6&s{+Z88Jb*ruWkv^K$%dfmuF+<%2B)fO zS|*_m8PrD5#XEQiCU6nl=Y&(A&qF7Wm;T`({{WBvA^Atpjhb1kR^L+6q!NJ1xx+FL z51tN5{A6{b=GuY`&QOh1Q7(Z))Sp#){=KcC7E`z}U)T;Y`woD_E1g-4xh!$&E4dZ@ zMzyNiJ%6*5x2DZ)C5Q$2J3tMQSXzyCM4sjjCq-?RHqC;QSLtf0QB#+yRL?@ zr+1%f==SSbGC`=764qU2GT9A03jUOgeZ0%Ph6gypjTrn8-~L6Me2NsNE}Qst?Ktb* zq1w8IKG@Q1L`1q`(XPJ%Cr z)%(_OrP_U|EJ_6%FhFg&<+iJs;6999ga()B-g+MGn-}DNSZd zw;u*f=Jw3letJ6ae_MIg@;E{Zs6ACqe|6%1+QM@l%hFM@kJW}9(YjP zuegFUz~t}%0F#U#QL<22?#82{QmQRYsG8`^L72=i34G`C!F@J;eS;o-i@SI*-i*uA>&UHyBsEimcwZs2M|h9XVorVST3@ zct0Zpsum14CKLk+YYAK=;>>od?h33}wig8C{0?$5e+R0P5ABm@-9coryq0*U79B$$ zP00#2oZ~!XVR^{#eh*6gM|&~W$kfZs;#^Hi$}_65G5bS)dC%>TKmFZHm%7wj`_1ay z)1JhTr=+eC5>_}2Q=Dh~KoAZI_~*w(NxgL&fFe3u6r_+$^2WuG1`bF$&N2_rBaC^+ zRmYgZ5;BH;J%ok^jbzTr0Lrhy{{R*Sc{%4J&srBTnVTJ(5TM2+O@*VKlx@NV8Sn;A z`JdmOyO8Qpbs#saixM)`fZ&287#ne%Z)1-b;~CCz`}Nr}vYkNSRlw6#nslB)46&6b z)H^n*2a-rWM~+A2eh*0gK|bNdjc6HE*Jkc$YR{CC{{V8E{7q_B_9E)`d){j@!=_xVZ%!`n>S{FQigwA$#6jAN$YKu%7*aAi zhCDt+*lm7gFBMTA+H_A=v@s;wo}QXgf~;b3EsZ{8uQX>r?BrqYZ`=d7{vAkpmes_?{WgiXREu`d^<$Q_PVttG zCOk5coxqTJI46uBKc1@j06$bzQ`C~Z4MHgy3Z$SR%z{m-q-P|KNBB<%;QVJDI%NAm zz}BI2$zD3rpqXkw7k*I{_YQJ$0R-UubLWpe2lCN$J;8-olLKoNV^IK_D@xNC+)cSd zl1bVK134HTen*}<&iVZgrK?vEKW+B=T1nSS)2b61ss)K4QpDwmj4lI{!1IiLq0dn& zyl#ccsCwP*2$gKCf|OI!)TFMARX)kYjD?hvz`0;X6aoJL?S4mGt9pQn7G&}26!d7d z8&m2K*M=}v1IGn4C5TH?dOhw^VFRk#y&j=T(vJrQ6Y4_3noXgbes+B>T|F?L!%`mMf+PYFhPb@1~wlUP~D6jhALUIc5Lmd1P2R}T4 zJegbzNXn~pWpcN3MP}L?LQN(;&sreStdYFCjvPxK<%}JK6lPpxmOcQ&o@0}{{(a4V zBz@s=Thv8#w{oMz?8PGy(>MblD|;Err~@In_{irSICcy5ZsKsGsV0jaqchC4`?Flt zh?V2nEl3g+;c`=AF69{Zu<*>o$l`c`C$s%$W>u{Sea&jNiBg?G zVYPAAUkk`{eC4y^P=1+Zj<%x9B)+M?5}re#-Nzo|k_bEu@Os>zS4JU9)YjW-lFR7U z(@HBc`mECce;Ysq@WKE*$5zFaIb``I;J8VK_kmQg((a5mEzR~h9zM>shwzS=DjGAt#ZXe~-$IVB9mNk9D1%PnzuNlThc;~?D7DX8c z7CMv!r_>_NC$?!NdNdO}D2}(fe(6{UvutWAF7Oj!oDFS|%ebtAmk@ z^YhjWxbvw40@(b^f`nUlGbL`|hfa38Lr9iBV;cwK%7NA+;FKJ3 zuYdd8&^(~zyXrjBJ5yJAUTe|mQC(O)5q70y^#b@Smn_5cgTTf)>iB*f`&QVHe=&Mk za&{Cj!%D89CcE79;+NE8<y5UxuoEE5GL^JeDHcQT2*b zS9oewIy{r^4fe|_*IlIY%7`#e_s3Mrj4pu^O4q+on4v8h63Jx9xf`S#SulTY56Ag+ zOmr7!BUh;!7xdL?MP-Hv{-Ts-1M`!}{{TL!!Ou`}p)_X>PR&A1Nu|mMDhYk03XEtjWp_;V|gj+ z`j~-)tGM32m#I-J z_WG3_Rbq)!%vPa6HvBQ%0w_>OW#D|{lhqXn6?Za_Y*b|)ms(VwLmJB(hfpkT5hy1F zD;yqo5Jw~9+tkhkZsiFiS%Ar=>5$5*mo*vPV%)LUI}eb!$QcA+V*n3<j_ z%`8dVqU9QvHT^$L)?Q^nTF#*iR-`0j3Wb%uLKis$CzJU-Lno1l9z3@!Ff~{jmP&A2 z5@95Q<#bT8W%s;C802Rez!~TE1mN|hd$M&iW8UK=tvz)?YJrMZ$Zf~c+}Y%XI6U#l z;OEcDsbvfGXvTK}FB<1=)C`krI(-eLoUFQpE1Z4_By9&7{{T*)bK^eY7~ilWJAL=r z`fQLvYq;nKmDO4}$(Mqmg9b%q!Bd_OBgcX|pX!heM;Ms;n1|{|XTPa64RY+1pdZwc zYfB(2l6|OD?<5AofLr#@T;5G?a(JfoBd7I~v@A+uuRgnOs?4c0qfYeJEy&CZJdD1= 
literal 0
HcmV?d00001

diff --git a/docs/en/_static/js/custom.js b/docs/en/_static/js/custom.js
new file mode 100644
index 0000000..3eec9f4
--- /dev/null
+++ b/docs/en/_static/js/custom.js
@@ -0,0 +1,10 @@
+var collapsedSections = ['Advanced Guides', 'Model Zoo', 'Visualization', 'Analysis Tools', 'Deployment', 'Notes'];
+
+$(document).ready(function () {
+  $('.model-summary').DataTable({
+    "stateSave": false,
+    "lengthChange": false,
+    "pageLength": 20,
+    "order": []
+  });
+});
diff --git a/docs/en/_templates/404.html b/docs/en/_templates/404.html
new file mode 100644
index 0000000..639d255
--- /dev/null
+++ b/docs/en/_templates/404.html
@@ -0,0 +1,18 @@
+{% extends "layout.html" %}
+
+{% block body %}
+

+<h1>Page Not Found</h1>
+
+<p>
+  The page you are looking for cannot be found.
+</p>
+<p>
+  If you just switched documentation versions, it is likely that the page you were on is moved. You can look for it in
+  the content table left, or go to the homepage.
+</p>
+<p>
+  If you cannot find documentation you want, please open an issue to tell us!
+</p>
+ +{% endblock %} diff --git a/docs/en/_templates/autosummary/class.rst b/docs/en/_templates/autosummary/class.rst new file mode 100644 index 0000000..4c3a7a9 --- /dev/null +++ b/docs/en/_templates/autosummary/class.rst @@ -0,0 +1,13 @@ +.. role:: hidden + :class: hidden-section +.. currentmodule:: {{ module }} + + +{{ name | underline}} + +.. autoclass:: {{ name }} + :members: + +.. + autogenerated from _templates/autosummary/class.rst + note it does not have :inherited-members: diff --git a/docs/en/_templates/callable.rst b/docs/en/_templates/callable.rst new file mode 100644 index 0000000..3a7b9d2 --- /dev/null +++ b/docs/en/_templates/callable.rst @@ -0,0 +1,14 @@ +.. role:: hidden + :class: hidden-section +.. currentmodule:: {{ module }} + + +{{ name | underline}} + +.. autoclass:: {{ name }} + :members: + :special-members: __call__ + +.. + autogenerated from _templates/callable.rst + note it does not have :inherited-members: diff --git a/docs/en/_templates/data_transform.rst b/docs/en/_templates/data_transform.rst new file mode 100644 index 0000000..376bfe9 --- /dev/null +++ b/docs/en/_templates/data_transform.rst @@ -0,0 +1,13 @@ +.. role:: hidden + :class: hidden-section +.. currentmodule:: {{ module }} + + +{{ name | underline}} + +.. autoclass:: {{ name }} + :members: transform + +.. + autogenerated from _templates/callable.rst + note it does not have :inherited-members: diff --git a/docs/en/advanced_guides/convention.md b/docs/en/advanced_guides/convention.md new file mode 100644 index 0000000..9edd04c --- /dev/null +++ b/docs/en/advanced_guides/convention.md @@ -0,0 +1,120 @@ +# Convention in MMPretrain + +## Model Naming Convention + +We follow the below convention to name models. Contributors are advised to follow the same style. The model names are divided into five parts: algorithm info, module information, pretrain information, training information and data information. Logically, different parts are concatenated by underscores `'_'`, and words in the same part are concatenated by dashes `'-'`. + +```text +{algorithm info}_{module info}_{pretrain info}_{training info}_{data info} +``` + +- `algorithm info` (optional): The main algorithm information, it's includes the main training algorithms like MAE, BEiT, etc. +- `module info`: The module information, it usually includes the backbone name, such as resnet, vit, etc. +- `pretrain info`: (optional): The pretrain model information, such as the pretrain model is trained on ImageNet-21k. +- `training info`: The training information, some training schedule, including batch size, lr schedule, data augment and the like. +- `data info`: The data information, it usually includes the dataset name, input size and so on, such as imagenet, cifar, etc. + +### Algorithm information + +The main algorithm name to train the model. For example: + +- `simclr` +- `mocov2` +- `eva-mae-style` + +The model trained by supervised image classification can omit this field. + +### Module information + +The modules of the model, usually, the backbone must be included in this field, and the neck and head +information can be omitted. For example: + +- `resnet50` +- `vit-base-p16` +- `swin-base` + +### Pretrain information + +If the model is a fine-tuned model from a pre-trained model, we need to record some information of the +pre-trained model. For example: + +- The source of the pre-trained model: `fb`, `openai`, etc. +- The method to train the pre-trained model: `clip`, `mae`, `distill`, etc. 
+- The dataset used for pre-training: `in21k`, `laion2b`, etc. (`in1k` can be omitted.) +- The training duration: `300e`, `1600e`, etc. + +Not all information is necessary, only select the necessary information to distinguish different pre-trained +models. + +At the end of this field, use a `-pre` as an identifier, like `mae-in21k-pre`. + +### Training information + +Training schedule, including training type, `batch size`, `lr schedule`, data augment, special loss functions and so on: + +- format `{gpu x batch_per_gpu}`, such as `8xb32` + +Training type (mainly seen in the transformer network, such as the `ViT` algorithm, which is usually divided into two training type: pre-training and fine-tuning): + +- `ft` : configuration file for fine-tuning +- `pt` : configuration file for pretraining + +Training recipe. Usually, only the part that is different from the original paper will be marked. These methods will be arranged in the order `{pipeline aug}-{train aug}-{loss trick}-{scheduler}-{epochs}`. + +- `coslr-200e` : use cosine scheduler to train 200 epochs +- `autoaug-mixup-lbs-coslr-50e` : use `autoaug`, `mixup`, `label smooth`, `cosine scheduler` to train 50 epochs + +If the model is converted from a third-party repository like the official repository, the training information +can be omitted and use a `3rdparty` as an identifier. + +### Data information + +- `in1k` : `ImageNet1k` dataset, default to use the input image size of 224x224; +- `in21k` : `ImageNet21k` dataset, also called `ImageNet22k` dataset, default to use the input image size of 224x224; +- `in1k-384px` : Indicates that the input image size is 384x384; +- `cifar100` + +### Model Name Example + +```text +vit-base-p32_clip-openai-pre_3rdparty_in1k +``` + +- `vit-base-p32`: The module information +- `clip-openai-pre`: The pre-train information. + - `clip`: The pre-train method is clip. + - `openai`: The pre-trained model is come from OpenAI. + - `pre`: The pre-train identifier. +- `3rdparty`: The model is converted from a third-party repository. +- `in1k`: Dataset information. The model is trained from ImageNet-1k dataset and the input size is `224x224`. + +```text +beit_beit-base-p16_8xb256-amp-coslr-300e_in1k +``` + +- `beit`: The algorithm information +- `beit-base`: The module information, since the backbone is a modified ViT from BEiT, the backbone name is + also `beit`. +- `8xb256-amp-coslr-300e`: The training information. + - `8xb256`: Use 8 GPUs and the batch size on each GPU is 256. + - `amp`: Use automatic-mixed-precision training. + - `coslr`: Use cosine annealing learning rate scheduler. + - `300e`: To train 300 epochs. +- `in1k`: Dataset information. The model is trained from ImageNet-1k dataset and the input size is `224x224`. + +## Config File Naming Convention + +The naming of the config file is almost the same with the model name, with several difference: + +- The training information is necessary, and cannot be `3rdparty`. +- If the config file only includes backbone settings, without neither head settings nor dataset settings. We + will name it as `{module info}_headless.py`. This kind of config files are usually used for third-party + pre-trained models on large datasets. + +## Checkpoint Naming Convention + +The naming of the weight mainly includes the model name, date and hash value. 
+ +```text +{model_name}_{date}-{hash}.pth +``` diff --git a/docs/en/advanced_guides/datasets.md b/docs/en/advanced_guides/datasets.md new file mode 100644 index 0000000..1a018e4 --- /dev/null +++ b/docs/en/advanced_guides/datasets.md @@ -0,0 +1,72 @@ +# Adding New Dataset + +You can write a new dataset class inherited from `BaseDataset`, and overwrite `load_data_list(self)`, +like [CIFAR10](https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/datasets/cifar.py) and [ImageNet](https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/datasets/imagenet.py). +Typically, this function returns a list, where each sample is a dict, containing necessary data information, e.g., `img` and `gt_label`. + +Assume we are going to implement a `Filelist` dataset, which takes filelists for both training and testing. The format of annotation list is as follows: + +```text +000001.jpg 0 +000002.jpg 1 +``` + +## 1. Create Dataset Class + +We can create a new dataset in `mmpretrain/datasets/filelist.py` to load the data. + +```python +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module() +class Filelist(BaseDataset): + + def load_data_list(self): + assert isinstance(self.ann_file, str), + + data_list = [] + with open(self.ann_file) as f: + samples = [x.strip().split(' ') for x in f.readlines()] + for filename, gt_label in samples: + img_path = add_prefix(filename, self.img_prefix) + info = {'img_path': img_path, 'gt_label': int(gt_label)} + data_list.append(info) + return data_list +``` + +## 2. Add to the package + +And add this dataset class in `mmpretrain/datasets/__init__.py` + +```python +from .base_dataset import BaseDataset +... +from .filelist import Filelist + +__all__ = [ + 'BaseDataset', ... ,'Filelist' +] +``` + +## 3. Modify Related Config + +Then in the config, to use `Filelist` you can modify the config as the following + +```python +train_dataloader = dict( + ... + dataset=dict( + type='Filelist', + ann_file='image_list.txt', + pipeline=train_pipeline, + ) +) +``` + +All the dataset classes inherit from [`BaseDataset`](https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/datasets/base_dataset.py) have **lazy loading** and **memory saving** features, you can refer to related documents of {external+mmengine:doc}`BaseDataset `. + +```{note} +If the dictionary of the data sample contains 'img_path' but not 'img', then 'LoadImgFromFile' transform must be added in the pipeline. +``` diff --git a/docs/en/advanced_guides/evaluation.md b/docs/en/advanced_guides/evaluation.md new file mode 100644 index 0000000..d7978ea --- /dev/null +++ b/docs/en/advanced_guides/evaluation.md @@ -0,0 +1,103 @@ +# Customize Evaluation Metrics + +## Use metrics in MMPretrain + +In MMPretrain, we have provided multiple metrics for both single-label classification and multi-label +classification: + +**Single-label Classification**: + +- [`Accuracy`](mmpretrain.evaluation.Accuracy) +- [`SingleLabelMetric`](mmpretrain.evaluation.SingleLabelMetric), including precision, recall, f1-score and + support. + +**Multi-label Classification**: + +- [`AveragePrecision`](mmpretrain.evaluation.AveragePrecision), or AP (mAP). +- [`MultiLabelMetric`](mmpretrain.evaluation.MultiLabelMetric), including precision, recall, f1-score and + support. + +To use these metrics during validation and testing, we need to modify the `val_evaluator` and `test_evaluator` +fields in the config file. + +Here is several examples: + +1. 
Calculate top-1 and top-5 accuracy during both validation and test. + + ```python + val_evaluator = dict(type='Accuracy', topk=(1, 5)) + test_evaluator = val_evaluator + ``` + +2. Calculate top-1 accuracy, top-5 accuracy, precision and recall during both validation and test. + + ```python + val_evaluator = [ + dict(type='Accuracy', topk=(1, 5)), + dict(type='SingleLabelMetric', items=['precision', 'recall']), + ] + test_evaluator = val_evaluator + ``` + +3. Calculate mAP (mean AveragePrecision), CP (Class-wise mean Precision), CR (Class-wise mean Recall), CF + (Class-wise mean F1-score), OP (Overall mean Precision), OR (Overall mean Recall) and OF1 (Overall mean + F1-score). + + ```python + val_evaluator = [ + dict(type='AveragePrecision'), + dict(type='MultiLabelMetric', average='macro'), # class-wise mean + dict(type='MultiLabelMetric', average='micro'), # overall mean + ] + test_evaluator = val_evaluator + ``` + +## Add new metrics + +MMPretrain supports the implementation of customized evaluation metrics for users who pursue higher customization. + +You need to create a new file under `mmpretrain/evaluation/metrics`, and implement the new metric in the file, for example, in `mmpretrain/evaluation/metrics/my_metric.py`. And create a customized evaluation metric class `MyMetric` which inherits [`BaseMetric in MMEngine`](mmengine.evaluator.BaseMetric). + +The data format processing method `process` and the metric calculation method `compute_metrics` need to be overwritten respectively. Add it to the `METRICS` registry to implement any customized evaluation metric. + +```python +from mmengine.evaluator import BaseMetric +from mmpretrain.registry import METRICS + +@METRICS.register_module() +class MyMetric(BaseMetric): + + def process(self, data_batch: Sequence[Dict], data_samples: Sequence[Dict]): + """ The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + `data_batch` stores the batch data from dataloader, + and `data_samples` stores the batch outputs from model. + """ + ... + + def compute_metrics(self, results: List): + """ Compute the metrics from processed results and returns the evaluation results. + """ + ... +``` + +Then, import it in the `mmpretrain/evaluation/metrics/__init__.py` to add it into the `mmpretrain.evaluation` package. + +```python +# In mmpretrain/evaluation/metrics/__init__.py +... +from .my_metric import MyMetric + +__all__ = [..., 'MyMetric'] +``` + +Finally, use `MyMetric` in the `val_evaluator` and `test_evaluator` field of config files. + +```python +val_evaluator = dict(type='MyMetric', ...) +test_evaluator = val_evaluator +``` + +```{note} +More details can be found in {external+mmengine:doc}`MMEngine Documentation: Evaluation `. +``` diff --git a/docs/en/advanced_guides/modules.md b/docs/en/advanced_guides/modules.md new file mode 100644 index 0000000..fb34aed --- /dev/null +++ b/docs/en/advanced_guides/modules.md @@ -0,0 +1,511 @@ +# Customize Models + +In our design, a complete model is defined as a top-level module which contains several model components based on their functionalities. + +- model: a top-level module defines the type of the task, such as `ImageClassifier` for image classification, `MAE` for self-supervised leanrning, `ImageToImageRetriever` for image retrieval. +- backbone: usually a feature extraction network that records the major differences between models, e.g., `ResNet`, `MobileNet`. 
+- neck: the component between backbone and head, e.g., `GlobalAveragePooling`. +- head: the component for specific tasks, e.g., `ClsHead`, `ContrastiveHead`. +- loss: the component in the head for calculating losses, e.g., `CrossEntropyLoss`, `LabelSmoothLoss`. +- target_generator: the component for self-supervised leanrning task specifically, e.g., `VQKD`, `HOGGenerator`. + +## Add a new model + +Generally, for image classification and retrieval tasks, the pipelines are consistent. However, the pipelines are different from each self-supervised leanrning algorithms, like `MAE` and `BEiT`. Thus, in this section, we will explain how to add your self-supervised learning algorithm. + +### Add a new self-supervised learning algorithm + +1. Create a new file `mmpretrain/models/selfsup/new_algorithm.py` and implement `NewAlgorithm` in it. + + ```python + from mmpretrain.registry import MODELS + from .base import BaseSelfSupvisor + + + @MODELS.register_module() + class NewAlgorithm(BaseSelfSupvisor): + + def __init__(self, backbone, neck=None, head=None, init_cfg=None): + super().__init__(init_cfg) + pass + + # ``extract_feat`` function is defined in BaseSelfSupvisor, you could + # overwrite it if needed + def extract_feat(self, inputs, **kwargs): + pass + + # the core function to compute the loss + def loss(self, inputs, data_samples, **kwargs): + pass + + ``` + +2. Import the new algorithm module in `mmpretrain/models/selfsup/__init__.py` + + ```python + ... + from .new_algorithm import NewAlgorithm + + __all__ = [ + ..., + 'NewAlgorithm', + ... + ] + ``` + +3. Use it in your config file. + + ```python + model = dict( + type='NewAlgorithm', + backbone=..., + neck=..., + head=..., + ... + ) + ``` + +## Add a new backbone + +Here we present how to develop a new backbone component by an example of `ResNet_CIFAR`. +As the input size of CIFAR is 32x32, which is much smaller than the default size of 224x224 in ImageNet, this backbone replaces the `kernel_size=7, stride=2` to `kernel_size=3, stride=1` and removes the MaxPooling after the stem layer to avoid forwarding small feature maps to residual blocks. + +The easiest way is to inherit from `ResNet` and only modify the stem layer. + +1. Create a new file `mmpretrain/models/backbones/resnet_cifar.py`. + + ```python + import torch.nn as nn + + from mmpretrain.registry import MODELS + from .resnet import ResNet + + + @MODELS.register_module() + class ResNet_CIFAR(ResNet): + + """ResNet backbone for CIFAR. + + short description of the backbone + + Args: + depth(int): Network depth, from {18, 34, 50, 101, 152}. + ... + """ + + def __init__(self, depth, deep_stem, **kwargs): + # call ResNet init + super(ResNet_CIFAR, self).__init__(depth, deep_stem=deep_stem, **kwargs) + # other specific initializations + assert not self.deep_stem, 'ResNet_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + # override the ResNet method to modify the network structure + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + # Customize the forward method if needed. 
+ x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + # The return value needs to be a tuple with multi-scale outputs from different depths. + # If you don't need multi-scale features, just wrap the output as a one-item tuple. + return tuple(outs) + + def init_weights(self): + # Customize the weight initialization method if needed. + super().init_weights() + + # Disable the weight initialization if loading a pretrained model. + if self.init_cfg is not None and self.init_cfg['type'] == 'Pretrained': + return + + # Usually, we recommend using `init_cfg` to specify weight initialization methods + # of convolution, linear, or normalization layers. If you have some special needs, + # do these extra weight initialization here. + ... + ``` + +```{note} +Replace original registry names from `BACKBONES`, `NECKS`, `HEADS` and `LOSSES` to `MODELS` in OpenMMLab 2.0 design. +``` + +2. Import the new backbone module in `mmpretrain/models/backbones/__init__.py`. + + ```python + ... + from .resnet_cifar import ResNet_CIFAR + + __all__ = [ + ..., 'ResNet_CIFAR' + ] + ``` + +3. Modify the correlated settings in your config file. + + ```python + model = dict( + ... + backbone=dict( + type='ResNet_CIFAR', + depth=18, + ...), + ... + ``` + +### Add a new backbone for self-supervised learning + +For some self-supervised learning algorithms, the backbones are kind of different, such as `MAE`, `BEiT`, etc. Their backbones need to deal with `mask` in order to extract features from visible tokens. + +Take [MAEViT](mmpretrain.models.selfsup.MAEViT) as an example, we need to overwrite `forward` function to compute with `mask`. We also defines `init_weights` to initialize parameters and `random_masking` to generate mask for `MAE` pre-training. + +```python +class MAEViT(VisionTransformer): + """Vision Transformer for MAE pre-training""" + + def __init__(mask_ratio, **kwargs) -> None: + super().__init__(**kwargs) + # position embedding is not learnable during pretraining + self.pos_embed.requires_grad = False + self.mask_ratio = mask_ratio + self.num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + def init_weights(self) -> None: + """Initialize position embedding, patch embedding and cls token.""" + super().init_weights() + # define what if needed + pass + + def random_masking( + self, + x: torch.Tensor, + mask_ratio: float = 0.75 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Generate the mask for MAE Pre-training.""" + pass + + def forward( + self, + x: torch.Tensor, + mask: Optional[bool] = True + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Generate features for masked images. + + The function supports two kind of forward behaviors. If the ``mask`` is + ``True``, the function will generate mask to masking some patches + randomly and get the hidden features for visible patches, which means + the function will be executed as masked imagemodeling pre-training; + if the ``mask`` is ``None`` or ``False``, the forward function will + call ``super().forward()``, which extract features from images without + mask. 
+ """ + if mask is None or False: + return super().forward(x) + + else: + B = x.shape[0] + x = self.patch_embed(x)[0] + # add pos embed w/o cls token + x = x + self.pos_embed[:, 1:, :] + + # masking: length -> length * mask_ratio + x, mask, ids_restore = self.random_masking(x, self.mask_ratio) + + # append cls token + cls_token = self.cls_token + self.pos_embed[:, :1, :] + cls_tokens = cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + for _, layer in enumerate(self.layers): + x = layer(x) + # Use final norm + x = self.norm1(x) + + return (x, mask, ids_restore) + +``` + +## Add a new neck + +Here we take `GlobalAveragePooling` as an example. It is a very simple neck without any arguments. +To add a new neck, we mainly implement the `forward` function, which applies some operations on the output from the backbone and forwards the results to the head. + +1. Create a new file in `mmpretrain/models/necks/gap.py`. + + ```python + import torch.nn as nn + + from mmpretrain.registry import MODELS + + @MODELS.register_module() + class GlobalAveragePooling(nn.Module): + + def __init__(self): + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, inputs): + # we regard inputs as tensor for simplicity + outs = self.gap(inputs) + outs = outs.view(inputs.size(0), -1) + return outs + ``` + +2. Import the new neck module in `mmpretrain/models/necks/__init__.py`. + + ```python + ... + from .gap import GlobalAveragePooling + + __all__ = [ + ..., 'GlobalAveragePooling' + ] + ``` + +3. Modify the correlated settings in your config file. + + ```python + model = dict( + neck=dict(type='GlobalAveragePooling'), + ) + ``` + +## Add a new head + +### Based on ClsHead + +Here we present how to develop a new head by the example of simplified `VisionTransformerClsHead` as the following. +To implement a new head, we need to implement a `pre_logits` method for processes before the final classification head and a `forward` method. + +:::{admonition} Why do we need the `pre_logits` method? +:class: note + +In classification tasks, we usually use a linear layer to do the final classification. And sometimes, we need +to obtain the feature before the final classification, which is the output of the `pre_logits` method. +::: + +1. Create a new file in `mmpretrain/models/heads/vit_head.py`. + + ```python + import torch.nn as nn + + from mmpretrain.registry import MODELS + from .cls_head import ClsHead + + + @MODELS.register_module() + class VisionTransformerClsHead(ClsHead): + + def __init__(self, num_classes, in_channels, hidden_dim, **kwargs): + super().__init__(**kwargs) + self.in_channels = in_channels + self.num_classes = num_classes + self.hidden_dim = hidden_dim + + self.fc1 = nn.Linear(in_channels, hidden_dim) + self.act = nn.Tanh() + self.fc2 = nn.Linear(hidden_dim, num_classes) + + def pre_logits(self, feats): + # The output of the backbone is usually a tuple from multiple depths, + # and for classification, we only need the final output. + feat = feats[-1] + + # The final output of VisionTransformer is a tuple of patch tokens and + # classification tokens. We need classification tokens here. + _, cls_token = feat + + # Do all works except the final classification linear layer. + return self.act(self.fc1(cls_token)) + + def forward(self, feats): + pre_logits = self.pre_logits(feats) + + # The final classification linear layer. + cls_score = self.fc2(pre_logits) + return cls_score + ``` + +2. Import the module in `mmpretrain/models/heads/__init__.py`. + + ```python + ... 
+ from .vit_head import VisionTransformerClsHead + + __all__ = [ + ..., 'VisionTransformerClsHead' + ] + ``` + +3. Modify the correlated settings in your config file. + + ```python + model = dict( + head=dict( + type='VisionTransformerClsHead', + ..., + )) + ``` + +### Based on BaseModule + +Here is an example of `MAEPretrainHead`, which is based on `BaseModule` and implemented for mask image modeling task. It is required to implement `loss` function to generate loss, but the other helper functions are optional. + +```python +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class MAEPretrainHead(BaseModule): + """Head for MAE Pre-training.""" + + def __init__(self, + loss: dict, + norm_pix: bool = False, + patch_size: int = 16) -> None: + super().__init__() + self.norm_pix = norm_pix + self.patch_size = patch_size + self.loss_module = MODELS.build(loss) + + def patchify(self, imgs: torch.Tensor) -> torch.Tensor: + """Split images into non-overlapped patches.""" + p = self.patch_size + assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0 + + h = w = imgs.shape[2] // p + x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p)) + x = torch.einsum('nchpwq->nhwpqc', x) + x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3)) + return x + + def construct_target(self, target: torch.Tensor) -> torch.Tensor: + """Construct the reconstruction target.""" + target = self.patchify(target) + if self.norm_pix: + # normalize the target image + mean = target.mean(dim=-1, keepdim=True) + var = target.var(dim=-1, keepdim=True) + target = (target - mean) / (var + 1.e-6)**.5 + + return target + + def loss(self, pred: torch.Tensor, target: torch.Tensor, + mask: torch.Tensor) -> torch.Tensor: + """Generate loss.""" + target = self.construct_target(target) + loss = self.loss_module(pred, target, mask) + + return loss +``` + +After implementation, the following step is the same as the step-2 and step-3 in [Based on ClsHead](#based-on-clshead) + +## Add a new loss + +To add a new loss function, we mainly implement the `forward` function in the loss module. We should register the loss module as `MODELS` as well. +In addition, it is helpful to leverage the decorator `weighted_loss` to weight the loss for each element. +Assuming that we want to mimic a probabilistic distribution generated from another classification model, we implement an L1Loss to fulfill the purpose as below. + +1. Create a new file in `mmpretrain/models/losses/l1_loss.py`. + + ```python + import torch + import torch.nn as nn + + from mmpretrain.registry import MODELS + from .utils import weighted_loss + + @weighted_loss + def l1_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + + @MODELS.register_module() + class L1Loss(nn.Module): + + def __init__(self, reduction='mean', loss_weight=1.0): + super(L1Loss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss = self.loss_weight * l1_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss + ``` + +2. Import the module in `mmpretrain/models/losses/__init__.py`. + + ```python + ... 
+ from .l1_loss import L1Loss + + __all__ = [ + ..., 'L1Loss' + ] + ``` + +3. Modify loss field in the head configs. + + ```python + model = dict( + head=dict( + loss=dict(type='L1Loss', loss_weight=1.0), + )) + ``` + +Finally, we can combine all the new model components in a config file to create a new model for best practices. Because `ResNet_CIFAR` is not a ViT-based backbone, we do not implement `VisionTransformerClsHead` here. + +```python +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=18, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=512, + loss=dict(type='L1Loss', loss_weight=1.0), + topk=(1, 5), + )) + +``` + +```{tip} +For convenience, the same model components could inherit from existing config files, refers to [Learn about configs](../user_guides/config.md) for more details. +``` diff --git a/docs/en/advanced_guides/pipeline.md b/docs/en/advanced_guides/pipeline.md new file mode 100644 index 0000000..058e813 --- /dev/null +++ b/docs/en/advanced_guides/pipeline.md @@ -0,0 +1,170 @@ +# Customize Data Pipeline + +## Design of Data pipelines + +In the [new dataset tutorial](./datasets.md), we know that the dataset class use the `load_data_list` method +to initialize the entire dataset, and we save the information of every sample to a dict. + +Usually, to save memory usage, we only load image paths and labels in the `load_data_list`, and load full +image content when we use them. Moreover, we may want to do some random data augmentation during picking +samples when training. Almost all data loading, pre-processing, and formatting operations can be configured in +MMPretrain by the **data pipeline**. + +The data pipeline means how to process the sample dict when indexing a sample from the dataset. And it +consists of a sequence of data transforms. Each data transform takes a dict as input, processes it, and outputs a +dict for the next data transform. + +Here is a data pipeline example for ResNet-50 training on ImageNet. + +```python +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] +``` + +All available data transforms in MMPretrain can be found in the [data transforms docs](mmpretrain.datasets.transforms). + +## Modify the training/test pipeline + +The data pipeline in MMPretrain is pretty flexible. You can control almost every step of the data +preprocessing from the config file, but on the other hand, you may be confused facing so many options. + +Here is a common practice and guidance for image classification tasks. + +### Loading + +At the beginning of a data pipeline, we usually need to load image data from the file path. +[`LoadImageFromFile`](mmcv.transforms.LoadImageFromFile) is commonly used to do this task. + +```python +train_pipeline = [ + dict(type='LoadImageFromFile'), + ... +] +``` + +If you want to load data from files with special formats or special locations, you can [implement a new loading +transform](#add-new-data-transforms) and add it at the beginning of the data pipeline. + +### Augmentation and other processing + +During training, we usually need to do data augmentation to avoid overfitting. During the test, we also need to do +some data processing like resizing and cropping. These data transforms will be placed after the loading process. 
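+
+For the test pipeline, the processing is usually deterministic rather than random: resize the short edge, then
+take a center crop before packing. A minimal sketch is below; the `scale` and `crop_size` values are only
+illustrative and should match the resolution your model expects.
+
+```python
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    # Resize the short edge to 256 pixels, keeping the aspect ratio.
+    dict(type='ResizeEdge', scale=256, edge='short'),
+    # Deterministic center crop instead of a random crop.
+    dict(type='CenterCrop', crop_size=224),
+    dict(type='PackInputs'),
+]
+```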
+ +Here is a simple data augmentation recipe example. It will randomly resize and crop the input image to the +specified scale, and randomly flip the image horizontally with probability. + +```python +train_pipeline = [ + ... + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + ... +] +``` + +Here is a heavy data augmentation recipe example used in [Swin-Transformer](../papers/swin_transformer.md) +training. To align with the official implementation, it specified `pillow` as the resize backend and `bicubic` +as the resize algorithm. Moreover, it added [`RandAugment`](mmpretrain.datasets.transforms.RandAugment) and +[`RandomErasing`](mmpretrain.datasets.transforms.RandomErasing) as extra data augmentation method. + +This configuration specified every detail of the data augmentation, and you can simply copy it to your own +config file to apply the data augmentations of the Swin-Transformer. + +```python +bgr_mean = [103.53, 116.28, 123.675] +bgr_std = [57.375, 57.12, 58.395] + +train_pipeline = [ + ... + dict(type='RandomResizedCrop', scale=224, backend='pillow', interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + ... +] +``` + +```{note} +Usually, the data augmentation part in the data pipeline handles only image-wise transforms, but not transforms +like image normalization or mixup/cutmix. It's because we can do image normalization and mixup/cutmix on batch data +to accelerate. To configure image normalization and mixup/cutmix, please use the [data preprocessor](mmpretrain.models.utils.data_preprocessor). +``` + +### Formatting + +The formatting is to collect training data from the data information dict and convert these data to +model-friendly format. + +In most cases, you can simply use [`PackInputs`](mmpretrain.datasets.transforms.PackInputs), and it will +convert the image in NumPy array format to PyTorch tensor, and pack the ground truth categories information and +other meta information as a [`DataSample`](mmpretrain.structures.DataSample). + +```python +train_pipeline = [ + ... + dict(type='PackInputs'), +] +``` + +## Add new data transforms + +1. Write a new data transform in any file, e.g., `my_transform.py`, and place it in + the folder `mmpretrain/datasets/transforms/`. The data transform class needs to inherit + the [`mmcv.transforms.BaseTransform`](mmcv.transforms.BaseTransform) class and override + the `transform` method which takes a dict as input and returns a dict. + + ```python + from mmcv.transforms import BaseTransform + from mmpretrain.registry import TRANSFORMS + + @TRANSFORMS.register_module() + class MyTransform(BaseTransform): + + def transform(self, results): + # Modify the data information dict `results`. + return results + ``` + +2. Import the new class in the `mmpretrain/datasets/transforms/__init__.py`. + + ```python + ... + from .my_transform import MyTransform + + __all__ = [ + ..., 'MyTransform' + ] + ``` + +3. Use it in config files. + + ```python + train_pipeline = [ + ... + dict(type='MyTransform'), + ... 
+ ] + ``` + +## Pipeline visualization + +After designing data pipelines, you can use the [visualization tools](../useful_tools/dataset_visualization.md) to view the performance. diff --git a/docs/en/advanced_guides/runtime.md b/docs/en/advanced_guides/runtime.md new file mode 100644 index 0000000..8150fb1 --- /dev/null +++ b/docs/en/advanced_guides/runtime.md @@ -0,0 +1,221 @@ +# Customize Runtime Settings + +The runtime configurations include many helpful functionalities, like checkpoint saving, logger configuration, +etc. In this tutorial, we will introduce how to configure these functionalities. + +## Save Checkpoint + +The checkpoint saving functionality is a default hook during training. And you can configure it in the +`default_hooks.checkpoint` field. + +```{note} +The hook mechanism is widely used in all OpenMMLab libraries. Through hooks, you can plug in many +functionalities without modifying the main execution logic of the runner. + +A detailed introduction of hooks can be found in {external+mmengine:doc}`Hooks `. +``` + +**The default settings** + +```python +default_hooks = dict( + ... + checkpoint = dict(type='CheckpointHook', interval=1) + ... +) +``` + +Here are some usual arguments, and all available arguments can be found in the [CheckpointHook](mmengine.hooks.CheckpointHook). + +- **`interval`** (int): The saving period. If use -1, it will never save checkpoints. +- **`by_epoch`** (bool): Whether the **`interval`** is by epoch or by iteration. Defaults to `True`. +- **`out_dir`** (str): The root directory to save checkpoints. If not specified, the checkpoints will be saved in the work directory. If specified, the checkpoints will be saved in the sub-folder of the **`out_dir`**. +- **`max_keep_ckpts`** (int): The maximum checkpoints to keep. In some cases, we want only the latest few checkpoints and would like to delete old ones to save disk space. Defaults to -1, which means unlimited. +- **`save_best`** (str, List[str]): If specified, it will save the checkpoint with the best evaluation result. + Usually, you can simply use `save_best="auto"` to automatically select the evaluation metric. + +And if you want more advanced configuration, please refer to the [CheckpointHook docs](tutorials/hook.md#checkpointhook). + +## Load Checkpoint / Resume Training + +In config files, you can specify the loading and resuming functionality as below: + +```python +# load from which checkpoint +load_from = "Your checkpoint path" + +# whether to resume training from the loaded checkpoint +resume = False +``` + +The `load_from` field can be either a local path or an HTTP path. And you can resume training from the checkpoint by +specify `resume=True`. + +```{tip} +You can also enable auto resuming from the latest checkpoint by specifying `load_from=None` and `resume=True`. +Runner will find the latest checkpoint from the work directory automatically. +``` + +If you are training models by our `tools/train.py` script, you can also use `--resume` argument to resume +training without modifying the config file manually. + +```bash +# Automatically resume from the latest checkpoint. +python tools/train.py configs/resnet/resnet50_8xb32_in1k.py --resume + +# Resume from the specified checkpoint. +python tools/train.py configs/resnet/resnet50_8xb32_in1k.py --resume checkpoints/resnet.pth +``` + +## Randomness Configuration + +In the `randomness` field, we provide some options to make the experiment as reproducible as possible. 
+ +By default, we won't specify seed in the config file, and in every experiment, the program will generate a random seed. + +**Default settings:** + +```python +randomness = dict(seed=None, deterministic=False) +``` + +To make the experiment more reproducible, you can specify a seed and set `deterministic=True`. The influence +of the `deterministic` option can be found [here](https://pytorch.org/docs/stable/notes/randomness.html#cuda-convolution-benchmarking). + +## Log Configuration + +The log configuration relates to multiple fields. + +In the `log_level` field, you can specify the global logging level. See {external+python:ref}`Logging Levels` for a list of levels. + +```python +log_level = 'INFO' +``` + +In the `default_hooks.logger` field, you can specify the logging interval during training and testing. And all +available arguments can be found in the [LoggerHook docs](tutorials/hook.md#loggerhook). + +```python +default_hooks = dict( + ... + # print log every 100 iterations. + logger=dict(type='LoggerHook', interval=100), + ... +) +``` + +In the `log_processor` field, you can specify the log smooth method. Usually, we use a window with length of 10 +to smooth the log and output the mean value of all information. If you want to specify the smooth method of +some information finely, see the {external+mmengine:doc}`LogProcessor docs `. + +```python +# The default setting, which will smooth the values in training log by a 10-length window. +log_processor = dict(window_size=10) +``` + +In the `visualizer` field, you can specify multiple backends to save the log information, such as TensorBoard +and WandB. More details can be found in the [Visualizer section](#visualizer). + +## Custom Hooks + +Many above functionalities are implemented by hooks, and you can also plug-in other custom hooks by modifying +`custom_hooks` field. Here are some hooks in MMEngine and MMPretrain that you can use directly, such as: + +- [EMAHook](mmpretrain.engine.hooks.EMAHook) +- [SyncBuffersHook](mmengine.hooks.SyncBuffersHook) +- [EmptyCacheHook](mmengine.hooks.EmptyCacheHook) +- [ClassNumCheckHook](mmpretrain.engine.hooks.ClassNumCheckHook) +- ...... + +For example, EMA (Exponential Moving Average) is widely used in the model training, and you can enable it as +below: + +```python +custom_hooks = [ + dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL'), +] +``` + +## Visualize Validation + +The validation visualization functionality is a default hook during validation. And you can configure it in the +`default_hooks.visualization` field. + +By default, we disabled it, and you can enable it by specifying `enable=True`. And more arguments can be found in +the [VisualizationHook docs](mmpretrain.engine.hooks.VisualizationHook). + +```python +default_hooks = dict( + ... + visualization=dict(type='VisualizationHook', enable=False), + ... +) +``` + +This hook will select some images in the validation dataset, and tag the prediction results on these images +during every validation process. You can use it to watch the varying of model performance on actual images +during training. + +In addition, if the images in your validation dataset are small (\<100), you can rescale them before +visualization by specifying `rescale_factor=2.` or higher. + +## Visualizer + +The visualizer is used to record all kinds of information during training and test, including logs, images and +scalars. By default, the recorded information will be saved at the `vis_data` folder under the work directory. 
+
+**Default settings:**
+
+```python
+visualizer = dict(
+    type='UniversalVisualizer',
+    vis_backends=[
+        dict(type='LocalVisBackend'),
+    ]
+)
+```
+
+Usually, the most useful feature is saving the logs and scalars, such as `loss`, to different backends.
+For example, to save them to TensorBoard, simply set the config as below:
+
+```python
+visualizer = dict(
+    type='UniversalVisualizer',
+    vis_backends=[
+        dict(type='LocalVisBackend'),
+        dict(type='TensorboardVisBackend'),
+    ]
+)
+```
+
+Or save them to WandB as below:
+
+```python
+visualizer = dict(
+    type='UniversalVisualizer',
+    vis_backends=[
+        dict(type='LocalVisBackend'),
+        dict(type='WandbVisBackend'),
+    ]
+)
+```
+
+## Environment Configuration
+
+In the `env_cfg` field, you can configure some low-level parameters, like cuDNN, multi-process, and distributed
+communication.
+
+**Please make sure you understand the meaning of these parameters before modifying them.**
+
+```python
+env_cfg = dict(
+    # whether to enable cudnn benchmark
+    cudnn_benchmark=False,
+
+    # set multi-process parameters
+    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+
+    # set distributed parameters
+    dist_cfg=dict(backend='nccl'),
+)
+```
diff --git a/docs/en/advanced_guides/schedule.md b/docs/en/advanced_guides/schedule.md
new file mode 100644
index 0000000..f020759
--- /dev/null
+++ b/docs/en/advanced_guides/schedule.md
@@ -0,0 +1,361 @@
+# Customize Training Schedule
+
+In our codebase, [default training schedules](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/schedules) have been provided for common datasets such as CIFAR, ImageNet, etc. If we attempt to experiment on these datasets for higher accuracy or on different new methods and datasets, we may need to modify the strategies.
+
+In this tutorial, we will introduce how to modify configs to construct optimizers, apply fine-grained parameter-wise configuration, use gradient clipping and gradient accumulation, as well as customize learning rate and momentum schedules. Furthermore, we introduce a template to customize self-implemented optimization methods for the project.
+
+## Customize optimization
+
+We use the `optim_wrapper` field to configure the optimization strategies, which includes the choice of optimizer, automatic mixed precision training, parameter-wise configurations, and gradient clipping and accumulation. Details are described below.
+
+### Use optimizers supported by PyTorch
+
+We support all the optimizers implemented by PyTorch. To use them, please change the `optimizer` field of config files.
+
+For example, if you want to use [`SGD`](torch.optim.SGD), the modification in the config file could be as follows. Notice that optimization-related settings should all be wrapped inside the `optim_wrapper`.
+
+```python
+optim_wrapper = dict(
+    type='OptimWrapper',
+    optimizer=dict(type='SGD', lr=0.0003, weight_decay=0.0001)
+)
+```
+
+```{note}
+`type` in optimizer is not a constructor but an optimizer name in PyTorch.
+Refer to {external+torch:ref}`List of optimizers supported by PyTorch ` for more choices.
+```
+
+To modify the learning rate of the model, just modify the `lr` in the config of the optimizer.
+You can also directly set other arguments according to the [API doc](torch.optim) of PyTorch.
+
+For example, if you want to use [`Adam`](torch.optim.Adam) with settings like `torch.optim.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)` in PyTorch, you could use the config below:
+
+```python
+optim_wrapper = dict(
+    type='OptimWrapper',
+    optimizer=dict(
+        type='Adam',
+        lr=0.001,
+        betas=(0.9, 0.999),
+        eps=1e-08,
+        weight_decay=0,
+        amsgrad=False),
+)
+```
+
+````{note}
+The default type of the `optim_wrapper` field is [`OptimWrapper`](mmengine.optim.OptimWrapper), therefore you can
+usually omit the type field, like:
+
+```python
+optim_wrapper = dict(
+    optimizer=dict(
+        type='Adam',
+        lr=0.001,
+        betas=(0.9, 0.999),
+        eps=1e-08,
+        weight_decay=0,
+        amsgrad=False))
+```
+````
+
+### Use AMP training
+
+If we want to use automatic mixed precision training, we can simply change the type of `optim_wrapper` to `AmpOptimWrapper` in config files.
+
+```python
+optim_wrapper = dict(type='AmpOptimWrapper', optimizer=...)
+```
+
+Alternatively, for convenience, we can set the `--amp` parameter to turn on the AMP option directly in the `tools/train.py` script. Refer to the [training tutorial](../user_guides/train.md) for details of starting a training.
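+
+For instance, a sketch of turning on AMP from the command line, using the ResNet-50 config shipped with MMPretrain as an illustration:
+
+```bash
+# Train with automatic mixed precision without modifying the config file.
+python tools/train.py configs/resnet/resnet50_8xb32_in1k.py --amp
+```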
+
+### Parameter-wise finely configuration
+
+Some models may have parameter-specific settings for optimization, for example, no weight decay for the BatchNorm layers or using different learning rates for different network layers.
+To finely configure them, we can use the `paramwise_cfg` argument in `optim_wrapper`.
+
+- **Set different hyper-parameter multipliers for different types of parameters.**
+
+  For instance, we can set `norm_decay_mult=0.` in `paramwise_cfg` to change the weight decay of the weight and bias of normalization layers to zero.
+
+  ```python
+  optim_wrapper = dict(
+      optimizer=dict(type='SGD', lr=0.8, weight_decay=1e-4),
+      paramwise_cfg=dict(norm_decay_mult=0.))
+  ```
+
+  More types of parameters can be configured, as listed below (a combined example is given at the end of this section):
+
+  - `bias_lr_mult`: Multiplier for the learning rate of bias (excluding normalization layers' biases and deformable convolution layers' offsets). Defaults to 1.
+  - `bias_decay_mult`: Multiplier for the weight decay of bias (excluding normalization layers' biases and deformable convolution layers' offsets). Defaults to 1.
+  - `norm_decay_mult`: Multiplier for the weight decay of the weight and bias of normalization layers. Defaults to 1.
+  - `flat_decay_mult`: Multiplier for the weight decay of all one-dimensional parameters. Defaults to 1.
+  - `dwconv_decay_mult`: Multiplier for the weight decay of depth-wise convolution layers. Defaults to 1.
+  - `bypass_duplicate`: Whether to bypass duplicated parameters. Defaults to `False`.
+  - `dcn_offset_lr_mult`: Multiplier for the learning rate of deformable convolution layers. Defaults to 1.
+
+- **Set different hyper-parameter multipliers for specific parameters.**
+
+  MMPretrain can use `custom_keys` in `paramwise_cfg` to apply different learning rates or weight decay to specific parameters.
+
+  For example, to set all learning rates and weight decays of `backbone.layer0` to 0, keep the rest of `backbone` the same as the optimizer, and set the learning rate of `head` to 0.001, use the config below.
+
+  ```python
+  optim_wrapper = dict(
+      optimizer=dict(type='SGD', lr=0.01, weight_decay=0.0001),
+      paramwise_cfg=dict(
+          custom_keys={
+              'backbone.layer0': dict(lr_mult=0, decay_mult=0),
+              'backbone': dict(lr_mult=1),
+              'head': dict(lr_mult=0.1)
+          }))
+  ```
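+
+These options can be combined. The snippet below is only a sketch with hypothetical values: it disables weight decay for normalization layers and all one-dimensional parameters, and lowers the learning rate of the `head` via `custom_keys`:
+
+```python
+optim_wrapper = dict(
+    optimizer=dict(type='AdamW', lr=1e-3, weight_decay=0.05),
+    paramwise_cfg=dict(
+        # no weight decay for normalization layers and 1-D parameters
+        norm_decay_mult=0.,
+        flat_decay_mult=0.,
+        # 10x smaller learning rate for the head parameters
+        custom_keys={'head': dict(lr_mult=0.1)}))
+```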
+
+### Gradient clipping
+
+During training, the loss may approach a steep region of the loss landscape and cause gradient explosion, and gradient clipping helps to stabilize the training process. More introduction can be found in [this page](https://paperswithcode.com/method/gradient-clipping).
+
+Currently, we support the `clip_grad` option in `optim_wrapper` for gradient clipping; refer to the [PyTorch documentation](torch.nn.utils.clip_grad_norm_) for the underlying function.
+
+Here is an example:
+
+```python
+optim_wrapper = dict(
+    optimizer=dict(type='SGD', lr=0.01, weight_decay=0.0001),
+    # norm_type: type of the used p-norm, here norm_type is 2.
+    clip_grad=dict(max_norm=35, norm_type=2))
+```
+
+### Gradient accumulation
+
+When computing resources are limited, the batch size can only be set to a small value, which may degrade the performance of models. Gradient accumulation can be used to work around this problem. We support the `accumulative_counts` option in `optim_wrapper` for gradient accumulation.
+
+Here is an example:
+
+```python
+train_dataloader = dict(batch_size=64)
+optim_wrapper = dict(
+    optimizer=dict(type='SGD', lr=0.01, weight_decay=0.0001),
+    accumulative_counts=4)
+```
+
+This indicates that, during training, the optimizer updates the parameters once every 4 iterations, which is roughly equivalent to:
+
+```python
+train_dataloader = dict(batch_size=256)
+optim_wrapper = dict(
+    optimizer=dict(type='SGD', lr=0.01, weight_decay=0.0001))
+```
+
+## Customize parameter schedules
+
+In training, optimization parameters such as the learning rate and momentum are usually not fixed but change over iterations or epochs. PyTorch supports several learning rate schedulers, but they are not sufficient for complex strategies. In MMPretrain, we provide `param_scheduler` for better control of different parameter schedules.
+
+### Customize learning rate schedules
+
+Learning rate schedulers are widely used to improve performance. We support most of the PyTorch schedulers, including `ExponentialLR`, `LinearLR`, `StepLR`, `MultiStepLR`, etc.
+
+All available learning rate schedulers can be found {external+mmengine:doc}`here `, and the
+names of learning rate schedulers end with `LR`.
+
+- **Single learning rate schedule**
+
+  In most cases, we use only one learning rate schedule for simplicity. For instance, [`MultiStepLR`](mmengine.optim.MultiStepLR) is used as the default learning rate schedule for ResNet. Here, `param_scheduler` is a dictionary.
+
+  ```python
+  param_scheduler = dict(
+      type='MultiStepLR',
+      by_epoch=True,
+      milestones=[100, 150],
+      gamma=0.1)
+  ```
+
+  Or, if we want to use the [`CosineAnnealingLR`](mmengine.optim.CosineAnnealingLR) scheduler to decay the learning rate:
+
+  ```python
+  param_scheduler = dict(
+      type='CosineAnnealingLR',
+      by_epoch=True,
+      T_max=num_epochs)
+  ```
+
+- **Multiple learning rate schedules**
+
+  In some training cases, multiple learning rate schedules are applied for higher accuracy. For example, in the early stage, training is prone to volatility, and warmup is a technique to reduce it.
+  The learning rate increases gradually from a small value to the expected value during warmup, and decays afterwards following other schedules.
+
+  In MMPretrain, simply combining the desired schedules into a list in `param_scheduler` achieves the warmup strategy.
+
+  Here are some examples:
+
+  1. Linear warmup during the first 50 iterations.
+
+     ```python
+      param_scheduler = [
+          # linear warm-up by iters
+          dict(type='LinearLR',
+              start_factor=0.001,
+              by_epoch=False,  # by iters
+              end=50),  # only warm up for the first 50 iters
+          # main learning rate schedule
+          dict(type='MultiStepLR',
+              by_epoch=True,
+              milestones=[8, 11],
+              gamma=0.1)
+      ]
+     ```
+
+  2. Linear warmup and update the learning rate by iteration during the first 10 epochs.
+
+     ```python
+      param_scheduler = [
+          # linear warm-up by epochs in [0, 10) epochs
+          dict(type='LinearLR',
+              start_factor=0.001,
+              by_epoch=True,
+              end=10,
+              convert_to_iter_based=True,  # Update learning rate by iter.
+          ),
+          # use CosineAnnealing schedule after 10 epochs
+          dict(type='CosineAnnealingLR', by_epoch=True, begin=10)
+      ]
+     ```
+
+  Notice that we use the `begin` and `end` arguments here to assign the valid range, which is [`begin`, `end`) for this schedule. The range unit is defined by the `by_epoch` argument. If not specified, `begin` defaults to 0 and `end` defaults to the maximum number of epochs or iterations.
+
+  If the ranges of all schedules are not continuous, the learning rate stays constant in the ignored ranges; otherwise, all valid schedulers are executed in order within a specific stage, which behaves the same as PyTorch [`ChainedScheduler`](torch.optim.lr_scheduler.ChainedScheduler).
+
+  ```{tip}
+  To check that the learning rate curve is as expected, after completing your configuration file, you could use the [optimizer parameter visualization tool](../useful_tools/scheduler_visualization.md) to draw the corresponding learning rate adjustment curve.
+  ```
+
+### Customize momentum schedules
+
+We support using momentum schedulers to modify the optimizer's momentum according to the learning rate, which could make the loss converge faster. The usage is the same as learning rate schedulers.
+
+All available momentum schedulers can be found {external+mmengine:doc}`here `, and their
+names end with `Momentum`.
+
+Here is an example:
+
+```python
+param_scheduler = [
+    # the lr scheduler
+    dict(type='LinearLR', ...),
+    # the momentum scheduler
+    dict(type='LinearMomentum',
+         start_factor=0.001,
+         by_epoch=False,
+         begin=0,
+         end=1000)
+]
+```
+
+## Add new optimizers or constructors
+
+```{note}
+This part will modify the MMPretrain source code or add code to the MMPretrain framework; beginners can skip it.
+```
+
+### Add new optimizers
+
+In academic research and industrial practice, it may be necessary to use optimization methods not implemented by MMPretrain, and you can add them through the following steps.
+
+1. Implement a New Optimizer
+
+   Assume you want to add an optimizer named `MyOptimizer`, which has arguments `a`, `b`, and `c`.
+   You need to create a new file under `mmpretrain/engine/optimizers`, and implement the new optimizer in that file, for example, in `mmpretrain/engine/optimizers/my_optimizer.py`:
+
+   ```python
+   from torch.optim import Optimizer
+   from mmpretrain.registry import OPTIMIZERS
+
+
+   @OPTIMIZERS.register_module()
+   class MyOptimizer(Optimizer):
+
+       def __init__(self, params, a, b, c):
+           ...
+
+       def step(self, closure=None):
+           ...
+   ```
+
+2. Import the Optimizer
+
+   To make the module above discoverable, it should be imported when the program runs.
+
+   Import it in the `mmpretrain/engine/optimizers/__init__.py` to add it into the `mmpretrain.engine` package.
+
+   ```python
+   # In mmpretrain/engine/optimizers/__init__.py
+   ...
+   from .my_optimizer import MyOptimizer  # replace MyOptimizer with your class name
+
+   __all__ = [..., 'MyOptimizer']
+   ```
+
+   At runtime, the `mmpretrain.engine` package is imported automatically, and `MyOptimizer` is registered at the same time.
+
+3. Specify the Optimizer in Config
+
+   Then you can use `MyOptimizer` in the `optim_wrapper.optimizer` field of config files.
+ + ```python + optim_wrapper = dict( + optimizer=dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value)) + ``` + +### Add new optimizer constructors + +Some models may have some parameter-specific settings for optimization, like different weight decay rate for all `BatchNorm` layers. + +Although we already can use [the `optim_wrapper.paramwise_cfg` field](#parameter-wise-finely-configuration) to +configure various parameter-specific optimizer settings. It may still not cover your need. + +Of course, you can modify it. By default, we use the [`DefaultOptimWrapperConstructor`](mmengine.optim.DefaultOptimWrapperConstructor) +class to deal with the construction of optimizer. And during the construction, it fine-grainedly configures the optimizer settings of +different parameters according to the `paramwise_cfg`,which could also serve as a template for new optimizer constructor. + +You can overwrite these behaviors by add new optimizer constructors. + +```python +# In mmpretrain/engine/optimizers/my_optim_constructor.py +from mmengine.optim import DefaultOptimWrapperConstructor +from mmpretrain.registry import OPTIM_WRAPPER_CONSTRUCTORS + + +@OPTIM_WRAPPER_CONSTRUCTORS.register_module() +class MyOptimWrapperConstructor: + + def __init__(self, optim_wrapper_cfg, paramwise_cfg=None): + ... + + def __call__(self, model): + ... +``` + +Here is a specific example of [OptimWrapperConstructor](mmpretrain.engine.optimizers.LearningRateDecayOptimWrapperConstructor). + +And then, import it and use it almost like [the optimizer tutorial](#add-new-optimizers). + +1. Import it in the `mmpretrain/engine/optimizers/__init__.py` to add it into the `mmpretrain.engine` package. + + ```python + # In mmpretrain/engine/optimizers/__init__.py + ... + from .my_optim_constructor import MyOptimWrapperConstructor + + __all__ = [..., 'MyOptimWrapperConstructor'] + ``` + +2. Use `MyOptimWrapperConstructor` in the `optim_wrapper.constructor` field of config files. + + ```python + optim_wrapper = dict( + constructor=dict(type='MyOptimWrapperConstructor'), + optimizer=..., + paramwise_cfg=..., + ) + ``` diff --git a/docs/en/api/apis.rst b/docs/en/api/apis.rst new file mode 100644 index 0000000..074960b --- /dev/null +++ b/docs/en/api/apis.rst @@ -0,0 +1,48 @@ +.. role:: hidden + :class: hidden-section + +.. module:: mmpretrain.apis + +mmpretrain.apis +=================================== + +These are some high-level APIs for classification tasks. + +.. contents:: mmpretrain.apis + :depth: 2 + :local: + :backlinks: top + +Model +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + + list_models + get_model + +Inference +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: callable.rst + + ImageClassificationInferencer + ImageRetrievalInferencer + ImageCaptionInferencer + VisualQuestionAnsweringInferencer + VisualGroundingInferencer + TextToImageRetrievalInferencer + ImageToTextRetrievalInferencer + NLVRInferencer + FeatureExtractor + +.. autosummary:: + :toctree: generated + :nosignatures: + + inference_model diff --git a/docs/en/api/data_process.rst b/docs/en/api/data_process.rst new file mode 100644 index 0000000..af0f6e5 --- /dev/null +++ b/docs/en/api/data_process.rst @@ -0,0 +1,329 @@ +.. role:: hidden + :class: hidden-section + +Data Process +================= + +In MMPreTrain, the data process and the dataset is decomposed. The +datasets only define how to get samples' basic information from the file +system. 
These basic information includes the ground-truth label and raw +images data / the paths of images.The data process includes data transforms, +data preprocessors and batch augmentations. + +- :mod:`Data Transforms `: Transforms includes loading, preprocessing, formatting and etc. +- :mod:`Data Preprocessors `: Processes includes collate, normalization, stacking, channel fliping and etc. + + - :mod:`Batch Augmentations `: Batch augmentation involves multiple samples, such as Mixup and CutMix. + +.. module:: mmpretrain.datasets.transforms + +Data Transforms +-------------------- + +To prepare the inputs data, we need to do some transforms on these basic +information. These transforms includes loading, preprocessing and +formatting. And a series of data transforms makes up a data pipeline. +Therefore, you can find the a ``pipeline`` argument in the configs of dataset, +for example: + +.. code:: python + + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), + ] + + train_dataloader = dict( + .... + dataset=dict( + pipeline=train_pipeline, + ....), + .... + ) + +Every item of a pipeline list is one of the following data transforms class. And if you want to add a custom data transformation class, the tutorial :doc:`Custom Data Pipelines ` will help you. + +.. contents:: + :depth: 1 + :local: + :backlinks: top + +Loading and Formatting +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: data_transform.rst + + LoadImageFromFile + PackInputs + PackMultiTaskInputs + PILToNumpy + NumpyToPIL + Transpose + Collect + +Processing and Augmentation +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: data_transform.rst + + Albumentations + CenterCrop + ColorJitter + EfficientNetCenterCrop + EfficientNetRandomCrop + Lighting + Normalize + RandomCrop + RandomErasing + RandomFlip + RandomGrayscale + RandomResize + RandomResizedCrop + Resize + ResizeEdge + BEiTMaskGenerator + SimMIMMaskGenerator + +Composed Augmentation +""""""""""""""""""""" +Composed augmentation is a kind of methods which compose a series of data +augmentation transforms, such as ``AutoAugment`` and ``RandAugment``. + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: data_transform.rst + + AutoAugment + RandAugment + +The above transforms is composed from a group of policies from the below random +transforms: + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: data_transform.rst + + AutoContrast + Brightness + ColorTransform + Contrast + Cutout + Equalize + GaussianBlur + Invert + Posterize + Rotate + Sharpness + Shear + Solarize + SolarizeAdd + Translate + BaseAugTransform + +MMCV transforms +^^^^^^^^^^^^^^^ + +We also provides many transforms in MMCV. You can use them directly in the config files. Here are some frequently used transforms, and the whole transforms list can be found in :external+mmcv:doc:`api/transforms`. + +Transform Wrapper +^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: data_transform.rst + + MultiView + +.. module:: mmpretrain.models.utils.data_preprocessor + + +TorchVision Transforms +^^^^^^^^^^^^^^^^^^^^^^ + +We also provide all the transforms in TorchVision. You can use them the like following examples: + +**1. 
Use some TorchVision Augs Surrounded by NumpyToPIL and PILToNumpy (Recommendation)** + +Add TorchVision Augs surrounded by ``dict(type='NumpyToPIL', to_rgb=True),`` and ``dict(type='PILToNumpy', to_bgr=True),`` + +.. code:: python + + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='NumpyToPIL', to_rgb=True), # from BGR in cv2 to RGB in PIL + dict(type='torchvision/RandomResizedCrop',size=176), + dict(type='PILToNumpy', to_bgr=True), # from RGB in PIL to BGR in cv2 + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), + ] + + data_preprocessor = dict( + num_classes=1000, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True, # from BGR in cv2 to RGB in PIL + ) + + +**2. Use TorchVision Augs and ToTensor&Normalize** + +Make sure the 'img' has been converted to PIL format from BGR-Numpy format before being processed by TorchVision Augs. + +.. code:: python + + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='NumpyToPIL', to_rgb=True), # from BGR in cv2 to RGB in PIL + dict( + type='torchvision/RandomResizedCrop', + size=176, + interpolation='bilinear'), # accept str format interpolation mode + dict(type='torchvision/RandomHorizontalFlip', p=0.5), + dict( + type='torchvision/TrivialAugmentWide', + interpolation='bilinear'), + dict(type='torchvision/PILToTensor'), + dict(type='torchvision/ConvertImageDtype', dtype=torch.float), + dict( + type='torchvision/Normalize', + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + ), + dict(type='torchvision/RandomErasing', p=0.1), + dict(type='PackInputs'), + ] + + data_preprocessor = dict(num_classes=1000, mean=None, std=None, to_rgb=False) # Normalize in dataset pipeline + + +**3. Use TorchVision Augs Except ToTensor&Normalize** + +.. code:: python + + train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='NumpyToPIL', to_rgb=True), # from BGR in cv2 to RGB in PIL + dict(type='torchvision/RandomResizedCrop', size=176, interpolation='bilinear'), + dict(type='torchvision/RandomHorizontalFlip', p=0.5), + dict(type='torchvision/TrivialAugmentWide', interpolation='bilinear'), + dict(type='PackInputs'), + ] + + # here the Normalize params is for the RGB format + data_preprocessor = dict( + num_classes=1000, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=False, + ) + + +Data Preprocessors +------------------ + +The data preprocessor is also a component to process the data before feeding data to the neural network. +Comparing with the data transforms, the data preprocessor is a module of the classifier, +and it takes a batch of data to process, which means it can use GPU and batch to accelebrate the processing. + +The default data preprocessor in MMPreTrain could do the pre-processing like following: + +1. Move data to the target device. +2. Pad inputs to the maximum size of current batch. +3. Stack inputs to a batch. +4. Convert inputs from bgr to rgb if the shape of input is (3, H, W). +5. Normalize image with defined std and mean. +6. Do batch augmentations like Mixup and CutMix during training. + +You can configure the data preprocessor by the ``data_preprocessor`` field or ``model.data_preprocessor`` field in the config file. Typical usages are as below: + +.. 
code-block:: python + + data_preprocessor = dict( + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True, # convert image from BGR to RGB + ) + +Or define in ``model.data_preprocessor`` as following: + +.. code-block:: python + + model = dict( + backbone = ..., + neck = ..., + head = ..., + data_preprocessor = dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + train_cfg=..., + ) + +Note that the ``model.data_preprocessor`` has higher priority than ``data_preprocessor``. + +.. autosummary:: + :toctree: generated + :nosignatures: + + ClsDataPreprocessor + SelfSupDataPreprocessor + TwoNormDataPreprocessor + VideoDataPreprocessor + +.. module:: mmpretrain.models.utils.batch_augments + +Batch Augmentations +^^^^^^^^^^^^^^^^^^^^ + +The batch augmentation is a component of data preprocessors. It involves multiple samples and mix them in some way, such as Mixup and CutMix. + +These augmentations are usually only used during training, therefore, we use the ``model.train_cfg`` field to configure them in config files. + +.. code-block:: python + + model = dict( + backbone=..., + neck=..., + head=..., + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ]), + ) + +You can also specify the probabilities of every batch augmentation by the ``probs`` field. + +.. code-block:: python + + model = dict( + backbone=..., + neck=..., + head=..., + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ], probs=[0.3, 0.7]) + ) + +Here is a list of batch augmentations can be used in MMPreTrain. + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: callable.rst + + Mixup + CutMix + ResizeMix diff --git a/docs/en/api/datasets.rst b/docs/en/api/datasets.rst new file mode 100644 index 0000000..069880d --- /dev/null +++ b/docs/en/api/datasets.rst @@ -0,0 +1,129 @@ +.. role:: hidden + :class: hidden-section + +.. module:: mmpretrain.datasets + +mmpretrain.datasets +=================================== + +The ``datasets`` package contains several usual datasets for image classification tasks and some dataset wrappers. + +.. contents:: mmpretrain.datasets + :depth: 2 + :local: + :backlinks: top + +Custom Dataset +-------------- + +.. autoclass:: CustomDataset + +ImageNet +-------- + +.. autoclass:: ImageNet + +.. autoclass:: ImageNet21k + +CIFAR +----- + +.. autoclass:: CIFAR10 + +.. autoclass:: CIFAR100 + +MNIST +----- + +.. autoclass:: MNIST + +.. autoclass:: FashionMNIST + +VOC +--- + +.. autoclass:: VOC + +CUB +--- + +.. autoclass:: CUB + +Places205 +--------- + +.. autoclass:: Places205 + +Retrieval +--------- + +.. autoclass:: InShop + +Base classes +------------ + +.. autoclass:: BaseDataset + +.. autoclass:: MultiLabelDataset + +Caltech101 +---------------- + +.. autoclass:: Caltech101 + +Food101 +---------------- + +.. autoclass:: Food101 + +DTD +---------------- + +.. autoclass:: DTD + +FGVCAircraft +---------------- + +.. autoclass:: FGVCAircraft + + +Flowers102 +---------------- + +.. autoclass:: Flowers102 + +StanfordCars +---------------- + +.. autoclass:: StanfordCars + +OxfordIIITPet +---------------- + +.. autoclass:: OxfordIIITPet + +SUN397 +---------------- + +.. autoclass:: SUN397 + +RefCOCO +-------- + +.. autoclass:: RefCOCO + +Dataset Wrappers +---------------- + +.. autoclass:: KFoldDataset + +The dataset wrappers in the MMEngine can be directly used in MMPreTrain. + +.. 
list-table:: + + * - :class:`~mmengine.dataset.ConcatDataset` + - A wrapper of concatenated dataset. + * - :class:`~mmengine.dataset.RepeatDataset` + - A wrapper of repeated dataset. + * - :class:`~mmengine.dataset.ClassBalancedDataset` + - A wrapper of class balanced dataset. diff --git a/docs/en/api/engine.rst b/docs/en/api/engine.rst new file mode 100644 index 0000000..2e67fd0 --- /dev/null +++ b/docs/en/api/engine.rst @@ -0,0 +1,51 @@ +.. role:: hidden + :class: hidden-section + +.. module:: mmpretrain.engine + +mmpretrain.engine +=================================== + +This package includes some runtime components, including hooks, runners, optimizers and loops. These components are useful in +classification tasks but not supported by MMEngine yet. + +.. note:: + + Some components may be moved to MMEngine in the future. + +.. contents:: mmpretrain.engine + :depth: 2 + :local: + :backlinks: top + +.. module:: mmpretrain.engine.hooks + +Hooks +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + + ClassNumCheckHook + PreciseBNHook + VisualizationHook + PrepareProtoBeforeValLoopHook + SetAdaptiveMarginsHook + EMAHook + SimSiamHook + DenseCLHook + SwAVHook + +.. module:: mmpretrain.engine.optimizers + +Optimizers +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + + Lamb + LARS + LearningRateDecayOptimWrapperConstructor diff --git a/docs/en/api/evaluation.rst b/docs/en/api/evaluation.rst new file mode 100644 index 0000000..bddea20 --- /dev/null +++ b/docs/en/api/evaluation.rst @@ -0,0 +1,47 @@ +.. role:: hidden + :class: hidden-section + +.. module:: mmpretrain.evaluation + +mmpretrain.evaluation +=================================== + +This package includes metrics and evaluators for classification tasks. + +.. contents:: mmpretrain.evaluation + :depth: 1 + :local: + :backlinks: top + +Single Label Metric +---------------------- + +.. autosummary:: + :toctree: generated + :nosignatures: + + Accuracy + SingleLabelMetric + ConfusionMatrix + +Multi Label Metric +---------------------- +.. autosummary:: + :toctree: generated + :nosignatures: + + AveragePrecision + MultiLabelMetric + VOCAveragePrecision + VOCMultiLabelMetric + +Retrieval Metric +---------------------- + +.. autosummary:: + :toctree: generated + :nosignatures: + :template: classtemplate.rst + + RetrievalRecall + RetrievalAveragePrecision diff --git a/docs/en/api/models.rst b/docs/en/api/models.rst new file mode 100644 index 0000000..3098032 --- /dev/null +++ b/docs/en/api/models.rst @@ -0,0 +1,364 @@ +.. role:: hidden + :class: hidden-section + +.. module:: mmpretrain.models + +mmpretrain.models +=================================== + +The ``models`` package contains several sub-packages for addressing the different components of a model. + +- :mod:`~mmpretrain.models.classifiers`: The top-level module which defines the whole process of a classification model. +- :mod:`~mmpretrain.models.selfsup`: The top-level module which defines the whole process of a self-supervised learning model. +- :mod:`~mmpretrain.models.retrievers`: The top-level module which defines the whole process of a retrieval model. +- :mod:`~mmpretrain.models.backbones`: Usually a feature extraction network, e.g., ResNet, MobileNet. +- :mod:`~mmpretrain.models.necks`: The component between backbones and heads, e.g., GlobalAveragePooling. +- :mod:`~mmpretrain.models.heads`: The component for specific tasks. +- :mod:`~mmpretrain.models.losses`: Loss functions. 
+- :mod:`~mmpretrain.models.peft`: The PEFT (Parameter-Efficient Fine-Tuning) module, e.g. LoRAModel. +- :mod:`~mmpretrain.models.utils`: Some helper functions and common components used in various networks. + + - :mod:`~mmpretrain.models.utils.data_preprocessor`: The component before model to preprocess the inputs, e.g., ClsDataPreprocessor. + - :ref:`components`: Common components used in various networks. + - :ref:`helpers`: Helper functions. + +Build Functions +--------------- + +.. autosummary:: + :toctree: generated + :nosignatures: + + build_classifier + build_backbone + build_neck + build_head + build_loss + +.. module:: mmpretrain.models.classifiers + +Classifiers +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + + BaseClassifier + ImageClassifier + TimmClassifier + HuggingFaceClassifier + +.. module:: mmpretrain.models.selfsup + +Self-supervised Algorithms +-------------------------- + +.. _selfsup_algorithms: + +.. autosummary:: + :toctree: generated + :nosignatures: + + BaseSelfSupervisor + BEiT + BYOL + BarlowTwins + CAE + DenseCL + EVA + iTPN + MAE + MILAN + MaskFeat + MixMIM + MoCo + MoCoV3 + SimCLR + SimMIM + SimSiam + SparK + SwAV + +.. _selfsup_backbones: + +Some of above algorithms modified the backbone module to adapt the extra inputs +like ``mask``, and here is the a list of these **modified backbone** modules. + +.. autosummary:: + :toctree: generated + :nosignatures: + + BEiTPretrainViT + CAEPretrainViT + iTPNHiViT + MAEHiViT + MAEViT + MILANViT + MaskFeatViT + MixMIMPretrainTransformer + MoCoV3ViT + SimMIMSwinTransformer + +.. _target_generators: + +Some self-supervise algorithms need an external **target generator** to +generate the optimization target. Here is a list of target generators. + +.. autosummary:: + :toctree: generated + :nosignatures: + + VQKD + DALLEEncoder + HOGGenerator + CLIPGenerator + +.. module:: mmpretrain.models.retrievers + +Retrievers +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + + BaseRetriever + ImageToImageRetriever + +.. module:: mmpretrain.models.multimodal + +Multi-Modality Algorithms +-------------------------- + +.. autosummary:: + :toctree: generated + :nosignatures: + + Blip2Caption + Blip2Retrieval + Blip2VQA + BlipCaption + BlipGrounding + BlipNLVR + BlipRetrieval + BlipVQA + Flamingo + OFA + MiniGPT4 + Llava + Otter + +.. module:: mmpretrain.models.backbones + +Backbones +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + + AlexNet + BEiTViT + CSPDarkNet + CSPNet + CSPResNeXt + CSPResNet + Conformer + ConvMixer + ConvNeXt + DaViT + DeiT3 + DenseNet + DistilledVisionTransformer + EdgeNeXt + EfficientFormer + EfficientNet + EfficientNetV2 + HiViT + HRNet + HorNet + InceptionV3 + LeNet5 + LeViT + MViT + MlpMixer + MobileNetV2 + MobileNetV3 + MobileOne + MobileViT + PCPVT + PoolFormer + PyramidVig + RegNet + RepLKNet + RepMLPNet + RepVGG + Res2Net + ResNeSt + ResNeXt + ResNet + ResNetV1c + ResNetV1d + ResNet_CIFAR + RevVisionTransformer + SEResNeXt + SEResNet + SVT + ShuffleNetV1 + ShuffleNetV2 + SparseResNet + SparseConvNeXt + SwinTransformer + SwinTransformerV2 + T2T_ViT + TIMMBackbone + TNT + VAN + VGG + Vig + VisionTransformer + ViTSAM + XCiT + ViTEVA02 + +.. module:: mmpretrain.models.necks + +Necks +------------------ + +.. 
autosummary:: + :toctree: generated + :nosignatures: + + BEiTV2Neck + CAENeck + ClsBatchNormNeck + DenseCLNeck + GeneralizedMeanPooling + GlobalAveragePooling + HRFuseScales + LinearNeck + MAEPretrainDecoder + MILANPretrainDecoder + MixMIMPretrainDecoder + MoCoV2Neck + NonLinearNeck + SimMIMLinearDecoder + SwAVNeck + iTPNPretrainDecoder + SparKLightDecoder + +.. module:: mmpretrain.models.heads + +Heads +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + + ArcFaceClsHead + BEiTV1Head + BEiTV2Head + CAEHead + CSRAClsHead + ClsHead + ConformerHead + ContrastiveHead + DeiTClsHead + EfficientFormerClsHead + LatentCrossCorrelationHead + LatentPredictHead + LeViTClsHead + LinearClsHead + MAEPretrainHead + MIMHead + MixMIMPretrainHead + MoCoV3Head + MultiLabelClsHead + MultiLabelLinearClsHead + MultiTaskHead + SimMIMHead + StackedLinearClsHead + SwAVHead + VigClsHead + VisionTransformerClsHead + iTPNClipHead + SparKPretrainHead + +.. module:: mmpretrain.models.losses + +Losses +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + + AsymmetricLoss + CAELoss + CosineSimilarityLoss + CrossCorrelationLoss + CrossEntropyLoss + FocalLoss + LabelSmoothLoss + PixelReconstructionLoss + SeesawLoss + SwAVLoss + +.. module:: mmpretrain.models.peft + +PEFT +------------------ + +.. autosummary:: + :toctree: generated + :nosignatures: + + LoRAModel + +.. module:: mmpretrain.models.utils + +models.utils +------------ + +This package includes some helper functions and common components used in various networks. + +.. _components: + +Common Components +^^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated + :nosignatures: + + ConditionalPositionEncoding + CosineEMA + HybridEmbed + InvertedResidual + LayerScale + MultiheadAttention + PatchEmbed + PatchMerging + SELayer + ShiftWindowMSA + WindowMSA + WindowMSAV2 + +.. _helpers: + +Helper Functions +^^^^^^^^^^^^^^^^ + +.. autosummary:: + :toctree: generated + :nosignatures: + + channel_shuffle + is_tracing + make_divisible + resize_pos_embed + resize_relative_position_bias_table + to_ntuple diff --git a/docs/en/api/structures.rst b/docs/en/api/structures.rst new file mode 100644 index 0000000..10caa37 --- /dev/null +++ b/docs/en/api/structures.rst @@ -0,0 +1,13 @@ +.. role:: hidden + :class: hidden-section + +.. module:: mmpretrain.structures + +mmpretrain.structures +=================================== + +This package includes basic data structures. + +DataSample +------------- +.. autoclass:: DataSample diff --git a/docs/en/api/utils.rst b/docs/en/api/utils.rst new file mode 100644 index 0000000..b2b9ea9 --- /dev/null +++ b/docs/en/api/utils.rst @@ -0,0 +1,19 @@ +.. role:: hidden + :class: hidden-section + +.. module:: mmpretrain.utils + +mmpretrain.utils +=================================== + +This package includes some useful helper functions for developing. + +.. autosummary:: + :toctree: generated + :nosignatures: + + collect_env + register_all_modules + load_json_log + track_on_main_process + get_ori_model diff --git a/docs/en/api/visualization.rst b/docs/en/api/visualization.rst new file mode 100644 index 0000000..85742a1 --- /dev/null +++ b/docs/en/api/visualization.rst @@ -0,0 +1,14 @@ +.. role:: hidden + :class: hidden-section + +.. module:: mmpretrain.visualization + +mmpretrain.visualization +=================================== + +This package includes visualizer and some helper functions for visualization. + +Visualizer +------------- +.. 
autoclass:: UniversalVisualizer + :members: diff --git a/docs/en/conf.py b/docs/en/conf.py new file mode 100644 index 0000000..a5a7fef --- /dev/null +++ b/docs/en/conf.py @@ -0,0 +1,248 @@ +# flake8: noqa +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import subprocess +import sys + +import pytorch_sphinx_theme +from sphinx.builders.html import StandaloneHTMLBuilder + +sys.path.insert(0, os.path.abspath('../../')) + +# -- Project information ----------------------------------------------------- + +project = 'MMPretrain' +copyright = '2020, OpenMMLab' +author = 'MMPretrain Authors' + +# The full version, including alpha/beta/rc tags +version_file = '../../mmpretrain/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.intersphinx', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'myst_parser', + 'sphinx_copybutton', + 'sphinx_tabs.tabs', + 'notfound.extension', + 'sphinxcontrib.jquery', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +language = 'en' + +# The master toctree document. +root_doc = 'index' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. 
+# yapf: disable +html_theme_options = { + 'menu': [ + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmpretrain' + }, + { + 'name': 'Colab Tutorials', + 'children': [ + {'name': 'Train and inference with shell commands', + 'url': 'https://colab.research.google.com/github/mzr1996/mmpretrain-tutorial/blob/master/1.x/MMPretrain_tools.ipynb'}, + {'name': 'Train and inference with Python APIs', + 'url': 'https://colab.research.google.com/github/mzr1996/mmpretrain-tutorial/blob/master/1.x/MMPretrain_python.ipynb'}, + ] + }, + { + 'name': 'Version', + 'children': [ + {'name': 'MMPreTrain 0.x', + 'url': 'https://mmpretrain.readthedocs.io/en/0.x/', + 'description': '0.x branch'}, + {'name': 'MMPreTrain 1.x', + 'url': 'https://mmpretrain.readthedocs.io/en/latest/', + 'description': 'Main branch'}, + ], + } + ], + # Specify the language of shared menu + 'menu_lang': 'en', + # Disable the default edit on GitHub + 'default_edit_on_github': False, +} +# yapf: enable + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = [ + 'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.css', + 'css/readthedocs.css' +] +html_js_files = [ + 'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.js', + 'js/custom.js' +] + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'mmpretraindoc' + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (root_doc, 'mmpretrain.tex', 'MMPretrain Documentation', author, 'manual'), +] + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(root_doc, 'mmpretrain', 'MMPretrain Documentation', [author], 1)] + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (root_doc, 'mmpretrain', 'MMPretrain Documentation', author, 'mmpretrain', + 'OpenMMLab pre-training toolbox and benchmark.', 'Miscellaneous'), +] + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. +# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. 
+epub_exclude_files = ['search.html'] + +# set priority when building html +StandaloneHTMLBuilder.supported_image_types = [ + 'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg' +] + +# -- Extension configuration ------------------------------------------------- +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True + +# Auto-generated header anchors +myst_heading_anchors = 3 +# Enable "colon_fence" extension of myst. +myst_enable_extensions = ['colon_fence', 'dollarmath'] + +# Configuration for intersphinx +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + 'numpy': ('https://numpy.org/doc/stable', None), + 'torch': ('https://pytorch.org/docs/stable/', None), + 'mmcv': ('https://mmcv.readthedocs.io/en/2.x/', None), + 'mmengine': ('https://mmengine.readthedocs.io/en/latest/', None), + 'transformers': + ('https://huggingface.co/docs/transformers/main/en/', None), +} +napoleon_custom_sections = [ + # Custom sections for data elements. + ('Meta fields', 'params_style'), + ('Data fields', 'params_style'), +] + +# Disable docstring inheritance +autodoc_inherit_docstrings = False +# Mock some imports during generate API docs. +autodoc_mock_imports = ['rich', 'attr', 'einops', 'mat4py'] +# Disable displaying type annotations, these can be very verbose +autodoc_typehints = 'none' + +# The not found page +notfound_template = '404.html' + + +def builder_inited_handler(app): + if subprocess.run(['./stat.py']).returncode != 0: + raise RuntimeError('Failed to run the script `stat.py`.') + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) diff --git a/docs/en/device/npu.md b/docs/en/device/npu.md new file mode 100644 index 0000000..d450029 --- /dev/null +++ b/docs/en/device/npu.md @@ -0,0 +1,47 @@ +# NPU (HUAWEI Ascend) + +## Usage + +### General Usage + +Please refer to the [building documentation of MMCV](https://mmcv.readthedocs.io/en/latest/get_started/build.html#build-mmcv-full-on-ascend-npu-machine) to install MMCV and [MMEngine](https://mmengine.readthedocs.io/en/latest/get_started/installation.html#build-from-source) on NPU devices. 
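+
+After the installation, you can optionally check that the NPU devices are visible to the framework. The snippet below is only a minimal sketch, assuming MMEngine has been installed with NPU support as described above:
+
+```python
+from mmengine.device import get_device, is_npu_available
+
+# Expect `True` and 'npu' if the Ascend environment is set up correctly.
+print(is_npu_available())
+print(get_device())
+```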
+ +Here we use 8 NPUs on your computer to train the model with the following command: + +```shell +bash ./tools/dist_train.sh configs/resnet/resnet50_8xb32_in1k.py 8 +``` + +Also, you can use only one NPU to train the model with the following command: + +```shell +python ./tools/train.py configs/resnet/resnet50_8xb32_in1k.py +``` + +## Models Results + +| Model | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------------------------------------: | :-------: | :-------: | :----------------------------------------------------------: | :-------------------------------------------------------------: | +| [ResNet-50](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/README.md) | 76.40 | 93.21 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet50_8xb32_in1k.py) | [log](https://download.openmmlab.com/mmclassification/v1/device/npu/resnet50_8xb32_in1k.log) | +| [ResNetXt-32x4d-50](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnext/README.md) | 77.48 | 93.75 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnext/resnext50-32x4d_8xb32_in1k.py) | [log](https://download.openmmlab.com/mmclassification/v1/device/npu/resnext50-32x4d_8xb32_in1k.log) | +| [HRNet-W18](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/README.md) | 77.06 | 93.57 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/hrnet/hrnet-w18_4xb32_in1k.py) | [log](https://download.openmmlab.com/mmclassification/v1/device/npu/hrnet-w18_4xb32_in1k.log) | +| [ResNetV1D-152](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/README.md) | 79.41 | 94.48 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnetv1d152_8xb32_in1k.py) | [log](https://download.openmmlab.com/mmclassification/v1/device/npu/resnetv1d152_8xb32_in1k.log) | +| [SE-ResNet-50](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/seresnet/README.md) | 77.65 | 93.74 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/seresnet/seresnet50_8xb32_in1k.py) | [log](https://download.openmmlab.com/mmclassification/v1/device/npu/seresnet50_8xb32_in1k.log) | +| [ShuffleNetV2 1.0x](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/shufflenet_v2/README.md) | 69.52 | 88.79 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py) | [log](https://download.openmmlab.com/mmclassification/v1/device/npu/shufflenet-v2-1x_16xb64_in1k.log) | +| [MobileNetV2](https://github.com/open-mmlab/mmclassification/tree/1.x/configs/mobilenet_v2) | 71.74 | 90.28 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py) | [log](https://download.openmmlab.com/mmclassification/v1/device/npu/mobilenet-v2_8xb32_in1k.log) | +| [MobileNetV3-Small](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilenet_v3/README.md) | 67.09 | 87.17 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilenet_v3/mobilenet-v3-small_8xb128_in1k.py) | [log](https://download.openmmlab.com/mmclassification/v1/device/npu/mobilenet-v3-small.log) | +| [\*CSPResNeXt50](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/cspnet/README.md) | 77.25 | 93.46 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/cspnet/cspresnext50_8xb32_in1k.py) | 
[log](https://download.openmmlab.com/mmclassification/v1/device/npu/cspresnext50_8xb32_in1k.log) | +| [\*EfficientNet-B4](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/README.md) | 75.73 | 92.91 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-b4_8xb32_in1k.py) | [log](https://download.openmmlab.com/mmclassification/v1/device/npu/efficientnet-b4_8xb32_in1k.log) | +| [\*\*DenseNet121](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/densenet/README.md) | 72.53 | 90.85 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/densenet/densenet121_4xb256_in1k.py) | [log](https://download.openmmlab.com/mmclassification/v1/device/npu/densenet121_4xb256_in1k.log) | + +**Notes:** + +- If not specially marked, the results are almost same between results on the NPU and results on the GPU with FP32. +- (\*) The training results of these models are lower than the results on the readme in the corresponding model, mainly + because the results on the readme are directly the weight of the timm of the eval, and the results on this side are + retrained according to the config with mmcls. The results of the config training on the GPU are consistent with the + results of the NPU. +- (\*\*) The accuracy of this model is slightly lower because config is a 4-card config, we use 8 cards to run, and users + can adjust hyperparameters to get the best accuracy results. + +**All above models are provided by Huawei Ascend group.** diff --git a/docs/en/docutils.conf b/docs/en/docutils.conf new file mode 100644 index 0000000..0c00c84 --- /dev/null +++ b/docs/en/docutils.conf @@ -0,0 +1,2 @@ +[html writers] +table_style: colwidths-auto diff --git a/docs/en/get_started.md b/docs/en/get_started.md new file mode 100644 index 0000000..5d33ac0 --- /dev/null +++ b/docs/en/get_started.md @@ -0,0 +1,164 @@ +# Prerequisites + +In this section we demonstrate how to prepare an environment with PyTorch. + +MMPretrain works on Linux, Windows and macOS. It requires Python 3.7+, CUDA 10.2+ and PyTorch 1.8+. + +```{note} +If you are experienced with PyTorch and have already installed it, just skip this part and jump to the [next section](#installation). Otherwise, you can follow these steps for the preparation. +``` + +**Step 1.** Download and install Miniconda from the [official website](https://docs.conda.io/en/latest/miniconda.html). + +**Step 2.** Create a conda environment and activate it. + +```shell +conda create --name openmmlab python=3.8 -y +conda activate openmmlab +``` + +**Step 3.** Install PyTorch following [official instructions](https://pytorch.org/get-started/locally/), e.g. + +On GPU platforms: + +```shell +conda install pytorch torchvision -c pytorch +``` + +```{warning} +This command will automatically install the latest version PyTorch and cudatoolkit, please check whether they match your environment. +``` + +On CPU platforms: + +```shell +conda install pytorch torchvision cpuonly -c pytorch +``` + +# Installation + +## Best Practices + +According to your needs, we support two install modes: + +- [Install from source (Recommended)](#install-from-source): You want to develop your own network or new features based on MMPretrain framework. For example, adding new datasets or new backbones. And you can use all tools we provided. +- [Install as a Python package](#install-as-a-python-package): You just want to call MMPretrain's APIs or import MMPretrain's modules in your project. 
+
+### Install from source
+
+In this case, install mmpretrain from source:
+
+```shell
+git clone https://github.com/open-mmlab/mmpretrain.git
+cd mmpretrain
+pip install -U openmim && mim install -e .
+```
+
+```{note}
+`"-e"` means installing a project in editable mode, thus any local modifications made to the code will take effect without reinstallation.
+```
+
+### Install as a Python package
+
+Just install with mim.
+
+```shell
+pip install -U openmim && mim install "mmpretrain>=1.0.0rc8"
+```
+
+```{note}
+`mim` is a light-weight command-line tool to set up an appropriate environment for OpenMMLab repositories according to the PyTorch and CUDA versions. It also has some useful functions for deep-learning experiments.
+```
+
+## Install multi-modality support (Optional)
+
+The multi-modality models in MMPretrain require extra dependencies. To install these dependencies, you
+can add `[multimodal]` during the installation. For example:
+
+```shell
+# Install from source
+mim install -e ".[multimodal]"
+
+# Install as a Python package
+mim install "mmpretrain[multimodal]>=1.0.0rc8"
+```
+
+## Verify the installation
+
+To verify whether MMPretrain is installed correctly, we provide some sample code to run an inference demo.
+
+Option (a). If you install mmpretrain from source, just run the following command:
+
+```shell
+python demo/image_demo.py demo/demo.JPEG resnet18_8xb32_in1k --device cpu
+```
+
+You will see the output result dict including `pred_label`, `pred_score` and `pred_class` in your terminal.
+
+Option (b). If you install mmpretrain as a Python package, open your Python interpreter and copy & paste the following code.
+
+```python
+from mmpretrain import get_model, inference_model
+
+model = get_model('resnet18_8xb32_in1k', device='cpu')  # or device='cuda:0'
+inference_model(model, 'demo/demo.JPEG')
+```
+
+You will see a dict printed, including the predicted label, score and category name.
+
+```{note}
+The `resnet18_8xb32_in1k` is the model name, and you can use [`mmpretrain.list_models`](mmpretrain.apis.list_models) to
+explore all models, or search them on the [Model Zoo Summary](./modelzoo_statistics.md).
+```
+
+## Customize Installation
+
+### CUDA versions
+
+When installing PyTorch, you need to specify the version of CUDA. If you are
+not clear on which to choose, follow our recommendations:
+
+- For Ampere-based NVIDIA GPUs, such as GeForce 30 series and NVIDIA A100, CUDA 11 is a must.
+- For older NVIDIA GPUs, CUDA 11 is backward compatible, but CUDA 10.2 offers better compatibility and is more lightweight.
+
+Please make sure the GPU driver satisfies the minimum version requirements. See [this table](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions) for more information.
+
+```{note}
+Installing CUDA runtime libraries is enough if you follow our best practices,
+because no CUDA code will be compiled locally. However, if you want to compile
+MMCV from source or develop other CUDA operators, you need to install the
+complete CUDA toolkit from NVIDIA's [website](https://developer.nvidia.com/cuda-downloads),
+and its version should match the CUDA version of PyTorch, i.e., the cudatoolkit
+version specified in the `conda install` command.
+```
+
+### Install on CPU-only platforms
+
+MMPretrain can be built for a CPU-only environment. In CPU mode, you can train, test, or run inference with a model.
+
+### Install on Google Colab
+
+See [the Colab tutorial](https://colab.research.google.com/github/mzr1996/mmclassification-tutorial/blob/master/1.x/MMClassification_tools.ipynb).
+
+### Using MMPretrain with Docker
+
+We provide a [Dockerfile](https://github.com/open-mmlab/mmpretrain/blob/main/docker/Dockerfile)
+to build an image. Ensure that your [Docker version](https://docs.docker.com/engine/install/) is >=19.03.
+
+```shell
+# build an image with PyTorch 1.12.1, CUDA 11.3
+# If you prefer other versions, just modify the Dockerfile
+docker build -t mmpretrain docker/
+```
+
+Run it with:
+
+```shell
+docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmpretrain/data mmpretrain
+```
+
+## Troubleshooting
+
+If you have any issues during the installation, please first check the [FAQ](./notes/faq.md) page.
+You may [open an issue](https://github.com/open-mmlab/mmpretrain/issues/new/choose)
+on GitHub if no solution is found.
diff --git a/docs/en/index.rst b/docs/en/index.rst
new file mode 100644
index 0000000..d16a32d
--- /dev/null
+++ b/docs/en/index.rst
@@ -0,0 +1,157 @@
+Welcome to MMPretrain's documentation!
+============================================
+
+MMPretrain is a newly upgraded open-source framework for pre-training.
+It has set out to provide multiple powerful pre-trained backbones and
+support different pre-training strategies. MMPretrain originated from the
+famous open-source projects
+`MMClassification <https://github.com/open-mmlab/mmclassification>`_
+and `MMSelfSup <https://github.com/open-mmlab/mmselfsup>`_, and is developed
+with many exciting new features. The pre-training stage is currently essential
+for visual recognition, and with rich and strong pre-trained models, we are
+capable of improving various downstream vision tasks.
+
+Our primary objective for the codebase is to become an easily accessible and
+user-friendly library and to streamline research and engineering. We
+detail the properties and design of MMPretrain across different sections.
+
+Hands-on Roadmap of MMPretrain
+-------------------------------
+
+To help users quickly utilize MMPretrain, we recommend following the hands-on
+roadmap we have created for the library:
+
+ - For users who want to try MMPretrain, we suggest reading the GetStarted_
+   section for the environment setup.
+
+ - For basic usage, we refer users to UserGuides_ for utilizing various
+   algorithms to obtain the pre-trained models and evaluate their performance
+   in downstream tasks.
+
+ - For those who wish to customize their own algorithms, we provide
+   AdvancedGuides_ that include hints and rules for modifying code.
+
+ - To find your desired pre-trained models, users can check the ModelZoo_,
+   which features a summary of various backbones and pre-training methods and
+   an introduction to different algorithms.
+
+ - Additionally, we provide Analysis_ and Visualization_ tools to help
+   diagnose algorithms.
+
+ - Besides, if you have any other questions or concerns, please refer to the
+   Notes_ section for potential answers.
+
+We always welcome *PRs* and *Issues* for the betterment of MMPretrain.
+
+.. _GetStarted:
+.. toctree::
+   :maxdepth: 1
+   :caption: Get Started
+
+   get_started.md
+
+.. _UserGuides:
+.. toctree::
+   :maxdepth: 1
+   :caption: User Guides
+
+   user_guides/config.md
+   user_guides/dataset_prepare.md
+   user_guides/inference.md
+   user_guides/train.md
+   user_guides/test.md
+   user_guides/downstream.md
+
+.. _AdvancedGuides:
+.. toctree::
+   :maxdepth: 1
+   :caption: Advanced Guides
+
+   advanced_guides/datasets.md
+   advanced_guides/pipeline.md
+   advanced_guides/modules.md
+   advanced_guides/schedule.md
+   advanced_guides/runtime.md
+   advanced_guides/evaluation.md
+   advanced_guides/convention.md
+
+.. _ModelZoo:
+.. toctree::
+   :maxdepth: 1
+   :caption: Model Zoo
+   :glob:
+
+   modelzoo_statistics.md
+   papers/*
+
+.. _Visualization:
+.. toctree::
+   :maxdepth: 1
+   :caption: Visualization
+
+   useful_tools/dataset_visualization.md
+   useful_tools/scheduler_visualization.md
+   useful_tools/cam_visualization.md
+   useful_tools/t-sne_visualization.md
+
+.. _Analysis:
+.. toctree::
+   :maxdepth: 1
+   :caption: Analysis Tools
+
+   useful_tools/print_config.md
+   useful_tools/verify_dataset.md
+   useful_tools/log_result_analysis.md
+   useful_tools/complexity_analysis.md
+   useful_tools/confusion_matrix.md
+   useful_tools/shape_bias.md
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Deployment
+
+   useful_tools/model_serving.md
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Migration
+
+   migration.md
+
+.. toctree::
+   :maxdepth: 1
+   :caption: API Reference
+
+   mmpretrain.apis
+   mmpretrain.engine
+   mmpretrain.datasets
+   Data Process
+   mmpretrain.models
+   mmpretrain.structures
+   mmpretrain.visualization
+   mmpretrain.evaluation
+   mmpretrain.utils
+
+.. _Notes:
+.. toctree::
+   :maxdepth: 1
+   :caption: Notes
+
+   notes/contribution_guide.md
+   notes/projects.md
+   notes/changelog.md
+   notes/faq.md
+   notes/pretrain_custom_dataset.md
+   notes/finetune_custom_dataset.md
+
+.. toctree::
+   :maxdepth: 1
+   :caption: Device Support
+
+   device/npu.md
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`search`
diff --git a/docs/en/migration.md b/docs/en/migration.md
new file mode 100644
index 0000000..bdebdf6
--- /dev/null
+++ b/docs/en/migration.md
@@ -0,0 +1,772 @@
+# Migration
+
+We introduce some modifications in MMPretrain 1.x, and some of them are BC-breaking. To migrate your projects from **MMClassification 0.x** or **MMSelfSup 0.x** smoothly, please read this tutorial.
+
+- [Migration](#migration)
+  - [New dependencies](#new-dependencies)
+- [General change of config](#general-change-of-config)
+  - [Schedule settings](#schedule-settings)
+  - [Runtime settings](#runtime-settings)
+  - [Other changes](#other-changes)
+- [Migration from MMClassification 0.x](#migration-from-mmclassification-0x)
+  - [Config files](#config-files)
+    - [Model settings](#model-settings)
+    - [Data settings](#data-settings)
+  - [Packages](#packages)
+    - [`mmpretrain.apis`](#mmpretrainapis)
+    - [`mmpretrain.core`](#mmpretraincore)
+    - [`mmpretrain.datasets`](#mmpretraindatasets)
+    - [`mmpretrain.models`](#mmpretrainmodels)
+    - [`mmpretrain.utils`](#mmpretrainutils)
+- [Migration from MMSelfSup 0.x](#migration-from-mmselfsup-0x)
+  - [Config](#config)
+    - [Dataset settings](#dataset-settings)
+    - [Model settings](#model-settings-1)
+  - [Package](#package)
+
+## New dependencies
+
+```{warning}
+MMPretrain 1.x has new package dependencies, and a new environment should be created for MMPretrain 1.x even if you already have a well-rounded MMClassification 0.x or MMSelfSup 0.x environment. Please refer to the [installation tutorial](./get_started.md) for the required package installation or install the packages manually.
+```
+
+1. [MMEngine](https://github.com/open-mmlab/mmengine): MMEngine is the core of the OpenMMLab 2.0 architecture,
+   and we have split many components unrelated to computer vision from MMCV to MMEngine.
+2. [MMCV](https://github.com/open-mmlab/mmcv): The computer vision package of OpenMMLab. This is not a new
+   dependency, but it should be upgraded to version `2.0.0rc1` or above.
+3. [rich](https://github.com/Textualize/rich): A terminal formatting package, and we use it to enhance some
+   outputs in the terminal.
+4. [einops](https://github.com/arogozhnikov/einops): Operators for Einstein notations.
+
+# General change of config
+
+In this section, we introduce the general differences between the old versions (**MMClassification 0.x** or **MMSelfSup 0.x**) and **MMPretrain 1.x**.
+
+## Schedule settings
+
+| MMCls or MMSelfSup 0.x | MMPretrain 1.x | Remark |
+| --- | --- | --- |
+| optimizer_config | / | It has been **removed**. |
+| / | optim_wrapper | The `optim_wrapper` provides a common interface for updating parameters. |
+| lr_config | param_scheduler | The `param_scheduler` is a list to set learning rate or other parameters, which is more flexible. |
+| runner | train_cfg | The loop setting (`EpochBasedTrainLoop`, `IterBasedTrainLoop`) in `train_cfg` controls the workflow of the algorithm training. |
+
+Changes in **`optimizer`** and **`optimizer_config`**:
+
+- Now we use the `optim_wrapper` field to specify all configurations related to the optimization process. The
+  `optimizer` has become a subfield of `optim_wrapper`.
+- The `paramwise_cfg` field is also a subfield of `optim_wrapper`, instead of `optimizer`.
+- The `optimizer_config` field has been removed, and all its configurations have been moved to `optim_wrapper`.
+- The `grad_clip` field has been renamed to `clip_grad`.
+
+**Original:**
+
+```python
+optimizer = dict(
+    type='AdamW',
+    lr=0.0015,
+    weight_decay=0.3,
+    paramwise_cfg = dict(
+        norm_decay_mult=0.0,
+        bias_decay_mult=0.0,
+    ))
+
+optimizer_config = dict(grad_clip=dict(max_norm=1.0))
+```
+
+**New:**
+
+```python
+optim_wrapper = dict(
+    optimizer=dict(type='AdamW', lr=0.0015, weight_decay=0.3),
+    paramwise_cfg = dict(
+        norm_decay_mult=0.0,
+        bias_decay_mult=0.0,
+    ),
+    clip_grad=dict(max_norm=1.0),
+)
+```
+
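+The wrapper is also where precision-related settings live. As a further illustration (a minimal sketch, assuming MMEngine's standard `AmpOptimWrapper` is available in your environment), enabling automatic mixed precision only changes the wrapper type:
+
+```python
+# A minimal sketch (assumption: MMEngine provides `AmpOptimWrapper`);
+# only the wrapper type changes, the optimizer settings stay the same.
+optim_wrapper = dict(
+    type='AmpOptimWrapper',
+    optimizer=dict(type='AdamW', lr=0.0015, weight_decay=0.3),
+    clip_grad=dict(max_norm=1.0),
+)
+```
+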
+Changes in **`lr_config`**:
+
+- The `lr_config` field has been removed and replaced by the new `param_scheduler`.
+- The `warmup`-related arguments have also been removed since we use a combination of schedulers to implement this
+  functionality.
+
+The new scheduler combination mechanism is highly flexible and enables the design of various learning rate/momentum curves.
+For more details, see the {external+mmengine:doc}`parameter schedulers tutorial `.
+
+**Original:**
+
+```python
+lr_config = dict(
+    policy='CosineAnnealing',
+    min_lr=0,
+    warmup='linear',
+    warmup_iters=5,
+    warmup_ratio=0.01,
+    warmup_by_epoch=True)
+```
+
+**New:**
+
+```python
+param_scheduler = [
+    # warmup
+    dict(
+        type='LinearLR',
+        start_factor=0.01,
+        by_epoch=True,
+        end=5,
+        # Update the learning rate after every iteration.
+        convert_to_iter_based=True),
+    # main learning rate scheduler
+    dict(type='CosineAnnealingLR', by_epoch=True, begin=5),
+]
+```
+
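+Other curves follow the same pattern of listing schedulers with `begin`/`end` boundaries. For example (a minimal sketch with made-up milestones, using standard MMEngine schedulers), a step decay after the same warmup could be written as:
+
+```python
+# A minimal sketch (assumption: `MultiStepLR` is available as a standard
+# MMEngine parameter scheduler); 5-epoch warmup followed by step decay.
+param_scheduler = [
+    dict(type='LinearLR', start_factor=0.01, by_epoch=True, end=5,
+         convert_to_iter_based=True),
+    dict(type='MultiStepLR', by_epoch=True, begin=5,
+         milestones=[30, 60, 90], gamma=0.1),
+]
+```
+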
+Changes in **`runner`**:
+
+Most of the configurations that were originally in the `runner` field have been moved to `train_cfg`, `val_cfg`, and `test_cfg`.
+These fields are used to configure the loop for training, validation, and testing.
+
+**Original:**
+
+```python
+runner = dict(type='EpochBasedRunner', max_epochs=100)
+```
+
+**New:**
+
+```python
+# The `val_interval` is the original `evaluation.interval`.
+train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1)
+val_cfg = dict()   # Use the default validation loop.
+test_cfg = dict()  # Use the default test loop.
+```
+
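+The same field also expresses iteration-based training. As a rough illustration (a minimal sketch with hypothetical numbers, relying on the `IterBasedTrainLoop` mentioned in the table above):
+
+```python
+# A minimal sketch (hypothetical numbers): iteration-based training,
+# validating every 1000 iterations instead of every epoch.
+train_cfg = dict(by_epoch=False, max_iters=90000, val_interval=1000)
+val_cfg = dict()
+test_cfg = dict()
+```
+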
+In OpenMMLab 2.0, we introduced `Loop` to control the behaviors in training, validation and testing. As a result, the functionalities of `Runner` have also been changed.
+More details can be found in the {external+mmengine:doc}`MMEngine tutorials `.
+
+## Runtime settings
+
+Changes in **`checkpoint_config`** and **`log_config`**:
+
+The `checkpoint_config` has been moved to `default_hooks.checkpoint`, and `log_config` has been moved to
+`default_hooks.logger`. Additionally, many hook settings that were previously included in the script code have
+been moved to the `default_hooks` field in the runtime configuration.
+
+```python
+default_hooks = dict(
+    # record the time of every iteration.
+    timer=dict(type='IterTimerHook'),
+
+    # print log every 100 iterations.
+    logger=dict(type='LoggerHook', interval=100),
+
+    # enable the parameter scheduler.
+    param_scheduler=dict(type='ParamSchedulerHook'),
+
+    # save checkpoint per epoch, and automatically save the best checkpoint.
+    checkpoint=dict(type='CheckpointHook', interval=1, save_best='auto'),
+
+    # set sampler seed in distributed environment.
+    sampler_seed=dict(type='DistSamplerSeedHook'),
+
+    # validation results visualization, set True to enable it.
+    visualization=dict(type='VisualizationHook', enable=False),
+)
+```
+
+In OpenMMLab 2.0, we have split the original logger into logger and visualizer. The logger is used to record
+information, while the visualizer is used to display the logged information in different backends such as the terminal,
+TensorBoard, and Wandb.
+
+**Original:**
+
+```python
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHook'),
+    ])
+```
+
+**New:**
+
+```python
+default_hooks = dict(
+    ...
+    logger=dict(type='LoggerHook', interval=100),
+)
+
+visualizer = dict(
+    type='UniversalVisualizer',
+    vis_backends=[dict(type='LocalVisBackend'), dict(type='TensorboardVisBackend')],
+)
+```
+
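+Other backends mentioned above plug into the same list. For instance (a minimal sketch, assuming MMEngine's `WandbVisBackend` and the `wandb` package are installed and configured):
+
+```python
+# A minimal sketch (assumption: `WandbVisBackend` from MMEngine is available);
+# logs go to the local backend, TensorBoard and Weights & Biases.
+visualizer = dict(
+    type='UniversalVisualizer',
+    vis_backends=[
+        dict(type='LocalVisBackend'),
+        dict(type='TensorboardVisBackend'),
+        dict(type='WandbVisBackend'),
+    ],
+)
+```
+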
+Changes in **`load_from`** and **`resume_from`**:
+
+The `resume_from` field has been removed, and we use `resume` and `load_from` instead.
+
+- If `resume=True` and `load_from` is not None, training is resumed from the checkpoint in `load_from`.
+- If `resume=True` and `load_from` is None, the latest checkpoint in the work directory is used for resuming.
+- If `resume=False` and `load_from` is not None, only the checkpoint is loaded without resuming training.
+- If `resume=False` and `load_from` is None, no checkpoint is loaded and training is not resumed.
+
+Changes in **`dist_params`**: The `dist_params` field has become a subfield of `env_cfg` now.
+Additionally, some new configurations have been added to `env_cfg`.
+
+```python
+env_cfg = dict(
+    # whether to enable cudnn benchmark
+    cudnn_benchmark=False,
+
+    # set multi process parameters
+    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+
+    # set distributed parameters
+    dist_cfg=dict(backend='nccl'),
+)
+```
+
+Changes in **`workflow`**: `workflow`-related functionalities are removed.
+
+New field **`visualizer`**: The visualizer is a new design in the OpenMMLab 2.0 architecture. The runner uses an
+instance of the visualizer to handle result and log visualization, as well as to save to different backends.
+For more information, please refer to the {external+mmengine:doc}`MMEngine tutorial `.
+
+```python
+visualizer = dict(
+    type='UniversalVisualizer',
+    vis_backends=[
+        dict(type='LocalVisBackend'),
+        # Uncomment the below line to save the log and visualization results to TensorBoard.
+        # dict(type='TensorboardVisBackend')
+    ]
+)
+```
+
+New field **`default_scope`**: The starting point to search modules for all registries. The `default_scope` in MMPretrain is `mmpretrain`. See {external+mmengine:doc}`the registry tutorial ` for more details.
+
+## Other changes
+
+We moved the definition of all registries in different packages to the `mmpretrain.registry` package.
+
+# Migration from MMClassification 0.x
+
+## Config files
+
+In MMPretrain 1.x, we refactored the structure of configuration files, and the original configuration files are no longer usable.
+
+In this section, we will introduce all the changes to the configuration files, and we assume you are already
+familiar with the [config files](./user_guides/config.md).
+
+### Model settings
+
+No changes in the `model.backbone`, `model.neck` and `model.head` fields.
+
+Changes in **`model.train_cfg`**:
+
+- `BatchMixup` is renamed to [`Mixup`](mmpretrain.models.utils.batch_augments.Mixup).
+- `BatchCutMix` is renamed to [`CutMix`](mmpretrain.models.utils.batch_augments.CutMix).
+- `BatchResizeMix` is renamed to [`ResizeMix`](mmpretrain.models.utils.batch_augments.ResizeMix).
+- The `prob` argument is removed from all augment settings, and you can use the `probs` field in `train_cfg` to
+  specify the probability of every augmentation. If there is no `probs` field, one augmentation is chosen randomly with equal probability.
+
+**Original:**
+
+```python
+model = dict(
+    ...
+    train_cfg=dict(augments=[
+        dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5),
+        dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5)
+    ]),
+)
+```
+
+**New:**
+
+```python
+model = dict(
+    ...
+    train_cfg=dict(augments=[
+        dict(type='Mixup', alpha=0.8), dict(type='CutMix', alpha=1.0)]),
+)
+```
+
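+If you want non-uniform probabilities rather than the default random choice, the `probs` field mentioned in the bullet above can be added; a minimal sketch (with hypothetical values, following the same config style):
+
+```python
+# A minimal sketch (hypothetical values): pick Mixup 30% of the time and
+# CutMix 70% of the time via the `probs` field of `train_cfg`.
+model = dict(
+    ...
+    train_cfg=dict(
+        augments=[dict(type='Mixup', alpha=0.8), dict(type='CutMix', alpha=1.0)],
+        probs=[0.3, 0.7]),
+)
+```
+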
+### Data settings
+
+Changes in **`data`**:
+
+- The original `data` field is split into `train_dataloader`, `val_dataloader` and
+  `test_dataloader`. This allows us to configure them in a fine-grained way. For example,
+  you can specify different samplers and batch sizes for training and testing.
+- The `samples_per_gpu` is renamed to `batch_size`.
+- The `workers_per_gpu` is renamed to `num_workers`.
+
+**Original:**
+
+```python
+data = dict(
+    samples_per_gpu=32,
+    workers_per_gpu=2,
+    train=dict(...),
+    val=dict(...),
+    test=dict(...),
+)
+```
+
+**New:**
+
+```python
+train_dataloader = dict(
+    batch_size=32,
+    num_workers=2,
+    dataset=dict(...),
+    sampler=dict(type='DefaultSampler', shuffle=True)  # necessary
+)
+
+val_dataloader = dict(
+    batch_size=32,
+    num_workers=2,
+    dataset=dict(...),
+    sampler=dict(type='DefaultSampler', shuffle=False)  # necessary
+)
+
+test_dataloader = val_dataloader
+```
+
+Changes in **`pipeline`**:
+
+- The original formatting transforms **`ToTensor`**, **`ImageToTensor`** and **`Collect`** are combined as [`PackInputs`](mmpretrain.datasets.transforms.PackInputs).
+- We don't recommend doing **`Normalize`** in the dataset pipeline. Please remove it from pipelines and set it in the `data_preprocessor` field.
+- The argument `flip_prob` in [**`RandomFlip`**](mmcv.transforms.RandomFlip) is renamed to `prob`.
+- The argument `size` in [**`RandomCrop`**](mmpretrain.datasets.transforms.RandomCrop) is renamed to `crop_size`.
+- The argument `size` in [**`RandomResizedCrop`**](mmpretrain.datasets.transforms.RandomResizedCrop) is renamed to `scale`.
+- The argument `size` in [**`Resize`**](mmcv.transforms.Resize) is renamed to `scale`. `Resize` no longer supports sizes like `(256, -1)`; please use [`ResizeEdge`](mmpretrain.datasets.transforms.ResizeEdge) instead.
+- The argument `policies` in [**`AutoAugment`**](mmpretrain.datasets.transforms.AutoAugment) and [**`RandAugment`**](mmpretrain.datasets.transforms.RandAugment) supports using a string to specify preset policies. `AutoAugment` supports "imagenet" and `RandAugment` supports "timm_increasing".
+- **`RandomResizedCrop`** and **`CenterCrop`** no longer support `efficientnet_style`; please use [`EfficientNetRandomCrop`](mmpretrain.datasets.transforms.EfficientNetRandomCrop) and [`EfficientNetCenterCrop`](mmpretrain.datasets.transforms.EfficientNetCenterCrop) instead.
+
+```{note}
+We have moved some of the data transform work, such as normalization, to the data preprocessor; see [the documentation](mmpretrain.models.utils.data_preprocessor) for
+more details.
+```
+
+**Original:**
+
+```python
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='RandomResizedCrop', size=224),
+    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
+    dict(type='Normalize', **img_norm_cfg),
+    dict(type='ImageToTensor', keys=['img']),
+    dict(type='ToTensor', keys=['gt_label']),
+    dict(type='Collect', keys=['img', 'gt_label'])
+]
+```
+
+**New:**
+
+```python
+data_preprocessor = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='RandomResizedCrop', scale=224),
+    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
+    dict(type='PackInputs'),
+]
+```
+
+Changes in **`evaluation`**:
+
+- The **`evaluation`** field is split into `val_evaluator` and `test_evaluator`. It no longer supports the `interval` and `save_best` arguments.
+  The `interval` option is moved to `train_cfg.val_interval` (see [the schedule settings](./user_guides/config.md#schedule-settings)), and `save_best`
+  is moved to `default_hooks.checkpoint.save_best` (see [the runtime settings](./user_guides/config.md#runtime-settings)).
+- The 'accuracy' metric is renamed to [`Accuracy`](mmpretrain.evaluation.Accuracy).
+- The 'precision', 'recall', 'f1-score' and 'support' metrics are combined as [`SingleLabelMetric`](mmpretrain.evaluation.SingleLabelMetric), and you can use the `items` argument to specify which metrics to calculate.
+- The 'mAP' metric is renamed to [`AveragePrecision`](mmpretrain.evaluation.AveragePrecision).
+- The 'CP', 'CR', 'CF1', 'OP', 'OR' and 'OF1' metrics are combined as [`MultiLabelMetric`](mmpretrain.evaluation.MultiLabelMetric), and you can use the `items` and `average` arguments to specify which metrics to calculate.
+
+**Original:**
+
+```python
+evaluation = dict(
+    interval=1,
+    metric='accuracy',
+    metric_options=dict(topk=(1, 5))
+)
+```
+
+**New:**
+
+```python
+val_evaluator = dict(type='Accuracy', topk=(1, 5))
+test_evaluator = val_evaluator
+```
+
+**Original:**
+
+```python
+evaluation = dict(
+    interval=1,
+    metric=['mAP', 'CP', 'OP', 'CR', 'OR', 'CF1', 'OF1'],
+    metric_options=dict(thr=0.5),
+)
+```
+
+**New:**
+
+```python
+val_evaluator = [
+    dict(type='AveragePrecision'),
+    dict(type='MultiLabelMetric',
+        items=['precision', 'recall', 'f1-score'],
+        average='both',
+        thr=0.5),
+]
+test_evaluator = val_evaluator
+```
+
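+For single-label metrics other than accuracy, the `SingleLabelMetric` mentioned above works the same way; a minimal sketch (illustrative only) might look like:
+
+```python
+# A minimal sketch (illustrative): report top-k accuracy plus precision,
+# recall and f1-score via `SingleLabelMetric` and its `items` argument.
+val_evaluator = [
+    dict(type='Accuracy', topk=(1, 5)),
+    dict(type='SingleLabelMetric', items=['precision', 'recall', 'f1-score']),
+]
+test_evaluator = val_evaluator
+```
+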
+## Packages
+
+### `mmpretrain.apis`
+
+The documentation can be found [here](mmpretrain.apis).
+
+| Function | Changes |
+| :---: | :--- |
+| `init_model` | No changes |
+| `inference_model` | No changes. But we recommend using [`mmpretrain.ImageClassificationInferencer`](mmpretrain.apis.ImageClassificationInferencer) instead. |
+| `train_model` | Removed, use `runner.train` to train. |
+| `multi_gpu_test` | Removed, use `runner.test` to test. |
+| `single_gpu_test` | Removed, use `runner.test` to test. |
+| `show_result_pyplot` | Removed, use [`mmpretrain.ImageClassificationInferencer`](mmpretrain.apis.ImageClassificationInferencer) to run inference and show the result. |
+| `set_random_seed` | Removed, use `mmengine.runner.set_random_seed`. |
+| `init_random_seed` | Removed, use `mmengine.dist.sync_random_seed`. |
+
+### `mmpretrain.core`
+
+The `mmpretrain.core` package is renamed to [`mmpretrain.engine`](mmpretrain.engine).
+
+| Sub package | Changes |
+| :---: | :--- |
+| `evaluation` | Removed, use the metrics in [`mmpretrain.evaluation`](mmpretrain.evaluation). |
+| `hook` | Moved to [`mmpretrain.engine.hooks`](mmpretrain.engine.hooks) |
+| `optimizers` | Moved to [`mmpretrain.engine.optimizers`](mmpretrain.engine.optimizers) |
+| `utils` | Removed, the distributed environment related functions can be found in the [`mmengine.dist`](api/dist) package. |
+| `visualization` | Removed, the related functionalities are implemented in [`mmengine.visualization.Visualizer`](mmengine.visualization.Visualizer). |
+
+The `MMClsWandbHook` in the `hooks` package is waiting for implementation.
+
+The `CosineAnnealingCooldownLrUpdaterHook` in the `hooks` package is removed, and we support this functionality with
+a combination of parameter schedulers; see [the tutorial](./advanced_guides/schedule.md).
+
+### `mmpretrain.datasets`
+
+The documentation can be found [here](mmpretrain.datasets).
+
+| Dataset class | Changes |
+| :---: | :--- |
+| [`CustomDataset`](mmpretrain.datasets.CustomDataset) | Adds the `data_root` argument as the common prefix of `data_prefix` and `ann_file`, and supports loading unlabeled data. |
+| [`ImageNet`](mmpretrain.datasets.ImageNet) | Same as `CustomDataset`. |
+| [`ImageNet21k`](mmpretrain.datasets.ImageNet21k) | Same as `CustomDataset`. |
+| [`CIFAR10`](mmpretrain.datasets.CIFAR10) & [`CIFAR100`](mmpretrain.datasets.CIFAR100) | The `test_mode` argument is a required argument now. |
+| [`MNIST`](mmpretrain.datasets.MNIST) & [`FashionMNIST`](mmpretrain.datasets.FashionMNIST) | The `test_mode` argument is a required argument now. |
+| [`VOC`](mmpretrain.datasets.VOC) | Requires `data_root`, `image_set_path` and `test_mode` now. |
+| [`CUB`](mmpretrain.datasets.CUB) | Requires `data_root` and `test_mode` now. |
+
+The `mmpretrain.datasets.pipelines` is renamed to `mmpretrain.datasets.transforms`.
+
+| Transform class | Changes |
+| :---: | :--- |
+| `LoadImageFromFile` | Removed, use [`mmcv.transforms.LoadImageFromFile`](mmcv.transforms.LoadImageFromFile). |
+| `RandomFlip` | Removed, use [`mmcv.transforms.RandomFlip`](mmcv.transforms.RandomFlip). The argument `flip_prob` is renamed to `prob`. |
+| `RandomCrop` | The argument `size` is renamed to `crop_size`. |
+| `RandomResizedCrop` | The argument `size` is renamed to `scale`. The argument `scale` is renamed to `crop_ratio_range`. No longer supports `efficientnet_style`, use [`EfficientNetRandomCrop`](mmpretrain.datasets.transforms.EfficientNetRandomCrop). |
+| `CenterCrop` | Removed, use [`mmcv.transforms.CenterCrop`](mmcv.transforms.CenterCrop). No longer supports `efficientnet_style`, use [`EfficientNetCenterCrop`](mmpretrain.datasets.transforms.EfficientNetCenterCrop). |
+| `Resize` | Removed, use [`mmcv.transforms.Resize`](mmcv.transforms.Resize). The argument `size` is renamed to `scale`. No longer supports sizes like `(256, -1)`, use [`ResizeEdge`](mmpretrain.datasets.transforms.ResizeEdge). |
+| `AutoAugment` & `RandAugment` | The argument `policies` supports using a string to specify preset policies. |
+| `Compose` | Removed, use [`mmcv.transforms.Compose`](mmcv.transforms.Compose). |
+
+### `mmpretrain.models`
+
+The documentation can be found [here](mmpretrain.models). The interfaces of all **backbones**, **necks** and **losses** did not change.
+
+Changes in [`ImageClassifier`](mmpretrain.models.classifiers.ImageClassifier):
+
+| Method of classifiers | Changes |
+| :---: | :--- |
+| `extract_feat` | No changes |
+| `forward` | Now only accepts three arguments: `inputs`, `data_samples` and `mode`. See [the documentation](mmpretrain.models.classifiers.ImageClassifier.forward) for more details. |
+| `forward_train` | Replaced by `loss`. |
+| `simple_test` | Replaced by `predict`. |
+| `train_step` | The `optimizer` argument is replaced by `optim_wrapper` and it accepts [`OptimWrapper`](mmengine.optim.OptimWrapper). |
+| `val_step` | The original `val_step` is the same as `train_step`, now it calls `predict`. |
+| `test_step` | New method, and it's the same as `val_step`. |
+
+Changes in [heads](mmpretrain.models.heads):
+
+| Method of heads | Changes |
+| :---: | :--- |
+| `pre_logits` | No changes |
+| `forward_train` | Replaced by `loss`. |
+| `simple_test` | Replaced by `predict`. |
+| `loss` | It accepts `data_samples` instead of `gt_labels` to calculate loss. The `data_samples` is a list of [ClsDataSample](mmpretrain.structures.DataSample). |
+| `forward` | New method, and it returns the output of the classification head without any post-processing like softmax or sigmoid. |
+
+### `mmpretrain.utils`
+
+| Function | Changes |
+| :---: | :--- |
+| `collect_env` | No changes |
+| `get_root_logger` | Removed, use [`mmengine.logging.MMLogger.get_current_instance`](mmengine.logging.MMLogger.get_current_instance) |
+| `load_json_log` | The output format changed. |
+| `setup_multi_processes` | Removed, use [`mmengine.utils.dl_utils.set_multi_processing`](mmengine.utils.dl_utils.set_multi_processing). |
+| `wrap_non_distributed_model` | Removed, we auto wrap the model in the runner. |
+| `wrap_distributed_model` | Removed, we auto wrap the model in the runner. |
+| `auto_select_device` | Removed, we auto select the device in the runner. |
+
+# Migration from MMSelfSup 0.x
+
+## Config
+
+This section illustrates the changes of our config files in the `_base_` folder, which include three parts:
+
+- Datasets: `configs/_base_/datasets`
+- Models: `configs/_base_/models`
+- Schedules: `configs/_base_/schedules`
+
+### Dataset settings
+
+In **MMSelfSup 0.x**, we use the key `data` to summarize all information, such as `samples_per_gpu`, `train`, `val`, etc.
+
+In **MMPretrain 1.x**, we use separate `train_dataloader` and `val_dataloader` fields to summarize the information correspondingly, and the key `data` has been **removed**.
+
+**Original:**
+
+```python
+data = dict(
+    samples_per_gpu=32,  # total 32*8(gpu)=256
+    workers_per_gpu=4,
+    train=dict(
+        type=dataset_type,
+        data_source=dict(
+            type=data_source,
+            data_prefix='data/imagenet/train',
+            ann_file='data/imagenet/meta/train.txt',
+        ),
+        num_views=[1, 1],
+        pipelines=[train_pipeline1, train_pipeline2],
+        prefetch=prefetch,
+    ),
+    val=...)
+```
+
+**New:**
+
+```python
+train_dataloader = dict(
+    batch_size=32,
+    num_workers=4,
+    persistent_workers=True,
+    sampler=dict(type='DefaultSampler', shuffle=True),
+    collate_fn=dict(type='default_collate'),
+    dataset=dict(
+        type=dataset_type,
+        data_root=data_root,
+        ann_file='meta/train.txt',
+        data_prefix=dict(img_path='train/'),
+        pipeline=train_pipeline))
+val_dataloader = ...
+```
+
+Besides, we **removed** the key `data_source` to keep the pipeline format consistent with that in other OpenMMLab projects. Please refer to [Config](user_guides/config.md) for more details.
+
+Changes in **`pipeline`**:
+
+Take MAE as an example of `pipeline`:
+
+```python
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+        type='RandomResizedCrop',
+        scale=224,
+        crop_ratio_range=(0.2, 1.0),
+        backend='pillow',
+        interpolation='bicubic'),
+    dict(type='RandomFlip', prob=0.5),
+    dict(type='PackInputs')
+]
+```
+
+### Model settings
+
+In the model configs, there are two main differences from MMSelfSup 0.x.
+
+1. There is a new key called `data_preprocessor`, which is responsible for preprocessing the data, like normalization, channel conversion, etc. For example:
+
+```python
+data_preprocessor=dict(
+    mean=[123.675, 116.28, 103.53],
+    std=[58.395, 57.12, 57.375],
+    bgr_to_rgb=True)
+model = dict(
+    type='MAE',
+    data_preprocessor=dict(
+        mean=[127.5, 127.5, 127.5],
+        std=[127.5, 127.5, 127.5],
+        bgr_to_rgb=True),
+    backbone=...,
+    neck=...,
+    head=...,
+    init_cfg=...)
+```
+
+2. There is a new key `loss` in `head` in MMPretrain 1.x, which determines the loss function of the algorithm. For example:
+
+```python
+model = dict(
+    type='MAE',
+    backbone=...,
+    neck=...,
+    head=dict(
+        type='MAEPretrainHead',
+        norm_pix=True,
+        patch_size=16,
+        loss=dict(type='MAEReconstructionLoss')),
+    init_cfg=...)
+```
+
+## Package
+
+The table below records the general modifications of the folders and files.
+
+| MMSelfSup 0.x | MMPretrain 1.x | Remark |
+| --- | --- | --- |
+| apis | apis | The high level APIs are updated. |
+| core | engine | The `core` folder has been renamed to `engine`, which includes `hooks` and `optimizers`. ([API link](mmpretrain.engine)) |
+| datasets | datasets | The datasets are implemented according to different datasets, such as ImageNet and Places205. ([API link](mmpretrain.datasets)) |
+| datasets/data_sources | / | The `data_sources` has been **removed** and the directory of `datasets` now is consistent with other OpenMMLab projects. |
+| datasets/pipelines | datasets/transforms | The `pipelines` folder has been renamed to `transforms`. ([API link](mmpretrain.datasets.transforms)) |
+| / | evaluation | The `evaluation` is created for some evaluation functions or classes. ([API link](mmpretrain.evaluation)) |
+| models/algorithms | selfsup | The algorithms are moved to the `selfsup` folder. ([API link](mmpretrain.models.selfsup)) |
+| models/backbones | selfsup | The re-implemented backbones are moved to the corresponding self-supervised learning algorithm `.py` files. ([API link](mmpretrain.models.selfsup)) |
+| models/target_generators | selfsup | The target generators are moved to the corresponding self-supervised learning algorithm `.py` files. ([API link](mmpretrain.models.selfsup)) |
+| / | models/losses | The `losses` folder is created to provide different loss implementations, which is from `heads`. ([API link](mmpretrain.models.losses)) |
+| / | structures | The `structures` folder is for the implementation of data structures. In MMPretrain, we implement a new data structure, `DataSample`, to pass and receive data throughout the training/val process.
([API link](mmpretrain.structures)) | +| / | visualization | The `visualization` folder contains the visualizer, which is responsible for some visualization tasks like visualizing data augmentation. ([API link](mmpretrain.visualization)) | diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md new file mode 100644 index 0000000..499ed24 --- /dev/null +++ b/docs/en/notes/changelog.md @@ -0,0 +1,1055 @@ +# Changelog (MMPreTrain) + +## v1.2.0(04/01/2024) + +### New Features + +- [Feature] Support LLaVA 1.5 ([#1853](https://github.com/open-mmlab/mmpretrain/pull/1853)) +- [Feature] Implement of RAM with a gradio interface. ([#1802](https://github.com/open-mmlab/mmpretrain/pull/1802)) + +### Bug Fix + +- [Fix] Fix resize mix argument bug. + +## v1.1.0(12/10/2023) + +### New Features + +- [Feature] Implement of Zero-Shot CLIP Classifier ([#1737](https://github.com/open-mmlab/mmpretrain/pull/1737)) +- [Feature] Add minigpt4 gradio demo and training script. ([#1758](https://github.com/open-mmlab/mmpretrain/pull/1758)) + +### Improvements + +- [Config] New Version of config Adapting MobileNet Algorithm ([#1774](https://github.com/open-mmlab/mmpretrain/pull/1774)) +- [Config] Support DINO self-supervised learning in project ([#1756](https://github.com/open-mmlab/mmpretrain/pull/1756)) +- [Config] New Version of config Adapting Swin Transformer Algorithm ([#1780](https://github.com/open-mmlab/mmpretrain/pull/1780)) +- [Enhance] Add iTPN Supports for Non-three channel image ([#1735](https://github.com/open-mmlab/mmpretrain/pull/1735)) +- [Docs] Update dataset download script from opendatalab to openXlab ([#1765](https://github.com/open-mmlab/mmpretrain/pull/1765)) +- [Docs] Update COCO-Retrieval dataset docs. ([#1806](https://github.com/open-mmlab/mmpretrain/pull/1806)) + +### Bug Fix + +- Update `train.py` to compat with new config. +- Update OFA module to compat with the latest huggingface. +- Fix pipeline bug in ImageRetrievalInferencer. + +## v1.0.2(15/08/2023) + +### New Features + +- Add MFF ([#1725](https://github.com/open-mmlab/mmpretrain/pull/1725)) +- Support training of BLIP2 ([#1700](https://github.com/open-mmlab/mmpretrain/pull/1700)) + +### Improvements + +- New Version of config Adapting MAE Algorithm ([#1750](https://github.com/open-mmlab/mmpretrain/pull/1750)) +- New Version of config Adapting ConvNeXt Algorithm ([#1760](https://github.com/open-mmlab/mmpretrain/pull/1760)) +- New version of config adapting BeitV2 Algorithm ([#1755](https://github.com/open-mmlab/mmpretrain/pull/1755)) +- Update `dataset_prepare.md` ([#1732](https://github.com/open-mmlab/mmpretrain/pull/1732)) +- New Version of `config` Adapting Vision Transformer Algorithm ([#1727](https://github.com/open-mmlab/mmpretrain/pull/1727)) +- Support Infographic VQA dataset and ANLS metric. ([#1667](https://github.com/open-mmlab/mmpretrain/pull/1667)) +- Support IconQA dataset. 
([#1670](https://github.com/open-mmlab/mmpretrain/pull/1670)) +- Fix typo MIMHIVIT to MAEHiViT ([#1749](https://github.com/open-mmlab/mmpretrain/pull/1749)) + +## v1.0.1(28/07/2023) + +### Improvements + +- Add init_cfg with type='pretrained' to downstream tasks ([#1717](https://github.com/open-mmlab/mmpretrain/pull/1717) +- Set 'is_init' in some multimodal methods ([#1718](https://github.com/open-mmlab/mmpretrain/pull/1718) +- Adapt test cases on Ascend NPU ([#1728](https://github.com/open-mmlab/mmpretrain/pull/1728) +- Add GPU Acceleration Apple silicon mac ([#1699](https://github.com/open-mmlab/mmpretrain/pull/1699) +- BEiT refactor ([#1705](https://github.com/open-mmlab/mmpretrain/pull/1705) + +### Bug Fixes + +- Fix dict update in minigpt4. ([#1709](https://github.com/open-mmlab/mmpretrain/pull/1709) +- Fix nested predict for multi-task prediction ([#1716](https://github.com/open-mmlab/mmpretrain/pull/1716) +- Fix the issue #1711 "GaussianBlur doesn't work" ([#1722](https://github.com/open-mmlab/mmpretrain/pull/1722) +- Just to correct a typo of 'target' ([#1655](https://github.com/open-mmlab/mmpretrain/pull/1655) +- Fix freeze without cls_token in vit ([#1693](https://github.com/open-mmlab/mmpretrain/pull/1693) +- Fix RandomCrop bug ([#1706](https://github.com/open-mmlab/mmpretrain/pull/1706) + +### Docs Update + +- Fix spelling ([#1689](https://github.com/open-mmlab/mmpretrain/pull/1689) + +## v1.0.0(04/07/2023) + +### Highlights + +- Support inference of more **multi-modal** algorithms, such as **LLaVA**, **MiniGPT-4**, **Otter**, etc. +- Support around **10 multi-modal datasets**! +- Add **iTPN**, **SparK** self-supervised learning algorithms. +- Provide examples of [New Config](https://github.com/open-mmlab/mmpretrain/tree/main/mmpretrain/configs/) and [DeepSpeed/FSDP](https://github.com/open-mmlab/mmpretrain/tree/main/configs/mae/benchmarks/). + +### New Features + +- Transfer shape-bias tool from mmselfsup ([#1658](https://github.com/open-mmlab/mmpretrain/pull/1685)) +- Download dataset by using MIM&OpenDataLab ([#1630](https://github.com/open-mmlab/mmpretrain/pull/1630)) +- Support New Configs ([#1639](https://github.com/open-mmlab/mmpretrain/pull/1639), [#1647](https://github.com/open-mmlab/mmpretrain/pull/1647), [#1665](https://github.com/open-mmlab/mmpretrain/pull/1665)) +- Support Flickr30k Retrieval dataset ([#1625](https://github.com/open-mmlab/mmpretrain/pull/1625)) +- Support SparK ([#1531](https://github.com/open-mmlab/mmpretrain/pull/1531)) +- Support LLaVA ([#1652](https://github.com/open-mmlab/mmpretrain/pull/1652)) +- Support Otter ([#1651](https://github.com/open-mmlab/mmpretrain/pull/1651)) +- Support MiniGPT-4 ([#1642](https://github.com/open-mmlab/mmpretrain/pull/1642)) +- Add support for VizWiz dataset ([#1636](https://github.com/open-mmlab/mmpretrain/pull/1636)) +- Add support for vsr dataset ([#1634](https://github.com/open-mmlab/mmpretrain/pull/1634)) +- Add InternImage Classification project ([#1569](https://github.com/open-mmlab/mmpretrain/pull/1569)) +- Support OCR-VQA dataset ([#1621](https://github.com/open-mmlab/mmpretrain/pull/1621)) +- Support OK-VQA dataset ([#1615](https://github.com/open-mmlab/mmpretrain/pull/1615)) +- Support TextVQA dataset ([#1569](https://github.com/open-mmlab/mmpretrain/pull/1569)) +- Support iTPN and HiViT ([#1584](https://github.com/open-mmlab/mmpretrain/pull/1584)) +- Add retrieval mAP metric ([#1552](https://github.com/open-mmlab/mmpretrain/pull/1552)) +- Support NoCap dataset based on BLIP. 
([#1582](https://github.com/open-mmlab/mmpretrain/pull/1582)) +- Add GQA dataset ([#1585](https://github.com/open-mmlab/mmpretrain/pull/1585)) + +### Improvements + +- Update fsdp vit-huge and vit-large config ([#1675](https://github.com/open-mmlab/mmpretrain/pull/1675)) +- Support deepspeed with flexible runner ([#1673](https://github.com/open-mmlab/mmpretrain/pull/1673)) +- Update Otter and LLaVA docs and config. ([#1653](https://github.com/open-mmlab/mmpretrain/pull/1653)) +- Add image_only param of ScienceQA ([#1613](https://github.com/open-mmlab/mmpretrain/pull/1613)) +- Support to use "split" to specify training set/validation ([#1535](https://github.com/open-mmlab/mmpretrain/pull/1535)) + +### Bug Fixes + +- Refactor \_prepare_pos_embed in ViT ([#1656](https://github.com/open-mmlab/mmpretrain/pull/1656), [#1679](https://github.com/open-mmlab/mmpretrain/pull/1679)) +- Freeze pre norm in vision transformer ([#1672](https://github.com/open-mmlab/mmpretrain/pull/1672)) +- Fix bug loading IN1k dataset ([#1641](https://github.com/open-mmlab/mmpretrain/pull/1641)) +- Fix sam bug ([#1633](https://github.com/open-mmlab/mmpretrain/pull/1633)) +- Fixed circular import error for new transform ([#1609](https://github.com/open-mmlab/mmpretrain/pull/1609)) +- Update torchvision transform wrapper ([#1595](https://github.com/open-mmlab/mmpretrain/pull/1595)) +- Set default out_type in CAM visualization ([#1586](https://github.com/open-mmlab/mmpretrain/pull/1586)) + +### Docs Update + +- Fix spelling ([#1681](https://github.com/open-mmlab/mmpretrain/pull/1681)) +- Fix doc typos ([#1671](https://github.com/open-mmlab/mmpretrain/pull/1671), [#1644](https://github.com/open-mmlab/mmpretrain/pull/1644), [#1629](https://github.com/open-mmlab/mmpretrain/pull/1629)) +- Add t-SNE visualization doc ([#1555](https://github.com/open-mmlab/mmpretrain/pull/1555)) + +## v1.0.0rc8(22/05/2023) + +### Highlights + +- Support multiple multi-modal algorithms and inferencers. You can explore these features by the [gradio demo](https://github.com/open-mmlab/mmpretrain/tree/main/projects/gradio_demo)! +- Add EVA-02, Dino-V2, ViT-SAM and GLIP backbones. +- Register torchvision transforms into MMPretrain, you can now easily integrate torchvision's data augmentations in MMPretrain. + +### New Features + +- Support Chinese CLIP. ([#1576](https://github.com/open-mmlab/mmpretrain/pull/1576)) +- Add ScienceQA Metrics ([#1577](https://github.com/open-mmlab/mmpretrain/pull/1577)) +- Support multiple multi-modal algorithms and inferencers. ([#1561](https://github.com/open-mmlab/mmpretrain/pull/1561)) +- add eva02 backbone ([#1450](https://github.com/open-mmlab/mmpretrain/pull/1450)) +- Support dinov2 backbone ([#1522](https://github.com/open-mmlab/mmpretrain/pull/1522)) +- Support some downstream classification datasets. 
([#1467](https://github.com/open-mmlab/mmpretrain/pull/1467)) +- Support GLIP ([#1308](https://github.com/open-mmlab/mmpretrain/pull/1308)) +- Register torchvision transforms into mmpretrain ([#1265](https://github.com/open-mmlab/mmpretrain/pull/1265)) +- Add ViT of SAM ([#1476](https://github.com/open-mmlab/mmpretrain/pull/1476)) + +### Improvements + +- [Refactor] Support to freeze channel reduction and add layer decay function ([#1490](https://github.com/open-mmlab/mmpretrain/pull/1490)) +- [Refactor] Support resizing pos_embed while loading ckpt and format output ([#1488](https://github.com/open-mmlab/mmpretrain/pull/1488)) + +### Bug Fixes + +- Fix scienceqa ([#1581](https://github.com/open-mmlab/mmpretrain/pull/1581)) +- Fix config of beit ([#1528](https://github.com/open-mmlab/mmpretrain/pull/1528)) +- Incorrect stage freeze on RIFormer Model ([#1573](https://github.com/open-mmlab/mmpretrain/pull/1573)) +- Fix ddp bugs caused by `out_type`. ([#1570](https://github.com/open-mmlab/mmpretrain/pull/1570)) +- Fix multi-task-head loss potential bug ([#1530](https://github.com/open-mmlab/mmpretrain/pull/1530)) +- Support bce loss without batch augmentations ([#1525](https://github.com/open-mmlab/mmpretrain/pull/1525)) +- Fix clip generator init bug ([#1518](https://github.com/open-mmlab/mmpretrain/pull/1518)) +- Fix the bug in binary cross entropy loss ([#1499](https://github.com/open-mmlab/mmpretrain/pull/1499)) + +### Docs Update + +- Update PoolFormer citation to CVPR version ([#1505](https://github.com/open-mmlab/mmpretrain/pull/1505)) +- Refine Inference Doc ([#1489](https://github.com/open-mmlab/mmpretrain/pull/1489)) +- Add doc for usage of confusion matrix ([#1513](https://github.com/open-mmlab/mmpretrain/pull/1513)) +- Update MMagic link ([#1517](https://github.com/open-mmlab/mmpretrain/pull/1517)) +- Fix example_project README ([#1575](https://github.com/open-mmlab/mmpretrain/pull/1575)) +- Add NPU support page ([#1481](https://github.com/open-mmlab/mmpretrain/pull/1481)) +- train cfg: Removed old description ([#1473](https://github.com/open-mmlab/mmpretrain/pull/1473)) +- Fix typo in MultiLabelDataset docstring ([#1483](https://github.com/open-mmlab/mmpretrain/pull/1483)) + +## v1.0.0rc7(07/04/2023) + +### Highlights + +- Integrated Self-supervised learning algorithms from **MMSelfSup**, such as **MAE**, **BEiT**, etc. +- Support **RIFormer**, a simple but effective vision backbone by removing token mixer. +- Support **LeViT**, **XCiT**, **ViG** and **ConvNeXt-V2** backbone. +- Add t-SNE visualization. +- Refactor dataset pipeline visualization. +- Support confusion matrix calculation and plot. + +### New Features + +- Support RIFormer. ([#1453](https://github.com/open-mmlab/mmpretrain/pull/1453)) +- Support XCiT Backbone. ([#1305](https://github.com/open-mmlab/mmclassification/pull/1305)) +- Support calculate confusion matrix and plot it. ([#1287](https://github.com/open-mmlab/mmclassification/pull/1287)) +- Support RetrieverRecall metric & Add ArcFace config ([#1316](https://github.com/open-mmlab/mmclassification/pull/1316)) +- Add `ImageClassificationInferencer`. ([#1261](https://github.com/open-mmlab/mmclassification/pull/1261)) +- Support InShop Dataset (Image Retrieval). ([#1019](https://github.com/open-mmlab/mmclassification/pull/1019)) +- Support LeViT backbone. ([#1238](https://github.com/open-mmlab/mmclassification/pull/1238)) +- Support VIG Backbone. ([#1304](https://github.com/open-mmlab/mmclassification/pull/1304)) +- Support ConvNeXt-V2 backbone. 
([#1294](https://github.com/open-mmlab/mmclassification/pull/1294)) + +### Improvements + +- Use PyTorch official `scaled_dot_product_attention` to accelerate `MultiheadAttention`. ([#1434](https://github.com/open-mmlab/mmpretrain/pull/1434)) +- Add ln to vit avg_featmap output ([#1447](https://github.com/open-mmlab/mmpretrain/pull/1447)) +- Update analysis tools and documentations. ([#1359](https://github.com/open-mmlab/mmclassification/pull/1359)) +- Unify the `--out` and `--dump` in `tools/test.py`. ([#1307](https://github.com/open-mmlab/mmclassification/pull/1307)) +- Enable to toggle whether Gem Pooling is trainable or not. ([#1246](https://github.com/open-mmlab/mmclassification/pull/1246)) +- Update registries of mmcls. ([#1306](https://github.com/open-mmlab/mmclassification/pull/1306)) +- Add metafile fill and validation tools. ([#1297](https://github.com/open-mmlab/mmclassification/pull/1297)) +- Remove useless EfficientnetV2 config files. ([#1300](https://github.com/open-mmlab/mmclassification/pull/1300)) + +### Bug Fixes + +- Fix precise bn hook ([#1466](https://github.com/open-mmlab/mmpretrain/pull/1466)) +- Fix retrieval multi gpu bug ([#1319](https://github.com/open-mmlab/mmclassification/pull/1319)) +- Fix error repvgg-deploy base config path. ([#1357](https://github.com/open-mmlab/mmclassification/pull/1357)) +- Fix bug in test tools. ([#1309](https://github.com/open-mmlab/mmclassification/pull/1309)) + +### Docs Update + +- Translate some tools tutorials to Chinese. ([#1321](https://github.com/open-mmlab/mmclassification/pull/1321)) +- Add Chinese translation for runtime.md. ([#1313](https://github.com/open-mmlab/mmclassification/pull/1313)) + +# Changelog (MMClassification) + +## v1.0.0rc5(30/12/2022) + +### Highlights + +- Support EVA, RevViT, EfficientnetV2, CLIP, TinyViT and MixMIM backbones. +- Reproduce the training accuracy of ConvNeXt and RepVGG. +- Support multi-task training and testing. +- Support Test-time Augmentation. + +### New Features + +- [Feature] Add EfficientnetV2 Backbone. ([#1253](https://github.com/open-mmlab/mmclassification/pull/1253)) +- [Feature] Support TTA and add `--tta` in `tools/test.py`. ([#1161](https://github.com/open-mmlab/mmclassification/pull/1161)) +- [Feature] Support Multi-task. ([#1229](https://github.com/open-mmlab/mmclassification/pull/1229)) +- [Feature] Add clip backbone. ([#1258](https://github.com/open-mmlab/mmclassification/pull/1258)) +- [Feature] Add mixmim backbone with checkpoints. ([#1224](https://github.com/open-mmlab/mmclassification/pull/1224)) +- [Feature] Add TinyViT for dev-1.x. ([#1042](https://github.com/open-mmlab/mmclassification/pull/1042)) +- [Feature] Add some scripts for development. ([#1257](https://github.com/open-mmlab/mmclassification/pull/1257)) +- [Feature] Support EVA. ([#1239](https://github.com/open-mmlab/mmclassification/pull/1239)) +- [Feature] Implementation of RevViT. ([#1127](https://github.com/open-mmlab/mmclassification/pull/1127)) + +### Improvements + +- [Reproduce] Reproduce RepVGG Training Accuracy. ([#1264](https://github.com/open-mmlab/mmclassification/pull/1264)) +- [Enhance] Support ConvNeXt More Weights. ([#1240](https://github.com/open-mmlab/mmclassification/pull/1240)) +- [Reproduce] Update ConvNeXt config files. ([#1256](https://github.com/open-mmlab/mmclassification/pull/1256)) +- [CI] Update CI to test PyTorch 1.13.0. ([#1260](https://github.com/open-mmlab/mmclassification/pull/1260)) +- [Project] Add ACCV workshop 1st Solution. 
([#1245](https://github.com/open-mmlab/mmclassification/pull/1245)) +- [Project] Add Example project. ([#1254](https://github.com/open-mmlab/mmclassification/pull/1254)) + +### Bug Fixes + +- [Fix] Fix imports in transforms. ([#1255](https://github.com/open-mmlab/mmclassification/pull/1255)) +- [Fix] Fix CAM visualization. ([#1248](https://github.com/open-mmlab/mmclassification/pull/1248)) +- [Fix] Fix the requirements and lazy register mmpretrain models. ([#1275](https://github.com/open-mmlab/mmclassification/pull/1275)) + +## v1.0.0rc4(06/12/2022) + +### Highlights + +- Upgrade API to get pre-defined models of MMClassification. See [#1236](https://github.com/open-mmlab/mmclassification/pull/1236) for more details. +- Refactor BEiT backbone and support v1/v2 inference. See [#1144](https://github.com/open-mmlab/mmclassification/pull/1144). + +### New Features + +- Support getting model from the name defined in the model-index file. ([#1236](https://github.com/open-mmlab/mmclassification/pull/1236)) + +### Improvements + +- Support evaluate on both EMA and non-EMA models. ([#1204](https://github.com/open-mmlab/mmclassification/pull/1204)) +- Refactor BEiT backbone and support v1/v2 inference. ([#1144](https://github.com/open-mmlab/mmclassification/pull/1144)) + +### Bug Fixes + +- Fix `reparameterize_model.py` doesn't save meta info. ([#1221](https://github.com/open-mmlab/mmclassification/pull/1221)) +- Fix dict update in BEiT. ([#1234](https://github.com/open-mmlab/mmclassification/pull/1234)) + +### Docs Update + +- Update install tutorial. ([#1223](https://github.com/open-mmlab/mmclassification/pull/1223)) +- Update MobileNetv2 & MobileNetv3 readme. ([#1222](https://github.com/open-mmlab/mmclassification/pull/1222)) +- Add version selection in the banner. ([#1217](https://github.com/open-mmlab/mmclassification/pull/1217)) + +## v1.0.0rc3(21/11/2022) + +### Highlights + +- Add **Switch Recipe** Hook, Now we can modify training pipeline, mixup and loss settings during training, see [#1101](https://github.com/open-mmlab/mmclassification/pull/1101). +- Add **TIMM and HuggingFace** wrappers. Now you can train/use models in TIMM/HuggingFace directly, see [#1102](https://github.com/open-mmlab/mmclassification/pull/1102). +- Support **retrieval tasks**, see [#1055](https://github.com/open-mmlab/mmclassification/pull/1055). +- Reproduce **mobileone** training accuracy. See [#1191](https://github.com/open-mmlab/mmclassification/pull/1191) + +### New Features + +- Add checkpoints from EfficientNets NoisyStudent & L2. ([#1122](https://github.com/open-mmlab/mmclassification/pull/1122)) +- Migrate CSRA head to 1.x. ([#1177](https://github.com/open-mmlab/mmclassification/pull/1177)) +- Support RepLKnet backbone. ([#1129](https://github.com/open-mmlab/mmclassification/pull/1129)) +- Add Switch Recipe Hook. ([#1101](https://github.com/open-mmlab/mmclassification/pull/1101)) +- Add adan optimizer. ([#1180](https://github.com/open-mmlab/mmclassification/pull/1180)) +- Support DaViT. ([#1105](https://github.com/open-mmlab/mmclassification/pull/1105)) +- Support Activation Checkpointing for ConvNeXt. ([#1153](https://github.com/open-mmlab/mmclassification/pull/1153)) +- Add TIMM and HuggingFace wrappers to build classifiers from them directly. ([#1102](https://github.com/open-mmlab/mmclassification/pull/1102)) +- Add reduction for neck ([#978](https://github.com/open-mmlab/mmclassification/pull/978)) +- Support HorNet Backbone for dev1.x. 
([#1094](https://github.com/open-mmlab/mmclassification/pull/1094)) +- Add arcface head. ([#926](https://github.com/open-mmlab/mmclassification/pull/926)) +- Add Base Retriever and Image2Image Retriever for retrieval tasks. ([#1055](https://github.com/open-mmlab/mmclassification/pull/1055)) +- Support MobileViT backbone. ([#1068](https://github.com/open-mmlab/mmclassification/pull/1068)) + +### Improvements + +- [Enhance] Enhance ArcFaceClsHead. ([#1181](https://github.com/open-mmlab/mmclassification/pull/1181)) +- [Refactor] Refactor to use new fileio API in MMEngine. ([#1176](https://github.com/open-mmlab/mmclassification/pull/1176)) +- [Enhance] Reproduce mobileone training accuracy. ([#1191](https://github.com/open-mmlab/mmclassification/pull/1191)) +- [Enhance] add deleting params info in swinv2. ([#1142](https://github.com/open-mmlab/mmclassification/pull/1142)) +- [Enhance] Add more mobilenetv3 pretrains. ([#1154](https://github.com/open-mmlab/mmclassification/pull/1154)) +- [Enhancement] RepVGG for YOLOX-PAI for dev-1.x. ([#1126](https://github.com/open-mmlab/mmclassification/pull/1126)) +- [Improve] Speed up data preprocessor. ([#1064](https://github.com/open-mmlab/mmclassification/pull/1064)) + +### Bug Fixes + +- Fix the torchserve. ([#1143](https://github.com/open-mmlab/mmclassification/pull/1143)) +- Fix configs due to api refactor of `num_classes`. ([#1184](https://github.com/open-mmlab/mmclassification/pull/1184)) +- Update mmpretrain2torchserve. ([#1189](https://github.com/open-mmlab/mmclassification/pull/1189)) +- Fix for `inference_model` cannot get classes information in checkpoint. ([#1093](https://github.com/open-mmlab/mmclassification/pull/1093)) + +### Docs Update + +- Add not-found page extension. ([#1207](https://github.com/open-mmlab/mmclassification/pull/1207)) +- update visualization doc. ([#1160](https://github.com/open-mmlab/mmclassification/pull/1160)) +- Support sort and search the Model Summary table. ([#1100](https://github.com/open-mmlab/mmclassification/pull/1100)) +- Improve the ResNet model page. ([#1118](https://github.com/open-mmlab/mmclassification/pull/1118)) +- update the readme of convnext. ([#1156](https://github.com/open-mmlab/mmclassification/pull/1156)) +- Fix the installation docs link in README. ([#1164](https://github.com/open-mmlab/mmclassification/pull/1164)) +- Improve ViT and MobileViT model pages. ([#1155](https://github.com/open-mmlab/mmclassification/pull/1155)) +- Improve Swin Doc and Add Tabs enxtation. ([#1145](https://github.com/open-mmlab/mmclassification/pull/1145)) +- Add MMEval projects link in README. ([#1162](https://github.com/open-mmlab/mmclassification/pull/1162)) +- Add runtime configuration docs. ([#1128](https://github.com/open-mmlab/mmclassification/pull/1128)) +- Add custom evaluation docs ([#1130](https://github.com/open-mmlab/mmclassification/pull/1130)) +- Add custom pipeline docs. ([#1124](https://github.com/open-mmlab/mmclassification/pull/1124)) +- Add MMYOLO projects link in MMCLS1.x. ([#1117](https://github.com/open-mmlab/mmclassification/pull/1117)) + +## v1.0.0rc2(12/10/2022) + +### New Features + +- [Feature] Support DeiT3. ([#1065](https://github.com/open-mmlab/mmclassification/pull/1065)) + +### Improvements + +- [Enhance] Update `analyze_results.py` for dev-1.x. ([#1071](https://github.com/open-mmlab/mmclassification/pull/1071)) +- [Enhance] Get scores from inference api. ([#1070](https://github.com/open-mmlab/mmclassification/pull/1070)) + +### Bug Fixes + +- [Fix] Update requirements. 
([#1083](https://github.com/open-mmlab/mmclassification/pull/1083)) + +### Docs Update + +- [Docs] Add 1x docs schedule. ([#1015](https://github.com/open-mmlab/mmclassification/pull/1015)) + +## v1.0.0rc1(30/9/2022) + +### New Features + +- Support MViT for MMCLS 1.x ([#1023](https://github.com/open-mmlab/mmclassification/pull/1023)) +- Add ViT huge architecture. ([#1049](https://github.com/open-mmlab/mmclassification/pull/1049)) +- Support EdgeNeXt for dev-1.x. ([#1037](https://github.com/open-mmlab/mmclassification/pull/1037)) +- Support Swin Transformer V2 for MMCLS 1.x. ([#1029](https://github.com/open-mmlab/mmclassification/pull/1029)) +- Add efficientformer Backbone for MMCls 1.x. ([#1031](https://github.com/open-mmlab/mmclassification/pull/1031)) +- Add MobileOne Backbone For MMCls 1.x. ([#1030](https://github.com/open-mmlab/mmclassification/pull/1030)) +- Support BEiT Transformer layer. ([#919](https://github.com/open-mmlab/mmclassification/pull/919)) + +### Improvements + +- [Refactor] Fix visualization tools. ([#1045](https://github.com/open-mmlab/mmclassification/pull/1045)) +- [Improve] Update benchmark scripts ([#1028](https://github.com/open-mmlab/mmclassification/pull/1028)) +- [Improve] Update tools to enable `pin_memory` and `persistent_workers` by default. ([#1024](https://github.com/open-mmlab/mmclassification/pull/1024)) +- [CI] Update circle-ci and github workflow. ([#1018](https://github.com/open-mmlab/mmclassification/pull/1018)) + +### Bug Fixes + +- Fix verify dataset tool in 1.x. ([#1062](https://github.com/open-mmlab/mmclassification/pull/1062)) +- Fix `loss_weight` in `LabelSmoothLoss`. ([#1058](https://github.com/open-mmlab/mmclassification/pull/1058)) +- Fix the output position of Swin-Transformer. ([#947](https://github.com/open-mmlab/mmclassification/pull/947)) + +### Docs Update + +- Auto generate model summary table. ([#1010](https://github.com/open-mmlab/mmclassification/pull/1010)) +- Refactor new modules tutorial. ([#998](https://github.com/open-mmlab/mmclassification/pull/998)) + +## v1.0.0rc0(31/8/2022) + +MMClassification 1.0.0rc0 is the first version of MMClassification 1.x, a part of the OpenMMLab 2.0 projects. + +Built upon the new [training engine](https://github.com/open-mmlab/mmengine), MMClassification 1.x unifies the interfaces of dataset, models, evaluation, and visualization. + +And there are some BC-breaking changes. Please check [the migration tutorial](https://mmclassification.readthedocs.io/en/1.x/migration.html) for more details. + +## v0.23.1(2/6/2022) + +### New Features + +- Dedicated MMClsWandbHook for MMClassification (Weights and Biases Integration) ([#764](https://github.com/open-mmlab/mmclassification/pull/764)) + +### Improvements + +- Use mdformat instead of markdownlint to format markdown. ([#844](https://github.com/open-mmlab/mmclassification/pull/844)) + +### Bug Fixes + +- Fix wrong `--local_rank`. + +### Docs Update + +- Update install tutorials. ([#854](https://github.com/open-mmlab/mmclassification/pull/854)) +- Fix wrong link in README. ([#835](https://github.com/open-mmlab/mmclassification/pull/835)) + +## v0.23.0(1/5/2022) + +### New Features + +- Support DenseNet. ([#750](https://github.com/open-mmlab/mmclassification/pull/750)) +- Support VAN. ([#739](https://github.com/open-mmlab/mmclassification/pull/739)) + +### Improvements + +- Support training on IPU and add fine-tuning configs of ViT. 
([#723](https://github.com/open-mmlab/mmclassification/pull/723)) + +### Docs Update + +- New style API reference, and easier to use! Welcome [view it](https://mmclassification.readthedocs.io/en/master/api/models.html). ([#774](https://github.com/open-mmlab/mmclassification/pull/774)) + +## v0.22.1(15/4/2022) + +### New Features + +- [Feature] Support resize relative position embedding in `SwinTransformer`. ([#749](https://github.com/open-mmlab/mmclassification/pull/749)) +- [Feature] Add PoolFormer backbone and checkpoints. ([#746](https://github.com/open-mmlab/mmclassification/pull/746)) + +### Improvements + +- [Enhance] Improve CPE performance by reduce memory copy. ([#762](https://github.com/open-mmlab/mmclassification/pull/762)) +- [Enhance] Add extra dataloader settings in configs. ([#752](https://github.com/open-mmlab/mmclassification/pull/752)) + +## v0.22.0(30/3/2022) + +### Highlights + +- Support a series of CSP Network, such as CSP-ResNet, CSP-ResNeXt and CSP-DarkNet. +- A new `CustomDataset` class to help you build dataset of yourself! +- Support ConvMixer, RepMLP and new dataset - CUB dataset. + +### New Features + +- [Feature] Add CSPNet and backbone and checkpoints ([#735](https://github.com/open-mmlab/mmclassification/pull/735)) +- [Feature] Add `CustomDataset`. ([#738](https://github.com/open-mmlab/mmclassification/pull/738)) +- [Feature] Add diff seeds to diff ranks. ([#744](https://github.com/open-mmlab/mmclassification/pull/744)) +- [Feature] Support ConvMixer. ([#716](https://github.com/open-mmlab/mmclassification/pull/716)) +- [Feature] Our `dist_train` & `dist_test` tools support distributed training on multiple machines. ([#734](https://github.com/open-mmlab/mmclassification/pull/734)) +- [Feature] Add RepMLP backbone and checkpoints. ([#709](https://github.com/open-mmlab/mmclassification/pull/709)) +- [Feature] Support CUB dataset. ([#703](https://github.com/open-mmlab/mmclassification/pull/703)) +- [Feature] Support ResizeMix. ([#676](https://github.com/open-mmlab/mmclassification/pull/676)) + +### Improvements + +- [Enhance] Use `--a-b` instead of `--a_b` in arguments. ([#754](https://github.com/open-mmlab/mmclassification/pull/754)) +- [Enhance] Add `get_cat_ids` and `get_gt_labels` to KFoldDataset. ([#721](https://github.com/open-mmlab/mmclassification/pull/721)) +- [Enhance] Set torch seed in `worker_init_fn`. ([#733](https://github.com/open-mmlab/mmclassification/pull/733)) + +### Bug Fixes + +- [Fix] Fix the discontiguous output feature map of ConvNeXt. ([#743](https://github.com/open-mmlab/mmclassification/pull/743)) + +### Docs Update + +- [Docs] Add brief installation steps in README for copy&paste. ([#755](https://github.com/open-mmlab/mmclassification/pull/755)) +- [Docs] fix logo url link from mmocr to mmpretrain. ([#732](https://github.com/open-mmlab/mmclassification/pull/732)) + +## v0.21.0(04/03/2022) + +### Highlights + +- Support ResNetV1c and Wide-ResNet, and provide pre-trained models. +- Support dynamic input shape for ViT-based algorithms. Now our ViT, DeiT, Swin-Transformer and T2T-ViT support forwarding with any input shape. +- Reproduce training results of DeiT. And our DeiT-T and DeiT-S have higher accuracy comparing with the official weights. + +### New Features + +- Add ResNetV1c. ([#692](https://github.com/open-mmlab/mmclassification/pull/692)) +- Support Wide-ResNet. 
([#715](https://github.com/open-mmlab/mmclassification/pull/715)) +- Support gem pooling ([#677](https://github.com/open-mmlab/mmclassification/pull/677)) + +### Improvements + +- Reproduce training results of DeiT. ([#711](https://github.com/open-mmlab/mmclassification/pull/711)) +- Add ConvNeXt pretrain models on ImageNet-1k. ([#707](https://github.com/open-mmlab/mmclassification/pull/707)) +- Support dynamic input shape for ViT-based algorithms. ([#706](https://github.com/open-mmlab/mmclassification/pull/706)) +- Add `evaluate` function for ConcatDataset. ([#650](https://github.com/open-mmlab/mmclassification/pull/650)) +- Enhance vis-pipeline tool. ([#604](https://github.com/open-mmlab/mmclassification/pull/604)) +- Return code 1 if scripts runs failed. ([#694](https://github.com/open-mmlab/mmclassification/pull/694)) +- Use PyTorch official `one_hot` to implement `convert_to_one_hot`. ([#696](https://github.com/open-mmlab/mmclassification/pull/696)) +- Add a new pre-commit-hook to automatically add a copyright. ([#710](https://github.com/open-mmlab/mmclassification/pull/710)) +- Add deprecation message for deploy tools. ([#697](https://github.com/open-mmlab/mmclassification/pull/697)) +- Upgrade isort pre-commit hooks. ([#687](https://github.com/open-mmlab/mmclassification/pull/687)) +- Use `--gpu-id` instead of `--gpu-ids` in non-distributed multi-gpu training/testing. ([#688](https://github.com/open-mmlab/mmclassification/pull/688)) +- Remove deprecation. ([#633](https://github.com/open-mmlab/mmclassification/pull/633)) + +### Bug Fixes + +- Fix Conformer forward with irregular input size. ([#686](https://github.com/open-mmlab/mmclassification/pull/686)) +- Add `dist.barrier` to fix a bug in directory checking. ([#666](https://github.com/open-mmlab/mmclassification/pull/666)) + +## v0.20.1(07/02/2022) + +### Bug Fixes + +- Fix the MMCV dependency version. + +## v0.20.0(30/01/2022) + +### Highlights + +- Support K-fold cross-validation. The tutorial will be released later. +- Support HRNet, ConvNeXt, Twins and EfficientNet. +- Support model conversion from PyTorch to Core-ML by a tool. + +### New Features + +- Support K-fold cross-validation. ([#563](https://github.com/open-mmlab/mmclassification/pull/563)) +- Support HRNet and add pre-trained models. ([#660](https://github.com/open-mmlab/mmclassification/pull/660)) +- Support ConvNeXt and add pre-trained models. ([#670](https://github.com/open-mmlab/mmclassification/pull/670)) +- Support Twins and add pre-trained models. ([#642](https://github.com/open-mmlab/mmclassification/pull/642)) +- Support EfficientNet and add pre-trained models.([#649](https://github.com/open-mmlab/mmclassification/pull/649)) +- Support `features_only` option in `TIMMBackbone`. ([#668](https://github.com/open-mmlab/mmclassification/pull/668)) +- Add conversion script from pytorch to Core-ML model. ([#597](https://github.com/open-mmlab/mmclassification/pull/597)) + +### Improvements + +- New-style CPU training and inference. ([#674](https://github.com/open-mmlab/mmclassification/pull/674)) +- Add setup multi-processing both in train and test. ([#671](https://github.com/open-mmlab/mmclassification/pull/671)) +- Rewrite channel split operation in ShufflenetV2. ([#632](https://github.com/open-mmlab/mmclassification/pull/632)) +- Deprecate the support for "python setup.py test". ([#646](https://github.com/open-mmlab/mmclassification/pull/646)) +- Support single-label, softmax, custom eps by asymmetric loss. 
([#609](https://github.com/open-mmlab/mmclassification/pull/609)) +- Save class names in best checkpoint created by evaluation hook. ([#641](https://github.com/open-mmlab/mmclassification/pull/641)) + +### Bug Fixes + +- Fix potential unexcepted behaviors if `metric_options` is not specified in multi-label evaluation. ([#647](https://github.com/open-mmlab/mmclassification/pull/647)) +- Fix API changes in `pytorch-grad-cam>=1.3.7`. ([#656](https://github.com/open-mmlab/mmclassification/pull/656)) +- Fix bug which breaks `cal_train_time` in `analyze_logs.py`. ([#662](https://github.com/open-mmlab/mmclassification/pull/662)) + +### Docs Update + +- Update README in configs according to OpenMMLab standard. ([#672](https://github.com/open-mmlab/mmclassification/pull/672)) +- Update installation guide and README. ([#624](https://github.com/open-mmlab/mmclassification/pull/624)) + +## v0.19.0(31/12/2021) + +### Highlights + +- The feature extraction function has been enhanced. See [#593](https://github.com/open-mmlab/mmclassification/pull/593) for more details. +- Provide the high-acc ResNet-50 training settings from [*ResNet strikes back*](https://arxiv.org/abs/2110.00476). +- Reproduce the training accuracy of T2T-ViT & RegNetX, and provide self-training checkpoints. +- Support DeiT & Conformer backbone and checkpoints. +- Provide a CAM visualization tool based on [pytorch-grad-cam](https://github.com/jacobgil/pytorch-grad-cam), and detailed [user guide](https://mmclassification.readthedocs.io/en/latest/tools/visualization.html#class-activation-map-visualization)! + +### New Features + +- Support Precise BN. ([#401](https://github.com/open-mmlab/mmclassification/pull/401)) +- Add CAM visualization tool. ([#577](https://github.com/open-mmlab/mmclassification/pull/577)) +- Repeated Aug and Sampler Registry. ([#588](https://github.com/open-mmlab/mmclassification/pull/588)) +- Add DeiT backbone and checkpoints. ([#576](https://github.com/open-mmlab/mmclassification/pull/576)) +- Support LAMB optimizer. ([#591](https://github.com/open-mmlab/mmclassification/pull/591)) +- Implement the conformer backbone. ([#494](https://github.com/open-mmlab/mmclassification/pull/494)) +- Add the frozen function for Swin Transformer model. ([#574](https://github.com/open-mmlab/mmclassification/pull/574)) +- Support using checkpoint in Swin Transformer to save memory. ([#557](https://github.com/open-mmlab/mmclassification/pull/557)) + +### Improvements + +- [Reproduction] Reproduce RegNetX training accuracy. ([#587](https://github.com/open-mmlab/mmclassification/pull/587)) +- [Reproduction] Reproduce training results of T2T-ViT. ([#610](https://github.com/open-mmlab/mmclassification/pull/610)) +- [Enhance] Provide high-acc training settings of ResNet. ([#572](https://github.com/open-mmlab/mmclassification/pull/572)) +- [Enhance] Set a random seed when the user does not set a seed. ([#554](https://github.com/open-mmlab/mmclassification/pull/554)) +- [Enhance] Added `NumClassCheckHook` and unit tests. ([#559](https://github.com/open-mmlab/mmclassification/pull/559)) +- [Enhance] Enhance feature extraction function. ([#593](https://github.com/open-mmlab/mmclassification/pull/593)) +- [Enhance] Improve efficiency of precision, recall, f1_score and support. ([#595](https://github.com/open-mmlab/mmclassification/pull/595)) +- [Enhance] Improve accuracy calculation performance. ([#592](https://github.com/open-mmlab/mmclassification/pull/592)) +- [Refactor] Refactor `analysis_log.py`. 
([#529](https://github.com/open-mmlab/mmclassification/pull/529)) +- [Refactor] Use new API of matplotlib to handle blocking input in visualization. ([#568](https://github.com/open-mmlab/mmclassification/pull/568)) +- [CI] Cancel previous runs that are not completed. ([#583](https://github.com/open-mmlab/mmclassification/pull/583)) +- [CI] Skip build CI if only configs or docs modification. ([#575](https://github.com/open-mmlab/mmclassification/pull/575)) + +### Bug Fixes + +- Fix test sampler bug. ([#611](https://github.com/open-mmlab/mmclassification/pull/611)) +- Try to create a symbolic link, otherwise copy. ([#580](https://github.com/open-mmlab/mmclassification/pull/580)) +- Fix a bug for multiple output in swin transformer. ([#571](https://github.com/open-mmlab/mmclassification/pull/571)) + +### Docs Update + +- Update mmcv, torch, cuda version in Dockerfile and docs. ([#594](https://github.com/open-mmlab/mmclassification/pull/594)) +- Add analysis&misc docs. ([#525](https://github.com/open-mmlab/mmclassification/pull/525)) +- Fix docs build dependency. ([#584](https://github.com/open-mmlab/mmclassification/pull/584)) + +## v0.18.0(30/11/2021) + +### Highlights + +- Support MLP-Mixer backbone and provide pre-trained checkpoints. +- Add a tool to visualize the learning rate curve of the training phase. Welcome to use with the [tutorial](https://mmclassification.readthedocs.io/en/latest/tools/visualization.html#learning-rate-schedule-visualization)! + +### New Features + +- Add MLP Mixer Backbone. ([#528](https://github.com/open-mmlab/mmclassification/pull/528), [#539](https://github.com/open-mmlab/mmclassification/pull/539)) +- Support positive weights in BCE. ([#516](https://github.com/open-mmlab/mmclassification/pull/516)) +- Add a tool to visualize learning rate in each iterations. ([#498](https://github.com/open-mmlab/mmclassification/pull/498)) + +### Improvements + +- Use CircleCI to do unit tests. ([#567](https://github.com/open-mmlab/mmclassification/pull/567)) +- Focal loss for single label tasks. ([#548](https://github.com/open-mmlab/mmclassification/pull/548)) +- Remove useless `import_modules_from_string`. ([#544](https://github.com/open-mmlab/mmclassification/pull/544)) +- Rename config files according to the config name standard. ([#508](https://github.com/open-mmlab/mmclassification/pull/508)) +- Use `reset_classifier` to remove head of timm backbones. ([#534](https://github.com/open-mmlab/mmclassification/pull/534)) +- Support passing arguments to loss from head. ([#523](https://github.com/open-mmlab/mmclassification/pull/523)) +- Refactor `Resize` transform and add `Pad` transform. ([#506](https://github.com/open-mmlab/mmclassification/pull/506)) +- Update mmcv dependency version. ([#509](https://github.com/open-mmlab/mmclassification/pull/509)) + +### Bug Fixes + +- Fix bug when using `ClassBalancedDataset`. ([#555](https://github.com/open-mmlab/mmclassification/pull/555)) +- Fix a bug when using iter-based runner with 'val' workflow. ([#542](https://github.com/open-mmlab/mmclassification/pull/542)) +- Fix interpolation method checking in `Resize`. ([#547](https://github.com/open-mmlab/mmclassification/pull/547)) +- Fix a bug when load checkpoints in mulit-GPUs environment. ([#527](https://github.com/open-mmlab/mmclassification/pull/527)) +- Fix an error on indexing scalar metrics in `analyze_result.py`. ([#518](https://github.com/open-mmlab/mmclassification/pull/518)) +- Fix wrong condition judgment in `analyze_logs.py` and prevent empty curve. 
([#510](https://github.com/open-mmlab/mmclassification/pull/510)) + +### Docs Update + +- Fix vit config and model broken links. ([#564](https://github.com/open-mmlab/mmclassification/pull/564)) +- Add abstract and image for every paper. ([#546](https://github.com/open-mmlab/mmclassification/pull/546)) +- Add mmflow and mim in banner and readme. ([#543](https://github.com/open-mmlab/mmclassification/pull/543)) +- Add schedule and runtime tutorial docs. ([#499](https://github.com/open-mmlab/mmclassification/pull/499)) +- Add the top-5 acc in ResNet-CIFAR README. ([#531](https://github.com/open-mmlab/mmclassification/pull/531)) +- Fix TOC of `visualization.md` and add example images. ([#513](https://github.com/open-mmlab/mmclassification/pull/513)) +- Use docs link of other projects and add MMCV docs. ([#511](https://github.com/open-mmlab/mmclassification/pull/511)) + +## v0.17.0(29/10/2021) + +### Highlights + +- Support Tokens-to-Token ViT backbone and Res2Net backbone. Welcome to use! +- Support ImageNet21k dataset. +- Add a pipeline visualization tool. Try it with the [tutorials](https://mmclassification.readthedocs.io/en/latest/tools/visualization.html#pipeline-visualization)! + +### New Features + +- Add Tokens-to-Token ViT backbone and converted checkpoints. ([#467](https://github.com/open-mmlab/mmclassification/pull/467)) +- Add Res2Net backbone and converted weights. ([#465](https://github.com/open-mmlab/mmclassification/pull/465)) +- Support ImageNet21k dataset. ([#461](https://github.com/open-mmlab/mmclassification/pull/461)) +- Support seesaw loss. ([#500](https://github.com/open-mmlab/mmclassification/pull/500)) +- Add a pipeline visualization tool. ([#406](https://github.com/open-mmlab/mmclassification/pull/406)) +- Add a tool to find broken files. ([#482](https://github.com/open-mmlab/mmclassification/pull/482)) +- Add a tool to test TorchServe. ([#468](https://github.com/open-mmlab/mmclassification/pull/468)) + +### Improvements + +- Refator Vision Transformer. ([#395](https://github.com/open-mmlab/mmclassification/pull/395)) +- Use context manager to reuse matplotlib figures. ([#432](https://github.com/open-mmlab/mmclassification/pull/432)) + +### Bug Fixes + +- Remove `DistSamplerSeedHook` if use `IterBasedRunner`. ([#501](https://github.com/open-mmlab/mmclassification/pull/501)) +- Set the priority of `EvalHook` to "LOW" to avoid a bug when using `IterBasedRunner`. ([#488](https://github.com/open-mmlab/mmclassification/pull/488)) +- Fix a wrong parameter of `get_root_logger` in `apis/train.py`. ([#486](https://github.com/open-mmlab/mmclassification/pull/486)) +- Fix version check in dataset builder. ([#474](https://github.com/open-mmlab/mmclassification/pull/474)) + +### Docs Update + +- Add English Colab tutorials and update Chinese Colab tutorials. ([#483](https://github.com/open-mmlab/mmclassification/pull/483), [#497](https://github.com/open-mmlab/mmclassification/pull/497)) +- Add tutuorial for config files. ([#487](https://github.com/open-mmlab/mmclassification/pull/487)) +- Add model-pages in Model Zoo. ([#480](https://github.com/open-mmlab/mmclassification/pull/480)) +- Add code-spell pre-commit hook and fix a large mount of typos. ([#470](https://github.com/open-mmlab/mmclassification/pull/470)) + +## v0.16.0(30/9/2021) + +### Highlights + +- We have improved compatibility with downstream repositories like MMDetection and MMSegmentation. We will add some examples about how to use our backbones in MMDetection. +- Add RepVGG backbone and checkpoints. 
Welcome to use it! +- Add timm backbones wrapper, now you can simply use backbones of pytorch-image-models in MMClassification! + +### New Features + +- Add RepVGG backbone and checkpoints. ([#414](https://github.com/open-mmlab/mmclassification/pull/414)) +- Add timm backbones wrapper. ([#427](https://github.com/open-mmlab/mmclassification/pull/427)) + +### Improvements + +- Fix TnT compatibility and verbose warning. ([#436](https://github.com/open-mmlab/mmclassification/pull/436)) +- Support setting `--out-items` in `tools/test.py`. ([#437](https://github.com/open-mmlab/mmclassification/pull/437)) +- Add datetime info and saving model using torch\<1.6 format. ([#439](https://github.com/open-mmlab/mmclassification/pull/439)) +- Improve downstream repositories compatibility. ([#421](https://github.com/open-mmlab/mmclassification/pull/421)) +- Rename the option `--options` to `--cfg-options` in some tools. ([#425](https://github.com/open-mmlab/mmclassification/pull/425)) +- Add PyTorch 1.9 and Python 3.9 build workflow, and remove some CI. ([#422](https://github.com/open-mmlab/mmclassification/pull/422)) + +### Bug Fixes + +- Fix format error in `test.py` when metric returns `np.ndarray`. ([#441](https://github.com/open-mmlab/mmclassification/pull/441)) +- Fix `publish_model` bug if no parent of `out_file`. ([#463](https://github.com/open-mmlab/mmclassification/pull/463)) +- Fix num_classes bug in pytorch2onnx.py. ([#458](https://github.com/open-mmlab/mmclassification/pull/458)) +- Fix missing runtime requirement `packaging`. ([#459](https://github.com/open-mmlab/mmclassification/pull/459)) +- Fix saving simplified model bug in ONNX export tool. ([#438](https://github.com/open-mmlab/mmclassification/pull/438)) + +### Docs Update + +- Update `getting_started.md` and `install.md`. And rewrite `finetune.md`. ([#466](https://github.com/open-mmlab/mmclassification/pull/466)) +- Use PyTorch style docs theme. ([#457](https://github.com/open-mmlab/mmclassification/pull/457)) +- Update metafile and Readme. ([#435](https://github.com/open-mmlab/mmclassification/pull/435)) +- Add `CITATION.cff`. ([#428](https://github.com/open-mmlab/mmclassification/pull/428)) + +## v0.15.0(31/8/2021) + +### Highlights + +- Support `hparams` argument in `AutoAugment` and `RandAugment` to provide hyperparameters for sub-policies. +- Support custom squeeze channels in `SELayer`. +- Support classwise weight in losses. + +### New Features + +- Add `hparams` argument in `AutoAugment` and `RandAugment` and some other improvement. ([#398](https://github.com/open-mmlab/mmclassification/pull/398)) +- Support classwise weight in losses. ([#388](https://github.com/open-mmlab/mmclassification/pull/388)) +- Enhance `SELayer` to support custom squeeze channels. ([#417](https://github.com/open-mmlab/mmclassification/pull/417)) + +### Code Refactor + +- Better result visualization. ([#419](https://github.com/open-mmlab/mmclassification/pull/419)) +- Use `post_process` function to handle pred result processing. ([#390](https://github.com/open-mmlab/mmclassification/pull/390)) +- Update `digit_version` function. ([#402](https://github.com/open-mmlab/mmclassification/pull/402)) +- Avoid albumentations to install both opencv and opencv-headless. ([#397](https://github.com/open-mmlab/mmclassification/pull/397)) +- Avoid unnecessary listdir when building ImageNet. ([#396](https://github.com/open-mmlab/mmclassification/pull/396)) +- Use dynamic mmcv download link in TorchServe dockerfile. 
([#387](https://github.com/open-mmlab/mmclassification/pull/387)) + +### Docs Improvement + +- Add readme of some algorithms and update meta yml. ([#418](https://github.com/open-mmlab/mmclassification/pull/418)) +- Add Copyright information. ([#413](https://github.com/open-mmlab/mmclassification/pull/413)) +- Fix typo 'metirc'. ([#411](https://github.com/open-mmlab/mmclassification/pull/411)) +- Update QQ group QR code. ([#393](https://github.com/open-mmlab/mmclassification/pull/393)) +- Add PR template and modify issue template. ([#380](https://github.com/open-mmlab/mmclassification/pull/380)) + +## v0.14.0(4/8/2021) + +### Highlights + +- Add transformer-in-transformer backbone and pretrain checkpoints, refers to [the paper](https://arxiv.org/abs/2103.00112). +- Add Chinese colab tutorial. +- Provide dockerfile to build mmpretrain dev docker image. + +### New Features + +- Add transformer in transformer backbone and pretrain checkpoints. ([#339](https://github.com/open-mmlab/mmclassification/pull/339)) +- Support mim, welcome to use mim to manage your mmpretrain project. ([#376](https://github.com/open-mmlab/mmclassification/pull/376)) +- Add Dockerfile. ([#365](https://github.com/open-mmlab/mmclassification/pull/365)) +- Add ResNeSt configs. ([#332](https://github.com/open-mmlab/mmclassification/pull/332)) + +### Improvements + +- Use the `presistent_works` option if available, to accelerate training. ([#349](https://github.com/open-mmlab/mmclassification/pull/349)) +- Add Chinese ipynb tutorial. ([#306](https://github.com/open-mmlab/mmclassification/pull/306)) +- Refactor unit tests. ([#321](https://github.com/open-mmlab/mmclassification/pull/321)) +- Support to test mmdet inference with mmpretrain backbone. ([#343](https://github.com/open-mmlab/mmclassification/pull/343)) +- Use zero as default value of `thrs` in metrics. ([#341](https://github.com/open-mmlab/mmclassification/pull/341)) + +### Bug Fixes + +- Fix ImageNet dataset annotation file parse bug. ([#370](https://github.com/open-mmlab/mmclassification/pull/370)) +- Fix docstring typo and init bug in ShuffleNetV1. ([#374](https://github.com/open-mmlab/mmclassification/pull/374)) +- Use local ATTENTION registry to avoid conflict with other repositories. ([#376](https://github.com/open-mmlab/mmclassification/pull/375)) +- Fix swin transformer config bug. ([#355](https://github.com/open-mmlab/mmclassification/pull/355)) +- Fix `patch_cfg` argument bug in SwinTransformer. ([#368](https://github.com/open-mmlab/mmclassification/pull/368)) +- Fix duplicate `init_weights` call in ViT init function. ([#373](https://github.com/open-mmlab/mmclassification/pull/373)) +- Fix broken `_base_` link in a resnet config. ([#361](https://github.com/open-mmlab/mmclassification/pull/361)) +- Fix vgg-19 model link missing. ([#363](https://github.com/open-mmlab/mmclassification/pull/363)) + +## v0.13.0(3/7/2021) + +- Support Swin-Transformer backbone and add training configs for Swin-Transformer on ImageNet. + +### New Features + +- Support Swin-Transformer backbone and add training configs for Swin-Transformer on ImageNet. (#271) +- Add pretained model of RegNetX. (#269) +- Support adding custom hooks in config file. (#305) +- Improve and add Chinese translation of `CONTRIBUTING.md` and all tools tutorials. (#320) +- Dump config before training. (#282) +- Add torchscript and torchserve deployment tools. (#279, #284) + +### Improvements + +- Improve test tools and add some new tools. 
(#322) +- Correct MobilenetV3 backbone structure and add pretained models. (#291) +- Refactor `PatchEmbed` and `HybridEmbed` as independent components. (#330) +- Refactor mixup and cutmix as `Augments` to support more functions. (#278) +- Refactor weights initialization method. (#270, #318, #319) +- Refactor `LabelSmoothLoss` to support multiple calculation formulas. (#285) + +### Bug Fixes + +- Fix bug for CPU training. (#286) +- Fix missing test data when `num_imgs` can not be evenly divided by `num_gpus`. (#299) +- Fix build compatible with pytorch v1.3-1.5. (#301) +- Fix `magnitude_std` bug in `RandAugment`. (#309) +- Fix bug when `samples_per_gpu` is 1. (#311) + +## v0.12.0(3/6/2021) + +- Finish adding Chinese tutorials and build Chinese documentation on readthedocs. +- Update ResNeXt checkpoints and ResNet checkpoints on CIFAR. + +### New Features + +- Improve and add Chinese translation of `data_pipeline.md` and `new_modules.md`. (#265) +- Build Chinese translation on readthedocs. (#267) +- Add an argument efficientnet_style to `RandomResizedCrop` and `CenterCrop`. (#268) + +### Improvements + +- Only allow directory operation when rank==0 when testing. (#258) +- Fix typo in `base_head`. (#274) +- Update ResNeXt checkpoints. (#283) + +### Bug Fixes + +- Add attribute `data.test` in MNIST configs. (#264) +- Download CIFAR/MNIST dataset only on rank 0. (#273) +- Fix MMCV version compatibility. (#276) +- Fix CIFAR color channels bug and update checkpoints in model zoo. (#280) + +## v0.11.1(21/5/2021) + +- Refine `new_dataset.md` and add Chinese translation of `finture.md`, `new_dataset.md`. + +### New Features + +- Add `dim` argument for `GlobalAveragePooling`. (#236) +- Add random noise to `RandAugment` magnitude. (#240) +- Refine `new_dataset.md` and add Chinese translation of `finture.md`, `new_dataset.md`. (#243) + +### Improvements + +- Refactor arguments passing for Heads. (#239) +- Allow more flexible `magnitude_range` in `RandAugment`. (#249) +- Inherits MMCV registry so that in the future OpenMMLab repos like MMDet and MMSeg could directly use the backbones supported in MMCls. (#252) + +### Bug Fixes + +- Fix typo in `analyze_results.py`. (#237) +- Fix typo in unittests. (#238) +- Check if specified tmpdir exists when testing to avoid deleting existing data. (#242 & #258) +- Add missing config files in `MANIFEST.in`. (#250 & #255) +- Use temporary directory under shared directory to collect results to avoid unavailability of temporary directory for multi-node testing. (#251) + +## v0.11.0(1/5/2021) + +- Support cutmix trick. +- Support random augmentation. +- Add `tools/deployment/test.py` as a ONNX runtime test tool. +- Support ViT backbone and add training configs for ViT on ImageNet. +- Add Chinese `README.md` and some Chinese tutorials. + +### New Features + +- Support cutmix trick. (#198) +- Add `simplify` option in `pytorch2onnx.py`. (#200) +- Support random augmentation. (#201) +- Add config and checkpoint for training ResNet on CIFAR-100. (#208) +- Add `tools/deployment/test.py` as a ONNX runtime test tool. (#212) +- Support ViT backbone and add training configs for ViT on ImageNet. (#214) +- Add finetuning configs for ViT on ImageNet. (#217) +- Add `device` option to support training on CPU. (#219) +- Add Chinese `README.md` and some Chinese tutorials. (#221) +- Add `metafile.yml` in configs to support interaction with paper with code(PWC) and MMCLI. (#225) +- Upload configs and converted checkpoints for ViT fintuning on ImageNet. 
(#230) + +### Improvements + +- Fix `LabelSmoothLoss` so that label smoothing and mixup could be enabled at the same time. (#203) +- Add `cal_acc` option in `ClsHead`. (#206) +- Check `CLASSES` in checkpoint to avoid unexpected key error. (#207) +- Check mmcv version when importing mmpretrain to ensure compatibility. (#209) +- Update `CONTRIBUTING.md` to align with that in MMCV. (#210) +- Change tags to html comments in configs README.md. (#226) +- Clean codes in ViT backbone. (#227) +- Reformat `pytorch2onnx.md` tutorial. (#229) +- Update `setup.py` to support MMCLI. (#232) + +### Bug Fixes + +- Fix missing `cutmix_prob` in ViT configs. (#220) +- Fix backend for resize in ResNeXt configs. (#222) + +## v0.10.0(1/4/2021) + +- Support AutoAugmentation +- Add tutorials for installation and usage. + +### New Features + +- Add `Rotate` pipeline for data augmentation. (#167) +- Add `Invert` pipeline for data augmentation. (#168) +- Add `Color` pipeline for data augmentation. (#171) +- Add `Solarize` and `Posterize` pipeline for data augmentation. (#172) +- Support fp16 training. (#178) +- Add tutorials for installation and basic usage of MMClassification.(#176) +- Support `AutoAugmentation`, `AutoContrast`, `Equalize`, `Contrast`, `Brightness` and `Sharpness` pipelines for data augmentation. (#179) + +### Improvements + +- Support dynamic shape export to onnx. (#175) +- Release training configs and update model zoo for fp16 (#184) +- Use MMCV's EvalHook in MMClassification (#182) + +### Bug Fixes + +- Fix wrong naming in vgg config (#181) + +## v0.9.0(1/3/2021) + +- Implement mixup trick. +- Add a new tool to create TensorRT engine from ONNX, run inference and verify outputs in Python. + +### New Features + +- Implement mixup and provide configs of training ResNet50 using mixup. (#160) +- Add `Shear` pipeline for data augmentation. (#163) +- Add `Translate` pipeline for data augmentation. (#165) +- Add `tools/onnx2tensorrt.py` as a tool to create TensorRT engine from ONNX, run inference and verify outputs in Python. (#153) + +### Improvements + +- Add `--eval-options` in `tools/test.py` to support eval options override, matching the behavior of other open-mmlab projects. (#158) +- Support showing and saving painted results in `mmpretrain.apis.test` and `tools/test.py`, matching the behavior of other open-mmlab projects. (#162) + +### Bug Fixes + +- Fix configs for VGG, replace checkpoints converted from other repos with the ones trained by ourselves and upload the missing logs in the model zoo. (#161) + +## v0.8.0(31/1/2021) + +- Support multi-label task. +- Support more flexible metrics settings. +- Fix bugs. + +### New Features + +- Add evaluation metrics: mAP, CP, CR, CF1, OP, OR, OF1 for multi-label task. (#123) +- Add BCE loss for multi-label task. (#130) +- Add focal loss for multi-label task. (#131) +- Support PASCAL VOC 2007 dataset for multi-label task. (#134) +- Add asymmetric loss for multi-label task. (#132) +- Add analyze_results.py to select images for success/fail demonstration. (#142) +- Support new metric that calculates the total number of occurrences of each label. (#143) +- Support class-wise evaluation results. (#143) +- Add thresholds in eval_metrics. (#146) +- Add heads and a baseline config for multilabel task. (#145) + +### Improvements + +- Remove the models with 0 checkpoint and ignore the repeated papers when counting papers to gain more accurate model statistics. (#135) +- Add tags in README.md. (#137) +- Fix optional issues in docstring. 
(#138) +- Update stat.py to classify papers. (#139) +- Fix mismatched columns in README.md. (#150) +- Fix test.py to support more evaluation metrics. (#155) + +### Bug Fixes + +- Fix bug in VGG weight_init. (#140) +- Fix bug in 2 ResNet configs in which outdated heads were used. (#147) +- Fix bug of misordered height and width in `RandomCrop` and `RandomResizedCrop`. (#151) +- Fix missing `meta_keys` in `Collect`. (#149 & #152) + +## v0.7.0(31/12/2020) + +- Add more evaluation metrics. +- Fix bugs. + +### New Features + +- Remove installation of MMCV from requirements. (#90) +- Add 3 evaluation metrics: precision, recall and F-1 score. (#93) +- Allow config override during testing and inference with `--options`. (#91 & #96) + +### Improvements + +- Use `build_runner` to make runners more flexible. (#54) +- Support to get category ids in `BaseDataset`. (#72) +- Allow `CLASSES` override during `BaseDateset` initialization. (#85) +- Allow input image as ndarray during inference. (#87) +- Optimize MNIST config. (#98) +- Add config links in model zoo documentation. (#99) +- Use functions from MMCV to collect environment. (#103) +- Refactor config files so that they are now categorized by methods. (#116) +- Add README in config directory. (#117) +- Add model statistics. (#119) +- Refactor documentation in consistency with other MM repositories. (#126) + +### Bug Fixes + +- Add missing `CLASSES` argument to dataset wrappers. (#66) +- Fix slurm evaluation error during training. (#69) +- Resolve error caused by shape in `Accuracy`. (#104) +- Fix bug caused by extremely insufficient data in distributed sampler.(#108) +- Fix bug in `gpu_ids` in distributed training. (#107) +- Fix bug caused by extremely insufficient data in collect results during testing (#114) + +## v0.6.0(11/10/2020) + +- Support new method: ResNeSt and VGG. +- Support new dataset: CIFAR10. +- Provide new tools to do model inference, model conversion from pytorch to onnx. + +### New Features + +- Add model inference. (#16) +- Add pytorch2onnx. (#20) +- Add PIL backend for transform `Resize`. (#21) +- Add ResNeSt. (#25) +- Add VGG and its pretained models. (#27) +- Add CIFAR10 configs and models. (#38) +- Add albumentations transforms. (#45) +- Visualize results on image demo. (#58) + +### Improvements + +- Replace urlretrieve with urlopen in dataset.utils. (#13) +- Resize image according to its short edge. (#22) +- Update ShuffleNet config. (#31) +- Update pre-trained models for shufflenet_v2, shufflenet_v1, se-resnet50, se-resnet101. (#33) + +### Bug Fixes + +- Fix init_weights in `shufflenet_v2.py`. (#29) +- Fix the parameter `size` in test_pipeline. (#30) +- Fix the parameter in cosine lr schedule. (#32) +- Fix the convert tools for mobilenet_v2. (#34) +- Fix crash in CenterCrop transform when image is greyscale (#40) +- Fix outdated configs. (#53) diff --git a/docs/en/notes/contribution_guide.md b/docs/en/notes/contribution_guide.md new file mode 100644 index 0000000..c97564d --- /dev/null +++ b/docs/en/notes/contribution_guide.md @@ -0,0 +1 @@ +../../../CONTRIBUTING.md \ No newline at end of file diff --git a/docs/en/notes/faq.md b/docs/en/notes/faq.md new file mode 100644 index 0000000..da45841 --- /dev/null +++ b/docs/en/notes/faq.md @@ -0,0 +1,116 @@ +# Frequently Asked Questions + +We list some common troubles faced by many users and their corresponding +solutions here. Feel free to enrich the list if you find any frequent issues +and have ways to help others to solve them. 
If the contents here do not cover +your issue, please create an issue using the +[provided templates](https://github.com/open-mmlab/mmpretrain/issues/new/choose) +and make sure you fill in all required information in the template. + +## Installation + +- Compatibility issue between MMEngine, MMCV and MMPretrain + + Compatible MMPretrain and MMEngine, MMCV versions are shown as below. Please + choose the correct version of MMEngine and MMCV to avoid installation issues. + + | MMPretrain version | MMEngine version | MMCV version | + | :----------------: | :---------------: | :--------------: | + | 1.2.0 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.1.1 | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.0.0 | mmengine >= 0.8.0 | mmcv >= 2.0.0 | + | 1.0.0rc8 | mmengine >= 0.7.1 | mmcv >= 2.0.0rc4 | + | 1.0.0rc7 | mmengine >= 0.5.0 | mmcv >= 2.0.0rc4 | + + ```{note} + Since the `dev` branch is under frequent development, the MMEngine and MMCV + version dependency may be inaccurate. If you encounter problems when using + the `dev` branch, please try to update MMEngine and MMCV to the latest version. + ``` + +- Using Albumentations + + If you would like to use `albumentations`, we suggest using `pip install -r requirements/albu.txt` or + `pip install -U albumentations --no-binary qudida,albumentations`. + + If you simply use `pip install albumentations>=0.3.2`, it will install `opencv-python-headless` simultaneously + (even though you have already installed `opencv-python`). Please refer to the + [official documentation](https://albumentations.ai/docs/getting_started/installation/#note-on-opencv-dependencies) + for details. + +## General Questions + +### Do I need to reinstall mmpretrain after some code modifications? + +If you follow [the best practice](../get_started.md#best-practices) and install mmpretrain from source, +any local modifications made to the code will take effect without +reinstallation. + +### How to develop with multiple MMPretrain versions? + +Generally speaking, we recommend to use different virtual environments to +manage MMPretrain in different working directories. However, you +can also use the same environment to develop MMPretrain in different +folders, like mmpretrain-0.21, mmpretrain-0.23. When you run the train or test shell script, +it will adopt the mmpretrain package in the current folder. And when you run other Python +script, you can also add `` PYTHONPATH=`pwd` `` at the beginning of your command +to use the package in the current folder. + +Conversely, to use the default MMPretrain installed in the environment +rather than the one you are working with, you can remove the following line +in those shell scripts: + +```shell +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH +``` + +### What's the relationship between the `load_from` and the `init_cfg`? + +- `load_from`: If `resume=False`, only imports model weights, which is mainly used to load trained models; + If `resume=True`, load all of the model weights, optimizer state, and other training information, which is + mainly used to resume interrupted training. + +- `init_cfg`: You can also specify `init=dict(type="Pretrained", checkpoint=xxx)` to load checkpoint, it + means load the weights during model weights initialization. That is, it will be only done at the + beginning of the training. 
It's mainly used to fine-tune a pre-trained model, and you can set it in + the backbone config and use `prefix` field to only load backbone weights, for example: + +```python +model = dict( + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoints=xxx, prefix='backbone'), + ) + ... +) +``` + +See the [Fine-tune Models](./finetune_custom_dataset.md) for more details about fine-tuning. + +### What's the difference between `default_hooks` and `custom_hooks`? + +Almost no difference. Usually, the `default_hooks` field is used to specify the hooks that will be used in almost +all experiments, and the `custom_hooks` field is used in only some experiments. + +Another difference is the `default_hooks` is a dict while the `custom_hooks` is a list, please don't be +confused. + +### During training, I got no training log, what's the reason? + +If your training dataset is small while the batch size is large, our default log interval may be too large to +record your training log. + +You can shrink the log interval and try again, like: + +```python +default_hooks = dict( + ... + logger=dict(type='LoggerHook', interval=10), + ... +) +``` + +### How to train with other datasets, like my own dataset or COCO? + +We provide [specific examples](./pretrain_custom_dataset.md) to show how to train with other datasets. diff --git a/docs/en/notes/finetune_custom_dataset.md b/docs/en/notes/finetune_custom_dataset.md new file mode 100644 index 0000000..4000268 --- /dev/null +++ b/docs/en/notes/finetune_custom_dataset.md @@ -0,0 +1,340 @@ +# How to Fine-tune with Custom Dataset + +In most scenarios, we want to apply a pre-trained model without training from scratch, which might possibly introduce extra uncertainties about the model convergency and therefore, is time-consuming. +The common sense is to learn from previous models trained on large dataset, which can hopefully provide better knowledge than a random beginner. Roughly speaking, this process is as known as fine-tuning. + +Models pre-trained on the ImageNet dataset have been demonstrated to be effective for other datasets and other downstream tasks. +Hence, this tutorial provides instructions for users to use the models provided in the [Model Zoo](../modelzoo_statistics.md) for other datasets to obtain better performance. + +In this tutorial, we provide a practice example and some tips on how to fine-tune a model on your own dataset. + +## Step-1: Prepare your dataset + +Prepare your dataset following [Prepare Dataset](../user_guides/dataset_prepare.md). +And the root folder of the dataset can be like `data/custom_dataset/`. + +Here, we assume you want to do supervised image-classification training, and use the sub-folder format +`CustomDataset` to organize your dataset as: + +```text +data/custom_dataset/ +├── train +│   ├── class_x +│   │ ├── x_1.png +│ │ ├── x_2.png +│ │ ├── x_3.png +│ │ └── ... +│ ├── class_y +│   └── ... +└── test +    ├── class_x +    │ ├── test_x_1.png + │ ├── test_x_2.png + │ ├── test_x_3.png + │ └── ... + ├── class_y +    └── ... +``` + +## Step-2: Choose one config as template + +Here, we would like to use `configs/resnet/resnet50_8xb32_in1k.py` as the example. We first copy this config +file to the same folder and rename it as `resnet50_8xb32-ft_custom.py`. 
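+
+The file itself only lists the `_base_` configs it inherits from (its content is shown below). If you want to see what the fully merged configuration looks like before editing, you can load it with MMEngine's `Config`. The following is only a quick inspection sketch and assumes you run it from the repository root after the copy above:
+
+```python
+from mmengine.config import Config
+
+# Load the new config; all `_base_` files are merged automatically.
+cfg = Config.fromfile('configs/resnet/resnet50_8xb32-ft_custom.py')
+
+print(cfg.model.head)                     # the head we will modify in Step-3
+print(cfg.train_dataloader.dataset.type)  # the dataset type we will override in Step-4
+# print(cfg.pretty_text)                  # uncomment to dump the whole merged config
+```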
+ +```{tip} +As a convention, the last field of the config name is the dataset, e.g.,`in1k` for ImageNet dataset, `coco` for COCO dataset +``` + +The content of this config is: + +```python +_base_ = [ + '../_base_/models/resnet50.py', # model settings + '../_base_/datasets/imagenet_bs32.py', # data settings + '../_base_/schedules/imagenet_bs256.py', # schedule settings + '../_base_/default_runtime.py', # runtime settings +] +``` + +## Step-3: Edit the model settings + +When fine-tuning a model, usually we want to load the pre-trained backbone +weights and train a new classification head from scratch. + +To load the pre-trained backbone, we need to change the initialization config +of the backbone and use `Pretrained` initialization function. Besides, in the +`init_cfg`, we use `prefix='backbone'` to tell the initialization function +the prefix of the submodule that needs to be loaded in the checkpoint. + +For example, `backbone` here means to load the backbone submodule. And here we +use an online checkpoint, it will be downloaded automatically during training, +you can also download the model manually and use a local path. +And then we need to modify the head according to the class numbers of the new +datasets by just changing `num_classes` in the head. + +When new dataset is small and shares the domain with the pre-trained dataset, +we might want to freeze the first several stages' parameters of the +backbone, that will help the network to keep ability to extract low-level +information learnt from pre-trained model. In MMPretrain, you can simply +specify how many stages to freeze by `frozen_stages` argument. For example, to +freeze the first two stages' parameters, just use the following configs: + +```{note} +Not all backbones support the `frozen_stages` argument by now. Please check +[the docs](https://mmpretrain.readthedocs.io/en/latest/api.html#module-mmpretrain.models.backbones) +to confirm if your backbone supports it. +``` + +```python +_base_ = [ + '../_base_/models/resnet50.py', # model settings + '../_base_/datasets/imagenet_bs32.py', # data settings + '../_base_/schedules/imagenet_bs256.py', # schedule settings + '../_base_/default_runtime.py', # runtime settings +] + +# >>>>>>>>>>>>>>> Override model settings here >>>>>>>>>>>>>>>>>>> +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) +# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< +``` + +```{tip} +Here we only need to set the part of configs we want to modify, because the +inherited configs will be merged and get the entire configs. +``` + +## Step-4: Edit the dataset settings + +To fine-tuning on a new dataset, we need to override some dataset settings, like the type of dataset, data +pipeline, etc. 
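+
+Before editing the config, it can be worth checking that `CustomDataset` really picks up the folder layout prepared in Step-1. The snippet below is only a rough sketch: the argument values mirror the dataset fields used in this tutorial, and you may need to adjust the paths for your own data. The actual dataloader override is shown in the config below.
+
+```python
+from mmpretrain.datasets import CustomDataset
+
+# Sanity-check the sub-folder layout prepared in Step-1.
+dataset = CustomDataset(
+    data_root='data/custom_dataset',
+    data_prefix='train',
+    ann_file='',  # sub-folder format, no annotation file
+    pipeline=[],  # no transforms needed just to inspect the index
+)
+print(len(dataset))                     # number of training samples found
+print(dataset.metainfo.get('classes'))  # class names inferred from the folder names
+```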
+ +```python +_base_ = [ + '../_base_/models/resnet50.py', # model settings + '../_base_/datasets/imagenet_bs32.py', # data settings + '../_base_/schedules/imagenet_bs256.py', # schedule settings + '../_base_/default_runtime.py', # runtime settings +] + +# model settings +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) + +# >>>>>>>>>>>>>>> Override data settings here >>>>>>>>>>>>>>>>>>> +data_root = 'data/custom_dataset' +train_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', # We assume you are using the sub-folder format without ann_file + data_prefix='train', + )) +val_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', # We assume you are using the sub-folder format without ann_file + data_prefix='test', + )) +test_dataloader = val_dataloader +# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< +``` + +## Step-5: Edit the schedule settings (optional) + +The fine-tuning hyper parameters vary from the default schedule. It usually +requires smaller learning rate and quicker decaying scheduler epochs. + +```python +_base_ = [ + '../_base_/models/resnet50.py', # model settings + '../_base_/datasets/imagenet_bs32.py', # data settings + '../_base_/schedules/imagenet_bs256.py', # schedule settings + '../_base_/default_runtime.py', # runtime settings +] + +# model settings +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) + +# data settings +data_root = 'data/custom_dataset' +train_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', # We assume you are using the sub-folder format without ann_file + data_prefix='train', + )) +val_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', # We assume you are using the sub-folder format without ann_file + data_prefix='test', + )) +test_dataloader = val_dataloader + +# >>>>>>>>>>>>>>> Override schedule settings here >>>>>>>>>>>>>>>>>>> +# optimizer hyper-parameters +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) +# learning policy +param_scheduler = dict( + type='MultiStepLR', by_epoch=True, milestones=[15], gamma=0.1) +# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< +``` + +```{tip} +Refers to [Learn about Configs](../user_guides/config.md) for more detailed configurations. 
+``` + +## Start Training + +Now, we have finished the fine-tuning config file as following: + +```python +_base_ = [ + '../_base_/models/resnet50.py', # model settings + '../_base_/datasets/imagenet_bs32.py', # data settings + '../_base_/schedules/imagenet_bs256.py', # schedule settings + '../_base_/default_runtime.py', # runtime settings +] + +# model settings +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) + +# data settings +data_root = 'data/custom_dataset' +train_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', # We assume you are using the sub-folder format without ann_file + data_prefix='train', + )) +val_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', # We assume you are using the sub-folder format without ann_file + data_prefix='test', + )) +test_dataloader = val_dataloader + +# schedule settings +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) +param_scheduler = dict( + type='MultiStepLR', by_epoch=True, milestones=[15], gamma=0.1) +``` + +Here we use 8 GPUs on your computer to train the model with the following command: + +```shell +bash tools/dist_train.sh configs/resnet/resnet50_8xb32-ft_custom.py 8 +``` + +Also, you can use only one GPU to train the model with the following command: + +```shell +python tools/train.py configs/resnet/resnet50_8xb32-ft_custom.py +``` + +But wait, an important config need to be changed if using one GPU. We need to +change the dataset config as following: + +```python +data_root = 'data/custom_dataset' +train_dataloader = dict( + batch_size=256, + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', # We assume you are using the sub-folder format without ann_file + data_prefix='train', + )) +val_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', # We assume you are using the sub-folder format without ann_file + data_prefix='test', + )) +test_dataloader = val_dataloader +``` + +It's because our training schedule is for a batch size of 256. If using 8 GPUs, +just use `batch_size=32` config in the base config file for every GPU, and the total batch +size will be 256. But if using one GPU, you need to change it to 256 manually to +match the training schedule. + +However, a larger batch size requires a larger GPU memory, and here are several simple tricks to save the GPU +memory: + +1. Enable Automatic-Mixed-Precision training. + + ```shell + python tools/train.py configs/resnet/resnet50_8xb32-ft_custom.py --amp + ``` + +2. Use a smaller batch size, like `batch_size=32` instead of 256, and enable the auto learning rate scaling. + + ```shell + python tools/train.py configs/resnet/resnet50_8xb32-ft_custom.py --auto-scale-lr + ``` + + The auto learning rate scaling will adjust the learning rate according to the actual batch size and the + `auto_scale_lr.base_batch_size` (You can find it in the base config + `configs/_base_/schedules/imagenet_bs256.py`) + +```{note} +Most of these tricks may influence the training performance slightly. +``` + +### Apply pre-trained model with command line + +If you don't want to modify the configs, you could use `--cfg-options` to add your pre-trained model path to `init_cfg`. 
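+
+The dotted keys given to `--cfg-options` are merged into the loaded config before training starts. Roughly, this corresponds to the sketch below (the checkpoint value is just a placeholder, use your own file or URL):
+
+```python
+from mmengine.config import Config
+
+cfg = Config.fromfile('configs/resnet/resnet50_8xb32-ft_custom.py')
+# `--cfg-options a.b.c=x` becomes a nested update like this:
+cfg.merge_from_dict({
+    'model.backbone.init_cfg.type': 'Pretrained',
+    'model.backbone.init_cfg.checkpoint': 'path/or/url/to/pretrained.pth',  # placeholder
+    'model.backbone.init_cfg.prefix': 'backbone',
+})
+print(cfg.model.backbone.init_cfg)
+```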
+ +For example, the command below will also load pre-trained model. + +```shell +bash tools/dist_train.sh configs/resnet/resnet50_8xb32-ft_custom.py 8 \ + --cfg-options model.backbone.init_cfg.type='Pretrained' \ + model.backbone.init_cfg.checkpoint='https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k/mocov3_resnet50_8xb512-amp-coslr-100e_in1k_20220927-f1144efa.pth' \ + model.backbone.init_cfg.prefix='backbone' \ +``` diff --git a/docs/en/notes/pretrain_custom_dataset.md b/docs/en/notes/pretrain_custom_dataset.md new file mode 100644 index 0000000..c9e5837 --- /dev/null +++ b/docs/en/notes/pretrain_custom_dataset.md @@ -0,0 +1,255 @@ +# How to Pretrain with Custom Dataset + +In this tutorial, we provide a practice example and some tips on how to train on your own dataset. + +In MMPretrain, We support the `CustomDataset` (similar to the `ImageFolder` in `torchvision`), which is able to read the images within the specified folder directly. You only need to prepare the path information of the custom dataset and edit the config. + +## Step-1: Prepare your dataset + +Prepare your dataset following [Prepare Dataset](../user_guides/dataset_prepare.md). +And the root folder of the dataset can be like `data/custom_dataset/`. + +Here, we assume you want to do unsupervised training, and use the sub-folder format `CustomDataset` to +organize your dataset as: + +```text +data/custom_dataset/ +├── sample1.png +├── sample2.png +├── sample3.png +├── sample4.png +└── ... +``` + +## Step-2: Choose one config as template + +Here, we would like to use `configs/mae/mae_vit-base-p16_8xb512-amp-coslr-300e_in1k.py` as the example. We +first copy this config file to the same folder and rename it as +`mae_vit-base-p16_8xb512-amp-coslr-300e_custom.py`. + +```{tip} +As a convention, the last field of the config name is the dataset, e.g.,`in1k` for ImageNet dataset, `coco` for COCO dataset +``` + +The content of this config is: + +```python +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) +``` + +## Step-3: Edit the dataset related config + +- Override the `type` of dataset settings as `'CustomDataset'` +- Override the `data_root` of dataset settings as `data/custom_dataset`. 
+- Override the `ann_file` of dataset settings as an empty string since we assume you are using the sub-folder + format `CustomDataset`. +- Override the `data_prefix` of dataset settings as an empty string since we are using the whole dataset under + the `data_root`, and you don't need to split samples into different subset and set the `data_prefix`. + +The modified config will be like: + +```python +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# >>>>>>>>>>>>>>> Override dataset settings here >>>>>>>>>>>>>>>>>>> +train_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root='data/custom_dataset/', + ann_file='', # We assume you are using the sub-folder format without ann_file + data_prefix='', # The `data_root` is the data_prefix directly. + with_label=False, + ) +) +# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) +``` + +By using the edited config file, you are able to train a self-supervised model with MAE algorithm on the custom dataset. + +## Another example: Train MAE on COCO Dataset + +```{note} +You need to install MMDetection to use the `mmdet.CocoDataset` follow this [documentation](https://github.com/open-mmlab/mmdetection/blob/3.x/docs/en/get_started.md) +``` + +Follow the aforementioned idea, we also present an example of how to train MAE on COCO dataset. The edited file will be like this: + +```python +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_mae.py', + '../_base_/default_runtime.py', +] + +# >>>>>>>>>>>>>>> Override dataset settings here >>>>>>>>>>>>>>>>>>> +train_dataloader = dict( + dataset=dict( + type='mmdet.CocoDataset', + data_root='data/coco/', + ann_file='annotations/instances_train2017.json', # Only for loading images, and the labels won't be used. + data_prefix=dict(img='train2017/'), + ) +) +# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) 
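+            # decay_mult=0 disables weight decay for the layer-norm weights, biases and the extra tokens matched above.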
+ })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) +``` diff --git a/docs/en/notes/projects.md b/docs/en/notes/projects.md new file mode 100644 index 0000000..d6b6254 --- /dev/null +++ b/docs/en/notes/projects.md @@ -0,0 +1,21 @@ +# Projects based on MMPretrain + +There are many projects built upon MMPretrain(MMClassification previsously). +We list some of them as examples of how to extend MMPretrain(MMClassification previsously) for your own projects. +As the page might not be completed, please feel free to create a PR to update this page. + +## Projects as an extension + +- [OpenMixup](https://github.com/Westlake-AI/openmixup): an open-source toolbox for supervised, self-, and semi-supervised visual representation learning with mixup based on PyTorch, especially for mixup-related methods. +- [AI Power](https://github.com/ykk648/AI_power): AI toolbox and pretrain models. +- [OpenBioSeq](https://github.com/Westlake-AI/OpenBioSeq): an open-source supervised and self-supervised bio-sequence representation learning toolbox based on PyTorch. + +## Projects of papers + +There are also projects released with papers. +Some of the papers are published in top-tier conferences (CVPR, ICCV, and ECCV), the others are also highly influential. +To make this list also a reference for the community to develop and compare new image classification algorithms, we list them following the time order of top-tier conferences. +Methods already supported and maintained by MMPretrain(MMClassification previsously) are not listed. + +- Involution: Inverting the Inherence of Convolution for Visual Recognition, CVPR21. [[paper]](https://arxiv.org/abs/2103.06255)[[github]](https://github.com/d-li14/involution) +- Convolution of Convolution: Let Kernels Spatially Collaborate, CVPR22. [[paper]](https://openaccess.thecvf.com/content/CVPR2022/papers/Zhao_Convolution_of_Convolution_Let_Kernels_Spatially_Collaborate_CVPR_2022_paper.pdf)[[github]](https://github.com/Genera1Z/ConvolutionOfConvolution) diff --git a/docs/en/stat.py b/docs/en/stat.py new file mode 100644 index 0000000..2d74823 --- /dev/null +++ b/docs/en/stat.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python +import re +import warnings +from collections import defaultdict +from pathlib import Path + +from modelindex.load_model_index import load +from modelindex.models.Result import Result +from tabulate import tabulate + +MMPT_ROOT = Path(__file__).absolute().parents[2] +PAPERS_ROOT = Path('papers') # Path to save generated paper pages. +GITHUB_PREFIX = 'https://github.com/open-mmlab/mmpretrain/blob/main/' +MODELZOO_TEMPLATE = """\ +# Model Zoo Summary + +In this page, we list [all algorithms](#all-supported-algorithms) we support. You can click the link to jump to the corresponding model pages. + +And we also list all checkpoints for different tasks we provide. 
You can sort or search checkpoints in the table and click the corresponding link to model pages for more details. + +## All supported algorithms + +* Number of papers: {num_papers} +{type_msg} + +* Number of checkpoints: {num_ckpts} +{paper_msg} + +""" # noqa: E501 + +METRIC_ALIAS = { + 'Top 1 Accuracy': 'Top-1 (%)', + 'Top 5 Accuracy': 'Top-5 (%)', +} + +model_index = load(str(MMPT_ROOT / 'model-index.yml')) + + +def build_collections(model_index): + col_by_name = {} + for col in model_index.collections: + setattr(col, 'models', []) + col_by_name[col.name] = col + + for model in model_index.models: + col = col_by_name[model.in_collection] + col.models.append(model) + setattr(model, 'collection', col) + if model.results is None: + setattr(model, 'tasks', []) + else: + setattr(model, 'tasks', [result.task for result in model.results]) + + +build_collections(model_index) + + +def count_papers(collections): + total_num_ckpts = 0 + type_count = defaultdict(int) + paper_msgs = [] + + for collection in collections: + with open(MMPT_ROOT / collection.readme) as f: + readme = f.read() + ckpts = set(x.lower().strip() + for x in re.findall(r'\[model\]\((https?.*)\)', readme)) + total_num_ckpts += len(ckpts) + title = collection.paper['Title'] + papertype = collection.data.get('type', 'Algorithm') + type_count[papertype] += 1 + + readme = PAPERS_ROOT / Path( + collection.filepath).parent.with_suffix('.md').name + paper_msgs.append( + f'\t- [{papertype}] [{title}]({readme}) ({len(ckpts)} ckpts)') + + type_msg = '\n'.join( + [f'\t- {type_}: {count}' for type_, count in type_count.items()]) + paper_msg = '\n'.join(paper_msgs) + + modelzoo = MODELZOO_TEMPLATE.format( + num_papers=len(collections), + num_ckpts=total_num_ckpts, + type_msg=type_msg, + paper_msg=paper_msg, + ) + + with open('modelzoo_statistics.md', 'w') as f: + f.write(modelzoo) + + +count_papers(model_index.collections) + + +def generate_paper_page(collection): + PAPERS_ROOT.mkdir(exist_ok=True) + + # Write a copy of README + with open(MMPT_ROOT / collection.readme) as f: + readme = f.read() + folder = Path(collection.filepath).parent + copy = PAPERS_ROOT / folder.with_suffix('.md').name + + def replace_link(matchobj): + # Replace relative link to GitHub link. 
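+        # `matchobj` comes from the markdown link pattern `[text](target)`: group(1) is the link text, group(2) the target.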
+ name = matchobj.group(1) + link = matchobj.group(2) + if not link.startswith('http'): + assert (folder / link).exists(), \ + f'Link not found:\n{collection.readme}: {link}' + rel_link = (folder / link).absolute().relative_to(MMPT_ROOT) + link = GITHUB_PREFIX + str(rel_link) + return f'[{name}]({link})' + + content = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', replace_link, readme) + content = f'---\ngithub_page: /{collection.readme}\n---\n' + content + + def make_tabs(matchobj): + """modify the format from emphasis black symbol to tabs.""" + content = matchobj.group() + content = content.replace('', '') + content = content.replace('', '') + + # split the content by "**{Tab-Name}**"" + splits = re.split(r'^\*\*(.*)\*\*$', content, flags=re.M)[1:] + tabs_list = [] + for title, tab_content in zip(splits[::2], splits[1::2]): + title = ':::{tab} ' + title + '\n' + tab_content = tab_content.strip() + '\n:::\n' + tabs_list.append(title + tab_content) + + return '::::{tabs}\n' + ''.join(tabs_list) + '::::' + + if '' in content and '' in content: + # Make TABS block a selctive tabs + try: + pattern = r'([\d\D]*?)' + content = re.sub(pattern, make_tabs, content) + except Exception as e: + warnings.warn(f'Can not parse the TABS, get an error : {e}') + + with open(copy, 'w') as copy_file: + copy_file.write(content) + + +for collection in model_index.collections: + generate_paper_page(collection) + + +def scatter_results(models): + model_result_pairs = [] + for model in models: + if model.results is None: + result = Result(task=None, dataset=None, metrics={}) + model_result_pairs.append((model, result)) + else: + for result in model.results: + model_result_pairs.append((model, result)) + return model_result_pairs + + +def generate_summary_table(task, model_result_pairs, title=None): + metrics = set() + for model, result in model_result_pairs: + if result.task == task: + metrics = metrics.union(result.metrics.keys()) + metrics = sorted(list(metrics)) + + rows = [] + for model, result in model_result_pairs: + if result.task != task: + continue + name = model.name + params = f'{model.metadata.parameters / 1e6:.2f}' # Params + if model.metadata.flops is not None: + flops = f'{model.metadata.flops / 1e9:.2f}' # Flops + else: + flops = None + readme = Path(model.collection.filepath).parent.with_suffix('.md').name + page = f'[link]({PAPERS_ROOT / readme})' + model_metrics = [] + for metric in metrics: + model_metrics.append(str(result.metrics.get(metric, ''))) + + rows.append([name, params, flops, *model_metrics, page]) + + with open('modelzoo_statistics.md', 'a') as f: + if title is not None: + f.write(f'\n{title}') + f.write("""\n```{table}\n:class: model-summary\n""") + header = [ + 'Model', + 'Params (M)', + 'Flops (G)', + *[METRIC_ALIAS.get(metric, metric) for metric in metrics], + 'Readme', + ] + table_cfg = dict( + tablefmt='pipe', + floatfmt='.2f', + numalign='right', + stralign='center') + f.write(tabulate(rows, header, **table_cfg)) + f.write('\n```\n') + + +def generate_dataset_wise_table(task, model_result_pairs, title=None): + dataset_rows = defaultdict(list) + for model, result in model_result_pairs: + if result.task == task: + dataset_rows[result.dataset].append((model, result)) + + if title is not None: + with open('modelzoo_statistics.md', 'a') as f: + f.write(f'\n{title}') + for dataset, pairs in dataset_rows.items(): + generate_summary_table(task, pairs, title=f'### {dataset}') + + +model_result_pairs = scatter_results(model_index.models) + +# Generate Pretrain Summary +generate_summary_table( + 
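+    # task=None matches the placeholder results created in `scatter_results`, i.e. models without evaluation results (pre-trained weights only).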
task=None, + model_result_pairs=model_result_pairs, + title='## Pretrained Models', +) + +# Generate Image Classification Summary +generate_dataset_wise_table( + task='Image Classification', + model_result_pairs=model_result_pairs, + title='## Image Classification', +) + +# Generate Multi-Label Classification Summary +generate_dataset_wise_table( + task='Multi-Label Classification', + model_result_pairs=model_result_pairs, + title='## Multi-Label Classification', +) + +# Generate Image Retrieval Summary +generate_dataset_wise_table( + task='Image Retrieval', + model_result_pairs=model_result_pairs, + title='## Image Retrieval', +) diff --git a/docs/en/useful_tools/cam_visualization.md b/docs/en/useful_tools/cam_visualization.md new file mode 100644 index 0000000..023e37a --- /dev/null +++ b/docs/en/useful_tools/cam_visualization.md @@ -0,0 +1,164 @@ +# Class Activation Map (CAM) Visualization + +## Introduction of the CAM visualization tool + +MMPretrain provides `tools/visualization/vis_cam.py` tool to visualize class activation map. Please use `pip install "grad-cam>=1.3.6"` command to install [pytorch-grad-cam](https://github.com/jacobgil/pytorch-grad-cam). + +The supported methods are as follows: + +| Method | What it does | +| ------------ | ---------------------------------------------------------------------------------------------------------------------------- | +| GradCAM | Weight the 2D activations by the average gradient | +| GradCAM++ | Like GradCAM but uses second order gradients | +| XGradCAM | Like GradCAM but scale the gradients by the normalized activations | +| EigenCAM | Takes the first principle component of the 2D Activations (no class discrimination, but seems to give great results) | +| EigenGradCAM | Like EigenCAM but with class discrimination: First principle component of Activations\*Grad. Looks like GradCAM, but cleaner | +| LayerCAM | Spatially weight the activations by positive gradients. Works better especially in lower layers | + +More CAM methods supported by the new version `pytorch-grad-cam` can also be used but we haven't verified the availability. + +**Command**: + +```bash +python tools/visualization/vis_cam.py \ + ${IMG} \ + ${CONFIG_FILE} \ + ${CHECKPOINT} \ + [--target-layers ${TARGET-LAYERS}] \ + [--preview-model] \ + [--method ${METHOD}] \ + [--target-category ${TARGET-CATEGORY}] \ + [--save-path ${SAVE_PATH}] \ + [--vit-like] \ + [--num-extra-tokens ${NUM-EXTRA-TOKENS}] + [--aug_smooth] \ + [--eigen_smooth] \ + [--device ${DEVICE}] \ + [--cfg-options ${CFG-OPTIONS}] +``` + +**Description of all arguments**: + +- `img`: The target picture path. +- `config`: The path of the model config file. +- `checkpoint`: The path of the checkpoint. +- `--target-layers`: The target layers to get activation maps, one or more network layers can be specified. If not set, use the norm layer of the last block. +- `--preview-model`: Whether to print all network layer names in the model. +- `--method`: Visualization method, supports `GradCAM`, `GradCAM++`, `XGradCAM`, `EigenCAM`, `EigenGradCAM`, `LayerCAM`, which is case insensitive. Defaults to `GradCAM`. +- `--target-category`: Target category, if not set, use the category detected by the given model. +- `--eigen-smooth`: Whether to use the principal component to reduce noise. +- `--aug-smooth`: Whether to use TTA(Test Time Augment) to get CAM. +- `--save-path`: The path to save the CAM visualization image. If not set, the CAM image will not be saved. +- `--vit-like`: Whether the network is ViT-like network. 
+- `--num-extra-tokens`: The number of extra tokens in ViT-like backbones. If not set, use num_extra_tokens the backbone. +- `--device`: The computing device used. Default to 'cpu'. +- `--cfg-options`: Modifications to the configuration file, refer to [Learn about Configs](../user_guides/config.md). + +```{note} +The argument `--preview-model` can view all network layers names in the given model. It will be helpful if you know nothing about the model layers when setting `--target-layers`. +``` + +## How to visualize the CAM of CNN (ResNet-50) + +Here are some examples of `target-layers` in ResNet-50, which can be any module or layer: + +- `'backbone.layer4'` means the output of the forth ResLayer. +- `'backbone.layer4.2'` means the output of the third BottleNeck block in the forth ResLayer. +- `'backbone.layer4.2.conv1'` means the output of the `conv1` layer in above BottleNeck block. + +1. Use different methods to visualize CAM for `ResNet50`, the `target-category` is the predicted result by the given checkpoint, using the default `target-layers`. + + ```shell + python tools/visualization/vis_cam.py \ + demo/bird.JPEG \ + configs/resnet/resnet50_8xb32_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth \ + --method GradCAM + # GradCAM++, XGradCAM, EigenCAM, EigenGradCAM, LayerCAM + ``` + + | Image | GradCAM | GradCAM++ | EigenGradCAM | LayerCAM | + | ------------------------------------ | --------------------------------------- | ----------------------------------------- | -------------------------------------------- | ---------------------------------------- | + |
| + +2. Use different `target-category` to get CAM from the same picture. In `ImageNet` dataset, the category 238 is 'Greater Swiss Mountain dog', the category 281 is 'tabby, tabby cat'. + + ```shell + python tools/visualization/vis_cam.py \ + demo/cat-dog.png configs/resnet/resnet50_8xb32_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth \ + --target-layers 'backbone.layer4.2' \ + --method GradCAM \ + --target-category 238 + # --target-category 281 + ``` + + | Category | Image | GradCAM | XGradCAM | LayerCAM | + | -------- | ---------------------------------------------- | ------------------------------------------------ | ------------------------------------------------- | ------------------------------------------------- | + | Dog |
| + | Cat |
| + +3. Use `--eigen-smooth` and `--aug-smooth` to improve visual effects. + + ```shell + python tools/visualization/vis_cam.py \ + demo/dog.jpg \ + configs/mobilenet_v3/mobilenet-v3-large_8xb128_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth \ + --target-layers 'backbone.layer16' \ + --method LayerCAM \ + --eigen-smooth --aug-smooth + ``` + + | Image | LayerCAM | eigen-smooth | aug-smooth | eigen&aug | + | ------------------------------------ | --------------------------------------- | ------------------------------------------- | ----------------------------------------- | ----------------------------------------- | + |
| + +## How to visualize the CAM of vision transformer + +Here are some examples: + +- `'backbone.norm3'` for Swin-Transformer; +- `'backbone.layers.11.ln1'` for ViT; + +For ViT-like networks, such as ViT, T2T-ViT and Swin-Transformer, the features are flattened. And for drawing the CAM, we need to specify the `--vit-like` argument to reshape the features into square feature maps. + +Besides the flattened features, some ViT-like networks also add extra tokens like the class token in ViT and T2T-ViT, and the distillation token in DeiT. In these networks, the final classification is done on the tokens computed in the last attention block, and therefore, the classification score will not be affected by other features and the gradient of the classification score with respect to them, will be zero. Therefore, you shouldn't use the output of the last attention block as the target layer in these networks. + +To exclude these extra tokens, we need know the number of extra tokens. Almost all transformer-based backbones in MMPretrain have the `num_extra_tokens` attribute. If you want to use this tool in a new or third-party network that don't have the `num_extra_tokens` attribute, please specify it the `--num-extra-tokens` argument. + +1. Visualize CAM for `Swin Transformer`, using default `target-layers`: + + ```shell + python tools/visualization/vis_cam.py \ + demo/bird.JPEG \ + configs/swin_transformer/swin-tiny_16xb64_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth \ + --vit-like + ``` + +2. Visualize CAM for `Vision Transformer(ViT)`: + + ```shell + python tools/visualization/vis_cam.py \ + demo/bird.JPEG \ + configs/vision_transformer/vit-base-p16_64xb64_in1k-384px.py \ + https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth \ + --vit-like \ + --target-layers 'backbone.layers.11.ln1' + ``` + +3. Visualize CAM for `T2T-ViT`: + + ```shell + python tools/visualization/vis_cam.py \ + demo/bird.JPEG \ + configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_3rdparty_8xb64_in1k_20210928-b7c09b62.pth \ + --vit-like \ + --target-layers 'backbone.encoder.12.ln1' + ``` + +| Image | ResNet50 | ViT | Swin | T2T-ViT | +| --------------------------------------- | ------------------------------------------ | -------------------------------------- | --------------------------------------- | ------------------------------------------ | +|
| diff --git a/docs/en/useful_tools/complexity_analysis.md b/docs/en/useful_tools/complexity_analysis.md new file mode 100644 index 0000000..ac6d133 --- /dev/null +++ b/docs/en/useful_tools/complexity_analysis.md @@ -0,0 +1,77 @@ +# Model Complexity Analysis + +## Get the FLOPs and params (experimental) + +We provide a script adapted from [MMEngine](https://github.com/open-mmlab/mmengine/blob/main/mmengine/analysis/complexity_analysis.py) to compute the FLOPs and params of a given model. + +```shell +python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] +``` + +Description of all arguments: + +- `config`: The path of the model config file. +- `--shape`: Input size, support single value or double value parameter, such as `--shape 256` or `--shape 224 256`. If not set, default to be `224 224`. + +Example: + +```shell +python tools/analysis_tools/get_flops.py configs/resnet/resnet50_8xb32_in1k.py +``` + +You will get the final result like this. + +```text +============================== +Input shape: (3, 224, 224) +Flops: 4.109G +Params: 25.557M +Activation: 11.114M +============================== +``` + +Also, you will get the detailed complexity information of each layer like this: + +```text ++--------------------------+----------------------+-----------+--------------+ +| module | #parameters or shape | #flops | #activations | ++--------------------------+----------------------+-----------+--------------+ +| model | 25.557M | 4.109G | 11.114M | +| backbone | 23.508M | 4.109G | 11.114M | +| backbone.conv1 | 9.408K | 0.118G | 0.803M | +| backbone.conv1.weight | (64, 3, 7, 7) | | | +| backbone.bn1 | 0.128K | 1.606M | 0 | +| backbone.bn1.weight | (64,) | | | +| backbone.bn1.bias | (64,) | | | +| backbone.layer1 | 0.216M | 0.677G | 4.415M | +| backbone.layer1.0 | 75.008K | 0.235G | 2.007M | +| backbone.layer1.1 | 70.4K | 0.221G | 1.204M | +| backbone.layer1.2 | 70.4K | 0.221G | 1.204M | +| backbone.layer2 | 1.22M | 1.034G | 3.111M | +| backbone.layer2.0 | 0.379M | 0.375G | 1.305M | +| backbone.layer2.1 | 0.28M | 0.22G | 0.602M | +| backbone.layer2.2 | 0.28M | 0.22G | 0.602M | +| backbone.layer2.3 | 0.28M | 0.22G | 0.602M | +| backbone.layer3 | 7.098M | 1.469G | 2.158M | +| backbone.layer3.0 | 1.512M | 0.374G | 0.652M | +| backbone.layer3.1 | 1.117M | 0.219G | 0.301M | +| backbone.layer3.2 | 1.117M | 0.219G | 0.301M | +| backbone.layer3.3 | 1.117M | 0.219G | 0.301M | +| backbone.layer3.4 | 1.117M | 0.219G | 0.301M | +| backbone.layer3.5 | 1.117M | 0.219G | 0.301M | +| backbone.layer4 | 14.965M | 0.81G | 0.627M | +| backbone.layer4.0 | 6.04M | 0.373G | 0.326M | +| backbone.layer4.1 | 4.463M | 0.219G | 0.151M | +| backbone.layer4.2 | 4.463M | 0.219G | 0.151M | +| head.fc | 2.049M | | | +| head.fc.weight | (1000, 2048) | | | +| head.fc.bias | (1000,) | | | +| neck.gap | | 0.1M | 0 | ++--------------------------+----------------------+-----------+--------------+ +``` + +```{warning} +This tool is still experimental and we do not guarantee that the number is correct. You may well use the result for simple comparisons, but double-check it before you adopt it in technical reports or papers. +- FLOPs are related to the input shape while parameters are not. The default input shape is (1, 3, 224, 224). +- Some operators are not counted into FLOPs like custom operators. Refer to [`mmengine.analysis.complexity_analysis._DEFAULT_SUPPORTED_FLOP_OPS`](https://github.com/open-mmlab/mmengine/blob/main/mmengine/analysis/complexity_analysis.py) for details. 
+``` diff --git a/docs/en/useful_tools/confusion_matrix.md b/docs/en/useful_tools/confusion_matrix.md new file mode 100644 index 0000000..306b585 --- /dev/null +++ b/docs/en/useful_tools/confusion_matrix.md @@ -0,0 +1,84 @@ +# Confusion Matrix + +MMPretrain provides `tools/analysis_tools/confusion_matrix.py` tool to calculate and visualize the confusion matrix. For an introduction to the confusion matrix, see [link](https://en.wikipedia.org/wiki/Confusion_matrix). + +## Command-line Usage + +**Command**: + +```shell +python tools/analysis_tools/confusion_matrix.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT} \ + [--show] \ + [--show-path] \ + [--include-values] \ + [--cmap ${CMAP}] \ + [--cfg-options ${CFG-OPTIONS}] +``` + +**Description of all arguments**: + +- `config`: The path of the model config file. +- `checkpoint`: The path of the checkpoint. +- `--show`: If or not to show the matplotlib visualization result of the confusion matrix, the default is `False`. +- `--show-path`: If `show` is True, the path where the results are saved is visualized. +- `--include-values`: Whether to add values to the visualization results. +- `--cmap`: The color map used for visualization results, `cmap`, which defaults to `viridis`. + +* `--cfg-options`: Modifications to the configuration file, refer to [Learn about Configs](../user_guides/config.md). + +**Examples of use**: + +```shell +python tools/analysis_tools/confusion_matrix.py \ + configs/resnet/resnet50_8xb16_cifar10.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth \ + --show +``` + +**output image**: + +
+ +## **Basic Usage** + +```python +>>> import torch +>>> from mmpretrain.evaluation import ConfusionMatrix +>>> y_pred = [0, 1, 1, 3] +>>> y_true = [0, 2, 1, 3] +>>> ConfusionMatrix.calculate(y_pred, y_true, num_classes=4) +tensor([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 1, 0, 0], + [0, 0, 0, 1]]) +>>> # plot the confusion matrix +>>> import matplotlib.pyplot as plt +>>> y_score = torch.rand((1000, 10)) +>>> y_true = torch.randint(10, (1000, )) +>>> matrix = ConfusionMatrix.calculate(y_score, y_true) +>>> ConfusionMatrix().plot(matrix) +>>> plt.show() +``` + +## **Use with Evalutor** + +```python +>>> import torch +>>> from mmpretrain.evaluation import ConfusionMatrix +>>> from mmpretrain.structures import DataSample +>>> from mmengine.evaluator import Evaluator +>>> data_samples = [ +... DataSample().set_gt_label(i%5).set_pred_score(torch.rand(5)) +... for i in range(1000) +... ] +>>> evaluator = Evaluator(metrics=ConfusionMatrix()) +>>> evaluator.process(data_samples) +>>> evaluator.evaluate(1000) +{'confusion_matrix/result': tensor([[37, 37, 48, 43, 35], + [35, 51, 32, 46, 36], + [45, 28, 39, 42, 46], + [42, 40, 40, 35, 43], + [40, 39, 41, 37, 43]])} +``` diff --git a/docs/en/useful_tools/dataset_visualization.md b/docs/en/useful_tools/dataset_visualization.md new file mode 100644 index 0000000..b1f216c --- /dev/null +++ b/docs/en/useful_tools/dataset_visualization.md @@ -0,0 +1,90 @@ +# Dataset Visualization + +## Introduce the dataset visualization tool + +```bash +python tools/visualization/browse_dataset.py \ + ${CONFIG_FILE} \ + [-o, --output-dir ${OUTPUT_DIR}] \ + [-p, --phase ${DATASET_PHASE}] \ + [-n, --show-number ${NUMBER_IMAGES_DISPLAY}] \ + [-i, --show-interval ${SHOW_INTERRVAL}] \ + [-m, --mode ${DISPLAY_MODE}] \ + [-r, --rescale-factor ${RESCALE_FACTOR}] \ + [-c, --channel-order ${CHANNEL_ORDER}] \ + [--cfg-options ${CFG_OPTIONS}] +``` + +**Description of all arguments**: + +- `config` : The path of a model config file. +- `-o, --output-dir`: The output path for visualized images. If not specified, it will be set to `''`, which means not to save. +- **`-p, --phase`**: Phase of visualizing dataset,must be one of `['train', 'val', 'test']`. If not specified, it will be set to `'train'`. +- **`-n, --show-number`**: The number of samples to visualized. If not specified, display all images in the dataset. +- `--show-interval`: The interval of show (s). +- **`-m, --mode`**: The display mode, can be one of `['original', 'transformed', 'concat', 'pipeline']`. If not specified, it will be set to `'transformed'`. +- `-r, --rescale-factor`: The image rescale factor, which is useful if the output is too large or too small + in the `original` mode. +- `-c, --channel-order`: The channel of the showing images, could be "BGR" or "RGB", If not specified, it will be set to 'BGR'. +- `--cfg-options` : Modifications to the configuration file, refer to [Learn about Configs](../user_guides/config.md). + +```{note} +1. The `-m, --mode` is about display mode, display original pictures or transformed pictures or comparison pictures: +- "original" means show images load from disk; +- "transformed" means to show images after transformed; +- "concat" means show images stitched by "original" and "transformed" images; +- "pipeline" means show all the intermediate images throghout the pipeline. + +2. The `-r, --rescale-factor` option is set when the label information is too large or too small relative to the picture. 
For example, when visualizing the CIFAR dataset, since the resolution of the image is very small, `--rescale-factor` can be set to 10. +``` + +## How to visualize the original image + +In **'original'** mode: + +```shell +python ./tools/visualization/browse_dataset.py ./configs/resnet/resnet101_8xb16_cifar10.py --phase val --output-dir tmp --mode original --show-number 100 --rescale-factor 10 --channel-order RGB +``` + +- `--phase val`: Visual validation set, can be simplified to `-p val`; +- `--output-dir tmp`: The visualization results are saved in the "tmp" folder, can be simplified to `-o tmp`; +- `--mode original`: Visualize the original image, can be simplified to `-m original`; +- `--show-number 100`: visualize 100 images, can be simplified to `-n 100`; +- `--rescale-factor`: the image is enlarged by 10 times, can be simplified to `-r 10`; +- `--channel-order RGB`: The channel order of the visualized image is "RGB", can be simplified to `-c RGB`. + +
+ +## How to visualize the transformed images + +In **'transformed'** mode: + +```shell +python ./tools/visualization/browse_dataset.py ./configs/resnet/resnet50_8xb32_in1k.py -n 100 +``` + +
+ +## How to visualize the transformed images and original images together + +In **'concat'** mode: + +```shell +python ./tools/visualization/browse_dataset.py configs/swin_transformer/swin-small_16xb64_in1k.py -n 10 -m concat +``` + +
+ +## How to visualize the intermediate images in the pipeline + +In **'pipeline'** mode: + +```shell +python ./tools/visualization/browse_dataset.py configs/swin_transformer/swin-small_16xb64_in1k.py -m pipeline +``` + +
+ +```shell +python ./tools/visualization/browse_dataset.py configs/beit/beit_beit-base-p16_8xb256-amp-coslr-300e_in1k.py -m pipeline +``` + +
diff --git a/docs/en/useful_tools/log_result_analysis.md b/docs/en/useful_tools/log_result_analysis.md new file mode 100644 index 0000000..99968d7 --- /dev/null +++ b/docs/en/useful_tools/log_result_analysis.md @@ -0,0 +1,226 @@ +# Log and Results Analysis + +## Log Analysis + +### Introduction of log analysis tool + +`tools/analysis_tools/analyze_logs.py` plots curves of given keys according to the log files. + +
+ +```shell +python tools/analysis_tools/analyze_logs.py plot_curve \ + ${JSON_LOGS} \ + [--keys ${KEYS}] \ + [--title ${TITLE}] \ + [--legend ${LEGEND}] \ + [--backend ${BACKEND}] \ + [--style ${STYLE}] \ + [--out ${OUT_FILE}] \ + [--window-size ${WINDOW_SIZE}] +``` + +**Description of all arguments**: + +- `json_logs` : The paths of the log files, separate multiple files by spaces. +- `--keys` : The fields of the logs to analyze, separate multiple keys by spaces. Defaults to 'loss'. +- `--title` : The title of the figure. Defaults to use the filename. +- `--legend` : The names of legend, the number of which must be equal to `len(${JSON_LOGS}) * len(${KEYS})`. Defaults to use `"${JSON_LOG}-${KEYS}"`. +- `--backend` : The backend of matplotlib. Defaults to auto selected by matplotlib. +- `--style` : The style of the figure. Default to `whitegrid`. +- `--out` : The path of the output picture. If not set, the figure won't be saved. +- `--window-size`: The shape of the display window. The format should be `'W*H'`. Defaults to `'12*7'`. + +```{note} +The `--style` option depends on `seaborn` package, please install it before setting it. +``` + +### How to plot the loss/accuracy curve + +We present some examples here to show how to plot the loss curve of accuracy curve by using the `tools/analysis_tools/analyze_logs.py` + +#### Plot the loss curve in training. + +```shell +python tools/analysis_tools/analyze_logs.py plot_curve your_log_json --keys loss --legend loss +``` + +#### Plot the top-1 accuracy and top-5 accuracy curves, and save the figure to results.jpg. + +```shell +python tools/analysis_tools/analyze_logs.py plot_curve your_log_json --keys accuracy/top1 accuracy/top5 --legend top1 top5 --out results.jpg +``` + +#### Compare the top-1 accuracy of two log files in the same figure. + +```shell +python tools/analysis_tools/analyze_logs.py plot_curve log1.json log2.json --keys accuracy/top1 --legend exp1 exp2 +``` + +### How to calculate training time + +`tools/analysis_tools/analyze_logs.py` can also calculate the training time according to the log files. + +```shell +python tools/analysis_tools/analyze_logs.py cal_train_time \ + ${JSON_LOGS} + [--include-outliers] +``` + +**Description of all arguments**: + +- `json_logs` : The paths of the log files, separate multiple files by spaces. +- `--include-outliers` : If set, include the first time record in each epoch (Sometimes the time of the first iteration is longer). + +Example: + +```shell +python tools/analysis_tools/analyze_logs.py cal_train_time work_dirs/your_exp/20230206_181002/vis_data/scalars.json +``` + +The output is expected to be like the below. + +```text +-----Analyze train time of work_dirs/your_exp/20230206_181002/vis_data/scalars.json----- +slowest epoch 68, average time is 0.3818 +fastest epoch 1, average time is 0.3694 +time std over epochs is 0.0020 +average iter time: 0.3777 s/iter +``` + +## Result Analysis + +With the `--out` argument in `tools/test.py`, we can save the inference results of all samples as a file. +And with this result file, we can do further analysis. + +### How to conduct offline metric evaluation + +We provide `tools/analysis_tools/eval_metric.py` to enable the user evaluate the model from the prediction files. + +```shell +python tools/analysis_tools/eval_metric.py \ + ${RESULT} \ + [--metric ${METRIC_OPTIONS} ...] +``` + +Description of all arguments: + +- `result`: The output result file in pickle format from `tools/test.py`. +- `--metric`: The metric and options to evaluate the results. 
You need to specify at least one metric and you + can also specify multiple `--metric` to use multiple metrics. + +Please refer the [Metric Documentation](mmpretrain.evaluation) to find the available metrics and options. + +```{note} +In `tools/test.py`, we support using `--out-item` option to select which kind of results will be saved. +Please ensure the `--out-item` is not specified or `--out-item=pred` to use this tool. +``` + +**Examples**: + +```shell +# Get the prediction results +python tools/test.py configs/resnet/resnet18_8xb16_cifar10.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth \ + --out results.pkl + +# Eval the top-1 and top-5 accuracy +python tools/analysis_tools/eval_metric.py results.pkl --metric type=Accuracy topk=1,5 + +# Eval the overall accuracy and the class-wise precision, recall, f1-score +python tools/analysis_tools/eval_metric.py results.pkl --metric type=Accuracy \ + --metric type=SingleLabelMetric items=precision,recall,f1-score average=None +``` + +### How to plot the confusion matrix for the test result + +We provide `tools/analysis_tools/confusion_matrix.py` to enable the user plot the confusion matrix from the prediction files. + +```shell +python tools/analysis_tools/confusion_matrix.py \ + ${CONFIG} \ + ${RESULT} \ + [--out ${OUT}] \ + [--show] \ + [--show-path ${SHOW_PATH}] \ + [--include-values] \ + [--cmap] \ + [--cfg-options ${CFG_OPTIONS} ...] \ +``` + +Description of all arguments: + +- `config`: The config file path. +- `result`: The output result file in pickle format from `tools/test.py`, or a checkpoint file. +- `--out`: The path to save the confusion matrix in pickle format. +- `--show`: Whether to show the confusion matrix plot. +- `--show-path`: The path to save the confusion matrix plot. +- `--include-values`: Whether to show the values in the confusion matrix plot. +- `--cmap`: The color map to plot the confusion matrix. +- `--cfg-options`: If specified, the key-value pair config will be merged into the config file, for more details please refer to [Learn about Configs](../user_guides/config.md) + +```{note} +In `tools/test.py`, we support using `--out-item` option to select which kind of results will be saved. +Please ensure the `--out-item` is not specified or `--out-item=pred` to use this tool. +``` + +**Examples**: + +```shell +# Get the prediction results +python tools/test.py configs/resnet/resnet18_8xb16_cifar10.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth \ + --out results.pkl + +# Save the confusion matrix in a pickle file +python tools/analysis_tools/confusion_matrix.py configs/resnet/resnet18_8xb16_cifar10.py results.pkl --out cm.pkl + +# Show the confusion matrix plot in a graphical window. +python tools/analysis_tools/confusion_matrix.py configs/resnet/resnet18_8xb16_cifar10.py results.pkl --show +``` + +### How to visualize the prediction results + +We can use `tools/analysis_tools/analyze_results.py` to save the images with the highest scores in successful or failed prediction. + +```shell +python tools/analysis_tools/analyze_results.py \ + ${CONFIG} \ + ${RESULT} \ + [--out-dir ${OUT_DIR}] \ + [--topk ${TOPK}] \ + [--rescale-factor ${RESCALE_FACTOR}] \ + [--cfg-options ${CFG_OPTIONS}] +``` + +**Description of all arguments**: + +- `config` : The path of the model config file. +- `result`: Output result file in json/pickle format from `tools/test.py`. +- `--out_dir`: Directory to store output files. 
+- `--topk`: The number of images in successful or failed prediction with the highest `topk` scores to save. If not specified, it will be set to 20. +- `--rescale-factor`: Image rescale factor, which is useful if the output is too large or too small (Too small + images may cause the prediction tag is too vague). +- `--cfg-options`: If specified, the key-value pair config will be merged into the config file, for more details please refer to [Learn about Configs](../user_guides/config.md) + +```{note} +In `tools/test.py`, we support using `--out-item` option to select which kind of results will be saved. +Please ensure the `--out-item` is not specified or `--out-item=pred` to use this tool. +``` + +**Examples**: + +```shell +# Get the prediction results +python tools/test.py configs/resnet/resnet18_8xb16_cifar10.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth \ + --out results.pkl + +# Save the top-10 successful and failed predictions. And enlarge the sample images by 10 times. +python tools/analysis_tools/analyze_results.py \ + configs/resnet/resnet18_8xb16_cifar10.py \ + results.pkl \ + --out-dir output \ + --topk 10 \ + --rescale-factor 10 +``` diff --git a/docs/en/useful_tools/model_serving.md b/docs/en/useful_tools/model_serving.md new file mode 100644 index 0000000..9f135fb --- /dev/null +++ b/docs/en/useful_tools/model_serving.md @@ -0,0 +1,88 @@ +# Torchserve Deployment + +In order to serve an `MMPretrain` model with [`TorchServe`](https://pytorch.org/serve/), you can follow the steps: + +## 1. Convert model from MMPretrain to TorchServe + +```shell +python tools/torchserve/mmpretrain2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ +--output-folder ${MODEL_STORE} \ +--model-name ${MODEL_NAME} +``` + +```{note} +${MODEL_STORE} needs to be an absolute path to a folder. +``` + +Example: + +```shell +python tools/torchserve/mmpretrain2torchserve.py \ + configs/resnet/resnet18_8xb32_in1k.py \ + checkpoints/resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + --output-folder ./checkpoints \ + --model-name resnet18_in1k +``` + +## 2. Build `mmpretrain-serve` docker image + +```shell +docker build -t mmpretrain-serve:latest docker/serve/ +``` + +## 3. Run `mmpretrain-serve` + +Check the official docs for [running TorchServe with docker](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment). + +In order to run in GPU, you need to install [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). You can omit the `--gpus` argument in order to run in GPU. + +Example: + +```shell +docker run --rm \ +--name mar \ +--cpus 8 \ +--gpus device=0 \ +-p8080:8080 -p8081:8081 -p8082:8082 \ +--mount type=bind,source=`realpath ./checkpoints`,target=/home/model-server/model-store \ +mmpretrain-serve:latest +``` + +```{note} +`realpath ./checkpoints` points to the absolute path of "./checkpoints", and you can replace it with the absolute path where you store torchserve models. +``` + +[Read the docs](https://github.com/pytorch/serve/blob/master/docs/rest_api.md) about the Inference (8080), Management (8081) and Metrics (8082) APis + +## 4. 
Test deployment + +```shell +curl http://127.0.0.1:8080/predictions/${MODEL_NAME} -T demo/demo.JPEG +``` + +You should obtain a response similar to: + +```json +{ + "pred_label": 58, + "pred_score": 0.38102269172668457, + "pred_class": "water snake" +} +``` + +And you can use `test_torchserver.py` to compare result of TorchServe and PyTorch, and visualize them. + +```shell +python tools/torchserve/test_torchserver.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} +[--inference-addr ${INFERENCE_ADDR}] [--device ${DEVICE}] +``` + +Example: + +```shell +python tools/torchserve/test_torchserver.py \ + demo/demo.JPEG \ + configs/resnet/resnet18_8xb32_in1k.py \ + checkpoints/resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + resnet18_in1k +``` diff --git a/docs/en/useful_tools/print_config.md b/docs/en/useful_tools/print_config.md new file mode 100644 index 0000000..ea40764 --- /dev/null +++ b/docs/en/useful_tools/print_config.md @@ -0,0 +1,27 @@ +# How to Get the Complete Config + +We also provide the `print_config.py` tools to print the complete configuration of the given experiment. +You can check each item of the config before the training by using the following command. + +## Description + +`tools/misc/print_config.py` prints the whole config verbatim, expanding all its imports. + +```shell +python tools/misc/print_config.py ${CONFIG} [--cfg-options ${CFG_OPTIONS}] +``` + +Description of all arguments: + +- `config` : The path of the model config file. +- `--cfg-options`: If specified, the key-value pair config will be merged into the config file, for more details please refer to [Learn about Configs](../user_guides/config.md) + +## Examples + +```shell +# Print a complete config +python tools/misc/print_config.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py + +# Save the complete config to a independent config file. +python tools/misc/print_config.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py > final_config.py +``` diff --git a/docs/en/useful_tools/scheduler_visualization.md b/docs/en/useful_tools/scheduler_visualization.md new file mode 100644 index 0000000..0ba1bdc --- /dev/null +++ b/docs/en/useful_tools/scheduler_visualization.md @@ -0,0 +1,44 @@ +# Hyper-parameter Scheduler Visualization + +This tool aims to help the user to check the hyper-parameter scheduler of the optimizer (without training), which support the "learning rate" or "momentum" + +## Introduce the scheduler visualization tool + +```bash +python tools/visualization/vis_scheduler.py \ + ${CONFIG_FILE} \ + [-p, --parameter ${PARAMETER_NAME}] \ + [-d, --dataset-size ${DATASET_SIZE}] \ + [-n, --ngpus ${NUM_GPUs}] \ + [-s, --save-path ${SAVE_PATH}] \ + [--title ${TITLE}] \ + [--style ${STYLE}] \ + [--window-size ${WINDOW_SIZE}] \ + [--cfg-options] +``` + +**Description of all arguments**: + +- `config`: The path of a model config file. +- **`-p, --parameter`**: The param to visualize its change curve, choose from "lr" and "momentum". Default to use "lr". +- **`-d, --dataset-size`**: The size of the datasets. If set,`build_dataset` will be skipped and `${DATASET_SIZE}` will be used as the size. Default to use the function `build_dataset`. +- **`-n, --ngpus`**: The number of GPUs used in training, default to be 1. +- **`-s, --save-path`**: The learning rate curve plot save path, default not to save. +- `--title`: Title of figure. If not set, default to be config file name. +- `--style`: Style of plt. If not set, default to be `whitegrid`. +- `--window-size`: The shape of the display window. 
If not specified, it will be set to `12*7`. If used, it must be in the format `'W*H'`. +- `--cfg-options`: Modifications to the configuration file, refer to [Learn about Configs](../user_guides/config.md). + +```{note} +Loading annotations maybe consume much time, you can directly specify the size of the dataset with `-d, dataset-size` to save time. +``` + +## How to plot the learning rate curve without training + +You can use the following command to plot the step learning rate schedule used in the config `configs/swin_transformer/swin-base_16xb64_in1k.py`: + +```bash +python tools/visualization/vis_scheduler.py configs/swin_transformer/swin-base_16xb64_in1k.py --dataset-size 1281167 --ngpus 16 +``` + +
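+The curve drawn by this tool is determined by the `param_scheduler` field of the config (together with the
+optimizer settings). The snippet below is a minimal sketch of such a field, assuming a linear warm-up followed
+by a step decay; the numbers are purely illustrative and are not the values used in the Swin config above.
+
+```python
+# A hypothetical schedule: 5 warm-up epochs, then decay the learning rate at epochs 30, 60 and 90.
+param_scheduler = [
+    dict(
+        type='LinearLR',
+        start_factor=0.01,
+        by_epoch=True,
+        begin=0,
+        end=5,
+        convert_to_iter_based=True),
+    dict(
+        type='MultiStepLR',
+        by_epoch=True,
+        begin=5,
+        end=100,
+        milestones=[30, 60, 90],
+        gamma=0.1),
+]
+```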
diff --git a/docs/en/useful_tools/shape_bias.md b/docs/en/useful_tools/shape_bias.md new file mode 100644 index 0000000..907bde6 --- /dev/null +++ b/docs/en/useful_tools/shape_bias.md @@ -0,0 +1,100 @@ +# Shape Bias Tool Usage + +Shape bias measures how a model relies the shapes, compared to texture, to sense the semantics in images. For more details, +we recommend interested readers to this [paper](https://arxiv.org/abs/2106.07411). MMPretrain provide an off-the-shelf toolbox to +obtain the shape bias of a classification model. You can following these steps below: + +## Prepare the dataset + +First you should download the [cue-conflict](https://github.com/bethgelab/model-vs-human/releases/download/v0.1/cue-conflict.tar.gz) to `data` folder, +and then unzip this dataset. After that, you `data` folder should have the following structure: + +```text +data +├──cue-conflict +| |──airplane +| |──bear +| ... +| |── truck +``` + +## Modify the config for classification + +We run the shape-bias tool on a ViT-base model with masked autoencoder pretraining. Its config file is `configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py`, and its checkpoint is downloaded from [this link](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20220825-cf70aa21.pth). Replace the original test_pipeline, test_dataloader and test_evaluation with the following configurations: + +```python +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] +test_dataloader = dict( + pin_memory=True, + collate_fn=dict(type='default_collate'), + batch_size=32, + num_workers=4, + dataset=dict( + type='CustomDataset', + data_root='data/cue-conflict', + pipeline=test_pipeline, + _delete_=True), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, + drop_last=False) +test_evaluator = dict( + type='mmpretrain.ShapeBiasMetric', + _delete_=True, + csv_dir='work_dirs/shape_bias', + model_name='mae') +``` + +Please note you should make custom modifications to the `csv_dir` and `model_name` above. I renamed my modified sample config file as `vit-base-p16_8xb128-coslr-100e_in1k_shape-bias.py` in the folder `configs/mae/benchmarks/`. + +## Inference your model with above modified config file + +Then you should inferece your model on the `cue-conflict` dataset with the your modified config file. + +```shell +# For PyTorch +bash tools/dist_test.sh $CONFIG $CHECKPOINT +``` + +**Description of all arguments**: + +- `$CONFIG`: The path of your modified config file. +- `$CHECKPOINT`: The path or link of the checkpoint file. + +```shell +# Example +bash tools/dist_test.sh configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k_shape-bias.py https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20220825-cf70aa21.pth 1 +``` + +After that, you should obtain a csv file in `csv_dir` folder, named `cue-conflict_model-name_session-1.csv`. Besides this file, you should also download these [csv files](https://github.com/bethgelab/model-vs-human/tree/master/raw-data/cue-conflict) to the +`csv_dir`. 
+ +## Plot shape bias + +Then we can start to plot the shape bias: + +```shell +python tools/analysis_tools/shape_bias.py --csv-dir $CSV_DIR --result-dir $RESULT_DIR --colors $RGB --markers o --plotting-names $YOUR_MODEL_NAME --model-names $YOUR_MODEL_NAME +``` + +**Description of all arguments**: + +- `--csv-dir $CSV_DIR`, the same directory to save these csv files. +- `--result-dir $RESULT_DIR`, the directory to output the result named `cue-conflict_shape-bias_matrixplot.pdf`. +- `--colors $RGB`, should be the RGB values, formatted in R G B, e.g. 100 100 100, and can be multiple RGB values, if you want to plot the shape bias of several models. +- `--plotting-names $YOUR_MODEL_NAME`, the name of the legend in the shape bias figure, and you can set it as your model name. If you want to plot several models, plotting_names can be multiple values. +- `model-names $YOUR_MODEL_NAME`, should be the same name specified in your config, and can be multiple names if you want to plot the shape bias of several models. + +Please note, every three values for `--colors` corresponds to one value for `--model-names`. After all of above steps, you are expected to obtain the following figure. + +
diff --git a/docs/en/useful_tools/t-sne_visualization.md b/docs/en/useful_tools/t-sne_visualization.md new file mode 100644 index 0000000..9f24a11 --- /dev/null +++ b/docs/en/useful_tools/t-sne_visualization.md @@ -0,0 +1,85 @@ +# t-Distributed Stochastic Neighbor Embedding (t-SNE) Visualization + +## Introduction of the t-SNE visualization tool + +MMPretrain provides `tools/visualization/vis_tsne.py` tool to visualize the feature embeddings of images by t-SNE. Please install `sklearn` to calculate t-SNE by `pip install scikit-learn`. + +**Command**: + +```bash +python tools/visualization/vis_tsne.py \ + CONFIG \ + [--checkpoint CHECKPOINT] \ + [--work-dir WORK_DIR] \ + [--test-cfg TEST_CFG] \ + [--vis-stage {backbone,neck,pre_logits}] + [--class-idx ${CLASS_IDX} [CLASS_IDX ...]] + [--max-num-class MAX_NUM_CLASS] + [--max-num-samples MAX_NUM_SAMPLES] + [--cfg-options CFG_OPTIONS [CFG_OPTIONS ...]] + [--device DEVICE] + [--legend] + [--show] + [--n-components N_COMPONENTS] + [--perplexity PERPLEXITY] + [--early-exaggeration EARLY_EXAGGERATION] + [--learning-rate LEARNING_RATE] + [--n-iter N_ITER] + [--n-iter-without-progress N_ITER_WITHOUT_PROGRESS] + [--init INIT] +``` + +**Description of all arguments**: + +- `CONFIG`: The path of t-SNE config file. +- `--checkpoint CHECKPOINT`: The path of the checkpoint file. +- `--work-dir WORK_DIR`: The directory to save logs and visualization images. +- `--test-cfg TEST_CFG`: The path of t-SNE config file to load config of test dataloader. +- `--vis-stage {backbone,neck,pre_logits}`: The visualization stage of the model. +- `--class-idx CLASS_IDX [CLASS_IDX ...]`: The categories used to calculate t-SNE. +- `--max-num-class MAX_NUM_CLASS`: The first N categories to apply t-SNE algorithms. Defaults to 20. +- `--max-num-samples MAX_NUM_SAMPLES`: The maximum number of samples per category. Higher number need longer time to calculate. Defaults to 100. +- `--cfg-options CFG_OPTIONS [CFG_OPTIONS ...]`: override some settings in the used config, the key-value pair in xxx=yyy format will be merged into config file. If the value to be overwritten is a list, it should be like key="[a,b]" or key=a,b It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation marks are necessary and that no white space is allowed. +- `--device DEVICE`: Device used for inference. +- `--legend`: Show the legend of all categories. +- `--show`: Display the result in a graphical window. +- `--n-components N_COMPONENTS`: The dimension of results. +- `--perplexity PERPLEXITY`: The perplexity is related to the number of nearest neighbors that is used in other manifold learning algorithms. +- `--early-exaggeration EARLY_EXAGGERATION`: Controls how tight natural clusters in the original space are in the embedded space and how much space will be between them. +- `--learning-rate LEARNING_RATE`: The learning rate for t-SNE is usually in the range[10.0, 1000.0]. If the learning rate is too high, the data may looklike a ball with any point approximately equidistant from its nearestneighbours. If the learning rate is too low, most points may lookcompressed in a dense cloud with few outliers. +- `--n-iter N_ITER`: Maximum number of iterations for the optimization. Should be at least 250. +- `--n-iter-without-progress N_ITER_WITHOUT_PROGRESS`: Maximum number of iterations without progress before we abort the optimization. +- `--init INIT`: The init method. 
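+The options above map onto the arguments of `sklearn.manifold.TSNE`, which is used to compute the embedding once
+the features have been extracted. The snippet below is a minimal sketch of that step on a stand-in feature matrix;
+the array shape and values are made up for illustration, and recent scikit-learn releases rename `n_iter` to `max_iter`.
+
+```python
+import numpy as np
+from sklearn.manifold import TSNE
+
+# Stand-in for the features extracted from the backbone/neck: 1000 samples, 512 dimensions.
+features = np.random.rand(1000, 512)
+
+tsne = TSNE(
+    n_components=2,                # --n-components
+    perplexity=30.0,               # --perplexity
+    early_exaggeration=12.0,       # --early-exaggeration
+    learning_rate=200.0,           # --learning-rate
+    n_iter=1000,                   # --n-iter (should be at least 250)
+    n_iter_without_progress=300,   # --n-iter-without-progress
+    init='random',                 # --init
+)
+embedding = tsne.fit_transform(features)  # (1000, 2) array, ready to be scatter-plotted
+```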
+ +## How to visualize the t-SNE of a image classifier (such as ResNet) + +Here are two examples of running t-SNE visualization on ResNet-18 and ResNet-50 models, trained on CIFAR-10 dataset: + +```shell +python tools/visualization/vis_tsne.py \ + configs/resnet/resnet18_8xb16_cifar10.py \ + --checkpoint https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth + +python tools/visualization/vis_tsne.py \ + configs/resnet/resnet50_8xb16_cifar10.py \ + --checkpoint https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth +``` + +| ResNet-18 | ResNet-50 | +| ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | +|
| + +## How to visualize the t-SNE of a self-supervised model (such as MAE) + +Here is an example of running t-SNE visualization on MAE-ViT-base model, trained on ImageNet dataset. The input data is from ImageNet validation set. MAE and some self-supervised pre-training algorithms do not have test_dataloader information. When analyzing such self-supervised algorithms, you need to add test_dataloader information in the config, or you can use '--test-cfg' argument to specify a config file. + +```shell +python tools/visualization/vis_tsne.py \ + configs/mae/mae_vit-base-p16_8xb512-amp-coslr-800e_in1k.py \ + --checkpoint https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-800e_in1k/mae_vit-base-p16_8xb512-coslr-800e-fp16_in1k_20220825-5d81fbc4.pth \ + --test-cfg configs/_base_/datasets/imagenet_bs32.py +``` + +| MAE-ViT-base | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +|
|
diff --git a/docs/en/useful_tools/verify_dataset.md b/docs/en/useful_tools/verify_dataset.md
new file mode 100644
index 0000000..d27948f
--- /dev/null
+++ b/docs/en/useful_tools/verify_dataset.md
@@ -0,0 +1,28 @@
+# Verify Dataset
+
+In MMPretrain, we also provide the tool `tools/misc/verify_dataset.py` to check whether there are any **broken images** in a given dataset.
+
+## Introduction of the tool
+
+```shell
+python tools/misc/verify_dataset.py \
+    ${CONFIG} \
+    [--out-path ${OUT-PATH}] \
+    [--phase ${PHASE}] \
+    [--num-process ${NUM-PROCESS}] \
+    [--cfg-options ${CFG_OPTIONS}]
+```
+
+**Description of all arguments**:
+
+- `config`: The path of the model config file.
+- `--out-path`: The path to save the verification result. Defaults to 'brokenfiles.log'.
+- `--phase`: The phase of the dataset to verify; accepts "train", "test" and "val". Defaults to "train".
+- `--num-process`: The number of processes to use. Defaults to 1.
+- `--cfg-options`: If specified, the key-value pair config will be merged into the config file. For more details, please refer to [Learn about Configs](../user_guides/config.md).
+
+## Example
+
+```shell
+python tools/misc/verify_dataset.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py --out-path broken_imgs.log --phase val --num-process 8
+```
diff --git a/docs/en/user_guides/config.md b/docs/en/user_guides/config.md
new file mode 100644
index 0000000..6077c70
--- /dev/null
+++ b/docs/en/user_guides/config.md
@@ -0,0 +1,421 @@
+# Learn about Configs
+
+To manage the various configurations of a deep-learning experiment, we use config files to record all of
+these configurations. This config system has a modular and inheritance design, and more details can be found in
+{external+mmengine:doc}`the tutorial in MMEngine `.
+
+Usually, we use Python files as config files. All configuration files are placed under the [`configs`](https://github.com/open-mmlab/mmpretrain/tree/main/configs) folder, and the directory structure is as follows:
+
+```text
+MMPretrain/
+    ├── configs/
+    │   ├── _base_/                  # primitive configuration folder
+    │   │   ├── datasets/            # primitive datasets
+    │   │   ├── models/              # primitive models
+    │   │   ├── schedules/           # primitive schedules
+    │   │   └── default_runtime.py   # primitive runtime setting
+    │   ├── beit/                    # BEiT Algorithms Folder
+    │   ├── mae/                     # MAE Algorithms Folder
+    │   ├── mocov2/                  # MoCoV2 Algorithms Folder
+    │   ├── resnet/                  # ResNet Algorithms Folder
+    │   ├── swin_transformer/        # Swin Algorithms Folder
+    │   ├── vision_transformer/      # ViT Algorithms Folder
+    │   ├── ...
+    └── ...
+```
+
+If you wish to inspect the config file, you may run `python tools/misc/print_config.py /PATH/TO/CONFIG` to see the complete config.
+
+This article mainly explains the structure of configuration files, and how to modify them based on the existing configuration files. We will take the [ResNet50 config file](https://github.com/open-mmlab/mmpretrain/blob/main/configs/resnet/resnet50_8xb32_in1k.py) as an example and explain it line by line.
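+
+Besides `print_config.py`, you can also load and inspect a config programmatically with MMEngine. The snippet below is a small sketch; it assumes you run it from the repository root so that the relative config path resolves.
+
+```python
+from mmengine.config import Config
+
+# Parse the config file and resolve its `_base_` inheritance chain.
+cfg = Config.fromfile('configs/resnet/resnet50_8xb32_in1k.py')
+
+# Access merged fields like attributes ...
+print(cfg.model.backbone.type)        # e.g. 'ResNet'
+print(cfg.train_dataloader.batch_size)
+
+# ... or dump the fully merged config as text.
+print(cfg.pretty_text)
+```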
+
+## Config Structure
+
+There are four kinds of basic component files in the `configs/_base_` folders, namely:
+
+- [models](https://github.com/open-mmlab/mmpretrain/tree/main/configs/_base_/models)
+- [datasets](https://github.com/open-mmlab/mmpretrain/tree/main/configs/_base_/datasets)
+- [schedules](https://github.com/open-mmlab/mmpretrain/tree/main/configs/_base_/schedules)
+- [runtime](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/default_runtime.py)
+
+We call the config files in the `_base_` folder _primitive_ config files. You can easily build your training config file by inheriting some primitive config files.
+
+For easy understanding, we use the [ResNet50 config file](https://github.com/open-mmlab/mmpretrain/blob/main/configs/resnet/resnet50_8xb32_in1k.py) as an example and comment on each line.
+
+```python
+_base_ = [                                    # This config file will inherit all config files in `_base_`.
+    '../_base_/models/resnet50.py',           # model settings
+    '../_base_/datasets/imagenet_bs32.py',    # data settings
+    '../_base_/schedules/imagenet_bs256.py',  # schedule settings
+    '../_base_/default_runtime.py'            # runtime settings
+]
+```
+
+We will explain the four primitive config files separately below.
+
+### Model settings
+
+This primitive config file includes a dict variable `model`, which mainly includes information such as network structure and loss function:
+
+- `type`: The type of model to build; we support several tasks.
+  - For image classification tasks, it's usually `ImageClassifier`. You can find more details in the [API documentation](mmpretrain.models.classifiers).
+  - For self-supervised learning, there are several `SelfSupervisors`, such as `MoCoV2`, `BEiT`, `MAE`, etc. You can find more details in the [API documentation](mmpretrain.models.selfsup).
+  - For image retrieval tasks, it's usually `ImageToImageRetriever`. You can find more details in the [API documentation](mmpretrain.models.retrievers).
+
+Usually, we use the **`type` field** to specify the class of the component and use other fields to pass
+the initialization arguments of the class. The {external+mmengine:doc}`registry tutorial ` describes it in detail.
+
+Here, we use the config fields of [`ImageClassifier`](mmpretrain.models.classifiers.ImageClassifier) as an example to
+describe the initialization arguments as below:
+
+- `backbone`: The settings of the backbone. The backbone is the main network to extract features of the inputs, like `ResNet`, `Swin Transformer`, `Vision Transformer`, etc. All available backbones can be found in the [API documentation](mmpretrain.models.backbones).
+  - For self-supervised learning, some of the backbones are re-implemented; you can find more details in the [API documentation](mmpretrain.models.selfsup).
+- `neck`: The settings of the neck. The neck is the intermediate module to connect the backbone and the head, like `GlobalAveragePooling`. All available necks can be found in the [API documentation](mmpretrain.models.necks).
+- `head`: The settings of the task head. The head is the task-related component to do a specified task, like image classification or self-supervised training. All available heads can be found in the [API documentation](mmpretrain.models.heads).
+  - `loss`: The loss function to optimize, like `CrossEntropyLoss`, `LabelSmoothLoss`, `PixelReconstructionLoss`, etc. All available losses can be found in the [API documentation](mmpretrain.models.losses).
+- `data_preprocessor`: The component before the model forwarding to preprocess the inputs.
See the [documentation](mmpretrain.models.utils.data_preprocessor) for more details. +- `train_cfg`: The extra settings of `ImageClassifier` during training. In `ImageClassifier`, we mainly use it to specify batch augmentation settings, like `Mixup` and `CutMix`. See the [documentation](mmpretrain.models.utils.batch_augments) for more details. + +Following is the model primitive config of the ResNet50 config file in [`configs/_base_/models/resnet50.py`](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/models/resnet50.py): + +```python +model = dict( + type='ImageClassifier', # The type of the main model (here is for image classification task). + backbone=dict( + type='ResNet', # The type of the backbone module. + # All fields except `type` come from the __init__ method of class `ResNet` + # and you can find them from https://mmpretrain.readthedocs.io/en/latest/api/generated/mmpretrain.models.backbones.ResNet.html + depth=50, + num_stages=4, + out_indices=(3, ), + frozen_stages=-1, + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), # The type of the neck module. + head=dict( + type='LinearClsHead', # The type of the classification head module. + # All fields except `type` come from the __init__ method of class `LinearClsHead` + # and you can find them from https://mmpretrain.readthedocs.io/en/latest/api/generated/mmpretrain.models.heads.LinearClsHead.html + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + )) +``` + +### Data settings + +This primitive config file includes information to construct the dataloader and evaluator: + +- `data_preprocessor`: Model input preprocessing configuration, same as `model.data_preprocessor` but with lower priority. +- `train_evaluator | val_evaluator | test_evaluator`: To build the evaluator or metrics, refer to the [tutorial](mmpretrain.evaluation). +- `train_dataloader | val_dataloader | test_dataloader`: The settings of dataloaders + - `batch_size`: The batch size of each GPU. + - `num_workers`: The number of workers to fetch data of each GPU. + - `sampler`: The settings of the sampler. + - `persistent_workers`: Whether to persistent workers after finishing one epoch. + - `dataset`: The settings of the dataset. + - `type`: The type of the dataset, we support `CustomDataset`, `ImageNet` and many other datasets, refer to [documentation](mmpretrain.datasets). + - `pipeline`: The data transform pipeline. You can find how to design a pipeline in [this tutorial](https://mmpretrain.readthedocs.io/en/latest/tutorials/data_pipeline.html). 
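+
+Since the `pipeline` field is just a list of transform configs, it can help to see what such a pipeline actually does to a single image before looking at the full data file below. The following is a small sketch that instantiates a few of the transforms used on this page directly as Python classes (rather than through the registry, to keep the example self-contained) and applies them to the demo image shipped with the repository; it assumes mmpretrain and its dependencies are installed.
+
+```python
+from mmcv.transforms import CenterCrop, LoadImageFromFile
+from mmengine.dataset import Compose
+from mmpretrain.datasets.transforms import PackInputs, ResizeEdge
+
+# A test-time pipeline equivalent to the config-style dicts used in this page.
+pipeline = Compose([
+    LoadImageFromFile(),                 # read the image from `img_path`
+    ResizeEdge(scale=256, edge='short'), # scale the short side to 256
+    CenterCrop(crop_size=224),           # center crop to 224x224
+    PackInputs(),                        # pack image and labels for the model
+])
+
+sample = pipeline(dict(img_path='demo/demo.JPEG'))
+print(sample['inputs'].shape)   # packed image tensor, e.g. (3, 224, 224)
+print(sample['data_samples'])   # a data sample holding labels / meta information
+```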
+ +Following is the data primitive config of the ResNet50 config in [`configs/_base_/datasets/imagenet_bs32.py`](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/datasets/imagenet_bs32.py): + +```python +dataset_type = 'ImageNet' +# preprocessing configuration +data_preprocessor = dict( + # Input image data channels in 'RGB' order + mean=[123.675, 116.28, 103.53], # Input image normalized channel mean in RGB order + std=[58.395, 57.12, 57.375], # Input image normalized channel std in RGB order + to_rgb=True, # Whether to flip the channel from BGR to RGB or RGB to BGR +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), # read image + dict(type='RandomResizedCrop', scale=224), # Random scaling and cropping + dict(type='RandomFlip', prob=0.5, direction='horizontal'), # random horizontal flip + dict(type='PackInputs'), # prepare images and labels +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), # read image + dict(type='ResizeEdge', scale=256, edge='short'), # Scale the short side to 256 + dict(type='CenterCrop', crop_size=224), # center crop + dict(type='PackInputs'), # prepare images and labels +] + +# Construct training set dataloader +train_dataloader = dict( + batch_size=32, # batchsize per GPU + num_workers=5, # Number of workers to fetch data per GPU + dataset=dict( # training dataset + type=dataset_type, + data_root='data/imagenet', + ann_file='meta/train.txt', + data_prefix='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), # default sampler + persistent_workers=True, # Whether to keep the process, can shorten the preparation time of each epoch +) + +# Construct the validation set dataloader +val_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + ann_file='meta/val.txt', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +# The settings of the evaluation metrics for validation. We use the top1 and top5 accuracy here. +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +test_dataloader = val_dataloader # The settings of the dataloader for the test dataset, which is the same as val_dataloader +test_evaluator = val_evaluator # The settings of the evaluation metrics for test, which is the same as val_evaluator +``` + +```{note} +The data preprocessor can be defined either in the subfield of `model`, or a using the `data_preprocessor` definition here, if both of them exist, use the `model.data_preprocessor` configuration. +``` + +### Schedule settings + +This primitive config file mainly contains training strategy settings and the settings of training, val and +test loops: + +- `optim_wrapper`: The settings of the optimizer wrapper. We use the optimizer wrapper to customize the + optimization process. + - `optimizer`: Supports all `pytorch` optimizers, refers to the relevant {external+mmengine:doc}`MMEngine documentation `. + - `paramwise_cfg`: To set different optimization arguments according to the parameters' type or name, refer to the relevant [learning policy documentation](../advanced_guides/schedule.md). + - `accumulative_counts`: Optimize parameters after several backward steps instead of one backward step. You + can use it to simulate large batch size by small batch size. +- `param_scheduler`: Optimizer parameters policy. You can use it to specify learning rate and momentum curves during training. 
See the {external+mmengine:doc}`documentation ` in MMEngine for more details.
+- `train_cfg | val_cfg | test_cfg`: The settings of the training, validation and test loops; refer to the relevant {external+mmengine:doc}`MMEngine documentation `.
+
+Following is the schedule primitive config of the ResNet50 config in [`configs/_base_/schedules/imagenet_bs256.py`](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/schedules/imagenet_bs256.py):
+
+```python
+optim_wrapper = dict(
+    # Use SGD optimizer to optimize parameters.
+    optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001))
+
+# The tuning strategy of the learning rate.
+# The 'MultiStepLR' means to use multiple steps policy to schedule the learning rate (LR).
+param_scheduler = dict(
+    type='MultiStepLR', by_epoch=True, milestones=[30, 60, 90], gamma=0.1)
+
+# Training configuration: iterate 100 epochs, and perform validation after every training epoch.
+# 'by_epoch=True' means to use `EpochBaseTrainLoop`, 'by_epoch=False' means to use IterBaseTrainLoop.
+train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1)
+# Use the default val loop settings.
+val_cfg = dict()
+# Use the default test loop settings.
+test_cfg = dict()
+
+# This schedule is for the total batch size 256.
+# If you use a different total batch size, like 512, and enable automatic learning rate scaling,
+# the learning rate will be scaled up to 2 times.
+auto_scale_lr = dict(base_batch_size=256)
+```
+
+### Runtime settings
+
+This part mainly includes the checkpoint saving strategy, log configuration, training parameters, breakpoint weight path, working directory, etc.
+
+Here is the runtime primitive config file ['configs/_base_/default_runtime.py'](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/default_runtime.py), which is used by almost all configs:
+
+```python
+# defaults to use registries in mmpretrain
+default_scope = 'mmpretrain'
+
+# configure default hooks
+default_hooks = dict(
+    # record the time of every iteration.
+    timer=dict(type='IterTimerHook'),
+
+    # print log every 100 iterations.
+    logger=dict(type='LoggerHook', interval=100),
+
+    # enable the parameter scheduler.
+    param_scheduler=dict(type='ParamSchedulerHook'),
+
+    # save checkpoint per epoch.
+    checkpoint=dict(type='CheckpointHook', interval=1),
+
+    # set sampler seed in a distributed environment.
+    sampler_seed=dict(type='DistSamplerSeedHook'),
+
+    # validation results visualization, set True to enable it.
+    visualization=dict(type='VisualizationHook', enable=False),
+)
+
+# configure environment
+env_cfg = dict(
+    # whether to enable cudnn benchmark
+    cudnn_benchmark=False,
+
+    # set multi-process parameters
+    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+
+    # set distributed parameters
+    dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+vis_backends = [dict(type='LocalVisBackend')]  # use local HDD backend
+visualizer = dict(
+    type='UniversalVisualizer', vis_backends=vis_backends, name='visualizer')
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+```
+
+## Inherit and Modify Config File
+
+For easy understanding, we recommend contributors inherit from existing config files. But do not abuse
+inheritance. Usually, for all config files, we recommend a maximum inheritance level of 3.
+
+For example, if your config file is based on ResNet with some other modification, you can first inherit the
+basic ResNet structure, dataset and other training settings by specifying `_base_ ='./resnet50_8xb32_in1k.py'`
+(the path is relative to your config file), and then modify the necessary parameters in the config file. As a more
+specific example, suppose we want to use almost all of the configs in `configs/resnet/resnet50_8xb32_in1k.py`, but we
+want to use the `CutMix` batch augmentation during training, change the number of training epochs from 100 to 300,
+modify when the learning rate decays, and change the dataset path. We can then create a new config file
+`configs/resnet/resnet50_8xb32-300e_in1k.py` with the content below:
+
+```python
+# create this file under 'configs/resnet/' folder
+_base_ = './resnet50_8xb32_in1k.py'
+
+# using CutMix batch augment
+model = dict(
+    train_cfg=dict(
+        augments=dict(type='CutMix', alpha=1.0)
+    )
+)
+
+# trains more epochs
+train_cfg = dict(max_epochs=300, val_interval=10)  # Train for 300 epochs, evaluate every 10 epochs
+param_scheduler = dict(step=[150, 200, 250])  # The learning rate adjustment has also changed
+
+# Use your own dataset directory
+train_dataloader = dict(
+    dataset=dict(data_root='mydata/imagenet/train'),
+)
+val_dataloader = dict(
+    batch_size=64,  # No back-propagation during validation, larger batch size can be used
+    dataset=dict(data_root='mydata/imagenet/val'),
+)
+test_dataloader = dict(
+    batch_size=64,  # No back-propagation during test, larger batch size can be used
+    dataset=dict(data_root='mydata/imagenet/val'),
+)
+```
+
+### Use intermediate variables in configs
+
+Some intermediate variables are used in the configuration files. The intermediate variables make the configuration file clearer and easier to modify.
+
+For example, `train_pipeline` / `test_pipeline` is the intermediate variable of the data pipeline. We first need to define `train_pipeline` / `test_pipeline`, and then pass them to `train_dataloader` / `test_dataloader`. If you want to modify the size of the input image during training and testing, you need to modify the intermediate variables of `train_pipeline` / `test_pipeline`.
+
+```python
+bgr_mean = [103.53, 116.28, 123.675]  # mean in BGR order
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='RandomResizedCrop', scale=224, backend='pillow', interpolation='bicubic'),
+    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
+    dict(
+        type='RandAugment',
+        policies='timm_increasing',
+        num_policies=2,
+        total_level=10,
+        magnitude_level=6,
+        magnitude_std=0.5,
+        hparams=dict(pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
+    dict(type='PackInputs'),
+]
+
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='ResizeEdge', scale=236, edge='short', backend='pillow', interpolation='bicubic'),
+    dict(type='CenterCrop', crop_size=224),
+    dict(type='PackInputs')
+]
+
+train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
+val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
+test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
+```
+
+### Ignore some fields in the base configs
+
+Sometimes, you need to set `_delete_=True` to ignore some of the fields in the base configuration file. You can refer to the {external+mmengine:doc}`documentation in MMEngine ` for more instructions.
+
+The following is an example.
If you want to use cosine schedule in the above ResNet50 case, just using inheritance and directly modifying it will report `get unexpected keyword 'step'` error, because the `'step'` field of the basic config in `param_scheduler` domain information is reserved, and you need to add `_delete_ =True` to ignore the content of `param_scheduler` related fields in the basic configuration file: + +```python +_base_ = '../../configs/resnet/resnet50_8xb32_in1k.py' + +# the learning rate scheduler +param_scheduler = dict(type='CosineAnnealingLR', by_epoch=True, _delete_=True) +``` + +### Use some fields in the base configs + +Sometimes, you may refer to some fields in the `_base_` config, to avoid duplication of definitions. You can refer to {external+mmengine:doc}`MMEngine ` for some more instructions. + +The following is an example of using auto augment in the training data preprocessing pipeline, refer to [`configs/resnest/resnest50_32xb64_in1k.py`](https://github.com/open-mmlab/mmpretrain/blob/main/configs/resnest/resnest50_32xb64_in1k.py). When defining `train_pipeline`, just add the definition file name of auto augment to `_base_`, and then use `_base_.auto_increasing_policies` to reference the variables in the primitive config: + +```python +_base_ = [ + '../_base_/models/resnest50.py', '../_base_/datasets/imagenet_bs64.py', + '../_base_/default_runtime.py', './_randaug_policies.py', +] + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandAugment', + policies=_base_.policies, # This uses the `policies` parameter in the primitive config. + num_policies=2, + magnitude_level=12), + dict(type='EfficientNetRandomCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='Lighting', + eigval=EIGVAL, + eigvec=EIGVEC, + alphastd=0.1, + to_rgb=False), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +``` + +## Modify config in command + +When you use the script "tools/train.py" or "tools/test.py" to submit tasks or use some other tools, they can directly modify the content of the configuration file used by specifying the `--cfg-options` argument. + +- Update config keys of dict chains. + + The config options can be specified following the order of the dict keys in the original config. + For example, `--cfg-options model.backbone.norm_eval=False` changes the all BN modules in model backbones to `train` mode. + +- Update keys inside a list of configs. + + Some config dicts are composed as a list in your config. For example, the training pipeline `data.train.pipeline` is normally a list + e.g. `[dict(type='LoadImageFromFile'), dict(type='TopDownRandomFlip', flip_prob=0.5), ...]`. If you want to change `'flip_prob=0.5'` to `'flip_prob=0.0'` in the pipeline, + you may specify `--cfg-options data.train.pipeline.1.flip_prob=0.0`. + +- Update values of list/tuples. + + If the value to be updated is a list or a tuple. For example, the config file normally sets `val_evaluator = dict(type='Accuracy', topk=(1, 5))`. If you want to change the field `topk`, you may specify `--cfg-options val_evaluator.topk="(1,3)"`. Note that the quotation mark " is necessary to support list/tuple data types and that **NO** white space is allowed inside the quotation marks in the specified value. 
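+
+The same dotted-key syntax is also available programmatically through MMEngine's `Config.merge_from_dict`, which is essentially what the scripts do with the parsed `--cfg-options`. A small sketch (the config path and the overridden keys below are only examples taken from the ResNet-50 config discussed earlier):
+
+```python
+from mmengine.config import Config
+
+cfg = Config.fromfile('configs/resnet/resnet50_8xb32_in1k.py')
+
+# Roughly equivalent to:
+#   --cfg-options model.backbone.frozen_stages=2 val_evaluator.topk="(1,3)"
+cfg.merge_from_dict({
+    'model.backbone.frozen_stages': 2,
+    'val_evaluator.topk': (1, 3),
+})
+
+print(cfg.model.backbone.frozen_stages)  # 2
+print(cfg.val_evaluator.topk)            # (1, 3)
+```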
diff --git a/docs/en/user_guides/dataset_prepare.md b/docs/en/user_guides/dataset_prepare.md new file mode 100644 index 0000000..17ec229 --- /dev/null +++ b/docs/en/user_guides/dataset_prepare.md @@ -0,0 +1,364 @@ +# Prepare Dataset + +## CustomDataset + +[`CustomDataset`](mmpretrain.datasets.CustomDataset) is a general dataset class for you to use your own datasets. To use `CustomDataset`, you need to organize your dataset files according to the following two formats: + +### Subfolder Format + +In this format, you only need to re-organize your dataset folder and place all samples in one folder without +creating any annotation files. + +For supervised tasks (with `with_label=True`), we use the name of sub-folders as the categories names, as +shown in the below example, `class_x` and `class_y` will be recognized as the categories names. + +```text +data_prefix/ +├── class_x +│ ├── xxx.png +│ ├── xxy.png +│ └── ... +│ └── xxz.png +└── class_y + ├── 123.png + ├── nsdf3.png + ├── ... + └── asd932_.png +``` + +For unsupervised tasks (with `with_label=False`), we directly load all sample files under the specified folder: + +```text +data_prefix/ +├── folder_1 +│ ├── xxx.png +│ ├── xxy.png +│ └── ... +├── 123.png +├── nsdf3.png +└── ... +``` + +Assume you want to use it as the training dataset, and the below is the configurations in your config file. + +```python +train_dataloader = dict( + ... + # Training dataset configurations + dataset=dict( + type='CustomDataset', + data_prefix='path/to/data_prefix', + with_label=True, # or False for unsupervised tasks + pipeline=... + ) +) +``` + +```{note} +If you want to use this format, do not specify `ann_file`, or specify `ann_file=''`. + +And please note that the subfolder format requires a folder scanning which may cause a slower initialization, +especially for large datasets or slow file IO. +``` + +### Text Annotation File Format + +In this format, we use a text annotation file to store image file paths and the corespondding category +indices. + +For supervised tasks (with `with_label=True`), the annotation file should include the file path and the +category index of one sample in one line and split them by a space, as below: + +All these file paths can be absolute paths, or paths relative to the `data_prefix`. + +```text +folder_1/xxx.png 0 +folder_1/xxy.png 1 +123.png 4 +nsdf3.png 3 +... +``` + +```{note} +The index numbers of categories start from 0. And the value of ground-truth labels should fall in range `[0, num_classes - 1]`. + +In addition, please use the `classes` field in the dataset settings to specify the name of every category. +``` + +For unsupervised tasks (with `with_label=False`), the annotation file only need to include the file path of +one sample in one line, as below: + +```text +folder_1/xxx.png +folder_1/xxy.png +123.png +nsdf3.png +... +``` + +Assume the entire dataset folder is as below: + +```text +data_root +├── meta +│   ├── test.txt # The annotation file for the test dataset +│   ├── train.txt # The annotation file for the training dataset +│   └── val.txt # The annotation file for the validation dataset. +├── train +│   ├── 123.png +│   ├── folder_1 +│   │   ├── xxx.png +│   │   └── xxy.png +│   └── nsdf3.png +├── test +└── val +``` + +Here is an example dataset settings in config files: + +```python +# Training dataloader configurations +train_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root='path/to/data_root', # The common prefix of both `ann_flie` and `data_prefix`. 
+ ann_file='meta/train.txt', # The path of annotation file relative to the data_root. + data_prefix='train', # The prefix of file paths in the `ann_file`, relative to the data_root. + with_label=True, # or False for unsupervised tasks + classes=['A', 'B', 'C', 'D', ...], # The name of every category. + pipeline=..., # The transformations to process the dataset samples. + ) + ... +) +``` + +```{note} +For a complete example about how to use the `CustomDataset`, please see [How to Pretrain with Custom Dataset](../notes/pretrain_custom_dataset.md) +``` + +## ImageNet + +ImageNet has multiple versions, but the most commonly used one is [ILSVRC 2012](http://www.image-net.org/challenges/LSVRC/2012/). It can be accessed with the following steps. + +`````{tabs} + +````{group-tab} Download by MIM + +MIM supports downloading from [OpenXlab](https://openxlab.org.cn/datasets) and preprocessing ImageNet dataset with one command line. + +_You need to register an account at [OpenXlab official website](https://openxlab.org.cn/datasets) and login by CLI._ + +```Bash +# install OpenXlab CLI tools +pip install -U openxlab +# log in OpenXLab +openxlab login +# download and preprocess by MIM, better to execute in $MMPreTrain directory. +mim download mmpretrain --dataset imagenet1k +``` + +```` + +````{group-tab} Download form Official Source + +1. Register an account and login to the [download page](http://www.image-net.org/download-images). +2. Find download links for ILSVRC2012 and download the following two files + - ILSVRC2012_img_train.tar (~138GB) + - ILSVRC2012_img_val.tar (~6.3GB) +3. Untar the downloaded files + +```` + +````` + +### The Directory Structrue of the ImageNet dataset + +We support two ways of organizing the ImageNet dataset: Subfolder Format and Text Annotation File Format. + +#### Subfolder Format + +We have provided a sample, which you can download and extract from this [link](https://download.openmmlab.com/mmpretrain/datasets/imagenet_1k.zip). The directory structure of the dataset should be as below: + +```text +data/imagenet/ +├── train/ +│ ├── n01440764 +│ │ ├── n01440764_10026.JPEG +│ │ ├── n01440764_10027.JPEG +│ │ ├── n01440764_10029.JPEG +│ │ ├── n01440764_10040.JPEG +│ │ ├── n01440764_10042.JPEG +│ │ ├── n01440764_10043.JPEG +│ │ └── n01440764_10048.JPEG +│ ├── ... +├── val/ +│ ├── n01440764 +│ │ ├── ILSVRC2012_val_00000293.JPEG +│ │ ├── ILSVRC2012_val_00002138.JPEG +│ │ ├── ILSVRC2012_val_00003014.JPEG +│ │ └── ... +│ ├── ... +``` + +#### Text Annotation File Format + +You can download and untar the meta data from this [link](https://download.openmmlab.com/mmclassification/datasets/imagenet/meta/caffe_ilsvrc12.tar.gz). And re-organize the dataset as below: + +```text +data/imagenet/ +├── meta/ +│ ├── train.txt +│ ├── test.txt +│ └── val.txt +├── train/ +│ ├── n01440764 +│ │ ├── n01440764_10026.JPEG +│ │ ├── n01440764_10027.JPEG +│ │ ├── n01440764_10029.JPEG +│ │ ├── n01440764_10040.JPEG +│ │ ├── n01440764_10042.JPEG +│ │ ├── n01440764_10043.JPEG +│ │ └── n01440764_10048.JPEG +│ ├── ... +├── val/ +│ ├── ILSVRC2012_val_00000001.JPEG +│ ├── ILSVRC2012_val_00000002.JPEG +│ ├── ILSVRC2012_val_00000003.JPEG +│ ├── ILSVRC2012_val_00000004.JPEG +│ ├── ... +``` + +### Configuration + +Once your dataset is organized in the way described above, you can use the [`ImageNet`](mmpretrain.datasets.ImageNet) dataset with the below configurations: + +```python +train_dataloader = dict( + ... 
+    # Training dataset configurations
+    dataset=dict(
+        type='ImageNet',
+        data_root='data/imagenet',
+        split='train',
+        pipeline=...,
+    )
+)
+
+val_dataloader = dict(
+    ...
+    # Validation dataset configurations
+    dataset=dict(
+        type='ImageNet',
+        data_root='data/imagenet',
+        split='val',
+        pipeline=...,
+    )
+)
+
+test_dataloader = val_dataloader
+```
+
+## Supported Image Classification Datasets
+
+| Datasets | split | HomePage |
+| -------- | :---- | -------- |
+| [`Caltech101`](mmpretrain.datasets.Caltech101)(data_root[, split, pipeline, ...]) | ["train", "test"] | [Caltech 101](https://data.caltech.edu/records/mzrjq-6wc02) Dataset. |
+| [`CIFAR10`](mmpretrain.datasets.CIFAR10)(data_root[, split, pipeline, ...]) | ["train", "test"] | [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) Dataset. |
+| [`CIFAR100`](mmpretrain.datasets.CIFAR100)(data_root[, split, pipeline, ...]) | ["train", "test"] | [CIFAR100](https://www.cs.toronto.edu/~kriz/cifar.html) Dataset. |
+| [`CUB`](mmpretrain.datasets.CUB)(data_root[, split, pipeline, ...]) | ["train", "test"] | [CUB-200-2011](http://www.vision.caltech.edu/datasets/cub_200_2011/) Dataset. |
+| [`DTD`](mmpretrain.datasets.DTD)(data_root[, split, pipeline, ...]) | ["train", "val", "trainval", "test"] | [Describable Texture Dataset (DTD)](https://www.robots.ox.ac.uk/~vgg/data/dtd/) Dataset. |
+| [`FashionMNIST`](mmpretrain.datasets.FashionMNIST)(data_root[, split, pipeline, ...]) | ["train", "test"] | [Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) Dataset. |
+| [`FGVCAircraft`](mmpretrain.datasets.FGVCAircraft)(data_root[, split, pipeline, ...]) | ["train", "val", "trainval", "test"] | [FGVC Aircraft](https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/) Dataset. |
+| [`Flowers102`](mmpretrain.datasets.Flowers102)(data_root[, split, pipeline, ...]) | ["train", "val", "trainval", "test"] | [Oxford 102 Flower](https://www.robots.ox.ac.uk/~vgg/data/flowers/102/) Dataset. |
+| [`Food101`](mmpretrain.datasets.Food101)(data_root[, split, pipeline, ...]) | ["train", "test"] | [Food101](https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/) Dataset. |
+| [`MNIST`](mmpretrain.datasets.MNIST)(data_root[, split, pipeline, ...]) | ["train", "test"] | [MNIST](http://yann.lecun.com/exdb/mnist/) Dataset. |
+| [`OxfordIIITPet`](mmpretrain.datasets.OxfordIIITPet)(data_root[, split, pipeline, ...]) | ["trainval", "test"] | [Oxford-IIIT Pets](https://www.robots.ox.ac.uk/~vgg/data/pets/) Dataset. |
+| [`Places205`](mmpretrain.datasets.Places205)(data_root[, pipeline, ...]) | - | [Places205](http://places.csail.mit.edu/downloadData.html) Dataset. |
+| [`StanfordCars`](mmpretrain.datasets.StanfordCars)(data_root[, split, pipeline, ...]) | ["train", "test"] | [Stanford Cars](https://ai.stanford.edu/~jkrause/cars/car_dataset.html) Dataset. |
+| [`SUN397`](mmpretrain.datasets.SUN397)(data_root[, split, pipeline, ...]) | ["train", "test"] | [SUN397](https://vision.princeton.edu/projects/2010/SUN/) Dataset. |
+| [`VOC`](mmpretrain.datasets.VOC)(data_root[, image_set_path, pipeline, ...]) | ["train", "val", "trainval", "test"] | [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/) Dataset.
| + +Some dataset homepage links may be unavailable, and you can download datasets through [OpenXLab](https://openxlab.org.cn/datasets), such as [Stanford Cars](https://openxlab.org.cn/datasets/OpenDataLab/Stanford_Cars). + +## Supported Multi-modality Datasets + +| Datasets | split | HomePage | +| --------------------------------------------------------------------------------------------- | :----------------------- | ----------------------------------------------------------------------------------- | +| [`RefCOCO`](mmpretrain.datasets.RefCOCO)(data_root, ann_file, data_prefix, split_file[, split, ...]) | ["train", "val", "test"] | [RefCOCO](https://bvisionweb1.cs.unc.edu/licheng/referit/data/refcoco.zip) Dataset. | + +Some dataset homepage links may be unavailable, and you can download datasets through [OpenDataLab](https://opendatalab.com/), such as [RefCOCO](https://opendatalab.com/RefCOCO/download). + +## OpenMMLab 2.0 Standard Dataset + +In order to facilitate the training of multi-task algorithm models, we unify the dataset interfaces of different tasks. OpenMMLab has formulated the **OpenMMLab 2.0 Dataset Format Specification**. When starting a trainning task, the users can choose to convert their dataset annotation into the specified format, and use the algorithm library of OpenMMLab to perform algorithm training and testing based on the data annotation file. + +The OpenMMLab 2.0 Dataset Format Specification stipulates that the annotation file must be in `json` or `yaml`, `yml`, `pickle` or `pkl` format; the dictionary stored in the annotation file must contain `metainfo` and `data_list` fields, The value of `metainfo` is a dictionary, which contains the meta information of the dataset; and the value of `data_list` is a list, each element in the list is a dictionary, the dictionary defines a raw data, each raw data contains a or several training/testing samples. + +The following is an example of a JSON annotation file (in this example each raw data contains only one train/test sample): + +``` +{ + 'metainfo': + { + 'classes': ('cat', 'dog'), # the category index of 'cat' is 0 and 'dog' is 1. + ... + }, + 'data_list': + [ + { + 'img_path': "xxx/xxx_0.jpg", + 'gt_label': 0, + ... + }, + { + 'img_path': "xxx/xxx_1.jpg", + 'gt_label': 1, + ... + }, + ... + ] +} +``` + +Assume you want to use the training dataset and the dataset is stored as the below structure: + +```text +data +├── annotations +│ ├── train.json +├── train +│ ├── xxx/xxx_0.jpg +│ ├── xxx/xxx_1.jpg +│ ├── ... +``` + +Build from the following dictionaries: + +```python +train_dataloader = dict( + ... + dataset=dict( + type='BaseDataset', + data_root='data', + ann_file='annotations/train.json', + data_prefix='train/', + pipeline=..., + ) +) +``` + +## Other Datasets + +To find more datasets supported by MMPretrain, and get more configurations of the above datasets, please see the [dataset documentation](mmpretrain.datasets). + +To implement your own dataset class for some special formats, please see the [Adding New Dataset](../advanced_guides/datasets.md). + +## Dataset Wrappers + +The following datawrappers are supported in MMEngine, you can refer to {external+mmengine:doc}`MMEngine tutorial ` to learn how to use it. + +- {external:py:class}`~mmengine.dataset.ConcatDataset` +- {external:py:class}`~mmengine.dataset.RepeatDataset` +- {external:py:class}`~mmengine.dataset.ClassBalancedDataset` + +The MMPretrain also support [KFoldDataset](mmpretrain.datasets.KFoldDataset), please use it with `tools/kfold-cross-valid.py`. 
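+
+Finally, if your raw data is already organized in the subfolder format described at the beginning of this page but you want the text annotation file format (for example, to pin down a reproducible split), a small script along the following lines can generate the annotation file. The paths, file suffixes and output location below are only illustrative.
+
+```python
+from pathlib import Path
+
+data_prefix = Path('data/my_dataset/train')   # illustrative path to the subfolder-format data
+classes = sorted(p.name for p in data_prefix.iterdir() if p.is_dir())
+
+lines = []
+for idx, name in enumerate(classes):          # category indices start from 0
+    for img in sorted((data_prefix / name).glob('*')):
+        if img.suffix.lower() in {'.jpg', '.jpeg', '.png'}:
+            # One sample per line: path relative to `data_prefix`, then the category index.
+            lines.append(f'{img.relative_to(data_prefix).as_posix()} {idx}')
+
+Path('data/my_dataset/meta').mkdir(parents=True, exist_ok=True)
+Path('data/my_dataset/meta/train.txt').write_text('\n'.join(lines) + '\n')
+print(f'{len(lines)} samples, classes = {classes}')
+```
+
+The resulting `meta/train.txt` follows the text annotation file format described above, and the `classes` list can be passed to the `classes` field of the dataset settings.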
diff --git a/docs/en/user_guides/downstream.md b/docs/en/user_guides/downstream.md new file mode 100644 index 0000000..9abb077 --- /dev/null +++ b/docs/en/user_guides/downstream.md @@ -0,0 +1,128 @@ +# Downstream tasks + +## Detection + +For detection tasks, please use MMDetection. First, make sure you have installed [MIM](https://github.com/open-mmlab/mim), which is also a project of OpenMMLab. + +```shell +pip install openmim +mim install 'mmdet>=3.0.0rc0' +``` + +Besides, please refer to MMDet for [installation](https://mmdetection.readthedocs.io/en/dev-3.x/get_started.html) and [data preparation](https://mmdetection.readthedocs.io/en/dev-3.x/user_guides/dataset_prepare.html) + +### Train + +After installation, you can run MMDetection with simple command. + +```shell +# distributed version +bash tools/benchmarks/mmdetection/mim_dist_train_c4.sh ${CONFIG} ${PRETRAIN} ${GPUS} +bash tools/benchmarks/mmdetection/mim_dist_train_fpn.sh ${CONFIG} ${PRETRAIN} ${GPUS} + +# slurm version +bash tools/benchmarks/mmdetection/mim_slurm_train_c4.sh ${PARTITION} ${CONFIG} ${PRETRAIN} +bash tools/benchmarks/mmdetection/mim_slurm_train_fpn.sh ${PARTITION} ${CONFIG} ${PRETRAIN} +``` + +- `${CONFIG}`: Use config file path in MMDetection directly. And for some algorithms, we also have some + modified config files which can be found in the `benchmarks` folder under the correspondding algorithm + folder. You can also writing your config file from scratch. +- `${PRETRAIN}`: the pre-trained model file. +- `${GPUS}`: The number of GPUs that you want to use to train. We adopt 8 GPUs for detection tasks by default. + +Example: + +```shell +bash ./tools/benchmarks/mmdetection/mim_dist_train_c4.sh \ + configs/byol/benchmarks/mask-rcnn_r50-c4_ms-1x_coco.py \ + https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth 8 +``` + +### Test + +After training, you can also run the command below to test your model. + +```shell +# distributed version +bash tools/benchmarks/mmdetection/mim_dist_test.sh ${CONFIG} ${CHECKPOINT} ${GPUS} + +# slurm version +bash tools/benchmarks/mmdetection/mim_slurm_test.sh ${PARTITION} ${CONFIG} ${CHECKPOINT} +``` + +- `${CONFIG}`: Use config file name in MMDetection directly. And for some algorithms, we also have some + modified config files which can be found in the `benchmarks` folder under the correspondding algorithm + folder. You can also writing your config file from scratch. +- `${CHECKPOINT}`: The fine-tuned detection model that you want to test. +- `${GPUS}`: The number of GPUs that you want to use to test. We adopt 8 GPUs for detection tasks by default. + +Example: + +```shell +bash ./tools/benchmarks/mmdetection/mim_dist_test.sh \ +configs/byol/benchmarks/mask-rcnn_r50_fpn_ms-1x_coco.py \ +https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth 8 +``` + +## Segmentation + +For semantic segmentation task, we use MMSegmentation. First, make sure you have installed [MIM](https://github.com/open-mmlab/mim), which is also a project of OpenMMLab. + +```shell +pip install openmim +mim install 'mmsegmentation>=1.0.0rc0' +``` + +Besides, please refer to MMSegmentation for [installation](https://mmsegmentation.readthedocs.io/en/dev-1.x/get_started.html) and [data preparation](https://mmsegmentation.readthedocs.io/en/dev-1.x/user_guides/2_dataset_prepare.html). 
+ +### Train + +After installation, you can run MMSegmentation with simple command. + +```shell +# distributed version +bash tools/benchmarks/mmsegmentation/mim_dist_train.sh ${CONFIG} ${PRETRAIN} ${GPUS} + +# slurm version +bash tools/benchmarks/mmsegmentation/mim_slurm_train.sh ${PARTITION} ${CONFIG} ${PRETRAIN} +``` + +- `${CONFIG}`: Use config file path in MMSegmentation directly. And for some algorithms, we also have some + modified config files which can be found in the `benchmarks` folder under the correspondding algorithm + folder. You can also writing your config file from scratch. +- `${PRETRAIN}`: the pre-trained model file. +- `${GPUS}`: The number of GPUs that you want to use to train. We adopt 4 GPUs for segmentation tasks by default. + +Example: + +```shell +bash ./tools/benchmarks/mmsegmentation/mim_dist_train.sh \ +configs/benchmarks/mmsegmentation/voc12aug/fcn_r50-d8_4xb4-20k_voc12aug-512x512.py \ +https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth 4 +``` + +### Test + +After training, you can also run the command below to test your model. + +```shell +# distributed version +bash tools/benchmarks/mmsegmentation/mim_dist_test.sh ${CONFIG} ${CHECKPOINT} ${GPUS} + +# slurm version +bash tools/benchmarks/mmsegmentation/mim_slurm_test.sh ${PARTITION} ${CONFIG} ${CHECKPOINT} +``` + +- `${CONFIG}`: Use config file name in MMSegmentation directly. And for some algorithms, we also have some + modified config files which can be found in the `benchmarks` folder under the correspondding algorithm + folder. You can also writing your config file from scratch. +- `${CHECKPOINT}`: The fine-tuned segmentation model that you want to test. +- `${GPUS}`: The number of GPUs that you want to use to test. We adopt 4 GPUs for segmentation tasks by default. + +Example: + +```shell +bash ./tools/benchmarks/mmsegmentation/mim_dist_test.sh fcn_r50-d8_4xb4-20k_voc12aug-512x512.py \ +https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth 4 +``` diff --git a/docs/en/user_guides/inference.md b/docs/en/user_guides/inference.md new file mode 100644 index 0000000..8d6cbef --- /dev/null +++ b/docs/en/user_guides/inference.md @@ -0,0 +1,179 @@ +# Inference with existing models + +This tutorial will show how to use the following APIs: + +- [**`list_models`**](mmpretrain.apis.list_models): List available model names in MMPreTrain. +- [**`get_model`**](mmpretrain.apis.get_model): Get a model from model name or model config. +- [**`inference_model`**](mmpretrain.apis.inference_model): Inference a model with the correspondding + inferencer. It's a shortcut for a quick start, and for advanced usage, please use the below inferencer + directly. +- Inferencers: + 1. [**`ImageClassificationInferencer`**](mmpretrain.apis.ImageClassificationInferencer): + Perform image classification on the given image. + 2. [**`ImageRetrievalInferencer`**](mmpretrain.apis.ImageRetrievalInferencer): + Perform image-to-image retrieval from the given image on a given image set. + 3. [**`ImageCaptionInferencer`**](mmpretrain.apis.ImageCaptionInferencer): + Generate a caption on the given image. + 4. [**`VisualQuestionAnsweringInferencer`**](mmpretrain.apis.VisualQuestionAnsweringInferencer): + Answer a question according to the given image. + 5. 
[**`VisualGroundingInferencer`**](mmpretrain.apis.VisualGroundingInferencer): + Locate an object from the description on the given image. + 6. [**`TextToImageRetrievalInferencer`**](mmpretrain.apis.TextToImageRetrievalInferencer): + Perform text-to-image retrieval from the given description on a given image set. + 7. [**`ImageToTextRetrievalInferencer`**](mmpretrain.apis.ImageToTextRetrievalInferencer): + Perform image-to-text retrieval from the given image on a series of text. + 8. [**`NLVRInferencer`**](mmpretrain.apis.NLVRInferencer): + Perform Natural Language for Visual Reasoning on a given image-pair and text. + 9. [**`FeatureExtractor`**](mmpretrain.apis.FeatureExtractor): + Extract features from the image files by a vision backbone. + +## List available models + +list all the models in MMPreTrain. + +```python +>>> from mmpretrain import list_models +>>> list_models() +['barlowtwins_resnet50_8xb256-coslr-300e_in1k', + 'beit-base-p16_beit-in21k-pre_3rdparty_in1k', + ...] +``` + +`list_models` supports Unix filename pattern matching, you can use \*\* * \*\* to match any character. + +```python +>>> from mmpretrain import list_models +>>> list_models("*convnext-b*21k") +['convnext-base_3rdparty_in21k', + 'convnext-base_in21k-pre-3rdparty_in1k-384px', + 'convnext-base_in21k-pre_3rdparty_in1k'] +``` + +You can use the `list_models` method of inferencers to get the available models of the correspondding tasks. + +```python +>>> from mmpretrain import ImageCaptionInferencer +>>> ImageCaptionInferencer.list_models() +['blip-base_3rdparty_caption', + 'blip2-opt2.7b_3rdparty-zeroshot_caption', + 'flamingo_3rdparty-zeroshot_caption', + 'ofa-base_3rdparty-finetuned_caption'] +``` + +## Get a model + +you can use `get_model` get the model. + +```python +>>> from mmpretrain import get_model + +# Get model without loading pre-trained weight. +>>> model = get_model("convnext-base_in21k-pre_3rdparty_in1k") + +# Get model and load the default checkpoint. +>>> model = get_model("convnext-base_in21k-pre_3rdparty_in1k", pretrained=True) + +# Get model and load the specified checkpoint. +>>> model = get_model("convnext-base_in21k-pre_3rdparty_in1k", pretrained="your_local_checkpoint_path") + +# Get model with extra initialization arguments, for example, modify the num_classes in head. +>>> model = get_model("convnext-base_in21k-pre_3rdparty_in1k", head=dict(num_classes=10)) + +# Another example, remove the neck and head, and output from stage 1, 2, 3 in backbone +>>> model_headless = get_model("resnet18_8xb32_in1k", head=None, neck=None, backbone=dict(out_indices=(1, 2, 3))) +``` + +The obtained model is a usual PyTorch module. + +```python +>>> import torch +>>> from mmpretrain import get_model +>>> model = get_model('convnext-base_in21k-pre_3rdparty_in1k', pretrained=True) +>>> x = torch.rand((1, 3, 224, 224)) +>>> y = model(x) +>>> print(type(y), y.shape) + torch.Size([1, 1000]) +``` + +## Inference on given images + +Here is an example to inference an [image](https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG) by the ResNet-50 pre-trained classification model. 
+ +```python +>>> from mmpretrain import inference_model +>>> image = 'https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG' +>>> # If you have no graphical interface, please set `show=False` +>>> result = inference_model('resnet50_8xb32_in1k', image, show=True) +>>> print(result['pred_class']) +sea snake +``` + +The `inference_model` API is only for demo and cannot keep the model instance or inference on multiple +samples. You can use the inferencers for multiple calling. + +```python +>>> from mmpretrain import ImageClassificationInferencer +>>> image = 'https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG' +>>> inferencer = ImageClassificationInferencer('resnet50_8xb32_in1k') +>>> # Note that the inferencer output is a list of result even if the input is a single sample. +>>> result = inferencer('https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG')[0] +>>> print(result['pred_class']) +sea snake +>>> +>>> # You can also use is for multiple images. +>>> image_list = ['demo/demo.JPEG', 'demo/bird.JPEG'] * 16 +>>> results = inferencer(image_list, batch_size=8) +>>> print(len(results)) +32 +>>> print(results[1]['pred_class']) +house finch, linnet, Carpodacus mexicanus +``` + +Usually, the result for every sample is a dictionary. For example, the image classification result is a dictionary containing `pred_label`, `pred_score`, `pred_scores` and `pred_class` as follows: + +```python +{ + "pred_label": 65, + "pred_score": 0.6649366617202759, + "pred_class":"sea snake", + "pred_scores": array([..., 0.6649366617202759, ...], dtype=float32) +} +``` + +You can configure the inferencer by arguments, for example, use your own config file and checkpoint to +inference images by CUDA. + +```python +>>> from mmpretrain import ImageClassificationInferencer +>>> image = 'https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG' +>>> config = 'configs/resnet/resnet50_8xb32_in1k.py' +>>> checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth' +>>> inferencer = ImageClassificationInferencer(model=config, pretrained=checkpoint, device='cuda') +>>> result = inferencer(image)[0] +>>> print(result['pred_class']) +sea snake +``` + +## Inference by a Gradio demo + +We also provide a gradio demo for all supported tasks and you can find it in [projects/gradio_demo/launch.py](https://github.com/open-mmlab/mmpretrain/blob/main/projects/gradio_demo/launch.py). + +Please install `gradio` by `pip install -U gradio` at first. + +Here is the interface preview: + + + +## Extract Features From Image + +Compared with `model.extract_feat`, `FeatureExtractor` is used to extract features from the image files directly, instead of a batch of tensors. +In a word, the input of `model.extract_feat` is `torch.Tensor`, the input of `FeatureExtractor` is images. 
+ +```python +>>> from mmpretrain import FeatureExtractor, get_model +>>> model = get_model('resnet50_8xb32_in1k', backbone=dict(out_indices=(0, 1, 2, 3))) +>>> extractor = FeatureExtractor(model) +>>> features = extractor('https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG')[0] +>>> features[0].shape, features[1].shape, features[2].shape, features[3].shape +(torch.Size([256]), torch.Size([512]), torch.Size([1024]), torch.Size([2048])) +``` diff --git a/docs/en/user_guides/test.md b/docs/en/user_guides/test.md new file mode 100644 index 0000000..65ec073 --- /dev/null +++ b/docs/en/user_guides/test.md @@ -0,0 +1,123 @@ +# Test + +For image classification task and image retrieval task, you could test your model after training. + +## Test with your PC + +You can use `tools/test.py` to test a model on a single machine with a CPU and optionally a GPU. + +Here is the full usage of the script: + +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ARGS] +``` + +````{note} +By default, MMPretrain prefers GPU to CPU. If you want to test a model on CPU, please empty `CUDA_VISIBLE_DEVICES` or set it to -1 to make GPU invisible to the program. + +```bash +CUDA_VISIBLE_DEVICES=-1 python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ARGS] +``` +```` + +| ARGS | Description | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `CONFIG_FILE` | The path to the config file. | +| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html)). | +| `--work-dir WORK_DIR` | The directory to save the file containing evaluation metrics. | +| `--out OUT` | The path to save the file containing test results. | +| `--out-item OUT_ITEM` | To specify the content of the test results file, and it can be "pred" or "metrics". If "pred", save the outputs of the model for offline evaluation. If "metrics", save the evaluation metrics. Defaults to "pred". | +| `--cfg-options CFG_OPTIONS` | Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into the config file. If the value to be overwritten is a list, it should be of the form of either `key="[a,b]"` or `key=a,b`. The argument also allows nested list/tuple values, e.g. `key="[(a,b),(c,d)]"`. Note that the quotation marks are necessary and that no white space is allowed. | +| `--show-dir SHOW_DIR` | The directory to save the result visualization images. | +| `--show` | Visualize the prediction result in a window. | +| `--interval INTERVAL` | The interval of samples to visualize. | +| `--wait-time WAIT_TIME` | The display time of every window (in seconds). Defaults to 1. | +| `--no-pin-memory` | Whether to disable the `pin_memory` option in dataloaders. | +| `--tta` | Whether to enable the Test-Time-Aug (TTA). If the config file has `tta_pipeline` and `tta_model` fields, use them to determine the TTA transforms and how to merge the TTA results. Otherwise, use flip TTA by averaging classification score. | +| `--launcher {none,pytorch,slurm,mpi}` | Options for job launcher. | + +## Test with multiple GPUs + +We provide a shell script to start a multi-GPUs task with `torch.distributed.launch`. 
+ +```shell +bash ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS] +``` + +| ARGS | Description | +| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `CONFIG_FILE` | The path to the config file. | +| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html)). | +| `GPU_NUM` | The number of GPUs to be used. | +| `[PY_ARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). | + +You can also specify extra arguments of the launcher by environment variables. For example, change the +communication port of the launcher to 29666 by the below command: + +```shell +PORT=29666 bash ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS] +``` + +If you want to startup multiple test jobs and use different GPUs, you can launch them by specifying +different port and visible devices. + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash ./tools/dist_test.sh ${CONFIG_FILE1} ${CHECKPOINT_FILE} 4 [PY_ARGS] +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 bash ./tools/dist_test.sh ${CONFIG_FILE2} ${CHECKPOINT_FILE} 4 [PY_ARGS] +``` + +## Test with multiple machines + +### Multiple machines in the same network + +If you launch a test job with multiple machines connected with ethernet, you can run the following commands: + +On the first machine: + +```shell +NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_test.sh $CONFIG $CHECKPOINT_FILE $GPUS +``` + +On the second machine: + +```shell +NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_test.sh $CONFIG $CHECKPOINT_FILE $GPUS +``` + +Comparing with multi-GPUs in a single machine, you need to specify some extra environment variables: + +| ENV_VARS | Description | +| ------------- | ---------------------------------------------------------------------------- | +| `NNODES` | The total number of machines. | +| `NODE_RANK` | The index of the local machine. | +| `PORT` | The communication port, it should be the same in all machines. | +| `MASTER_ADDR` | The IP address of the master machine, it should be the same in all machines. | + +Usually it is slow if you do not have high speed networking like InfiniBand. + +### Multiple machines managed with slurm + +If you run MMPretrain on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `tools/slurm_test.sh`. + +```shell +[ENV_VARS] ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${CHECKPOINT_FILE} [PY_ARGS] +``` + +Here are the arguments description of the script. + +| ARGS | Description | +| ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `PARTITION` | The partition to use in your cluster. | +| `JOB_NAME` | The name of your job, you can name it as you like. | +| `CONFIG_FILE` | The path to the config file. | +| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link, and you can find checkpoints [here](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html)). | +| `[PY_ARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). 
| + +Here are the environment variables can be used to configure the slurm job. + +| ENV_VARS | Description | +| --------------- | ---------------------------------------------------------------------------------------------------------- | +| `GPUS` | The number of GPUs to be used. Defaults to 8. | +| `GPUS_PER_NODE` | The number of GPUs to be allocated per node. | +| `CPUS_PER_TASK` | The number of CPUs to be allocated per task (Usually one GPU corresponds to one task). Defaults to 5. | +| `SRUN_ARGS` | The other arguments of `srun`. Available options can be found [here](https://slurm.schedmd.com/srun.html). | diff --git a/docs/en/user_guides/train.md b/docs/en/user_guides/train.md new file mode 100644 index 0000000..9cc618b --- /dev/null +++ b/docs/en/user_guides/train.md @@ -0,0 +1,121 @@ +# Train + +In this tutorial, we will introduce how to use the scripts provided in MMPretrain to start a training task. If +you need, we also have some practice examples about [how to pretrain with custom dataset](../notes/pretrain_custom_dataset.md) +and [how to finetune with custom dataset](../notes/finetune_custom_dataset.md). + +## Train with your PC + +You can use `tools/train.py` to train a model on a single machine with a CPU and optionally a GPU. + +Here is the full usage of the script: + +```shell +python tools/train.py ${CONFIG_FILE} [ARGS] +``` + +````{note} +By default, MMPretrain prefers GPU to CPU. If you want to train a model on CPU, please empty `CUDA_VISIBLE_DEVICES` or set it to -1 to make GPU invisible to the program. + +```bash +CUDA_VISIBLE_DEVICES=-1 python tools/train.py ${CONFIG_FILE} [ARGS] +``` +```` + +| ARGS | Description | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `CONFIG_FILE` | The path to the config file. | +| `--work-dir WORK_DIR` | The target folder to save logs and checkpoints. Defaults to a folder with the same name of the config file under `./work_dirs`. | +| `--resume [RESUME]` | Resume training. If specify a path, resume from it, while if not specify, try to auto resume from the latest checkpoint. | +| `--amp` | Enable automatic-mixed-precision training. | +| `--no-validate` | **Not suggested**. Disable checkpoint evaluation during training. | +| `--auto-scale-lr` | Auto scale the learning rate according to the actual batch size and the original batch size. | +| `--no-pin-memory` | Whether to disable the `pin_memory` option in dataloaders. | +| `--no-persistent-workers` | Whether to disable the `persistent_workers` option in dataloaders. | +| `--cfg-options CFG_OPTIONS` | Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into the config file. If the value to be overwritten is a list, it should be of the form of either `key="[a,b]"` or `key=a,b`. The argument also allows nested list/tuple values, e.g. `key="[(a,b),(c,d)]"`. Note that the quotation marks are necessary and that no white space is allowed. | +| `--launcher {none,pytorch,slurm,mpi}` | Options for job launcher. | + +## Train with multiple GPUs + +We provide a shell script to start a multi-GPUs task with `torch.distributed.launch`. + +```shell +bash ./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [PY_ARGS] +``` + +| ARGS | Description | +| ------------- | ---------------------------------------------------------------------------------- | +| `CONFIG_FILE` | The path to the config file. 
+
+## Train with multiple GPUs
+
+We provide a shell script to start a multi-GPU task with `torch.distributed.launch`.
+
+```shell
+bash ./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [PY_ARGS]
+```
+
+| ARGS          | Description                                                                         |
+| ------------- | ------------------------------------------------------------------------------------- |
+| `CONFIG_FILE` | The path to the config file.                                                        |
+| `GPU_NUM`     | The number of GPUs to be used.                                                      |
+| `[PY_ARGS]`   | The other optional arguments of `tools/train.py`, see [here](#train-with-your-pc).  |
+
+You can also specify extra arguments of the launcher through environment variables. For example, change the
+communication port of the launcher to 29666 with the following command:
+
+```shell
+PORT=29666 bash ./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [PY_ARGS]
+```
+
+If you want to start multiple training jobs on different GPUs, you can launch them by specifying
+different communication ports and visible devices.
+
+```shell
+CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash ./tools/dist_train.sh ${CONFIG_FILE1} 4 [PY_ARGS]
+CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 bash ./tools/dist_train.sh ${CONFIG_FILE2} 4 [PY_ARGS]
+```
+
+## Train with multiple machines
+
+### Multiple machines in the same network
+
+If you launch a training job with multiple machines connected with Ethernet, you can run the following commands:
+
+On the first machine:
+
+```shell
+NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_train.sh $CONFIG $GPUS
+```
+
+On the second machine:
+
+```shell
+NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_train.sh $CONFIG $GPUS
+```
+
+Compared with multi-GPU training on a single machine, you need to specify some extra environment variables:
+
+| ENV_VARS      | Description                                                                   |
+| ------------- | ----------------------------------------------------------------------------- |
+| `NNODES`      | The total number of machines.                                                 |
+| `NODE_RANK`   | The index of the local machine.                                               |
+| `PORT`        | The communication port; it should be the same on all machines.                |
+| `MASTER_ADDR` | The IP address of the master machine; it should be the same on all machines.  |
+
+Multi-machine training is usually slow if you do not have high-speed networking such as InfiniBand.
+
+### Multiple machines managed with slurm
+
+If you run MMPretrain on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `tools/slurm_train.sh`.
+
+```shell
+[ENV_VARS] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} [PY_ARGS]
+```
+
+Here is the description of the script's arguments.
+
+| ARGS          | Description                                                                         |
+| ------------- | ------------------------------------------------------------------------------------- |
+| `PARTITION`   | The partition to use in your cluster.                                               |
+| `JOB_NAME`    | The name of your job; you can name it as you like.                                  |
+| `CONFIG_FILE` | The path to the config file.                                                        |
+| `WORK_DIR`    | The target folder to save logs and checkpoints.                                     |
+| `[PY_ARGS]`   | The other optional arguments of `tools/train.py`, see [here](#train-with-your-pc).  |
+
+Here are the environment variables that can be used to configure the Slurm job.
+
+| ENV_VARS        | Description                                                                                                 |
+| --------------- | ------------------------------------------------------------------------------------------------------------- |
+| `GPUS`          | The number of GPUs to be used. Defaults to 8.                                                               |
+| `GPUS_PER_NODE` | The number of GPUs to be allocated per node.                                                                |
+| `CPUS_PER_TASK` | The number of CPUs to be allocated per task (usually one GPU corresponds to one task). Defaults to 5.       |
+| `SRUN_ARGS`     | The other arguments of `srun`. Available options can be found [here](https://slurm.schedmd.com/srun.html). 
| diff --git a/docs/zh_CN/Makefile b/docs/zh_CN/Makefile new file mode 100644 index 0000000..d4bb2cb --- /dev/null +++ b/docs/zh_CN/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/zh_CN/_static/css/readthedocs.css b/docs/zh_CN/_static/css/readthedocs.css new file mode 100644 index 0000000..39dc689 --- /dev/null +++ b/docs/zh_CN/_static/css/readthedocs.css @@ -0,0 +1,61 @@ +.header-logo { + background-image: url("../image/mmpt-logo.png"); + background-size: 183px 50px; + height: 50px; + width: 183px; +} + +@media screen and (min-width: 1100px) { + .header-logo { + top: -12px; + } +} + +pre { + white-space: pre; +} + +@media screen and (min-width: 2000px) { + .pytorch-content-left { + width: 1200px; + margin-left: 30px; + } + article.pytorch-article { + max-width: 1200px; + } + .pytorch-breadcrumbs-wrapper { + width: 1200px; + } + .pytorch-right-menu.scrolling-fixed { + position: fixed; + top: 45px; + left: 1580px; + } +} + +article.pytorch-article section code { + padding: .2em .4em; + background-color: #f3f4f7; + border-radius: 5px; +} + +/* Disable the change in tables */ +article.pytorch-article section table code { + padding: unset; + background-color: unset; + border-radius: unset; +} + +table.autosummary td { + width: 50% +} + +img.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +article.pytorch-article p.rubric { + font-weight: bold; +} diff --git a/docs/zh_CN/_static/image/confusion-matrix.png b/docs/zh_CN/_static/image/confusion-matrix.png new file mode 100644 index 0000000..7b0b377 --- /dev/null +++ b/docs/zh_CN/_static/image/confusion-matrix.png @@ -0,0 +1 @@ +../../../en/_static/image/confusion-matrix.png \ No newline at end of file diff --git a/docs/zh_CN/_static/image/mmpt-logo.png b/docs/zh_CN/_static/image/mmpt-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..f4e060716520ece5db7e85df3c3ad8fd9e0eda57 GIT binary patch literal 28982 zcmcG#1yq!6yFW^&G)M~!Ejh%{J#>c%3?f4#-61XALrEwhsep7UFob}Df;3V?w+zxH zXVmw7zi;og_kZv8Kj*L(%rnni_Z7b@@0(}h^mJ5-2sCA* z)L&^13p&&bzPp;K7aAHr>#tvQ|57<$G&F(?MiOozxhg{u7BMY05Jb1@ph5{DEt!0Y@)5ltmNit%Ph(d z;kKa$2-_6rr04O1W;vpm;B*cfJ;PVP_^@jTMxq7kw%>iudW$o$c z?(OL2%KVEX)XL4rTLysA^p6QH?*FiL_4*4Y6kr1WPhXS^u`i%iF;AUwHm6mZQjjFLswy^0b9|yLlS9 zxjFysWxc<#VOBB(F>^h1bhUBw^Wy#`>@Rv-Fx1;t1|TFTB*-Ty#wRFh1Qd}Jl8_V; zv8>lz*A7WHs2=fVv8v(^7g(W0~LH~i+&BoC#;D03j$1##hZq9C= z+HN)|J%j3p{~}p>R=fF%2s|yM;l37JE*9bAQZ?aC}b3P~3 zc|sjs{~g)iAOGU@ZyyVw_z3(;#6K(r{u`nHy8X8`3MibtP=WVfsU-WinRY@@Yg-|p z6`!z`tsS3;xCDp~DvAP5MAXj4%F0$yRKyDSN4&G+f2+uUB@rb6tBUN24MXwNo-M``E%Rxzlf9n(IbDY^>eUAG5kMd&L0t8Zg$>&P)}P0 zdsICAPl@@<3BP=Ra^`>3+Y9RZf7V)5R0t>v6c-W}Q&jmCM+%~%$|}NQKoD3^QBYJE zl~{im{wJ+LMnIsXh_Ixn@ZTx?f1&kXtgRiOuJ*R5UL^qdzqiBM#>Pq)z{YZ-@?fq z>WRuZZ&Z590PH;7T$rKm?#_ zR}$41{)bLsWo;uNE(8_jgW3TF`9Q)VLVV&vKoFmeAXLa&NJL!NTEzBOcK+h#_W!0+ z{8PdIZM6EkmZIjhUzh(3il{gLjGDHtsGRjg4U*>z?jdMsc53Qi1tb4&`!D^n&FuqD 
[... base85-encoded binary data for docs/zh_CN/_static/image/mmpt-logo.png omitted ...]
literal 0
HcmV?d00001

diff --git a/docs/zh_CN/_static/image/tools/analysis/analyze_log.jpg b/docs/zh_CN/_static/image/tools/analysis/analyze_log.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..8eb1a27d6464d255b84b23a7460a5f622f51712f
GIT binary patch
literal 68146
[... base85-encoded binary data for docs/zh_CN/_static/image/tools/analysis/analyze_log.jpg omitted ...]
zV@i4cR)(YglG?L-Cmq{zd<3_X9P@iAgNccctlCJ@ATCprDH}u zfi_YuQO$aMS*%AA;KYMnxGndx^Rkrlw!E|??r9jl>b;9aH>ILw(PuTspV7b5YI`Bp zcOO~3CT5b=)4Tl~84@(#&e3mmp5oiSvH4QjxeMegVkWL34{>K0PmR%c`?^pB$=!P8 zJyqL$FOkp@PSR9lK7IgLQe<(r-Migp-@!btM$((AAMbqjJ4P(agQ}y5Y9%*JsQ&>n z{U!m0YzIKuU@%vqkEPB?@GT61E9%1t-IGWpI;+$xmNNSZ#LBp;QE`ql&W_H z$r`?Aj6SxDbH4IVMGk)xK$zU8cZ2hjipfPw)m2IqFR}z3ASvx!YH~1j4O3ThOMl& zJ)^6T{nLiE&{y1E%|zz10wRcl#zlDkc3zuexT5Z}TtW8bNPtuZR6b-U6;F+l3~Qkb z6I}Z!_tM$4n;9JeyqaW;>(kcZ;OqrfdZh8!u>l5PmWTs@Vmd-W%Tt|Cc>?+&;62D3X`Ks zooZ5?((2QX}q1*|h_TN$XT%phyM`nk52);x<~0#SskA=_wvZ~}OPe~YC5>m>0aN(KRmQ}8tzVF{J; zJaew=U7p^qmZz2zli8fR24sd3Tb&lK{eqI?Gi-QvJA}qQtYW9O8cT%OR9Y)mCv)grs_zSvWnemobn4jJ77%_)`=azBbf6#w>ar+0=R;$ zXeXAx7#N1IaRjF{KEhwb(xN6w;Ek^+*DYxuCGcL_amvXmRx%fA6ZRKruNqAzM}3RB k_w}!@9uPg%%%|H2w*9aC^5^X8&;NhM;NNi!fIs#9KYW7SbN~PV literal 0 HcmV?d00001 diff --git a/docs/zh_CN/_static/js/custom.js b/docs/zh_CN/_static/js/custom.js new file mode 100644 index 0000000..96f0679 --- /dev/null +++ b/docs/zh_CN/_static/js/custom.js @@ -0,0 +1,20 @@ +var collapsedSections = ['进阶教程', '模型库', '可视化', '分析工具', '部署', '其他说明']; + +$(document).ready(function () { + $('.model-summary').DataTable({ + "stateSave": false, + "lengthChange": false, + "pageLength": 20, + "order": [], + "language": { + "info": "显示 _START_ 至 _END_ 条目(总计 _TOTAL_ )", + "infoFiltered": "(筛选自 _MAX_ 条目)", + "search": "搜索:", + "zeroRecords": "没有找到任何条目", + "paginate": { + "next": "下一页", + "previous": "上一页" + }, + } + }); +}); diff --git a/docs/zh_CN/_templates/404.html b/docs/zh_CN/_templates/404.html new file mode 100644 index 0000000..abf3356 --- /dev/null +++ b/docs/zh_CN/_templates/404.html @@ -0,0 +1,16 @@ +{% extends "layout.html" %} + +{% block body %} + +

+<h1>未找到页面</h1>
+
+<p>
+  未找到你要打开的页面。
+</p>
+<p>
+  如果你是从旧版本文档跳转至此,可能是对应的页面被移动了。请从左侧的目录中寻找新版本文档,或者跳转至首页。
+</p>
+<p>
+  如果你找不到希望打开的文档,欢迎在 Issue 中告诉我们!
+</p>
+ +{% endblock %} diff --git a/docs/zh_CN/_templates/autosummary/class.rst b/docs/zh_CN/_templates/autosummary/class.rst new file mode 100644 index 0000000..4c3a7a9 --- /dev/null +++ b/docs/zh_CN/_templates/autosummary/class.rst @@ -0,0 +1,13 @@ +.. role:: hidden + :class: hidden-section +.. currentmodule:: {{ module }} + + +{{ name | underline}} + +.. autoclass:: {{ name }} + :members: + +.. + autogenerated from _templates/autosummary/class.rst + note it does not have :inherited-members: diff --git a/docs/zh_CN/_templates/callable.rst b/docs/zh_CN/_templates/callable.rst new file mode 100644 index 0000000..3a7b9d2 --- /dev/null +++ b/docs/zh_CN/_templates/callable.rst @@ -0,0 +1,14 @@ +.. role:: hidden + :class: hidden-section +.. currentmodule:: {{ module }} + + +{{ name | underline}} + +.. autoclass:: {{ name }} + :members: + :special-members: __call__ + +.. + autogenerated from _templates/callable.rst + note it does not have :inherited-members: diff --git a/docs/zh_CN/_templates/data_transform.rst b/docs/zh_CN/_templates/data_transform.rst new file mode 100644 index 0000000..376bfe9 --- /dev/null +++ b/docs/zh_CN/_templates/data_transform.rst @@ -0,0 +1,13 @@ +.. role:: hidden + :class: hidden-section +.. currentmodule:: {{ module }} + + +{{ name | underline}} + +.. autoclass:: {{ name }} + :members: transform + +.. + autogenerated from _templates/callable.rst + note it does not have :inherited-members: diff --git a/docs/zh_CN/advanced_guides/convention.md b/docs/zh_CN/advanced_guides/convention.md new file mode 100644 index 0000000..941236b --- /dev/null +++ b/docs/zh_CN/advanced_guides/convention.md @@ -0,0 +1,114 @@ +# MMPretrain 中的约定 + +## 模型命名规则 + +MMPretrain 按照以下风格进行模型命名,代码库的贡献者需要遵循相同的命名规则。模型名总体分为五个部分:算法信息,模块信息,预训练信息,训练信息和数据信息。逻辑上属于不同部分的单词之间用下划线 `'_'` 连接,同一部分有多个单词用短横线 `'-'` 连接。 + +```text +{algorithm info}_{module info}_{pretrain info}_{training info}_{data info} +``` + +- `algorithm info`(可选):算法信息,表示用以训练该模型的主要算法,如 MAE、BEiT 等 +- `module info`:模块信息,主要包含模型的主干网络名称,如 resnet、vit 等 +- `pretrain info`(可选):预训练信息,比如预训练模型是在 ImageNet-21k 数据集上训练的等 +- `training info`:训练信息,训练策略设置,包括 batch size,schedule 以及数据增强等; +- `data info`:数据信息,数据集名称、模态、输入尺寸等,如 imagenet, cifar 等; + +### 算法信息 + +指用以训练该模型的算法名称,例如: + +- `simclr` +- `mocov2` +- `eva-mae-style` + +使用监督图像分类任务训练的模型可以省略这个字段。 + +### 模块信息 + +指模型的结构信息,一般主要包含模型的主干网络结构,`neck` 和 `head` 信息一般被省略。例如: + +- `resnet50` +- `vit-base-p16` +- `swin-base` + +### 预训练信息 + +如果该模型是在预训练模型基础上,通过微调获得的,我们需要记录预训练模型的一些信息。例如: + +- 预训练模型的来源:`fb`、`openai`等。 +- 训练预训练模型的方法:`clip`、`mae`、`distill` 等。 +- 用于预训练的数据集:`in21k`、`laion2b`等(`in1k`可以省略) +- 训练时长:`300e`、`1600e` 等。 + +并非所有信息都是必要的,只需要选择用以区分不同的预训练模型的信息即可。 + +在此字段的末尾,使用 `-pre` 作为标识符,例如 `mae-in21k-pre`。 + +### 训练信息 + +训练策略的一些设置,包括训练类型、 `batch size`、 `lr schedule`、 数据增强以及特殊的损失函数等等,比如: +Batch size 信息: + +- 格式为`{gpu x batch_per_gpu}`, 如 `8xb32` + +训练类型(主要见于 transformer 网络,如 `ViT` 算法,这类算法通常分为预训练和微调两种模式): + +- `ft` : Finetune config,用于微调的配置文件 +- `pt` : Pretrain config,用于预训练的配置文件 + +训练策略信息,训练策略以复现配置文件为基础,此基础不必标注训练策略。但如果在此基础上进行改进,则需注明训练策略,按照应用点位顺序排列,如:`{pipeline aug}-{train aug}-{loss trick}-{scheduler}-{epochs}` + +- `coslr-200e` : 使用 cosine scheduler, 训练 200 个 epoch +- `autoaug-mixup-lbs-coslr-50e` : 使用了 `autoaug`、`mixup`、`label smooth`、`cosine scheduler`, 训练了 50 个轮次 + +如果模型是从官方仓库等第三方仓库转换过来的,训练信息可以省略,使用 `3rdparty` 作为标识符。 + +### 数据信息 + +- `in1k` : `ImageNet1k` 数据集,默认使用 `224x224` 大小的图片 +- `in21k` : `ImageNet21k` 数据集,有些地方也称为 `ImageNet22k` 数据集,默认使用 `224x224` 大小的图片 +- `in1k-384px` : 表示训练的输出图片大小为 `384x384` +- `cifar100` + +### 模型命名案例 + 
+```text +vit-base-p32_clip-openai-pre_3rdparty_in1k +``` + +- `vit-base-p32`: 模块信息 +- `clip-openai-pre`:预训练信息 + - `clip`:预训练方法是 clip + - `openai`:预训练模型来自 OpenAI + - `pre`:预训练标识符 +- `3rdparty`:模型是从第三方仓库转换而来的 +- `in1k`:数据集信息。该模型是从 ImageNet-1k 数据集训练而来的,输入大小为 `224x224` + +```text +beit_beit-base-p16_8xb256-amp-coslr-300e_in1k +``` + +- `beit`: 算法信息 +- `beit-base`:模块信息,由于主干网络来自 BEiT 中提出的修改版 ViT,主干网络名称也是 `beit` +- `8xb256-amp-coslr-300e`:训练信息 + - `8xb256`:使用 8 个 GPU,每个 GPU 的批量大小为 256 + - `amp`:使用自动混合精度训练 + - `coslr`:使用余弦退火学习率调度器 + - `300e`:训练 300 个 epoch +- `in1k`:数据集信息。该模型是从 ImageNet-1k 数据集训练而来的,输入大小为 `224x224` + +## 配置文件命名规则 + +配置文件的命名与模型名称几乎相同,有几点不同: + +- 训练信息是必要的,不能是 `3rdparty` +- 如果配置文件只包含主干网络设置,既没有头部设置也没有数据集设置,我们将其命名为`{module info}_headless.py`。这种配置文件通常用于大型数据集上的第三方预训练模型。 + +### 权重命名规则 + +权重的命名主要包括模型名称,日期和哈希值。 + +```text +{model_name}_{date}-{hash}.pth +``` diff --git a/docs/zh_CN/advanced_guides/datasets.md b/docs/zh_CN/advanced_guides/datasets.md new file mode 100644 index 0000000..83b7959 --- /dev/null +++ b/docs/zh_CN/advanced_guides/datasets.md @@ -0,0 +1,73 @@ +# 添加新数据集 + +用户可以编写一个继承自 [BasesDataset](https://mmpretrain.readthedocs.io/zh_CN/latest/_modules/mmpretrain/datasets/base_dataset.html#BaseDataset) 的新数据集类,并重载 `load_data_list(self)` 方法,类似 [CIFAR10](https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/datasets/cifar.py) 和 [ImageNet](https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/datasets/imagenet.py)。 + +通常,此方法返回一个包含所有样本的列表,其中的每个样本都是一个字典。字典中包含了必要的数据信息,例如 `img` 和 `gt_label`。 + +假设我们将要实现一个 `Filelist` 数据集,该数据集将使用文件列表进行训练和测试。注释列表的格式如下: + +```text +000001.jpg 0 +000002.jpg 1 +... +``` + +## 1. 创建数据集类 + +我们可以在 `mmpretrain/datasets/filelist.py` 中创建一个新的数据集类以加载数据。 + +```python +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module() +class Filelist(BaseDataset): + + def load_data_list(self): + assert isinstance(self.ann_file, str) + + data_list = [] + with open(self.ann_file) as f: + samples = [x.strip().split(' ') for x in f.readlines()] + for filename, gt_label in samples: + img_path = add_prefix(filename, self.img_prefix) + info = {'img_path': img_path, 'gt_label': int(gt_label)} + data_list.append(info) + return data_list +``` + +## 2. 添加到库 + +将新的数据集类加入到 `mmpretrain/datasets/__init__.py` 中: + +```python +from .base_dataset import BaseDataset +... +from .filelist import Filelist + +__all__ = [ + 'BaseDataset', ... ,'Filelist' +] +``` + +### 3. 修改相关配置文件 + +然后在配置文件中,为了使用 `Filelist`,用户可以按以下方式修改配置 + +```python +train_dataloader = dict( + ... 
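+    # 示意:只需将 dataset 字段替换为新注册的 Filelist,
+    # dataloader 的其余字段(如 batch_size)保持原有配置不变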
+ dataset=dict( + type='Filelist', + ann_file='image_list.txt', + pipeline=train_pipeline, + ) +) +``` + +所有继承 [`BaseDataset`](https://github.com/open-mmlab/mmpretrain/blob/main/mmpretrain/datasets/base_dataset.py) 的数据集类都具有**懒加载**以及**节省内存**的特性,可以参考相关文档 {external+mmengine:doc}`BaseDataset `。 + +```{note} +如果数据样本时获取的字典中,只包含了 'img_path' 不包含 'img', 则在 pipeline 中必须包含 'LoadImgFromFile'。 +``` diff --git a/docs/zh_CN/advanced_guides/evaluation.md b/docs/zh_CN/advanced_guides/evaluation.md new file mode 100644 index 0000000..32db197 --- /dev/null +++ b/docs/zh_CN/advanced_guides/evaluation.md @@ -0,0 +1,97 @@ +# 自定义评估指标 + +## 使用 MMPretrain 中的指标 + +在 MMPretrain 中,我们为单标签分类和多标签分类提供了多种指标: + +**单标签分类**: + +- [`Accuracy`](mmpretrain.evaluation.Accuracy) +- [`SingleLabelMetric`](mmpretrain.evaluation.SingleLabelMetric),包括精度、召回率、f1-score 和支持度。 + +**多标签分类**: + +- [`AveragePrecision`](mmpretrain.evaluation.AveragePrecision), 或 AP (mAP)。 +- [`MultiLabelMetric`](mmpretrain.evaluation.MultiLabelMetric),包括精度、召回率、f1-score 和支持度。 + +要在验证和测试期间使用这些指标,我们需要修改配置文件中的 `val_evaluator` 和 `test_evaluator` 字段。 + +以下为几个例子: + +1. 在验证和测试期间计算 top-1 和 top-5 准确率。 + + ```python + val_evaluator = dict(type='Accuracy', topk=(1, 5)) + test_evaluator = val_evaluator + ``` + +2. 在验证和测试期间计算 top-1 准确率、top-5 准确度、精确度和召回率。 + + ```python + val_evaluator = [ + dict(type='Accuracy', topk=(1, 5)), + dict(type='SingleLabelMetric', items=['precision', 'recall']), + ] + test_evaluator = val_evaluator + ``` + +3. 计算 mAP(平均平均精度)、CP(类别平均精度)、CR(类别平均召回率)、CF(类别平均 F1 分数)、OP(总体平均精度)、OR(总体平均召回率)和 OF1(总体平均 F1 分数)。 + + ```python + val_evaluator = [ + dict(type='AveragePrecision'), + dict(type='MultiLabelMetric', average='macro'), # class-wise mean + dict(type='MultiLabelMetric', average='micro'), # overall mean + ] + test_evaluator = val_evaluator + ``` + +## 添加新的指标 + +MMPretrain 支持为追求更高定制化的用户实现定制化的评估指标。 + +您需要在 `mmpretrain/evaluation/metrics` 下创建一个新文件,并在该文件中实现新的指标,例如,在 `mmpretrain/evaluation/metrics/my_metric.py` 中。并创建一个自定义的评估指标类 `MyMetric` 继承 [MMEngine 中的 BaseMetric](mmengine.evaluator.BaseMetric)。 + +需要分别覆盖数据格式处理方法`process`和度量计算方法`compute_metrics`。 将其添加到“METRICS”注册表以实施任何自定义评估指标。 + +```python +from mmengine.evaluator import BaseMetric +from mmpretrain.registry import METRICS + +@METRICS.register_module() +class MyMetric(BaseMetric): + + def process(self, data_batch: Sequence[Dict], data_samples: Sequence[Dict]): + """ The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + `data_batch` stores the batch data from dataloader, + and `data_samples` stores the batch outputs from model. + """ + ... + + def compute_metrics(self, results: List): + """ Compute the metrics from processed results and returns the evaluation results. + """ + ... +``` + +然后,将其导入 `mmpretrain/evaluation/metrics/__init__.py` 以将其添加到 `mmpretrain.evaluation` 包中。 + +```python +# In mmpretrain/evaluation/metrics/__init__.py +... +from .my_metric import MyMetric + +__all__ = [..., 'MyMetric'] +``` + +最后,在配置文件的 `val_evaluator` 和 `test_evaluator` 字段中使用 `MyMetric`。 + +```python +val_evaluator = dict(type='MyMetric', ...) +test_evaluator = val_evaluator +``` + +```{note} +更多的细节可以参考 {external+mmengine:doc}`MMEngine 文档: Evaluation `. 
+``` diff --git a/docs/zh_CN/advanced_guides/modules.md b/docs/zh_CN/advanced_guides/modules.md new file mode 100644 index 0000000..cb0fac6 --- /dev/null +++ b/docs/zh_CN/advanced_guides/modules.md @@ -0,0 +1,512 @@ +# 自定义模型 + +在我们的设计中,我们定义一个完整的模型为顶层模块,根据功能的不同,基本几种不同类型的模型组件组成。 + +- 模型:顶层模块定义了具体的任务类型,例如 `ImageClassifier` 用在图像分类任务中, `MAE` 用在自监督学习中, `ImageToImageRetriever` 用在图像检索中。 +- 主干网络:通常是一个特征提取网络,涵盖了模型之间绝大多数的差异,例如 `ResNet`、`MobileNet`。 +- 颈部:用于连接主干网络和头部的组件,例如 `GlobalAveragePooling`。 +- 头部:用于执行特定任务的组件,例如 `ClsHead`、 `ContrastiveHead`。 +- 损失函数:在头部用于计算损失函数的组件,例如 `CrossEntropyLoss`、`LabelSmoothLoss`。 +- 目标生成器: 用于自监督学习任务的组件,例如 `VQKD`、 `HOGGenerator`。 + +## 添加新的顶层模型 + +通常来说,对于图像分类和图像检索任务来说,模型顶层模型流程基本一致。但是不同的自监督学习算法却用不同的计算流程,像 `MAE` 和 `BEiT` 就大不相同。 所以在这个部分,我们将简单介绍如何添加一个新的自监督学习算法。 + +### 添加新的自监督学习算法 + +1. 创建新文件 `mmpretrain/models/selfsup/new_algorithm.py` 以及实现 `NewAlgorithm` + + ```python + from mmpretrain.registry import MODELS + from .base import BaseSelfSupvisor + + + @MODELS.register_module() + class NewAlgorithm(BaseSelfSupvisor): + + def __init__(self, backbone, neck=None, head=None, init_cfg=None): + super().__init__(init_cfg) + pass + + # ``extract_feat`` function is defined in BaseSelfSupvisor, you could + # overwrite it if needed + def extract_feat(self, inputs, **kwargs): + pass + + # the core function to compute the loss + def loss(self, inputs, data_samples, **kwargs): + pass + + ``` + +2. 在 `mmpretrain/models/selfsup/__init__.py` 中导入对应的新算法 + + ```python + ... + from .new_algorithm import NewAlgorithm + + __all__ = [ + ..., + 'NewAlgorithm', + ... + ] + ``` + +3. 在配置文件中使用新算法 + + ```python + model = dict( + type='NewAlgorithm', + backbone=..., + neck=..., + head=..., + ... + ) + ``` + +## 添加新的主干网络 + +这里,我们以 `ResNet_CIFAR` 为例,展示了如何开发一个新的主干网络组件。 + +`ResNet_CIFAR` 针对 CIFAR 32x32 的图像输入,远小于大多数模型使用的ImageNet默认的224x224输入配置,所以我们将骨干网络中 `kernel_size=7,stride=2` +的设置替换为 `kernel_size=3, stride=1`,并移除了 stem 层之后的 +`MaxPooling`,以避免传递过小的特征图到残差块中。 + +最简单的方式就是继承自 `ResNet` 并只修改 stem 层。 + +1. 创建一个新文件 `mmpretrain/models/backbones/resnet_cifar.py`。 + + ```python + import torch.nn as nn + + from mmpretrain.registry import MODELS + from .resnet import ResNet + + + @MODELS.register_module() + class ResNet_CIFAR(ResNet): + + """ResNet backbone for CIFAR. + + (对这个主干网络的简短描述) + + Args: + depth(int): Network depth, from {18, 34, 50, 101, 152}. + ... 
+ (参数文档) + """ + + def __init__(self, depth, deep_stem=False, **kwargs): + # 调用基类 ResNet 的初始化函数 + super(ResNet_CIFAR, self).__init__(depth, deep_stem=deep_stem **kwargs) + # 其他特殊的初始化流程 + assert not self.deep_stem, 'ResNet_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + # 重载基类的方法,以实现对网络结构的修改 + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + # 如果需要的话,可以自定义forward方法 + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + # 输出值需要是一个包含不同层多尺度输出的元组 + # 如果不需要多尺度特征,可以直接在最终输出上包一层元组 + return tuple(outs) + + def init_weights(self): + # 如果需要的话,可以自定义权重初始化的方法 + super().init_weights() + + # 如果有预训练模型,则不需要进行权重初始化 + if self.init_cfg is not None and self.init_cfg['type'] == 'Pretrained': + return + + # 通常来说,我们建议用`init_cfg`去列举不同层权重初始化方法 + # 包括卷积层,线性层,归一化层等等 + # 如果有特殊需要,可以在这里进行额外的初始化操作 + ... + ``` + +```{note} +在 OpenMMLab 2.0 的设计中,将原有的`BACKBONES`、`NECKS`、`HEADS`、`LOSSES`等注册名统一为`MODELS`. +``` + +2. 在 `mmpretrain/models/backbones/__init__.py` 中导入新模块 + + ```python + ... + from .resnet_cifar import ResNet_CIFAR + + __all__ = [ + ..., 'ResNet_CIFAR' + ] + ``` + +3. 在配置文件中使用新的主干网络 + + ```python + model = dict( + ... + backbone=dict( + type='ResNet_CIFAR', + depth=18, + other_arg=xxx), + ... + ``` + +### 为自监督学习添加新的主干网络 + +对于一部分自监督学习算法,主干网络做了一定修改,例如 `MAE`、`BEiT` 等。 这些主干网络需要处理 `mask` 相关的逻辑,以此从可见的图像块中提取对应的特征信息。 + +以 [MAEViT](mmpretrain.models.selfsup.MAEViT) 作为例子,我们需要重写 `forward` 函数,进行基于 `mask` 的计算。我们实现了 `init_weights` 进行特定权重的初始化和 `random_masking` 函数来生成 `MAE` 预训练所需要的 `mask`。 + +```python +class MAEViT(VisionTransformer): + """Vision Transformer for MAE pre-training""" + + def __init__(mask_ratio, **kwargs) -> None: + super().__init__(**kwargs) + # position embedding is not learnable during pretraining + self.pos_embed.requires_grad = False + self.mask_ratio = mask_ratio + self.num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + def init_weights(self) -> None: + """Initialize position embedding, patch embedding and cls token.""" + super().init_weights() + # define what if needed + pass + + def random_masking( + self, + x: torch.Tensor, + mask_ratio: float = 0.75 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Generate the mask for MAE Pre-training.""" + pass + + def forward( + self, + x: torch.Tensor, + mask: Optional[bool] = True + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Generate features for masked images. + + The function supports two kind of forward behaviors. If the ``mask`` is + ``True``, the function will generate mask to masking some patches + randomly and get the hidden features for visible patches, which means + the function will be executed as masked imagemodeling pre-training; + if the ``mask`` is ``None`` or ``False``, the forward function will + call ``super().forward()``, which extract features from images without + mask. 
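+
+        Args:
+            x (torch.Tensor): Input images of shape (B, C, H, W).
+            mask (bool, optional): Whether to randomly mask patches.
+                Defaults to True.
+
+        Returns:
+            Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: When masking is
+            applied, the hidden features of visible patches, the mask and
+            the ids to restore the original patch order.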
+ """ + if mask is None or False: + return super().forward(x) + + else: + B = x.shape[0] + x = self.patch_embed(x)[0] + # add pos embed w/o cls token + x = x + self.pos_embed[:, 1:, :] + + # masking: length -> length * mask_ratio + x, mask, ids_restore = self.random_masking(x, self.mask_ratio) + + # append cls token + cls_token = self.cls_token + self.pos_embed[:, :1, :] + cls_tokens = cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + for _, layer in enumerate(self.layers): + x = layer(x) + # Use final norm + x = self.norm1(x) + + return (x, mask, ids_restore) + +``` + +## 添加新的颈部组件 + +这里我们以 `GlobalAveragePooling` 为例。这是一个非常简单的颈部组件,没有任何参数。 + +要添加新的颈部组件,我们主要需要实现 `forward` 函数,该函数对主干网络的输出进行 +一些操作并将结果传递到头部。 + +1. 创建一个新文件 `mmpretrain/models/necks/gap.py` + + ```python + import torch.nn as nn + + from mmpretrain.registry import MODELS + + @MODELS.register_module() + class GlobalAveragePooling(nn.Module): + + def __init__(self): + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, inputs): + # 简单起见,我们默认输入是一个张量 + outs = self.gap(inputs) + outs = outs.view(inputs.size(0), -1) + return outs + ``` + +2. 在 `mmpretrain/models/necks/__init__.py` 中导入新模块 + + ```python + ... + from .gap import GlobalAveragePooling + + __all__ = [ + ..., 'GlobalAveragePooling' + ] + ``` + +3. 修改配置文件以使用新的颈部组件 + + ```python + model = dict( + neck=dict(type='GlobalAveragePooling'), + ) + ``` + +## 添加新的头部组件 + +### 基于分类头 + +在此,我们以一个简化的 `VisionTransformerClsHead` 为例,说明如何开发新的头部组件。 + +要添加一个新的头部组件,基本上我们需要实现 `pre_logits` 函数用于进入最后的分类头之前需要的处理, +以及 `forward` 函数。 + +1. 创建一个文件 `mmpretrain/models/heads/vit_head.py`. + + ```python + import torch.nn as nn + + from mmpretrain.registry import MODELS + from .cls_head import ClsHead + + + @MODELS.register_module() + class LinearClsHead(ClsHead): + + def __init__(self, num_classes, in_channels, hidden_dim, **kwargs): + super().__init__(**kwargs) + self.in_channels = in_channels + self.num_classes = num_classes + self.hidden_dim = hidden_dim + + self.fc1 = nn.Linear(in_channels, hidden_dim) + self.act = nn.Tanh() + self.fc2 = nn.Linear(hidden_dim, num_classes) + + def pre_logits(self, feats): + # 骨干网络的输出通常包含多尺度信息的元组 + # 对于分类任务来说,我们只需要关注最后的输出 + feat = feats[-1] + + # VisionTransformer的最终输出是一个包含patch tokens和cls tokens的元组 + # 这里我们只需要cls tokens + _, cls_token = feat + + # 完成除了最后的线性分类头以外的操作 + return self.act(self.fc1(cls_token)) + + def forward(self, feats): + pre_logits = self.pre_logits(feats) + + # 完成最后的分类头 + cls_score = self.fc(pre_logits) + return cls_score + ``` + +2. 在 `mmpretrain/models/heads/__init__.py` 中导入这个模块 + + ```python + ... + from .vit_head import VisionTransformerClsHead + + __all__ = [ + ..., 'VisionTransformerClsHead' + ] + ``` + +3. 修改配置文件以使用新的头部组件。 + + ```python + model = dict( + head=dict( + type='VisionTransformerClsHead', + ..., + )) + ``` + +### 基于 BaseModule 类 + +这是一个基于 MMEngine 中的 `BaseModule` 进行开发例子,`MAEPretrainHead`,主要是为了 `MAE` 掩码学习。我们需要实现 `loss` 函数来计算损失吗,不过其它的函数均为可选项。 + +```python +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class MAEPretrainHead(BaseModule): + """Head for MAE Pre-training.""" + + def __init__(self, + loss: dict, + norm_pix: bool = False, + patch_size: int = 16) -> None: + super().__init__() + self.norm_pix = norm_pix + self.patch_size = patch_size + self.loss_module = MODELS.build(loss) + + def patchify(self, imgs: torch.Tensor) -> torch.Tensor: + """Split images into non-overlapped patches.""" + p = self.patch_size + assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0 + + h = w = imgs.shape[2] // p + x = imgs.reshape(shape=(imgs.shape[0], 3, h, p, w, p)) + x = torch.einsum('nchpwq->nhwpqc', x) + x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * 3)) + return x + + def construct_target(self, target: torch.Tensor) -> torch.Tensor: + """Construct the reconstruction target.""" + target = self.patchify(target) + if self.norm_pix: + # normalize the target image + mean = target.mean(dim=-1, keepdim=True) + var = target.var(dim=-1, keepdim=True) + target = (target - mean) / (var + 1.e-6)**.5 + + return target + + def loss(self, pred: torch.Tensor, target: torch.Tensor, + mask: torch.Tensor) -> torch.Tensor: + """Generate loss.""" + target = self.construct_target(target) + loss = self.loss_module(pred, target, mask) + + return loss +``` + +完成实现后,之后的步骤和 [基于分类头](#基于分类头) 中的步骤 2 和步骤 3 一致。 + +## 添加新的损失函数 + +要添加新的损失函数,我们主要需要在损失函数模块中 `forward` 函数。这里需要注意的是,损失模块也应该注册到`MODELS`中。另外,利用装饰器 `weighted_loss` 可以方便的实现对每个元素的损失进行加权平均。 + +假设我们要模拟从另一个分类模型生成的概率分布,需要添加 `L1loss` 来实现该目的。 + +1. 创建一个新文件 `mmpretrain/models/losses/l1_loss.py` + + ```python + import torch + import torch.nn as nn + + from mmpretrain.registry import MODELS + from .utils import weighted_loss + + @weighted_loss + def l1_loss(pred, target): + assert pred.size() == target.size() and target.numel() > 0 + loss = torch.abs(pred - target) + return loss + + @MODELS.register_module() + class L1Loss(nn.Module): + + def __init__(self, reduction='mean', loss_weight=1.0): + super(L1Loss, self).__init__() + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + loss = self.loss_weight * l1_loss( + pred, target, weight, reduction=reduction, avg_factor=avg_factor) + return loss + ``` + +2. 在文件 `mmpretrain/models/losses/__init__.py` 中导入这个模块 + + ```python + ... + from .l1_loss import L1Loss + + __all__ = [ + ..., 'L1Loss' + ] + ``` + +3. 
修改配置文件中的 `loss` 字段以使用新的损失函数 + + ```python + model = dict( + head=dict( + loss=dict(type='L1Loss', loss_weight=1.0), + )) + ``` + +最后我们可以在配置文件中结合所有新增的模型组件来使用新的模型。由于`ResNet_CIFAR` 不是一个基于ViT的骨干网络,这里我们不用`VisionTransformerClsHead`的配置。 + +```python +model = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet_CIFAR', + depth=18, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=512, + loss=dict(type='L1Loss', loss_weight=1.0), + topk=(1, 5), + )) + +``` + +```{tip} +为了方便,相同的模型组件可以直接从已有的config文件里继承,更多细节可以参考[学习配置文件](../user_guides/config.md)。 +``` diff --git a/docs/zh_CN/advanced_guides/pipeline.md b/docs/zh_CN/advanced_guides/pipeline.md new file mode 100644 index 0000000..99506b0 --- /dev/null +++ b/docs/zh_CN/advanced_guides/pipeline.md @@ -0,0 +1,148 @@ +# 自定义数据处理流程 + +## 数据流的设计 + +在[新数据集教程](./datasets.md)中,我们知道数据集类使用 `load_data_list` 方法来初始化整个数据集,我们将每个样本的信息保存到一个 dict 中。 + +通常,为了节省内存,我们只加载 `load_data_list` 中的图片路径和标签,使用时加载完整的图片内容。此外,我们可能希望在训练时选择样本时进行一些随机数据扩充。几乎所有的数据加载、预处理和格式化操作都可以通过**数据管道**在 MMPretrain 中进行配置。 + +数据管道意味着在从数据集中索引样本时如何处理样本字典,它由一系列数据变换组成。每个数据变换都将一个字典作为输入,对其进行处理,并为下一个数据变换输出一个字典。 + +这是 ImageNet 上 ResNet-50 训练的数据管道示例。 + +```python +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] +``` + +MMPretrain 中所有可用的数据变换都可以在 [数据变换文档](mmpretrain.datasets.transforms) 中找到。 + +## 修改训练/测试管道 + +MMPretrain 中的数据管道非常灵活。您几乎可以从配置文件中控制数据预处理的每一步,但另一方面,面对如此多的选项,您可能会感到困惑。 + +这是图像分类任务的常见做法和指南。 + +### 读取 + +在数据管道的开始,我们通常需要从文件路径加载图像数据。 +[`LoadImageFromFile`](mmcv.transforms.LoadImageFromFile) 通常用于执行此任务。 + +```python +train_pipeline = [ + dict(type='LoadImageFromFile'), + ... +] +``` + +如果您想从具有特殊格式或特殊位置的文件中加载数据,您可以 [实施新的加载变换](#添加新的数据变换) 并将其添加到数据管道的开头。 + +### 增强和其它处理 + +在训练过程中,我们通常需要做数据增强来避免过拟合。在测试过程中,我们还需要做一些数据处理,比如调整大小和裁剪。这些数据变换将放置在加载过程之后。 + +这是一个简单的数据扩充方案示例。它会将输入图像随机调整大小并裁剪到指定比例,并随机水平翻转图像。 + +```python +train_pipeline = [ + ... + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + ... +] +``` + +这是 [Swin-Transformer](../papers/swin_transformer.md) 训练中使用的大量数据增强配方示例。 为了与官方实施保持一致,它指定 `pillow` 作为调整大小后端,`bicubic` 作为调整大小算法。 此外,它添加了 [`RandAugment`](mmpretrain.datasets.transforms.RandAugment) 和 [`RandomErasing`](mmpretrain.datasets.transforms.RandomErasing) 作为额外的数据增强方法。 + +此配置指定了数据扩充的每个细节,您只需将其复制到您自己的配置文件中即可应用 Swin-Transformer 的数据扩充。 + +```python +bgr_mean = [103.53, 116.28, 123.675] +bgr_std = [57.375, 57.12, 58.395] + +train_pipeline = [ + ... + dict(type='RandomResizedCrop', scale=224, backend='pillow', interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict( + type='RandAugment', + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type='RandomErasing', + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + ... 
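+    # 示意:此处省略的收尾步骤通常为 PackInputs(见下文“格式化”一节)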
+] +``` + +```{note} +通常,数据管道中的数据增强部分仅处理图像方面的变换,而不处理图像归一化或混合/剪切混合等变换。 因为我们可以对 batch data 做 image normalization 和 mixup/cutmix 来加速。要配置图像归一化和 mixup/cutmix,请使用 [数据预处理器](mmpretrain.models.utils.data_preprocessor)。 +``` + +### 格式化 + +格式化是从数据信息字典中收集训练数据,并将这些数据转换为模型友好的格式。 + +在大多数情况下,您可以简单地使用 [`PackInputs`](mmpretrain.datasets.transforms.PackInputs),它将 NumPy 数组格式的图像转换为 PyTorch 张量,并将 ground truth 类别信息和其他元信息打包为 [`DataSample`](mmpretrain.structures.DataSample)。 + +```python +train_pipeline = [ + ... + dict(type='PackInputs'), +] +``` + +## 添加新的数据变换 + +1. 在任何文件中写入一个新的数据转换,例如 `my_transform.py`,并将其放在文件夹 `mmpretrain/datasets/transforms/` 中。 数据变换类需要继承 [`mmcv.transforms.BaseTransform`](mmcv.transforms.BaseTransform) 类并覆盖以字典作为输入并返回字典的 `transform` 方法。 + + ```python + from mmcv.transforms import BaseTransform + from mmpretrain.registry import TRANSFORMS + + @TRANSFORMS.register_module() + class MyTransform(BaseTransform): + + def transform(self, results): + # Modify the data information dict `results`. + return results + ``` + +2. 在 `mmpretrain/datasets/transforms/__init__.py` 中导入新的变换 + + ```python + ... + from .my_transform import MyTransform + + __all__ = [ + ..., 'MyTransform' + ] + ``` + +3. 在配置文件中使用 + + ```python + train_pipeline = [ + ... + dict(type='MyTransform'), + ... + ] + ``` + +## 数据管道可视化 + +数据流水线设计完成后,可以使用 [可视化工具](../useful_tools/dataset_visualization.md) 查看效果。 diff --git a/docs/zh_CN/advanced_guides/runtime.md b/docs/zh_CN/advanced_guides/runtime.md new file mode 100644 index 0000000..e5fa386 --- /dev/null +++ b/docs/zh_CN/advanced_guides/runtime.md @@ -0,0 +1,213 @@ +# 自定义运行参数 + +运行参数配置包括许多有用的功能,如权重文件保存、日志配置等等,在本教程中,我们将介绍如何配置这些功能。 + +## 保存权重文件 + +权重文件保存功能是一个在训练阶段默认注册的钩子, 你可以通过配置文件中的 `default_hooks.checkpoint` 字段配置它。 + +```{note} +钩子机制在 OpenMMLab 开源算法库中应用非常广泛。通过钩子,你可以在不修改运行器的主要执行逻辑的情况下插入许多功能。 + +可以通过{external+mmengine:doc}`相关文章 `进一步理解钩子。 +``` + +**默认配置:** + +```python +default_hooks = dict( + ... + checkpoint = dict(type='CheckpointHook', interval=1) + ... +) +``` + +下面是一些[权重文件钩子(CheckpointHook)](mmengine.hooks.CheckpointHook)的常用可配置参数。 + +- **`interval`** (int): 文件保存周期。如果使用-1,它将永远不会保存权重。 +- **`by_epoch`** (bool): 选择 **`interval`** 是基于epoch还是基于iteration, 默认为 `True`. 
+- **`out_dir`** (str): 保存权重文件的根目录。如果不指定,检查点将被保存在工作目录中。如果指定,检查点将被保存在 **`out_dir`** 的子文件夹中。 +- **`max_keep_ckpts`** (int): 要保留的权重文件数量。在某些情况下,为了节省磁盘空间,我们希望只保留最近的几个权重文件。默认为 -1,也就是无限制。 +- **`save_best`** (str, List[str]): 如果指定,它将保存具有最佳评估结果的权重。 + 通常情况下,你可以直接使用`save_best="auto"`来自动选择评估指标。 + +而如果你想要更高级的配置,请参考[权重文件钩子(CheckpointHook)](tutorials/hook.md#checkpointhook)。 + +## 权重加载 / 断点训练 + +在配置文件中,你可以加载指定模型权重或者断点继续训练,如下所示: + +```python +# 从指定权重文件加载 +load_from = "Your checkpoint path" + +# 是否从加载的断点继续训练 +resume = False +``` + +`load_from` 字段可以是本地路径,也可以是HTTP路径。你可以从检查点恢复训练,方法是指定 `resume=True`。 + +```{tip} +你也可以通过指定 `load_from=None` 和 `resume=True` 启用从最新的断点自动恢复。 +Runner执行器将自动从工作目录中找到最新的权重文件。 +``` + +如果你用我们的 `tools/train.py` 脚本来训练模型,你只需使用 `--resume` 参数来恢复训练,就不用手动修改配置文件了。如下所示: + +```bash +# 自动从最新的断点恢复 +python tools/train.py configs/resnet/resnet50_8xb32_in1k.py --resume + +# 从指定的断点恢复 +python tools/train.py configs/resnet/resnet50_8xb32_in1k.py --resume checkpoints/resnet.pth +``` + +## 随机性(Randomness)配置 + +为了让实验尽可能是可复现的, 我们在 `randomness` 字段中提供了一些控制随机性的选项。 + +默认情况下,我们不会在配置文件中指定随机数种子,在每次实验中,程序会生成一个不同的随机数种子。 + +**默认配置:** + +```python +randomness = dict(seed=None, deterministic=False) +``` + +为了使实验更具可复现性,你可以指定一个种子并设置 `deterministic=True`。 +`deterministic` 选项的使用效果可以在[这里](https://pytorch.org/docs/stable/notes/randomness.html#cuda-convolution-benchmarking)找到。 + +## 日志配置 + +日志的配置与多个字段有关。 + +在`log_level`字段中,你可以指定全局日志级别。参见 {external+python:ref}`Logging Levels` 以获得日志级别列表。 + +```python +log_level = 'INFO' +``` + +在 `default_hooks.logger` 字段中,你可以指定训练和测试期间的日志间隔。 +而所有可用的参数可以在[日志钩子文档](tutorials/hook.md#loggerhook)中找到。 + +```python +default_hooks = dict( + ... + # 每100次迭代就打印一次日志 + logger=dict(type='LoggerHook', interval=100), + ... +) +``` + +在 `log_processor` 字段中,你可以指定日志信息的平滑方法。 +通常,我们使用一个长度为10的窗口来平滑日志中的值,并输出所有信息的平均值。 +如果你想特别指定某些信息的平滑方法,请参阅{external+mmengine:doc}`日志处理器文档 `。 + +```python +# 默认设置,它将通过一个10长度的窗口平滑训练日志中的值 +log_processor = dict(window_size=10) +``` + +在 `visualizer` 字段中,你可以指定多个后端来保存日志信息,如TensorBoard和WandB。 +更多的细节可以在[可视化工具](#visualizer)找到。 + +## 自定义钩子 + +上述许多功能是由钩子实现的,你也可以通过修改 `custom_hooks` 字段来插入其他的自定义钩子。 +下面是 MMEngine 和 MMPretrain 中的一些钩子,你可以直接使用,例如: + +- [EMAHook](mmpretrain.engine.hooks.EMAHook) +- [SyncBuffersHook](mmengine.hooks.SyncBuffersHook) +- [EmptyCacheHook](mmengine.hooks.EmptyCacheHook) +- [ClassNumCheckHook](mmpretrain.engine.hooks.ClassNumCheckHook) +- ...... + +例如,EMA(Exponential Moving Average)在模型训练中被广泛使用,你可以以下方式启用它: + +```python +custom_hooks = [ + dict(type='EMAHook', momentum=4e-5, priority='ABOVE_NORMAL'), +] +``` + +## 验证可视化 + +验证可视化钩子是一个验证过程中默认注册的钩子。 +你可以在 `default_hooks.visualization` 字段中来配置它。 + +默认情况下,我们禁用这个钩子,你可以通过指定 `enable=True` 来启用它。而更多的参数可以在 +[可视化钩子文档](mmpretrain.engine.hooks.VisualizationHook)中找到。 + +```python +default_hooks = dict( + ... + visualization=dict(type='VisualizationHook', enable=False), + ... 
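+    # 将 enable 设为 True 即可开启验证可视化,更多参数见 VisualizationHook 文档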
+) +``` + +这个钩子将在验证数据集中选择一部分图像,在每次验证过程中记录并可视化它们的预测结果。 +你可以用它来观察训练期间模型在实际图像上的性能变化。 + +此外,如果你的验证数据集中的图像很小(\<100, 如Cifra数据集), +你可以指定 `rescale_factor` 来缩放它们,如 `rescale_factor=2.`, 将可视化的图像放大两倍。 + +## Visualizer + +`Visualizer` 用于记录训练和测试过程中的各种信息,包括日志、图像和标量。 +默认情况下,记录的信息将被保存在工作目录下的 `vis_data` 文件夹中。 + +**默认配置:** + +```python +visualizer = dict( + type='UniversalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + ] +) +``` + +通常,最有用的功能是将日志和标量如 `loss` 保存到不同的后端。 +例如,要把它们保存到 TensorBoard,只需像下面这样设置: + +```python +visualizer = dict( + type='UniversalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + dict(type='TensorboardVisBackend'), + ] +) +``` + +或者像下面这样把它们保存到 WandB: + +```python +visualizer = dict( + type='UniversalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + dict(type='WandbVisBackend'), + ] +) +``` + +## 环境配置 + +在 `env_cfg` 字段中,你可以配置一些底层的参数,如 cuDNN、多进程和分布式通信。 + +**在修改这些参数之前,请确保你理解这些参数的含义。** + +```python +env_cfg = dict( + # 是否启用cudnn基准测试 + cudnn_benchmark=False, + + # 设置多进程参数 + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + + # 设置分布式参数 + dist_cfg=dict(backend='nccl'), +) +``` diff --git a/docs/zh_CN/advanced_guides/schedule.md b/docs/zh_CN/advanced_guides/schedule.md new file mode 100644 index 0000000..d1c347d --- /dev/null +++ b/docs/zh_CN/advanced_guides/schedule.md @@ -0,0 +1,359 @@ +# 自定义训练优化策略 + +在我们的算法库中,已经提供了通用数据集(如ImageNet,CIFAR)的[默认训练策略配置](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/schedules)。如果想要在这些数据集上继续提升模型性能,或者在不同数据集和方法上进行新的尝试,我们通常需要修改这些默认的策略。 + +在本教程中,我们将介绍如何在运行自定义训练时,通过修改配置文件进行构造优化器、参数化精细配置、梯度裁剪、梯度累计以及定制动量调整策略等。同时也会通过模板简单介绍如何自定义开发优化器和构造器。 + +## 配置训练优化策略 + +我们通过 `optim_wrapper` 来配置主要的优化策略,包括优化器的选择,混合精度训练的选择,参数化精细配置,梯度裁剪以及梯度累计。接下来将分别介绍这些内容。 + +### 构造 PyTorch 内置优化器 + +MMPretrain 支持 PyTorch 实现的所有优化器,仅需在配置文件中,指定优化器封装需要的 `optimizer` 字段。 + +如果要使用 [`SGD`](torch.optim.SGD),则修改如下。这里要注意所有优化相关的配置都需要封装在 `optim_wrapper` 配置里。 + +```python +optim_wrapper = dict( + type='OptimWrapper', + optimizer=dict(type='SGD', lr=0.0003, weight_decay=0.0001) +) +``` + +```{note} +配置文件中的 'type' 不是构造时的参数,而是 PyTorch 内置优化器的类名。 +更多优化器选择可以参考{external+torch:ref}`PyTorch 支持的优化器列表`。 +``` + +要修改模型的学习率,只需要在优化器的配置中修改 `lr` 即可。 +要配置其他参数,可直接根据 [PyTorch API 文档](torch.optim) 进行。 + +例如,如果想使用 [`Adam`](torch.optim.Adam) 并设置参数为 `torch.optim.Adam(params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False)`。 +则需要进行如下修改: + +```python +optim_wrapper = dict( + type='OptimWrapper', + optimizer = dict( + type='Adam', + lr=0.001, + betas=(0.9, 0.999), + eps=1e-08, + weight_decay=0, + amsgrad=False), +) +``` + +````{note} +考虑到对于单精度训练来说,优化器封装的默认类型就是 `OptimWrapper`,我们在这里可以直接省略,因此配置文件可以进一步简化为: + +```python +optim_wrapper = dict( + optimizer=dict( + type='Adam', + lr=0.001, + betas=(0.9, 0.999), + eps=1e-08, + weight_decay=0, + amsgrad=False)) +``` +```` + +### 混合精度训练 + +如果我们想要使用混合精度训练(Automactic Mixed Precision),我们只需简单地将 `optim_wrapper` 的类型改为 `AmpOptimWrapper`。 + +```python +optim_wrapper = dict(type='AmpOptimWrapper', optimizer=...) 
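+# 完整写法的示意(这里假设基础优化器为 SGD,具体超参数请按任务调整):
+# optim_wrapper = dict(
+#     type='AmpOptimWrapper',
+#     optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=1e-4))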
+``` + +另外,为了方便,我们同时在启动训练脚本 `tools/train.py` 中提供了 `--amp` 参数作为开启混合精度训练的开关,更多细节可以参考[训练教程](../user_guides/train.md)。 + +### 参数化精细配置 + +在一些模型中,不同的优化策略需要适应特定的参数,例如不在 BatchNorm 层使用权重衰减,或者在不同层使用不同的学习率等等。 +我们需要用到 `optim_wrapper` 中的 `paramwise_cfg` 参数来进行精细化配置。 + +- **为不同类型的参数设置超参乘子** + + 例如,我们可以在 `paramwise_cfg` 配置中设置 `norm_decay_mult=0.` 来改变归一化层权重和偏移的衰减为0。 + + ```python + optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.8, weight_decay=1e-4), + paramwise_cfg=dict(norm_decay_mult=0.)) + ``` + + 支持更多类型的参数配置,参考以下列表: + + - `bias_lr_mult`:偏置的学习率系数(不包括正则化层的偏置以及可变形卷积的 offset),默认值为 1 + - `bias_decay_mult`:偏置的权值衰减系数(不包括正则化层的偏置以及可变形卷积的 offset),默认值为 1 + - `norm_decay_mult`:正则化层权重和偏置的权值衰减系数,默认值为 1 + - `flat_decay_mult`: 一维参数的权值衰减系数,默认值为 1 + - `dwconv_decay_mult`:Depth-wise 卷积的权值衰减系数,默认值为 1 + - `bypass_duplicate`:是否跳过重复的参数,默认为 `False` + - `dcn_offset_lr_mult`:可变形卷积(Deformable Convolution)的学习率系数,默认值为 1 + +- **为特定参数设置超参乘子** + + MMPretrain 通过 `paramwise_cfg` 的 `custom_keys` 参数来配置特定参数的超参乘子。 + + 例如,我们可以通过以下配置来设置所有 `backbone.layer0` 层的学习率和权重衰减为0, `backbone` 的其余层和优化器保持一致,另外 `head` 层的学习率为0.001. + + ```python + optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, weight_decay=0.0001), + paramwise_cfg=dict( + custom_keys={ + 'backbone.layer0': dict(lr_mult=0, decay_mult=0), + 'backbone': dict(lr_mult=1), + 'head': dict(lr_mult=0.1) + })) + ``` + +### 梯度裁剪 + +在训练过程中,损失函数可能接近于一些异常陡峭的区域,从而导致梯度爆炸。而梯度裁剪可以帮助稳定训练过程,更多介绍可以参见[该页面](https://paperswithcode.com/method/gradient-clipping)。 + +目前我们支持在 `optim_wrapper` 字段中添加 `clip_grad` 参数来进行梯度裁剪,更详细的参数可参考 [PyTorch 文档](torch.nn.utils.clip_grad_norm_)。 + +用例如下: + +```python +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, weight_decay=0.0001), + # norm_type: 使用的范数类型,此处使用范数2。 + clip_grad=dict(max_norm=35, norm_type=2)) +``` + +### 梯度累计 + +计算资源缺乏缺乏时,每个训练批次的大小(batch size)只能设置为较小的值,这可能会影响模型的性能。 + +可以使用梯度累计来规避这一问题。我们支持在 `optim_wrapper` 字段中添加 `accumulative_counts` 参数来进行梯度累计。 + +用例如下: + +```python +train_dataloader = dict(batch_size=64) +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, weight_decay=0.0001), + accumulative_counts=4) +``` + +表示训练时,每 4 个 iter 执行一次反向传播。由于此时单张 GPU 上的批次大小为 64,也就等价于单张 GPU 上一次迭代的批次大小为 256,也即: + +```python +train_dataloader = dict(batch_size=256) +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, weight_decay=0.0001)) +``` + +## 配置参数优化策略 + +在训练过程中,优化参数例如学习率、动量,通常不会是固定不变,而是随着训练进程的变化而调整。PyTorch 支持一些学习率调整的调度器,但是不足以完成复杂的策略。在 MMPretrain 中,我们提供 `param_scheduler` 来更好地控制不同优化参数的策略。 + +### 配置学习率调整策略 + +深度学习研究中,广泛应用学习率衰减来提高网络的性能。我们支持大多数 PyTorch 学习率调度器, 其中包括 `ExponentialLR`, `LinearLR`, `StepLR`, `MultiStepLR` 等等。 + +- **单个学习率策略** + + 多数情况下,我们使用单一学习率策略,这里 `param_scheduler` 会是一个字典。比如在默认的 ResNet 网络训练中,我们使用阶梯式的学习率衰减策略 [`MultiStepLR`](mmengine.optim.MultiStepLR),配置文件为: + + ```python + param_scheduler = dict( + type='MultiStepLR', + by_epoch=True, + milestones=[100, 150], + gamma=0.1) + ``` + + 或者我们想使用 [`CosineAnnealingLR`](mmengine.optim.CosineAnnealingLR) 来进行学习率衰减: + + ```python + param_scheduler = dict( + type='CosineAnnealingLR', + by_epoch=True, + T_max=num_epochs) + ``` + +- **多个学习率策略** + + 然而在一些其他情况下,为了提高模型的精度,通常会使用多种学习率策略。例如,在训练的早期阶段,网络容易不稳定,而学习率的预热就是为了减少这种不稳定性。 + + 整个学习过程中,学习率将会通过预热从一个很小的值逐步提高到预定值,再会通过其他的策略进一步调整。 + + 在 MMPretrain 中,我们同样使用 `param_scheduler` ,将多种学习策略写成列表就可以完成上述预热策略的组合。 + + 例如: + + 1. 
在前50次迭代中逐**迭代次数**地**线性**预热 + + ```python + param_scheduler = [ + # 逐迭代次数,线性预热 + dict(type='LinearLR', + start_factor=0.001, + by_epoch=False, # 逐迭代次数 + end=50), # 只预热50次迭代次数 + # 主要的学习率策略 + dict(type='MultiStepLR', + by_epoch=True, + milestones=[8, 11], + gamma=0.1) + ] + ``` + + 2. 在前10轮迭代中逐**迭代次数**地**线性**预热 + + ```python + param_scheduler = [ + # 在前10轮迭代中,逐迭代次数,线性预热 + dict(type='LinearLR', + start_factor=0.001, + by_epoch=True, + end=10, + convert_to_iter_based=True, # 逐迭代次数更新学习率. + ), + # 在 10 轮次后,通过余弦退火衰减 + dict(type='CosineAnnealingLR', by_epoch=True, begin=10) + ] + ``` + + 注意这里增加了 `begin` 和 `end` 参数,这两个参数指定了调度器的**生效区间**。生效区间通常只在多个调度器组合时才需要去设置,使用单个调度器时可以忽略。当指定了 `begin` 和 `end` 参数时,表示该调度器只在 [begin, end) 区间内生效,其单位是由 `by_epoch` 参数决定。在组合不同调度器时,各调度器的 `by_epoch` 参数不必相同。如果没有指定的情况下,`begin` 为 0, `end` 为最大迭代轮次或者最大迭代次数。 + + 如果相邻两个调度器的生效区间没有紧邻,而是有一段区间没有被覆盖,那么这段区间的学习率维持不变。而如果两个调度器的生效区间发生了重叠,则对多组调度器叠加使用,学习率的调整会按照调度器配置文件中的顺序触发(行为与 PyTorch 中 [`ChainedScheduler`](torch.optim.lr_scheduler.ChainedScheduler) 一致)。 + + ```{tip} + 为了避免学习率曲线与预期不符, 配置完成后,可以使用 MMPretrain 提供的 [学习率可视化工具](../useful_tools/scheduler_visualization.md) 画出对应学习率调整曲线。 + ``` + +### 配置动量调整策略 + +MMPretrain 支持动量调度器根据学习率修改优化器的动量,从而使损失函数收敛更快。用法和学习率调度器一致。 + +我们支持的动量策略和详细的使用细节可以参考[这里](https://github.com/open-mmlab/mmengine/blob/main/mmengine/optim/scheduler/momentum_scheduler.py)。我们只将调度器中的 `LR` 替换为了 `Momentum`,动量策略可以直接追加 `param_scheduler` 列表中。 + +这里是一个用例: + +```python +param_scheduler = [ + # 学习率策略 + dict(type='LinearLR', ...), + # 动量策略 + dict(type='LinearMomentum', + start_factor=0.001, + by_epoch=False, + begin=0, + end=1000) +] +``` + +## 新增优化器或者优化器构造器 + +```{note} +本部分将修改 MMPretrain 源码或者向 MMPretrain 框架添加代码,初学者可跳过。 +``` + +### 新增优化器 + +在学术研究和工业实践中,可能需要使用 MMPretrain 未实现的优化方法,可以通过以下方法添加。 + +1. 定义一个新的优化器 + + 一个自定义的优化器可根据如下规则进行定制: + + 假设我们想添加一个名为 `MyOptimzer` 的优化器,其拥有参数 `a`, `b` 和 `c`。 + 可以创建一个名为 `mmpretrain/engine/optimizer` 的文件夹,并在目录下的一个文件,如 `mmpretrain/engine/optimizer/my_optimizer.py` 中实现该自定义优化器: + + ```python + from mmpretrain.registry import OPTIMIZERS + from torch.optim import Optimizer + + + @OPTIMIZERS.register_module() + class MyOptimizer(Optimizer): + + def __init__(self, a, b, c): + ... + + def step(self, closure=None): + ... + ``` + +2. 注册优化器 + + 要注册上面定义的上述模块,首先需要将此模块导入到主命名空间中。有两种方法可以实现它。 + + 修改 `mmpretrain/engine/optimizers/__init__.py`,将其导入至 `mmpretrain.engine` 包。 + + ```python + # 在 mmpretrain/engine/optimizers/__init__.py 中 + ... + from .my_optimizer import MyOptimizer # MyOptimizer 是我们自定义的优化器的名字 + + __all__ = [..., 'MyOptimizer'] + ``` + + 在运行过程中,我们会自动导入 `mmpretrain.engine` 包并同时注册 `MyOptimizer`。 + +3. 在配置文件中指定优化器 + + 之后,用户便可在配置文件的 `optim_wrapper.optimizer` 域中使用 `MyOptimizer`: + + ```python + optim_wrapper = dict( + optimizer=dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value)) + ``` + +### 新增优化器构造器 + +某些模型可能具有一些特定于参数的设置以进行优化,例如为所有 BatchNorm 层设置不同的权重衰减。 + +尽管我们已经可以使用 [`optim_wrapper.paramwise_cfg` 字段](#参数化精细配置)来配置特定参数的优化设置,但可能仍然无法覆盖你的需求。 + +当然你可以在此基础上进行修改。我们默认使用 [`DefaultOptimWrapperConstructor`](mmengine.optim.DefaultOptimWrapperConstructor) 来构造优化器。在构造过程中,通过 `paramwise_cfg` 来精细化配置不同设置。这个默认构造器可以作为新优化器构造器实现的模板。 + +我们可以新增一个优化器构造器来覆盖这些行为。 + +```python +# 在 mmpretrain/engine/optimizers/my_optim_constructor.py 中 +from mmengine.optim import DefaultOptimWrapperConstructor +from mmpretrain.registry import OPTIM_WRAPPER_CONSTRUCTORS + + +@OPTIM_WRAPPER_CONSTRUCTORS.register_module() +class MyOptimWrapperConstructor: + + def __init__(self, optim_wrapper_cfg, paramwise_cfg=None): + ... + + def __call__(self, model): + ... 
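+        # 示意:__call__ 应返回构建完成的优化器封装(OptimWrapper 实例)。
+        # 典型流程为:按 paramwise_cfg 对 model 的参数进行分组,
+        # 构建 optimizer,再按 optim_wrapper_cfg 包装成对应的 OptimWrapper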
+``` + +这是一个已实现的 [OptimWrapperConstructor](mmpretrain.engine.optimizers.LearningRateDecayOptimWrapperConstructor) 具体例子。 + +接下来类似 [新增优化器教程](#新增优化器) 来导入并使用新的优化器构造器。 + +1. 修改 `mmpretrain/engine/optimizers/__init__.py`,将其导入至 `mmpretrain.engine` 包。 + + ```python + # 在 mmpretrain/engine/optimizers/__init__.py 中 + ... + from .my_optim_constructor import MyOptimWrapperConstructor + + __all__ = [..., 'MyOptimWrapperConstructor'] + ``` + +2. 在配置文件的 `optim_wrapper.constructor` 字段中使用 `MyOptimWrapperConstructor` 。 + + ```python + optim_wrapper = dict( + constructor=dict(type='MyOptimWrapperConstructor'), + optimizer=..., + paramwise_cfg=..., + ) + ``` diff --git a/docs/zh_CN/api b/docs/zh_CN/api new file mode 100644 index 0000000..0ef434a --- /dev/null +++ b/docs/zh_CN/api @@ -0,0 +1 @@ +../en/api \ No newline at end of file diff --git a/docs/zh_CN/conf.py b/docs/zh_CN/conf.py new file mode 100644 index 0000000..2c372a8 --- /dev/null +++ b/docs/zh_CN/conf.py @@ -0,0 +1,253 @@ +# flake8: noqa +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import subprocess +import sys + +import pytorch_sphinx_theme +from sphinx.builders.html import StandaloneHTMLBuilder + +sys.path.insert(0, os.path.abspath('../../')) + +# -- Project information ----------------------------------------------------- + +project = 'MMPretrain' +copyright = '2020, OpenMMLab' +author = 'MMPretrain Authors' + +# The full version, including alpha/beta/rc tags +version_file = '../../mmpretrain/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.intersphinx', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'myst_parser', + 'sphinx_copybutton', + 'sphinx_tabs.tabs', + 'notfound.extension', + 'sphinxcontrib.jquery', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} + +language = 'zh_CN' + +# The master toctree document. +root_doc = 'index' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. 
+# +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# yapf: disable +html_theme_options = { + 'menu': [ + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmpretrain' + }, + { + 'name': 'Colab 教程', + 'children': [ + {'name': '用命令行工具训练和推理', + 'url': 'https://colab.research.google.com/github/mzr1996/mmpretrain-tutorial/blob/master/1.x/MMPretrain_tools.ipynb'}, + {'name': '用 Python API 训练和推理', + 'url': 'https://colab.research.google.com/github/mzr1996/mmpretrain-tutorial/blob/master/1.x/MMPretrain_python.ipynb'}, + ] + }, + { + 'name': 'Version', + 'children': [ + {'name': 'MMPretrain 0.x', + 'url': 'https://mmpretrain.readthedocs.io/zh_CN/0.x/', + 'description': '0.x branch'}, + {'name': 'MMPretrain 1.x', + 'url': 'https://mmpretrain.readthedocs.io/zh_CN/latest/', + 'description': 'Main branch'}, + ], + } + ], + # Specify the language of shared menu + 'menu_lang': 'cn', + # Disable the default edit on GitHub + 'default_edit_on_github': False, +} +# yapf: enable + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] +html_css_files = [ + 'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.css', + 'css/readthedocs.css' +] +html_js_files = [ + 'https://cdn.datatables.net/v/bs4/dt-1.12.1/datatables.min.js', + 'js/custom.js' +] + +# -- Options for HTMLHelp output --------------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'mmpretraindoc' + +# -- Options for LaTeX output ------------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (root_doc, 'mmpretrain.tex', 'MMPretrain Documentation', author, 'manual'), +] + +# -- Options for manual page output ------------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [(root_doc, 'mmpretrain', 'MMPretrain Documentation', [author], 1)] + +# -- Options for Texinfo output ---------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (root_doc, 'mmpretrain', 'MMPretrain Documentation', author, 'mmpretrain', + 'OpenMMLab pre-training toolbox and benchmark.', 'Miscellaneous'), +] + +# -- Options for Epub output ------------------------------------------------- + +# Bibliographic Dublin Core info. +epub_title = project + +# The unique identifier of the text. This can be a ISBN number +# or the project homepage. +# +# epub_identifier = '' + +# A unique identification for the text. 
+# +# epub_uid = '' + +# A list of files that should not be packed into the epub file. +epub_exclude_files = ['search.html'] + +# set priority when building html +StandaloneHTMLBuilder.supported_image_types = [ + 'image/svg+xml', 'image/gif', 'image/png', 'image/jpeg' +] + +# -- Extension configuration ------------------------------------------------- +# Ignore >>> when copying code +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True + +# Auto-generated header anchors +myst_heading_anchors = 3 +# Enable "colon_fence" extension of myst. +myst_enable_extensions = ['colon_fence', 'dollarmath'] + +# Configuration for intersphinx +intersphinx_mapping = { + 'python': ('https://docs.python.org/3', None), + 'numpy': ('https://numpy.org/doc/stable', None), + 'torch': ('https://pytorch.org/docs/stable/', None), + 'mmcv': ('https://mmcv.readthedocs.io/zh_CN/2.x/', None), + 'mmengine': ('https://mmengine.readthedocs.io/zh_CN/latest/', None), + 'transformers': + ('https://huggingface.co/docs/transformers/main/zh/', None), +} +napoleon_custom_sections = [ + # Custom sections for data elements. + ('Meta fields', 'params_style'), + ('Data fields', 'params_style'), +] + +# Disable docstring inheritance +autodoc_inherit_docstrings = False +# Mock some imports during generate API docs. +autodoc_mock_imports = ['rich', 'attr', 'einops', 'mat4py'] +# Disable displaying type annotations, these can be very verbose +autodoc_typehints = 'none' + +# The not found page +notfound_template = '404.html' + + +def builder_inited_handler(app): + if subprocess.run(['./stat.py']).returncode != 0: + raise RuntimeError('Failed to run the script `stat.py`.') + + +def setup(app): + app.add_config_value('no_underscore_emphasis', False, 'env') + app.connect('builder-inited', builder_inited_handler) diff --git a/docs/zh_CN/device/npu.md b/docs/zh_CN/device/npu.md new file mode 100644 index 0000000..b81c175 --- /dev/null +++ b/docs/zh_CN/device/npu.md @@ -0,0 +1,41 @@ +# NPU (华为昇腾) + +## 使用方法 + +首先,请参考[链接](https://mmcv.readthedocs.io/zh_CN/latest/get_started/build.html#npu-mmcv-full)安装带有 NPU 支持的 MMCV 和[链接](https://mmengine.readthedocs.io/en/latest/get_started/installation.html#build-from-source)安装 MMEngine。 + +使用如下命令,可以利用 8 个 NPU 在机器上训练模型(以 ResNet 为例): + +```shell +bash tools/dist_train.sh configs/cspnet/resnet50_8xb32_in1k.py 8 +``` + +或者,使用如下命令,在一个 NPU 上训练模型(以 ResNet 为例): + +```shell +python tools/train.py configs/cspnet/resnet50_8xb32_in1k.py +``` + +## 经过验证的模型 + +| Model | Top-1 (%) | Top-5 (%) | Config | Download | +| :---------------------------------------------------------: | :-------: | :-------: | :----------------------------------------------------------: | :-------------------------------------------------------------: | +| [ResNet-50](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/README.md) | 76.40 | 93.21 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet50_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v1/device/npu/resnet50_8xb32_in1k.log) | +| [ResNetXt-32x4d-50](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnext/README.md) | 77.48 | 93.75 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnext/resnext50-32x4d_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v1/device/npu/resnext50-32x4d_8xb32_in1k.log) | +| [HRNet-W18](https://github.com/open-mmlab/mmclassification/blob/master/configs/hrnet/README.md) | 
77.06 | 93.57 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/hrnet/hrnet-w18_4xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v1/device/npu/hrnet-w18_4xb32_in1k.log) | +| [ResNetV1D-152](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/README.md) | 79.41 | 94.48 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnetv1d152_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v1/device/npu/resnetv1d152_8xb32_in1k.log) | +| [SE-ResNet-50](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/seresnet/README.md) | 77.65 | 93.74 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/seresnet/seresnet50_8xb32_in1k.py) | [model](<>) \|[log](https://download.openmmlab.com/mmclassification/v1/device/npu/seresnet50_8xb32_in1k.log) | +| [ShuffleNetV2 1.0x](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/shufflenet_v2/README.md) | 69.52 | 88.79 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/shufflenet_v2/shufflenet-v2-1x_16xb64_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v1/device/npu/shufflenet-v2-1x_16xb64_in1k.log) | +| [MobileNetV2](https://github.com/open-mmlab/mmclassification/tree/1.x/configs/mobilenet_v2) | 71.74 | 90.28 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilenet_v2/mobilenet-v2_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v1/device/npu/mobilenet-v2_8xb32_in1k.log) | +| [MobileNetV3-Small](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilenet_v3/README.md) | 67.09 | 87.17 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilenet_v3/mobilenet-v3-small_8xb128_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v1/device/npu/mobilenet-v3-small.log) | +| [\*CSPResNeXt50](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/cspnet/README.md) | 77.25 | 93.46 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/cspnet/cspresnext50_8xb32_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v1/device/npu/cspresnext50_8xb32_in1k.log) | +| [\*EfficientNet-B4](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/README.md) | 75.73 | 92.9100 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-b4_8xb32_in1k.py) | [model](<>) \|[log](https://download.openmmlab.com/mmclassification/v1/device/npu/efficientnet-b4_8xb32_in1k.log) | +| [\*\*DenseNet121](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/densenet/README.md) | 72.53 | 90.85 | [config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/densenet/densenet121_4xb256_in1k.py) | [model](<>) \| [log](https://download.openmmlab.com/mmclassification/v1/device/npu/densenet121_4xb256_in1k.log) | + +**注意:** + +- 如果没有特别标记,NPU 上的结果与使用 FP32 的 GPU 上的结果结果相同。 +- (\*) 这些模型的训练结果低于相应模型中自述文件上的结果,主要是因为自述文件上的结果直接是 timm 训练得出的权重,而这边的结果是根据 mmcls 的配置重新训练得到的结果。GPU 上的配置训练结果与 NPU 的结果相同。 +- (\*\*)这个模型的精度略低,因为 config 是 4 张卡的配置,我们使用 8 张卡来运行,用户可以调整超参数以获得最佳精度结果。 + +**以上所有模型权重及训练日志均由华为昇腾团队提供** diff --git a/docs/zh_CN/docutils.conf b/docs/zh_CN/docutils.conf new file mode 100644 index 0000000..0c00c84 --- /dev/null +++ b/docs/zh_CN/docutils.conf @@ -0,0 +1,2 @@ +[html writers] +table_style: colwidths-auto diff --git 
a/docs/zh_CN/get_started.md b/docs/zh_CN/get_started.md new file mode 100644 index 0000000..0cf252f --- /dev/null +++ b/docs/zh_CN/get_started.md @@ -0,0 +1,163 @@ +# 依赖环境 + +在本节中,我们将演示如何准备 PyTorch 相关的依赖环境。 + +MMPretrain 适用于 Linux、Windows 和 macOS。它需要 Python 3.7+、CUDA 10.2+ 和 PyTorch 1.8+。 + +```{note} +如果你对配置 PyTorch 环境已经很熟悉,并且已经完成了配置,可以直接进入[下一节](#安装)。 +否则的话,请依照以下步骤完成配置。 +``` + +**第 1 步** 从[官网](https://docs.conda.io/en/latest/miniconda.html)下载并安装 Miniconda。 + +**第 2 步** 创建一个 conda 虚拟环境并激活它。 + +```shell +conda create --name openmmlab python=3.8 -y +conda activate openmmlab +``` + +**第 3 步** 按照[官方指南](https://pytorch.org/get-started/locally/)安装 PyTorch。例如: + +在 GPU 平台: + +```shell +conda install pytorch torchvision -c pytorch +``` + +```{warning} +以上命令会自动安装最新版的 PyTorch 与对应的 cudatoolkit,请检查它们是否与你的环境匹配。 +``` + +在 CPU 平台: + +```shell +conda install pytorch torchvision cpuonly -c pytorch +``` + +# 安装 + +我们推荐用户按照我们的最佳实践来安装 MMPretrain。但除此之外,如果你想根据 +你的习惯完成安装流程,也可以参见[自定义安装](#自定义安装)一节来获取更多信息。 + +## 最佳实践 + +根据具体需求,我们支持两种安装模式: + +- [从源码安装(推荐)](#从源码安装):希望基于 MMPretrain 框架开发自己的预训练任务,需要添加新的功能,比如新的模型或是数据集,或者使用我们提供的各种工具。 +- [作为 Python 包安装](#作为-python-包安装):只是希望调用 MMPretrain 的 API 接口,或者在自己的项目中导入 MMPretrain 中的模块。 + +### 从源码安装 + +这种情况下,从源码按如下方式安装 mmpretrain: + +```shell +git clone https://github.com/open-mmlab/mmpretrain.git +cd mmpretrain +pip install -U openmim && mim install -e . +``` + +```{note} +`"-e"` 表示以可编辑形式安装,这样可以在不重新安装的情况下,让本地修改直接生效 +``` + +### 作为 Python 包安装 + +直接使用 mim 安装即可。 + +```shell +pip install -U openmim && mim install "mmpretrain>=1.0.0rc8" +``` + +```{note} +`mim` 是一个轻量级的命令行工具,可以根据 PyTorch 和 CUDA 版本为 OpenMMLab 算法库配置合适的环境。同时它也提供了一些对于深度学习实验很有帮助的功能。 +``` + +## 安装多模态支持 (可选) + +MMPretrain 中的多模态模型需要额外的依赖项,要安装这些依赖项,请在安装过程中添加 `[multimodal]` 参数,如下所示: + +```shell +# 从源码安装 +mim install -e ".[multimodal]" + +# 作为 Python 包安装 +mim install "mmpretrain[multimodal]>=1.0.0rc8" +``` + +## 验证安装 + +为了验证 MMPretrain 的安装是否正确,我们提供了一些示例代码来执行模型推理。 + +如果你是**从源码安装**的 mmpretrain,那么直接运行以下命令进行验证: + +```shell +python demo/image_demo.py demo/demo.JPEG resnet18_8xb32_in1k --device cpu +``` + +你可以看到命令行中输出了结果字典,包括 `pred_label`,`pred_score` 和 `pred_class` 三个字段。 + +如果你是**作为 Python 包安装**,那么可以打开你的 Python 解释器,并粘贴如下代码: + +```python +from mmpretrain import get_model, inference_model + +model = get_model('resnet18_8xb32_in1k', device='cpu') # 或者 device='cuda:0' +inference_model(model, 'demo/demo.JPEG') +``` + +你会看到输出一个字典,包含预测的标签、得分及类别名。 + +```{note} +以上示例中,`resnet18_8xb32_in1k` 是模型名称。你可以使用 [`mmpretrain.list_models`](mmpretrain.apis.list_models) 接口来 +浏览所有的模型,或者在[模型汇总](./modelzoo_statistics.md)页面进行查找。 +``` + +## 自定义安装 + +### CUDA 版本 + +安装 PyTorch 时,需要指定 CUDA 版本。如果您不清楚选择哪个,请遵循我们的建议: + +- 对于 Ampere 架构的 NVIDIA GPU,例如 GeForce 30 series 以及 NVIDIA A100,CUDA 11 是必需的。 +- 对于更早的 NVIDIA GPU,CUDA 11 是向前兼容的,但 CUDA 10.2 能够提供更好的兼容性,也更加轻量。 + +请确保你的 GPU 驱动版本满足最低的版本需求,参阅[这张表](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions)。 + +```{note} +如果按照我们的最佳实践进行安装,CUDA 运行时库就足够了,因为我们提供相关 CUDA 代码的预编译,你不需要进行本地编译。 +但如果你希望从源码进行 MMCV 的编译,或是进行其他 CUDA 算子的开发,那么就必须安装完整的 CUDA 工具链,参见 +[NVIDIA 官网](https://developer.nvidia.com/cuda-downloads),另外还需要确保该 CUDA 工具链的版本与 PyTorch 安装时 +的配置相匹配(如用 `conda install` 安装 PyTorch 时指定的 cudatoolkit 版本)。 +``` + +### 在 CPU 环境中安装 + +MMPretrain 可以仅在 CPU 环境中安装,在 CPU 模式下,你可以完成训练、测试和模型推理等所有操作。 + +### 在 Google Colab 中安装 + +参考 [Colab 教程](https://colab.research.google.com/github/mzr1996/mmclassification-tutorial/blob/master/1.x/MMClassification_tools.ipynb) 安装即可。 + +### 通过 
Docker 使用 MMPretrain + +MMPretrain 提供 [Dockerfile](https://github.com/open-mmlab/mmpretrain/blob/main/docker/Dockerfile) +用于构建镜像。请确保你的 [Docker 版本](https://docs.docker.com/engine/install/) >=19.03。 + +```shell +# 构建默认的 PyTorch 1.12.1,CUDA 11.3 版本镜像 +# 如果你希望使用其他版本,请修改 Dockerfile +docker build -t mmpretrain docker/ +``` + +用以下命令运行 Docker 镜像: + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmpretrain/data mmpretrain +``` + +## 故障解决 + +如果你在安装过程中遇到了什么问题,请先查阅[常见问题](./notes/faq.md)。如果没有找到解决方法,可以在 GitHub +上[提出 issue](https://github.com/open-mmlab/mmpretrain/issues/new/choose)。 diff --git a/docs/zh_CN/index.rst b/docs/zh_CN/index.rst new file mode 100644 index 0000000..ca57faa --- /dev/null +++ b/docs/zh_CN/index.rst @@ -0,0 +1,150 @@ +欢迎来到 MMPretrain 中文教程! +========================================== + +MMPretrain 是一个全新升级的预训练开源算法框架,旨在提供各种强大的预训练主干网络, +并支持了不同的预训练策略。MMPretrain 源自著名的开源项目 +`MMClassification `_ +和 `MMSelfSup `_,并开发了许多令人兴奋的新功能。 +目前,预训练阶段对于视觉识别至关重要,凭借丰富而强大的预训练模型,我们能够改进各种下游视觉任务。 + +我们的代码库旨在成为一个易于使用和用户友好的代码库库,并简化学术研究活动和工程任务。 +我们在以下不同部分中详细介绍了 MMPretrain 的特性和设计。 + +MMPretrain 上手路线 +------------------------------- + +为了用户能够快速上手,我们推荐以下流程: + + - 对于想要使用 MMPretrain 的用户,我们推荐先阅读 开始你的第一步_ 部分来设置环境。 + + - 对于一些基础使用,我们建议用户阅读 教程_ 来学习如何使用算法库来获得预训练模型以及在下游任务进行评测。 + + - 若您想进行算法的自定义,我们提供了 进阶教程_ 来阐述了代码修改的方法和规则。 + + - 如果您想找到所期望的预训练模型,您可以浏览 模型库_,其中包含了模型库的总结,以及各类主干网络和预训练算法的介绍。 + + - 我们同样提供了 分析工具_ 和 可视化_ 来辅助模型分析。 + + - 另外,如果您还有其它问题,欢迎查阅 其他说明_,也许可以找到您想要的答案。 + +我们始终非常欢迎用户的 PRs 和 Issues 来完善 MMPretrain! + +.. _开始你的第一步: +.. toctree:: + :maxdepth: 1 + :caption: 开始你的第一步 + + get_started.md + +.. _教程: +.. toctree:: + :maxdepth: 1 + :caption: 教程 + + user_guides/config.md + user_guides/dataset_prepare.md + user_guides/inference.md + user_guides/train.md + user_guides/test.md + user_guides/downstream.md + +.. _进阶教程: +.. toctree:: + :maxdepth: 1 + :caption: 进阶教程 + + advanced_guides/datasets.md + advanced_guides/pipeline.md + advanced_guides/modules.md + advanced_guides/schedule.md + advanced_guides/runtime.md + advanced_guides/evaluation.md + advanced_guides/convention.md + +.. _模型库: +.. toctree:: + :maxdepth: 1 + :caption: 模型库 + :glob: + + modelzoo_statistics.md + papers/* + +.. _可视化: +.. toctree:: + :maxdepth: 1 + :caption: 可视化 + + useful_tools/dataset_visualization.md + useful_tools/scheduler_visualization.md + useful_tools/cam_visualization.md + useful_tools/t-sne_visualization.md + +.. _分析工具: +.. toctree:: + :maxdepth: 1 + :caption: 分析工具 + + useful_tools/print_config.md + useful_tools/verify_dataset.md + useful_tools/log_result_analysis.md + useful_tools/complexity_analysis.md + useful_tools/confusion_matrix.md + useful_tools/shape_bias.md + +.. toctree:: + :maxdepth: 1 + :caption: 部署 + + useful_tools/model_serving.md + +.. toctree:: + :maxdepth: 1 + :caption: 迁移指南 + + migration.md + +.. toctree:: + :maxdepth: 1 + :caption: API 参考文档 + + mmpretrain.apis + mmpretrain.engine + mmpretrain.datasets + 数据处理 + mmpretrain.models + mmpretrain.structures + mmpretrain.visualization + mmpretrain.evaluation + mmpretrain.utils + +.. _其他说明: +.. toctree:: + :maxdepth: 1 + :caption: 其他说明 + + notes/contribution_guide.md + notes/projects.md + notes/changelog.md + notes/faq.md + notes/pretrain_custom_dataset.md + notes/finetune_custom_dataset.md + +.. toctree:: + :maxdepth: 1 + :caption: 设备支持 + + device/npu.md + +.. 
toctree:: + :caption: 切换语言 + + English + 简体中文 + + +索引与表格 +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/docs/zh_CN/locales/zh_CN/LC_MESSAGES/api.po b/docs/zh_CN/locales/zh_CN/LC_MESSAGES/api.po new file mode 100644 index 0000000..abfc40d --- /dev/null +++ b/docs/zh_CN/locales/zh_CN/LC_MESSAGES/api.po @@ -0,0 +1,9090 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2020, OpenMMLab +# This file is distributed under the same license as the MMClassification +# package. +# FIRST AUTHOR , 2021. +# +msgid "" +msgstr "" +"Project-Id-Version: MMClassification\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2022-11-22 08:42+0800\n" +"PO-Revision-Date: 2022-11-22 15:18+0800\n" +"Last-Translator: Ma Zerun \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.9.1\n" +"Language-Team: \n" +"Language: zh_CN\n" +"X-Generator: Poedit 2.3\n" + +#: ../../api/apis.rst:7 ../../api/apis.rst:14 +msgid "mmcls.apis" +msgstr "" + +#: ../../api/apis.rst:9 +msgid "These are some high-level APIs for classification tasks." +msgstr "该包提供了一些用于分类任务的高阶 API" + +#: ../../api/apis.rst:17 +msgid "Inference" +msgstr "推理" + +#: ../../api/apis.rst:24::1 +msgid ":py:obj:`init_model `" +msgstr "" + +#: ../../api/apis.rst:24::1 mmcls.apis.inference.init_model:1 of +msgid "Initialize a classifier from config file." +msgstr "从配置文件初始化一个分类器" + +#: ../../api/apis.rst:24::1 +msgid ":py:obj:`inference_model `" +msgstr "" + +#: ../../api/apis.rst:24::1 mmcls.apis.inference.inference_model:1 of +msgid "Inference image(s) with the classifier." +msgstr "使用分类器推理图像" + +#: ../../api/data_process.rst:5 +msgid "Data Process" +msgstr "数据处理" + +#: ../../api/data_process.rst:7 +msgid "" +"In MMClassification, the data process and the dataset is decomposed. The datasets only define how to get " +"samples' basic information from the file system. These basic information includes the ground-truth label " +"and raw images data / the paths of images.The data process includes data transforms, data preprocessors and " +"batch augmentations." +msgstr "" +"在 MMClassification 中,数据处理和数据集是解耦的。数据集只定义了如何从文件系统中获取样本的基本信息。这些基本" +"信息包括分类标签和原始图像数据/图像的路径。完整的数据处理流程包括了数据变换(data transform)、数据预处理器" +"(data preprocessor)及批量数据增强(batch augmentation)。" + +#: ../../api/data_process.rst:13 +msgid "" +":mod:`Data Transforms `: Transforms includes loading, preprocessing, formatting " +"and etc." +msgstr "" +":mod:`数据变换 `:数据变换包括了数据的加载、部分预处理/增强、数据格式化等操作" + +#: ../../api/data_process.rst:14 +msgid "" +":mod:`Data Preprocessors `: Processes includes collate, " +"normalization, stacking, channel fliping and etc." +msgstr "" +":mod:`数据预处理器 `:主要负责批量数据的收集、归一化、堆叠、通道翻转等" +"操作。" + +#: ../../api/data_process.rst:16 +msgid "" +":mod:`Batch Augmentations `: Batch augmentation involves multiple " +"samples, such as Mixup and CutMix." +msgstr "" +":mod:`批量数据增强 `:批量数据增强是数据预处理器的功能之一,负责处理涉及" +"多个样本的数据增强操作,例如 Mixup 和 CutMix。" + +#: ../../api/data_process.rst:21 +msgid "Data Transforms" +msgstr "数据变换" + +#: ../../api/data_process.rst:23 +msgid "" +"To prepare the inputs data, we need to do some transforms on these basic information. These transforms " +"includes loading, preprocessing and formatting. And a series of data transforms makes up a data pipeline. 
" +"Therefore, you can find the a ``pipeline`` argument in the configs of dataset, for example:" +msgstr "" +"为了准备输入数据,我们需要对数据集中保存的基本信息做一些变换。这些变换包括数据加载、部分预处理和增强、格式" +"化。一系列的数据变换组成了数据流水线(data pipeline)。因此,在数据集的配置参数中通常存在一个 ``pipeline`` " +"参数,例如:" + +#: ../../api/data_process.rst:46 +msgid "" +"Every item of a pipeline list is one of the following data transforms class. And if you want to add a " +"custom data transformation class, the tutorial :doc:`Custom Data Pipelines ` " +"will help you." +msgstr "" +"``pipeline`` 列表中的每一项都是以下数据变换类之一。如果您想添加自定义数据变换类,可以参考 :doc:`自定义数据流" +"水线教程 `。" + +#: ../../api/data_process.rst:54 +msgid "Processing and Augmentation" +msgstr "组合式增强" + +#: ../../api/data_process.rst:70::1 +msgid ":py:obj:`Albumentations `" +msgstr "" + +#: ../../api/data_process.rst:70::1 mmcls.datasets.transforms.processing.Albumentations:1 of +msgid "Wrapper to use augmentation from albumentations library." +msgstr "使用 Albumentations 库进行数据变换的封装类" + +#: ../../api/data_process.rst:70::1 +msgid ":py:obj:`ColorJitter `" +msgstr "" + +#: ../../api/data_process.rst:70::1 mmcls.datasets.transforms.processing.ColorJitter:1 of +msgid "Randomly change the brightness, contrast and saturation of an image." +msgstr "随机改变图像的亮度、对比度和饱和度" + +#: ../../api/data_process.rst:70::1 +msgid ":py:obj:`EfficientNetCenterCrop `" +msgstr "" + +#: ../../api/data_process.rst:70::1 mmcls.datasets.transforms.processing.EfficientNetCenterCrop:1 +#: of +msgid "EfficientNet style center crop." +msgstr "EfficientNet 风格的中心裁剪" + +#: ../../api/data_process.rst:70::1 +msgid ":py:obj:`EfficientNetRandomCrop `" +msgstr "" + +#: ../../api/data_process.rst:70::1 mmcls.datasets.transforms.processing.EfficientNetRandomCrop:1 +#: of +msgid "EfficientNet style RandomResizedCrop." +msgstr "EfficientNet 风格的随机缩放裁剪" + +#: ../../api/data_process.rst:70::1 +msgid ":py:obj:`Lighting `" +msgstr "" + +#: ../../api/data_process.rst:70::1 mmcls.datasets.transforms.processing.Lighting:1 of +msgid "Adjust images lighting using AlexNet-style PCA jitter." +msgstr "使用 AlexNet 风格的 PCA 抖动随机调整图像照明" + +#: ../../api/data_process.rst:70::1 +msgid ":py:obj:`RandomCrop `" +msgstr "" + +#: ../../api/data_process.rst:70::1 mmcls.datasets.transforms.processing.RandomCrop:1 of +msgid "Crop the given Image at a random location." +msgstr "在随机位置裁剪给定图像" + +#: ../../api/data_process.rst:70::1 +msgid ":py:obj:`RandomErasing `" +msgstr "" + +#: ../../api/data_process.rst:70::1 mmcls.datasets.transforms.processing.RandomErasing:1 of +msgid "Randomly selects a rectangle region in an image and erase pixels." +msgstr "在图像中随机选择一个矩形区域并擦除像素" + +#: ../../api/data_process.rst:70::1 +msgid ":py:obj:`RandomResizedCrop `" +msgstr "" + +#: ../../api/data_process.rst:70::1 mmcls.datasets.transforms.processing.RandomResizedCrop:1 of +msgid "Crop the given image to random scale and aspect ratio." +msgstr "将给定图像按照随机尺寸和纵横比进行裁剪" + +#: ../../api/data_process.rst:70::1 +msgid ":py:obj:`ResizeEdge `" +msgstr "" + +#: ../../api/data_process.rst:70::1 mmcls.datasets.transforms.processing.ResizeEdge:1 of +msgid "Resize images along the specified edge." +msgstr "按照指定边长调整图像尺寸" + +#: ../../api/data_process.rst:72 +msgid "Composed Augmentation" +msgstr "组合式增强" + +#: ../../api/data_process.rst:73 +msgid "" +"Composed augmentation is a kind of methods which compose a series of data augmentation transforms, such as " +"``AutoAugment`` and ``RandAugment``." 
+msgstr "" +"组合式增强将一系列数据增强方法组合在一起,实现对样本的整体增强,例如 ``AutoAugment`` 和 ``RandAugment``" + +#: ../../api/data_process.rst:83::1 +msgid ":py:obj:`AutoAugment `" +msgstr "" + +#: ../../api/data_process.rst:83::1 mmcls.datasets.transforms.auto_augment.AutoAugment:1 of +msgid "Auto augmentation." +msgstr "" + +#: ../../api/data_process.rst:83::1 +msgid ":py:obj:`RandAugment `" +msgstr "" + +#: ../../api/data_process.rst:83::1 mmcls.datasets.transforms.auto_augment.RandAugment:1 of +msgid "Random augmentation." +msgstr "" + +#: ../../api/data_process.rst:84 +msgid "" +"To specify the augmentation combination (The ``policies`` argument), you can use string to specify from " +"some preset policies." +msgstr "为了指定增强组合的策略(即上述变换中的 ``policies`` 参数),你可以使用字符串从一系列预设策略中指定。" + +#: ../../api/data_process.rst:91 +msgid "Preset policy" +msgstr "预设策略" + +#: ../../api/data_process.rst:92 +msgid "Use for" +msgstr "用于" + +#: ../../api/data_process.rst:93 +msgid "Description" +msgstr "说明" + +#: ../../api/data_process.rst:94 +msgid "\"imagenet\"" +msgstr "" + +#: ../../api/data_process.rst:95 +msgid ":class:`AutoAugment`" +msgstr "" + +#: ../../api/data_process.rst:96 +msgid "Policy for ImageNet, come from `DeepVoltaire/AutoAugment`_" +msgstr "用于 ImageNet 数据集的增强组合,来自 `DeepVoltaire/AutoAugment`_ 仓库" + +#: ../../api/data_process.rst:97 +msgid "\"timm_increasing\"" +msgstr "" + +#: ../../api/data_process.rst:98 +msgid ":class:`RandAugment`" +msgstr "" + +#: ../../api/data_process.rst:99 +msgid "The ``_RAND_INCREASING_TRANSFORMS`` policy from `timm`_" +msgstr "`timm`_ 仓库中的 ``_RAND_INCREASING_TRANSFORMS`` 增强组合" + +#: ../../api/data_process.rst:104 +msgid "And you can also configure a group of policies manually by selecting from the below table." +msgstr "你还可以通过根据下表手动配置一组策略。" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`AutoContrast `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.AutoContrast:1 of +msgid "Auto adjust image contrast." +msgstr "自动调整图像对比度" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`Brightness `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.Brightness:1 of +msgid "Adjust images brightness." +msgstr "自动调整图像亮度" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`ColorTransform `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.ColorTransform:1 of +msgid "Adjust images color balance." +msgstr "自动调整图像平衡" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`Contrast `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.Contrast:1 of +msgid "Adjust images contrast." +msgstr "改变图像对比度" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`Cutout `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.Cutout:1 of +msgid "Cutout images." +msgstr "擦除部分图像区域" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`Equalize `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.Equalize:1 of +msgid "Equalize the image histogram." +msgstr "均衡化图像直方图" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`Invert `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.Invert:1 of +msgid "Invert images." 
+msgstr "反转图像色阶" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`Posterize `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.Posterize:1 of +msgid "Posterize images (reduce the number of bits for each color channel)." +msgstr "图像像素化(降低各色彩通道的比特数)" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`Rotate `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.Rotate:1 of +msgid "Rotate images." +msgstr "旋转图像" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`Sharpness `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.Sharpness:1 of +msgid "Adjust images sharpness." +msgstr "改变图像锐度" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`Shear `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.Shear:1 of +msgid "Shear images." +msgstr "图像切变" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`Solarize `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.Solarize:1 of +msgid "Solarize images (invert all pixel values above a threshold)." +msgstr "图像日光化(反转高于某一阈值的所有图像色阶)" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`SolarizeAdd `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.SolarizeAdd:1 of +msgid "SolarizeAdd images (add a certain value to pixels below a threshold)." +msgstr "图像过曝(为低于某一阈值的所有色阶增加一个固定值)" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`Translate `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.Translate:1 of +msgid "Translate images." +msgstr "平移图像" + +#: ../../api/data_process.rst:126::1 +msgid ":py:obj:`BaseAugTransform `" +msgstr "" + +#: ../../api/data_process.rst:126::1 mmcls.datasets.transforms.auto_augment.BaseAugTransform:1 of +msgid "The base class of augmentation transform for RandAugment." +msgstr "用于组合式增强的数据变换基类" + +#: ../../api/data_process.rst:128 +msgid "Formatting" +msgstr "格式化" + +#: ../../api/data_process.rst:141::1 +msgid ":py:obj:`Collect `" +msgstr "" + +#: ../../api/data_process.rst:141::1 mmcls.datasets.transforms.formatting.Collect:1 of +msgid "Collect and only reserve the specified fields." +msgstr "收集并仅保留指定字段的数据" + +#: ../../api/data_process.rst:141::1 +msgid ":py:obj:`PackClsInputs `" +msgstr "" + +#: ../../api/data_process.rst:141::1 mmcls.datasets.transforms.formatting.PackClsInputs:1 of +msgid "Pack the inputs data for the classification." +msgstr "将输入数据整理成为用于分类任务的数据格式。" + +#: ../../api/data_process.rst:141::1 +msgid ":py:obj:`ToNumpy `" +msgstr "" + +#: ../../api/data_process.rst:141::1 mmcls.datasets.transforms.formatting.ToNumpy:1 of +msgid "Convert object to :obj:`numpy.ndarray`." +msgstr "将对象转变为 :obj:`numpy.ndarray`" + +#: ../../api/data_process.rst:141::1 +msgid ":py:obj:`ToPIL `" +msgstr "" + +#: ../../api/data_process.rst:141::1 mmcls.datasets.transforms.formatting.ToPIL:1 of +msgid "Convert the image from OpenCV format to :obj:`PIL.Image.Image`." +msgstr "将图片从 OpenCV 格式转为为 :obj:`PIL.Image.Image` 格式" + +#: ../../api/data_process.rst:141::1 +msgid ":py:obj:`Transpose `" +msgstr "" + +#: ../../api/data_process.rst:141::1 mmcls.datasets.transforms.formatting.Transpose:1 of +msgid "Transpose numpy array." +msgstr "转置 NumPy 数组" + +#: ../../api/data_process.rst:143 +msgid "MMCV transforms" +msgstr "MMCV 中的数据变换" + +#: ../../api/data_process.rst:145 +msgid "" +"We also provides many transforms in MMCV. 
You can use them directly in the config files. Here are some " +"frequently used transforms, and the whole transforms list can be found in :external+mmcv:doc:`api/" +"transforms`." +msgstr "" +"我们还在 MMCV 中提供了很多数据转换类。你可以在配置文件中直接使用它们。这里我们列举了一些常用的数据变换类,完" +"整的数据变换类列表可以在 :external+mmcv:doc:`api/transforms` 中找到。" + +#: ../../api/data_process.rst:150 +msgid ":external:class:`~mmcv.transforms.LoadImageFromFile`" +msgstr "" + +#: ../../api/data_process.rst:151 +msgid "Load an image from file." +msgstr "从图片路径加载图片" + +#: ../../api/data_process.rst:152 +msgid ":external:class:`~mmcv.transforms.Resize`" +msgstr "" + +#: ../../api/data_process.rst:153 +msgid "Resize images & bbox & seg & keypoints." +msgstr "缩放图像、bbox、分割图、关键点等" + +#: ../../api/data_process.rst:154 +msgid ":external:class:`~mmcv.transforms.RandomResize`" +msgstr "" + +#: ../../api/data_process.rst:155 +msgid "Random resize images & bbox & keypoints." +msgstr "随机缩放图像、bbox、关键点等" + +#: ../../api/data_process.rst:156 +msgid ":external:class:`~mmcv.transforms.RandomFlip`" +msgstr "" + +#: ../../api/data_process.rst:157 +msgid "Flip the image & bbox & keypoints & segmentation map." +msgstr "随机翻转图像、bbox、关键点等" + +#: ../../api/data_process.rst:158 +msgid ":external:class:`~mmcv.transforms.RandomGrayscale`" +msgstr "" + +#: ../../api/data_process.rst:159 +msgid "Randomly convert image to grayscale with a probability." +msgstr "随机灰度化图像" + +#: ../../api/data_process.rst:160 +msgid ":external:class:`~mmcv.transforms.CenterCrop`" +msgstr "" + +#: ../../api/data_process.rst:161 +msgid "" +"Crop the center of the image, segmentation masks, bounding boxes and key points. If the crop area exceeds " +"the original image and ``auto_pad`` is True, the original image will be padded before cropping." +msgstr "" +"裁剪一张图像的中心区域(同时处理分割图、bbox、关键点等)。如果裁剪尺寸超出原图区域,并且指定了 " +"``auto_pad=True``,则会在裁剪之前扩充原图至合适大小" + +#: ../../api/data_process.rst:162 +msgid ":external:class:`~mmcv.transforms.Normalize`" +msgstr "" + +#: ../../api/data_process.rst:163 +msgid "Normalize the image." +msgstr "归一化图像" + +#: ../../api/data_process.rst:164 +msgid ":external:class:`~mmcv.transforms.Compose`" +msgstr "" + +#: ../../api/data_process.rst:165 +msgid "Compose multiple transforms sequentially." +msgstr "顺序组合一系列数据变换" + +#: ../../api/data_process.rst:170 +msgid "Data Preprocessors" +msgstr "数据预处理器" + +#: ../../api/data_process.rst:172 +msgid "" +"The data preprocessor is also a component to process the data before feeding data to the neural network. " +"Comparing with the data transforms, the data preprocessor is a module of the classifier, and it takes a " +"batch of data to process, which means it can use GPU and batch to accelebrate the processing." +msgstr "" +"数据预处理器也是在数据进入神经网络之前,对数据进行处理的组件。与数据变换相比,数据预处理器是模型的一个的模" +"块,并且可以获得一个批次的数据进行处理,这意味着它可以使用模型所在的设备(如 GPU),并利用批量处理,实现加" +"速。" + +#: ../../api/data_process.rst:176 +msgid "The default data preprocessor in MMClassification could do the pre-processing like following:" +msgstr "MMClassification 中使用的默认的数据预处理器可以进行以下操作:" + +#: ../../api/data_process.rst:178 +msgid "Move data to the target device." +msgstr "将数据移动到模型所在的设备" + +#: ../../api/data_process.rst:179 +msgid "Pad inputs to the maximum size of current batch." +msgstr "将不同尺寸的输入填充至统一的尺寸" + +#: ../../api/data_process.rst:180 +msgid "Stack inputs to a batch." +msgstr "将一系列输入的 tensor 组成 batch" + +#: ../../api/data_process.rst:181 mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:16 of +msgid "Convert inputs from bgr to rgb if the shape of input is (3, H, W)." 
+msgstr "如果输入的 tensor 形状为 (3, H, W),则可以执行 BGR 到 RGB 的通道转换" + +#: ../../api/data_process.rst:182 mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:17 of +msgid "Normalize image with defined std and mean." +msgstr "根据给定的均值和方差对图像进行归一化" + +#: ../../api/data_process.rst:183 +msgid "Do batch augmentations like Mixup and CutMix during training." +msgstr "在训练时进行批量数据增强,如 Mixup 和 CutMix" + +#: ../../api/data_process.rst:185 +msgid "" +"You can configure the data preprocessor by the ``data_preprocessor`` field or ``model.data_preprocessor`` " +"field in the config file. Typical usages are as below:" +msgstr "" +"你可以在配置文件的 ``data_preprocessor`` 字段,或是 ``model.data_preprocessor`` 字段对数据预处理器进行配置。" +"一个典型的用法如下:" + +#: ../../api/data_process.rst:196 +msgid "Or define in ``model.data_preprocessor`` as following:" +msgstr "或者在 ``model.data_preprocessor`` 字段配置如下:" + +#: ../../api/data_process.rst:211 +msgid "Note that the ``model.data_preprocessor`` has higher priority than ``data_preprocessor``." +msgstr "请注意如果在两处均进行了配置,``model.data_preprocessor`` 拥有更高的优先级。" + +#: ../../api/data_process.rst:219::1 +msgid ":py:obj:`ClsDataPreprocessor `" +msgstr "" + +#: ../../api/data_process.rst:219::1 mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:1 +#: of +msgid "Image pre-processor for classification tasks." +msgstr "用于分类任务的图像预处理器" + +#: ../../api/data_process.rst:223 +msgid "Batch Augmentations" +msgstr "批量数据增强" + +#: ../../api/data_process.rst:225 +msgid "" +"The batch augmentation is a component of data preprocessors. It involves multiple samples and mix them in " +"some way, such as Mixup and CutMix." +msgstr "" +"批量数据增强是数据预处理器的一个功能。它可以利用一个批次的多个样本,以某种方式进行混合增强,如 Mixup 和 " +"CutMix。" + +#: ../../api/data_process.rst:227 +msgid "" +"These augmentations are usually only used during training, therefore, we use the ``model.train_cfg`` field " +"to configure them in config files." +msgstr "这些数据增强只会在训练过程中生效,因此,我们使用 ``model.train_cfg`` 字段来配置这些功能。" + +#: ../../api/data_process.rst:241 +msgid "You can also specify the probabilities of every batch augmentation by the ``probs`` field." +msgstr "你也可以通过 ``probs`` 字段指定每一个批量数据增强的概率。" + +#: ../../api/data_process.rst:255 +msgid "Here is a list of batch augmentations can be used in MMClassification." +msgstr "这里是 MMClassification 中支持的所有批量数据增强列表。" + +#: ../../api/data_process.rst:264::1 +msgid ":py:obj:`Mixup `" +msgstr "" + +#: ../../api/data_process.rst:264::1 mmcls.models.utils.batch_augments.mixup.Mixup:1 of +msgid "Mixup batch augmentation." +msgstr "" + +#: ../../api/data_process.rst:264::1 +msgid ":py:obj:`CutMix `" +msgstr "" + +#: ../../api/data_process.rst:264::1 mmcls.models.utils.batch_augments.cutmix.CutMix:1 of +msgid "CutMix batch agumentation." +msgstr "" + +#: ../../api/data_process.rst:264::1 +msgid ":py:obj:`ResizeMix `" +msgstr "" + +#: ../../api/data_process.rst:264::1 mmcls.models.utils.batch_augments.resizemix.ResizeMix:1 of +msgid "ResizeMix Random Paste layer for a batch of data." +msgstr "" + +#: ../../api/datasets.rst:7 ../../api/datasets.rst:14 +msgid "mmcls.datasets" +msgstr "" + +#: ../../api/datasets.rst:9 +msgid "" +"The ``datasets`` package contains several usual datasets for image classification tasks and some dataset " +"wrappers." +msgstr "``dataset`` 包中包含了分类任务中常用的数据集,以及一些数据集封装。" + +#: ../../api/datasets.rst:17 +msgid "Custom Dataset" +msgstr "" + +#: mmcls.datasets.custom.CustomDataset:1 of +msgid "Custom dataset for classification." 
+msgstr "" + +#: mmcls.datasets.custom.CustomDataset:3 of +msgid "The dataset supports two kinds of annotation format." +msgstr "" + +#: mmcls.datasets.custom.CustomDataset:5 of +msgid "An annotation file is provided, and each line indicates a sample:" +msgstr "" + +#: mmcls.datasets.custom.CustomDataset:7 of +msgid "The sample files: ::" +msgstr "" + +#: mmcls.datasets.custom.CustomDataset:19 of +msgid "" +"The annotation file (the first column is the image path and the second column is the index of category): ::" +msgstr "" + +#: mmcls.datasets.custom.CustomDataset:28 of +msgid "Please specify the name of categories by the argument ``classes`` or ``metainfo``." +msgstr "" + +#: mmcls.datasets.custom.CustomDataset:31 of +msgid "The samples are arranged in the specific way: ::" +msgstr "" + +#: mmcls.datasets.custom.CustomDataset:45 of +msgid "" +"If the ``ann_file`` is specified, the dataset will be generated by the first way, otherwise, try the second " +"way." +msgstr "" + +#: mmcls.apis.inference.inference_model mmcls.apis.inference.init_model +#: mmcls.datasets.base_dataset.BaseDataset mmcls.datasets.cifar.CIFAR10 mmcls.datasets.cifar.CIFAR100 +#: mmcls.datasets.cub.CUB mmcls.datasets.custom.CustomDataset mmcls.datasets.dataset_wrappers.KFoldDataset +#: mmcls.datasets.imagenet.ImageNet mmcls.datasets.imagenet.ImageNet21k mmcls.datasets.mnist.FashionMNIST +#: mmcls.datasets.mnist.MNIST mmcls.datasets.multi_label.MultiLabelDataset +#: mmcls.datasets.transforms.auto_augment.AutoAugment mmcls.datasets.transforms.auto_augment.AutoContrast +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform mmcls.datasets.transforms.auto_augment.Brightness +#: mmcls.datasets.transforms.auto_augment.ColorTransform mmcls.datasets.transforms.auto_augment.Contrast +#: mmcls.datasets.transforms.auto_augment.Cutout mmcls.datasets.transforms.auto_augment.Equalize +#: mmcls.datasets.transforms.auto_augment.Invert mmcls.datasets.transforms.auto_augment.Posterize +#: mmcls.datasets.transforms.auto_augment.RandAugment mmcls.datasets.transforms.auto_augment.Rotate +#: mmcls.datasets.transforms.auto_augment.Sharpness mmcls.datasets.transforms.auto_augment.Shear +#: mmcls.datasets.transforms.auto_augment.Solarize mmcls.datasets.transforms.auto_augment.SolarizeAdd +#: mmcls.datasets.transforms.auto_augment.Translate mmcls.datasets.transforms.formatting.Collect +#: mmcls.datasets.transforms.formatting.PackClsInputs mmcls.datasets.transforms.formatting.ToNumpy +#: mmcls.datasets.transforms.formatting.Transpose mmcls.datasets.transforms.processing.Albumentations +#: mmcls.datasets.transforms.processing.Albumentations.transform +#: mmcls.datasets.transforms.processing.ColorJitter mmcls.datasets.transforms.processing.ColorJitter.transform +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop.transform +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop mmcls.datasets.transforms.processing.Lighting +#: mmcls.datasets.transforms.processing.Lighting.transform mmcls.datasets.transforms.processing.RandomCrop +#: mmcls.datasets.transforms.processing.RandomCrop.transform +#: mmcls.datasets.transforms.processing.RandomErasing +#: mmcls.datasets.transforms.processing.RandomErasing.transform +#: mmcls.datasets.transforms.processing.RandomResizedCrop +#: mmcls.datasets.transforms.processing.RandomResizedCrop.transform +#: mmcls.datasets.transforms.processing.ResizeEdge mmcls.datasets.transforms.processing.ResizeEdge.transform +#: 
mmcls.datasets.voc.VOC mmcls.engine.hooks.class_num_check_hook.ClassNumCheckHook.before_test +#: mmcls.engine.hooks.class_num_check_hook.ClassNumCheckHook.before_train +#: mmcls.engine.hooks.class_num_check_hook.ClassNumCheckHook.before_val +#: mmcls.engine.hooks.margin_head_hooks.SetAdaptiveMarginsHook +#: mmcls.engine.hooks.margin_head_hooks.SetAdaptiveMarginsHook.before_train +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook.after_train_epoch +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook.after_train_iter +#: mmcls.engine.hooks.visualization_hook.VisualizationHook +#: mmcls.engine.hooks.visualization_hook.VisualizationHook.after_test_iter +#: mmcls.engine.hooks.visualization_hook.VisualizationHook.after_val_iter mmcls.engine.optimizers.lamb.Lamb +#: mmcls.engine.optimizers.lamb.Lamb.step mmcls.evaluation.metrics.multi_label.AveragePrecision +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric mmcls.evaluation.metrics.single_label.Accuracy +#: mmcls.evaluation.metrics.single_label.Accuracy.calculate +#: mmcls.evaluation.metrics.single_label.Accuracy.compute_metrics +#: mmcls.evaluation.metrics.single_label.Accuracy.process +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.compute_metrics +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.process +#: mmcls.evaluation.metrics.voc_multi_label.VOCAveragePrecision +#: mmcls.evaluation.metrics.voc_multi_label.VOCMultiLabelMetric mmcls.models.backbones.alexnet.AlexNet +#: mmcls.models.backbones.conformer.Conformer mmcls.models.backbones.convmixer.ConvMixer +#: mmcls.models.backbones.convnext.ConvNeXt mmcls.models.backbones.cspnet.CSPDarkNet +#: mmcls.models.backbones.cspnet.CSPNet mmcls.models.backbones.cspnet.CSPResNeXt +#: mmcls.models.backbones.cspnet.CSPResNet mmcls.models.backbones.davit.DaViT +#: mmcls.models.backbones.deit.DistilledVisionTransformer mmcls.models.backbones.deit3.DeiT3 +#: mmcls.models.backbones.densenet.DenseNet mmcls.models.backbones.edgenext.EdgeNeXt +#: mmcls.models.backbones.efficientformer.EfficientFormer mmcls.models.backbones.efficientnet.EfficientNet +#: mmcls.models.backbones.hornet.HorNet mmcls.models.backbones.hrnet.HRNet +#: mmcls.models.backbones.inception_v3.InceptionV3 mmcls.models.backbones.lenet.LeNet5 +#: mmcls.models.backbones.mlp_mixer.MlpMixer mmcls.models.backbones.mobilenet_v2.MobileNetV2 +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2.make_layer mmcls.models.backbones.mobilenet_v3.MobileNetV3 +#: mmcls.models.backbones.mobileone.MobileOne mmcls.models.backbones.mobilevit.MobileViT +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilenetv2_layer +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilevit_layer mmcls.models.backbones.mvit.MViT +#: mmcls.models.backbones.poolformer.PoolFormer mmcls.models.backbones.regnet.RegNet +#: mmcls.models.backbones.regnet.RegNet.adjust_width_group +#: mmcls.models.backbones.regnet.RegNet.generate_regnet +#: mmcls.models.backbones.regnet.RegNet.get_stages_from_blocks +#: mmcls.models.backbones.regnet.RegNet.quantize_float mmcls.models.backbones.replknet.RepLKNet +#: mmcls.models.backbones.repmlp.RepMLPNet mmcls.models.backbones.repvgg.RepVGG +#: mmcls.models.backbones.res2net.Res2Net mmcls.models.backbones.resnest.ResNeSt +#: mmcls.models.backbones.resnet.ResNet mmcls.models.backbones.resnet_cifar.ResNet_CIFAR +#: 
mmcls.models.backbones.resnext.ResNeXt mmcls.models.backbones.seresnet.SEResNet +#: mmcls.models.backbones.seresnext.SEResNeXt mmcls.models.backbones.shufflenet_v1.ShuffleNetV1 +#: mmcls.models.backbones.shufflenet_v1.ShuffleNetV1.make_layer +#: mmcls.models.backbones.shufflenet_v2.ShuffleNetV2 mmcls.models.backbones.swin_transformer.SwinTransformer +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2 mmcls.models.backbones.t2t_vit.T2T_ViT +#: mmcls.models.backbones.timm_backbone.TIMMBackbone mmcls.models.backbones.tnt.TNT +#: mmcls.models.backbones.twins.PCPVT mmcls.models.backbones.twins.SVT mmcls.models.backbones.van.VAN +#: mmcls.models.backbones.vgg.VGG mmcls.models.backbones.vision_transformer.VisionTransformer +#: mmcls.models.classifiers.base.BaseClassifier mmcls.models.classifiers.base.BaseClassifier.extract_feat +#: mmcls.models.classifiers.base.BaseClassifier.extract_feats +#: mmcls.models.classifiers.base.BaseClassifier.forward +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.loss +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.predict +#: mmcls.models.classifiers.image.ImageClassifier mmcls.models.classifiers.image.ImageClassifier.extract_feat +#: mmcls.models.classifiers.image.ImageClassifier.forward mmcls.models.classifiers.image.ImageClassifier.loss +#: mmcls.models.classifiers.image.ImageClassifier.predict mmcls.models.classifiers.timm.TimmClassifier +#: mmcls.models.classifiers.timm.TimmClassifier.loss mmcls.models.classifiers.timm.TimmClassifier.predict +#: mmcls.models.heads.cls_head.ClsHead mmcls.models.heads.cls_head.ClsHead.loss +#: mmcls.models.heads.cls_head.ClsHead.predict mmcls.models.heads.conformer_head.ConformerHead +#: mmcls.models.heads.conformer_head.ConformerHead.predict mmcls.models.heads.deit_head.DeiTClsHead +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead.loss +#: mmcls.models.heads.linear_head.LinearClsHead mmcls.models.heads.margin_head.ArcFaceClsHead +#: mmcls.models.heads.margin_head.ArcFaceClsHead.loss +#: mmcls.models.heads.margin_head.ArcFaceClsHead.set_margins +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.loss +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.predict +#: mmcls.models.heads.multi_label_csra_head.CSRAClsHead +#: mmcls.models.heads.multi_label_linear_head.MultiLabelLinearClsHead +#: mmcls.models.heads.stacked_head.StackedLinearClsHead +#: mmcls.models.heads.vision_transformer_head.VisionTransformerClsHead +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss.forward +#: mmcls.models.losses.cross_entropy_loss.CrossEntropyLoss mmcls.models.losses.focal_loss.FocalLoss +#: mmcls.models.losses.focal_loss.FocalLoss.forward mmcls.models.losses.label_smooth_loss.LabelSmoothLoss +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss.forward mmcls.models.losses.seesaw_loss.SeesawLoss +#: mmcls.models.losses.seesaw_loss.SeesawLoss.forward mmcls.models.necks.gap.GlobalAveragePooling +#: mmcls.models.necks.gem.GeneralizedMeanPooling mmcls.models.necks.hr_fuse.HRFuseScales +#: mmcls.models.utils.attention.MultiheadAttention mmcls.models.utils.attention.ShiftWindowMSA +#: mmcls.models.utils.attention.WindowMSA mmcls.models.utils.attention.WindowMSA.forward +#: mmcls.models.utils.attention.WindowMSAV2 
mmcls.models.utils.attention.WindowMSAV2.forward +#: mmcls.models.utils.batch_augments.cutmix.CutMix +#: mmcls.models.utils.batch_augments.cutmix.CutMix.cutmix_bbox_and_lam +#: mmcls.models.utils.batch_augments.cutmix.CutMix.mix +#: mmcls.models.utils.batch_augments.cutmix.CutMix.rand_bbox +#: mmcls.models.utils.batch_augments.cutmix.CutMix.rand_bbox_minmax +#: mmcls.models.utils.batch_augments.mixup.Mixup mmcls.models.utils.batch_augments.mixup.Mixup.mix +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix.mix +#: mmcls.models.utils.channel_shuffle.channel_shuffle mmcls.models.utils.data_preprocessor.ClsDataPreprocessor +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor.forward mmcls.models.utils.embed.HybridEmbed +#: mmcls.models.utils.embed.PatchEmbed mmcls.models.utils.embed.PatchMerging +#: mmcls.models.utils.embed.PatchMerging.forward mmcls.models.utils.embed.resize_pos_embed +#: mmcls.models.utils.embed.resize_relative_position_bias_table mmcls.models.utils.helpers._ntuple +#: mmcls.models.utils.inverted_residual.InvertedResidual +#: mmcls.models.utils.inverted_residual.InvertedResidual.forward mmcls.models.utils.layer_scale.LayerScale +#: mmcls.models.utils.make_divisible.make_divisible +#: mmcls.models.utils.position_encoding.ConditionalPositionEncoding mmcls.models.utils.se_layer.SELayer +#: mmcls.utils.setup_env.register_all_modules mmcls.visualization.cls_visualizer.ClsVisualizer of +msgid "参数" +msgstr "" + +#: mmcls.datasets.custom.CustomDataset:48 mmcls.datasets.imagenet.ImageNet:6 +#: mmcls.datasets.imagenet.ImageNet21k:7 of +msgid "Annotation file path. Defaults to ''." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:14 mmcls.datasets.custom.CustomDataset:50 +#: mmcls.datasets.imagenet.ImageNet:8 mmcls.datasets.imagenet.ImageNet21k:9 +#: mmcls.datasets.multi_label.MultiLabelDataset:35 of +msgid "Meta information for dataset, such as class information. Defaults to None." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:17 mmcls.datasets.custom.CustomDataset:53 +#: mmcls.datasets.imagenet.ImageNet:11 mmcls.datasets.imagenet.ImageNet21k:12 +#: mmcls.datasets.multi_label.MultiLabelDataset:38 of +msgid "The root directory for ``data_prefix`` and ``ann_file``. Defaults to ''." +msgstr "" + +#: mmcls.datasets.custom.CustomDataset:56 of +msgid "Prefix for the data. Defaults to ''." +msgstr "" + +#: mmcls.datasets.custom.CustomDataset:58 of +msgid "" +"A sequence of allowed extensions. Defaults to ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif')." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:37 mmcls.datasets.custom.CustomDataset:61 +#: mmcls.datasets.multi_label.MultiLabelDataset:59 of +msgid "" +"Whether to load annotation during instantiation. In some cases, such as visualization, only the meta " +"information of the dataset is needed, which is not necessary to load annotation file. ``Basedataset`` can " +"skip load annotations to save time by set ``lazy_init=False``. Defaults to False." +msgstr "" + +#: mmcls.datasets.cifar.CIFAR10:20 mmcls.datasets.cifar.CIFAR100:17 mmcls.datasets.custom.CustomDataset:67 +#: mmcls.datasets.mnist.FashionMNIST:18 mmcls.datasets.mnist.MNIST:20 mmcls.datasets.voc.VOC:40 of +msgid "Other keyword arguments in :class:`BaseDataset`." +msgstr "" + +#: ../../api/datasets.rst:22 +msgid "ImageNet" +msgstr "" + +#: mmcls.datasets.imagenet.ImageNet:1 of +msgid "`ImageNet `_ Dataset." 
+msgstr "" + +#: mmcls.datasets.imagenet.ImageNet:3 of +msgid "" +"The dataset supports two kinds of annotation format. More details can be found in :class:`CustomDataset`." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:20 mmcls.datasets.imagenet.ImageNet:14 +#: mmcls.datasets.imagenet.ImageNet21k:15 mmcls.datasets.multi_label.MultiLabelDataset:41 of +msgid "Prefix for training data. Defaults to ''." +msgstr "" + +#: mmcls.datasets.imagenet.ImageNet:16 mmcls.datasets.imagenet.ImageNet21k:20 of +msgid "Other keyword arguments in :class:`CustomDataset` and :class:`BaseDataset`." +msgstr "" + +#: mmcls.datasets.imagenet.ImageNet21k:1 of +msgid "ImageNet21k Dataset." +msgstr "" + +#: mmcls.datasets.imagenet.ImageNet21k:3 of +msgid "" +"Since the dataset ImageNet21k is extremely big, cantains 21k+ classes and 1.4B files. We won't provide the " +"default categories list. Please specify it from the ``classes`` argument." +msgstr "" + +#: mmcls.datasets.imagenet.ImageNet21k:17 of +msgid "Not implement by now. Use multi label or not. Defaults to False." +msgstr "" + +#: ../../api/datasets.rst:29 +msgid "CIFAR" +msgstr "" + +#: mmcls.datasets.cifar.CIFAR10:1 of +msgid "`CIFAR10 `_ Dataset." +msgstr "" + +#: mmcls.datasets.cifar.CIFAR10:3 of +msgid "" +"This implementation is modified from https://github.com/pytorch/vision/blob/master/torchvision/datasets/" +"cifar.py" +msgstr "" + +#: mmcls.datasets.cifar.CIFAR10:6 mmcls.datasets.cifar.CIFAR100:3 mmcls.datasets.mnist.FashionMNIST:4 +#: mmcls.datasets.mnist.MNIST:6 of +msgid "Prefix for data." +msgstr "" + +#: mmcls.datasets.cifar.CIFAR10:8 mmcls.datasets.cifar.CIFAR100:5 mmcls.datasets.cub.CUB:28 +#: mmcls.datasets.mnist.FashionMNIST:6 mmcls.datasets.mnist.MNIST:8 mmcls.datasets.voc.VOC:34 of +msgid "``test_mode=True`` means in test phase. It determines to use the training set or test set." +msgstr "" + +#: mmcls.datasets.cifar.CIFAR10:11 mmcls.datasets.cifar.CIFAR100:8 mmcls.datasets.mnist.FashionMNIST:9 +#: mmcls.datasets.mnist.MNIST:11 mmcls.datasets.voc.VOC:37 of +msgid "Meta information for dataset, such as categories information. Defaults to None." +msgstr "" + +#: mmcls.datasets.cifar.CIFAR10:14 mmcls.datasets.cifar.CIFAR100:11 mmcls.datasets.mnist.FashionMNIST:12 +#: mmcls.datasets.mnist.MNIST:14 of +msgid "The root directory for ``data_prefix``. Defaults to ''." +msgstr "" + +#: mmcls.datasets.cifar.CIFAR10:17 mmcls.datasets.cifar.CIFAR100:14 mmcls.datasets.mnist.FashionMNIST:15 +#: mmcls.datasets.mnist.MNIST:17 of +msgid "Whether to download the dataset if not exists. Defaults to True." +msgstr "" + +#: mmcls.datasets.cifar.CIFAR100:1 of +msgid "`CIFAR100 `_ Dataset." +msgstr "" + +#: ../../api/datasets.rst:36 +msgid "MNIST" +msgstr "" + +#: mmcls.datasets.mnist.MNIST:1 of +msgid "`MNIST `_ Dataset." +msgstr "" + +#: mmcls.datasets.mnist.MNIST:3 of +msgid "" +"This implementation is modified from https://github.com/pytorch/vision/blob/master/torchvision/datasets/" +"mnist.py" +msgstr "" + +#: mmcls.datasets.mnist.FashionMNIST:1 of +msgid "`Fashion-MNIST `_ Dataset." +msgstr "" + +#: ../../api/datasets.rst:43 +msgid "VOC" +msgstr "" + +#: mmcls.datasets.voc.VOC:1 of +msgid "`Pascal VOC `_ Dataset." 
+msgstr "" + +#: mmcls.datasets.voc.VOC:3 of +msgid "After decompression, the dataset directory structure is as follows:" +msgstr "" + +#: mmcls.datasets.voc.VOC:5 of +msgid "VOC dataset directory: ::" +msgstr "" + +#: mmcls.datasets.voc.VOC:18 of +msgid "" +"Extra difficult label is in VOC annotations, we will use `gt_label_difficult` to record the difficult " +"labels in each sample and corresponding evaluation should take care of this field to calculate metrics. " +"Usually, difficult labels are reckoned as negative in defaults." +msgstr "" + +#: mmcls.datasets.voc.VOC:24 of +msgid "The root directory for VOC dataset." +msgstr "" + +#: mmcls.datasets.voc.VOC:26 of +msgid "" +"The path of image set, The file which lists image ids of the sub dataset, and this path is relative to " +"``data_root``." +msgstr "" + +#: mmcls.datasets.voc.VOC:30 of +msgid "" +"Prefix for data and annotation, keyword 'img_path' and 'ann_path' can be set. Defaults to be " +"``dict(img_path='JPEGImages', ann_path='Annotations')``." +msgstr "" + +#: ../../api/datasets.rst:48 +msgid "CUB" +msgstr "" + +#: mmcls.datasets.cub.CUB:1 of +msgid "The CUB-200-2011 Dataset." +msgstr "" + +#: mmcls.datasets.cub.CUB:3 of +msgid "" +"Support the `CUB-200-2011 `_ Dataset. Comparing " +"with the `CUB-200 `_ Dataset, there are much more " +"pictures in `CUB-200-2011`. After downloading and decompression, the dataset directory structure is as " +"follows." +msgstr "" + +#: mmcls.datasets.cub.CUB:8 of +msgid "CUB dataset directory: ::" +msgstr "" + +#: mmcls.datasets.cub.CUB:26 of +msgid "The root directory for CUB-200-2011 dataset." +msgstr "" + +#: mmcls.datasets.cub.CUB:31 of +msgid "Annotation file path, path relative to ``data_root``. Defaults to 'images.txt'." +msgstr "" + +#: mmcls.datasets.cub.CUB:34 of +msgid "Prefix for iamges, path relative to ``data_root``. Defaults to 'images'." +msgstr "" + +#: mmcls.datasets.cub.CUB:37 of +msgid "The label file, path relative to ``data_root``. Defaults to 'image_class_labels.txt'." +msgstr "" + +#: mmcls.datasets.cub.CUB:40 of +msgid "" +"The split file to split train and test dataset, path relative to ``data_root``. Defaults to " +"'train_test_split_file.txt'." +msgstr "" + +#: mmcls.datasets.cub.CUB:46 mmcls.datasets.transforms.auto_augment.RandAugment:44 +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:39 +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:69 mmcls.evaluation.metrics.single_label.Accuracy:32 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:68 mmcls.models.backbones.mvit.MViT:80 +#: mmcls.models.backbones.swin_transformer.SwinTransformer:75 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:78 mmcls.models.backbones.twins.PCPVT:46 +#: mmcls.models.backbones.twins.SVT:47 mmcls.models.backbones.van.VAN:50 +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:49 +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:25 +#: mmcls.models.classifiers.timm.TimmClassifier:40 mmcls.structures.cls_data_sample.ClsDataSample:21 +#: mmcls.visualization.cls_visualizer.ClsVisualizer:22 of +msgid "实际案例" +msgstr "使用示例" + +#: ../../api/datasets.rst:53 +msgid "Base classes" +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:1 of +msgid "Base dataset for image classification task." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:3 mmcls.datasets.multi_label.MultiLabelDataset:3 of +msgid "This dataset support annotation file in `OpenMMLab 2.0 style annotation format`." 
+msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:9 of +msgid "Comparing with the :class:`mmengine.BaseDataset`, this class implemented several useful methods." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:12 mmcls.datasets.multi_label.MultiLabelDataset:33 of +msgid "Annotation file path." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:22 mmcls.datasets.multi_label.MultiLabelDataset:43 of +msgid "Config for filter data. Defaults to None." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:24 of +msgid "" +"Support using first few data in annotation file to facilitate training/testing on a smaller dataset. " +"Defaults to None, which means using all ``data_infos``." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:28 mmcls.datasets.multi_label.MultiLabelDataset:49 of +msgid "" +"Whether to hold memory using serialized objects, when enabled, data loader workers can use shared RAM from " +"master process instead of making a copy. Defaults to True." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:32 of +msgid "Processing pipeline. Defaults to an empty tuple." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:34 mmcls.datasets.multi_label.MultiLabelDataset:56 of +msgid "``test_mode=True`` means in test phase. Defaults to False." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:43 mmcls.datasets.multi_label.MultiLabelDataset:65 of +msgid "" +"If ``Basedataset.prepare_data`` get a None img. The maximum extra number of cycles to get a valid image. " +"Defaults to 1000." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:47 mmcls.datasets.multi_label.MultiLabelDataset:69 of +msgid "" +"Specify names of classes. - If is string, it should be a file path, and the every line of the file is a " +"name of a class. - If is a sequence of string, every item is a name of class. - If is None, use categories " +"information in ``metainfo`` argument, annotation file or the class attribute ``METAINFO``. Defaults to " +"None." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:47 mmcls.datasets.multi_label.MultiLabelDataset:69 of +msgid "Specify names of classes." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:49 mmcls.datasets.multi_label.MultiLabelDataset:71 of +msgid "If is string, it should be a file path, and the every line of the file is a name of a class." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:51 mmcls.datasets.multi_label.MultiLabelDataset:73 of +msgid "If is a sequence of string, every item is a name of class." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:52 mmcls.datasets.multi_label.MultiLabelDataset:74 of +msgid "" +"If is None, use categories information in ``metainfo`` argument, annotation file or the class attribute " +"``METAINFO``." +msgstr "" + +#: mmcls.datasets.base_dataset.BaseDataset:55 mmcls.datasets.multi_label.MultiLabelDataset:77 +#: mmcls.models.backbones.hrnet.HRNet:23 mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:32 +#: mmcls.models.classifiers.image.ImageClassifier:23 mmcls.models.classifiers.timm.TimmClassifier:23 of +msgid "Defaults to None." +msgstr "" + +#: mmcls.datasets.multi_label.MultiLabelDataset:1 of +msgid "Multi-label Dataset." +msgstr "" + +#: mmcls.datasets.multi_label.MultiLabelDataset:9 of +msgid "The annotation format is shown as follows." +msgstr "" + +#: mmcls.datasets.multi_label.MultiLabelDataset:45 of +msgid "" +"Support using first few data in annotation file to facilitate training/testing on a smaller dataset. 
" +"Defaults to None which means using all ``data_infos``." +msgstr "" + +#: mmcls.datasets.multi_label.MultiLabelDataset:54 of +msgid "Processing pipeline. Defaults to []." +msgstr "" + +#: ../../api/datasets.rst:60 +msgid "Dataset Wrappers" +msgstr "" + +#: mmcls.datasets.dataset_wrappers.KFoldDataset:1 of +msgid "A wrapper of dataset for K-Fold cross-validation." +msgstr "" + +#: mmcls.datasets.dataset_wrappers.KFoldDataset:3 of +msgid "" +"K-Fold cross-validation divides all the samples in groups of samples, called folds, of almost equal sizes. " +"And we use k-1 of folds to do training and use the fold left to do validation." +msgstr "" + +#: mmcls.datasets.dataset_wrappers.KFoldDataset:7 of +msgid "The dataset to be divided" +msgstr "" + +#: mmcls.datasets.dataset_wrappers.KFoldDataset:10 of +msgid "The fold used to do validation. Defaults to 0." +msgstr "" + +#: mmcls.datasets.dataset_wrappers.KFoldDataset:12 of +msgid "The number of all folds. Defaults to 5." +msgstr "" + +#: mmcls.datasets.dataset_wrappers.KFoldDataset:14 of +msgid "Use the training dataset or validation dataset. Defaults to False." +msgstr "" + +#: mmcls.datasets.dataset_wrappers.KFoldDataset:17 of +msgid "The seed to shuffle the dataset before splitting. If None, not shuffle the dataset. Defaults to None." +msgstr "" + +#: ../../api/datasets.rst:64 +msgid "The dataset wrappers in the MMEngine can be directly used in MMClassification." +msgstr "" + +#: ../../api/datasets.rst:68 +msgid ":class:`~mmengine.dataset.ConcatDataset`" +msgstr "" + +#: ../../api/datasets.rst:69 +msgid "A wrapper of concatenated dataset." +msgstr "" + +#: ../../api/datasets.rst:70 +msgid ":class:`~mmengine.dataset.RepeatDataset`" +msgstr "" + +#: ../../api/datasets.rst:71 +msgid "A wrapper of repeated dataset." +msgstr "" + +#: ../../api/datasets.rst:72 +msgid ":class:`~mmengine.dataset.ClassBalancedDataset`" +msgstr "" + +#: ../../api/datasets.rst:73 +msgid "A wrapper of class balanced dataset." +msgstr "" + +#: ../../api/engine.rst:7 ../../api/engine.rst:19 +msgid "mmcls.engine" +msgstr "" + +#: ../../api/engine.rst:9 +msgid "" +"This package includes some runtime components, including hooks, runners, optimizers and loops. These " +"components are useful in classification tasks but not supported by MMEngine yet." +msgstr "" +"该包中包含了一些运行时组件,如钩子(hook)、执行器(runner)、优化器(optimizer)和循环执行器(loop)。这些" +"组件在分类任务中需要用到,而还未被 MMEngine 支持。" + +#: ../../api/engine.rst:14 +msgid "Some components may be moved to MMEngine in the future." +msgstr "部分组件未来可能会被移动到 MMEngine 中。" + +#: ../../api/engine.rst:24 +msgid "Hooks" +msgstr "" + +#: ../../api/engine.rst:36::1 +msgid ":py:obj:`ClassNumCheckHook `" +msgstr "" + +#: ../../api/engine.rst:36::1 mmcls.engine.hooks.class_num_check_hook.ClassNumCheckHook:1 of +msgid "Class Number Check HOOK." +msgstr "" + +#: ../../api/engine.rst:36::1 +msgid ":py:obj:`PreciseBNHook `" +msgstr "" + +#: ../../api/engine.rst:36::1 mmcls.engine.hooks.precise_bn_hook.PreciseBNHook:1 of +msgid "Precise BN hook." +msgstr "" + +#: ../../api/engine.rst:36::1 +msgid ":py:obj:`VisualizationHook `" +msgstr "" + +#: ../../api/engine.rst:36::1 +msgid "Classification Visualization Hook." +msgstr "" + +#: ../../api/engine.rst:36::1 +msgid ":py:obj:`PrepareProtoBeforeValLoopHook `" +msgstr "" + +#: ../../api/engine.rst:36::1 mmcls.engine.hooks.retriever_hooks.PrepareProtoBeforeValLoopHook:1 +#: of +msgid "The hook to prepare the prototype in retrievers." 
+msgstr "" + +#: ../../api/engine.rst:36::1 +msgid ":py:obj:`SetAdaptiveMarginsHook `" +msgstr "" + +#: ../../api/engine.rst:36::1 mmcls.engine.hooks.margin_head_hooks.SetAdaptiveMarginsHook:1 of +msgid "Set adaptive-margins in ArcFaceClsHead based on the power of category-wise count." +msgstr "" + +#: ../../api/engine.rst:40 +msgid "Optimizers" +msgstr "" + +#: ../../api/engine.rst:47::1 +msgid ":py:obj:`Lamb `" +msgstr "" + +#: ../../api/engine.rst:47::1 mmcls.engine.optimizers.lamb.Lamb:1 of +msgid "A pure pytorch variant of FuseLAMB (NvLamb variant) optimizer." +msgstr "" + +#: ../../api/evaluation.rst:7 ../../api/evaluation.rst:14 +msgid "mmcls.evaluation" +msgstr "" + +#: ../../api/evaluation.rst:9 +msgid "This package includes metrics and evaluators for classification tasks." +msgstr "该包中包含了用于分类任务的一系列评测指标及评测器。" + +#: ../../api/evaluation.rst:17 +msgid "Single Label Metric" +msgstr "" + +#: ../../api/evaluation.rst:26::1 +msgid ":py:obj:`Accuracy `" +msgstr "" + +#: ../../api/evaluation.rst:26::1 mmcls.evaluation.metrics.single_label.Accuracy:1 of +msgid "Accuracy evaluation metric." +msgstr "" + +#: ../../api/evaluation.rst:26::1 +msgid ":py:obj:`SingleLabelMetric `" +msgstr "" + +#: ../../api/evaluation.rst:26::1 mmcls.evaluation.metrics.single_label.SingleLabelMetric:1 of +msgid "A collection of precision, recall, f1-score and support for single-label tasks." +msgstr "" + +#: ../../api/evaluation.rst:28 +msgid "Multi Label Metric" +msgstr "" + +#: ../../api/evaluation.rst:36::1 +msgid ":py:obj:`AveragePrecision `" +msgstr "" + +#: ../../api/evaluation.rst:36::1 mmcls.evaluation.metrics.multi_label.AveragePrecision:1 of +msgid "Calculate the average precision with respect of classes." +msgstr "" + +#: ../../api/evaluation.rst:36::1 +msgid ":py:obj:`MultiLabelMetric `" +msgstr "" + +#: ../../api/evaluation.rst:36::1 mmcls.evaluation.metrics.multi_label.MultiLabelMetric:1 of +msgid "A collection of precision, recall, f1-score and support for multi-label tasks." +msgstr "" + +#: ../../api/evaluation.rst:36::1 +msgid ":py:obj:`VOCAveragePrecision `" +msgstr "" + +#: ../../api/evaluation.rst:36::1 mmcls.evaluation.metrics.voc_multi_label.VOCAveragePrecision:1 +#: of +msgid "Calculate the average precision with respect of classes for VOC dataset." +msgstr "" + +#: ../../api/evaluation.rst:36::1 +msgid ":py:obj:`VOCMultiLabelMetric `" +msgstr "" + +#: ../../api/evaluation.rst:36::1 mmcls.evaluation.metrics.voc_multi_label.VOCMultiLabelMetric:1 +#: of +msgid "" +"A collection of metrics for multi-label multi-class classification task based on confusion matrix for VOC " +"dataset." +msgstr "" + +#: ../../api/generated/mmcls.apis.inference_model.rst:2 +msgid "mmcls.apis.inference\\_model" +msgstr "" + +#: mmcls.apis.inference.inference_model:3 of +msgid "The loaded classifier." +msgstr "" + +#: mmcls.apis.inference.inference_model:5 of +msgid "The image filename or loaded image." 
+msgstr "" + +#: mmcls.apis.inference.inference_model mmcls.apis.inference.init_model +#: mmcls.datasets.transforms.processing.Albumentations.albu_builder +#: mmcls.datasets.transforms.processing.Albumentations.mapper +#: mmcls.datasets.transforms.processing.Albumentations.transform +#: mmcls.datasets.transforms.processing.ColorJitter.transform +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop.transform +#: mmcls.datasets.transforms.processing.Lighting.transform +#: mmcls.datasets.transforms.processing.RandomCrop.transform +#: mmcls.datasets.transforms.processing.RandomErasing.transform +#: mmcls.datasets.transforms.processing.RandomResizedCrop.transform +#: mmcls.datasets.transforms.processing.ResizeEdge.transform +#: mmcls.evaluation.metrics.single_label.Accuracy.calculate +#: mmcls.evaluation.metrics.single_label.Accuracy.compute_metrics +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.compute_metrics +#: mmcls.models.backbones.regnet.RegNet.adjust_width_group +#: mmcls.models.backbones.regnet.RegNet.generate_regnet +#: mmcls.models.backbones.regnet.RegNet.get_stages_from_blocks +#: mmcls.models.backbones.regnet.RegNet.quantize_float +#: mmcls.models.classifiers.base.BaseClassifier.extract_feats +#: mmcls.models.classifiers.base.BaseClassifier.forward +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.loss +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.predict +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat +#: mmcls.models.classifiers.image.ImageClassifier.forward mmcls.models.classifiers.image.ImageClassifier.loss +#: mmcls.models.classifiers.timm.TimmClassifier.loss mmcls.models.classifiers.timm.TimmClassifier.predict +#: mmcls.models.heads.cls_head.ClsHead.loss mmcls.models.heads.cls_head.ClsHead.predict +#: mmcls.models.heads.conformer_head.ConformerHead.predict +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead.loss +#: mmcls.models.heads.margin_head.ArcFaceClsHead.loss +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.loss +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.predict +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss.forward mmcls.models.losses.focal_loss.FocalLoss.forward +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss.forward +#: mmcls.models.losses.seesaw_loss.SeesawLoss.forward mmcls.models.utils.batch_augments.cutmix.CutMix.mix +#: mmcls.models.utils.batch_augments.mixup.Mixup.mix mmcls.models.utils.batch_augments.resizemix.ResizeMix.mix +#: mmcls.models.utils.channel_shuffle.channel_shuffle +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor.forward +#: mmcls.models.utils.embed.PatchMerging.forward mmcls.models.utils.embed.resize_pos_embed +#: mmcls.models.utils.embed.resize_relative_position_bias_table +#: mmcls.models.utils.inverted_residual.InvertedResidual.forward +#: mmcls.models.utils.make_divisible.make_divisible of +msgid "返回" +msgstr "" + +#: mmcls.apis.inference.inference_model:8 of +msgid "The classification results that contains `class_name`, `pred_label` and `pred_score`." +msgstr "" + +#: mmcls.apis.inference.inference_model:10 of +msgid "The classification results that contains" +msgstr "" + +#: mmcls.apis.inference.inference_model:11 of +msgid "`class_name`, `pred_label` and `pred_score`." 
+msgstr "" + +#: mmcls.apis.inference.inference_model mmcls.apis.inference.init_model +#: mmcls.datasets.transforms.processing.Albumentations.albu_builder +#: mmcls.datasets.transforms.processing.Albumentations.mapper +#: mmcls.datasets.transforms.processing.Albumentations.transform +#: mmcls.datasets.transforms.processing.ColorJitter.transform +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop.transform +#: mmcls.datasets.transforms.processing.Lighting.transform +#: mmcls.datasets.transforms.processing.RandomCrop.transform +#: mmcls.datasets.transforms.processing.RandomErasing.transform +#: mmcls.datasets.transforms.processing.RandomResizedCrop.transform +#: mmcls.datasets.transforms.processing.ResizeEdge.transform +#: mmcls.evaluation.metrics.single_label.Accuracy.calculate +#: mmcls.evaluation.metrics.single_label.Accuracy.compute_metrics +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.compute_metrics +#: mmcls.models.backbones.regnet.RegNet.adjust_width_group +#: mmcls.models.backbones.regnet.RegNet.generate_regnet +#: mmcls.models.backbones.regnet.RegNet.get_stages_from_blocks +#: mmcls.models.backbones.regnet.RegNet.quantize_float +#: mmcls.models.classifiers.base.BaseClassifier.extract_feats +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.loss +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.predict +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat +#: mmcls.models.classifiers.image.ImageClassifier.loss mmcls.models.classifiers.timm.TimmClassifier.loss +#: mmcls.models.classifiers.timm.TimmClassifier.predict mmcls.models.heads.cls_head.ClsHead.loss +#: mmcls.models.heads.cls_head.ClsHead.predict mmcls.models.heads.conformer_head.ConformerHead.predict +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead.loss +#: mmcls.models.heads.margin_head.ArcFaceClsHead.loss +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.loss +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.predict +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss.forward mmcls.models.losses.focal_loss.FocalLoss.forward +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss.forward +#: mmcls.models.losses.seesaw_loss.SeesawLoss.forward mmcls.models.utils.batch_augments.cutmix.CutMix.mix +#: mmcls.models.utils.batch_augments.mixup.Mixup.mix mmcls.models.utils.batch_augments.resizemix.ResizeMix.mix +#: mmcls.models.utils.channel_shuffle.channel_shuffle +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor.forward +#: mmcls.models.utils.embed.PatchMerging.forward mmcls.models.utils.embed.resize_pos_embed +#: mmcls.models.utils.embed.resize_relative_position_bias_table +#: mmcls.models.utils.inverted_residual.InvertedResidual.forward +#: mmcls.models.utils.make_divisible.make_divisible of +msgid "返回类型" +msgstr "" + +#: ../../api/generated/mmcls.apis.init_model.rst:2 +msgid "mmcls.apis.init\\_model" +msgstr "" + +#: mmcls.apis.inference.init_model:3 of +msgid "Config file path or the config object." +msgstr "" + +#: mmcls.apis.inference.init_model:6 of +msgid "Checkpoint path. If left as None, the model will not load any weights." +msgstr "" + +#: mmcls.apis.inference.init_model:9 of +msgid "Options to override some settings in the used config." +msgstr "" + +#: mmcls.apis.inference.init_model:12 of +msgid "The constructed classifier." 
+msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Albumentations.rst:7 +msgid "Albumentations" +msgstr "" + +#: mmcls.datasets.transforms.formatting.Collect:3 mmcls.datasets.transforms.formatting.PackClsInputs:3 +#: mmcls.datasets.transforms.formatting.ToNumpy:3 mmcls.datasets.transforms.formatting.ToPIL:3 +#: mmcls.datasets.transforms.formatting.Transpose:3 mmcls.datasets.transforms.processing.Albumentations:3 +#: mmcls.datasets.transforms.processing.ColorJitter:7 +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:3 +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:3 +#: mmcls.datasets.transforms.processing.Lighting:3 mmcls.datasets.transforms.processing.RandomCrop:3 +#: mmcls.datasets.transforms.processing.RandomErasing:3 +#: mmcls.datasets.transforms.processing.RandomResizedCrop:7 mmcls.datasets.transforms.processing.ResizeEdge:3 +#: of +msgid "**Required Keys:**" +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:5 mmcls.datasets.transforms.formatting.ToPIL:5 +#: mmcls.datasets.transforms.formatting.ToPIL:9 mmcls.datasets.transforms.processing.Albumentations:5 +#: mmcls.datasets.transforms.processing.Albumentations:9 mmcls.datasets.transforms.processing.ColorJitter:9 +#: mmcls.datasets.transforms.processing.ColorJitter:13 +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:5 +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:9 +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:5 +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:9 +#: mmcls.datasets.transforms.processing.Lighting:5 mmcls.datasets.transforms.processing.Lighting:9 +#: mmcls.datasets.transforms.processing.RandomCrop:5 mmcls.datasets.transforms.processing.RandomCrop:9 +#: mmcls.datasets.transforms.processing.RandomErasing:5 mmcls.datasets.transforms.processing.RandomErasing:9 +#: mmcls.datasets.transforms.processing.RandomResizedCrop:9 +#: mmcls.datasets.transforms.processing.RandomResizedCrop:13 mmcls.datasets.transforms.processing.ResizeEdge:5 +#: mmcls.datasets.transforms.processing.ResizeEdge:9 of +msgid "img" +msgstr "" + +#: mmcls.datasets.transforms.formatting.ToNumpy:7 mmcls.datasets.transforms.formatting.ToPIL:7 +#: mmcls.datasets.transforms.formatting.Transpose:7 mmcls.datasets.transforms.processing.Albumentations:7 +#: mmcls.datasets.transforms.processing.ColorJitter:11 +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:7 +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:7 +#: mmcls.datasets.transforms.processing.Lighting:7 mmcls.datasets.transforms.processing.RandomCrop:7 +#: mmcls.datasets.transforms.processing.RandomErasing:7 +#: mmcls.datasets.transforms.processing.RandomResizedCrop:11 mmcls.datasets.transforms.processing.ResizeEdge:7 +#: of +msgid "**Modified Keys:**" +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations:10 +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:10 +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:10 +#: mmcls.datasets.transforms.processing.RandomCrop:10 +#: mmcls.datasets.transforms.processing.RandomResizedCrop:14 +#: mmcls.datasets.transforms.processing.ResizeEdge:10 of +msgid "img_shape" +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations:12 of +msgid "" +"Adds custom transformations from albumentations library. More details can be found in `Albumentations " +"`_. 
An example of ``transforms`` is as followed:" +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations:42 of +msgid "List of albumentations transform configs." +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations:44 of +msgid "" +"Mapping of mmcls to albumentations fields, in format {'input key':'albumentation-style key'}. Defaults to " +"None." +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations:50 mmcls.models.backbones.cspnet.CSPDarkNet:30 +#: mmcls.models.backbones.cspnet.CSPNet:63 mmcls.models.backbones.cspnet.CSPResNeXt:28 +#: mmcls.models.backbones.cspnet.CSPResNet:28 mmcls.models.backbones.efficientformer.EfficientFormer:53 +#: mmcls.models.backbones.hrnet.HRNet:52 mmcls.models.backbones.inception_v3.InceptionV3:23 +#: mmcls.models.backbones.mobileone.MobileOne:48 mmcls.models.backbones.regnet.RegNet:45 +#: mmcls.models.backbones.res2net.Res2Net:56 mmcls.models.backbones.resnet.ResNet:54 +#: mmcls.models.backbones.seresnet.SEResNet:56 mmcls.models.heads.margin_head.ArcFaceClsHead:9 of +msgid "示例" +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations.albu_builder:1 of +msgid "Import a module from albumentations." +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations.albu_builder:3 of +msgid "" +"It inherits some of :func:`build_from_cfg` logic. :param cfg: Config dict. It should at least contain the " +"key \"type\". :type cfg: dict" +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations.albu_builder:7 of +msgid "The constructed object." +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations.mapper:1 of +msgid "Dictionary mapper." +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations.mapper:3 of +msgid "" +"Renames keys according to keymap provided. :param d: old dict :type d: dict :param keymap: " +"{'old_key':'new_key'} :type keymap: dict" +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations.mapper:9 of +msgid "new dict." +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations.transform:1 of +msgid "Transform function to perform albumentations transforms." +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations.transform:3 +#: mmcls.datasets.transforms.processing.ColorJitter.transform:3 +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop.transform:3 +#: mmcls.datasets.transforms.processing.Lighting.transform:3 +#: mmcls.datasets.transforms.processing.RandomCrop.transform:3 +#: mmcls.datasets.transforms.processing.RandomResizedCrop.transform:3 +#: mmcls.datasets.transforms.processing.ResizeEdge.transform:3 of +msgid "Result dict from loading pipeline." +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations.transform:6 of +msgid "Transformed results, 'img' and 'img_shape' keys are updated in result dict." +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations.transform:8 of +msgid "Transformed results, 'img' and 'img_shape' keys are" +msgstr "" + +#: mmcls.datasets.transforms.processing.Albumentations.transform:9 of +msgid "updated in result dict." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.AutoAugment.rst:7 +msgid "AutoAugment" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.AutoAugment:3 of +msgid "" +"This data augmentation is proposed in `AutoAugment: Learning Augmentation Policies from Data `_." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.AutoAugment:6 of +msgid "" +"The policies of auto augmentation. 
If string, use preset policies collection like \"imagenet\". If list, " +"Each item is a sub policies, composed by several augmentation policy dicts. When AutoAugment is called, a " +"random sub policies in ``policies`` will be selected to augment images." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.AutoAugment:12 mmcls.datasets.transforms.auto_augment.RandAugment:38 +#: of +msgid "" +"Configs of hyperparameters. Hyperparameters will be used in policies that require these arguments if these " +"arguments are not set in policy dicts. Defaults to ``dict(pad_val=128)``." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.AutoContrast.rst:7 +msgid "AutoContrast" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.AutoContrast:3 of +msgid "The probability for performing auto contrast therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.AutoContrast:6 mmcls.datasets.transforms.auto_augment.Brightness:15 +#: mmcls.datasets.transforms.auto_augment.ColorTransform:15 mmcls.datasets.transforms.auto_augment.Cutout:15 +#: mmcls.datasets.transforms.auto_augment.Equalize:6 mmcls.datasets.transforms.auto_augment.Invert:6 +#: mmcls.datasets.transforms.auto_augment.Posterize:11 mmcls.datasets.transforms.auto_augment.Rotate:27 +#: mmcls.datasets.transforms.auto_augment.Sharpness:15 mmcls.datasets.transforms.auto_augment.Shear:23 +#: mmcls.datasets.transforms.auto_augment.Solarize:10 mmcls.datasets.transforms.auto_augment.SolarizeAdd:13 +#: mmcls.datasets.transforms.auto_augment.Translate:25 of +msgid "Other keyword arguments of :class:`BaseAugTransform`." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.AutoContrast.transform:1 +#: mmcls.datasets.transforms.auto_augment.Brightness.transform:1 +#: mmcls.datasets.transforms.auto_augment.ColorTransform.transform:1 +#: mmcls.datasets.transforms.auto_augment.Contrast.transform:1 +#: mmcls.datasets.transforms.auto_augment.Cutout.transform:1 +#: mmcls.datasets.transforms.auto_augment.Equalize.transform:1 +#: mmcls.datasets.transforms.auto_augment.Invert.transform:1 +#: mmcls.datasets.transforms.auto_augment.Posterize.transform:1 +#: mmcls.datasets.transforms.auto_augment.Rotate.transform:1 +#: mmcls.datasets.transforms.auto_augment.Sharpness.transform:1 +#: mmcls.datasets.transforms.auto_augment.Shear.transform:1 +#: mmcls.datasets.transforms.auto_augment.Solarize.transform:1 +#: mmcls.datasets.transforms.auto_augment.SolarizeAdd.transform:1 +#: mmcls.datasets.transforms.auto_augment.Translate.transform:1 of +msgid "Apply transform to results." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.BaseAugTransform.rst:7 +msgid "BaseAugTransform" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform:3 of +msgid "" +"This class provides several common attributions and methods to support the magnitude level mapping and " +"magnitude level randomness in :class:`RandAugment`." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform:7 of +msgid "Magnitude level." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform:9 of +msgid "" +"For augmentation have magnitude argument, maybe \"magnitude\", \"angle\" or other, you can specify the " +"magnitude level mapping range to generate the magnitude argument. For example, assume ``total_level`` is " +"10, ``magnitude_level=3`` specify magnitude is 3 if ``magnitude_range=(0, 10)`` while specify magnitude is " +"7 if ``magnitude_range=(10, 0)``. Defaults to None." 
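The level-to-magnitude mapping described just above follows the linear rule spelled out later in the RandAugment notes (magnitude = magnitude_level / total_level * (val2 - val1) + val1). A small standalone sketch of that arithmetic, independent of the mmcls classes:

    def map_magnitude(magnitude_level, magnitude_range, total_level=10):
        # Linear mapping from an abstract level onto the transform's own scale.
        val1, val2 = magnitude_range
        return magnitude_level / total_level * (val2 - val1) + val1

    print(map_magnitude(3, (0, 10)))   # 3.0 -- increasing range keeps the level as-is
    print(map_magnitude(3, (10, 0)))   # 7.0 -- reversed range flips the scale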
+msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform:17 of +msgid "" +"Deviation of magnitude noise applied. - If positive number, the magnitude obeys normal distribution :" +"math:`\\mathcal{N}(magnitude, magnitude_std)`. - If 0 or negative number, magnitude remains unchanged. - If " +"str \"inf\", the magnitude obeys uniform distribution :math:`Uniform(min, magnitude)`. Defaults to 0." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform:17 +#: mmcls.datasets.transforms.auto_augment.RandAugment:27 of +msgid "Deviation of magnitude noise applied." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform:19 of +msgid "" +"If positive number, the magnitude obeys normal distribution :math:`\\mathcal{N}(magnitude, magnitude_std)`." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform:21 +#: mmcls.datasets.transforms.auto_augment.RandAugment:31 of +msgid "If 0 or negative number, magnitude remains unchanged." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform:22 +#: mmcls.datasets.transforms.auto_augment.RandAugment:32 of +msgid "If str \"inf\", the magnitude obeys uniform distribution :math:`Uniform(min, magnitude)`." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform:25 of +msgid "Defaults to 0." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform:27 +#: mmcls.datasets.transforms.auto_augment.RandAugment:35 of +msgid "Total level for the magnitude. Defaults to 10." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform:30 of +msgid "The probability for performing transformation therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform:33 of +msgid "The probability that turns the magnitude negative, which should be in range [0,1]. Defaults to 0." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.BaseAugTransform.extra_repr:1 of +msgid "Extra repr string when auto-generating magnitude is enabled." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Brightness.rst:7 +msgid "Brightness" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Brightness:3 of +msgid "" +"The magnitude used for adjusting brightness. A positive magnitude would enhance the brightness and a " +"negative magnitude would make the image darker. A magnitude=0 gives the origin img. If None, generate from " +"``magnitude_range``, see :class:`BaseAugTransform`. Defaults to None." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Brightness:9 of +msgid "" +"The probability for performing brightness adjusting therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Brightness:12 +#: mmcls.datasets.transforms.auto_augment.ColorTransform:12 mmcls.datasets.transforms.auto_augment.Contrast:12 +#: mmcls.datasets.transforms.auto_augment.Sharpness:12 mmcls.datasets.transforms.auto_augment.Shear:17 +#: mmcls.datasets.transforms.auto_augment.Translate:19 of +msgid "The probability that turns the magnitude negative, which should be in range [0,1]. Defaults to 0.5." 
+msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Collect.rst:7 +msgid "Collect" +msgstr "" + +#: mmcls.datasets.transforms.formatting.Collect:5 mmcls.datasets.transforms.formatting.Transpose:5 +#: mmcls.datasets.transforms.formatting.Transpose:9 of +msgid "``*keys``" +msgstr "" + +#: mmcls.datasets.transforms.formatting.Collect:7 mmcls.datasets.transforms.formatting.PackClsInputs:9 of +msgid "**Deleted Keys:**" +msgstr "" + +#: mmcls.datasets.transforms.formatting.Collect:9 of +msgid "All keys except those in the argument ``*keys``." +msgstr "" + +#: mmcls.datasets.transforms.formatting.Collect:11 of +msgid "The keys of the fields to be collected." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.ColorJitter.rst:7 +msgid "ColorJitter" +msgstr "" + +#: mmcls.datasets.transforms.processing.ColorJitter:3 of +msgid "" +"Modified from https://github.com/pytorch/vision/blob/main/torchvision/transforms/transforms.py Licensed " +"under the BSD 3-Clause License." +msgstr "" + +#: mmcls.datasets.transforms.processing.ColorJitter:15 of +msgid "" +"How much to jitter brightness. brightness_factor is chosen uniformly from ``[max(0, 1 - brightness), 1 + " +"brightness]`` or the given ``[min, max]``. Should be non negative numbers. Defaults to 0." +msgstr "" + +#: mmcls.datasets.transforms.processing.ColorJitter:20 of +msgid "" +"How much to jitter contrast. contrast_factor is chosen uniformly from ``[max(0, 1 - contrast), 1 + " +"contrast]`` or the given ``[min, max]``. Should be non negative numbers. Defaults to 0." +msgstr "" + +#: mmcls.datasets.transforms.processing.ColorJitter:25 of +msgid "" +"How much to jitter saturation. saturation_factor is chosen uniformly from ``[max(0, 1 - saturation), 1 + " +"saturation]`` or the given ``[min, max]``. Should be non negative numbers. Defaults to 0." +msgstr "" + +#: mmcls.datasets.transforms.processing.ColorJitter:30 of +msgid "" +"How much to jitter hue. hue_factor is chosen uniformly from ``[-hue, hue]`` (0 <= hue <= 0.5) or the given " +"``[min, max]`` (-0.5 <= min <= max <= 0.5). Defaults to 0." +msgstr "" + +#: mmcls.datasets.transforms.processing.ColorJitter.transform:1 +#: mmcls.datasets.transforms.processing.Lighting.transform:1 +#: mmcls.datasets.transforms.processing.ResizeEdge.transform:1 of +msgid "Transform function to resize images." +msgstr "" + +#: mmcls.datasets.transforms.processing.ColorJitter.transform:6 of +msgid "ColorJitter results, 'img' key is updated in result dict." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.ColorTransform.rst:7 +msgid "ColorTransform" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.ColorTransform:3 of +msgid "" +"The magnitude used for color transform. A positive magnitude would enhance the color and a negative " +"magnitude would make the image grayer. A magnitude=0 gives the origin img. If None, generate from " +"``magnitude_range``, see :class:`BaseAugTransform`. Defaults to None." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.ColorTransform:9 of +msgid "The probability for performing ColorTransform therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Contrast.rst:7 +msgid "Contrast" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Contrast:3 of +msgid "" +"The magnitude used for adjusting contrast. A positive magnitude would enhance the contrast and a negative " +"magnitude would make the image grayer. A magnitude=0 gives the origin img. 
If None, generate from " +"``magnitude_range``, see :class:`BaseAugTransform`. Defaults to None." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Contrast:9 of +msgid "" +"The probability for performing contrast adjusting therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Cutout.rst:7 +msgid "Cutout" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Cutout:3 of +msgid "" +"Expected cutout shape (h, w). If given as a single value, the value will be used for both h and w. If None, " +"generate from ``magnitude_range``, see :class:`BaseAugTransform`. Defaults to None." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Cutout:8 of +msgid "" +"Pixel pad_val value for constant fill. If it is a sequence, it must have the same length with the image " +"channels. Defaults to 128." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Cutout:12 of +msgid "The probability for performing cutout therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.EfficientNetCenterCrop.rst:7 +msgid "EfficientNetCenterCrop" +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:12 of +msgid "Expected size after cropping with the format of (h, w)." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:15 +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:18 of +msgid "The crop padding parameter in efficientnet style center crop. Defaults to 32." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:18 of +msgid "" +"Interpolation method, accepted values are 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Only valid " +"if ``efficientnet_style`` is True. Defaults to 'bicubic'." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:22 of +msgid "" +"The image resize backend type, accepted values are `cv2` and `pillow`. Only valid if efficientnet style is " +"True. Defaults to `cv2`." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:28 +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead:17 +#: mmcls.models.heads.multi_label_linear_head.MultiLabelLinearClsHead:17 +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:30 of +msgid "提示" +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:29 of +msgid "If the image is smaller than the crop size, return the original image." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:31 of +msgid "The pipeline will be to first to perform the center crop with the ``crop_size_`` as:" +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:34 of +msgid "" +"\\text{crop_size_} = \\frac{\\text{crop_size}}{\\text{crop_size} +\n" +"\\text{crop_padding}} \\times \\text{short_edge}" +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop:39 of +msgid "And then the pipeline resizes the img to the input crop size." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop.transform:1 +#: mmcls.datasets.transforms.processing.RandomResizedCrop.transform:1 of +msgid "Transform function to randomly resized crop images." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop.transform:6 of +msgid "" +"EfficientNet style center cropped results, 'img_shape' key in result dict is updated according to crop " +"size." 
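To make the two-step EfficientNet-style center crop above concrete, here is the arithmetic for a hypothetical crop_size of 224 with the default crop_padding of 32 on an image whose short edge is 300 px:

    crop_size, crop_padding, short_edge = 224, 32, 300
    # Step 1: intermediate center-crop size derived from the short edge.
    crop_size_ = crop_size / (crop_size + crop_padding) * short_edge  # 224/256 * 300 = 262.5
    # Step 2: the pipeline then resizes the ~262 px crop back to the requested 224.
    print(round(crop_size_), crop_size)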
+msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop.transform:8 of +msgid "EfficientNet style center cropped results, 'img_shape'" +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetCenterCrop.transform:9 +#: mmcls.datasets.transforms.processing.RandomCrop.transform:9 +#: mmcls.datasets.transforms.processing.RandomResizedCrop.transform:9 of +msgid "key in result dict is updated according to crop size." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.EfficientNetRandomCrop.rst:7 +msgid "EfficientNetRandomCrop" +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:12 of +msgid "Desired output scale of the crop. Only int size is accepted, a square crop (size, size) is made." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:15 of +msgid "Minimum ratio of the cropped area to the original area. Defaults to 0.1." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:21 +#: mmcls.datasets.transforms.processing.RandomResizedCrop:20 of +msgid "Range of the random size of the cropped image compared to the original image. Defaults to (0.08, 1.0)." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:24 +#: mmcls.datasets.transforms.processing.RandomResizedCrop:23 of +msgid "" +"Range of the random aspect ratio of the cropped image compared to the original image. Defaults to (3. / 4., " +"4. / 3.)." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:28 +#: mmcls.datasets.transforms.processing.RandomResizedCrop:27 of +msgid "Maximum number of attempts before falling back to Central Crop. Defaults to 10." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:31 of +msgid "" +"Interpolation method, accepted values are 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to " +"'bicubic'." +msgstr "" + +#: mmcls.datasets.transforms.processing.EfficientNetRandomCrop:35 +#: mmcls.datasets.transforms.processing.RandomResizedCrop:34 of +msgid "The image resize backend type, accepted values are 'cv2' and 'pillow'. Defaults to 'cv2'." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Equalize.rst:7 +msgid "Equalize" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Equalize:3 of +msgid "The probability for performing equalize therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Invert.rst:7 +msgid "Invert" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Invert:3 of +msgid "The probability for performing invert therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Lighting.rst:7 +msgid "Lighting" +msgstr "" + +#: mmcls.datasets.transforms.processing.Lighting:11 of +msgid "the eigenvalue of the convariance matrix of pixel values, respectively." +msgstr "" + +#: mmcls.datasets.transforms.processing.Lighting:14 of +msgid "the eigenvector of the convariance matrix of pixel values, respectively." +msgstr "" + +#: mmcls.datasets.transforms.processing.Lighting:17 of +msgid "The standard deviation for distribution of alpha. Defaults to 0.1." +msgstr "" + +#: mmcls.datasets.transforms.processing.Lighting:20 of +msgid "Whether to convert img to rgb. Defaults to False." +msgstr "" + +#: mmcls.datasets.transforms.processing.Lighting.transform:6 of +msgid "Lightinged results, 'img' key is updated in result dict." 
+msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.PackClsInputs.rst:7 +msgid "PackClsInputs" +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:6 of +msgid "gt_label (optional)" +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:7 of +msgid "``*meta_keys`` (optional)" +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:11 of +msgid "All keys in the dict." +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:13 mmcls.datasets.transforms.processing.ResizeEdge:12 of +msgid "**Added Keys:**" +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:15 of +msgid "inputs (:obj:`torch.Tensor`): The forward data of models." +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:16 of +msgid "data_samples (:obj:`~mmcls.structures.ClsDataSample`): The annotation info of the sample." +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:19 of +msgid "" +"The meta keys to be saved in the ``metainfo`` of the packed ``data_samples``. Defaults to a tuple includes " +"keys: - ``sample_idx``: The id of the image sample. - ``img_path``: The path to the image file. - " +"``ori_shape``: The original shape of the image as a tuple (H, W). - ``img_shape``: The shape of the image " +"after the pipeline as a tuple (H, W). - ``scale_factor``: The scale factor between the resized image " +"and the original image. - ``flip``: A boolean indicating if image flip transform was used. - " +"``flip_direction``: The flipping direction." +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:19 of +msgid "" +"The meta keys to be saved in the ``metainfo`` of the packed ``data_samples``. Defaults to a tuple includes " +"keys:" +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:23 of +msgid "``sample_idx``: The id of the image sample." +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:24 of +msgid "``img_path``: The path to the image file." +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:25 of +msgid "``ori_shape``: The original shape of the image as a tuple (H, W)." +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:26 of +msgid "``img_shape``: The shape of the image after the pipeline as a tuple (H, W)." +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:28 of +msgid "``scale_factor``: The scale factor between the resized image and the original image." +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:30 of +msgid "``flip``: A boolean indicating if image flip transform was used." +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs:31 of +msgid "``flip_direction``: The flipping direction." +msgstr "" + +#: mmcls.datasets.transforms.formatting.PackClsInputs.transform:1 of +msgid "Method to pack the input data." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Posterize.rst:7 +msgid "Posterize" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Posterize:3 of +msgid "" +"Number of bits for each pixel in the output img, which should be less or equal to 8. If None, generate from " +"``magnitude_range``, see :class:`BaseAugTransform`. Defaults to None." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Posterize:8 of +msgid "The probability for posterizing therefore should be in range [0, 1]. Defaults to 0.5." 
+msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.RandAugment.rst:7 +msgid "RandAugment" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:3 of +msgid "" +"This data augmentation is proposed in `RandAugment: Practical automated data augmentation with a reduced " +"search space `_." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:7 of +msgid "" +"The policies of random augmentation. If string, use preset policies collection like \"timm_increasing\". If " +"list, each item is one specific augmentation policy dict. The policy dict shall should have these keys: - " +"``type`` (str), The type of augmentation. - ``magnitude_range`` (Sequence[number], optional): For those " +"augmentation have magnitude, you need to specify the magnitude level mapping range. For example, assume " +"``total_level`` is 10, ``magnitude_level=3`` specify magnitude is 3 if ``magnitude_range=(0, 10)`` " +"while specify magnitude is 7 if ``magnitude_range=(10, 0)``. - other keyword arguments of the " +"augmentation." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:7 of +msgid "" +"The policies of random augmentation. If string, use preset policies collection like \"timm_increasing\". If " +"list, each item is one specific augmentation policy dict. The policy dict shall should have these keys:" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:12 of +msgid "``type`` (str), The type of augmentation." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:13 of +msgid "" +"``magnitude_range`` (Sequence[number], optional): For those augmentation have magnitude, you need to " +"specify the magnitude level mapping range. For example, assume ``total_level`` is 10, ``magnitude_level=3`` " +"specify magnitude is 3 if ``magnitude_range=(0, 10)`` while specify magnitude is 7 if " +"``magnitude_range=(10, 0)``." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:19 of +msgid "other keyword arguments of the augmentation." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:21 of +msgid "Number of policies to select from policies each time." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:24 of +msgid "Magnitude level for all the augmentation selected." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:27 of +msgid "" +"Deviation of magnitude noise applied. - If positive number, the magnitude obeys normal distribution :" +"math:`\\mathcal{N}(magnitude_level, magnitude_std)`. - If 0 or negative number, magnitude remains " +"unchanged. - If str \"inf\", the magnitude obeys uniform distribution :math:`Uniform(min, magnitude)`." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:29 of +msgid "" +"If positive number, the magnitude obeys normal distribution :math:`\\mathcal{N}(magnitude_level, " +"magnitude_std)`." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:45 of +msgid "" +"To use \"timm-increasing\" policies collection, select two policies every time, and magnitude_level of " +"every policy is 6 (total is 10 by default)" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:60 of +msgid "" +"If you want the ``magnitude_level`` randomly changes every time, you can use ``magnitude_std`` to specify " +"the random distribution. For example, a normal distribution :math:`\\mathcal{N}(6, 0.5)`." 
+msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:71 of +msgid "You can also use your own policies:" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:86 of +msgid "" +"``magnitude_std`` will introduce some randomness to policy, modified by https://github.com/rwightman/" +"pytorch-image-models." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:89 of +msgid "When magnitude_std=0, we calculate the magnitude as follows:" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment:91 of +msgid "" +"\\text{magnitude} = \\frac{\\text{magnitude_level}}\n" +"{\\text{totallevel}} \\times (\\text{val2} - \\text{val1})\n" +"+ \\text{val1}\n" +"\n" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.RandAugment.transform:1 of +msgid "Randomly choose a sub-policy to apply." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.RandomCrop.rst:7 +msgid "RandomCrop" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop:12 of +msgid "" +"Desired output size of the crop. If crop_size is an int instead of sequence like (h, w), a square crop " +"(crop_size, crop_size) is made." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop:16 of +msgid "" +"Optional padding on each border of the image. If a sequence of length 4 is provided, it is used to pad " +"left, top, right, bottom borders respectively. If a sequence of length 2 is provided, it is used to pad " +"left/right, top/bottom borders, respectively. Default: None, which means no padding." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop:22 of +msgid "" +"It will pad the image if smaller than the desired size to avoid raising an exception. Since cropping is " +"done after padding, the padding seems to be done at a random offset. Default: False." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop:27 of +msgid "" +"Pixel pad_val value for constant fill. If a tuple of length 3, it is used to pad_val R, G, B channels " +"respectively. Default: 0." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop:31 of +msgid "" +"Type of padding. Defaults to \"constant\". Should be one of the following: - ``constant``: Pads with a " +"constant value, this value is specified with pad_val. - ``edge``: pads with the last value at the edge of " +"the image. - ``reflect``: Pads with reflection of image without repeating the last value on the edge. For " +"example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode will result in [3, 2, 1, 2, " +"3, 4, 3, 2]. - ``symmetric``: Pads with reflection of image repeating the last value on the edge. For " +"example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode will result in [2, 1, 1, " +"2, 3, 4, 4, 3]." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop:31 of +msgid "Type of padding. Defaults to \"constant\". Should be one of the following:" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop:34 of +msgid "``constant``: Pads with a constant value, this value is specified with pad_val." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop:36 of +msgid "``edge``: pads with the last value at the edge of the image." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop:37 of +msgid "" +"``reflect``: Pads with reflection of image without repeating the last value on the edge. For example, " +"padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode will result in [3, 2, 1, 2, 3, 4, 3, 2]." 
+msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop:41 of +msgid "" +"``symmetric``: Pads with reflection of image repeating the last value on the edge. For example, padding [1, " +"2, 3, 4] with 2 elements on both sides in symmetric mode will result in [2, 1, 1, 2, 3, 4, 4, 3]." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop.transform:1 of +msgid "Transform function to randomly crop images." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop.transform:6 of +msgid "Randomly cropped results, 'img_shape' key in result dict is updated according to crop size." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomCrop.transform:8 of +msgid "Randomly cropped results, 'img_shape'" +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.RandomErasing.rst:7 +msgid "RandomErasing" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:11 of +msgid "Probability that image will be randomly erased. Default: 0.5" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:14 of +msgid "Minimum erased area / input image area Default: 0.02" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:17 of +msgid "Maximum erased area / input image area Default: 0.4" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:20 of +msgid "" +"Aspect ratio range of erased area. if float, it will be converted to (aspect_ratio, 1/aspect_ratio) " +"Default: (3/10, 10/3)" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:24 of +msgid "" +"Fill method in erased area, can be: - const (default): All pixels are assign with the same value. - rand: " +"each pixel is assigned with a random value in [0, 255]" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:24 of +msgid "Fill method in erased area, can be:" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:26 of +msgid "const (default): All pixels are assign with the same value." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:27 of +msgid "rand: each pixel is assigned with a random value in [0, 255]" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:29 of +msgid "Base color filled in erased area. Defaults to (128, 128, 128)." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:32 of +msgid "" +"If set and ``mode`` is 'rand', fill erased area with random color from normal distribution " +"(mean=fill_color, std=fill_std); If not set, fill erased area with random color from uniform distribution " +"(0~255). Defaults to None." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:40 of +msgid "See `Random Erasing Data Augmentation `_" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:43 of +msgid "" +"This paper provided 4 modes: RE-R, RE-M, RE-0, RE-255, and use RE-M as default. 
The config of these 4 modes " +"are:" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:46 of +msgid "RE-R: RandomErasing(mode='rand')" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:47 of +msgid "RE-M: RandomErasing(mode='const', fill_color=(123.67, 116.3, 103.5))" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:48 of +msgid "RE-0: RandomErasing(mode='const', fill_color=0)" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing:49 of +msgid "RE-255: RandomErasing(mode='const', fill_color=255)" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing.transform:1 of +msgid "Results dict from pipeline" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomErasing.transform:4 of +msgid "Results after the transformation." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.RandomResizedCrop.rst:7 +msgid "RandomResizedCrop" +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomResizedCrop:3 of +msgid "" +"A crop of random size (default: of 0.08 to 1.0) of the original size and a random aspect ratio (default: of " +"3/4 to 4/3) of the original aspect ratio is made. This crop is finally resized to given size." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomResizedCrop:16 of +msgid "" +"Desired output scale of the crop. If size is an int instead of sequence like (h, w), a square crop (size, " +"size) is made." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomResizedCrop:30 of +msgid "" +"Interpolation method, accepted values are 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to " +"'bilinear'." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomResizedCrop.transform:6 of +msgid "" +"Randomly resized cropped results, 'img_shape' key in result dict is updated according to crop size." +msgstr "" + +#: mmcls.datasets.transforms.processing.RandomResizedCrop.transform:8 of +msgid "Randomly resized cropped results, 'img_shape'" +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.ResizeEdge.rst:7 +msgid "ResizeEdge" +msgstr "" + +#: mmcls.datasets.transforms.processing.ResizeEdge:14 of +msgid "scale" +msgstr "" + +#: mmcls.datasets.transforms.processing.ResizeEdge:15 of +msgid "scale_factor" +msgstr "" + +#: mmcls.datasets.transforms.processing.ResizeEdge:17 of +msgid "The edge scale to resizing." +msgstr "" + +#: mmcls.datasets.transforms.processing.ResizeEdge:19 of +msgid "The edge to resize. Defaults to 'short'." +msgstr "" + +#: mmcls.datasets.transforms.processing.ResizeEdge:21 of +msgid "" +"Image resize backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different " +"results. Defaults to 'cv2'." +msgstr "" + +#: mmcls.datasets.transforms.processing.ResizeEdge:25 of +msgid "" +"Interpolation method, accepted values are \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for " +"'cv2' backend, \"nearest\", \"bilinear\" for 'pillow' backend. Defaults to 'bilinear'." +msgstr "" + +#: mmcls.datasets.transforms.processing.ResizeEdge.transform:6 of +msgid "Resized results, 'img', 'scale', 'scale_factor', 'img_shape' keys are updated in result dict." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Rotate.rst:7 +msgid "Rotate" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Rotate:3 of +msgid "" +"The angle used for rotate. Positive values stand for clockwise rotation. If None, generate from " +"``magnitude_range``, see :class:`BaseAugTransform`. Defaults to None." 
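The RE-M preset listed above, written out as a pipeline entry; ``mode`` and ``fill_color`` come straight from that list, while the remaining argument names follow the mmcls configs and the defaults quoted in the docstring, so treat them as illustrative:

    dict(
        type='RandomErasing',
        erase_prob=0.5,                 # probability of erasing, documented default 0.5
        min_area_ratio=0.02,            # minimum erased area / image area
        max_area_ratio=0.4,             # maximum erased area / image area
        aspect_range=(3 / 10, 10 / 3),  # documented default aspect-ratio range
        mode='const',
        fill_color=(123.67, 116.3, 103.5))  # the RE-M fill value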
+msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Rotate:8 of +msgid "" +"Center point (w, h) of the rotation in the source image. If None, the center of the image will be used. " +"Defaults to None." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Rotate:12 of +msgid "Isotropic scale factor. Defaults to 1.0." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Rotate:14 mmcls.datasets.transforms.auto_augment.Shear:7 +#: mmcls.datasets.transforms.auto_augment.Translate:9 of +msgid "" +"Pixel pad_val value for constant fill. If a sequence of length 3, it is used to pad_val R, G, B channels " +"respectively. Defaults to 128." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Rotate:18 of +msgid "The probability for performing rotate therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Rotate:21 of +msgid "The probability that turns the angle negative, which should be in range [0,1]. Defaults to 0.5." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Rotate:24 mmcls.datasets.transforms.auto_augment.Translate:22 of +msgid "" +"Interpolation method. Options are 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to " +"'nearest'." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Sharpness.rst:7 +msgid "Sharpness" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Sharpness:3 of +msgid "" +"The magnitude used for adjusting sharpness. A positive magnitude would enhance the sharpness and a negative " +"magnitude would make the image bulr. A magnitude=0 gives the origin img. If None, generate from " +"``magnitude_range``, see :class:`BaseAugTransform`. Defaults to None." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Sharpness:9 of +msgid "" +"The probability for performing sharpness adjusting therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Shear.rst:7 +msgid "Shear" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Shear:3 of +msgid "" +"The magnitude used for shear. If None, generate from ``magnitude_range``, see :class:`BaseAugTransform`. " +"Defaults to None." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Shear:11 of +msgid "The probability for performing shear therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Shear:14 of +msgid "The shearing direction. Options are 'horizontal' and 'vertical'. Defaults to 'horizontal'." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Shear:20 of +msgid "" +"Interpolation method. Options are 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to " +"'bicubic'." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Solarize.rst:7 +msgid "Solarize" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Solarize:3 of +msgid "" +"The threshold above which the pixels value will be inverted. If None, generate from ``magnitude_range``, " +"see :class:`BaseAugTransform`. Defaults to None." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Solarize:7 mmcls.datasets.transforms.auto_augment.SolarizeAdd:10 of +msgid "The probability for solarizing therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.SolarizeAdd.rst:7 +msgid "SolarizeAdd" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.SolarizeAdd:3 of +msgid "" +"The value to be added to pixels below the thr. 
If None, generate from ``magnitude_range``, see :class:" +"`BaseAugTransform`. Defaults to None." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.SolarizeAdd:7 of +msgid "The threshold below which the pixels value will be adjusted." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.ToNumpy.rst:7 +msgid "ToNumpy" +msgstr "" + +#: mmcls.datasets.transforms.formatting.ToNumpy:5 mmcls.datasets.transforms.formatting.ToNumpy:9 of +msgid "``*keys**``" +msgstr "" + +#: mmcls.datasets.transforms.formatting.ToNumpy:11 of +msgid "The dtype of the converted numpy array. Defaults to None." +msgstr "" + +#: mmcls.datasets.transforms.formatting.ToNumpy.transform:1 of +msgid "Method to convert object to :obj:`numpy.ndarray`." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.ToPIL.rst:7 +msgid "ToPIL" +msgstr "" + +#: mmcls.datasets.transforms.formatting.ToPIL.transform:1 of +msgid "Method to convert images to :obj:`PIL.Image.Image`." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Translate.rst:7 +msgid "Translate" +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Translate:3 of +msgid "" +"The magnitude used for translate. Note that the offset is calculated by magnitude * size in the " +"corresponding direction. With a magnitude of 1, the whole image will be moved out of the range. If None, " +"generate from ``magnitude_range``, see :class:`BaseAugTransform`." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Translate:13 of +msgid "The probability for performing translate therefore should be in range [0, 1]. Defaults to 0.5." +msgstr "" + +#: mmcls.datasets.transforms.auto_augment.Translate:16 of +msgid "The translating direction. Options are 'horizontal' and 'vertical'. Defaults to 'horizontal'." +msgstr "" + +#: ../../api/generated/mmcls.datasets.transforms.Transpose.rst:7 +msgid "Transpose" +msgstr "" + +#: mmcls.datasets.transforms.formatting.Transpose:11 of +msgid "The fields to convert to tensor." +msgstr "" + +#: mmcls.datasets.transforms.formatting.Transpose:13 of +msgid "The output dimensions order." +msgstr "" + +#: mmcls.datasets.transforms.formatting.Transpose.transform:1 of +msgid "Method to transpose array." +msgstr "" + +#: ../../api/generated/mmcls.engine.hooks.ClassNumCheckHook.rst:7 +msgid "ClassNumCheckHook" +msgstr "" + +#: mmcls.engine.hooks.class_num_check_hook.ClassNumCheckHook.before_test:1 of +msgid "Check whether the test dataset is compatible with head." +msgstr "" + +#: mmcls.engine.hooks.class_num_check_hook.ClassNumCheckHook.before_test:3 +#: mmcls.engine.hooks.class_num_check_hook.ClassNumCheckHook.before_train:3 +#: mmcls.engine.hooks.class_num_check_hook.ClassNumCheckHook.before_val:3 of +msgid "`IterBasedRunner`): Iter based Runner." +msgstr "" + +#: mmcls.engine.hooks.class_num_check_hook.ClassNumCheckHook.before_train:1 of +msgid "Check whether the training dataset is compatible with head." +msgstr "" + +#: mmcls.engine.hooks.class_num_check_hook.ClassNumCheckHook.before_val:1 of +msgid "Check whether the validation dataset is compatible with head." +msgstr "" + +#: ../../api/generated/mmcls.engine.hooks.PreciseBNHook.rst:7 +msgid "PreciseBNHook" +msgstr "" + +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook:3 of +msgid "" +"Recompute and update the batch norm stats to make them more precise. During training both BN stats and the " +"weight are changing after every iteration, so the running average can not precisely reflect the actual " +"stats of the current model." 
+msgstr "" + +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook:8 of +msgid "" +"With this hook, the BN stats are recomputed with fixed weights, to make the running average more precise. " +"Specifically, it computes the true average of per-batch mean/variance instead of the running average. See " +"Sec. 3 of the paper `Rethinking Batch in BatchNorm ` for details." +msgstr "" + +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook:14 of +msgid "" +"This hook will update BN stats, so it should be executed before ``CheckpointHook`` and ``EMAHook``, " +"generally set its priority to \"ABOVE_NORMAL\"." +msgstr "" + +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook:18 of +msgid "The number of samples to update the bn stats. Defaults to 8192." +msgstr "" + +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook:21 of +msgid "Perform precise bn interval. If the train loop is" +msgstr "" + +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook:23 mmcls.engine.hooks.precise_bn_hook.PreciseBNHook:25 of +msgid "train loop is `IterBasedTrainLoop` or `by_epoch=False`, its unit is 'iter'. Defaults to 1." +msgstr "" + +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook.after_train_epoch:1 +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook.after_train_iter:1 of +msgid "Calculate prcise BN and broadcast BN stats across GPUs." +msgstr "" + +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook.after_train_epoch:3 +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook.after_train_iter:3 of +msgid "`Runner`): The runner of the training process." +msgstr "" + +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook.after_train_iter:4 of +msgid "The index of the current batch in the train loop." +msgstr "" + +#: mmcls.engine.hooks.precise_bn_hook.PreciseBNHook.after_train_iter:6 of +msgid "Data from dataloader. Defaults to None." +msgstr "" + +#: ../../api/generated/mmcls.engine.hooks.PrepareProtoBeforeValLoopHook.rst:7 +msgid "PrepareProtoBeforeValLoopHook" +msgstr "" + +#: mmcls.engine.hooks.retriever_hooks.PrepareProtoBeforeValLoopHook:3 of +msgid "" +"Since the encoders of the retriever changes during training, the prototype changes accordingly. So the " +"`prototype_vecs` needs to be regenerated before validation loop." +msgstr "" + +#: ../../api/generated/mmcls.engine.hooks.SetAdaptiveMarginsHook.rst:7 +msgid "SetAdaptiveMarginsHook" +msgstr "" + +#: mmcls.engine.hooks.margin_head_hooks.SetAdaptiveMarginsHook:4 of +msgid "" +"A PyTorch implementation of paper `Google Landmark Recognition 2020 Competition Third Place Solution " +"`_. The margins will be :math:`\\text{f}(n) = (marginMax - marginMin) · " +"norm(n^p) + marginMin`. The `n` indicates the number of occurrences of a category." +msgstr "" + +#: mmcls.engine.hooks.margin_head_hooks.SetAdaptiveMarginsHook:10 of +msgid "Lower bound of margins. Defaults to 0.05." +msgstr "" + +#: mmcls.engine.hooks.margin_head_hooks.SetAdaptiveMarginsHook:12 of +msgid "Upper bound of margins. Defaults to 0.5." +msgstr "" + +#: mmcls.engine.hooks.margin_head_hooks.SetAdaptiveMarginsHook:14 of +msgid "The power of category freqercy. Defaults to -0.25." +msgstr "" + +#: mmcls.engine.hooks.margin_head_hooks.SetAdaptiveMarginsHook.before_train:1 of +msgid "change the margins in ArcFaceClsHead." +msgstr "" + +#: mmcls.engine.hooks.margin_head_hooks.SetAdaptiveMarginsHook.before_train:3 of +msgid "`Runner`): Runner." 
+msgstr "" + +#: ../../api/generated/mmcls.engine.hooks.VisualizationHook.rst:7 +msgid "VisualizationHook" +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook:1 of +msgid "Classification Visualization Hook. Used to visualize validation and testing prediction results." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook:4 of +msgid "If ``out_dir`` is specified, all storage backends are ignored and save the image to the ``out_dir``." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook:6 of +msgid "" +"If ``show`` is True, plot the result image in a window, please confirm you are able to access the graphical " +"interface." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook:9 of +msgid "Whether to enable this hook. Defaults to False." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook:11 of +msgid "The interval of samples to visualize. Defaults to 5000." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook:13 of +msgid "Whether to display the drawn image. Defaults to False." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook:15 of +msgid "" +"directory where painted images will be saved in the testing process. If None, handle with the backends of " +"the visualizer. Defaults to None." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook:19 of +msgid "other keyword arguments of :meth:`mmcls.visualization.ClsVisualizer.add_datasample`." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook.after_test_iter:1 of +msgid "Visualize every ``self.interval`` samples during test." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook.after_test_iter:3 of +msgid "The runner of the testing process." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook.after_test_iter:5 of +msgid "The index of the current batch in the test loop." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook.after_test_iter:7 +#: mmcls.engine.hooks.visualization_hook.VisualizationHook.after_val_iter:7 of +msgid "Data from dataloader." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook.after_test_iter:9 +#: mmcls.engine.hooks.visualization_hook.VisualizationHook.after_val_iter:9 of +msgid "Outputs from model." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook.after_val_iter:1 of +msgid "Visualize every ``self.interval`` samples during validation." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook.after_val_iter:3 of +msgid "The runner of the validation process." +msgstr "" + +#: mmcls.engine.hooks.visualization_hook.VisualizationHook.after_val_iter:5 of +msgid "The index of the current batch in the val loop." +msgstr "" + +#: ../../api/generated/mmcls.engine.optimizers.Lamb.rst:7 +msgid "Lamb" +msgstr "" + +#: mmcls.engine.optimizers.lamb.Lamb:3 of +msgid "" +"This class is copied from `timm`_. The LAMB was proposed in `Large Batch Optimization for Deep Learning - " +"Training BERT in 76 minutes`_." +msgstr "" + +#: mmcls.engine.optimizers.lamb.Lamb:11 of +msgid "iterable of parameters to optimize or dicts defining" +msgstr "" + +#: mmcls.engine.optimizers.lamb.Lamb:14 of +msgid "learning rate. (default: 1e-3)" +msgstr "" + +#: mmcls.engine.optimizers.lamb.Lamb:16 of +msgid "coefficients used for computing running averages of gradient and its norm. 
(default: (0.9, 0.999))" +msgstr "" + +#: mmcls.engine.optimizers.lamb.Lamb:19 of +msgid "term added to the denominator to improve numerical stability. (default: 1e-8)" +msgstr "" + +#: mmcls.engine.optimizers.lamb.Lamb:22 of +msgid "weight decay (L2 penalty) (default: 0)" +msgstr "" + +#: mmcls.engine.optimizers.lamb.Lamb:24 of +msgid "whether apply (1-beta2) to grad when calculating running averages of gradient. (default: True)" +msgstr "" + +#: mmcls.engine.optimizers.lamb.Lamb:27 of +msgid "value used to clip global grad norm (default: 1.0)" +msgstr "" + +#: mmcls.engine.optimizers.lamb.Lamb:30 of +msgid "enable LAMBC trust ratio clipping (default: False)" +msgstr "" + +#: mmcls.engine.optimizers.lamb.Lamb:32 of +msgid "Apply adaptive learning rate to 0.0 weight decay parameter (default: False)" +msgstr "" + +#: mmcls.engine.optimizers.lamb.Lamb.step:1 of +msgid "Performs a single optimization step." +msgstr "" + +#: mmcls.engine.optimizers.lamb.Lamb.step:3 of +msgid "A closure that reevaluates the model and returns the loss." +msgstr "" + +#: ../../api/generated/mmcls.evaluation.Accuracy.rst:7 +msgid "Accuracy" +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy:3 of +msgid "" +"For either binary classification or multi-class classification, the accuracy is the fraction of correct " +"predictions in all predictions:" +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy:6 of +msgid "\\text{Accuracy} = \\frac{N_{\\text{correct}}}{N_{\\text{all}}}" +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy:10 of +msgid "" +"If the ground truth label matches one of the best **k** predictions, the sample will be regard as a " +"positive prediction. If the parameter is a tuple, all of top-k accuracy will be calculated and outputted " +"together. Defaults to 1." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy:15 of +msgid "" +"If a float, predictions with score lower than the threshold will be regard as the negative prediction. If " +"None, not apply threshold. If the parameter is a tuple, accuracy based on all thresholds will be calculated " +"and outputted together. Defaults to 0." +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:22 +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:58 mmcls.evaluation.metrics.single_label.Accuracy:21 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:57 of +msgid "" +"Device name used for collecting results from different ranks during distributed training. Must be 'cpu' or " +"'gpu'. Defaults to 'cpu'." +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:26 +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:62 mmcls.evaluation.metrics.single_label.Accuracy:25 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:61 of +msgid "" +"The prefix that will be added in the metric names to disambiguate homonymous metrics of different " +"evaluators. If prefix is not provided in the argument, self.default_prefix will be used instead. Defaults " +"to None." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.calculate:1 of +msgid "Calculate the accuracy." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.calculate:3 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:3 of +msgid "The prediction results. It can be labels (N, ), or scores of every class (N, C)." 
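A short sketch of the ``Accuracy`` metric described above, both as an evaluator config and through the documented ``calculate`` method; the random tensors stand in for real predictions:

    import torch
    from mmcls.evaluation import Accuracy

    # As an evaluator entry: report top-1 and top-5 accuracy together.
    val_evaluator = dict(type='Accuracy', topk=(1, 5))

    # Direct calculation on (N, C) scores against (N, ) targets.
    pred = torch.rand(16, 10)
    target = torch.randint(0, 10, (16, ))
    accs = Accuracy.calculate(pred, target, topk=(1, 5))  # first dim topk, second dim thrs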
+msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.calculate:7 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:7 of +msgid "The target of each prediction with shape (N, )." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.calculate:10 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:10 of +msgid "" +"Predictions with scores under the thresholds are considered negative. It's only used when ``pred`` is " +"scores. None means no thresholds. Defaults to (0., )." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.calculate:15 of +msgid "" +"Predictions with scores under the thresholds are considered negative. It's only used when ``pred`` is " +"scores. Defaults to (0., )." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.calculate:20 of +msgid "" +"Accuracy. - torch.Tensor: If the ``pred`` is a sequence of label instead of score (number of dimensions " +"is 1). Only return a top-1 accuracy tensor, and ignore the argument ``topk` and ``thrs``. - " +"List[List[torch.Tensor]]: If the ``pred`` is a sequence of score (number of dimensions is 2). Return the " +"accuracy on each ``topk`` and ``thrs``. And the first dim is ``topk``, the second dim is ``thrs``." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.calculate:20 of +msgid "Accuracy." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.calculate:22 of +msgid "" +"torch.Tensor: If the ``pred`` is a sequence of label instead of score (number of dimensions is 1). Only " +"return a top-1 accuracy tensor, and ignore the argument ``topk` and ``thrs``." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.calculate:25 of +msgid "" +"List[List[torch.Tensor]]: If the ``pred`` is a sequence of score (number of dimensions is 2). Return the " +"accuracy on each ``topk`` and ``thrs``. And the first dim is ``topk``, the second dim is ``thrs``." +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:25::1 +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:25::1 +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:25::1 +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:25::1 +#: mmcls.evaluation.metrics.single_label.Accuracy.compute_metrics:1 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.compute_metrics:1 of +msgid "Compute the metrics from processed results." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.compute_metrics:3 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.compute_metrics:3 of +msgid "The processed results of each batch." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.compute_metrics:6 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.compute_metrics:6 of +msgid "The computed metrics. The keys are the names of the metrics, and the values are corresponding results." +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:25::1 +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:25::1 +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:25::1 +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:25::1 +#: mmcls.evaluation.metrics.single_label.Accuracy.process:1 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.process:1 of +msgid "Process one batch of data samples." 
+msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.process:3 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.process:3 of +msgid "" +"The processed results should be stored in ``self.results``, which will be used to computed the metrics when " +"all batches have been processed." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.process:6 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.process:6 of +msgid "A batch of data from the dataloader." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.Accuracy.process:7 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.process:7 of +msgid "A batch of outputs from the model." +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:2 +msgid "mmcls.evaluation.AveragePrecision" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:3 of +msgid "" +"AveragePrecision (AP) summarizes a precision-recall curve as the weighted mean of maximum precisions " +"obtained for any r'>r, where r is the recall:" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:6 of +msgid "" +"\\text{AP} = \\sum_n (R_n - R_{n-1}) P_n\n" +"\n" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:9 of +msgid "Note that no approximation is involved since the curve is piecewise constant." +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:12 of +msgid "" +"How to calculate the final metrics from every category. It supports two modes: - `\"macro\"`: Calculate " +"metrics for each category, and calculate the mean value over all categories. The result of this mode is " +"also called **mAP**. - `None`: Calculate metrics of every category and output directly. Defaults to \"macro" +"\"." +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:12 of +msgid "How to calculate the final metrics from every category. It supports two modes:" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:15 of +msgid "" +"`\"macro\"`: Calculate metrics for each category, and calculate the mean value over all categories. The " +"result of this mode is also called **mAP**." +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:18 +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:54 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:51 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:23 of +msgid "`None`: Calculate metrics of every category and output directly." +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:20 +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:56 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:53 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:26 of +msgid "Defaults to \"macro\"." 
+msgstr "" + +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:33 of +msgid "引用" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.AveragePrecision:34 of +msgid "" +"`Wikipedia entry for the Average precision `_" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:13 +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:13 +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:13 +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:13 +msgid "Methods" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:25::1 +msgid "" +":py:obj:`__init__ `\\ \\(\\[average\\, collect\\_device\\, " +"prefix\\]\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:25::1 +msgid "" +":py:obj:`calculate `\\ \\(pred\\, target\\[\\, average\\]\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:25::1 +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:25::1 +msgid "Calculate the average precision for a single class." +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:25::1 +msgid ":py:obj:`compute_metrics `\\ \\(results\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:25::1 +msgid ":py:obj:`evaluate `\\ \\(size\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:25::1 +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:25::1 +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:25::1 +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:25::1 +msgid "Evaluate the model performance of the whole dataset after processing all batches." +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:25::1 +msgid ":py:obj:`process `\\ \\(data\\_batch\\, data\\_samples\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:27 +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:27 +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:27 +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:27 +msgid "Attributes" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:31::1 +msgid ":py:obj:`dataset_meta `\\" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:31::1 +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:31::1 +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:31::1 +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:31::1 +msgid "Meta info of the dataset." +msgstr "" + +#: ../../api/generated/mmcls.evaluation.AveragePrecision.rst:31::1 +msgid ":py:obj:`default_prefix `\\" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:2 +msgid "mmcls.evaluation.MultiLabelMetric" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:4 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:4 of +msgid "" +"The collection of metrics is for single-label multi-class classification. 
And all these metrics are based " +"on the confusion matrix of every category:" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:11 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:11 of +msgid "All metrics can be formulated use variables above:" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:13 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:13 of +msgid "**Precision** is the fraction of correct predictions in all predictions:" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:15 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:15 of +msgid "" +"\\text{Precision} = \\frac{TP}{TP+FP}\n" +"\n" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:18 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:18 of +msgid "**Recall** is the fraction of correct predictions in all targets:" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:20 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:20 of +msgid "" +"\\text{Recall} = \\frac{TP}{TP+FN}\n" +"\n" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:23 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:23 of +msgid "**F1-score** is the harmonic mean of the precision and recall:" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:25 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:25 of +msgid "" +"\\text{F1-score} = \\frac{2\\times\\text{Recall}\\times\\text{Precision}}{\\text{Recall}+" +"\\text{Precision}}\n" +"\n" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:28 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:28 of +msgid "**Support** is the number of samples:" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:30 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:30 of +msgid "" +"\\text{Support} = TP + TN + FN + FP\n" +"\n" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:33 of +msgid "" +"Predictions with scores under the threshold are considered as negative. If None, the ``topk`` predictions " +"will be considered as positive. If the ``topk`` is also None, use ``thr=0.5`` as default. Defaults to None." +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:38 of +msgid "" +"Predictions with the k-th highest scores are considered as positive. If None, use ``thr`` to determine " +"positive predictions. If both ``thr`` and ``topk`` are not None, use ``thr``. Defaults to None." +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:43 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:40 of +msgid "" +"The detailed metric items to evaluate, select from \"precision\", \"recall\", \"f1-score\" and \"support\". " +"Defaults to ``('precision', 'recall', 'f1-score')``." +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:47 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:44 of +msgid "" +"How to calculate the final metrics from the confusion matrix of every category. It supports three modes: - " +"`\"macro\"`: Calculate metrics for each category, and calculate the mean value over all categories. - `" +"\"micro\"`: Average the confusion matrix over all categories and calculate metrics on the mean confusion " +"matrix. - `None`: Calculate metrics of every category and output directly. Defaults to \"macro\"." 
+msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:47 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:44 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:15 of +msgid "" +"How to calculate the final metrics from the confusion matrix of every category. It supports three modes:" +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:50 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:47 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:19 of +msgid "`\"macro\"`: Calculate metrics for each category, and calculate the mean value over all categories." +msgstr "" + +#: mmcls.evaluation.metrics.multi_label.MultiLabelMetric:52 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:49 +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:21 of +msgid "" +"`\"micro\"`: Average the confusion matrix over all categories and calculate metrics on the mean confusion " +"matrix." +msgstr "" + +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:25::1 +msgid "" +":py:obj:`__init__ `\\ \\(\\[thr\\, topk\\, items\\, average" +"\\, ...\\]\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:25::1 +msgid "" +":py:obj:`calculate `\\ \\(pred\\, target\\[\\, pred\\_indices" +"\\, ...\\]\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:25::1 +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:25::1 +msgid "Calculate the precision, recall, f1-score." +msgstr "" + +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:25::1 +msgid ":py:obj:`compute_metrics `\\ \\(results\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:25::1 +msgid ":py:obj:`evaluate `\\ \\(size\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:25::1 +msgid ":py:obj:`process `\\ \\(data\\_batch\\, data\\_samples\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:31::1 +msgid ":py:obj:`dataset_meta `\\" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.MultiLabelMetric.rst:31::1 +msgid ":py:obj:`default_prefix `\\" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.SingleLabelMetric.rst:7 +msgid "SingleLabelMetric" +msgstr "" + +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:33 of +msgid "" +"If a float, predictions with score lower than the threshold will be regard as the negative prediction. If " +"None, only the top-1 prediction will be regard as the positive prediction. If the parameter is a tuple, " +"accuracy based on all thresholds will be calculated and outputted together. Defaults to 0." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric:55 +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:37 of +msgid "The number of classes. Defaults to None." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:1 of +msgid "Calculate the precision, recall, f1-score and support." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:15 of +msgid "" +"How to calculate the final metrics from the confusion matrix of every category. It supports three modes: - " +"`\"macro\"`: Calculate metrics for each category, and calculate the mean value over all categories. - `" +"\"micro\"`: Average the confusion matrix over all categories and calculate metrics on the mean confusion " +"matrix. - `None`: Calculate metrics of every category and output directly. 
Defaults to \"macro\"." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:28 of +msgid "" +"The number of classes. If the ``pred`` is label instead of scores, this argument is required. Defaults to " +"None." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:33 of +msgid "" +"The tuple contains precision, recall and f1-score. And the type of each item is: - torch.Tensor: If the " +"``pred`` is a sequence of label instead of score (number of dimensions is 1). Only returns a tensor for " +"each metric. The shape is (1, ) if ``classwise`` is False, and (C, ) if ``classwise`` is True. - " +"List[torch.Tensor]: If the ``pred`` is a sequence of score (number of dimensions is 2). Return the " +"metrics on each ``thrs``. The shape of tensor is (1, ) if ``classwise`` is False, and (C, ) if " +"``classwise`` is True." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:33 of +msgid "The tuple contains precision, recall and f1-score. And the type of each item is:" +msgstr "" + +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:36 of +msgid "" +"torch.Tensor: If the ``pred`` is a sequence of label instead of score (number of dimensions is 1). Only " +"returns a tensor for each metric. The shape is (1, ) if ``classwise`` is False, and (C, ) if ``classwise`` " +"is True." +msgstr "" + +#: mmcls.evaluation.metrics.single_label.SingleLabelMetric.calculate:40 of +msgid "" +"List[torch.Tensor]: If the ``pred`` is a sequence of score (number of dimensions is 2). Return the metrics " +"on each ``thrs``. The shape of tensor is (1, ) if ``classwise`` is False, and (C, ) if ``classwise`` is " +"True." +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:2 +msgid "mmcls.evaluation.VOCAveragePrecision" +msgstr "" + +#: mmcls.evaluation.metrics.voc_multi_label.VOCAveragePrecision:3 +#: mmcls.evaluation.metrics.voc_multi_label.VOCMultiLabelMetric:6 of +msgid "" +"Whether to map the difficult labels as positive in one-hot ground truth for evaluation. If it set to True, " +"map difficult gt labels to positive ones(1), If it set to False, map difficult gt labels to negative " +"ones(0). Defaults to None, the difficult labels will be set to '-1'." +msgstr "" + +#: mmcls.evaluation.metrics.voc_multi_label.VOCAveragePrecision:9 of +msgid "Refers to `AveragePrecision` for detailed docstrings." 
+msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:25::1 +msgid "" +":py:obj:`__init__ `\\ \\(\\*arg\\[\\, difficult\\_as" +"\\_positive\\]\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:25::1 +msgid "" +":py:obj:`calculate `\\ \\(pred\\, target\\[\\, average\\]\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:25::1 +msgid ":py:obj:`compute_metrics `\\ \\(results\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:25::1 +msgid ":py:obj:`evaluate `\\ \\(size\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:25::1 +msgid "" +":py:obj:`process `\\ \\(data\\_batch\\, data\\_samples\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:31::1 +msgid ":py:obj:`dataset_meta `\\" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCAveragePrecision.rst:31::1 +msgid ":py:obj:`default_prefix `\\" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:2 +msgid "mmcls.evaluation.VOCMultiLabelMetric" +msgstr "" + +#: mmcls.evaluation.metrics.voc_multi_label.VOCMultiLabelMetric:4 of +msgid "It includes precision, recall, f1-score and support." +msgstr "" + +#: mmcls.evaluation.metrics.voc_multi_label.VOCMultiLabelMetric:12 of +msgid "Refers to `MultiLabelMetric` for detailed docstrings." +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:25::1 +msgid "" +":py:obj:`__init__ `\\ \\(\\*arg\\[\\, difficult\\_as" +"\\_positive\\]\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:25::1 +msgid "" +":py:obj:`calculate `\\ \\(pred\\, target\\[\\, pred" +"\\_indices\\, ...\\]\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:25::1 +msgid ":py:obj:`compute_metrics `\\ \\(results\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:25::1 +msgid ":py:obj:`evaluate `\\ \\(size\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:25::1 +msgid "" +":py:obj:`process `\\ \\(data\\_batch\\, data\\_samples\\)" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:31::1 +msgid ":py:obj:`dataset_meta `\\" +msgstr "" + +#: ../../api/generated/mmcls.evaluation.VOCMultiLabelMetric.rst:31::1 +msgid ":py:obj:`default_prefix `\\" +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.AlexNet.rst:7 +msgid "AlexNet" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.alexnet.AlexNet:1 of +msgid "`AlexNet `_ backbone." +msgstr "" + +#: mmcls.models.backbones.alexnet.AlexNet:3 of +msgid "The input for AlexNet is a 224x224 RGB image." +msgstr "" + +#: mmcls.models.backbones.alexnet.AlexNet:5 mmcls.models.backbones.lenet.LeNet5:5 of +msgid "" +"number of classes for classification. The default value is -1, which uses the backbone as a feature " +"extractor without the top classifier." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.CSPDarkNet.rst:7 +msgid "CSPDarkNet" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.cspnet.CSPDarkNet:1 of +msgid "CSP-Darknet backbone used in YOLOv4." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPDarkNet:3 of +msgid "Depth of CSP-Darknet. Default: 53." 
+msgstr "" + +#: mmcls.models.backbones.cspnet.CSPDarkNet:5 mmcls.models.backbones.mobileone.MobileOne:20 +#: mmcls.models.backbones.regnet.RegNet:17 mmcls.models.backbones.repmlp.RepMLPNet:17 +#: mmcls.models.backbones.resnest.ResNeSt:21 mmcls.models.backbones.resnet.ResNet:8 +#: mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:10 mmcls.models.backbones.resnext.ResNeXt:13 +#: mmcls.models.backbones.seresnet.SEResNet:10 mmcls.models.backbones.seresnext.SEResNeXt:15 of +msgid "Number of input image channels. Default: 3." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPDarkNet:7 of +msgid "Output from which stages. Default: (3, )." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPDarkNet:10 mmcls.models.backbones.cspnet.CSPResNeXt:8 +#: mmcls.models.backbones.cspnet.CSPResNet:8 mmcls.models.backbones.resnest.ResNeSt:48 +#: mmcls.models.backbones.resnet.ResNet:35 mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:39 +#: mmcls.models.backbones.resnext.ResNeXt:40 mmcls.models.backbones.seresnet.SEResNet:37 +#: mmcls.models.backbones.seresnext.SEResNeXt:42 of +msgid "Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. Default: -1." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPDarkNet:13 mmcls.models.backbones.cspnet.CSPResNeXt:11 +#: mmcls.models.backbones.cspnet.CSPResNet:11 of +msgid "Config dict for convolution layer. Default: None." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPDarkNet:15 mmcls.models.backbones.cspnet.CSPResNeXt:13 +#: mmcls.models.backbones.cspnet.CSPResNet:13 of +msgid "Dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True)." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPDarkNet:18 mmcls.models.backbones.cspnet.CSPResNeXt:16 +#: mmcls.models.backbones.cspnet.CSPResNet:16 of +msgid "Config dict for activation layer. Default: dict(type='LeakyReLU', negative_slope=0.1)." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPDarkNet:21 mmcls.models.backbones.cspnet.CSPResNeXt:19 +#: mmcls.models.backbones.cspnet.CSPResNet:19 of +msgid "" +"Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch " +"Norm and its variants only." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPDarkNet:25 mmcls.models.backbones.cspnet.CSPResNeXt:23 +#: mmcls.models.backbones.cspnet.CSPResNet:23 of +msgid "Initialization config dict. Default: None." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.CSPNet.rst:7 +msgid "CSPNet" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.cspnet.CSPNet:1 of +msgid "The abstract CSP Network class." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:3 of +msgid "" +"A Pytorch implementation of `CSPNet: A New Backbone that can Enhance Learning Capability of CNN `_" +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:6 of +msgid "" +"This class is an abstract class because the Cross Stage Partial Network (CSPNet) is a kind of universal " +"network structure, and you network block to implement networks like CSPResNet, CSPResNeXt and CSPDarkNet." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:11 of +msgid "" +"The architecture of the CSPNet. It should have the following keys: - block_fn (Callable): A function or " +"class to return a block module, and it should accept at least ``in_channels``, ``out_channels``, " +"``expansion``, ``drop_path_rate``, ``norm_cfg`` and ``act_cfg``. - in_channels (Tuple[int]): The number " +"of input channels of each stage. 
- out_channels (Tuple[int]): The number of output channels of each " +"stage. - num_blocks (Tuple[int]): The number of blocks in each stage. - expansion_ratio (float | " +"Tuple[float]): The expansion ratio in the expand convolution of each stage. Defaults to 0.5. - " +"bottle_ratio (float | Tuple[float]): The expansion ratio of blocks in each stage. Defaults to 2. - " +"has_downsampler (bool | Tuple[bool]): Whether to add a downsample convolution in each stage. Defaults to " +"True - down_growth (bool | Tuple[bool]): Whether to expand the channels in the downsampler layer of each " +"stage. Defaults to False. - block_args (dict | Tuple[dict], optional): The extra arguments to the blocks " +"in each stage. Defaults to None." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:11 of +msgid "The architecture of the CSPNet. It should have the following keys:" +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:14 of +msgid "" +"block_fn (Callable): A function or class to return a block module, and it should accept at least " +"``in_channels``, ``out_channels``, ``expansion``, ``drop_path_rate``, ``norm_cfg`` and ``act_cfg``." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:18 of +msgid "in_channels (Tuple[int]): The number of input channels of each stage." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:20 of +msgid "out_channels (Tuple[int]): The number of output channels of each stage." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:22 of +msgid "num_blocks (Tuple[int]): The number of blocks in each stage." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:23 of +msgid "" +"expansion_ratio (float | Tuple[float]): The expansion ratio in the expand convolution of each stage. " +"Defaults to 0.5." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:25 of +msgid "bottle_ratio (float | Tuple[float]): The expansion ratio of blocks in each stage. Defaults to 2." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:27 of +msgid "" +"has_downsampler (bool | Tuple[bool]): Whether to add a downsample convolution in each stage. Defaults to " +"True" +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:29 of +msgid "" +"down_growth (bool | Tuple[bool]): Whether to expand the channels in the downsampler layer of each stage. " +"Defaults to False." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:31 of +msgid "" +"block_args (dict | Tuple[dict], optional): The extra arguments to the blocks in each stage. Defaults to " +"None." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:34 of +msgid "A function or class to return a stem module. And it should accept ``in_channels``." +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:22 mmcls.models.backbones.convnext.ConvNeXt:20 +#: mmcls.models.backbones.cspnet.CSPNet:37 mmcls.models.backbones.densenet.DenseNet:22 +#: mmcls.models.backbones.hornet.HorNet:21 mmcls.models.backbones.hrnet.HRNet:25 +#: mmcls.models.backbones.mobilevit.MobileViT:25 mmcls.models.backbones.repvgg.RepVGG:21 +#: mmcls.models.backbones.res2net.Res2Net:12 mmcls.models.backbones.timm_backbone.TIMMBackbone:22 of +msgid "Number of input image channels. Defaults to 3." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:39 of +msgid "Output from which stages. Defaults to -1, which means the last stage." 
+msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:42 mmcls.models.backbones.davit.DaViT:50 +#: mmcls.models.backbones.efficientformer.EfficientFormer:35 mmcls.models.backbones.hornet.HorNet:33 +#: mmcls.models.backbones.res2net.Res2Net:35 mmcls.models.backbones.swin_transformer.SwinTransformer:48 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:48 mmcls.models.backbones.van.VAN:32 +#: mmcls.models.backbones.vision_transformer.VisionTransformer:51 of +msgid "" +"Stages to be frozen (stop grad and set eval mode). -1 means not freezing any parameters. Defaults to -1." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:45 of +msgid "The config dict for conv layers in blocks. Defaults to None, which means use Conv2d." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:48 of +msgid "The config dict for norm layers. Defaults to ``dict(type='BN', eps=1e-5)``." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:51 of +msgid "The config dict for activation functions. Defaults to ``dict(type='LeakyReLU', inplace=True)``." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:54 mmcls.models.backbones.davit.DaViT:53 +#: mmcls.models.backbones.efficientnet.EfficientNet:20 mmcls.models.backbones.hrnet.HRNet:33 +#: mmcls.models.backbones.mobileone.MobileOne:40 mmcls.models.backbones.repvgg.RepVGG:53 +#: mmcls.models.backbones.res2net.Res2Net:41 mmcls.models.backbones.swin_transformer.SwinTransformer:51 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:51 mmcls.models.backbones.van.VAN:35 of +msgid "" +"Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch " +"Norm and its variants only. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPNet:58 of +msgid "The initialization settings. Defaults to ``dict(type='Kaiming', layer='Conv2d'))``." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.CSPResNeXt.rst:7 +msgid "CSPResNeXt" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.cspnet.CSPResNeXt:1 of +msgid "CSP-ResNeXt backbone." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPResNeXt:3 of +msgid "Depth of CSP-ResNeXt. Default: 50." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPResNeXt:5 mmcls.models.backbones.cspnet.CSPResNet:5 of +msgid "Output from which stages. Default: (4, )." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.CSPResNet.rst:7 +msgid "CSPResNet" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.cspnet.CSPResNet:1 of +msgid "CSP-ResNet backbone." +msgstr "" + +#: mmcls.models.backbones.cspnet.CSPResNet:3 of +msgid "Depth of CSP-ResNet. Default: 50." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.Conformer.rst:7 +msgid "Conformer" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.conformer.Conformer:1 of +msgid "Conformer backbone." +msgstr "" + +#: mmcls.models.backbones.conformer.Conformer:3 of +msgid "" +"A PyTorch implementation of : `Conformer: Local Features Coupling Global Representations for Visual " +"Recognition `_" +msgstr "" + +#: mmcls.models.backbones.conformer.Conformer:6 of +msgid "Conformer architecture. Defaults to 'tiny'." +msgstr "" + +#: mmcls.models.backbones.conformer.Conformer:8 of +msgid "The patch size. Defaults to 16." +msgstr "" + +#: mmcls.models.backbones.conformer.Conformer:10 of +msgid "The base number of channels in CNN network. Defaults to 64." 
+msgstr "" + +#: mmcls.models.backbones.conformer.Conformer:13 of +msgid "The expansion ratio of FFN network in transformer block. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.conformer.Conformer:16 of +msgid "Whether use class token or not. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.conformer.Conformer:19 mmcls.models.backbones.deit.DistilledVisionTransformer:33 +#: mmcls.models.backbones.deit3.DeiT3:38 mmcls.models.backbones.mlp_mixer.MlpMixer:29 +#: mmcls.models.backbones.t2t_vit.T2T_ViT:23 mmcls.models.backbones.vision_transformer.VisionTransformer:33 +#: mmcls.models.utils.inverted_residual.InvertedResidual:26 of +msgid "stochastic depth rate. Defaults to 0." +msgstr "" + +#: mmcls.models.backbones.conformer.Conformer:21 mmcls.models.backbones.convmixer.ConvMixer:33 +#: mmcls.models.backbones.convnext.ConvNeXt:39 mmcls.models.backbones.davit.DaViT:57 +#: mmcls.models.backbones.deit.DistilledVisionTransformer:27 mmcls.models.backbones.deit3.DeiT3:32 +#: mmcls.models.backbones.densenet.DenseNet:42 mmcls.models.backbones.edgenext.EdgeNeXt:56 +#: mmcls.models.backbones.t2t_vit.T2T_ViT:17 mmcls.models.backbones.vision_transformer.VisionTransformer:27 of +msgid "Output from which stages. Defaults to -1, means the last stage." +msgstr "" + +#: mmcls.models.backbones.conformer.Conformer:24 mmcls.models.backbones.deit.DistilledVisionTransformer:58 +#: mmcls.models.backbones.deit3.DeiT3:66 mmcls.models.backbones.efficientformer.EfficientFormer:48 +#: mmcls.models.backbones.hrnet.HRNet:47 mmcls.models.backbones.mlp_mixer.MlpMixer:41 +#: mmcls.models.backbones.repvgg.RepVGG:59 mmcls.models.backbones.res2net.Res2Net:51 +#: mmcls.models.backbones.vision_transformer.VisionTransformer:70 +#: mmcls.models.classifiers.base.BaseClassifier:3 of +msgid "Initialization config dict. Defaults to None." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.ConvMixer.rst:7 +msgid "ConvMixer" +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:1 of +msgid "ConvMixer. ." +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:3 of +msgid "A PyTorch implementation of : `Patches Are All You Need? `_" +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:6 of +msgid "" +"Modified from the `official repo `_ and `timm " +"`_." +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:11 of +msgid "" +"The model's architecture. If string, it should be one of architecture in ``ConvMixer.arch_settings``. And " +"if dict, it should include the following two keys: - embed_dims (int): The dimensions of patch embedding. " +"- depth (int): Number of repetitions of ConvMixer Layer. - patch_size (int): The patch size. - kernel_size " +"(int): The kernel size of depthwise conv layers. Defaults to '768/32'." +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:11 of +msgid "" +"The model's architecture. If string, it should be one of architecture in ``ConvMixer.arch_settings``. And " +"if dict, it should include the following two keys:" +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:15 of +msgid "embed_dims (int): The dimensions of patch embedding." +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:16 of +msgid "depth (int): Number of repetitions of ConvMixer Layer." +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:17 of +msgid "patch_size (int): The patch size." +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:18 of +msgid "kernel_size (int): The kernel size of depthwise conv layers." 
+msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:20 of +msgid "Defaults to '768/32'." +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:24 of +msgid "The size of one patch in the patch embed layer. Defaults to 7." +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:27 mmcls.models.backbones.densenet.DenseNet:36 +#: mmcls.models.backbones.mobileone.MobileOne:31 mmcls.models.backbones.repvgg.RepVGG:41 of +msgid "The config dict for norm layers. Defaults to ``dict(type='BN')``." +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:30 of +msgid "The config dict for activation after each convolution. Defaults to ``dict(type='GELU')``." +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:36 mmcls.models.backbones.convnext.ConvNeXt:42 +#: mmcls.models.backbones.densenet.DenseNet:45 mmcls.models.backbones.edgenext.EdgeNeXt:59 +#: mmcls.models.backbones.efficientnet.EfficientNet:8 mmcls.models.backbones.poolformer.PoolFormer:53 of +msgid "Stages to be frozen (all param fixed). Defaults to 0, which means not freezing any parameters." +msgstr "" + +#: mmcls.models.backbones.convmixer.ConvMixer:39 mmcls.models.backbones.densenet.DenseNet:48 +#: mmcls.models.backbones.mobileone.MobileOne:44 mmcls.models.backbones.mobilevit.MobileViT:47 +#: mmcls.models.backbones.replknet.RepLKNet:59 mmcls.models.backbones.repmlp.RepMLPNet:51 +#: mmcls.models.classifiers.base.BaseClassifier:14 mmcls.models.utils.inverted_residual.InvertedResidual:31 of +msgid "Initialization config dict." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.ConvNeXt.rst:7 +msgid "ConvNeXt" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.convnext.ConvNeXt:1 of +msgid "ConvNeXt." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:3 of +msgid "A PyTorch implementation of : `A ConvNet for the 2020s `_" +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:6 of +msgid "" +"Modified from the `official repo `_ and `timm `_." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:11 of +msgid "" +"The model's architecture. If string, it should be one of architecture in ``ConvNeXt.arch_settings``. And if " +"dict, it should include the following two keys: - depths (list[int]): Number of blocks at each stage. - " +"channels (list[int]): The number of channels at each stage. Defaults to 'tiny'." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:11 of +msgid "" +"The model's architecture. If string, it should be one of architecture in ``ConvNeXt.arch_settings``. And if " +"dict, it should include the following two keys:" +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:15 of +msgid "depths (list[int]): Number of blocks at each stage." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:16 mmcls.models.backbones.edgenext.EdgeNeXt:14 of +msgid "channels (list[int]): The number of channels at each stage." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:18 mmcls.models.backbones.hornet.HorNet:19 +#: mmcls.models.backbones.swin_transformer.SwinTransformer:19 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:21 mmcls.models.backbones.van.VAN:18 of +msgid "Defaults to 'tiny'." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:22 of +msgid "The size of one patch in the stem layer. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:25 mmcls.models.backbones.poolformer.PoolFormer:20 of +msgid "The config dict for norm layers. Defaults to ``dict(type='LN2d', eps=1e-6)``." 
+msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:28 mmcls.models.backbones.efficientformer.EfficientFormer:38 +#: mmcls.models.backbones.poolformer.PoolFormer:23 of +msgid "The config dict for activation between pointwise convolution. Defaults to ``dict(type='GELU')``." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:31 of +msgid "Whether to use linear layer to do pointwise convolution. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:34 mmcls.models.backbones.efficientformer.EfficientFormer:43 +#: mmcls.models.backbones.hornet.HorNet:23 mmcls.models.backbones.poolformer.PoolFormer:46 of +msgid "Stochastic depth rate. Defaults to 0." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:36 of +msgid "Init value for Layer Scale. Defaults to 1e-6." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:45 mmcls.models.backbones.hornet.HorNet:39 of +msgid "" +"Whether to globally average the feature map before the final norm layer. In the official repo, it's only " +"used in classification task. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:49 mmcls.models.backbones.davit.DaViT:60 +#: mmcls.models.backbones.efficientnet.EfficientNet:24 mmcls.models.backbones.hornet.HorNet:36 +#: mmcls.models.backbones.hrnet.HRNet:37 mmcls.models.backbones.repvgg.RepVGG:47 +#: mmcls.models.backbones.res2net.Res2Net:45 mmcls.models.backbones.swin_transformer.SwinTransformer:45 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:45 +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:34 +#: mmcls.models.classifiers.timm.TimmClassifier:25 mmcls.models.utils.inverted_residual.InvertedResidual:28 of +msgid "" +"Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. " +"Defaults to False." +msgstr "" + +#: mmcls.models.backbones.convnext.ConvNeXt:52 mmcls.models.backbones.poolformer.PoolFormer:56 +#: mmcls.models.backbones.tnt.TNT:41 of +msgid "Initialization config dict" +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.DaViT.rst:7 +msgid "DaViT" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.davit.DaViT:1 of +msgid "DaViT." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:3 of +msgid "" +"A PyTorch implement of : `DaViT: Dual Attention Vision Transformers `_" +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:6 of +msgid "Inspiration from https://github.com/dingmyu/davit" +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:9 of +msgid "" +"DaViT architecture. If use string, choose from 'tiny', 'small', 'base' and 'large', 'huge', 'giant'. If use " +"dict, it should have below keys: - **embed_dims** (int): The dimensions of embedding. - **depths** " +"(List[int]): The number of blocks in each stage. - **num_heads** (List[int]): The number of heads in " +"attention modules of each stage. Defaults to 't'." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:9 of +msgid "" +"DaViT architecture. If use string, choose from 'tiny', 'small', 'base' and 'large', 'huge', 'giant'. 
If use " +"dict, it should have below keys:" +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:13 mmcls.models.backbones.deit.DistilledVisionTransformer:10 +#: mmcls.models.backbones.deit3.DeiT3:15 mmcls.models.backbones.mlp_mixer.MlpMixer:10 +#: mmcls.models.backbones.mvit.MViT:14 mmcls.models.backbones.swin_transformer.SwinTransformer:14 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:14 +#: mmcls.models.backbones.vision_transformer.VisionTransformer:10 of +msgid "**embed_dims** (int): The dimensions of embedding." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:14 mmcls.models.backbones.hornet.HorNet:14 +#: mmcls.models.backbones.swin_transformer.SwinTransformer:15 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:15 mmcls.models.backbones.van.VAN:14 of +msgid "**depths** (List[int]): The number of blocks in each stage." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:15 mmcls.models.backbones.swin_transformer.SwinTransformer:16 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:16 of +msgid "**num_heads** (List[int]): The number of heads in attention modules of each stage." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:18 of +msgid "Defaults to 't'." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:20 mmcls.models.backbones.repmlp.RepMLPNet:19 +#: mmcls.models.backbones.swin_transformer.SwinTransformer:25 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:27 of +msgid "The patch size in patch embedding. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:23 mmcls.models.backbones.deit.DistilledVisionTransformer:25 +#: mmcls.models.backbones.deit3.DeiT3:30 mmcls.models.backbones.efficientformer.EfficientFormer:20 +#: mmcls.models.backbones.mvit.MViT:25 mmcls.models.backbones.swin_transformer.SwinTransformer:28 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:30 mmcls.models.backbones.van.VAN:23 +#: mmcls.models.backbones.vision_transformer.VisionTransformer:25 of +msgid "The num of input channels. Defaults to 3." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:25 mmcls.models.backbones.swin_transformer.SwinTransformer:30 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:32 of +msgid "The height and width of the window. Defaults to 7." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:27 of +msgid "The expansion ratio of feedforward network hidden layer channels. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:30 mmcls.models.backbones.deit.DistilledVisionTransformer:35 +#: mmcls.models.backbones.deit3.DeiT3:40 mmcls.models.backbones.vision_transformer.VisionTransformer:35 of +msgid "Whether to add bias for qkv in attention modules. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:33 mmcls.models.backbones.mvit.MViT:31 +#: mmcls.models.backbones.swin_transformer.SwinTransformer:34 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:37 mmcls.models.backbones.van.VAN:27 of +msgid "Stochastic depth rate. Defaults to 0.1." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:35 mmcls.models.backbones.swin_transformer.SwinTransformer:36 of +msgid "Whether to output the feature map of a stage after the following downsample layer. Defaults to False." 
+msgstr "" + +#: mmcls.models.backbones.davit.DaViT:38 mmcls.models.backbones.swin_transformer.SwinTransformer:55 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:55 +#: mmcls.models.utils.attention.ShiftWindowMSA:15 of +msgid "" +"If True, pad the small feature map to the window size, which is common used in detection and segmentation. " +"If False, avoid shifting window and shrink the window size to the size of feature map, which is common used " +"in classification. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:44 mmcls.models.backbones.swin_transformer.SwinTransformer:61 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:61 mmcls.models.backbones.van.VAN:39 of +msgid "Config dict for normalization layer for all output features. Defaults to ``dict(type='LN')``" +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:47 mmcls.models.backbones.swin_transformer.SwinTransformer:64 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:64 of +msgid "Extra config dict for each stage. Defaults to an empty dict." +msgstr "" + +#: mmcls.models.backbones.davit.DaViT:63 mmcls.models.backbones.hornet.HorNet:43 +#: mmcls.models.backbones.mvit.MViT:75 mmcls.models.backbones.swin_transformer.SwinTransformer:70 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:73 mmcls.models.backbones.t2t_vit.T2T_ViT:46 +#: mmcls.models.backbones.twins.PCPVT:41 mmcls.models.backbones.twins.SVT:42 mmcls.models.backbones.van.VAN:45 +#: mmcls.models.utils.attention.MultiheadAttention:35 of +msgid "The Config for initialization. Defaults to None." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.DeiT3.rst:7 +msgid "DeiT3" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.deit3.DeiT3:1 of +msgid "DeiT3 backbone." +msgstr "" + +#: mmcls.models.backbones.deit3.DeiT3:3 of +msgid "A PyTorch implement of : `DeiT III: Revenge of the ViT `_" +msgstr "" + +#: mmcls.models.backbones.deit3.DeiT3:6 of +msgid "The differences between DeiT3 & VisionTransformer:" +msgstr "" + +#: mmcls.models.backbones.deit3.DeiT3:8 of +msgid "Use LayerScale." +msgstr "" + +#: mmcls.models.backbones.deit3.DeiT3:9 of +msgid "Concat cls token after adding pos_embed." +msgstr "" + +#: mmcls.models.backbones.deit3.DeiT3:11 of +msgid "" +"DeiT3 architecture. If use string, choose from 'small', 'base', 'medium', 'large' and 'huge'. If use dict, " +"it should have below keys: - **embed_dims** (int): The dimensions of embedding. - **num_layers** (int): " +"The number of transformer encoder layers. - **num_heads** (int): The number of heads in attention modules. " +"- **feedforward_channels** (int): The hidden dimensions in feedforward modules. Defaults to 'base'." +msgstr "" + +#: mmcls.models.backbones.deit3.DeiT3:11 of +msgid "" +"DeiT3 architecture. If use string, choose from 'small', 'base', 'medium', 'large' and 'huge'. If use dict, " +"it should have below keys:" +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:11 mmcls.models.backbones.deit3.DeiT3:16 +#: mmcls.models.backbones.vision_transformer.VisionTransformer:11 of +msgid "**num_layers** (int): The number of transformer encoder layers." +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:12 mmcls.models.backbones.deit3.DeiT3:17 +#: mmcls.models.backbones.vision_transformer.VisionTransformer:12 of +msgid "**num_heads** (int): The number of heads in attention modules." 
+msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:13 mmcls.models.backbones.deit3.DeiT3:18 +#: mmcls.models.backbones.vision_transformer.VisionTransformer:13 of +msgid "**feedforward_channels** (int): The hidden dimensions in feedforward modules." +msgstr "" + +#: mmcls.models.backbones.deit3.DeiT3:21 mmcls.models.backbones.mlp_mixer.MlpMixer:16 +#: mmcls.models.backbones.mvit.MViT:21 mmcls.models.backbones.vision_transformer.VisionTransformer:16 of +msgid "Defaults to 'base'." +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:18 mmcls.models.backbones.deit3.DeiT3:23 +#: mmcls.models.backbones.swin_transformer.SwinTransformer:21 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:23 mmcls.models.backbones.t2t_vit.T2T_ViT:6 +#: mmcls.models.backbones.vision_transformer.VisionTransformer:18 of +msgid "" +"The expected input image shape. Because we support dynamic input shape, just set the argument to the most " +"common input image shape. Defaults to 224." +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:22 mmcls.models.backbones.deit3.DeiT3:27 +#: mmcls.models.backbones.mlp_mixer.MlpMixer:20 mmcls.models.backbones.vision_transformer.VisionTransformer:22 +#: of +msgid "The patch size in patch embedding. Defaults to 16." +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:30 mmcls.models.backbones.deit3.DeiT3:35 +#: mmcls.models.backbones.mlp_mixer.MlpMixer:26 mmcls.models.backbones.twins.PCPVT:27 +#: mmcls.models.backbones.vision_transformer.VisionTransformer:30 of +msgid "Probability of an element to be zeroed. Defaults to 0." +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:38 mmcls.models.backbones.deit3.DeiT3:43 +#: mmcls.models.backbones.mlp_mixer.MlpMixer:31 mmcls.models.backbones.t2t_vit.T2T_ViT:25 +#: mmcls.models.backbones.twins.PCPVT:35 mmcls.models.backbones.twins.SVT:36 +#: mmcls.models.backbones.vision_transformer.VisionTransformer:38 mmcls.models.utils.embed.PatchMerging:35 of +msgid "Config dict for normalization layer. Defaults to ``dict(type='LN')``." +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:41 mmcls.models.backbones.deit3.DeiT3:46 +#: mmcls.models.backbones.repmlp.RepMLPNet:42 mmcls.models.backbones.t2t_vit.T2T_ViT:28 +#: mmcls.models.backbones.vision_transformer.VisionTransformer:41 of +msgid "Whether to add a additional layer to normalize final feature map. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:44 mmcls.models.backbones.deit3.DeiT3:49 +#: mmcls.models.backbones.t2t_vit.T2T_ViT:31 mmcls.models.backbones.vision_transformer.VisionTransformer:44 of +msgid "Whether concatenating class token into image tokens as transformer input. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:47 mmcls.models.backbones.deit3.DeiT3:52 +#: mmcls.models.backbones.t2t_vit.T2T_ViT:34 mmcls.models.backbones.vision_transformer.VisionTransformer:54 of +msgid "Whether output the cls_token. If set True, ``with_cls_token`` must be True. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.deit3.DeiT3:55 of +msgid "Whether to use layer_scale in DeiT3. Defaults to True." 
+msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:50 mmcls.models.backbones.deit3.DeiT3:58 +#: mmcls.models.backbones.t2t_vit.T2T_ViT:37 mmcls.models.backbones.vision_transformer.VisionTransformer:62 of +msgid "Select the interpolate mode for position embeding vector resize. Defaults to \"bicubic\"." +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:53 mmcls.models.backbones.deit3.DeiT3:61 +#: mmcls.models.backbones.mlp_mixer.MlpMixer:36 mmcls.models.backbones.vision_transformer.VisionTransformer:65 +#: of +msgid "Configs of patch embeding. Defaults to an empty dict." +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:55 mmcls.models.backbones.deit3.DeiT3:63 +#: mmcls.models.backbones.t2t_vit.T2T_ViT:43 mmcls.models.backbones.vision_transformer.VisionTransformer:67 of +msgid "Configs of each transformer layer in encoder. Defaults to an empty dict." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.DenseNet.rst:7 +msgid "DenseNet" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.densenet.DenseNet:1 of +msgid "DenseNet." +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:3 of +msgid "" +"A PyTorch implementation of : `Densely Connected Convolutional Networks `_" +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:6 of +msgid "" +"Modified from the `official repo `_ and `pytorch `_." +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:11 of +msgid "" +"The model's architecture. If string, it should be one of architecture in ``DenseNet.arch_settings``. And if " +"dict, it should include the following two keys: - growth_rate (int): Each layer of DenseBlock produce `k` " +"feature maps. Here refers `k` as the growth rate of the network. - depths (list[int]): Number of repeated " +"layers in each DenseBlock. - init_channels (int): The output channels of stem layers. Defaults to '121'." +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:11 of +msgid "" +"The model's architecture. If string, it should be one of architecture in ``DenseNet.arch_settings``. And if " +"dict, it should include the following two keys:" +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:15 of +msgid "" +"growth_rate (int): Each layer of DenseBlock produce `k` feature maps. Here refers `k` as the growth rate of " +"the network." +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:17 of +msgid "depths (list[int]): Number of repeated layers in each DenseBlock." +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:18 of +msgid "init_channels (int): The output channels of stem layers." +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:20 of +msgid "Defaults to '121'." +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:24 of +msgid "Refers to channel expansion parameter of 1x1 convolution layer. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:27 of +msgid "Drop rate of Dropout Layer. Defaults to 0." +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:29 of +msgid "The reduction rate of transition layers. Defaults to 0.5." +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:32 of +msgid "" +"If True, uses checkpointing. Much more memory efficient, but slower. Defaults to False. See `\"paper\" " +"`_." +msgstr "" + +#: mmcls.models.backbones.densenet.DenseNet:39 of +msgid "The config dict for activation after each convolution. Defaults to ``dict(type='ReLU')``." 
+msgstr "" + +#: ../../api/generated/mmcls.models.backbones.DistilledVisionTransformer.rst:7 +msgid "DistilledVisionTransformer" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.deit.DistilledVisionTransformer:1 of +msgid "Distilled Vision Transformer." +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:3 of +msgid "" +"A PyTorch implement of : `Training data-efficient image transformers & distillation through attention " +"`_" +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:6 of +msgid "" +"Vision Transformer architecture. If use string, choose from 'small', 'base', 'large', 'deit-tiny', 'deit-" +"small' and 'deit-base'. If use dict, it should have below keys: - **embed_dims** (int): The dimensions of " +"embedding. - **num_layers** (int): The number of transformer encoder layers. - **num_heads** (int): The " +"number of heads in attention modules. - **feedforward_channels** (int): The hidden dimensions in " +"feedforward modules. Defaults to 'deit-base'." +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:6 +#: mmcls.models.backbones.vision_transformer.VisionTransformer:6 of +msgid "" +"Vision Transformer architecture. If use string, choose from 'small', 'base', 'large', 'deit-tiny', 'deit-" +"small' and 'deit-base'. If use dict, it should have below keys:" +msgstr "" + +#: mmcls.models.backbones.deit.DistilledVisionTransformer:16 of +msgid "Defaults to 'deit-base'." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.EdgeNeXt.rst:7 +msgid "EdgeNeXt" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.edgenext.EdgeNeXt:1 of +msgid "EdgeNeXt." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:3 of +msgid "" +"A PyTorch implementation of: `EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile " +"Vision Applications `_" +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:7 of +msgid "Inspiration from https://github.com/mmaaz60/EdgeNeXt" +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:10 of +msgid "" +"The model's architecture. If string, it should be one of architectures in ``EdgeNeXt.arch_settings``. And " +"if dict, it should include the following keys: - channels (list[int]): The number of channels at each " +"stage. - depths (list[int]): The number of blocks at each stage. - num_heads (list[int]): The number of " +"heads at each stage. Defaults to 'xxsmall'." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:10 of +msgid "" +"The model's architecture. If string, it should be one of architectures in ``EdgeNeXt.arch_settings``. And " +"if dict, it should include the following keys:" +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:15 of +msgid "depths (list[int]): The number of blocks at each stage." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:16 of +msgid "num_heads (list[int]): The number of heads at each stage." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:18 of +msgid "Defaults to 'xxsmall'." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:20 of +msgid "The number of input channels. Defaults to 3." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:23 of +msgid "The number of global blocks. Defaults to [0, 1, 1, 1]." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:26 of +msgid "The type of global blocks. Defaults to ['None', 'SDTA', 'SDTA', 'SDTA']." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:29 of +msgid "Stochastic depth dropout rate. 
Defaults to 0." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:32 of +msgid "Initial value of layer scale. Defaults to 1e-6." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:35 of +msgid "Whether to use linear layer to do pointwise convolution. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:38 of +msgid "The number of channel ratio in MLP layers. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:41 of +msgid "The kernel size of convolutional layers at each stage. Defaults to [3, 5, 7, 9]." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:44 of +msgid "" +"Whether to use positional embedding in Channel Self-Attention. Defaults to [False, True, False, False]." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:47 of +msgid "Whether to use positional embedding for whole network. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:50 of +msgid "The number of channel groups used for SDTA at each stage. Defaults to [2, 2, 3, 4]." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:53 of +msgid "The config of normalization layer. Defaults to ``dict(type='LN2d', eps=1e-6)``." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:62 of +msgid "Whether to globally average the feature map before the final norm layer. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:65 of +msgid "The config of activation layer. Defaults to ``dict(type='GELU')``." +msgstr "" + +#: mmcls.models.backbones.edgenext.EdgeNeXt:68 of +msgid "Config for initialization. Defaults to None." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.EfficientFormer.rst:7 +msgid "EfficientFormer" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.efficientformer.EfficientFormer:1 of +msgid "EfficientFormer." +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:3 of +msgid "" +"A PyTorch implementation of EfficientFormer introduced by: `EfficientFormer: Vision Transformers at " +"MobileNet Speed `_" +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:6 of +msgid "Modified from the `official repo `." +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:9 of +msgid "" +"The model's architecture. If string, it should be one of architecture in ``EfficientFormer.arch_settings``. " +"And if dict, it should include the following 4 keys: - layers (list[int]): Number of blocks at each stage. " +"- embed_dims (list[int]): The number of channels at each stage. - downsamples (list[int]): Has downsample " +"or not in the four stages. - vit_num (int): The num of vit blocks in the last stage. Defaults to 'l1'." +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:9 of +msgid "" +"The model's architecture. If string, it should be one of architecture in ``EfficientFormer.arch_settings``. " +"And if dict, it should include the following 4 keys:" +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:13 mmcls.models.backbones.poolformer.PoolFormer:13 +#: of +msgid "layers (list[int]): Number of blocks at each stage." +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:14 mmcls.models.backbones.poolformer.PoolFormer:14 +#: of +msgid "embed_dims (list[int]): The number of channels at each stage." +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:15 of +msgid "downsamples (list[int]): Has downsample or not in the four stages." 
+msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:16 of +msgid "vit_num (int): The num of vit blocks in the last stage." +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:18 of +msgid "Defaults to 'l1'." +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:22 of +msgid "The pooling size of ``Meta4D`` blocks. Defaults to 3." +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:24 of +msgid "The dimension ratio of multi-head attention mechanism in ``Meta4D`` blocks. Defaults to 3." +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:27 of +msgid "" +"Whether to reshape the feature map from (B, N, C) to (B, C, H, W) in the last stage, when the ``vit-num`` " +"in ``arch`` is not 0. Defaults to False. Usually set to True in downstream tasks." +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:32 of +msgid "Output from which stages. Defaults to -1." +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:41 mmcls.models.backbones.poolformer.PoolFormer:44 +#: mmcls.models.backbones.twins.SVT:29 of +msgid "Dropout rate. Defaults to 0." +msgstr "" + +#: mmcls.models.backbones.efficientformer.EfficientFormer:45 of +msgid "Whether to use use_layer_scale in MetaFormer block. Defaults to True." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.EfficientNet.rst:7 +msgid "EfficientNet" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.efficientnet.EfficientNet:1 of +msgid "EfficientNet backbone." +msgstr "" + +#: mmcls.models.backbones.efficientnet.EfficientNet:3 of +msgid "Architecture of efficientnet. Defaults to b0." +msgstr "" + +#: mmcls.models.backbones.efficientnet.EfficientNet:5 of +msgid "Output from which stages. Defaults to (6, )." +msgstr "" + +#: mmcls.models.backbones.efficientnet.EfficientNet:11 mmcls.models.backbones.mobilevit.MobileViT:38 +#: mmcls.models.utils.inverted_residual.InvertedResidual:17 of +msgid "Config dict for convolution layer. Defaults to None, which means using conv2d." +msgstr "" + +#: mmcls.models.backbones.efficientnet.EfficientNet:14 mmcls.models.backbones.mobilevit.MobileViT:41 of +msgid "Config dict for normalization layer. Defaults to dict(type='BN')." +msgstr "" + +#: mmcls.models.backbones.efficientnet.EfficientNet:17 mmcls.models.backbones.mobilevit.MobileViT:44 of +msgid "Config dict for activation layer. Defaults to dict(type='Swish')." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.HRNet.rst:7 +msgid "HRNet" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.hrnet.HRNet:1 of +msgid "HRNet backbone." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:3 of +msgid "`High-Resolution Representations for Labeling Pixels and Regions `_." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:6 of +msgid "" +"The preset HRNet architecture, includes 'w18', 'w30', 'w32', 'w40', 'w44', 'w48', 'w64'. It will only be " +"used if extra is ``None``. Defaults to 'w32'." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:10 of +msgid "" +"Detailed configuration for each stage of HRNet. There must be 4 stages, the configuration for each stage " +"must have 5 keys: - num_modules (int): The number of HRModule in this stage. - num_branches (int): The " +"number of branches in the HRModule. - block (str): The type of convolution block. Please choose between " +"'BOTTLENECK' and 'BASIC'. - num_blocks (tuple): The number of blocks in each branch. 
The length must be " +"equal to num_branches. - num_channels (tuple): The number of base channels in each branch. The length " +"must be equal to num_branches. Defaults to None." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:10 of +msgid "" +"Detailed configuration for each stage of HRNet. There must be 4 stages, the configuration for each stage " +"must have 5 keys:" +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:14 of +msgid "num_modules (int): The number of HRModule in this stage." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:15 of +msgid "num_branches (int): The number of branches in the HRModule." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:16 of +msgid "block (str): The type of convolution block. Please choose between 'BOTTLENECK' and 'BASIC'." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:18 of +msgid "num_blocks (tuple): The number of blocks in each branch. The length must be equal to num_branches." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:20 of +msgid "" +"num_channels (tuple): The number of base channels in each branch. The length must be equal to num_branches." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:27 of +msgid "Dictionary to construct and config conv layer. Defaults to None." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:30 of +msgid "Dictionary to construct and config norm layer. Defaults to ``dict(type='BN')``." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:40 of +msgid "" +"Whether to use zero init for last norm layer in resblocks to let them behave as identity. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet:43 of +msgid "" +"Whether to output multi-level features produced by multiple branches. If False, only the first level " +"feature will be output. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet.forward:1 mmcls.models.backbones.inception_v3.InceptionV3.forward:1 +#: mmcls.models.losses.seesaw_loss.SeesawLoss.forward:1 +#: mmcls.models.utils.inverted_residual.InvertedResidual.forward:1 of +msgid "Forward function." +msgstr "" + +#: mmcls.models.backbones.HRNet.norm1:1 of +msgid "the normalization layer named \"norm1\"" +msgstr "" + +#: mmcls.models.backbones.HRNet.norm1 mmcls.models.backbones.HRNet.norm2 +#: mmcls.models.classifiers.base.BaseClassifier of +msgid "type" +msgstr "" + +#: mmcls.models.backbones.HRNet.norm1:3 mmcls.models.backbones.HRNet.norm2:3 of +msgid "nn.Module" +msgstr "" + +#: mmcls.models.backbones.HRNet.norm2:1 of +msgid "the normalization layer named \"norm2\"" +msgstr "" + +#: mmcls.models.backbones.hrnet.HRNet.train:1 of +msgid "Convert the model into training mode will keeping the normalization layer freezed." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.HorNet.rst:7 +msgid "HorNet" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.hornet.HorNet:1 of +msgid "HorNet backbone." +msgstr "" + +#: mmcls.models.backbones.hornet.HorNet:3 of +msgid "" +"A PyTorch implementation of paper `HorNet: Efficient High-Order Spatial Interactions with Recursive Gated " +"Convolutions `_ . Inspiration from https://github.com/raoyongming/HorNet" +msgstr "" + +#: mmcls.models.backbones.hornet.HorNet:8 of +msgid "" +"HorNet architecture. If use string, choose from 'tiny', 'small', 'base' and 'large'. If use dict, it " +"should have below keys: - **base_dim** (int): The base dimensions of embedding. - **depths** (List[int]): " +"The number of blocks in each stage. 
- **orders** (List[int]): The number of order of gnConv in each " +"stage. - **dw_cfg** (List[dict]): The Config for dw conv. Defaults to 'tiny'." +msgstr "" + +#: mmcls.models.backbones.hornet.HorNet:8 of +msgid "HorNet architecture." +msgstr "" + +#: mmcls.models.backbones.hornet.HorNet:10 of +msgid "" +"If use string, choose from 'tiny', 'small', 'base' and 'large'. If use dict, it should have below keys:" +msgstr "" + +#: mmcls.models.backbones.hornet.HorNet:13 of +msgid "**base_dim** (int): The base dimensions of embedding." +msgstr "" + +#: mmcls.models.backbones.hornet.HorNet:15 of +msgid "**orders** (List[int]): The number of order of gnConv in each" +msgstr "" + +#: mmcls.models.backbones.hornet.HorNet:16 of +msgid "stage." +msgstr "" + +#: mmcls.models.backbones.hornet.HorNet:17 of +msgid "**dw_cfg** (List[dict]): The Config for dw conv." +msgstr "" + +#: mmcls.models.backbones.hornet.HorNet:25 of +msgid "Scaling parameter of gflayer outputs. Defaults to 1/3." +msgstr "" + +#: mmcls.models.backbones.hornet.HorNet:27 of +msgid "Whether to use use_layer_scale in HorNet block. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.hornet.HorNet:30 mmcls.models.backbones.repmlp.RepMLPNet:22 +#: mmcls.models.backbones.resnet.ResNet:22 mmcls.models.backbones.van.VAN:29 of +msgid "Output from which stages. Default: ``(3, )``." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.InceptionV3.rst:7 +msgid "InceptionV3" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.inception_v3.InceptionV3:1 of +msgid "Inception V3 backbone." +msgstr "" + +#: mmcls.models.backbones.inception_v3.InceptionV3:3 of +msgid "" +"A PyTorch implementation of `Rethinking the Inception Architecture for Computer Vision `_" +msgstr "" + +#: mmcls.models.backbones.inception_v3.InceptionV3:6 of +msgid "" +"This implementation is modified from https://github.com/pytorch/vision/blob/main/torchvision/models/" +"inception.py. Licensed under the BSD 3-Clause License." +msgstr "" + +#: mmcls.models.backbones.inception_v3.InceptionV3:10 of +msgid "The number of categroies. Defaults to 1000." +msgstr "" + +#: mmcls.models.backbones.inception_v3.InceptionV3:12 of +msgid "" +"Whether to enable the auxiliary branch. If False, the auxiliary logits output will be None. Defaults to " +"False." +msgstr "" + +#: mmcls.models.backbones.inception_v3.InceptionV3:15 of +msgid "Dropout rate. Defaults to 0.5." +msgstr "" + +#: mmcls.models.backbones.inception_v3.InceptionV3:17 of +msgid "" +"The config of initialization. Defaults to use trunc normal with ``std=0.1`` for all Conv2d and Linear " +"layers and constant with ``val=1`` for all BatchNorm2d layers." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.LeNet5.rst:7 +msgid "LeNet5" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.lenet.LeNet5:1 of +msgid "`LeNet5 `_ backbone." +msgstr "" + +#: mmcls.models.backbones.lenet.LeNet5:3 of +msgid "The input for LeNet-5 is a 32×32 grayscale image." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.MViT.rst:7 +msgid "MViT" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.mvit.MViT:1 of +msgid "Multi-scale ViT v2." 
+msgstr "" + +#: mmcls.models.backbones.mvit.MViT:3 of +msgid "" +"A PyTorch implement of : `MViTv2: Improved Multiscale Vision Transformers for Classification and Detection " +"`_" +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:6 of +msgid "" +"Inspiration from `the official implementation `_ and `the " +"detectron2 implementation `_" +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:10 of +msgid "" +"MViT architecture. If use string, choose from 'tiny', 'small', 'base' and 'large'. If use dict, it should " +"have below keys: - **embed_dims** (int): The dimensions of embedding. - **num_layers** (int): The number " +"of layers. - **num_heads** (int): The number of heads in attention modules of the initial layer. - " +"**downscale_indices** (List[int]): The layer indices to downscale the feature map. Defaults to 'base'." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:10 of +msgid "" +"MViT architecture. If use string, choose from 'tiny', 'small', 'base' and 'large'. If use dict, it should " +"have below keys:" +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:15 of +msgid "**num_layers** (int): The number of layers." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:16 of +msgid "**num_heads** (int): The number of heads in attention modules of the initial layer." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:18 of +msgid "**downscale_indices** (List[int]): The layer indices to downscale the feature map." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:23 of +msgid "The expected input image shape. Defaults to 224." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:27 of +msgid "" +"The output scale indices. They should not exceed the length of ``downscale_indices``. Defaults to -1, which " +"means the last scale." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:33 mmcls.models.backbones.swin_transformer.SwinTransformer:39 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:39 of +msgid "If True, add absolute position embedding to the patch embedding. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:36 of +msgid "Select the interpolate mode for absolute position embedding vector resize. Defaults to \"bicubic\"." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:39 of +msgid "kernel size for qkv pooling layers. Defaults to (3, 3)." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:42 of +msgid "The magnification for ``embed_dims`` in the downscale layers. Defaults to 2." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:45 of +msgid "The magnification for ``num_heads`` in the downscale layers. Defaults to 2." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:48 of +msgid "The stride size for kv pooling in the initial layer. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:51 of +msgid "Whether to enable the spatial relative position embedding. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:54 of +msgid "Whether to enable the residual connection after attention pooling. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:57 of +msgid "" +"Whether to multiply the ``embed_dims`` in attention layers. If False, multiply it in MLP layers. Defaults " +"to True." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:61 of +msgid "If True, zero initialize relative positional parameters. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:64 of +msgid "Ratio of hidden dimensions in MLP layers. Defaults to 4.0." 
+msgstr "" + +#: mmcls.models.backbones.mvit.MViT:67 of +msgid "enable bias for qkv if True. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:69 of +msgid "" +"Config dict for normalization layer for all output features. Defaults to ``dict(type='LN', eps=1e-6)``." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT:72 of +msgid "Config dict for the patch embedding layer. Defaults to ``dict(kernel_size=7, stride=4, padding=3)``." +msgstr "" + +#: mmcls.models.backbones.mvit.MViT.forward:1 of +msgid "Forward the MViT." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.MlpMixer.rst:7 +msgid "MlpMixer" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.mlp_mixer.MlpMixer:1 of +msgid "Mlp-Mixer backbone." +msgstr "" + +#: mmcls.models.backbones.mlp_mixer.MlpMixer:3 of +msgid "" +"Pytorch implementation of `MLP-Mixer: An all-MLP Architecture for Vision `_" +msgstr "" + +#: mmcls.models.backbones.mlp_mixer.MlpMixer:6 of +msgid "" +"MLP Mixer architecture. If use string, choose from 'small', 'base' and 'large'. If use dict, it should have " +"below keys: - **embed_dims** (int): The dimensions of embedding. - **num_layers** (int): The number of MLP " +"blocks. - **tokens_mlp_dims** (int): The hidden dimensions for tokens FFNs. - **channels_mlp_dims** (int): " +"The The hidden dimensions for channels FFNs. Defaults to 'base'." +msgstr "" + +#: mmcls.models.backbones.mlp_mixer.MlpMixer:6 of +msgid "" +"MLP Mixer architecture. If use string, choose from 'small', 'base' and 'large'. If use dict, it should have " +"below keys:" +msgstr "" + +#: mmcls.models.backbones.mlp_mixer.MlpMixer:11 of +msgid "**num_layers** (int): The number of MLP blocks." +msgstr "" + +#: mmcls.models.backbones.mlp_mixer.MlpMixer:12 of +msgid "**tokens_mlp_dims** (int): The hidden dimensions for tokens FFNs." +msgstr "" + +#: mmcls.models.backbones.mlp_mixer.MlpMixer:13 of +msgid "**channels_mlp_dims** (int): The The hidden dimensions for channels FFNs." +msgstr "" + +#: mmcls.models.backbones.mlp_mixer.MlpMixer:18 of +msgid "The input image shape. Defaults to 224." +msgstr "" + +#: mmcls.models.backbones.mlp_mixer.MlpMixer:23 of +msgid "Output from which layer. Defaults to -1, means the last layer." +msgstr "" + +#: mmcls.models.backbones.mlp_mixer.MlpMixer:34 of +msgid "The activation config for FFNs. Default GELU." +msgstr "" + +#: mmcls.models.backbones.mlp_mixer.MlpMixer:38 of +msgid "Configs of each mixer block layer. Defaults to an empty dict." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.MobileNetV2.rst:7 +msgid "MobileNetV2" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.mobilenet_v2.MobileNetV2:1 of +msgid "MobileNetV2 backbone." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2:3 of +msgid "Width multiplier, multiply number of channels in each layer by this amount. Default: 1.0." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2:6 of +msgid "Output from which stages. Default: (7, )." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2:9 mmcls.models.backbones.mobilenet_v3.MobileNetV3:15 +#: mmcls.models.backbones.shufflenet_v1.ShuffleNetV1:12 mmcls.models.backbones.shufflenet_v2.ShuffleNetV2:9 of +msgid "Stages to be frozen (all param fixed). Default: -1, which means not freezing any parameters." 
+msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2:12 mmcls.models.backbones.mobilenet_v3.MobileNetV3:6 +#: mmcls.models.backbones.shufflenet_v1.ShuffleNetV1:15 mmcls.models.backbones.shufflenet_v2.ShuffleNetV2:12 +#: mmcls.models.utils.se_layer.SELayer:16 of +msgid "Config dict for convolution layer. Default: None, which means using conv2d." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2:15 mmcls.models.backbones.mobilenet_v3.MobileNetV3:9 +#: mmcls.models.backbones.shufflenet_v1.ShuffleNetV1:18 mmcls.models.backbones.shufflenet_v2.ShuffleNetV2:15 +#: of +msgid "Config dict for normalization layer. Default: dict(type='BN')." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2:18 of +msgid "Config dict for activation layer. Default: dict(type='ReLU6')." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2:21 mmcls.models.backbones.mobilenet_v3.MobileNetV3:18 +#: mmcls.models.backbones.regnet.RegNet:33 mmcls.models.backbones.resnest.ResNeSt:55 +#: mmcls.models.backbones.resnet.ResNet:42 mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:46 +#: mmcls.models.backbones.resnext.ResNeXt:47 mmcls.models.backbones.seresnet.SEResNet:44 +#: mmcls.models.backbones.seresnext.SEResNeXt:49 mmcls.models.backbones.shufflenet_v1.ShuffleNetV1:24 +#: mmcls.models.backbones.shufflenet_v2.ShuffleNetV2:21 mmcls.models.backbones.vgg.VGG:23 of +msgid "" +"Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch " +"Norm and its variants only. Default: False." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2:25 mmcls.models.backbones.mobilenet_v3.MobileNetV3:22 +#: mmcls.models.backbones.regnet.RegNet:37 mmcls.models.backbones.resnest.ResNeSt:59 +#: mmcls.models.backbones.resnet.ResNet:46 mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:50 +#: mmcls.models.backbones.resnext.ResNeXt:51 mmcls.models.backbones.seresnet.SEResNet:48 +#: mmcls.models.backbones.seresnext.SEResNeXt:53 mmcls.models.backbones.shufflenet_v1.ShuffleNetV1:28 +#: mmcls.models.backbones.shufflenet_v2.ShuffleNetV2:25 of +msgid "" +"Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. " +"Default: False." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2.make_layer:1 of +msgid "Stack InvertedResidual blocks to build a layer for MobileNetV2." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2.make_layer:3 of +msgid "out_channels of block." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2.make_layer:5 of +msgid "number of blocks." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2.make_layer:7 of +msgid "stride of the first block. Default: 1" +msgstr "" + +#: mmcls.models.backbones.mobilenet_v2.MobileNetV2.make_layer:9 of +msgid "Expand the number of channels of the hidden layer in InvertedResidual by this ratio. Default: 6." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.MobileNetV3.rst:7 +msgid "MobileNetV3" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.mobilenet_v3.MobileNetV3:1 of +msgid "MobileNetV3 backbone." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v3.MobileNetV3:3 of +msgid "Architecture of mobilnetv3, from {small, large}. Default: small." +msgstr "" + +#: mmcls.models.backbones.mobilenet_v3.MobileNetV3:12 of +msgid "Output from which stages. Default: None, which means output tensors from final stage." 
+msgstr "" + +#: ../../api/generated/mmcls.models.backbones.MobileOne.rst:7 +msgid "MobileOne" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.mobileone.MobileOne:1 of +msgid "MobileOne backbone." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:3 of +msgid "" +"A PyTorch impl of : `An Improved One millisecond Mobile Backbone `_" +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:6 of +msgid "" +"MobileOne architecture. If use string, choose from 's0', 's1', 's2', 's3' and 's4'. If use dict, it should " +"have below keys: - num_blocks (Sequence[int]): Number of blocks in each stage. - width_factor " +"(Sequence[float]): Width factor in each stage. - num_conv_branches (Sequence[int]): Number of conv " +"branches in each stage. - num_se_blocks (Sequence[int]): Number of SE layers in each stage, all the SE " +"layers are placed in the subsequent order in each stage. Defaults to 's0'." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:6 of +msgid "" +"MobileOne architecture. If use string, choose from 's0', 's1', 's2', 's3' and 's4'. If use dict, it should " +"have below keys:" +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:10 of +msgid "num_blocks (Sequence[int]): Number of blocks in each stage." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:11 of +msgid "width_factor (Sequence[float]): Width factor in each stage." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:12 of +msgid "num_conv_branches (Sequence[int]): Number of conv branches in each stage." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:14 of +msgid "" +"num_se_blocks (Sequence[int]): Number of SE layers in each stage, all the SE layers are placed in the " +"subsequent order in each stage." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:18 of +msgid "Defaults to 's0'." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:22 mmcls.models.backbones.repvgg.RepVGG:26 +#: mmcls.models.backbones.res2net.Res2Net:22 mmcls.models.backbones.twins.PCPVT:22 of +msgid "Output from which stages. Defaults to ``(3, )``." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:25 mmcls.models.backbones.repvgg.RepVGG:35 of +msgid "Stages to be frozen (all param fixed). -1 means not freezing any parameters. Defaults to -1." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:28 mmcls.models.backbones.repvgg.RepVGG:38 of +msgid "The config dict for conv layers. Defaults to None." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:34 mmcls.models.backbones.repvgg.RepVGG:44 +#: mmcls.models.utils.inverted_residual.InvertedResidual:23 of +msgid "Config dict for activation layer. Defaults to ``dict(type='ReLU')``." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne:37 mmcls.models.backbones.repvgg.RepVGG:50 of +msgid "Whether to switch the model structure to deployment mode. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne.switch_to_deploy:1 of +msgid "switch the model to deploy mode, which has smaller amount of parameters and calculations." +msgstr "" + +#: mmcls.models.backbones.mobileone.MobileOne.train:1 of +msgid "switch the mobile to train mode or not." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.MobileViT.rst:7 +msgid "MobileViT" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.mobilevit.MobileViT:1 of +msgid "MobileViT backbone." 
+msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT:3 of +msgid "" +"A PyTorch implementation of : `MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision " +"Transformer `_" +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT:6 of +msgid "" +"Modified from the `official repo `_ and `timm `_." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT:11 of +msgid "" +"Architecture of MobileViT. - If a string, choose from \"small\", \"x_small\" and \"xx_small\". - If a " +"list, every item should be also a list, and the first item of the sub-list can be chosen from " +"\"moblienetv2\" and \"mobilevit\", which indicates the type of this layer sequence. If \"mobilenetv2\", " +"the other items are the arguments of :attr:`~MobileViT.make_mobilenetv2_layer` (except ``in_channels``) " +"and if \"mobilevit\", the other items are the arguments of :attr:`~MobileViT.make_mobilevit_layer` " +"(except ``in_channels``). Defaults to \"small\"." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT:11 of +msgid "Architecture of MobileViT." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT:13 of +msgid "If a string, choose from \"small\", \"x_small\" and \"xx_small\"." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT:15 of +msgid "" +"If a list, every item should be also a list, and the first item of the sub-list can be chosen from " +"\"moblienetv2\" and \"mobilevit\", which indicates the type of this layer sequence. If \"mobilenetv2\", the " +"other items are the arguments of :attr:`~MobileViT.make_mobilenetv2_layer` (except ``in_channels``) and if " +"\"mobilevit\", the other items are the arguments of :attr:`~MobileViT.make_mobilevit_layer` (except " +"``in_channels``)." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT:23 of +msgid "Defaults to \"small\"." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT:27 of +msgid "Channels of stem layer. Defaults to 16." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT:29 of +msgid "Channels expand factor of last layer. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT:32 of +msgid "Output from which stages. Defaults to (4, )." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT:35 of +msgid "Stages to be frozen (all param fixed). Defaults to -1, which means not freezing any parameters." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilenetv2_layer:1 of +msgid "Build mobilenetv2 layer, which consists of several InvertedResidual layers." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilenetv2_layer:4 +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilevit_layer:4 of +msgid "The input channels." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilenetv2_layer:6 +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilevit_layer:6 of +msgid "The output channels." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilenetv2_layer:8 +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilevit_layer:8 of +msgid "The stride of the first 3x3 convolution in the ``InvertedResidual`` layers." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilenetv2_layer:11 of +msgid "The number of ``InvertedResidual`` blocks." 
+msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilenetv2_layer:13 +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilevit_layer:18 of +msgid "adjusts number of channels of the hidden layer in ``InvertedResidual`` by this amount. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilevit_layer:1 of +msgid "Build mobilevit layer, which consists of one InvertedResidual and one MobileVitBlock." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilevit_layer:11 of +msgid "The channels of the transformer layers." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilevit_layer:13 of +msgid "The mid-channels of the feedforward network in transformer layers." +msgstr "" + +#: mmcls.models.backbones.mobilevit.MobileViT.make_mobilevit_layer:16 of +msgid "The number of transformer blocks." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.PCPVT.rst:7 +msgid "PCPVT" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.twins.PCPVT:1 of +msgid "The backbone of Twins-PCPVT." +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:3 mmcls.models.backbones.twins.SVT:3 of +msgid "" +"This backbone is the implementation of `Twins: Revisiting the Design of Spatial Attention in Vision " +"Transformers `_." +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:7 of +msgid "" +"PCPVT architecture, a str value in arch zoo or a detailed configuration dict with 7 keys, and the length of " +"all the values in dict should be the same: - depths (List[int]): The number of encoder layers in each " +"stage. - embed_dims (List[int]): Embedding dimension in each stage. - patch_sizes (List[int]): The patch " +"sizes in each stage. - num_heads (List[int]): Numbers of attention head in each stage. - strides " +"(List[int]): The strides in each stage. - mlp_ratios (List[int]): The ratios of mlp in each stage. - " +"sr_ratios (List[int]): The ratios of GSA-encoder layers in each stage." +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:7 of +msgid "" +"PCPVT architecture, a str value in arch zoo or a detailed configuration dict with 7 keys, and the length of " +"all the values in dict should be the same:" +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:11 mmcls.models.backbones.twins.SVT:11 of +msgid "depths (List[int]): The number of encoder layers in each stage." +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:12 mmcls.models.backbones.twins.SVT:12 of +msgid "embed_dims (List[int]): Embedding dimension in each stage." +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:13 mmcls.models.backbones.twins.SVT:13 of +msgid "patch_sizes (List[int]): The patch sizes in each stage." +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:14 mmcls.models.backbones.twins.SVT:14 of +msgid "num_heads (List[int]): Numbers of attention head in each stage." +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:15 mmcls.models.backbones.twins.SVT:15 of +msgid "strides (List[int]): The strides in each stage." +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:16 mmcls.models.backbones.twins.SVT:16 of +msgid "mlp_ratios (List[int]): The ratios of mlp in each stage." +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:17 mmcls.models.backbones.twins.SVT:17 of +msgid "sr_ratios (List[int]): The ratios of GSA-encoder layers in each stage." +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:20 mmcls.models.backbones.twins.SVT:22 of +msgid "Number of input channels. Defaults to 3." 
+msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:25 mmcls.models.backbones.twins.SVT:27 of +msgid "Enable bias for qkv if True. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:30 of +msgid "The drop out rate for attention layer. Defaults to 0.0" +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:33 of +msgid "Stochastic depth rate. Defaults to 0.0." +msgstr "" + +#: mmcls.models.backbones.twins.PCPVT:38 mmcls.models.backbones.twins.SVT:39 of +msgid "Add extra norm after each stage. Defaults to False." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.PoolFormer.rst:7 +msgid "PoolFormer" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.poolformer.PoolFormer:1 of +msgid "PoolFormer." +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:3 of +msgid "" +"A PyTorch implementation of PoolFormer introduced by: `MetaFormer is Actually What You Need for Vision " +"`_" +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:6 of +msgid "" +"Modified from the `official repo `." +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:9 of +msgid "" +"The model's architecture. If string, it should be one of architecture in ``PoolFormer.arch_settings``. And " +"if dict, it should include the following two keys: - layers (list[int]): Number of blocks at each stage. - " +"embed_dims (list[int]): The number of channels at each stage. - mlp_ratios (list[int]): Expansion ratio of " +"MLPs. - layer_scale_init_value (float): Init value for Layer Scale. Defaults to 'S12'." +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:9 of +msgid "" +"The model's architecture. If string, it should be one of architecture in ``PoolFormer.arch_settings``. And " +"if dict, it should include the following two keys:" +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:15 of +msgid "mlp_ratios (list[int]): Expansion ratio of MLPs." +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:16 of +msgid "layer_scale_init_value (float): Init value for Layer Scale." +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:18 of +msgid "Defaults to 'S12'." +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:26 of +msgid "The patch size of input image patch embedding. Defaults to 7." +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:29 of +msgid "The stride of input image patch embedding. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:32 of +msgid "The padding of input image patch embedding. Defaults to 2." +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:35 of +msgid "The patch size of downsampling patch embedding. Defaults to 3." +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:38 of +msgid "The stride of downsampling patch embedding. Defaults to 2." +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:41 of +msgid "The padding of downsampling patch embedding. Defaults to 1." +msgstr "" + +#: mmcls.models.backbones.poolformer.PoolFormer:48 of +msgid "" +"Output from which network position. Index 0-6 respectively corresponds to [stage1, downsampling, stage2, " +"downsampling, stage3, downsampling, stage4] Defaults to -1, means the last stage." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.RegNet.rst:7 +msgid "RegNet" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.regnet.RegNet:1 of +msgid "RegNet backbone." 
+msgstr "" + +#: mmcls.models.backbones.regnet.RegNet:3 of +msgid "More details can be found in `paper `_ ." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet:5 of +msgid "" +"The parameter of RegNets. - w0 (int): initial width - wa (float): slope of width - wm (float): quantization " +"parameter to quantize the width - depth (int): depth of the backbone - group_w (int): width of group - " +"bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet:13 of +msgid "Strides of the first block of each stage." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet:15 of +msgid "Base channels after stem layer." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet:19 mmcls.models.backbones.vgg.VGG:11 of +msgid "Dilation of each stage." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet:21 of +msgid "Output from which stages." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet:23 of +msgid "" +"`pytorch` or `caffe`. If set to \"pytorch\", the stride-two layer is the 3x3 conv layer, otherwise the " +"stride-two layer is the first 1x1 conv layer. Default: \"pytorch\"." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet:27 of +msgid "Stages to be frozen (all param fixed). -1 means not freezing any parameters. Default: -1." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet:30 of +msgid "dictionary to construct and config norm layer. Default: dict(type='BN', requires_grad=True)." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet:40 of +msgid "" +"whether to use zero init for last norm layer in resblocks to let them behave as identity. Default: True." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.adjust_width_group:1 of +msgid "Adjusts the compatibility of widths and groups." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.adjust_width_group:3 of +msgid "Width of each stage." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.adjust_width_group:5 of +msgid "Bottleneck ratio." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.adjust_width_group:7 of +msgid "number of groups in each stage" +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.adjust_width_group:10 of +msgid "The adjusted widths and groups of each stage." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.generate_regnet:1 of +msgid "Generates per block width from RegNet parameters." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.generate_regnet:3 of +msgid "Initial width of the backbone" +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.generate_regnet:5 of +msgid "Slope of the quantized linear function" +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.generate_regnet:7 of +msgid "Parameter used to quantize the width." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.generate_regnet:9 of +msgid "Depth of the backbone." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.generate_regnet:11 of +msgid "The divisor of channels. Defaults to 8." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.generate_regnet:14 of +msgid "tuple containing: - list: Widths of each stage. - int: The number of stages." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.generate_regnet:17 of +msgid "tuple containing:" +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.generate_regnet:17 of +msgid "list: Widths of each stage." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.generate_regnet:18 of +msgid "int: The number of stages." 
+msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.get_stages_from_blocks:1 of +msgid "Gets widths/stage_blocks of network at each stage." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.get_stages_from_blocks:3 of +msgid "Width in each stage." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.get_stages_from_blocks:6 of +msgid "width and depth of each stage" +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.quantize_float:1 of +msgid "Converts a float to closest non-zero int divisible by divior." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.quantize_float:3 of +msgid "Original number to be quantized." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.quantize_float:5 of +msgid "Divisor used to quantize the number." +msgstr "" + +#: mmcls.models.backbones.regnet.RegNet.quantize_float:8 of +msgid "quantized number that is divisible by devisor." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.RepLKNet.rst:7 +msgid "RepLKNet" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.replknet.RepLKNet:1 of +msgid "RepLKNet backbone." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:3 of +msgid "" +"A PyTorch impl of : `Scaling Up Your Kernels to 31x31: Revisiting Large Kernel Design in CNNs `_" +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:7 of +msgid "" +"The parameter of RepLKNet. If it's a dict, it should contain the following keys: - large_kernel_sizes " +"(Sequence[int]): Large kernel size in each stage. - layers (Sequence[int]): Number of blocks in each " +"stage. - channels (Sequence[int]): Number of channels in each stage. - small_kernel (int): size of the " +"parallel small kernel. - dw_ratio (float): The intermediate channels expansion ratio of the block." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:7 of +msgid "The parameter of RepLKNet. If it's a dict, it should contain the following keys:" +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:10 of +msgid "large_kernel_sizes (Sequence[int]):" +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:11 of +msgid "Large kernel size in each stage." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:12 of +msgid "layers (Sequence[int]): Number of blocks in each stage." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:13 of +msgid "channels (Sequence[int]): Number of channels in each stage." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:14 of +msgid "small_kernel (int): size of the parallel small kernel." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:15 of +msgid "dw_ratio (float): The intermediate channels" +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:16 of +msgid "expansion ratio of the block." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:18 of +msgid "Number of input image channels. Default to 3." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:20 of +msgid "Mlp expansion ratio. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:22 of +msgid "Output from which stages. Default to (3, )." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:25 of +msgid "Strides of the first block of each stage. Default to (2, 2, 2, 2)." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:28 of +msgid "Dilation of each stage. Default to (1, 1, 1, 1)." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:31 of +msgid "Stages to be frozen (all param fixed). -1 means not freezing any parameters. Default to -1." 
+msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:35 of +msgid "The config dict for conv layers. Default to None." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:38 of +msgid "The config dict for norm layers. Default to ``dict(type='BN')``." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:41 of +msgid "Config dict for activation layer. Default to ``dict(type='ReLU')``." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:44 of +msgid "" +"Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. " +"Default to False." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:47 of +msgid "Whether to switch the model structure to deployment mode. Default to False." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:50 of +msgid "" +"Construct and config norm layer or not. Using True will normalize the intermediate features for downstream " +"dense prediction tasks." +msgstr "" + +#: mmcls.models.backbones.replknet.RepLKNet:55 of +msgid "" +"Whether to set norm layers to eval mode, namely, freeze running stats (mean and var). Note: Effect on Batch " +"Norm and its variants only. Default to False." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.RepMLPNet.rst:7 +msgid "RepMLPNet" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.repmlp.RepMLPNet:1 of +msgid "RepMLPNet backbone." +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:3 of +msgid "" +"A PyTorch impl of : `RepMLP: Re-parameterizing Convolutions into Fully-connected Layers for Image " +"Recognition `_" +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:7 of +msgid "" +"RepMLP architecture. If use string, choose from 'base' and 'b'. If use dict, it should have below keys: - " +"channels (List[int]): Number of blocks in each stage. - depths (List[int]): The number of blocks in each " +"branch. - sharesets_nums (List[int]): RepVGG Block that declares the need to apply group convolution." +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:7 of +msgid "" +"RepMLP architecture. If use string, choose from 'base' and 'b'. If use dict, it should have below keys:" +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:10 of +msgid "channels (List[int]): Number of blocks in each stage." +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:11 of +msgid "depths (List[int]): The number of blocks in each branch." +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:12 of +msgid "sharesets_nums (List[int]): RepVGG Block that declares the need to apply group convolution." +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:15 of +msgid "The size of input image. Defaults: 224." +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:25 of +msgid "The conv kernels in the GlobalPerceptron. Default: None." +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:28 of +msgid "The reducation ratio in the GlobalPerceptron. Default: 4." +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:31 of +msgid "The number of sharesets in the PartitionPerceptron. Default 1." +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:34 mmcls.models.backbones.resnest.ResNeSt:51 +#: mmcls.models.backbones.resnet.ResNet:38 mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:42 +#: mmcls.models.backbones.resnext.ResNeXt:43 mmcls.models.backbones.seresnet.SEResNet:40 +#: mmcls.models.backbones.seresnext.SEResNeXt:45 mmcls.models.utils.embed.HybridEmbed:17 of +msgid "The config dict for conv layers. Default: None." 
+msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:36 of +msgid "The config dict for norm layers. Default: dict(type='BN', requires_grad=True)." +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:39 mmcls.models.backbones.swin_transformer.SwinTransformer:67 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:67 of +msgid "Extra config dict for patch embedding. Defaults to an empty dict." +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:45 mmcls.models.backbones.shufflenet_v1.ShuffleNetV1:21 +#: mmcls.models.backbones.shufflenet_v2.ShuffleNetV2:18 of +msgid "Config dict for activation layer. Default: dict(type='ReLU')." +msgstr "" + +#: mmcls.models.backbones.repmlp.RepMLPNet:48 of +msgid "Whether to switch the model structure to deployment mode. Default: False." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.RepVGG.rst:7 +msgid "RepVGG" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.repvgg.RepVGG:1 of +msgid "RepVGG backbone." +msgstr "" + +#: mmcls.models.backbones.repvgg.RepVGG:3 of +msgid "" +"A PyTorch impl of : `RepVGG: Making VGG-style ConvNets Great Again `_" +msgstr "" + +#: mmcls.models.backbones.repvgg.RepVGG:6 of +msgid "" +"RepVGG architecture. If use string, choose from 'A0', 'A1`', 'A2', 'B0', 'B1', 'B1g2', 'B1g4', 'B2', " +"'B2g2', 'B2g4', 'B3', 'B3g2', 'B3g4' or 'D2se'. If use dict, it should have below keys: - **num_blocks** " +"(Sequence[int]): Number of blocks in each stage. - **width_factor** (Sequence[float]): Width deflator in " +"each stage. - **group_layer_map** (dict | None): RepVGG Block that declares the need to apply group " +"convolution. - **se_cfg** (dict | None): SE Layer config. - **stem_channels** (int, optional): The stem " +"channels, the final stem channels will be ``min(stem_channels, base_channels*width_factor[0])``. If " +"not set here, 64 is used by default in the code." +msgstr "" + +#: mmcls.models.backbones.repvgg.RepVGG:6 of +msgid "" +"RepVGG architecture. If use string, choose from 'A0', 'A1`', 'A2', 'B0', 'B1', 'B1g2', 'B1g4', 'B2', " +"'B2g2', 'B2g4', 'B3', 'B3g2', 'B3g4' or 'D2se'. If use dict, it should have below keys:" +msgstr "" + +#: mmcls.models.backbones.repvgg.RepVGG:11 of +msgid "**num_blocks** (Sequence[int]): Number of blocks in each stage." +msgstr "" + +#: mmcls.models.backbones.repvgg.RepVGG:12 of +msgid "**width_factor** (Sequence[float]): Width deflator in each stage." +msgstr "" + +#: mmcls.models.backbones.repvgg.RepVGG:13 of +msgid "**group_layer_map** (dict | None): RepVGG Block that declares the need to apply group convolution." +msgstr "" + +#: mmcls.models.backbones.repvgg.RepVGG:15 of +msgid "**se_cfg** (dict | None): SE Layer config." +msgstr "" + +#: mmcls.models.backbones.repvgg.RepVGG:16 of +msgid "" +"**stem_channels** (int, optional): The stem channels, the final stem channels will be ``min(stem_channels, " +"base_channels*width_factor[0])``. If not set here, 64 is used by default in the code." +msgstr "" + +#: mmcls.models.backbones.repvgg.RepVGG:23 of +msgid "Base channels of RepVGG backbone, work with width_factor together. Defaults to 64." +msgstr "" + +#: mmcls.models.backbones.repvgg.RepVGG:29 of +msgid "Strides of the first block of each stage. Defaults to ``(2, 2, 2, 2)``." +msgstr "" + +#: mmcls.models.backbones.repvgg.RepVGG:32 mmcls.models.backbones.res2net.Res2Net:19 of +msgid "Dilation of each stage. Defaults to ``(1, 1, 1, 1)``." +msgstr "" + +#: mmcls.models.backbones.repvgg.RepVGG:57 of +msgid "Whether to use the MTSPPF block. 
Defaults to False." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.Res2Net.rst:7 +msgid "Res2Net" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.res2net.Res2Net:1 of +msgid "Res2Net backbone." +msgstr "" + +#: mmcls.models.backbones.res2net.Res2Net:3 of +msgid "" +"A PyTorch implement of : `Res2Net: A New Multi-scale Backbone Architecture `_" +msgstr "" + +#: mmcls.models.backbones.res2net.Res2Net:6 of +msgid "Depth of Res2Net, choose from {50, 101, 152}." +msgstr "" + +#: mmcls.models.backbones.res2net.Res2Net:8 of +msgid "Scales used in Res2Net. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.res2net.Res2Net:10 of +msgid "Basic width of each scale. Defaults to 26." +msgstr "" + +#: mmcls.models.backbones.res2net.Res2Net:14 of +msgid "Number of Res2Net stages. Defaults to 4." +msgstr "" + +#: mmcls.models.backbones.res2net.Res2Net:16 of +msgid "Strides of the first block of each stage. Defaults to ``(1, 2, 2, 2)``." +msgstr "" + +#: mmcls.models.backbones.res2net.Res2Net:25 of +msgid "" +"\"pytorch\" or \"caffe\". If set to \"pytorch\", the stride-two layer is the 3x3 conv layer, otherwise the " +"stride-two layer is the first 1x1 conv layer. Defaults to \"pytorch\"." +msgstr "" + +#: mmcls.models.backbones.res2net.Res2Net:29 of +msgid "Replace 7x7 conv in input stem with 3 3x3 conv. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.res2net.Res2Net:32 of +msgid "Use AvgPool instead of stride conv when downsampling in the bottle2neck. Defaults to True." +msgstr "" + +#: mmcls.models.backbones.res2net.Res2Net:38 of +msgid "Dictionary to construct and config norm layer. Defaults to ``dict(type='BN', requires_grad=True)``." +msgstr "" + +#: mmcls.models.backbones.res2net.Res2Net:48 of +msgid "" +"Whether to use zero init for last norm layer in resblocks to let them behave as identity. Defaults to True." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.ResNeSt.rst:7 +msgid "ResNeSt" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.resnest.ResNeSt:1 of +msgid "ResNeSt backbone." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:3 of +msgid "Please refer to the `paper `__ for details." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:6 of +msgid "Network depth, from {50, 101, 152, 200}." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:8 mmcls.models.backbones.resnext.ResNeXt:8 +#: mmcls.models.backbones.seresnext.SEResNeXt:8 of +msgid "Groups of conv2 in Bottleneck. Default: 32." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:10 mmcls.models.backbones.resnext.ResNeXt:10 +#: mmcls.models.backbones.seresnext.SEResNeXt:10 of +msgid "Width per group of conv2 in Bottleneck. Default: 4." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:13 of +msgid "Radix of SpltAtConv2d. Default: 2" +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:15 of +msgid "Reduction factor of SplitAttentionConv2d. Default: 4." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:18 of +msgid "Whether to use average pool for stride in Bottleneck. Default: True." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:23 mmcls.models.backbones.resnet.ResNet:10 +#: mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:12 mmcls.models.backbones.resnext.ResNeXt:15 +#: mmcls.models.backbones.seresnet.SEResNet:12 mmcls.models.backbones.seresnext.SEResNeXt:17 of +msgid "Output channels of the stem layer. Default: 64." 
+msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:25 mmcls.models.backbones.resnet.ResNet:14 +#: mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:16 mmcls.models.backbones.resnext.ResNeXt:17 +#: mmcls.models.backbones.seresnet.SEResNet:14 mmcls.models.backbones.seresnext.SEResNeXt:19 of +msgid "Stages of the network. Default: 4." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:27 mmcls.models.backbones.resnet.ResNet:16 +#: mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:18 mmcls.models.backbones.resnext.ResNeXt:19 +#: mmcls.models.backbones.seresnet.SEResNet:16 mmcls.models.backbones.seresnext.SEResNeXt:21 of +msgid "Strides of the first block of each stage. Default: ``(1, 2, 2, 2)``." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:30 mmcls.models.backbones.resnet.ResNet:19 +#: mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:21 mmcls.models.backbones.resnext.ResNeXt:22 +#: mmcls.models.backbones.seresnet.SEResNet:19 mmcls.models.backbones.seresnext.SEResNeXt:24 of +msgid "Dilation of each stage. Default: ``(1, 1, 1, 1)``." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:33 mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:24 +#: mmcls.models.backbones.resnext.ResNeXt:25 mmcls.models.backbones.seresnet.SEResNet:22 +#: mmcls.models.backbones.seresnext.SEResNeXt:27 of +msgid "" +"Output from which stages. If only one stage is specified, a single tensor (feature map) is returned, " +"otherwise multiple stages are specified, a tuple of tensors will be returned. Default: ``(3, )``." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:38 mmcls.models.backbones.resnet.ResNet:25 +#: mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:29 mmcls.models.backbones.resnext.ResNeXt:30 +#: mmcls.models.backbones.seresnet.SEResNet:27 mmcls.models.backbones.seresnext.SEResNeXt:32 of +msgid "" +"`pytorch` or `caffe`. If set to \"pytorch\", the stride-two layer is the 3x3 conv layer, otherwise the " +"stride-two layer is the first 1x1 conv layer." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:42 mmcls.models.backbones.resnet.ResNet:29 +#: mmcls.models.backbones.resnext.ResNeXt:34 mmcls.models.backbones.seresnet.SEResNet:31 +#: mmcls.models.backbones.seresnext.SEResNeXt:36 of +msgid "Replace 7x7 conv in input stem with 3 3x3 conv. Default: False." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:45 mmcls.models.backbones.resnet.ResNet:32 +#: mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:36 mmcls.models.backbones.resnext.ResNeXt:37 +#: mmcls.models.backbones.seresnet.SEResNet:34 mmcls.models.backbones.seresnext.SEResNeXt:39 of +msgid "Use AvgPool instead of stride conv when downsampling in the bottleneck. Default: False." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:53 mmcls.models.backbones.resnet.ResNet:40 +#: mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:44 mmcls.models.backbones.resnext.ResNeXt:45 +#: mmcls.models.backbones.seresnet.SEResNet:42 mmcls.models.backbones.seresnext.SEResNeXt:47 of +msgid "The config dict for norm layers." +msgstr "" + +#: mmcls.models.backbones.resnest.ResNeSt:62 mmcls.models.backbones.resnet.ResNet:49 +#: mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:53 mmcls.models.backbones.resnext.ResNeXt:54 +#: mmcls.models.backbones.seresnet.SEResNet:51 mmcls.models.backbones.seresnext.SEResNeXt:56 of +msgid "" +"Whether to use zero init for last norm layer in resblocks to let them behave as identity. Default: True." 
+msgstr "" + +#: ../../api/generated/mmcls.models.backbones.ResNeXt.rst:7 +msgid "ResNeXt" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.resnext.ResNeXt:1 of +msgid "ResNeXt backbone." +msgstr "" + +#: mmcls.models.backbones.resnext.ResNeXt:3 of +msgid "Please refer to the `paper `__ for details." +msgstr "" + +#: mmcls.models.backbones.resnext.ResNeXt:6 mmcls.models.backbones.seresnet.SEResNet:6 +#: mmcls.models.backbones.seresnext.SEResNeXt:6 of +msgid "Network depth, from {50, 101, 152}." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.ResNet.rst:7 +msgid "ResNet" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.resnet.ResNet:1 of +msgid "ResNet backbone." +msgstr "" + +#: mmcls.models.backbones.resnet.ResNet:3 of +msgid "Please refer to the `paper `__ for details." +msgstr "" + +#: mmcls.models.backbones.resnet.ResNet:6 mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:8 of +msgid "Network depth, from {18, 34, 50, 101, 152}." +msgstr "" + +#: mmcls.models.backbones.resnet.ResNet:12 mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:14 of +msgid "Middle channels of the first stage. Default: 64." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.ResNetV1c.rst:7 +msgid "ResNetV1c" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.resnet.ResNetV1c:1 of +msgid "ResNetV1c backbone." +msgstr "" + +#: mmcls.models.backbones.resnet.ResNetV1c:3 mmcls.models.backbones.resnet.ResNetV1d:3 of +msgid "This variant is described in `Bag of Tricks. `_." +msgstr "" + +#: mmcls.models.backbones.resnet.ResNetV1c:6 of +msgid "" +"Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv in the input stem with three 3x3 " +"convs." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.ResNetV1d.rst:7 +msgid "ResNetV1d" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.resnet.ResNetV1d:1 of +msgid "ResNetV1d backbone." +msgstr "" + +#: mmcls.models.backbones.resnet.ResNetV1d:6 of +msgid "" +"Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in the input stem with three 3x3 " +"convs. And in the downsampling block, a 2x2 avg_pool with stride 2 is added before conv, whose stride is " +"changed to 1." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.ResNet_CIFAR.rst:7 +msgid "ResNet_CIFAR" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:1 of +msgid "ResNet backbone for CIFAR." +msgstr "" + +#: mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:3 of +msgid "" +"Compared to standard ResNet, it uses `kernel_size=3` and `stride=1` in conv1, and does not apply " +"MaxPoolinng after stem. It has been proven to be more efficient than standard ResNet in other public " +"codebase, e.g., `https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py`." +msgstr "" + +#: mmcls.models.backbones.resnet_cifar.ResNet_CIFAR:33 of +msgid "This network has specific designed stem, thus it is asserted to be False." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.SEResNeXt.rst:7 +msgid "SEResNeXt" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.seresnext.SEResNeXt:1 of +msgid "SEResNeXt backbone." +msgstr "" + +#: mmcls.models.backbones.seresnet.SEResNet:3 mmcls.models.backbones.seresnext.SEResNeXt:3 of +msgid "Please refer to the `paper `__ for details." +msgstr "" + +#: mmcls.models.backbones.seresnet.SEResNet:8 mmcls.models.backbones.seresnext.SEResNeXt:13 of +msgid "Squeeze ratio in SELayer. 
Default: 16."
+msgstr ""
+
+#: ../../api/generated/mmcls.models.backbones.SEResNet.rst:7
+msgid "SEResNet"
+msgstr ""
+
+#: ../../api/models.rst:111::1 mmcls.models.backbones.seresnet.SEResNet:1 of
+msgid "SEResNet backbone."
+msgstr ""
+
+#: ../../api/generated/mmcls.models.backbones.SVT.rst:7
+msgid "SVT"
+msgstr ""
+
+#: ../../api/models.rst:111::1 mmcls.models.backbones.twins.SVT:1 of
+msgid "The backbone of Twins-SVT."
+msgstr ""
+
+#: mmcls.models.backbones.twins.SVT:7 of
+msgid ""
+"SVT architecture, a str value in arch zoo or a detailed configuration dict with 8 keys, and the length of "
+"all the values in dict should be the same: - depths (List[int]): The number of encoder layers in each "
+"stage. - embed_dims (List[int]): Embedding dimension in each stage. - patch_sizes (List[int]): The patch "
+"sizes in each stage. - num_heads (List[int]): Numbers of attention heads in each stage. - strides "
+"(List[int]): The strides in each stage. - mlp_ratios (List[int]): The ratios of mlp in each stage. - "
+"sr_ratios (List[int]): The ratios of GSA-encoder layers in each stage. - window_sizes (List[int]): The "
+"window sizes in LSA-encoder layers in each stage."
+msgstr ""
+
+#: mmcls.models.backbones.twins.SVT:7 of
+msgid ""
+"SVT architecture, a str value in arch zoo or a detailed configuration dict with 8 keys, and the length of "
+"all the values in dict should be the same:"
+msgstr ""
+
+#: mmcls.models.backbones.twins.SVT:19 of
+msgid "window_sizes (List[int]): The window sizes in LSA-encoder layers in each stage."
+msgstr ""
+
+#: mmcls.models.backbones.twins.SVT:24 of
+msgid "Output from which stages. Defaults to (3, )."
+msgstr ""
+
+#: mmcls.models.backbones.twins.SVT:31 of
+msgid "Dropout ratio of attention weight. Defaults to 0.0."
+msgstr ""
+
+#: mmcls.models.backbones.twins.SVT:34 of
+msgid "Stochastic depth rate. Defaults to 0.2."
+msgstr ""
+
+#: ../../api/generated/mmcls.models.backbones.ShuffleNetV1.rst:7
+msgid "ShuffleNetV1"
+msgstr ""
+
+#: ../../api/models.rst:111::1 mmcls.models.backbones.shufflenet_v1.ShuffleNetV1:1 of
+msgid "ShuffleNetV1 backbone."
+msgstr ""
+
+#: mmcls.models.backbones.shufflenet_v1.ShuffleNetV1:3 of
+msgid "The number of groups to be used in grouped 1x1 convolutions in each ShuffleUnit. Default: 3."
+msgstr ""
+
+#: mmcls.models.backbones.shufflenet_v1.ShuffleNetV1:6 mmcls.models.backbones.shufflenet_v2.ShuffleNetV2:3 of
+msgid "Width multiplier - adjusts the number of channels in each layer by this amount. Default: 1.0."
+msgstr ""
+
+#: mmcls.models.backbones.shufflenet_v1.ShuffleNetV1:9 of
+msgid "Output from which stages. Default: (2, )."
+msgstr ""
+
+#: mmcls.models.backbones.shufflenet_v1.ShuffleNetV1.make_layer:1 of
+msgid "Stack ShuffleUnit blocks to make a layer."
+msgstr ""
+
+#: mmcls.models.backbones.shufflenet_v1.ShuffleNetV1.make_layer:3 of
+msgid "out_channels of the block."
+msgstr ""
+
+#: mmcls.models.backbones.shufflenet_v1.ShuffleNetV1.make_layer:5 of
+msgid "Number of blocks."
+msgstr ""
+
+#: mmcls.models.backbones.shufflenet_v1.ShuffleNetV1.make_layer:7 of
+msgid ""
+"Whether it is the first ShuffleUnit of a sequence of ShuffleUnits. Default: False, which means using the "
+"grouped 1x1 convolution."
+msgstr ""
+
+#: ../../api/generated/mmcls.models.backbones.ShuffleNetV2.rst:7
+msgid "ShuffleNetV2"
+msgstr ""
+
+#: ../../api/models.rst:111::1 mmcls.models.backbones.shufflenet_v2.ShuffleNetV2:1 of
+msgid "ShuffleNetV2 backbone."
+msgstr "" + +#: mmcls.models.backbones.shufflenet_v2.ShuffleNetV2:6 of +msgid "Output from which stages. Default: (0, 1, 2, 3)." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.SwinTransformer.rst:7 +msgid "SwinTransformer" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.swin_transformer.SwinTransformer:1 of +msgid "Swin Transformer." +msgstr "" + +#: mmcls.models.backbones.swin_transformer.SwinTransformer:3 of +msgid "" +"A PyTorch implement of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows `_" +msgstr "" + +#: mmcls.models.backbones.swin_transformer.SwinTransformer:7 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:7 of +msgid "Inspiration from https://github.com/microsoft/Swin-Transformer" +msgstr "" + +#: mmcls.models.backbones.swin_transformer.SwinTransformer:10 of +msgid "" +"Swin Transformer architecture. If use string, choose from 'tiny', 'small', 'base' and 'large'. If use dict, " +"it should have below keys: - **embed_dims** (int): The dimensions of embedding. - **depths** (List[int]): " +"The number of blocks in each stage. - **num_heads** (List[int]): The number of heads in attention modules " +"of each stage. Defaults to 'tiny'." +msgstr "" + +#: mmcls.models.backbones.swin_transformer.SwinTransformer:10 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:10 of +msgid "" +"Swin Transformer architecture. If use string, choose from 'tiny', 'small', 'base' and 'large'. If use dict, " +"it should have below keys:" +msgstr "" + +#: mmcls.models.backbones.swin_transformer.SwinTransformer:32 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:35 mmcls.models.backbones.van.VAN:25 of +msgid "Dropout rate after embedding. Defaults to 0." +msgstr "" + +#: mmcls.models.backbones.swin_transformer.SwinTransformer:42 +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:42 of +msgid "Select the interpolate mode for absolute position embeding vector resize. Defaults to \"bicubic\"." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.SwinTransformerV2.rst:7 +msgid "SwinTransformerV2" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:1 of +msgid "Swin Transformer V2." +msgstr "" + +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:3 of +msgid "" +"A PyTorch implement of : `Swin Transformer V2: Scaling Up Capacity and Resolution `_" +msgstr "" + +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:10 of +msgid "" +"Swin Transformer architecture. If use string, choose from 'tiny', 'small', 'base' and 'large'. If use dict, " +"it should have below keys: - **embed_dims** (int): The dimensions of embedding. - **depths** (List[int]): " +"The number of blocks in each stage. - **num_heads** (List[int]): The number of heads in attention modules " +"of each stage. - **extra_norm_every_n_blocks** (int): Add extra norm at the end of main branch every n " +"blocks. Defaults to 'tiny'." +msgstr "" + +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:18 of +msgid "**extra_norm_every_n_blocks** (int): Add extra norm at the end of main branch every n blocks." +msgstr "" + +#: mmcls.models.backbones.swin_transformer_v2.SwinTransformerV2:70 of +msgid "Pretrained window sizes of each layer." 
+msgstr "" + +#: ../../api/generated/mmcls.models.backbones.T2T_ViT.rst:7 +msgid "T2T_ViT" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.t2t_vit.T2T_ViT:1 of +msgid "Tokens-to-Token Vision Transformer (T2T-ViT)" +msgstr "" + +#: mmcls.models.backbones.t2t_vit.T2T_ViT:3 of +msgid "" +"A PyTorch implementation of `Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet " +"`_" +msgstr "" + +#: mmcls.models.backbones.t2t_vit.T2T_ViT:10 mmcls.models.utils.attention.ShiftWindowMSA:3 +#: mmcls.models.utils.attention.WindowMSA:4 mmcls.models.utils.attention.WindowMSAV2:8 +#: mmcls.models.utils.position_encoding.ConditionalPositionEncoding:6 of +msgid "Number of input channels." +msgstr "" + +#: mmcls.models.backbones.t2t_vit.T2T_ViT:12 of +msgid "Embedding dimension." +msgstr "" + +#: mmcls.models.backbones.t2t_vit.T2T_ViT:14 of +msgid "Num of transformer layers in encoder. Defaults to 14." +msgstr "" + +#: mmcls.models.backbones.t2t_vit.T2T_ViT:20 of +msgid "Dropout rate after position embedding. Defaults to 0." +msgstr "" + +#: mmcls.models.backbones.t2t_vit.T2T_ViT:40 of +msgid "Extra config of Tokens-to-Token module. Defaults to an empty dict." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.TIMMBackbone.rst:7 +msgid "TIMMBackbone" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.timm_backbone.TIMMBackbone:1 of +msgid "Wrapper to use backbones from timm library." +msgstr "" + +#: mmcls.models.backbones.timm_backbone.TIMMBackbone:3 of +msgid "" +"More details can be found in `timm `_. See especially " +"the document for `feature extraction `_." +msgstr "" + +#: mmcls.models.backbones.timm_backbone.TIMMBackbone:8 of +msgid "Name of timm model to instantiate." +msgstr "" + +#: mmcls.models.backbones.timm_backbone.TIMMBackbone:10 of +msgid "" +"Whether to extract feature pyramid (multi-scale feature maps from the deepest layer at each stride). For " +"Vision Transformer models that do not support this argument, set this False. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.timm_backbone.TIMMBackbone:15 of +msgid "Whether to load pretrained weights. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.timm_backbone.TIMMBackbone:18 of +msgid "" +"Path of checkpoint to load at the last of ``timm.create_model``. Defaults to empty string, which means not " +"loading." +msgstr "" + +#: mmcls.models.backbones.timm_backbone.TIMMBackbone:24 of +msgid "Initialization config dict of OpenMMLab projects. Defaults to None." +msgstr "" + +#: mmcls.models.backbones.timm_backbone.TIMMBackbone:27 of +msgid "Other timm & model specific arguments." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.TNT.rst:7 +msgid "TNT" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.tnt.TNT:1 of +msgid "Transformer in Transformer." +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:3 of +msgid "A PyTorch implement of: `Transformer in Transformer `_" +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:6 of +msgid "Inspiration from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tnt.py" +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:9 of +msgid "Vision Transformer architecture Default: 'b'" +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:12 of +msgid "Input image size. Defaults to 224" +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:14 of +msgid "The patch size. Deault to 16" +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:16 of +msgid "Number of input channels. 
Defaults to 3" +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:18 of +msgid "A ratio to calculate the hidden_dims in ffn layer. Default: 4" +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:21 of +msgid "Enable bias for qkv if True. Default False" +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:23 of +msgid "Probability of an element to be zeroed after the feed forward layer. Default 0." +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:26 of +msgid "The drop out rate for attention layer. Default 0." +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:29 of +msgid "stochastic depth rate. Default 0." +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:31 of +msgid "The activation config for FFNs. Defaults to GELU." +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:33 of +msgid "Config dict for normalization layer. Default layer normalization" +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:36 of +msgid "" +"The stride of the conv2d layer. We use a conv2d layer and a unfold layer to implement image to pixel " +"embedding." +msgstr "" + +#: mmcls.models.backbones.tnt.TNT:39 of +msgid "The number of fully-connected layers for FFNs. Default 2" +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.VAN.rst:7 +msgid "VAN" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.van.VAN:1 of +msgid "Visual Attention Network." +msgstr "" + +#: mmcls.models.backbones.van.VAN:3 of +msgid "A PyTorch implement of : `Visual Attention Network `_" +msgstr "" + +#: mmcls.models.backbones.van.VAN:6 of +msgid "Inspiration from https://github.com/Visual-Attention-Network/VAN-Classification" +msgstr "" + +#: mmcls.models.backbones.van.VAN:9 of +msgid "" +"Visual Attention Network architecture. If use string, choose from 'tiny', 'small', 'base' and 'large'. If " +"use dict, it should have below keys: - **embed_dims** (List[int]): The dimensions of embedding. - " +"**depths** (List[int]): The number of blocks in each stage. - **ffn_ratios** (List[int]): The number of " +"expansion ratio of feedforward network hidden layer channels. Defaults to 'tiny'." +msgstr "" + +#: mmcls.models.backbones.van.VAN:9 of +msgid "" +"Visual Attention Network architecture. If use string, choose from 'tiny', 'small', 'base' and 'large'. If " +"use dict, it should have below keys:" +msgstr "" + +#: mmcls.models.backbones.van.VAN:13 of +msgid "**embed_dims** (List[int]): The dimensions of embedding." +msgstr "" + +#: mmcls.models.backbones.van.VAN:15 of +msgid "" +"**ffn_ratios** (List[int]): The number of expansion ratio of feedforward network hidden layer channels." +msgstr "" + +#: mmcls.models.backbones.van.VAN:20 of +msgid "The patch size in patch embeddings. Defaults to [7, 3, 3, 3]." +msgstr "" + +#: mmcls.models.backbones.van.VAN:42 of +msgid "The extra config of each block. Defaults to empty dicts." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.VGG.rst:7 +msgid "VGG" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.vgg.VGG:1 of +msgid "VGG backbone." +msgstr "" + +#: mmcls.models.backbones.vgg.VGG:3 of +msgid "Depth of vgg, from {11, 13, 16, 19}." +msgstr "" + +#: mmcls.models.backbones.vgg.VGG:5 of +msgid "Use BatchNorm or not." +msgstr "" + +#: mmcls.models.backbones.vgg.VGG:7 of +msgid "number of classes for classification." +msgstr "" + +#: mmcls.models.backbones.vgg.VGG:9 of +msgid "VGG stages, normally 5." +msgstr "" + +#: mmcls.models.backbones.vgg.VGG:13 of +msgid "" +"Output from which stages. When it is None, the default behavior depends on whether num_classes is " +"specified. 
If num_classes <= 0, the default value is (4, ), output the last feature map before classifier. " +"If num_classes > 0, the default value is (5, ), output the classification score. Default: None." +msgstr "" + +#: mmcls.models.backbones.vgg.VGG:20 of +msgid "Stages to be frozen (all param fixed). -1 means not freezing any parameters." +msgstr "" + +#: mmcls.models.backbones.vgg.VGG:27 of +msgid "Whether to use ceil_mode of MaxPool. Default: False." +msgstr "" + +#: mmcls.models.backbones.vgg.VGG:29 of +msgid "Whether to keep the last pooling before classifier. Default: True." +msgstr "" + +#: ../../api/generated/mmcls.models.backbones.VisionTransformer.rst:7 +msgid "VisionTransformer" +msgstr "" + +#: ../../api/models.rst:111::1 mmcls.models.backbones.vision_transformer.VisionTransformer:1 of +msgid "Vision Transformer." +msgstr "" + +#: mmcls.models.backbones.vision_transformer.VisionTransformer:3 of +msgid "" +"A PyTorch implement of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale " +"`_" +msgstr "" + +#: mmcls.models.backbones.vision_transformer.VisionTransformer:6 of +msgid "" +"Vision Transformer architecture. If use string, choose from 'small', 'base', 'large', 'deit-tiny', 'deit-" +"small' and 'deit-base'. If use dict, it should have below keys: - **embed_dims** (int): The dimensions of " +"embedding. - **num_layers** (int): The number of transformer encoder layers. - **num_heads** (int): The " +"number of heads in attention modules. - **feedforward_channels** (int): The hidden dimensions in " +"feedforward modules. Defaults to 'base'." +msgstr "" + +#: mmcls.models.backbones.vision_transformer.VisionTransformer:47 of +msgid "" +"Whether or not to use the mean patch token for classification. If True, the model will only take the " +"average of all patch tokens. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.vision_transformer.VisionTransformer:57 of +msgid "Whether or not use BEiT-style. Defaults to False." +msgstr "" + +#: mmcls.models.backbones.vision_transformer.VisionTransformer:59 of +msgid "The initialization value for the learnable scaling of attention and FFN. Defaults to 0.1." +msgstr "" + +#: mmcls.models.backbones.vision_transformer.VisionTransformer.resize_pos_embed:1 of +msgid "Interface for backward-compatibility." +msgstr "" + +#: ../../api/generated/mmcls.models.build_backbone.rst:2 +msgid "mmcls.models.build\\_backbone" +msgstr "" + +#: ../../api/models.rst:34::1 mmcls.models.builder.build_backbone:1 of +msgid "Build backbone." +msgstr "" + +#: ../../api/generated/mmcls.models.build_classifier.rst:2 +msgid "mmcls.models.build\\_classifier" +msgstr "" + +#: ../../api/models.rst:34::1 mmcls.models.builder.build_classifier:1 of +msgid "Build classifier." +msgstr "" + +#: ../../api/generated/mmcls.models.build_head.rst:2 +msgid "mmcls.models.build\\_head" +msgstr "" + +#: ../../api/models.rst:34::1 mmcls.models.builder.build_head:1 of +msgid "Build head." +msgstr "" + +#: ../../api/generated/mmcls.models.build_loss.rst:2 +msgid "mmcls.models.build\\_loss" +msgstr "" + +#: ../../api/models.rst:34::1 mmcls.models.builder.build_loss:1 of +msgid "Build loss." +msgstr "" + +#: ../../api/generated/mmcls.models.build_neck.rst:2 +msgid "mmcls.models.build\\_neck" +msgstr "" + +#: ../../api/models.rst:34::1 mmcls.models.builder.build_neck:1 of +msgid "Build neck." 
+msgstr "" + +#: ../../api/generated/mmcls.models.classifiers.BaseClassifier.rst:7 +msgid "BaseClassifier" +msgstr "" + +#: ../../api/models.rst:49::1 mmcls.models.classifiers.base.BaseClassifier:1 of +msgid "Base class for classifiers." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier:6 of +msgid "" +"The config for preprocessing input data. If None, it will use \"BaseDataPreprocessor\" as type, see :class:" +"`mmengine.model.BaseDataPreprocessor` for more details. Defaults to None." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier:16 of +msgid "dict" +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier:20 of +msgid "" +"An extra data pre-processing module, which processes data from dataloader to the format accepted by :meth:" +"`forward`." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier:24 of +msgid ":obj:`mmengine.model.BaseDataPreprocessor`" +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.extract_feat:1 +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:1 of +msgid "Extract features from the input tensor with shape (N, C, ...)." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.extract_feat:3 of +msgid "The sub-classes are recommended to implement this method to extract features from backbone and neck." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.extract_feat:6 +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:3 of +msgid "A batch of inputs. The shape of it should be ``(num_samples, num_channels, *img_shape)``." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.extract_feats:1 of +msgid "Extract features from a sequence of input tensor." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.extract_feats:3 of +msgid "A sequence of input tensor. It can be used in augmented inference." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.extract_feats:6 of +msgid "Other keyword arguments accepted by :meth:`extract_feat`." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.extract_feats:8 of +msgid "Features of every input tensor." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:1 +#: mmcls.models.classifiers.image.ImageClassifier.forward:1 of +msgid "The unified entry for a forward process in both training and test." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:3 +#: mmcls.models.classifiers.image.ImageClassifier.forward:3 of +msgid "The method should accept three modes: \"tensor\", \"predict\" and \"loss\":" +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:5 +#: mmcls.models.classifiers.image.ImageClassifier.forward:5 of +msgid "" +"\"tensor\": Forward the whole network and return tensor or tuple of tensor without any post-processing, " +"same as a common nn.Module." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:7 of +msgid "" +"\"predict\": Forward and return the predictions, which are fully processed to a list of :obj:" +"`BaseDataElement`." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:9 +#: mmcls.models.classifiers.image.ImageClassifier.forward:9 of +msgid "\"loss\": Forward and return a dict of losses according to the given inputs and data samples." 
+msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:12 +#: mmcls.models.classifiers.image.ImageClassifier.forward:12 of +msgid "" +"Note that this method doesn't handle neither back propagation nor optimizer updating, which are done in " +"the :meth:`train_step`." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:15 +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.loss:3 +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.predict:3 +#: mmcls.models.classifiers.image.ImageClassifier.forward:15 +#: mmcls.models.classifiers.image.ImageClassifier.loss:3 +#: mmcls.models.classifiers.image.ImageClassifier.predict:3 +#: mmcls.models.classifiers.timm.TimmClassifier.loss:3 mmcls.models.classifiers.timm.TimmClassifier.predict:3 +#: of +msgid "The input tensor with shape (N, C, ...) in general." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:18 +#: mmcls.models.classifiers.image.ImageClassifier.forward:18 of +msgid "The annotation data of every samples. It's required if ``mode=\"loss\"``. Defaults to None." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:22 +#: mmcls.models.classifiers.image.ImageClassifier.forward:22 of +msgid "Return what kind of value. Defaults to 'tensor'." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:25 of +msgid "" +"The return type depends on ``mode``. - If ``mode=\"tensor\"``, return a tensor or a tuple of tensor. - If " +"``mode=\"predict\"``, return a list of :obj:`mmengine.BaseDataElement`. - If ``mode=\"loss\"``, return a " +"dict of tensor." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:25 +#: mmcls.models.classifiers.image.ImageClassifier.forward:25 of +msgid "The return type depends on ``mode``." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:27 +#: mmcls.models.classifiers.image.ImageClassifier.forward:27 of +msgid "If ``mode=\"tensor\"``, return a tensor or a tuple of tensor." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:28 of +msgid "If ``mode=\"predict\"``, return a list of :obj:`mmengine.BaseDataElement`." +msgstr "" + +#: mmcls.models.classifiers.base.BaseClassifier.forward:30 +#: mmcls.models.classifiers.image.ImageClassifier.forward:30 of +msgid "If ``mode=\"loss\"``, return a dict of tensor." +msgstr "" + +#: mmcls.models.classifiers.BaseClassifier.with_head:1 of +msgid "Whether the classifier has a head." +msgstr "" + +#: mmcls.models.classifiers.BaseClassifier.with_neck:1 of +msgid "Whether the classifier has a neck." +msgstr "" + +#: ../../api/generated/mmcls.models.classifiers.HuggingFaceClassifier.rst:7 +msgid "HuggingFaceClassifier" +msgstr "" + +#: ../../api/models.rst:49::1 mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:1 of +msgid "Image classifiers for HuggingFace model." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:3 of +msgid "" +"This class accepts all positional and keyword arguments of the API ``from_pretrained`` (when " +"``pretrained=True``) and ``from_config`` (when ``pretrained=False``) of `transformers." +"AutoModelForImageClassification`_ and use it to create a model from hugging-face." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:8 of +msgid "" +"It can load checkpoints of hugging-face directly, and the saved checkpoints also can be directly load by " +"hugging-face." 
+msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:11 of +msgid "Please confirm that you have installed ``transfromers`` if you want to use it." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:16 of +msgid "The name of the model to use in hugging-face." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:18 of +msgid "Whether to load pretrained checkpoint from hugging-face. Defaults to False." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:21 of +msgid "Other positional arguments of the method `from_pretrained` or `from_config`." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:23 +#: mmcls.models.classifiers.timm.TimmClassifier:14 mmcls.models.heads.cls_head.ClsHead:3 +#: mmcls.models.heads.linear_head.LinearClsHead:8 mmcls.models.heads.margin_head.ArcFaceClsHead:82 of +msgid "Config of classification loss. Defaults to ``dict(type='CrossEntropyLoss', loss_weight=1.0)``." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:26 +#: mmcls.models.classifiers.image.ImageClassifier:17 mmcls.models.classifiers.timm.TimmClassifier:17 of +msgid "" +"The training setting. The acceptable fields are: - augments (List[dict]): The batch augmentation methods " +"to use. More details can be found in :mod:`mmcls.model.utils.augment`. Defaults to None." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:26 +#: mmcls.models.classifiers.image.ImageClassifier:17 mmcls.models.classifiers.timm.TimmClassifier:17 of +msgid "The training setting. The acceptable fields are:" +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:29 +#: mmcls.models.classifiers.image.ImageClassifier:20 mmcls.models.classifiers.timm.TimmClassifier:20 of +msgid "" +"augments (List[dict]): The batch augmentation methods to use. More details can be found in :mod:`mmcls." +"model.utils.augment`." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:37 +#: mmcls.models.classifiers.image.ImageClassifier:25 mmcls.models.classifiers.timm.TimmClassifier:28 of +msgid "" +"The config for preprocessing input data. If None or no specified type, it will use \"ClsDataPreprocessor\" " +"as type. See :class:`ClsDataPreprocessor` for more details. Defaults to None." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:42 +#: mmcls.models.classifiers.image.ImageClassifier:30 mmcls.models.classifiers.timm.TimmClassifier:33 +#: mmcls.models.heads.cls_head.ClsHead:13 mmcls.models.heads.margin_head.ArcFaceClsHead:85 of +msgid "the config to control the initialization. Defaults to None." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier:45 of +msgid "Other keyword arguments of the method `from_pretrained` or `from_config`." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.loss:1 +#: mmcls.models.classifiers.image.ImageClassifier.loss:1 mmcls.models.classifiers.timm.TimmClassifier.loss:1 +#: of +msgid "Calculate losses from a batch of inputs and data samples." 
+msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.loss:6 +#: mmcls.models.classifiers.image.ImageClassifier.loss:6 mmcls.models.classifiers.timm.TimmClassifier.loss:6 +#: mmcls.models.heads.cls_head.ClsHead.loss:8 +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead.loss:8 +#: mmcls.models.heads.margin_head.ArcFaceClsHead.loss:8 +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.loss:8 of +msgid "The annotation data of every samples." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.loss:9 +#: mmcls.models.classifiers.timm.TimmClassifier.loss:9 of +msgid "Other keyword arguments of the loss module." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.loss:11 +#: mmcls.models.classifiers.image.ImageClassifier.loss:10 mmcls.models.classifiers.timm.TimmClassifier.loss:11 +#: mmcls.models.heads.cls_head.ClsHead.loss:13 +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead.loss:13 +#: mmcls.models.heads.margin_head.ArcFaceClsHead.loss:13 +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.loss:13 of +msgid "a dictionary of loss components" +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.predict:1 +#: mmcls.models.classifiers.image.ImageClassifier.predict:1 +#: mmcls.models.classifiers.timm.TimmClassifier.predict:1 of +msgid "Predict results from a batch of inputs." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.predict:6 +#: mmcls.models.classifiers.image.ImageClassifier.predict:6 +#: mmcls.models.classifiers.timm.TimmClassifier.predict:6 of +msgid "The annotation data of every samples. Defaults to None." +msgstr "" + +#: mmcls.models.classifiers.hugging_face.HuggingFaceClassifier.predict:10 +#: mmcls.models.classifiers.timm.TimmClassifier.predict:10 of +msgid "The prediction results." +msgstr "" + +#: ../../api/generated/mmcls.models.classifiers.ImageClassifier.rst:7 +msgid "ImageClassifier" +msgstr "" + +#: ../../api/models.rst:49::1 mmcls.models.classifiers.image.ImageClassifier:1 of +msgid "Image classifiers for supervised classification task." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier:3 of +msgid "The backbone module. See :mod:`mmcls.models.backbones`." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier:6 of +msgid "The neck module to process features from backbone. See :mod:`mmcls.models.necks`. Defaults to None." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier:9 of +msgid "" +"The head module to do prediction and calculate loss from processed features. See :mod:`mmcls.models.heads`. " +"Notice that if the head is not set, almost all methods cannot be used except :meth:`extract_feat`. Defaults " +"to None." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier:14 of +msgid "The pretrained checkpoint path, support local path and remote path. Defaults to None." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:6 of +msgid "" +"Which stage to output the feature. Choose from: - \"backbone\": The output of backbone network. Returns a " +"tuple including multiple stages features. - \"neck\": The output of neck module. Returns a tuple " +"including multiple stages features. - \"pre_logits\": The feature before the final classification " +"linear layer. Usually returns a tensor. Defaults to \"neck\"." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:6 of +msgid "Which stage to output the feature. 
Choose from:" +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:8 of +msgid "\"backbone\": The output of backbone network. Returns a tuple including multiple stages features." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:10 of +msgid "\"neck\": The output of neck module. Returns a tuple including multiple stages features." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:12 of +msgid "\"pre_logits\": The feature before the final classification linear layer. Usually returns a tensor." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:15 of +msgid "Defaults to \"neck\"." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:18 of +msgid "" +"The output of specified stage. The output depends on detailed implementation. In general, the output of " +"backbone and neck is a tuple and the output of pre_logits is a tensor." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:26 of +msgid "Backbone output" +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:43 of +msgid "Neck output" +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.extract_feat:61 of +msgid "Pre-logits output (without the final linear classifier head)" +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.forward:7 of +msgid "" +"\"predict\": Forward and return the predictions, which are fully processed to a list of :obj:" +"`ClsDataSample`." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.forward:25 of +msgid "" +"The return type depends on ``mode``. - If ``mode=\"tensor\"``, return a tensor or a tuple of tensor. - If " +"``mode=\"predict\"``, return a list of :obj:`mmcls.structures.ClsDataSample`. - If ``mode=\"loss\"``, " +"return a dict of tensor." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.forward:28 of +msgid "If ``mode=\"predict\"``, return a list of :obj:`mmcls.structures.ClsDataSample`." +msgstr "" + +#: mmcls.models.classifiers.image.ImageClassifier.predict:9 of +msgid "Other keyword arguments accepted by the ``predict`` method of :attr:`head`." +msgstr "" + +#: ../../api/generated/mmcls.models.classifiers.TimmClassifier.rst:7 +msgid "TimmClassifier" +msgstr "" + +#: ../../api/models.rst:49::1 mmcls.models.classifiers.timm.TimmClassifier:1 of +msgid "Image classifiers for pytorch-image-models (timm) model." +msgstr "" + +#: mmcls.models.classifiers.timm.TimmClassifier:3 of +msgid "" +"This class accepts all positional and keyword arguments of the function `timm.models.create_model `_ and use it to create a model from pytorch-image-models." +msgstr "" + +#: mmcls.models.classifiers.timm.TimmClassifier:7 of +msgid "It can load checkpoints of timm directly, and the saved checkpoints also can be directly load by timm." +msgstr "" + +#: mmcls.models.classifiers.timm.TimmClassifier:10 of +msgid "Please confirm that you have installed ``timm`` if you want to use it." +msgstr "" + +#: mmcls.models.classifiers.timm.TimmClassifier:12 of +msgid "All positional arguments of the function `timm.models.create_model`." +msgstr "" + +#: mmcls.models.classifiers.timm.TimmClassifier:36 of +msgid "Other keyword arguments of the function `timm.models.create_model`." +msgstr "" + +#: ../../api/generated/mmcls.models.heads.ArcFaceClsHead.rst:7 +msgid "ArcFaceClsHead" +msgstr "" + +#: ../../api/models.rst:147::1 mmcls.models.heads.margin_head.ArcFaceClsHead:1 of +msgid "ArcFace classifier head." 
+msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:3 of +msgid "" +"A PyTorch implementation of paper `ArcFace: Additive Angular Margin Loss for Deep Face Recognition `_ and `Sub-center ArcFace: Boosting Face Recognition by Large-Scale Noisy Web " +"Faces `_" +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:10 of +msgid "To use ArcFace in config files." +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:12 of +msgid "use vanilla ArcFace" +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:27 of +msgid "use SubCenterArcFace with 3 sub-centers" +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:43 of +msgid "use SubCenterArcFace With CountPowerAdaptiveMargins" +msgstr "" + +#: mmcls.models.heads.conformer_head.ConformerHead:3 mmcls.models.heads.deit_head.DeiTClsHead:8 +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead:3 +#: mmcls.models.heads.linear_head.LinearClsHead:3 mmcls.models.heads.margin_head.ArcFaceClsHead:61 +#: mmcls.models.heads.vision_transformer_head.VisionTransformerClsHead:3 of +msgid "Number of categories excluding the background category." +msgstr "" + +#: mmcls.models.heads.conformer_head.ConformerHead:6 mmcls.models.heads.deit_head.DeiTClsHead:11 +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead:6 +#: mmcls.models.heads.linear_head.LinearClsHead:6 mmcls.models.heads.margin_head.ArcFaceClsHead:64 +#: mmcls.models.heads.multi_label_csra_head.CSRAClsHead:9 +#: mmcls.models.heads.stacked_head.StackedLinearClsHead:5 +#: mmcls.models.heads.vision_transformer_head.VisionTransformerClsHead:6 of +msgid "Number of channels in the input feature map." +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:66 of +msgid "Number of subcenters. Defaults to 1." +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:68 of +msgid "Scale factor of output logit. Defaults to 64.0." +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:70 of +msgid "" +"The penalty margin. Could be the fllowing formats: - float: The margin, would be same for all the " +"categories. - Sequence[float]: The category-based margins list. - str: A '.txt' file path which contains a " +"list. Each line represents the margin of a category, and the number in the i-th row indicates the " +"margin of the i-th class. Defaults to 0.5." +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:70 of +msgid "The penalty margin. Could be the fllowing formats:" +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:72 of +msgid "float: The margin, would be same for all the categories." +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:73 of +msgid "Sequence[float]: The category-based margins list." +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:74 of +msgid "" +"str: A '.txt' file path which contains a list. Each line represents the margin of a category, and the " +"number in the i-th row indicates the margin of the i-th class." +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:78 of +msgid "Defaults to 0.5." +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead:80 of +msgid "Avoid theta + m >= PI. Defaults to False." 
+msgstr "" + +#: mmcls.models.heads.cls_head.ClsHead.forward:1 mmcls.models.heads.conformer_head.ConformerHead.forward:1 +#: mmcls.models.heads.deit_head.DeiTClsHead.forward:1 +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead.forward:1 +#: mmcls.models.heads.linear_head.LinearClsHead.forward:1 +#: mmcls.models.heads.margin_head.ArcFaceClsHead.forward:1 +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.forward:1 +#: mmcls.models.heads.multi_label_csra_head.CSRAClsHead.forward:1 +#: mmcls.models.heads.multi_label_linear_head.MultiLabelLinearClsHead.forward:1 +#: mmcls.models.heads.stacked_head.StackedLinearClsHead.forward:1 +#: mmcls.models.heads.vision_transformer_head.VisionTransformerClsHead.forward:1 of +msgid "The forward process." +msgstr "" + +#: mmcls.models.heads.cls_head.ClsHead.loss:1 +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead.loss:1 +#: mmcls.models.heads.margin_head.ArcFaceClsHead.loss:1 +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.loss:1 of +msgid "Calculate losses from the classification score." +msgstr "" + +#: mmcls.models.heads.cls_head.ClsHead.loss:3 mmcls.models.heads.cls_head.ClsHead.predict:3 +#: mmcls.models.heads.conformer_head.ConformerHead.predict:3 +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead.loss:3 +#: mmcls.models.heads.margin_head.ArcFaceClsHead.loss:3 +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.loss:3 +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.predict:3 of +msgid "" +"The features extracted from the backbone. Multiple stage inputs are acceptable but only the last stage will " +"be used to classify. The shape of every item should be ``(num_samples, num_classes)``." +msgstr "" + +#: mmcls.models.heads.cls_head.ClsHead.loss:11 +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead.loss:11 +#: mmcls.models.heads.margin_head.ArcFaceClsHead.loss:11 +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.loss:11 of +msgid "Other keyword arguments to forward the loss module." +msgstr "" + +#: mmcls.models.heads.cls_head.ClsHead.pre_logits:1 +#: mmcls.models.heads.conformer_head.ConformerHead.pre_logits:1 +#: mmcls.models.heads.deit_head.DeiTClsHead.pre_logits:1 +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead.pre_logits:1 +#: mmcls.models.heads.linear_head.LinearClsHead.pre_logits:1 +#: mmcls.models.heads.margin_head.ArcFaceClsHead.pre_logits:1 +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.pre_logits:1 +#: mmcls.models.heads.multi_label_csra_head.CSRAClsHead.pre_logits:1 +#: mmcls.models.heads.multi_label_linear_head.MultiLabelLinearClsHead.pre_logits:1 +#: mmcls.models.heads.stacked_head.StackedLinearClsHead.pre_logits:1 +#: mmcls.models.heads.vision_transformer_head.VisionTransformerClsHead.pre_logits:1 of +msgid "The process before the final classification head." +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead.pre_logits:3 of +msgid "" +"The input ``feats`` is a tuple of tensor, and each tensor is the feature of a backbone stage. In " +"``ArcFaceHead``, we just obtain the feature of the last stage." +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead.set_margins:1 of +msgid "set margins of arcface head." +msgstr "" + +#: mmcls.models.heads.margin_head.ArcFaceClsHead.set_margins:3 of +msgid "The marigins." 
+msgstr "" + +#: ../../api/generated/mmcls.models.heads.CSRAClsHead.rst:7 +msgid "CSRAClsHead" +msgstr "" + +#: ../../api/models.rst:147::1 mmcls.models.heads.multi_label_csra_head.CSRAClsHead:1 of +msgid "Class-specific residual attention classifier head." +msgstr "" + +#: mmcls.models.heads.multi_label_csra_head.CSRAClsHead:3 of +msgid "" +"Please refer to the `Residual Attention: A Simple but Effective Method for Multi-Label Recognition (ICCV " +"2021) `_ for details." +msgstr "" + +#: mmcls.models.heads.multi_label_csra_head.CSRAClsHead:7 +#: mmcls.models.heads.stacked_head.StackedLinearClsHead:3 of +msgid "Number of categories." +msgstr "" + +#: mmcls.models.heads.multi_label_csra_head.CSRAClsHead:11 of +msgid "Number of residual at tensor heads." +msgstr "" + +#: mmcls.models.heads.multi_label_csra_head.CSRAClsHead:13 of +msgid "Config of classification loss." +msgstr "" + +#: mmcls.models.heads.multi_label_csra_head.CSRAClsHead:15 of +msgid "Lambda that combines global average and max pooling scores." +msgstr "" + +#: mmcls.models.heads.conformer_head.ConformerHead:9 mmcls.models.heads.multi_label_csra_head.CSRAClsHead:18 +#: of +msgid "The extra init config of layers. Defaults to use ``dict(type='Normal', layer='Linear', std=0.01)``." +msgstr "" + +#: mmcls.models.heads.multi_label_csra_head.CSRAClsHead.pre_logits:3 of +msgid "" +"The input ``feats`` is a tuple of tensor, and each tensor is the feature of a backbone stage. In " +"``CSRAClsHead``, we just obtain the feature of the last stage." +msgstr "" + +#: ../../api/generated/mmcls.models.heads.ClsHead.rst:7 +msgid "ClsHead" +msgstr "" + +#: ../../api/models.rst:147::1 mmcls.models.heads.cls_head.ClsHead:1 of +msgid "Classification head." +msgstr "" + +#: mmcls.models.heads.cls_head.ClsHead:6 mmcls.models.heads.linear_head.LinearClsHead:11 of +msgid "Top-k accuracy. Defaults to ``(1, )``." +msgstr "" + +#: mmcls.models.heads.cls_head.ClsHead:8 mmcls.models.heads.linear_head.LinearClsHead:13 of +msgid "" +"Whether to calculate accuracy during training. If you use batch augmentations like Mixup and CutMix during " +"training, it is pointless to calculate accuracy. Defaults to False." +msgstr "" + +#: mmcls.models.heads.cls_head.ClsHead.pre_logits:3 of +msgid "" +"The input ``feats`` is a tuple of tensor, and each tensor is the feature of a backbone stage. In " +"``ClsHead``, we just obtain the feature of the last stage." +msgstr "" + +#: mmcls.models.heads.cls_head.ClsHead.predict:1 mmcls.models.heads.conformer_head.ConformerHead.predict:1 +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.predict:1 of +msgid "Inference without augmentation." +msgstr "" + +#: mmcls.models.heads.cls_head.ClsHead.predict:8 mmcls.models.heads.conformer_head.ConformerHead.predict:8 +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.predict:8 of +msgid "" +"The annotation data of every samples. If not None, set ``pred_label`` of the input data samples. Defaults " +"to None." +msgstr "" + +#: mmcls.models.heads.cls_head.ClsHead.predict:13 mmcls.models.heads.conformer_head.ConformerHead.predict:13 +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.predict:13 of +msgid "A list of data samples which contains the predicted results." +msgstr "" + +#: ../../api/generated/mmcls.models.heads.ConformerHead.rst:7 +msgid "ConformerHead" +msgstr "" + +#: ../../api/models.rst:147::1 mmcls.models.heads.conformer_head.ConformerHead:1 +#: mmcls.models.heads.linear_head.LinearClsHead:1 of +msgid "Linear classifier head." 
+msgstr "" + +#: mmcls.models.heads.conformer_head.ConformerHead.pre_logits:3 of +msgid "" +"The input ``feats`` is a tuple of tensor, and each tensor is the feature of a backbone stage. In " +"``ConformerHead``, we just obtain the feature of the last stage." +msgstr "" + +#: ../../api/generated/mmcls.models.heads.DeiTClsHead.rst:7 +msgid "DeiTClsHead" +msgstr "" + +#: ../../api/models.rst:147::1 mmcls.models.heads.deit_head.DeiTClsHead:1 of +msgid "Distilled Vision Transformer classifier head." +msgstr "" + +#: mmcls.models.heads.deit_head.DeiTClsHead:3 of +msgid "" +"Comparing with the :class:`VisionTransformerClsHead`, this head adds an extra linear layer to handle the " +"dist token. The final classification score is the average of both linear transformation results of " +"``cls_token`` and ``dist_token``." +msgstr "" + +#: mmcls.models.heads.deit_head.DeiTClsHead:13 +#: mmcls.models.heads.vision_transformer_head.VisionTransformerClsHead:8 of +msgid "Number of the dimensions for hidden layer. Defaults to None, which means no extra hidden layer." +msgstr "" + +#: mmcls.models.heads.deit_head.DeiTClsHead:16 +#: mmcls.models.heads.vision_transformer_head.VisionTransformerClsHead:11 of +msgid "The activation config. Only available during pre-training. Defaults to ``dict(type='Tanh')``." +msgstr "" + +#: mmcls.models.heads.deit_head.DeiTClsHead:19 +#: mmcls.models.heads.vision_transformer_head.VisionTransformerClsHead:14 of +msgid "The extra initialization configs. Defaults to ``dict(type='Constant', layer='Linear', val=0)``." +msgstr "" + +#: mmcls.models.heads.deit_head.DeiTClsHead.pre_logits:3 of +msgid "" +"The input ``feats`` is a tuple of list of tensor, and each tensor is the feature of a backbone stage. In " +"``DeiTClsHead``, we obtain the feature of the last stage and forward in hidden layer if exists." +msgstr "" + +#: ../../api/generated/mmcls.models.heads.EfficientFormerClsHead.rst:7 +msgid "EfficientFormerClsHead" +msgstr "" + +#: ../../api/models.rst:147::1 mmcls.models.heads.efficientformer_head.EfficientFormerClsHead:1 +#: of +msgid "EfficientFormer classifier head." +msgstr "" + +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead:8 of +msgid "Whether use a additional distilled head. Defaults to True." +msgstr "" + +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead:11 of +msgid "The extra initialization configs. Defaults to ``dict(type='Normal', layer='Linear', std=0.01)``." +msgstr "" + +#: mmcls.models.heads.efficientformer_head.EfficientFormerClsHead.pre_logits:3 of +msgid "" +"The input ``feats`` is a tuple of tensor, and each tensor is the feature of a backbone stage. In :" +"obj`EfficientFormerClsHead`, we just obtain the feature of the last stage." +msgstr "" + +#: ../../api/generated/mmcls.models.heads.LinearClsHead.rst:7 +msgid "LinearClsHead" +msgstr "" + +#: mmcls.models.heads.linear_head.LinearClsHead:18 of +msgid "" +"the config to control the initialization. Defaults to ``dict(type='Normal', layer='Linear', std=0.01)``." +msgstr "" + +#: mmcls.models.heads.linear_head.LinearClsHead.pre_logits:3 of +msgid "" +"The input ``feats`` is a tuple of tensor, and each tensor is the feature of a backbone stage. In " +"``LinearClsHead``, we just obtain the feature of the last stage." 
+msgstr "" + +#: ../../api/generated/mmcls.models.heads.MultiLabelClsHead.rst:7 +msgid "MultiLabelClsHead" +msgstr "" + +#: ../../api/models.rst:147::1 mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead:1 of +msgid "Classification head for multilabel task." +msgstr "" + +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead:3 +#: mmcls.models.heads.multi_label_linear_head.MultiLabelLinearClsHead:3 of +msgid "Config of classification loss. Defaults to dict(type='CrossEntropyLoss', use_sigmoid=True)." +msgstr "" + +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead:6 +#: mmcls.models.heads.multi_label_linear_head.MultiLabelLinearClsHead:6 of +msgid "Predictions with scores under the thresholds are considered as negative. Defaults to None." +msgstr "" + +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead:9 +#: mmcls.models.heads.multi_label_linear_head.MultiLabelLinearClsHead:9 of +msgid "Predictions with the k-th highest scores are considered as positive. Defaults to None." +msgstr "" + +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead:12 of +msgid "The extra init config of layers. Defaults to None." +msgstr "" + +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead:18 +#: mmcls.models.heads.multi_label_linear_head.MultiLabelLinearClsHead:18 of +msgid "" +"If both ``thr`` and ``topk`` are set, use ``thr` to determine positive predictions. If neither is set, use " +"``thr=0.5`` as default." +msgstr "" + +#: mmcls.models.heads.multi_label_cls_head.MultiLabelClsHead.pre_logits:3 of +msgid "" +"The input ``feats`` is a tuple of tensor, and each tensor is the feature of a backbone stage. In " +"``MultiLabelClsHead``, we just obtain the feature of the last stage." +msgstr "" + +#: ../../api/generated/mmcls.models.heads.MultiLabelLinearClsHead.rst:7 +msgid "MultiLabelLinearClsHead" +msgstr "" + +#: ../../api/models.rst:147::1 +#: mmcls.models.heads.multi_label_linear_head.MultiLabelLinearClsHead:1 of +msgid "Linear classification head for multilabel task." +msgstr "" + +#: mmcls.models.heads.multi_label_linear_head.MultiLabelLinearClsHead:12 of +msgid "The extra init config of layers. Defaults to use dict(type='Normal', layer='Linear', std=0.01)." +msgstr "" + +#: mmcls.models.heads.multi_label_linear_head.MultiLabelLinearClsHead.pre_logits:3 of +msgid "" +"The input ``feats`` is a tuple of tensor, and each tensor is the feature of a backbone stage. In " +"``MultiLabelLinearClsHead``, we just obtain the feature of the last stage." +msgstr "" + +#: ../../api/generated/mmcls.models.heads.StackedLinearClsHead.rst:7 +msgid "StackedLinearClsHead" +msgstr "" + +#: ../../api/models.rst:147::1 mmcls.models.heads.stacked_head.StackedLinearClsHead:1 of +msgid "Classifier head with several hidden fc layer and a output fc layer." +msgstr "" + +#: mmcls.models.heads.stacked_head.StackedLinearClsHead:7 of +msgid "Number of channels in the hidden fc layers." +msgstr "" + +#: mmcls.models.heads.stacked_head.StackedLinearClsHead:10 of +msgid "Dropout rate after each hidden fc layer, except the last layer. Defaults to 0." +msgstr "" + +#: mmcls.models.heads.stacked_head.StackedLinearClsHead:13 of +msgid "" +"Config dict of normalization layer after each hidden fc layer, except the last layer. Defaults to None." +msgstr "" + +#: mmcls.models.heads.stacked_head.StackedLinearClsHead:16 of +msgid "" +"Config dict of activation function after each hidden layer, except the last layer. Defaults to use \"ReLU\"." 
+msgstr "" + +#: mmcls.models.heads.StackedLinearClsHead.fc:1 of +msgid "Full connected layer." +msgstr "" + +#: mmcls.models.heads.stacked_head.StackedLinearClsHead.pre_logits:3 of +msgid "The input ``feats`` is a tuple of tensor, and each tensor is the feature of a backbone stage." +msgstr "" + +#: ../../api/generated/mmcls.models.heads.VisionTransformerClsHead.rst:7 +msgid "VisionTransformerClsHead" +msgstr "" + +#: ../../api/models.rst:147::1 +#: mmcls.models.heads.vision_transformer_head.VisionTransformerClsHead:1 of +msgid "Vision Transformer classifier head." +msgstr "" + +#: mmcls.models.heads.vision_transformer_head.VisionTransformerClsHead.init_weights:1 of +msgid "\"Init weights of hidden layer if exists." +msgstr "" + +#: mmcls.models.heads.vision_transformer_head.VisionTransformerClsHead.pre_logits:3 of +msgid "" +"The input ``feats`` is a tuple of list of tensor, and each tensor is the feature of a backbone stage. In " +"``VisionTransformerClsHead``, we obtain the feature of the last stage and forward in hidden layer if exists." +msgstr "" + +#: ../../api/generated/mmcls.models.losses.AsymmetricLoss.rst:7 +msgid "AsymmetricLoss" +msgstr "" + +#: ../../api/models.rst:163::1 mmcls.models.losses.asymmetric_loss.AsymmetricLoss:1 +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss.forward:1 of +msgid "asymmetric loss." +msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss:3 of +msgid "positive focusing parameter. Defaults to 0.0." +msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss:6 of +msgid "Negative focusing parameter. We usually set gamma_neg > gamma_pos. Defaults to 4.0." +msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss:9 of +msgid "Probability margin. Defaults to 0.05." +msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss:11 of +msgid "The method used to reduce the loss into a scalar." +msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss:14 mmcls.models.losses.focal_loss.FocalLoss:12 of +msgid "Weight of loss. Defaults to 1.0." +msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss:16 of +msgid "Whether the prediction uses sigmoid instead of softmax. Defaults to True." +msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss:19 of +msgid "The minimum value of the argument of logarithm. Defaults to 1e-8." +msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss.forward:3 +#: mmcls.models.losses.focal_loss.FocalLoss.forward:3 +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss.forward:3 of +msgid "The prediction with shape (N, \\*)." +msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss.forward:5 +#: mmcls.models.losses.focal_loss.FocalLoss.forward:5 of +msgid "The ground truth label of the prediction with shape (N, \\*), N or (N,1)." +msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss.forward:8 +#: mmcls.models.losses.focal_loss.FocalLoss.forward:8 +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss.forward:8 of +msgid "Sample-wise loss weight with shape (N, \\*). Defaults to None." +msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss.forward:11 +#: mmcls.models.losses.focal_loss.FocalLoss.forward:11 +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss.forward:11 +#: mmcls.models.losses.seesaw_loss.SeesawLoss.forward:9 of +msgid "Average factor that is used to average the loss. Defaults to None." 
+msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss.forward:14 +#: mmcls.models.losses.focal_loss.FocalLoss.forward:14 +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss.forward:14 of +msgid "" +"The method used to reduce the loss into a scalar. Options are \"none\", \"mean\" and \"sum\". Defaults to " +"None." +msgstr "" + +#: mmcls.models.losses.asymmetric_loss.AsymmetricLoss.forward:19 +#: mmcls.models.losses.focal_loss.FocalLoss.forward:19 +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss.forward:19 of +msgid "Loss." +msgstr "" + +#: ../../api/generated/mmcls.models.losses.CrossEntropyLoss.rst:7 +msgid "CrossEntropyLoss" +msgstr "" + +#: ../../api/models.rst:163::1 mmcls.models.losses.cross_entropy_loss.CrossEntropyLoss:1 of +msgid "Cross entropy loss." +msgstr "" + +#: mmcls.models.losses.cross_entropy_loss.CrossEntropyLoss:3 of +msgid "Whether the prediction uses sigmoid of softmax. Defaults to False." +msgstr "" + +#: mmcls.models.losses.cross_entropy_loss.CrossEntropyLoss:6 of +msgid "Whether to use the soft version of CrossEntropyLoss. Defaults to False." +msgstr "" + +#: mmcls.models.losses.cross_entropy_loss.CrossEntropyLoss:9 +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:23 of +msgid "The method used to reduce the loss. Options are \"none\", \"mean\" and \"sum\". Defaults to 'mean'." +msgstr "" + +#: mmcls.models.losses.cross_entropy_loss.CrossEntropyLoss:12 +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:26 of +msgid "Weight of the loss. Defaults to 1.0." +msgstr "" + +#: mmcls.models.losses.cross_entropy_loss.CrossEntropyLoss:14 of +msgid "The weight for each class with shape (C), C is the number of classes. Default None." +msgstr "" + +#: mmcls.models.losses.cross_entropy_loss.CrossEntropyLoss:17 of +msgid "" +"The positive weight for each class with shape (C), C is the number of classes. Only enabled in BCE loss " +"when ``use_sigmoid`` is True. Default None." +msgstr "" + +#: ../../api/generated/mmcls.models.losses.FocalLoss.rst:7 +msgid "FocalLoss" +msgstr "" + +#: ../../api/models.rst:163::1 mmcls.models.losses.focal_loss.FocalLoss:1 of +msgid "Focal loss." +msgstr "" + +#: mmcls.models.losses.focal_loss.FocalLoss:3 of +msgid "Focusing parameter in focal loss. Defaults to 2.0." +msgstr "" + +#: mmcls.models.losses.focal_loss.FocalLoss:6 of +msgid "The parameter in balanced form of focal loss. Defaults to 0.25." +msgstr "" + +#: mmcls.models.losses.focal_loss.FocalLoss:9 of +msgid "" +"The method used to reduce the loss into a scalar. Options are \"none\" and \"mean\". Defaults to 'mean'." +msgstr "" + +#: mmcls.models.losses.focal_loss.FocalLoss.forward:1 of +msgid "Sigmoid focal loss." +msgstr "" + +#: ../../api/generated/mmcls.models.losses.LabelSmoothLoss.rst:7 +msgid "LabelSmoothLoss" +msgstr "" + +#: ../../api/models.rst:163::1 mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:1 of +msgid "Initializer for the label smoothed cross entropy loss." +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:3 of +msgid "" +"Refers to `Rethinking the Inception Architecture for Computer Vision `_" +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:6 of +msgid "" +"This decreases gap between output scores and encourages generalization. Labels provided to forward can be " +"one-hot like vectors (NxC) or class indices (Nx1). And this accepts linear combination of one-hot like " +"labels from mixup or cutmix except multi-label task." 
+msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:12 of +msgid "The degree of label smoothing." +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:14 of +msgid "Number of classes. Defaults to None." +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:16 of +msgid "Refers to notes, Options are 'original', 'classy_vision', 'multi_label'. Defaults to 'original'." +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:19 of +msgid "" +"Whether the prediction uses sigmoid of softmax. Defaults to None, which means to use sigmoid in " +"\"multi_label\" mode and not use in other modes." +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:31 of +msgid "if the mode is **\"original\"**, this will use the same label smooth method as the original paper as:" +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:34 of +msgid "" +"(1-\\epsilon)\\delta_{k, y} + \\frac{\\epsilon}{K}\n" +"\n" +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:37 of +msgid "" +"where :math:`\\epsilon` is the ``label_smooth_val``, :math:`K` is the ``num_classes`` and :math:`" +"\\delta_{k, y}` is Dirac delta, which equals 1 for :math:`k=y` and 0 otherwise." +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:41 of +msgid "" +"if the mode is **\"classy_vision\"**, this will use the same label smooth method as the facebookresearch/" +"ClassyVision repo as:" +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:44 of +msgid "" +"\\frac{\\delta_{k, y} + \\epsilon/K}{1+\\epsilon}\n" +"\n" +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:47 of +msgid "" +"if the mode is **\"multi_label\"**, this will accept labels from multi-label task and smoothing them as:" +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss:50 of +msgid "" +"(1-2\\epsilon)\\delta_{k, y} + \\epsilon\n" +"\n" +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss.forward:1 of +msgid "Label smooth loss." +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss.forward:5 of +msgid "The ground truth label of the prediction with shape (N, \\*)." +msgstr "" + +#: mmcls.models.losses.label_smooth_loss.LabelSmoothLoss.generate_one_hot_like_label:1 of +msgid "This function takes one-hot or index label vectors and computes one- hot like label vectors (float)" +msgstr "" + +#: ../../api/generated/mmcls.models.losses.SeesawLoss.rst:7 +msgid "SeesawLoss" +msgstr "" + +#: ../../api/models.rst:163::1 mmcls.models.losses.seesaw_loss.SeesawLoss:1 of +msgid "Implementation of seesaw loss." +msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss:3 of +msgid "" +"Refers to `Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021) `_" +msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss:6 of +msgid "Whether the prediction uses sigmoid of softmax. Only False is supported. Defaults to False." +msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss:9 of +msgid "The ``p`` in the mitigation factor. Defaults to 0.8." +msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss:12 of +msgid "The ``q`` in the compenstation factor. Defaults to 2.0." +msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss:15 of +msgid "The number of classes. Defaults to 1000 for the ImageNet dataset." 
+msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss:18 of +msgid "The minimal value of divisor to smooth the computation of compensation factor, default to 1e-2." +msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss:21 of +msgid "" +"The method that reduces the loss to a scalar. Options are \"none\", \"mean\" and \"sum\". Defaults to \"mean" +"\"." +msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss:24 of +msgid "The weight of the loss. Defaults to 1.0" +msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss.forward:3 of +msgid "The prediction with shape (N, C)." +msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss.forward:5 of +msgid "The learning label of the prediction." +msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss.forward:7 of +msgid "Sample-wise loss weight." +msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss.forward:12 of +msgid "The method used to reduce the loss. Options are \"none\", \"mean\" and \"sum\"." +msgstr "" + +#: mmcls.models.losses.seesaw_loss.SeesawLoss.forward:16 of +msgid "The calculated loss" +msgstr "" + +#: ../../api/generated/mmcls.models.necks.GeneralizedMeanPooling.rst:7 +msgid "GeneralizedMeanPooling" +msgstr "" + +#: ../../api/models.rst:125::1 mmcls.models.necks.gem.GeneralizedMeanPooling:1 of +msgid "Generalized Mean Pooling neck." +msgstr "" + +#: mmcls.models.necks.gap.GlobalAveragePooling:3 mmcls.models.necks.gem.GeneralizedMeanPooling:3 of +msgid "" +"Note that we use `view` to remove extra channel after pooling. We do not use `squeeze` as it will also " +"remove the batch dimension when the tensor has a batch dimension of size 1, which can lead to unexpected " +"errors." +msgstr "" + +#: mmcls.models.necks.gem.GeneralizedMeanPooling:7 of +msgid "Parameter value. Default: 3." +msgstr "" + +#: mmcls.models.necks.gem.GeneralizedMeanPooling:10 of +msgid "epsilon. Default: 1e-6" +msgstr "" + +#: mmcls.models.necks.gem.GeneralizedMeanPooling:13 of +msgid "Use clamp before pooling. Default: True" +msgstr "" + +#: ../../api/generated/mmcls.models.necks.GlobalAveragePooling.rst:7 +msgid "GlobalAveragePooling" +msgstr "" + +#: ../../api/models.rst:125::1 mmcls.models.necks.gap.GlobalAveragePooling:1 of +msgid "Global Average Pooling neck." +msgstr "" + +#: mmcls.models.necks.gap.GlobalAveragePooling:7 of +msgid "Dimensions of each sample channel, can be one of {1, 2, 3}. Default: 2" +msgstr "" + +#: ../../api/generated/mmcls.models.necks.HRFuseScales.rst:7 +msgid "HRFuseScales" +msgstr "" + +#: ../../api/models.rst:125::1 mmcls.models.necks.hr_fuse.HRFuseScales:1 of +msgid "Fuse feature map of multiple scales in HRNet." +msgstr "" + +#: mmcls.models.necks.hr_fuse.HRFuseScales:3 of +msgid "The input channels of all scales." +msgstr "" + +#: mmcls.models.necks.hr_fuse.HRFuseScales:5 of +msgid "The channels of fused feature map. Defaults to 2048." +msgstr "" + +#: mmcls.models.necks.hr_fuse.HRFuseScales:8 of +msgid "dictionary to construct norm layers. Defaults to ``dict(type='BN', momentum=0.1)``." +msgstr "" + +#: mmcls.models.necks.hr_fuse.HRFuseScales:11 of +msgid "Initialization config dict. Defaults to ``dict(type='Normal', layer='Linear', std=0.01))``." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.ConditionalPositionEncoding.rst:7 +msgid "ConditionalPositionEncoding" +msgstr "" + +#: ../../api/models.rst:192::1 mmcls.models.utils.position_encoding.ConditionalPositionEncoding:1 +#: of +msgid "The Conditional Position Encoding (CPE) module." 
+msgstr "" + +#: mmcls.models.utils.position_encoding.ConditionalPositionEncoding:3 of +msgid "" +"The CPE is the implementation of 'Conditional Positional Encodings for Vision Transformers '_." +msgstr "" + +#: mmcls.models.utils.position_encoding.ConditionalPositionEncoding:8 of +msgid "The feature dimension. Default: 768." +msgstr "" + +#: mmcls.models.utils.position_encoding.ConditionalPositionEncoding:10 of +msgid "Stride of conv layer. Default: 1." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.HybridEmbed.rst:7 +msgid "HybridEmbed" +msgstr "" + +#: ../../api/models.rst:192::1 mmcls.models.utils.embed.HybridEmbed:1 of +msgid "CNN Feature Map Embedding." +msgstr "" + +#: mmcls.models.utils.embed.HybridEmbed:3 of +msgid "Extract feature map from CNN, flatten, project to embedding dim." +msgstr "" + +#: mmcls.models.utils.embed.HybridEmbed:6 of +msgid "CNN backbone" +msgstr "" + +#: mmcls.models.utils.embed.HybridEmbed:8 mmcls.models.utils.embed.PatchEmbed:5 of +msgid "The size of input image. Default: 224" +msgstr "" + +#: mmcls.models.utils.embed.HybridEmbed:10 of +msgid "Size of feature map extracted by CNN backbone. Default: None" +msgstr "" + +#: mmcls.models.utils.embed.HybridEmbed:13 mmcls.models.utils.embed.PatchEmbed:7 of +msgid "The num of input channels. Default: 3" +msgstr "" + +#: mmcls.models.utils.embed.HybridEmbed:15 mmcls.models.utils.embed.PatchEmbed:9 of +msgid "The dimensions of embedding. Default: 768" +msgstr "" + +#: mmcls.models.utils.embed.HybridEmbed:20 of +msgid "The Config for initialization. Default: None." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.InvertedResidual.rst:7 +msgid "InvertedResidual" +msgstr "" + +#: ../../api/models.rst:192::1 mmcls.models.utils.inverted_residual.InvertedResidual:1 of +msgid "Inverted Residual Block." +msgstr "" + +#: mmcls.models.utils.inverted_residual.InvertedResidual:3 of +msgid "The input channels of this module." +msgstr "" + +#: mmcls.models.utils.inverted_residual.InvertedResidual:5 of +msgid "The output channels of this module." +msgstr "" + +#: mmcls.models.utils.inverted_residual.InvertedResidual:7 of +msgid "The input channels of the depthwise convolution." +msgstr "" + +#: mmcls.models.utils.inverted_residual.InvertedResidual:9 of +msgid "The kernel size of the depthwise convolution. Defaults to 3." +msgstr "" + +#: mmcls.models.utils.inverted_residual.InvertedResidual:12 of +msgid "The stride of the depthwise convolution. Defaults to 1." +msgstr "" + +#: mmcls.models.utils.inverted_residual.InvertedResidual:14 of +msgid "Config dict for se layer. Defaults to None, which means no se layer." +msgstr "" + +#: mmcls.models.utils.inverted_residual.InvertedResidual:20 of +msgid "Config dict for normalization layer. Defaults to ``dict(type='BN')``." +msgstr "" + +#: mmcls.models.utils.channel_shuffle.channel_shuffle:6 +#: mmcls.models.utils.inverted_residual.InvertedResidual.forward:3 of +msgid "The input tensor." +msgstr "" + +#: mmcls.models.utils.inverted_residual.InvertedResidual.forward:6 of +msgid "The output tensor." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.LayerScale.rst:7 +msgid "LayerScale" +msgstr "" + +#: ../../api/models.rst:192::1 mmcls.models.utils.layer_scale.LayerScale:1 of +msgid "LayerScale layer." +msgstr "" + +#: mmcls.models.utils.layer_scale.LayerScale:3 of +msgid "Dimension of input features." +msgstr "" + +#: mmcls.models.utils.layer_scale.LayerScale:5 of +msgid "inplace: can optionally do the operation in-place. Defaults to False." 
+msgstr "" + +#: mmcls.models.utils.layer_scale.LayerScale:8 of +msgid "" +"The input data format, could be 'channels_last' or 'channels_first', representing (B, C, H, W) and (B, N, " +"C) format data respectively. Defaults to 'channels_last'." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.MultiheadAttention.rst:7 +msgid "MultiheadAttention" +msgstr "" + +#: ../../api/models.rst:192::1 mmcls.models.utils.attention.MultiheadAttention:1 of +msgid "Multi-head Attention Module." +msgstr "" + +#: mmcls.models.utils.attention.MultiheadAttention:3 of +msgid "" +"This module implements multi-head attention that supports different input dims and embed dims. And it also " +"supports a shortcut from ``value``, which is useful if input dims is not the same with embed dims." +msgstr "" + +#: mmcls.models.utils.attention.MultiheadAttention:7 of +msgid "The embedding dimension." +msgstr "" + +#: mmcls.models.utils.attention.MultiheadAttention:9 of +msgid "Parallel attention heads." +msgstr "" + +#: mmcls.models.utils.attention.MultiheadAttention:11 of +msgid "The input dimension, and if None, use ``embed_dims``. Defaults to None." +msgstr "" + +#: mmcls.models.utils.attention.MultiheadAttention:14 of +msgid "Dropout rate of the dropout layer after the attention calculation of query and key. Defaults to 0." +msgstr "" + +#: mmcls.models.utils.attention.MultiheadAttention:17 of +msgid "Dropout rate of the dropout layer after the output projection. Defaults to 0." +msgstr "" + +#: mmcls.models.utils.attention.MultiheadAttention:20 of +msgid "The dropout config before adding the shortcut. Defaults to ``dict(type='Dropout', drop_prob=0.)``." +msgstr "" + +#: mmcls.models.utils.attention.MultiheadAttention:23 mmcls.models.utils.attention.WindowMSA:10 +#: mmcls.models.utils.attention.WindowMSAV2:14 of +msgid "If True, add a learnable bias to q, k, v. Defaults to True." +msgstr "" + +#: mmcls.models.utils.attention.MultiheadAttention:26 mmcls.models.utils.attention.WindowMSA:13 of +msgid "Override default qk scale of ``head_dim ** -0.5`` if set. Defaults to None." +msgstr "" + +#: mmcls.models.utils.attention.MultiheadAttention:29 of +msgid "Defaults to True." +msgstr "" + +#: mmcls.models.utils.attention.MultiheadAttention:31 of +msgid "" +"Add a shortcut from value to output. It's usually used if ``input_dims`` is different from ``embed_dims``. " +"Defaults to False." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.PatchEmbed.rst:7 +msgid "PatchEmbed" +msgstr "" + +#: ../../api/models.rst:192::1 mmcls.models.utils.embed.PatchEmbed:1 of +msgid "Image to Patch Embedding." +msgstr "" + +#: mmcls.models.utils.embed.PatchEmbed:3 of +msgid "We use a conv layer to implement PatchEmbed." +msgstr "" + +#: mmcls.models.utils.embed.PatchEmbed:11 of +msgid "Config dict for normalization layer. Default: None" +msgstr "" + +#: mmcls.models.utils.embed.PatchEmbed:14 of +msgid "The config dict for conv layers. Default: None" +msgstr "" + +#: mmcls.models.utils.embed.PatchEmbed:17 of +msgid "The Config for initialization. Default: None" +msgstr "" + +#: ../../api/generated/mmcls.models.utils.PatchMerging.rst:7 +msgid "PatchMerging" +msgstr "" + +#: ../../api/models.rst:192::1 mmcls.models.utils.embed.PatchMerging:1 of +msgid "Merge patch feature map." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging:3 of +msgid "Modified from mmcv, and this module supports specifying whether to use post-norm." 
+msgstr "" + +#: mmcls.models.utils.embed.PatchMerging:6 of +#, python-format +msgid "" +"This layer groups feature map by kernel_size, and applies norm and linear layers to the grouped feature map " +"((used in Swin Transformer)). Our implementation uses :class:`torch.nn.Unfold` to merge patches, which is " +"about 25% faster than the original implementation. However, we need to modify pretrained models for " +"compatibility." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging:12 of +msgid "The num of input channels. To gets fully covered by filter and stride you specified." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging:15 of +msgid "The num of output channels." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging:17 of +msgid "the kernel size in the unfold layer. Defaults to 2." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging:20 of +msgid "" +"the stride of the sliding blocks in the unfold layer. Defaults to None, which means to be set as " +"``kernel_size``." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging:24 of +msgid "" +"The padding length of embedding conv. When it is a string, it means the mode of adaptive padding, support " +"\"same\" and \"corner\" now. Defaults to \"corner\"." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging:29 of +msgid "dilation parameter in the unfold layer. Defaults to 1." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging:32 of +msgid "Whether to add bias in linear layer or not. Defaults to False." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging:38 of +msgid "Whether to use post normalization here. Defaults to False." +msgstr "" + +#: mmcls.models.utils.attention.ShiftWindowMSA:24 mmcls.models.utils.attention.WindowMSA:21 +#: mmcls.models.utils.attention.WindowMSAV2:29 mmcls.models.utils.embed.PatchMerging:41 of +msgid "The extra config for initialization. Defaults to None." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging.forward:1 of +msgid "Has shape (B, H*W, C_in)." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging.forward:3 of +msgid "The spatial shape of x, arrange as (H, W). Default: None." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging.forward:7 of +msgid "" +"Contains merged results and its spatial shape. - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) - " +"out_size (tuple[int]): Spatial shape of x, arrange as (Merged_H, Merged_W)." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging.forward:7 of +msgid "Contains merged results and its spatial shape." +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging.forward:9 of +msgid "x (Tensor): Has shape (B, Merged_H * Merged_W, C_out)" +msgstr "" + +#: mmcls.models.utils.embed.PatchMerging.forward:10 of +msgid "out_size (tuple[int]): Spatial shape of x, arrange as (Merged_H, Merged_W)." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.SELayer.rst:7 +msgid "SELayer" +msgstr "" + +#: ../../api/models.rst:192::1 mmcls.models.utils.se_layer.SELayer:1 of +msgid "Squeeze-and-Excitation Module." +msgstr "" + +#: mmcls.models.utils.se_layer.SELayer:3 of +msgid "The input (and output) channels of the SE layer." +msgstr "" + +#: mmcls.models.utils.se_layer.SELayer:5 of +msgid "" +"The intermediate channel number of SElayer. Default: None, means the value of ``squeeze_channels`` is " +"``make_divisible(channels // ratio, divisor)``." +msgstr "" + +#: mmcls.models.utils.se_layer.SELayer:9 of +msgid "" +"Squeeze ratio in SELayer, the intermediate channel will be ``make_divisible(channels // ratio, divisor)``. 
" +"Only used when ``squeeze_channels`` is None. Default: 16." +msgstr "" + +#: mmcls.models.utils.se_layer.SELayer:13 of +msgid "" +"The divisor to true divide the channel number. Only used when ``squeeze_channels`` is None. Default: 8." +msgstr "" + +#: mmcls.models.utils.se_layer.SELayer:19 of +msgid "Whether to return the weight. Default: False." +msgstr "" + +#: mmcls.models.utils.se_layer.SELayer:21 of +msgid "" +"Config dict for activation layer. If act_cfg is a dict, two activation layers will be configurated by this " +"dict. If act_cfg is a sequence of dicts, the first activation layer will be configurated by the first dict " +"and the second activation layer will be configurated by the second dict. Default: (dict(type='ReLU'), " +"dict(type='Sigmoid'))" +msgstr "" + +#: ../../api/generated/mmcls.models.utils.ShiftWindowMSA.rst:7 +msgid "ShiftWindowMSA" +msgstr "" + +#: ../../api/models.rst:192::1 mmcls.models.utils.attention.ShiftWindowMSA:1 of +msgid "Shift Window Multihead Self-Attention Module." +msgstr "" + +#: mmcls.models.utils.attention.ShiftWindowMSA:5 mmcls.models.utils.attention.WindowMSA:8 +#: mmcls.models.utils.attention.WindowMSAV2:12 mmcls.models.utils.embed.resize_relative_position_bias_table:11 +#: of +msgid "Number of attention heads." +msgstr "" + +#: mmcls.models.utils.attention.ShiftWindowMSA:7 mmcls.models.utils.attention.WindowMSA:6 +#: mmcls.models.utils.attention.WindowMSAV2:10 of +msgid "The height and width of the window." +msgstr "" + +#: mmcls.models.utils.attention.ShiftWindowMSA:9 of +msgid "The shift step of each window towards right-bottom. If zero, act as regular window-msa. Defaults to 0." +msgstr "" + +#: mmcls.models.utils.attention.ShiftWindowMSA:12 of +msgid "The dropout_layer used before output. Defaults to dict(type='DropPath', drop_prob=0.)." +msgstr "" + +#: mmcls.models.utils.attention.ShiftWindowMSA:21 of +msgid "To build a window multi-head attention module. Defaults to :class:`WindowMSA`." +msgstr "" + +#: mmcls.models.utils.attention.ShiftWindowMSA:27 of +msgid "Other keyword arguments to build the window multi-head attention module." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.WindowMSA.rst:7 +msgid "WindowMSA" +msgstr "" + +#: ../../api/models.rst:192::1 mmcls.models.utils.attention.WindowMSA:1 +#: mmcls.models.utils.attention.WindowMSAV2:1 of +msgid "Window based multi-head self-attention (W-MSA) module with relative position bias." +msgstr "" + +#: mmcls.models.utils.attention.WindowMSA:16 mmcls.models.utils.attention.WindowMSAV2:17 of +msgid "Dropout ratio of attention weight. Defaults to 0." +msgstr "" + +#: mmcls.models.utils.attention.WindowMSA:19 mmcls.models.utils.attention.WindowMSAV2:20 of +msgid "Dropout ratio of output. Defaults to 0." +msgstr "" + +#: mmcls.models.utils.attention.WindowMSA.forward:1 mmcls.models.utils.attention.WindowMSAV2.forward:1 of +msgid "input features with shape of (num_windows*B, N, C)" +msgstr "" + +#: mmcls.models.utils.attention.WindowMSA.forward:3 mmcls.models.utils.attention.WindowMSAV2.forward:3 of +msgid "mask with shape of (num_windows, Wh*Ww, Wh*Ww), value should be between (-inf, 0]." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.WindowMSAV2.rst:7 +msgid "WindowMSAV2" +msgstr "" + +#: mmcls.models.utils.attention.WindowMSAV2:4 of +msgid "" +"Based on implementation on Swin Transformer V2 original repo. Refers to https://github.com/microsoft/Swin-" +"Transformer/blob/main/models/swin_transformer_v2.py for more details." 
+msgstr "" + +#: mmcls.models.utils.attention.WindowMSAV2:22 of +msgid "The hidden dimensions of the continuous relative position bias network. Defaults to 512." +msgstr "" + +#: mmcls.models.utils.attention.WindowMSAV2:25 of +msgid "" +"The height and width of the window in pre-training. Defaults to (0, 0), which means not load pretrained " +"model." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.batch_augments.CutMix.rst:7 +msgid "CutMix" +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix:3 of +msgid "" +"CutMix is a method to improve the network's generalization capability. It's proposed in `CutMix: " +"Regularization Strategy to Train Strong Classifiers with Localizable Features `" +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix:7 of +msgid "" +"With this method, patches are cut and pasted among training images where the ground truth labels are also " +"mixed proportionally to the area of the patches." +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix:11 mmcls.models.utils.batch_augments.resizemix.ResizeMix:7 +#: of +msgid "" +"Parameters for Beta distribution to generate the mixing ratio. It should be a positive number. More details " +"can be found in :class:`Mixup`." +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix:15 mmcls.models.utils.batch_augments.resizemix.ResizeMix:22 +#: of +msgid "" +"The min/max area ratio of the patches. If not None, the bounding-box of patches is uniform sampled within " +"this ratio range, and the ``alpha`` will be ignored. Otherwise, the bounding-box is generated according to " +"the ``alpha``. Defaults to None." +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix:21 of +msgid "Whether to apply lambda correction when cutmix bbox clipped by image borders. Defaults to True." +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix:26 of +msgid "" +"If the ``cutmix_minmax`` is None, how to generate the bounding-box of patches according to the ``alpha``?" +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix:29 of +msgid "" +"First, generate a :math:`\\lambda`, details can be found in :class:`Mixup`. And then, the area ratio of the " +"bounding-box is calculated by:" +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix:33 mmcls.models.utils.batch_augments.resizemix.ResizeMix:45 +#: of +msgid "" +"\\text{ratio} = \\sqrt{1-\\lambda}\n" +"\n" +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix.cutmix_bbox_and_lam:1 of +msgid "Generate bbox and apply lambda correction." +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix.cutmix_bbox_and_lam:3 +#: mmcls.models.utils.batch_augments.cutmix.CutMix.rand_bbox:5 +#: mmcls.models.utils.batch_augments.cutmix.CutMix.rand_bbox_minmax:8 of +msgid "Image shape as tuple" +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix.cutmix_bbox_and_lam:5 +#: mmcls.models.utils.batch_augments.cutmix.CutMix.rand_bbox:7 of +msgid "Cutmix lambda value" +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix.cutmix_bbox_and_lam:7 +#: mmcls.models.utils.batch_augments.cutmix.CutMix.rand_bbox:12 +#: mmcls.models.utils.batch_augments.cutmix.CutMix.rand_bbox_minmax:10 of +msgid "Number of bbox to generate. Defaults to None" +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix.mix:1 mmcls.models.utils.batch_augments.mixup.Mixup.mix:1 +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix.mix:1 of +msgid "Mix the batch inputs and batch one-hot format ground truth." 
+msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix.mix:3 mmcls.models.utils.batch_augments.mixup.Mixup.mix:3 +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix.mix:3 of +msgid "A batch of images tensor in the shape of ``(N, C, H, W)``." +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix.mix:6 mmcls.models.utils.batch_augments.mixup.Mixup.mix:6 +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix.mix:6 of +msgid "A batch of one-hot format labels in the shape of ``(N, num_classes)``." +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix.mix:10 mmcls.models.utils.batch_augments.mixup.Mixup.mix:10 +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix.mix:10 of +msgid "The mixed inputs and labels." +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix.rand_bbox:1 of +msgid "" +"Standard CutMix bounding-box that generates a random square bbox based on lambda value. This implementation " +"includes support for enforcing a border margin as percent of bbox dimensions." +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix.rand_bbox:9 of +msgid "Percentage of bbox dimension to enforce as margin (reduce amount of box outside image). Defaults to 0." +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix.rand_bbox_minmax:1 of +msgid "" +"Min-Max CutMix bounding-box Inspired by Darknet cutmix implementation. It generates a random rectangular " +"bbox based on min/max percent values applied to each dimension of the input image." +msgstr "" + +#: mmcls.models.utils.batch_augments.cutmix.CutMix.rand_bbox_minmax:5 of +msgid "Typical defaults for minmax are usually in the .2-.3 for min and .8-.9 range for max." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.batch_augments.Mixup.rst:7 +msgid "Mixup" +msgstr "" + +#: mmcls.models.utils.batch_augments.mixup.Mixup:3 of +msgid "" +"Mixup is a method to reduces the memorization of corrupt labels and increases the robustness to adversarial " +"examples. It's proposed in `mixup: Beyond Empirical Risk Minimization `_" +msgstr "" + +#: mmcls.models.utils.batch_augments.mixup.Mixup:8 of +msgid "" +"Parameters for Beta distribution to generate the mixing ratio. It should be a positive number. More details " +"are in the note." +msgstr "" + +#: mmcls.models.utils.batch_augments.mixup.Mixup:15 of +msgid "" +"The :math:`\\alpha` (``alpha``) determines a random distribution :math:`Beta(\\alpha, \\alpha)`. For each " +"batch of data, we sample a mixing ratio (marked as :math:`\\lambda`, ``lam``) from the random distribution." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.batch_augments.ResizeMix.rst:7 +msgid "ResizeMix" +msgstr "" + +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix:3 of +msgid "" +"The ResizeMix will resize an image to a small patch and paste it on another image. It's proposed in " +"`ResizeMix: Mixing Data with Preserved Object Information and True Labels `_" +msgstr "" + +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix:11 of +msgid "The minimum value of lam. Defaults to 0.1." +msgstr "" + +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix:13 of +msgid "The maximum value of lam. Defaults to 0.8." +msgstr "" + +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix:15 of +msgid "" +"algorithm used for upsampling: 'nearest' | 'linear' | 'bilinear' | 'bicubic' | 'trilinear' | 'area'. " +"Defaults to 'bilinear'." 
+msgstr "" + +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix:19 of +msgid "The probability to execute resizemix. It should be in range [0, 1]. Defaults to 1.0." +msgstr "" + +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix:28 of +msgid "Whether to apply lambda correction when cutmix bbox clipped by image borders. Defaults to True" +msgstr "" + +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix:31 of +msgid "Any other parameters accpeted by :class:`CutMix`." +msgstr "" + +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix:35 of +msgid "" +"The :math:`\\lambda` (``lam``) is the mixing ratio. It's a random variable which follows :math:" +"`Beta(\\alpha, \\alpha)` and is mapped to the range [``lam_min``, ``lam_max``]." +msgstr "" + +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix:39 of +msgid "" +"\\lambda = \\frac{Beta(\\alpha, \\alpha)}\n" +"{\\lambda_{max} - \\lambda_{min}} + \\lambda_{min}\n" +"\n" +msgstr "" + +#: mmcls.models.utils.batch_augments.resizemix.ResizeMix:43 of +msgid "And the resize ratio of source images is calculated by :math:`\\lambda`:" +msgstr "" + +#: ../../api/generated/mmcls.models.utils.channel_shuffle.rst:2 +msgid "mmcls.models.utils.channel\\_shuffle" +msgstr "" + +#: ../../api/models.rst:207::1 mmcls.models.utils.channel_shuffle.channel_shuffle:1 of +msgid "Channel Shuffle operation." +msgstr "" + +#: mmcls.models.utils.channel_shuffle.channel_shuffle:3 of +msgid "This function enables cross-group information flow for multiple groups convolution layers." +msgstr "" + +#: mmcls.models.utils.channel_shuffle.channel_shuffle:8 of +msgid "The number of groups to divide the input tensor in the channel dimension." +msgstr "" + +#: mmcls.models.utils.channel_shuffle.channel_shuffle:12 of +msgid "The output tensor after channel shuffle operation." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.data_preprocessor.ClsDataPreprocessor.rst:7 +msgid "ClsDataPreprocessor" +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:3 of +msgid "Comparing with the :class:`mmengine.model.ImgDataPreprocessor`," +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:5 of +msgid "It won't do normalization if ``mean`` is not specified." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:6 of +msgid "It does normalization and color space conversion after stacking batch." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:7 of +msgid "It supports batch augmentations like mixup and cutmix." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:9 of +msgid "It provides the data pre-processing as follows" +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:11 of +msgid "Collate and move data to the target device." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:12 of +msgid "" +"Pad inputs to the maximum size of current batch with defined ``pad_value``. The padding size can be " +"divisible by a defined ``pad_size_divisor``" +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:15 of +msgid "Stack inputs to batch_inputs." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:18 of +msgid "Do batch augmentations like Mixup and Cutmix during training." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:20 of +msgid "The pixel mean of R, G, B channels. Defaults to None." 
+msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:23 of +msgid "The pixel standard deviation of R, G, B channels. Defaults to None." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:26 of +msgid "The size of padded image should be divisible by ``pad_size_divisor``. Defaults to 1." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:29 of +msgid "The padded pixel value. Defaults to 0." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:31 of +msgid "whether to convert image from BGR to RGB. Defaults to False." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:34 of +msgid "Whether to generate one-hot format gt-labels and set to data samples. Defaults to False." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor:39 of +msgid "" +"The batch augmentations settings, including \"augments\" and \"probs\". For more details, see :class:`mmcls." +"models.RandomBatchAugment`." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor.forward:1 of +msgid "" +"Perform normalization, padding, bgr2rgb conversion and batch augmentation based on ``BaseDataPreprocessor``." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor.forward:4 of +msgid "data sampled from dataloader." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor.forward:6 of +msgid "Whether to enable training time augmentation." +msgstr "" + +#: mmcls.models.utils.data_preprocessor.ClsDataPreprocessor.forward:9 of +msgid "Data in the same format as the model input." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.is_tracing.rst:2 +msgid "mmcls.models.utils.is\\_tracing" +msgstr "" + +#: ../../api/models.rst:207::1 mmcls.models.utils.helpers.is_tracing:1 of +msgid "Determine whether the model is called during the tracing of code with ``torch.jit.trace``." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.make_divisible.rst:2 +msgid "mmcls.models.utils.make\\_divisible" +msgstr "" + +#: ../../api/models.rst:207::1 mmcls.models.utils.make_divisible.make_divisible:1 of +msgid "Make divisible function." +msgstr "" + +#: mmcls.models.utils.make_divisible.make_divisible:3 of +msgid "" +"This function rounds the channel number down to the nearest value that can be divisible by the divisor." +msgstr "" + +#: mmcls.models.utils.make_divisible.make_divisible:6 of +msgid "The original channel number." +msgstr "" + +#: mmcls.models.utils.make_divisible.make_divisible:8 of +msgid "The divisor to fully divide the channel number." +msgstr "" + +#: mmcls.models.utils.make_divisible.make_divisible:10 of +msgid "" +"The minimum value of the output channel. Default: None, means that the minimum value equal to the divisor." +msgstr "" + +#: mmcls.models.utils.make_divisible.make_divisible:13 of +msgid "The minimum ratio of the rounded channel number to the original channel number. Default: 0.9." +msgstr "" + +#: mmcls.models.utils.make_divisible.make_divisible:17 of +msgid "The modified output channel number" +msgstr "" + +#: ../../api/generated/mmcls.models.utils.resize_pos_embed.rst:2 +msgid "mmcls.models.utils.resize\\_pos\\_embed" +msgstr "" + +#: ../../api/models.rst:207::1 mmcls.models.utils.embed.resize_pos_embed:1 of +msgid "Resize pos_embed weights." +msgstr "" + +#: mmcls.models.utils.embed.resize_pos_embed:3 of +msgid "Position embedding weights with shape [1, L, C]." 
+msgstr "" + +#: mmcls.models.utils.embed.resize_pos_embed:6 mmcls.models.utils.embed.resize_relative_position_bias_table:3 +#: of +msgid "The resolution of downsampled origin training image, in format (H, W)." +msgstr "" + +#: mmcls.models.utils.embed.resize_pos_embed:9 mmcls.models.utils.embed.resize_relative_position_bias_table:6 +#: of +msgid "The resolution of downsampled new training image, in format (H, W)." +msgstr "" + +#: mmcls.models.utils.embed.resize_pos_embed:12 of +msgid "" +"Algorithm used for upsampling. Choose one from 'nearest', 'linear', 'bilinear', 'bicubic' and 'trilinear'. " +"Defaults to 'bicubic'." +msgstr "" + +#: mmcls.models.utils.embed.resize_pos_embed:16 of +msgid "The number of extra tokens, such as cls_token. Defaults to 1." +msgstr "" + +#: mmcls.models.utils.embed.resize_pos_embed:20 of +msgid "The resized pos_embed of shape [1, L_new, C]" +msgstr "" + +#: ../../api/generated/mmcls.models.utils.resize_relative_position_bias_table.rst:2 +msgid "mmcls.models.utils.resize\\_relative\\_position\\_bias\\_table" +msgstr "" + +#: ../../api/models.rst:207::1 mmcls.models.utils.embed.resize_relative_position_bias_table:1 of +msgid "Resize relative position bias table." +msgstr "" + +#: mmcls.models.utils.embed.resize_relative_position_bias_table:9 of +msgid "The relative position bias of the pretrained model." +msgstr "" + +#: mmcls.models.utils.embed.resize_relative_position_bias_table:14 of +msgid "The resized relative position bias table." +msgstr "" + +#: ../../api/generated/mmcls.models.utils.to_ntuple.rst:2 +msgid "mmcls.models.utils.to\\_ntuple" +msgstr "" + +#: ../../api/models.rst:207::1 mmcls.models.utils.helpers._ntuple:1 of +msgid "A `to_tuple` function generator." +msgstr "" + +#: mmcls.models.utils.helpers._ntuple:3 of +msgid "" +"It returns a function, this function will repeat the input to a tuple of length ``n`` if the input is not " +"an Iterable object, otherwise, return the input directly." +msgstr "" + +#: mmcls.models.utils.helpers._ntuple:7 of +msgid "The number of the target length." +msgstr "" + +#: ../../api/generated/mmcls.utils.collect_env.rst:2 +msgid "mmcls.utils.collect\\_env" +msgstr "" + +#: ../../api/utils.rst:16::1 mmcls.utils.collect_env.collect_env:1 of +msgid "Collect the information of the running environments." +msgstr "" + +#: ../../api/generated/mmcls.utils.register_all_modules.rst:2 +msgid "mmcls.utils.register\\_all\\_modules" +msgstr "" + +#: ../../api/utils.rst:16::1 mmcls.utils.setup_env.register_all_modules:1 of +msgid "Register all modules in mmcls into the registries." +msgstr "" + +#: mmcls.utils.setup_env.register_all_modules:3 of +msgid "" +"Whether initialize the mmcls default scope. If True, the global default scope will be set to `mmcls`, and " +"all registries will build modules from mmcls's registry node. To understand more about the registry, please " +"refer to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md Defaults to True." +msgstr "" + +#: ../../api/models.rst:7 +msgid "mmcls.models" +msgstr "" + +#: ../../api/models.rst:9 +msgid "" +"The ``models`` package contains several sub-packages for addressing the different components of a model." +msgstr "``models`` 包中包含了若干子包,分别对应神经网络中不同的组件。" + +#: ../../api/models.rst:11 +msgid "" +":mod:`~mmcls.models.classifiers`: The top-level module which defines the whole process of a classification " +"model." 
+msgstr ":mod:`~mmcls.models.classifiers`:定义完整分类模型的顶级模块。" + +#: ../../api/models.rst:12 +msgid ":mod:`~mmcls.models.backbones`: Usually a feature extraction network, e.g., ResNet, MobileNet." +msgstr ":mod:`~mmcls.models.backbones`:用于特征提取的主干网络结构,如 ResNet、MobileNet。" + +#: ../../api/models.rst:13 +msgid ":mod:`~mmcls.models.necks`: The component between backbones and heads, e.g., GlobalAveragePooling." +msgstr ":mod:`~mmcls.models.necks`:位于主干网络和头部网络之间的过渡层,如 GlobalAveragePooling。" + +#: ../../api/models.rst:14 +msgid "" +":mod:`~mmcls.models.heads`: The component for specific tasks. In MMClassification, we provides heads for " +"classification." +msgstr "" +":mod:`~mmcls.models.heads`:用于特定任务的头部网络。在 MMClassification 中,我们提供了若干用于分类任务的头部" +"网络。" + +#: ../../api/models.rst:15 +msgid ":mod:`~mmcls.models.losses`: Loss functions." +msgstr ":mod:`~mmcls.models.losses`:损失函数" + +#: ../../api/models.rst:16 +msgid ":mod:`~mmcls.models.utils`: Some helper functions and common components used in various networks." +msgstr ":mod:`~mmcls.models.utils`:一些辅助函数,或是在多个网络中出现的公共模块。" + +#: ../../api/models.rst:18 +msgid "" +":mod:`~mmcls.models.utils.data_preprocessor`: The component before model to preprocess the inputs, e.g., " +"ClsDataPreprocessor." +msgstr "" +":mod:`~mmcls.models.utils.data_preprocessor`:对网络的输入进行预处理的模块,如 ``ClsDataPreprocessor``。" + +#: ../../api/models.rst:19 +msgid ":ref:`components`: Common components used in various networks." +msgstr ":ref:`components`:多个网络共用的一些公共模块。" + +#: ../../api/models.rst:20 +msgid ":ref:`helpers`: Helper functions." +msgstr ":ref:`helpers`:模型中用到的辅助函数。" + +#: ../../api/models.rst:23 +msgid "Build Functions" +msgstr "" + +#: ../../api/models.rst:34::1 +msgid ":py:obj:`build_classifier `" +msgstr "" + +#: ../../api/models.rst:34::1 +msgid ":py:obj:`build_backbone `" +msgstr "" + +#: ../../api/models.rst:34::1 +msgid ":py:obj:`build_neck `" +msgstr "" + +#: ../../api/models.rst:34::1 +msgid ":py:obj:`build_head `" +msgstr "" + +#: ../../api/models.rst:34::1 +msgid ":py:obj:`build_loss `" +msgstr "" + +#: ../../api/models.rst:38 +msgid "Classifiers" +msgstr "" + +#: ../../api/models.rst:49::1 +msgid ":py:obj:`BaseClassifier `" +msgstr "" + +#: ../../api/models.rst:49::1 +msgid ":py:obj:`ImageClassifier `" +msgstr "" + +#: ../../api/models.rst:49::1 +msgid ":py:obj:`TimmClassifier `" +msgstr "" + +#: ../../api/models.rst:49::1 +msgid ":py:obj:`HuggingFaceClassifier `" +msgstr "" + +#: ../../api/models.rst:53 +msgid "Backbones" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`AlexNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`CSPDarkNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`CSPNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`CSPResNeXt `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`CSPResNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`Conformer `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`ConvMixer `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid "ConvMixer." 
+msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`ConvNeXt `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`DaViT `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`DeiT3 `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`DenseNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`DistilledVisionTransformer `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`EdgeNeXt `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`EfficientFormer `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`EfficientNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`HorNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`HRNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`InceptionV3 `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`LeNet5 `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`MViT `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`MlpMixer `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`MobileNetV2 `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`MobileNetV3 `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`MobileOne `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`MobileViT `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`PCPVT `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`PoolFormer `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`RegNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`RepLKNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`RepMLPNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`RepVGG `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`Res2Net `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`ResNeSt `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`ResNeXt `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`ResNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`ResNetV1c `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`ResNetV1d `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`ResNet_CIFAR `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`SEResNeXt `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`SEResNet `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`SVT `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`ShuffleNetV1 `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`ShuffleNetV2 `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`SwinTransformer `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`SwinTransformerV2 `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`T2T_ViT `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`TIMMBackbone `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`TNT `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`VAN `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`VGG `" +msgstr "" + +#: ../../api/models.rst:111::1 +msgid ":py:obj:`VisionTransformer `" +msgstr "" + +#: ../../api/models.rst:115 +msgid "Necks" +msgstr "" + +#: ../../api/models.rst:125::1 +msgid ":py:obj:`GlobalAveragePooling `" +msgstr "" + +#: ../../api/models.rst:125::1 +msgid ":py:obj:`GeneralizedMeanPooling `" +msgstr "" + +#: 
../../api/models.rst:125::1 +msgid ":py:obj:`HRFuseScales `" +msgstr "" + +#: ../../api/models.rst:129 +msgid "Heads" +msgstr "" + +#: ../../api/models.rst:147::1 +msgid ":py:obj:`ClsHead `" +msgstr "" + +#: ../../api/models.rst:147::1 +msgid ":py:obj:`LinearClsHead `" +msgstr "" + +#: ../../api/models.rst:147::1 +msgid ":py:obj:`StackedLinearClsHead `" +msgstr "" + +#: ../../api/models.rst:147::1 +msgid ":py:obj:`VisionTransformerClsHead `" +msgstr "" + +#: ../../api/models.rst:147::1 +msgid ":py:obj:`EfficientFormerClsHead `" +msgstr "" + +#: ../../api/models.rst:147::1 +msgid ":py:obj:`DeiTClsHead `" +msgstr "" + +#: ../../api/models.rst:147::1 +msgid ":py:obj:`ConformerHead `" +msgstr "" + +#: ../../api/models.rst:147::1 +msgid ":py:obj:`ArcFaceClsHead `" +msgstr "" + +#: ../../api/models.rst:147::1 +msgid ":py:obj:`MultiLabelClsHead `" +msgstr "" + +#: ../../api/models.rst:147::1 +msgid ":py:obj:`MultiLabelLinearClsHead `" +msgstr "" + +#: ../../api/models.rst:147::1 +msgid ":py:obj:`CSRAClsHead `" +msgstr "" + +#: ../../api/models.rst:151 +msgid "Losses" +msgstr "" + +#: ../../api/models.rst:163::1 +msgid ":py:obj:`CrossEntropyLoss `" +msgstr "" + +#: ../../api/models.rst:163::1 +msgid ":py:obj:`LabelSmoothLoss `" +msgstr "" + +#: ../../api/models.rst:163::1 +msgid ":py:obj:`FocalLoss `" +msgstr "" + +#: ../../api/models.rst:163::1 +msgid ":py:obj:`AsymmetricLoss `" +msgstr "" + +#: ../../api/models.rst:163::1 +msgid ":py:obj:`SeesawLoss `" +msgstr "" + +#: ../../api/models.rst:167 +msgid "models.utils" +msgstr "" + +#: ../../api/models.rst:169 +msgid "This package includes some helper functions and common components used in various networks." +msgstr "" + +#: ../../api/models.rst:174 +msgid "Common Components" +msgstr "" + +#: ../../api/models.rst:192::1 +msgid ":py:obj:`InvertedResidual `" +msgstr "" + +#: ../../api/models.rst:192::1 +msgid ":py:obj:`SELayer `" +msgstr "" + +#: ../../api/models.rst:192::1 +msgid ":py:obj:`WindowMSA `" +msgstr "" + +#: ../../api/models.rst:192::1 +msgid ":py:obj:`WindowMSAV2 `" +msgstr "" + +#: ../../api/models.rst:192::1 +msgid ":py:obj:`ShiftWindowMSA `" +msgstr "" + +#: ../../api/models.rst:192::1 +msgid ":py:obj:`MultiheadAttention `" +msgstr "" + +#: ../../api/models.rst:192::1 +msgid ":py:obj:`ConditionalPositionEncoding `" +msgstr "" + +#: ../../api/models.rst:192::1 +msgid ":py:obj:`PatchEmbed `" +msgstr "" + +#: ../../api/models.rst:192::1 +msgid ":py:obj:`PatchMerging `" +msgstr "" + +#: ../../api/models.rst:192::1 +msgid ":py:obj:`HybridEmbed `" +msgstr "" + +#: ../../api/models.rst:192::1 +msgid ":py:obj:`LayerScale `" +msgstr "" + +#: ../../api/models.rst:196 +msgid "Helper Functions" +msgstr "" + +#: ../../api/models.rst:207::1 +msgid ":py:obj:`channel_shuffle `" +msgstr "" + +#: ../../api/models.rst:207::1 +msgid ":py:obj:`make_divisible `" +msgstr "" + +#: ../../api/models.rst:207::1 +msgid ":py:obj:`resize_pos_embed `" +msgstr "" + +#: ../../api/models.rst:207::1 +msgid ":py:obj:`resize_relative_position_bias_table `" +msgstr "" + +#: ../../api/models.rst:207::1 +msgid ":py:obj:`to_ntuple `" +msgstr "" + +#: ../../api/models.rst:207::1 +msgid ":py:obj:`is_tracing `" +msgstr "" + +#: ../../api/structures.rst:7 +msgid "mmcls.structures" +msgstr "" + +#: ../../api/structures.rst:9 +msgid "This package includes basic data structures for classification tasks." 
+msgstr "该包中包含了用于分类任务的基础数据结构。" + +#: ../../api/structures.rst:12 +msgid "ClsDataSample" +msgstr "" + +#: mmcls.structures.cls_data_sample.ClsDataSample:1 of +msgid "A data structure interface of classification task." +msgstr "" + +#: mmcls.structures.cls_data_sample.ClsDataSample:3 of +msgid "It's used as interfaces between different components." +msgstr "" + +#: mmcls.structures.cls_data_sample.ClsDataSample of +msgid "Meta fields" +msgstr "" + +#: mmcls.structures.cls_data_sample.ClsDataSample:5 of +msgid "**img_shape** (*Tuple*) -- The shape of the corresponding input image. Used for visualization." +msgstr "" + +#: mmcls.structures.cls_data_sample.ClsDataSample:7 of +msgid "**ori_shape** (*Tuple*) -- The original shape of the corresponding image. Used for visualization." +msgstr "" + +#: mmcls.structures.cls_data_sample.ClsDataSample:9 of +msgid "**num_classes** (*int*) -- The number of all categories. Used for label format conversion." +msgstr "" + +#: mmcls.structures.cls_data_sample.ClsDataSample of +msgid "Data fields" +msgstr "" + +#: mmcls.structures.cls_data_sample.ClsDataSample:12 of +msgid "**gt_label** (:obj:`~mmengine.structures.LabelData`) -- The ground truth label." +msgstr "" + +#: mmcls.structures.cls_data_sample.ClsDataSample:14 of +msgid "**pred_label** (:obj:`~mmengine.structures.LabelData`) -- The predicted label." +msgstr "" + +#: mmcls.structures.cls_data_sample.ClsDataSample:16 of +msgid "**scores** (*torch.Tensor*) -- The outputs of model." +msgstr "" + +#: mmcls.structures.cls_data_sample.ClsDataSample:17 of +msgid "**logits** (*torch.Tensor*) -- The outputs of model without softmax nor sigmoid." +msgstr "" + +#: ../../api/utils.rst:7 +msgid "mmcls.utils" +msgstr "" + +#: ../../api/utils.rst:9 +msgid "This package includes some useful helper functions for developing." +msgstr "该包中包含了一些有助于开发的辅助函数。" + +#: ../../api/utils.rst:16::1 +msgid ":py:obj:`collect_env `" +msgstr "" + +#: ../../api/utils.rst:16::1 +msgid ":py:obj:`register_all_modules `" +msgstr "" + +#: ../../api/visualization.rst:7 +msgid "mmcls.visualization" +msgstr "" + +#: ../../api/visualization.rst:9 +msgid "This package includes visualizer components for classification tasks." +msgstr "该包中包含了用于分类任务的一些可视化组件。" + +#: ../../api/visualization.rst:12 +msgid "ClsVisualizer" +msgstr "" + +#: mmcls.visualization.cls_visualizer.ClsVisualizer:1 of +msgid "Universal Visualizer for classification task." +msgstr "" + +#: mmcls.visualization.cls_visualizer.ClsVisualizer:3 of +msgid "Name of the instance. Defaults to 'visualizer'." +msgstr "" + +#: mmcls.visualization.cls_visualizer.ClsVisualizer:5 of +msgid "the origin image to draw. The format should be RGB. Defaults to None." +msgstr "" + +#: mmcls.visualization.cls_visualizer.ClsVisualizer:8 of +msgid "Visual backend config list. Defaults to None." +msgstr "" + +#: mmcls.visualization.cls_visualizer.ClsVisualizer:11 of +msgid "Save file dir for all storage backends. If it is None, the backend storage will not save any data." +msgstr "" + +#: mmcls.visualization.cls_visualizer.ClsVisualizer:14 of +msgid "Keyword parameters of figure for saving. Defaults to empty dict." +msgstr "" + +#: mmcls.visualization.cls_visualizer.ClsVisualizer:17 of +msgid "Keyword parameters of figure for showing. Defaults to empty dict." 
+msgstr "" diff --git a/docs/zh_CN/locales/zh_CN/LC_MESSAGES/papers.po b/docs/zh_CN/locales/zh_CN/LC_MESSAGES/papers.po new file mode 100644 index 0000000..70fe7c2 --- /dev/null +++ b/docs/zh_CN/locales/zh_CN/LC_MESSAGES/papers.po @@ -0,0 +1,8971 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) 2020, OpenMMLab +# This file is distributed under the same license as the MMClassification +# package. +# FIRST AUTHOR , 2021. +# +msgid "" +msgstr "" +"Project-Id-Version: MMClassification\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2022-11-22 08:42+0800\n" +"PO-Revision-Date: 2022-11-22 14:24+0800\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 2.9.1\n" +"Last-Translator: Ma Zerun \n" +"Language-Team: \n" +"Language: zh_CN\n" +"X-Generator: Poedit 2.3\n" + +#: ../../papers/conformer.md:4 +msgid "Conformer" +msgstr "" + +#: ../../papers/conformer.md:6 +msgid "" +"[Conformer: Local Features Coupling Global Representations for Visual Recognition](https://arxiv.org/" +"abs/2105.03889)" +msgstr "" + +#: ../../papers/conformer.md:10 ../../papers/convmixer.md:10 ../../papers/convnext.md:20 +#: ../../papers/cspnet.md:10 ../../papers/csra.md:10 ../../papers/davit.md:10 ../../papers/deit.md:10 +#: ../../papers/deit3.md:10 ../../papers/densenet.md:10 ../../papers/edgenext.md:10 +#: ../../papers/efficientformer.md:10 ../../papers/efficientnet.md:20 ../../papers/hornet.md:10 +#: ../../papers/hrnet.md:10 ../../papers/inception_v3.md:10 ../../papers/mlp_mixer.md:10 +#: ../../papers/mobilenet_v2.md:10 ../../papers/mobilenet_v3.md:10 ../../papers/mobileone.md:21 +#: ../../papers/mobilevit.md:24 ../../papers/mvit.md:10 ../../papers/poolformer.md:10 +#: ../../papers/regnet.md:10 ../../papers/replknet.md:10 ../../papers/repmlp.md:10 ../../papers/repvgg.md:10 +#: ../../papers/res2net.md:10 ../../papers/resnet.md:28 ../../papers/resnext.md:10 ../../papers/seresnet.md:10 +#: ../../papers/shufflenet_v1.md:10 ../../papers/shufflenet_v2.md:10 ../../papers/swin_transformer.md:20 +#: ../../papers/swin_transformer_v2.md:28 ../../papers/t2t_vit.md:10 ../../papers/tnt.md:10 +#: ../../papers/twins.md:10 ../../papers/van.md:10 ../../papers/vgg.md:10 +#: ../../papers/vision_transformer.md:20 ../../papers/wrn.md:10 +msgid "Abstract" +msgstr "摘要" + +#: ../../papers/conformer.md:12 +#, python-format +msgid "" +"Within Convolutional Neural Network (CNN), the convolution operations are good at extracting local features " +"but experience difficulty to capture global representations. Within visual transformer, the cascaded self-" +"attention modules can capture long-distance feature dependencies but unfortunately deteriorate local " +"feature details. In this paper, we propose a hybrid network structure, termed Conformer, to take advantage " +"of convolutional operations and self-attention mechanisms for enhanced representation learning. Conformer " +"roots in the Feature Coupling Unit (FCU), which fuses local features and global representations under " +"different resolutions in an interactive fashion. Conformer adopts a concurrent structure so that local " +"features and global representations are retained to the maximum extent. Experiments show that Conformer, " +"under the comparable parameter complexity, outperforms the visual transformer (DeiT-B) by 2.3% on ImageNet. 
" +"On MSCOCO, it outperforms ResNet-101 by 3.7% and 3.6% mAPs for object detection and instance segmentation, " +"respectively, demonstrating the great potential to be a general backbone network." +msgstr "" + +#: ../../papers/conformer.md:18 ../../papers/convmixer.md:22 ../../papers/convnext.md:85 +#: ../../papers/cspnet.md:22 ../../papers/csra.md:18 ../../papers/davit.md:18 ../../papers/deit.md:18 +#: ../../papers/deit3.md:18 ../../papers/densenet.md:18 ../../papers/edgenext.md:22 +#: ../../papers/efficientformer.md:18 ../../papers/efficientnet.md:31 ../../papers/hornet.md:18 +#: ../../papers/hrnet.md:18 ../../papers/inception_v3.md:18 ../../papers/mlp_mixer.md:18 +#: ../../papers/mobilenet_v2.md:20 ../../papers/mobilenet_v3.md:18 ../../papers/mobileone.md:151 +#: ../../papers/mobilevit.md:89 ../../papers/mvit.md:25 ../../papers/poolformer.md:18 +#: ../../papers/regnet.md:18 ../../papers/replknet.md:18 ../../papers/repmlp.md:18 ../../papers/repvgg.md:18 +#: ../../papers/res2net.md:18 ../../papers/resnet.md:94 ../../papers/resnext.md:18 ../../papers/seresnet.md:18 +#: ../../papers/shufflenet_v1.md:18 ../../papers/shufflenet_v2.md:18 ../../papers/swin_transformer.md:84 +#: ../../papers/swin_transformer_v2.md:94 ../../papers/t2t_vit.md:18 ../../papers/tnt.md:18 +#: ../../papers/twins.md:18 ../../papers/van.md:18 ../../papers/vgg.md:18 +#: ../../papers/vision_transformer.md:89 ../../papers/wrn.md:18 +msgid "Results and models" +msgstr "结果和模型" + +#: ../../papers/conformer.md:20 ../../papers/convmixer.md:24 ../../papers/convnext.md:68 +#: ../../papers/convnext.md:87 ../../papers/cspnet.md:24 ../../papers/davit.md:20 ../../papers/deit.md +#: ../../papers/deit.md:20 ../../papers/deit3.md:20 ../../papers/densenet.md:20 ../../papers/edgenext.md:24 +#: ../../papers/efficientformer.md:20 ../../papers/efficientnet.md:33 ../../papers/hornet.md:20 +#: ../../papers/hrnet.md:20 ../../papers/inception_v3.md:20 ../../papers/mlp_mixer.md:20 +#: ../../papers/mobilenet_v2.md:22 ../../papers/mobilenet_v3.md:20 ../../papers/mobileone.md:153 +#: ../../papers/mobilevit.md:91 ../../papers/mvit.md:27 ../../papers/poolformer.md:20 +#: ../../papers/regnet.md:20 ../../papers/replknet.md:20 ../../papers/repmlp.md:20 ../../papers/repvgg.md:20 +#: ../../papers/res2net.md:20 ../../papers/resnet.md:120 ../../papers/resnext.md:20 +#: ../../papers/seresnet.md:20 ../../papers/shufflenet_v1.md:20 ../../papers/shufflenet_v2.md:20 +#: ../../papers/swin_transformer.md:97 ../../papers/swin_transformer_v2.md:105 ../../papers/t2t_vit.md:20 +#: ../../papers/tnt.md:20 ../../papers/twins.md:20 ../../papers/van.md:20 ../../papers/vgg.md:20 +#: ../../papers/vision_transformer.md:109 ../../papers/wrn.md:20 +msgid "ImageNet-1k" +msgstr "" + +#: ../../papers/conformer.md ../../papers/convmixer.md ../../papers/convnext.md:68 ../../papers/cspnet.md +#: ../../papers/csra.md ../../papers/davit.md ../../papers/deit.md ../../papers/deit3.md +#: ../../papers/densenet.md ../../papers/edgenext.md ../../papers/efficientformer.md +#: ../../papers/efficientnet.md ../../papers/hornet.md ../../papers/hrnet.md ../../papers/inception_v3.md +#: ../../papers/mlp_mixer.md ../../papers/mobilenet_v2.md ../../papers/mobilenet_v3.md +#: ../../papers/mobileone.md:86 ../../papers/mobilevit.md:71 ../../papers/mvit.md ../../papers/poolformer.md +#: ../../papers/regnet.md ../../papers/replknet.md ../../papers/repmlp.md ../../papers/repvgg.md +#: ../../papers/res2net.md ../../papers/resnet.md:76 ../../papers/resnext.md ../../papers/seresnet.md +#: 
../../papers/shufflenet_v1.md ../../papers/shufflenet_v2.md ../../papers/swin_transformer.md:66 +#: ../../papers/swin_transformer_v2.md:76 ../../papers/t2t_vit.md ../../papers/tnt.md ../../papers/twins.md +#: ../../papers/van.md ../../papers/vgg.md ../../papers/vision_transformer.md:71 ../../papers/wrn.md +msgid "Model" +msgstr "模型" + +#: ../../papers/conformer.md ../../papers/convmixer.md ../../papers/convnext.md:68 ../../papers/cspnet.md +#: ../../papers/csra.md ../../papers/davit.md ../../papers/deit.md ../../papers/deit3.md +#: ../../papers/densenet.md ../../papers/edgenext.md ../../papers/efficientformer.md +#: ../../papers/efficientnet.md ../../papers/hornet.md ../../papers/hrnet.md ../../papers/inception_v3.md +#: ../../papers/mlp_mixer.md ../../papers/mobilenet_v2.md ../../papers/mobilenet_v3.md +#: ../../papers/mobileone.md:86 ../../papers/mobilevit.md:71 ../../papers/mvit.md ../../papers/poolformer.md +#: ../../papers/regnet.md ../../papers/replknet.md ../../papers/repmlp.md ../../papers/repvgg.md +#: ../../papers/res2net.md ../../papers/resnet.md:76 ../../papers/resnext.md ../../papers/seresnet.md +#: ../../papers/shufflenet_v1.md ../../papers/shufflenet_v2.md ../../papers/swin_transformer.md:66 +#: ../../papers/swin_transformer_v2.md:76 ../../papers/t2t_vit.md ../../papers/tnt.md ../../papers/twins.md +#: ../../papers/van.md ../../papers/vgg.md ../../papers/vision_transformer.md:71 ../../papers/wrn.md +msgid "Params(M)" +msgstr "参数量(M)" + +#: ../../papers/conformer.md ../../papers/convmixer.md ../../papers/convnext.md:68 ../../papers/cspnet.md +#: ../../papers/csra.md ../../papers/davit.md ../../papers/deit.md ../../papers/deit3.md +#: ../../papers/densenet.md ../../papers/edgenext.md ../../papers/efficientformer.md +#: ../../papers/efficientnet.md ../../papers/hornet.md ../../papers/hrnet.md ../../papers/inception_v3.md +#: ../../papers/mlp_mixer.md ../../papers/mobilenet_v2.md ../../papers/mobilenet_v3.md +#: ../../papers/mobileone.md:86 ../../papers/mobilevit.md:71 ../../papers/mvit.md ../../papers/poolformer.md +#: ../../papers/regnet.md ../../papers/replknet.md ../../papers/repmlp.md ../../papers/repvgg.md +#: ../../papers/res2net.md ../../papers/resnet.md:76 ../../papers/resnext.md ../../papers/seresnet.md +#: ../../papers/shufflenet_v1.md ../../papers/shufflenet_v2.md ../../papers/swin_transformer.md:66 +#: ../../papers/swin_transformer_v2.md:76 ../../papers/t2t_vit.md ../../papers/tnt.md ../../papers/twins.md +#: ../../papers/van.md ../../papers/vgg.md ../../papers/vision_transformer.md:71 ../../papers/wrn.md +msgid "Flops(G)" +msgstr "" + +#: ../../papers/conformer.md ../../papers/convmixer.md ../../papers/convnext.md:68 ../../papers/cspnet.md +#: ../../papers/davit.md ../../papers/deit.md ../../papers/deit3.md ../../papers/densenet.md +#: ../../papers/edgenext.md ../../papers/efficientformer.md ../../papers/efficientnet.md +#: ../../papers/hornet.md ../../papers/hrnet.md ../../papers/inception_v3.md ../../papers/mlp_mixer.md +#: ../../papers/mobilenet_v2.md ../../papers/mobilenet_v3.md ../../papers/mobileone.md:86 +#: ../../papers/mobilevit.md:71 ../../papers/mvit.md ../../papers/poolformer.md ../../papers/regnet.md +#: ../../papers/replknet.md ../../papers/repmlp.md ../../papers/repvgg.md ../../papers/res2net.md +#: ../../papers/resnet.md:76 ../../papers/resnext.md ../../papers/seresnet.md ../../papers/shufflenet_v1.md +#: ../../papers/shufflenet_v2.md ../../papers/swin_transformer.md:66 ../../papers/swin_transformer_v2.md:76 +#: ../../papers/t2t_vit.md 
../../papers/tnt.md ../../papers/twins.md ../../papers/van.md ../../papers/vgg.md +#: ../../papers/vision_transformer.md:71 ../../papers/wrn.md +msgid "Top-1 (%)" +msgstr "" + +#: ../../papers/conformer.md ../../papers/convmixer.md ../../papers/convnext.md:68 ../../papers/cspnet.md +#: ../../papers/davit.md ../../papers/deit.md ../../papers/deit3.md ../../papers/densenet.md +#: ../../papers/edgenext.md ../../papers/efficientformer.md ../../papers/efficientnet.md +#: ../../papers/hornet.md ../../papers/hrnet.md ../../papers/inception_v3.md ../../papers/mlp_mixer.md +#: ../../papers/mobilenet_v2.md ../../papers/mobilenet_v3.md ../../papers/mobileone.md:86 +#: ../../papers/mobilevit.md:71 ../../papers/mvit.md ../../papers/poolformer.md ../../papers/regnet.md +#: ../../papers/replknet.md ../../papers/repmlp.md ../../papers/repvgg.md ../../papers/res2net.md +#: ../../papers/resnet.md:76 ../../papers/resnext.md ../../papers/seresnet.md ../../papers/shufflenet_v1.md +#: ../../papers/shufflenet_v2.md ../../papers/swin_transformer.md:66 ../../papers/swin_transformer_v2.md:76 +#: ../../papers/t2t_vit.md ../../papers/tnt.md ../../papers/twins.md ../../papers/van.md ../../papers/vgg.md +#: ../../papers/vision_transformer.md:71 ../../papers/wrn.md +msgid "Top-5 (%)" +msgstr "" + +#: ../../papers/conformer.md ../../papers/convmixer.md ../../papers/convnext.md:68 ../../papers/cspnet.md +#: ../../papers/csra.md ../../papers/davit.md ../../papers/deit.md ../../papers/deit3.md +#: ../../papers/densenet.md ../../papers/edgenext.md ../../papers/efficientformer.md +#: ../../papers/efficientnet.md ../../papers/hornet.md ../../papers/hrnet.md ../../papers/inception_v3.md +#: ../../papers/mlp_mixer.md ../../papers/mobilenet_v2.md ../../papers/mobilenet_v3.md +#: ../../papers/mobileone.md:86 ../../papers/mobilevit.md:71 ../../papers/mvit.md ../../papers/poolformer.md +#: ../../papers/regnet.md ../../papers/replknet.md ../../papers/repmlp.md ../../papers/repvgg.md +#: ../../papers/res2net.md ../../papers/resnet.md:76 ../../papers/resnext.md ../../papers/seresnet.md +#: ../../papers/shufflenet_v1.md ../../papers/shufflenet_v2.md ../../papers/swin_transformer.md:66 +#: ../../papers/swin_transformer_v2.md:76 ../../papers/t2t_vit.md ../../papers/tnt.md ../../papers/twins.md +#: ../../papers/van.md ../../papers/vgg.md ../../papers/vision_transformer.md:71 ../../papers/wrn.md +msgid "Config" +msgstr "配置文件" + +#: ../../papers/conformer.md ../../papers/convmixer.md ../../papers/convnext.md:68 ../../papers/cspnet.md +#: ../../papers/csra.md ../../papers/davit.md ../../papers/deit.md ../../papers/deit3.md +#: ../../papers/densenet.md ../../papers/edgenext.md ../../papers/efficientformer.md +#: ../../papers/efficientnet.md ../../papers/hornet.md ../../papers/hrnet.md ../../papers/inception_v3.md +#: ../../papers/mlp_mixer.md ../../papers/mobilenet_v2.md ../../papers/mobilenet_v3.md +#: ../../papers/mobileone.md:86 ../../papers/mobilevit.md:71 ../../papers/mvit.md ../../papers/poolformer.md +#: ../../papers/regnet.md ../../papers/replknet.md ../../papers/repmlp.md ../../papers/repvgg.md +#: ../../papers/res2net.md ../../papers/resnet.md:76 ../../papers/resnext.md ../../papers/seresnet.md +#: ../../papers/shufflenet_v1.md ../../papers/shufflenet_v2.md ../../papers/swin_transformer.md:66 +#: ../../papers/swin_transformer_v2.md:76 ../../papers/t2t_vit.md ../../papers/tnt.md ../../papers/twins.md +#: ../../papers/van.md ../../papers/vgg.md ../../papers/vision_transformer.md:71 ../../papers/wrn.md +msgid "Download" +msgstr "下载" + 
+#: ../../papers/conformer.md +msgid "Conformer-tiny-p16\\*" +msgstr "" + +#: ../../papers/conformer.md ../../papers/resnet.md:76 +msgid "23.52" +msgstr "" + +#: ../../papers/conformer.md +msgid "4.90" +msgstr "" + +#: ../../papers/conformer.md +msgid "81.31" +msgstr "" + +#: ../../papers/conformer.md +msgid "95.60" +msgstr "" + +#: ../../papers/conformer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/conformer/conformer-tiny-" +"p16_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/conformer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-tiny-" +"p16_3rdparty_8xb128_in1k_20211206-f6860372.pth)" +msgstr "" + +#: ../../papers/conformer.md +msgid "Conformer-small-p32\\*" +msgstr "" + +#: ../../papers/conformer.md ../../papers/deit3.md +msgid "38.85" +msgstr "" + +#: ../../papers/conformer.md +msgid "7.09" +msgstr "" + +#: ../../papers/conformer.md +msgid "81.96" +msgstr "" + +#: ../../papers/conformer.md +msgid "96.02" +msgstr "" + +#: ../../papers/conformer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/conformer/conformer-small-" +"p32_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/conformer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-" +"p32_8xb128_in1k_20211206-947a0816.pth)" +msgstr "" + +#: ../../papers/conformer.md +msgid "Conformer-small-p16\\*" +msgstr "" + +#: ../../papers/conformer.md +msgid "37.67" +msgstr "" + +#: ../../papers/conformer.md +msgid "10.31" +msgstr "" + +#: ../../papers/conformer.md +msgid "83.32" +msgstr "" + +#: ../../papers/conformer.md +msgid "96.46" +msgstr "" + +#: ../../papers/conformer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/conformer/conformer-small-" +"p16_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/conformer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-small-" +"p16_3rdparty_8xb128_in1k_20211206-3065dcf5.pth)" +msgstr "" + +#: ../../papers/conformer.md +msgid "Conformer-base-p16\\*" +msgstr "" + +#: ../../papers/conformer.md +msgid "83.29" +msgstr "" + +#: ../../papers/conformer.md +msgid "22.89" +msgstr "" + +#: ../../papers/conformer.md ../../papers/efficientnet.md +msgid "83.82" +msgstr "" + +#: ../../papers/conformer.md ../../papers/twins.md +msgid "96.59" +msgstr "" + +#: ../../papers/conformer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/conformer/conformer-base-" +"p16_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/conformer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/conformer/conformer-base-" +"p16_3rdparty_8xb128_in1k_20211206-bfdf8637.pth)" +msgstr "" + +#: ../../papers/conformer.md:29 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/pengzhiliang/Conformer). The " +"config files of these models are only for validation. 
We don't ensure these config files' training accuracy " +"and welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/conformer.md:31 ../../papers/convmixer.md:34 ../../papers/convnext.md:116 +#: ../../papers/cspnet.md:34 ../../papers/csra.md:26 ../../papers/davit.md:32 ../../papers/deit.md:43 +#: ../../papers/deit3.md:43 ../../papers/densenet.md:31 ../../papers/edgenext.md:37 +#: ../../papers/efficientformer.md:30 ../../papers/efficientnet.md:130 ../../papers/hornet.md:45 +#: ../../papers/hrnet.md:36 ../../papers/inception_v3.md:28 ../../papers/mlp_mixer.md:29 +#: ../../papers/mobilenet_v2.md:28 ../../papers/mobilenet_v3.md:35 ../../papers/mobileone.md:163 +#: ../../papers/mobilevit.md:101 ../../papers/mvit.md:38 ../../papers/poolformer.md:32 +#: ../../papers/regnet.md:43 ../../papers/replknet.md:88 ../../papers/repmlp.md:87 ../../papers/repvgg.md:94 +#: ../../papers/res2net.md:30 ../../papers/resnet.md:152 ../../papers/resnext.md:29 +#: ../../papers/seresnet.md:27 ../../papers/shufflenet_v1.md:26 ../../papers/shufflenet_v2.md:26 +#: ../../papers/swin_transformer.md:120 ../../papers/swin_transformer_v2.md:124 ../../papers/t2t_vit.md:30 +#: ../../papers/tnt.md:28 ../../papers/twins.md:33 ../../papers/van.md:31 ../../papers/vgg.md:33 +#: ../../papers/vision_transformer.md:121 ../../papers/wrn.md:30 +msgid "Citation" +msgstr "引用" + +#: ../../papers/convmixer.md:4 +msgid "ConvMixer" +msgstr "" + +#: ../../papers/convmixer.md:6 +msgid "[Patches Are All You Need?](https://arxiv.org/abs/2201.09792)" +msgstr "" + +#: ../../papers/convmixer.md:14 +msgid "" +"Although convolutional networks have been the dominant architecture for vision tasks for many years, recent " +"experiments have shown that Transformer-based models, most notably the Vision Transformer (ViT), may exceed " +"their performance in some settings. However, due to the quadratic runtime of the self-attention layers in " +"Transformers, ViTs require the use of patch embeddings, which group together small regions of the image " +"into single input features, in order to be applied to larger image sizes. This raises a question: Is the " +"performance of ViTs due to the inherently-more-powerful Transformer architecture, or is it at least partly " +"due to using patches as the input representation? In this paper, we present some evidence for the latter: " +"specifically, we propose the ConvMixer, an extremely simple model that is similar in spirit to the ViT and " +"the even-more-basic MLP-Mixer in that it operates directly on patches as input, separates the mixing of " +"spatial and channel dimensions, and maintains equal size and resolution throughout the network. In " +"contrast, however, the ConvMixer uses only standard convolutions to achieve the mixing steps. Despite its " +"simplicity, we show that the ConvMixer outperforms the ViT, MLP-Mixer, and some of their variants for " +"similar parameter counts and data set sizes, in addition to outperforming classical vision models such as " +"the ResNet." 
+msgstr "" + +#: ../../papers/convmixer.md +msgid "ConvMixer-768/32\\*" +msgstr "" + +#: ../../papers/convmixer.md +msgid "21.11" +msgstr "" + +#: ../../papers/convmixer.md +msgid "19.62" +msgstr "" + +#: ../../papers/convmixer.md +msgid "80.16" +msgstr "" + +#: ../../papers/convmixer.md +msgid "95.08" +msgstr "" + +#: ../../papers/convmixer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/convmixer/" +"convmixer-768-32_10xb64_in1k.py)" +msgstr "" + +#: ../../papers/convmixer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convmixer/" +"convmixer-768-32_3rdparty_10xb64_in1k_20220323-bca1f7b8.pth)" +msgstr "" + +#: ../../papers/convmixer.md +msgid "ConvMixer-1024/20\\*" +msgstr "" + +#: ../../papers/convmixer.md +msgid "24.38" +msgstr "" + +#: ../../papers/convmixer.md +msgid "5.55" +msgstr "" + +#: ../../papers/convmixer.md +msgid "76.94" +msgstr "" + +#: ../../papers/convmixer.md +msgid "93.36" +msgstr "" + +#: ../../papers/convmixer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/convmixer/" +"convmixer-1024-20_10xb64_in1k.py)" +msgstr "" + +#: ../../papers/convmixer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convmixer/" +"convmixer-1024-20_3rdparty_10xb64_in1k_20220323-48f8aeba.pth)" +msgstr "" + +#: ../../papers/convmixer.md +msgid "ConvMixer-1536/20\\*" +msgstr "" + +#: ../../papers/convmixer.md +msgid "51.63" +msgstr "" + +#: ../../papers/convmixer.md +msgid "48.71" +msgstr "" + +#: ../../papers/convmixer.md +msgid "81.37" +msgstr "" + +#: ../../papers/convmixer.md ../../papers/swin_transformer.md:66 +msgid "95.61" +msgstr "" + +#: ../../papers/convmixer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/convmixer/" +"convmixer-1536-20_10xb64_in1k.py)" +msgstr "" + +#: ../../papers/convmixer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convmixer/" +"convmixer-1536_20_3rdparty_10xb64_in1k_20220323-ea5786f3.pth)" +msgstr "" + +#: ../../papers/convmixer.md:32 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/locuslab/convmixer). The config " +"files of these models are only for inference. We don't ensure these config files' training accuracy and " +"welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/convnext.md:4 +msgid "ConvNeXt" +msgstr "" + +#: ../../papers/convnext.md:6 +msgid "[A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545v1)" +msgstr "" + +#: ../../papers/convnext.md:10 ../../papers/efficientnet.md:10 ../../papers/mobileone.md:10 +#: ../../papers/mobilevit.md:10 ../../papers/resnet.md:10 ../../papers/swin_transformer.md:10 +#: ../../papers/swin_transformer_v2.md:10 ../../papers/vision_transformer.md:10 +msgid "Introduction" +msgstr "简介" + +#: ../../papers/convnext.md:12 +msgid "" +"**ConvNeXt** is initially described in [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545v1), which " +"is a pure convolutional model (ConvNet), inspired by the design of Vision Transformers. The ConvNeXt has " +"the pyramid structure and achieve competitive performance on various vision tasks, with simplicity and " +"efficiency." +msgstr "" + +#: ../../papers/convnext.md:34 ../../papers/efficientnet.md:78 ../../papers/mobilevit.md:37 +#: ../../papers/resnet.md:42 ../../papers/swin_transformer.md:32 ../../papers/swin_transformer_v2.md:42 +#: ../../papers/vision_transformer.md:33 +msgid "How to use it?" 
+msgstr "使用方式" + +#: ../../papers/convnext.md:39 ../../papers/efficientnet.md:83 ../../papers/mobileone.md:40 +#: ../../papers/mobilevit.md:42 ../../papers/resnet.md:47 ../../papers/swin_transformer.md:37 +#: ../../papers/swin_transformer_v2.md:47 ../../papers/vision_transformer.md:38 +msgid "Predict image" +msgstr "推理图片" + +#: ../../papers/convnext.md:52 ../../papers/efficientnet.md:96 ../../papers/mobileone.md:63 +#: ../../papers/mobilevit.md:55 ../../papers/resnet.md:60 ../../papers/swin_transformer.md:50 +#: ../../papers/swin_transformer_v2.md:60 ../../papers/vision_transformer.md:51 +msgid "Use the model" +msgstr "调用模型" + +#: ../../papers/convnext.md:69 ../../papers/efficientnet.md:113 ../../papers/mobileone.md:87 +#: ../../papers/mobilevit.md:72 ../../papers/resnet.md:77 ../../papers/swin_transformer.md:67 +#: ../../papers/swin_transformer_v2.md:77 ../../papers/vision_transformer.md:72 +msgid "Train/Test Command" +msgstr "训练/测试" + +#: ../../papers/convnext.md:69 ../../papers/efficientnet.md:113 ../../papers/mobileone.md:87 +#: ../../papers/mobilevit.md:72 ../../papers/resnet.md:77 ../../papers/swin_transformer.md:67 +#: ../../papers/swin_transformer_v2.md:77 ../../papers/vision_transformer.md:72 +msgid "" +"Place the ImageNet dataset to the `data/imagenet/` directory, or prepare datasets according to the [docs]" +"(https://mmclassification.readthedocs.io/en/1.x/user_guides/dataset_prepare.html#prepare-dataset)." +msgstr "" +"将 ImageNet 数据集放置在 `data/imagenet` 目录下,或者根据 [docs](https://mmclassification.readthedocs.io/" +"en/1.x/user_guides/dataset_prepare.html#prepare-dataset) 准备其他数据集。" + +#: ../../papers/convnext.md:71 ../../papers/efficientnet.md:115 ../../papers/mobileone.md:89 +#: ../../papers/mobilevit.md:74 ../../papers/resnet.md:79 ../../papers/swin_transformer.md:69 +#: ../../papers/swin_transformer_v2.md:79 ../../papers/vision_transformer.md:74 +msgid "Train:" +msgstr "训练:" + +#: ../../papers/convnext.md:83 +msgid "" +"For more configurable parameters, please refer to the [API](https://mmclassification.readthedocs.io/en/1.x/" +"api/generated/mmcls.models.backbones.ConvNeXt.html#mmcls.models.backbones.ConvNeXt)." 
+msgstr "" + +#: ../../papers/convnext.md:68 ../../papers/cspnet.md ../../papers/csra.md ../../papers/davit.md +#: ../../papers/deit.md ../../papers/deit3.md ../../papers/edgenext.md ../../papers/hornet.md +#: ../../papers/mvit.md ../../papers/resnet.md:76 ../../papers/swin_transformer.md:66 +#: ../../papers/swin_transformer_v2.md:76 ../../papers/van.md ../../papers/vision_transformer.md:71 +msgid "Pretrain" +msgstr "预训练" + +#: ../../papers/convnext.md:68 +msgid "ConvNeXt-T\\*" +msgstr "" + +#: ../../papers/convnext.md:68 ../../papers/cspnet.md ../../papers/davit.md ../../papers/deit.md +#: ../../papers/deit3.md ../../papers/edgenext.md ../../papers/hornet.md ../../papers/mvit.md +#: ../../papers/swin_transformer.md:66 ../../papers/swin_transformer_v2.md:76 ../../papers/van.md +#: ../../papers/vision_transformer.md:71 +msgid "From scratch" +msgstr "从头训练" + +#: ../../papers/convnext.md:68 +msgid "28.59" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "4.46" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "82.05" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "95.86" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/convnext/convnext-" +"tiny_32xb128_in1k.py)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-" +"tiny_3rdparty_32xb128_in1k_20220124-18abde00.pth)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "ConvNeXt-S\\*" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "50.22" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "8.69" +msgstr "" + +#: ../../papers/convnext.md:68 ../../papers/twins.md +msgid "83.13" +msgstr "" + +#: ../../papers/convnext.md:68 ../../papers/efficientnet.md ../../papers/swin_transformer.md:66 +msgid "96.44" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/convnext/convnext-" +"small_32xb128_in1k.py)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-" +"small_3rdparty_32xb128_in1k_20220124-d39b5192.pth)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "ConvNeXt-B\\*" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "88.59" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "15.36" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "83.85" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "96.74" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/convnext/convnext-" +"base_32xb128_in1k.py)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-" +"base_3rdparty_32xb128_in1k_20220124-d0915162.pth)" +msgstr "" + +#: ../../papers/convnext.md:68 ../../papers/deit3.md ../../papers/hornet.md +#: ../../papers/swin_transformer.md:66 ../../papers/swin_transformer.md:86 +#: ../../papers/swin_transformer_v2.md:76 ../../papers/swin_transformer_v2.md:96 +#: ../../papers/vision_transformer.md:71 ../../papers/vision_transformer.md:97 +msgid "ImageNet-21k" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "85.81" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "97.86" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_in21k-" 
+"pre-3rdparty_32xb128_in1k_20220124-eb2d6ada.pth)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "ConvNeXt-L\\*" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "197.77" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "34.37" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "84.30" +msgstr "" + +#: ../../papers/convnext.md:68 ../../papers/efficientnet.md +msgid "96.89" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/convnext/convnext-" +"large_64xb64_in1k.py)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-" +"large_3rdparty_64xb64_in1k_20220124-f8a0ded0.pth)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "86.61" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "98.04" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-large_in21k-" +"pre-3rdparty_64xb64_in1k_20220124-2412403d.pth)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "ConvNeXt-XL\\*" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "350.20" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "60.93" +msgstr "" + +#: ../../papers/convnext.md:68 ../../papers/deit3.md +msgid "86.97" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "98.20" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/convnext/convnext-" +"xlarge_64xb64_in1k.py)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_in21k-" +"pre-3rdparty_64xb64_in1k_20220124-76b6863d.pth)" +msgstr "" + +#: ../../papers/convnext.md:99 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt). The " +"config files of these models are only for inference. We don't ensure these config files' training accuracy " +"and welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/convnext.md:101 ../../papers/hornet.md:33 +msgid "Pre-trained Models" +msgstr "" + +#: ../../papers/convnext.md:103 +msgid "The pre-trained models on ImageNet-1k or ImageNet-21k are used to fine-tune on the downstream tasks." 
+msgstr "" + +#: ../../papers/convnext.md:68 +msgid "Training Data" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-tiny_3rdparty_32xb128-" +"noema_in1k_20220222-2908964a.pth)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-small_3rdparty_32xb128-" +"noema_in1k_20220222-fa001ca5.pth)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-base_3rdparty_32xb128-" +"noema_in1k_20220222-dba4f95f.pth)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-" +"base_3rdparty_in21k_20220124-13b83eec.pth)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-" +"large_3rdparty_in21k_20220124-41b5a79f.pth)" +msgstr "" + +#: ../../papers/convnext.md:68 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/convnext/convnext-xlarge_3rdparty_in21k_20220124-" +"f909bad7.pth)" +msgstr "" + +#: ../../papers/convnext.md:114 +msgid "*Models with * are converted from the [official repo](https://github.com/facebookresearch/ConvNeXt).*" +msgstr "" + +#: ../../papers/cspnet.md:4 +msgid "CSPNet" +msgstr "" + +#: ../../papers/cspnet.md:6 +msgid "[CSPNet: A New Backbone that can Enhance Learning Capability of CNN](https://arxiv.org/abs/1911.11929)" +msgstr "" + +#: ../../papers/cspnet.md:14 +msgid "" +"Neural networks have enabled state-of-the-art approaches to achieve incredible results on computer vision " +"tasks such as object detection. However, such success greatly relies on costly computation resources, which " +"hinders people with cheap devices from appreciating the advanced technology. In this paper, we propose " +"Cross Stage Partial Network (CSPNet) to mitigate the problem that previous works require heavy inference " +"computations from the network architecture perspective. We attribute the problem to the duplicate gradient " +"information within network optimization. The proposed networks respect the variability of the gradients by " +"integrating feature maps from the beginning and the end of a network stage, which, in our experiments, " +"reduces computations by 20% with equivalent or even superior accuracy on the ImageNet dataset, and " +"significantly outperforms state-of-the-art approaches in terms of AP50 on the MS COCO object detection " +"dataset. The CSPNet is easy to implement and general enough to cope with architectures based on ResNet, " +"ResNeXt, and DenseNet. Source code is at this https URL." 
+msgstr "" + +#: ../../papers/cspnet.md +msgid "CSPDarkNet50\\*" +msgstr "" + +#: ../../papers/cspnet.md +msgid "27.64" +msgstr "" + +#: ../../papers/cspnet.md +msgid "5.04" +msgstr "" + +#: ../../papers/cspnet.md +msgid "80.05" +msgstr "" + +#: ../../papers/cspnet.md ../../papers/efficientnet.md +msgid "95.07" +msgstr "" + +#: ../../papers/cspnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/cspnet/cspdarknet50_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/cspnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspdarknet50_3rdparty_8xb32_in1k_20220329-" +"bd275287.pth)" +msgstr "" + +#: ../../papers/cspnet.md +msgid "CSPResNet50\\*" +msgstr "" + +#: ../../papers/cspnet.md +msgid "21.62" +msgstr "" + +#: ../../papers/cspnet.md +msgid "3.48" +msgstr "" + +#: ../../papers/cspnet.md ../../papers/resnet.md:76 +msgid "79.55" +msgstr "" + +#: ../../papers/cspnet.md ../../papers/repvgg.md +msgid "94.68" +msgstr "" + +#: ../../papers/cspnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/cspnet/cspresnet50_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/cspnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/cspnet/cspresnet50_3rdparty_8xb32_in1k_20220329-" +"dd6dddfb.pth)" +msgstr "" + +#: ../../papers/cspnet.md +msgid "CSPResNeXt50\\*" +msgstr "" + +#: ../../papers/cspnet.md +msgid "20.57" +msgstr "" + +#: ../../papers/cspnet.md +msgid "3.11" +msgstr "" + +#: ../../papers/cspnet.md +msgid "79.96" +msgstr "" + +#: ../../papers/cspnet.md ../../papers/efficientnet.md +msgid "94.96" +msgstr "" + +#: ../../papers/cspnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/cspnet/cspresnext50_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/cspnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/cspnet/" +"cspresnext50_3rdparty_8xb32_in1k_20220329-2cc84d21.pth)" +msgstr "" + +#: ../../papers/cspnet.md:32 +msgid "" +"*Models with * are converted from the [timm repo](https://github.com/rwightman/pytorch-image-models). The " +"config files of these models are only for inference. We don't ensure these config files' training accuracy " +"and welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/csra.md:4 +msgid "CSRA" +msgstr "" + +#: ../../papers/csra.md:6 +msgid "" +"[Residual Attention: A Simple but Effective Method for Multi-Label Recognition](https://arxiv.org/" +"abs/2108.02456)" +msgstr "" + +#: ../../papers/csra.md:12 +msgid "" +"Multi-label image recognition is a challenging computer vision task of practical use. Progresses in this " +"area, however, are often characterized by complicated methods, heavy computations, and lack of intuitive " +"explanations. To effectively capture different spatial regions occupied by objects from different " +"categories, we propose an embarrassingly simple module, named class-specific residual attention (CSRA). " +"CSRA generates class-specific features for every category by proposing a simple spatial attention score, " +"and then combines it with the class-agnostic average pooling feature. CSRA achieves state-of-the-art " +"results on multilabel recognition, and at the same time is much simpler than them. Furthermore, with only 4 " +"lines of code, CSRA also leads to consistent improvement across many diverse pretrained models and datasets " +"without any extra training. 
CSRA is both easy to implement and light in computations, which also enjoys " +"intuitive explanations and visualizations." +msgstr "" + +#: ../../papers/csra.md:20 +msgid "VOC2007" +msgstr "" + +#: ../../papers/csra.md +msgid "mAP" +msgstr "" + +#: ../../papers/csra.md +msgid "OF1 (%)" +msgstr "" + +#: ../../papers/csra.md +msgid "CF1 (%)" +msgstr "" + +#: ../../papers/csra.md +msgid "Resnet101-CSRA" +msgstr "" + +#: ../../papers/csra.md +msgid "" +"[ImageNet-1k](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnet101_8xb32_in1k_20210831-539c63f8.pth)" +msgstr "" + +#: ../../papers/csra.md +msgid "23.55" +msgstr "" + +#: ../../papers/csra.md ../../papers/resnet.md:76 +msgid "4.12" +msgstr "" + +#: ../../papers/csra.md +msgid "94.98" +msgstr "" + +#: ../../papers/csra.md ../../papers/vgg.md +msgid "90.80" +msgstr "" + +#: ../../papers/csra.md +msgid "89.16" +msgstr "" + +#: ../../papers/csra.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/csra/resnet101-" +"csra_1xb16_voc07-448px.py)" +msgstr "" + +#: ../../papers/csra.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/csra/resnet101-" +"csra_1xb16_voc07-448px_20220722-29efb40a.pth) | [log](https://download.openmmlab.com/mmclassification/v0/" +"csra/resnet101-csra_1xb16_voc07-448px_20220722-29efb40a.log.json)" +msgstr "" + +#: ../../papers/davit.md:4 +msgid "DaViT" +msgstr "" + +#: ../../papers/davit.md:6 +msgid "[DaViT: Dual Attention Vision Transformers](https://arxiv.org/abs/2204.03645v1)" +msgstr "" + +#: ../../papers/davit.md:12 +msgid "" +"In this work, we introduce Dual Attention Vision Transformers (DaViT), a simple yet effective vision " +"transformer architecture that is able to capture global context while maintaining computational efficiency. " +"We propose approaching the problem from an orthogonal angle: exploiting self-attention mechanisms with both " +"\"spatial tokens\" and \"channel tokens\". With spatial tokens, the spatial dimension defines the token " +"scope, and the channel dimension defines the token feature dimension. With channel tokens, we have the " +"inverse: the channel dimension defines the token scope, and the spatial dimension defines the token feature " +"dimension. We further group tokens along the sequence direction for both spatial and channel tokens to " +"maintain the linear complexity of the entire model. We show that these two self-attentions complement each " +"other: (i) since each channel token contains an abstract representation of the entire image, the channel " +"attention naturally captures global interactions and representations by taking all spatial positions into " +"account when computing attention scores between channels; (ii) the spatial attention refines the local " +"representations by performing fine-grained interactions across spatial locations, which in turn helps the " +"global information modeling in channel attention. Extensive experiments show our DaViT achieves state-of-" +"the-art performance on four different tasks with efficient computations. Without extra data, DaViT-Tiny, " +"DaViT-Small, and DaViT-Base achieve 82.8%, 84.2%, and 84.6% top-1 accuracy on ImageNet-1K with 28.3M, " +"49.7M, and 87.9M parameters, respectively. When we further scale up DaViT with 1.5B weakly supervised image " +"and text pairs, DaViT-Gaint reaches 90.4% top-1 accuracy on ImageNet-1K." 
+msgstr "" + +#: ../../papers/davit.md ../../papers/deit3.md ../../papers/hornet.md ../../papers/res2net.md +#: ../../papers/resnet.md:76 ../../papers/swin_transformer.md:66 ../../papers/swin_transformer_v2.md:76 +#: ../../papers/van.md ../../papers/vision_transformer.md:71 +msgid "resolution" +msgstr "分辨率" + +#: ../../papers/davit.md +msgid "DaViT-T\\*" +msgstr "" + +#: ../../papers/davit.md ../../papers/deit3.md ../../papers/hornet.md ../../papers/replknet.md +#: ../../papers/res2net.md ../../papers/resnet.md:76 ../../papers/swin_transformer.md:66 ../../papers/van.md +#: ../../papers/vision_transformer.md:71 +msgid "224x224" +msgstr "" + +#: ../../papers/davit.md +msgid "28.36" +msgstr "" + +#: ../../papers/davit.md +msgid "4.54" +msgstr "" + +#: ../../papers/davit.md +msgid "82.24" +msgstr "" + +#: ../../papers/davit.md +msgid "96.13" +msgstr "" + +#: ../../papers/davit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/davit/davit-tiny_4xb256_in1k.py)" +msgstr "" + +#: ../../papers/davit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/davit/davit-tiny_3rdparty_in1k_20221116-700fdf7d." +"pth)" +msgstr "" + +#: ../../papers/davit.md +msgid "DaViT-S\\*" +msgstr "" + +#: ../../papers/davit.md +msgid "49.74" +msgstr "" + +#: ../../papers/davit.md +msgid "8.79" +msgstr "" + +#: ../../papers/davit.md +msgid "83.61" +msgstr "" + +#: ../../papers/davit.md ../../papers/hornet.md +msgid "96.75" +msgstr "" + +#: ../../papers/davit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/davit/davit-small_4xb256_in1k.py)" +msgstr "" + +#: ../../papers/davit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/davit/davit-" +"small_3rdparty_in1k_20221116-51a849a6.pth)" +msgstr "" + +#: ../../papers/davit.md +msgid "DaViT-B\\*" +msgstr "" + +#: ../../papers/davit.md +msgid "87.95" +msgstr "" + +#: ../../papers/davit.md ../../papers/vgg.md +msgid "15.5" +msgstr "" + +#: ../../papers/davit.md +msgid "84.09" +msgstr "" + +#: ../../papers/davit.md ../../papers/efficientnet.md +msgid "96.82" +msgstr "" + +#: ../../papers/davit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/davit/davit-base_4xb256_in1k.py)" +msgstr "" + +#: ../../papers/davit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/davit/davit-base_3rdparty_in1k_20221116-19e0d956." +"pth)" +msgstr "" + +#: ../../papers/davit.md:28 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/dingmyu/davit). The config files " +"of these models are only for validation. We don't ensure these config files' training accuracy and welcome " +"you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/davit.md:30 +msgid "" +"Note: Inference accuracy is a bit lower than paper result because of inference code for classification " +"doesn't exist." +msgstr "" + +#: ../../papers/deit.md:4 +msgid "DeiT" +msgstr "" + +#: ../../papers/deit.md:6 +msgid "" +"[Training data-efficient image transformers & distillation through attention](https://arxiv.org/" +"abs/2012.12877)" +msgstr "" + +#: ../../papers/deit.md:12 +msgid "" +"Recently, neural networks purely based on attention were shown to address image understanding tasks such as " +"image classification. However, these visual transformers are pre-trained with hundreds of millions of " +"images using an expensive infrastructure, thereby limiting their adoption. 
In this work, we produce a " +"competitive convolution-free transformer by training on Imagenet only. We train them on a single computer " +"in less than 3 days. Our reference vision transformer (86M parameters) achieves top-1 accuracy of 83.1% " +"(single-crop evaluation) on ImageNet with no external data. More importantly, we introduce a teacher-" +"student strategy specific to transformers. It relies on a distillation token ensuring that the student " +"learns from the teacher through attention. We show the interest of this token-based distillation, " +"especially when using a convnet as a teacher. This leads us to report results competitive with convnets for " +"both Imagenet (where we obtain up to 85.2% accuracy) and when transferring to other tasks. We share our " +"code and models." +msgstr "" + +#: ../../papers/deit.md:22 +msgid "The teacher of the distilled version DeiT is RegNetY-16GF." +msgstr "" + +#: ../../papers/deit.md +msgid "DeiT-tiny" +msgstr "" + +#: ../../papers/deit.md +msgid "5.72" +msgstr "" + +#: ../../papers/deit.md +msgid "1.08" +msgstr "" + +#: ../../papers/deit.md +msgid "74.50" +msgstr "" + +#: ../../papers/deit.md +msgid "92.24" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit/deit-tiny_pt-4xb256_in1k.py)" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny_pt-4xb256_in1k_20220218-13b382a0." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-" +"tiny_pt-4xb256_in1k_20220218-13b382a0.log.json)" +msgstr "" + +#: ../../papers/deit.md +msgid "DeiT-tiny distilled\\*" +msgstr "" + +#: ../../papers/deit.md +msgid "74.51" +msgstr "" + +#: ../../papers/deit.md +msgid "91.90" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit/deit-tiny-" +"distilled_pt-4xb256_in1k.py)" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit/deit-tiny-" +"distilled_3rdparty_pt-4xb256_in1k_20211216-c429839a.pth)" +msgstr "" + +#: ../../papers/deit.md +msgid "DeiT-small" +msgstr "" + +#: ../../papers/deit.md +msgid "22.05" +msgstr "" + +#: ../../papers/deit.md +msgid "4.24" +msgstr "" + +#: ../../papers/deit.md +msgid "80.69" +msgstr "" + +#: ../../papers/deit.md +msgid "95.06" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit/deit-small_pt-4xb256_in1k.py)" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit/deit-small_pt-4xb256_in1k_20220218-9425b9bb." 
+"pth) | [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-" +"small_pt-4xb256_in1k_20220218-9425b9bb.log.json)" +msgstr "" + +#: ../../papers/deit.md +msgid "DeiT-small distilled\\*" +msgstr "" + +#: ../../papers/deit.md +msgid "81.17" +msgstr "" + +#: ../../papers/deit.md +msgid "95.40" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit/deit-small-" +"distilled_pt-4xb256_in1k.py)" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit/deit-small-" +"distilled_3rdparty_pt-4xb256_in1k_20211216-4de1d725.pth)" +msgstr "" + +#: ../../papers/deit.md +msgid "DeiT-base" +msgstr "" + +#: ../../papers/deit.md +msgid "86.57" +msgstr "" + +#: ../../papers/deit.md +msgid "16.86" +msgstr "" + +#: ../../papers/deit.md ../../papers/swin_transformer_v2.md:76 +msgid "81.76" +msgstr "" + +#: ../../papers/deit.md +msgid "95.81" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit/deit-base_pt-16xb64_in1k.py)" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-db63c16c." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/deit/deit-base_pt-16xb64_in1k_20220216-" +"db63c16c.log.json)" +msgstr "" + +#: ../../papers/deit.md +msgid "DeiT-base\\*" +msgstr "" + +#: ../../papers/deit.md +msgid "81.79" +msgstr "" + +#: ../../papers/deit.md +msgid "95.59" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit/deit-" +"base_3rdparty_pt-16xb64_in1k_20211124-6f40c188.pth)" +msgstr "" + +#: ../../papers/deit.md +msgid "DeiT-base distilled\\*" +msgstr "" + +#: ../../papers/deit.md +msgid "83.33" +msgstr "" + +#: ../../papers/deit.md +msgid "96.49" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit/deit-base-" +"distilled_pt-16xb64_in1k.py)" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base-" +"distilled_3rdparty_pt-16xb64_in1k_20211216-42891296.pth)" +msgstr "" + +#: ../../papers/deit.md +msgid "DeiT-base 384px\\*" +msgstr "" + +#: ../../papers/deit.md ../../papers/vision_transformer.md:71 +msgid "86.86" +msgstr "" + +#: ../../papers/deit.md +msgid "49.37" +msgstr "" + +#: ../../papers/deit.md +msgid "83.04" +msgstr "" + +#: ../../papers/deit.md +msgid "96.31" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit/deit-" +"base_ft-16xb32_in1k-384px.py)" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit/deit-" +"base_3rdparty_ft-16xb32_in1k-384px_20211124-822d02f2.pth)" +msgstr "" + +#: ../../papers/deit.md +msgid "DeiT-base distilled 384px\\*" +msgstr "" + +#: ../../papers/deit.md +msgid "85.55" +msgstr "" + +#: ../../papers/deit.md +msgid "97.35" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit/deit-base-" +"distilled_ft-16xb32_in1k-384px.py)" +msgstr "" + +#: ../../papers/deit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit/deit-base-" +"distilled_3rdparty_ft-16xb32_in1k-384px_20211216-e48d6000.pth)" +msgstr "" + +#: 
../../papers/deit.md:36 ../../papers/deit3.md:41 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/facebookresearch/deit). The config " +"files of these models are only for validation. We don't ensure these config files' training accuracy and " +"welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/deit.md:39 +msgid "" +"MMClassification doesn't support training the distilled version DeiT. And we provide distilled version " +"checkpoints for inference only." +msgstr "" + +#: ../../papers/deit3.md:4 +msgid "DeiT III: Revenge of the ViT" +msgstr "" + +#: ../../papers/deit3.md:6 +msgid "[DeiT III: Revenge of the ViT](https://arxiv.org/pdf/2204.07118.pdf)" +msgstr "" + +#: ../../papers/deit3.md:12 +msgid "" +"A Vision Transformer (ViT) is a simple neural architecture amenable to serve several computer vision tasks. " +"It has limited built-in architectural priors, in contrast to more recent architectures that incorporate " +"priors either about the input data or of specific tasks. Recent works show that ViTs benefit from self-" +"supervised pre-training, in particular BerT-like pre-training like BeiT. In this paper, we revisit the " +"supervised training of ViTs. Our procedure builds upon and simplifies a recipe introduced for training " +"ResNet-50. It includes a new simple data-augmentation procedure with only 3 augmentations, closer to the " +"practice in self-supervised learning. Our evaluations on Image classification (ImageNet-1k with and without " +"pre-training on ImageNet-21k), transfer learning and semantic segmentation show that our procedure " +"outperforms by a large margin previous fully supervised training recipes for ViT. It also reveals that the " +"performance of our ViT trained with supervision is comparable to that of more recent architectures. Our " +"results could serve as better baselines for recent self-supervised approaches demonstrated on ViT." +msgstr "" + +#: ../../papers/deit3.md +msgid "DeiT3-S\\*" +msgstr "" + +#: ../../papers/deit3.md +msgid "22.06" +msgstr "" + +#: ../../papers/deit3.md +msgid "4.61" +msgstr "" + +#: ../../papers/deit3.md +msgid "81.35" +msgstr "" + +#: ../../papers/deit3.md +msgid "95.31" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit3/deit3-small-p16_64xb64_in1k." 
+"py)" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-" +"p16_3rdparty_in1k_20221008-0f7c70cf.pth)" +msgstr "" + +#: ../../papers/deit3.md ../../papers/hornet.md ../../papers/replknet.md ../../papers/swin_transformer.md:66 +#: ../../papers/swin_transformer_v2.md:76 ../../papers/vision_transformer.md:71 +msgid "384x384" +msgstr "" + +#: ../../papers/deit3.md +msgid "22.21" +msgstr "" + +#: ../../papers/deit3.md +msgid "15.52" +msgstr "" + +#: ../../papers/deit3.md +msgid "83.43" +msgstr "" + +#: ../../papers/deit3.md +msgid "96.68" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit3/deit3-small-" +"p16_64xb64_in1k-384px.py)" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-" +"p16_3rdparty_in1k-384px_20221008-a2c1a0c7.pth)" +msgstr "" + +#: ../../papers/deit3.md +msgid "83.06" +msgstr "" + +#: ../../papers/deit3.md ../../papers/hornet.md +msgid "96.77" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-p16_in21k-" +"pre_3rdparty_in1k_20221009-dcd90827.pth)" +msgstr "" + +#: ../../papers/deit3.md ../../papers/replknet.md +msgid "84.84" +msgstr "" + +#: ../../papers/deit3.md +msgid "97.48" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-small-p16_in21k-" +"pre_3rdparty_in1k-384px_20221009-de116dd7.pth)" +msgstr "" + +#: ../../papers/deit3.md +msgid "DeiT3-M\\*" +msgstr "" + +#: ../../papers/deit3.md +msgid "8.00" +msgstr "" + +#: ../../papers/deit3.md +msgid "82.99" +msgstr "" + +#: ../../papers/deit3.md +msgid "96.22" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit3/deit3-medium-p16_64xb64_in1k." +"py)" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-medium-" +"p16_3rdparty_in1k_20221008-3b21284d.pth)" +msgstr "" + +#: ../../papers/deit3.md +msgid "84.56" +msgstr "" + +#: ../../papers/deit3.md +msgid "97.19" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-medium-p16_in21k-" +"pre_3rdparty_in1k_20221009-472f11e2.pth)" +msgstr "" + +#: ../../papers/deit3.md +msgid "DeiT3-B\\*" +msgstr "" + +#: ../../papers/deit3.md +msgid "86.59" +msgstr "" + +#: ../../papers/deit3.md +msgid "17.58" +msgstr "" + +#: ../../papers/deit3.md +msgid "83.80" +msgstr "" + +#: ../../papers/deit3.md +msgid "96.55" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit3/deit3-base-p16_64xb64_in1k." 
+"py)" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-base-" +"p16_3rdparty_in1k_20221008-60b8c8bf.pth)" +msgstr "" + +#: ../../papers/deit3.md ../../papers/swin_transformer.md:66 +msgid "86.88" +msgstr "" + +#: ../../papers/deit3.md +msgid "55.54" +msgstr "" + +#: ../../papers/deit3.md +msgid "85.08" +msgstr "" + +#: ../../papers/deit3.md +msgid "97.25" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit3/deit3-base-" +"p16_64xb32_in1k-384px.py)" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-base-" +"p16_3rdparty_in1k-384px_20221009-e19e36d4.pth)" +msgstr "" + +#: ../../papers/deit3.md +msgid "85.70" +msgstr "" + +#: ../../papers/deit3.md ../../papers/efficientnet.md ../../papers/replknet.md +msgid "97.75" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-base-p16_in21k-" +"pre_3rdparty_in1k_20221009-87983ca1.pth)" +msgstr "" + +#: ../../papers/deit3.md +msgid "86.73" +msgstr "" + +#: ../../papers/deit3.md +msgid "98.11" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-base-p16_in21k-" +"pre_3rdparty_in1k-384px_20221009-5e4e37b9.pth)" +msgstr "" + +#: ../../papers/deit3.md +msgid "DeiT3-L\\*" +msgstr "" + +#: ../../papers/deit3.md +msgid "304.37" +msgstr "" + +#: ../../papers/deit3.md +msgid "61.60" +msgstr "" + +#: ../../papers/deit3.md +msgid "84.87" +msgstr "" + +#: ../../papers/deit3.md +msgid "97.01" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit3/deit3-large-p16_64xb64_in1k." 
+"py)" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-large-" +"p16_3rdparty_in1k_20221009-03b427ea.pth)" +msgstr "" + +#: ../../papers/deit3.md +msgid "304.76" +msgstr "" + +#: ../../papers/deit3.md +msgid "191.21" +msgstr "" + +#: ../../papers/deit3.md +msgid "85.82" +msgstr "" + +#: ../../papers/deit3.md +msgid "97.60" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit3/deit3-large-" +"p16_64xb16_in1k-384px.py)" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-large-" +"p16_3rdparty_in1k-384px_20221009-4317ce62.pth)" +msgstr "" + +#: ../../papers/deit3.md +msgid "98.24" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-large-p16_in21k-" +"pre_3rdparty_in1k_20221009-d8d27084.pth)" +msgstr "" + +#: ../../papers/deit3.md +msgid "87.73" +msgstr "" + +#: ../../papers/deit3.md +msgid "98.51" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-large-p16_in21k-" +"pre_3rdparty_in1k-384px_20221009-75fea03f.pth)" +msgstr "" + +#: ../../papers/deit3.md +msgid "DeiT3-H\\*" +msgstr "" + +#: ../../papers/deit3.md +msgid "632.13" +msgstr "" + +#: ../../papers/deit3.md +msgid "167.40" +msgstr "" + +#: ../../papers/deit3.md +msgid "85.21" +msgstr "" + +#: ../../papers/deit3.md +msgid "97.36" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/deit3/deit3-huge-p14_64xb32_in1k." +"py)" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-huge-p14_3rdparty_in1k_20221009-" +"e107bcb7.pth)" +msgstr "" + +#: ../../papers/deit3.md +msgid "87.19" +msgstr "" + +#: ../../papers/deit3.md +msgid "98.26" +msgstr "" + +#: ../../papers/deit3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/deit3/deit3-huge-p14_in21k-" +"pre_3rdparty_in1k_20221009-19b8a535.pth)" +msgstr "" + +#: ../../papers/densenet.md:4 +msgid "DenseNet" +msgstr "" + +#: ../../papers/densenet.md:6 +msgid "[Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993)" +msgstr "" + +#: ../../papers/densenet.md:12 +msgid "" +"Recent work has shown that convolutional networks can be substantially deeper, more accurate, and efficient " +"to train if they contain shorter connections between layers close to the input and those close to the " +"output. In this paper, we embrace this observation and introduce the Dense Convolutional Network " +"(DenseNet), which connects each layer to every layer in a feed-forward fashion. Whereas traditional " +"convolutional networks with L layers have L connections - one between each layer and its subsequent layer - " +"our network has L(L+1)/2 direct connections. For each layer, the feature-maps of all preceding layers are " +"used as inputs, and its own feature-maps are used as inputs into all subsequent layers. DenseNets have " +"several compelling advantages: they alleviate the vanishing-gradient problem, strengthen feature " +"propagation, encourage feature reuse, and substantially reduce the number of parameters. We evaluate our " +"proposed architecture on four highly competitive object recognition benchmark tasks (CIFAR-10, CIFAR-100, " +"SVHN, and ImageNet). 
DenseNets obtain significant improvements over the state-of-the-art on most of them, " +"whilst requiring less computation to achieve high performance." +msgstr "" + +#: ../../papers/densenet.md +msgid "DenseNet121\\*" +msgstr "" + +#: ../../papers/densenet.md +msgid "7.98" +msgstr "" + +#: ../../papers/densenet.md +msgid "2.88" +msgstr "" + +#: ../../papers/densenet.md +msgid "74.96" +msgstr "" + +#: ../../papers/densenet.md +msgid "92.21" +msgstr "" + +#: ../../papers/densenet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/densenet/densenet121_4xb256_in1k." +"py)" +msgstr "" + +#: ../../papers/densenet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/densenet/" +"densenet121_4xb256_in1k_20220426-07450f99.pth)" +msgstr "" + +#: ../../papers/densenet.md +msgid "DenseNet169\\*" +msgstr "" + +#: ../../papers/densenet.md +msgid "14.15" +msgstr "" + +#: ../../papers/densenet.md +msgid "3.42" +msgstr "" + +#: ../../papers/densenet.md +msgid "76.08" +msgstr "" + +#: ../../papers/densenet.md +msgid "93.11" +msgstr "" + +#: ../../papers/densenet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/densenet/densenet169_4xb256_in1k." +"py)" +msgstr "" + +#: ../../papers/densenet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet169_4xb256_in1k_20220426-" +"a2889902.pth)" +msgstr "" + +#: ../../papers/densenet.md +msgid "DenseNet201\\*" +msgstr "" + +#: ../../papers/densenet.md +msgid "20.01" +msgstr "" + +#: ../../papers/densenet.md +msgid "4.37" +msgstr "" + +#: ../../papers/densenet.md +msgid "77.32" +msgstr "" + +#: ../../papers/densenet.md +msgid "93.64" +msgstr "" + +#: ../../papers/densenet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/densenet/densenet201_4xb256_in1k." +"py)" +msgstr "" + +#: ../../papers/densenet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/densenet/" +"densenet201_4xb256_in1k_20220426-05cae4ef.pth)" +msgstr "" + +#: ../../papers/densenet.md +msgid "DenseNet161\\*" +msgstr "" + +#: ../../papers/densenet.md +msgid "28.68" +msgstr "" + +#: ../../papers/densenet.md +msgid "7.82" +msgstr "" + +#: ../../papers/densenet.md +msgid "77.61" +msgstr "" + +#: ../../papers/densenet.md ../../papers/mobileone.md:86 +msgid "93.83" +msgstr "" + +#: ../../papers/densenet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/densenet/densenet161_4xb256_in1k." +"py)" +msgstr "" + +#: ../../papers/densenet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/densenet/densenet161_4xb256_in1k_20220426-" +"ee6a80a9.pth)" +msgstr "" + +#: ../../papers/densenet.md:29 +msgid "" +"*Models with * are converted from [pytorch](https://pytorch.org/vision/stable/models.html), guided by " +"[original repo](https://github.com/liuzhuang13/DenseNet). The config files of these models are only for " +"inference. 
We don't ensure these config files' training accuracy and welcome you to contribute your " +"reproduction results.*" +msgstr "" + +#: ../../papers/edgenext.md:4 +msgid "EdgeNeXt" +msgstr "" + +#: ../../papers/edgenext.md:6 +msgid "" +"[EdgeNeXt: Efficiently Amalgamated CNN-Transformer Architecture for Mobile Vision Applications](https://" +"arxiv.org/abs/2206.10589)" +msgstr "" + +#: ../../papers/edgenext.md:14 +#, python-format +msgid "" +"In the pursuit of achieving ever-increasing accuracy, large and complex neural networks are usually " +"developed. Such models demand high computational resources and therefore cannot be deployed on edge " +"devices. It is of great interest to build resource-efficient general purpose networks due to their " +"usefulness in several application areas. In this work, we strive to effectively combine the strengths of " +"both CNN and Transformer models and propose a new efficient hybrid architecture EdgeNeXt. Specifically in " +"EdgeNeXt, we introduce split depth-wise transpose attention (SDTA) encoder that splits input tensors into " +"multiple channel groups and utilizes depth-wise convolution along with self-attention across channel " +"dimensions to implicitly increase the receptive field and encode multi-scale features. Our extensive " +"experiments on classification, detection and segmentation tasks, reveal the merits of the proposed " +"approach, outperforming state-of-the-art methods with comparatively lower compute requirements. Our " +"EdgeNeXt model with 1.3M parameters achieves 71.2% top-1 accuracy on ImageNet-1K, outperforming MobileViT " +"with an absolute gain of 2.2% with 28% reduction in FLOPs. Further, our EdgeNeXt model with 5.6M parameters " +"achieves 79.4% top-1 accuracy on ImageNet-1K." +msgstr "" + +#: ../../papers/edgenext.md +msgid "EdgeNeXt-Base-usi\\*" +msgstr "" + +#: ../../papers/edgenext.md +msgid "18.51" +msgstr "" + +#: ../../papers/edgenext.md +msgid "3.84" +msgstr "" + +#: ../../papers/edgenext.md +msgid "83.67" +msgstr "" + +#: ../../papers/edgenext.md +msgid "96.7" +msgstr "" + +#: ../../papers/edgenext.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/edgenext/edgenext-base_8xb256-" +"usi_in1k.py)" +msgstr "" + +#: ../../papers/edgenext.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-base_3rdparty-" +"usi_in1k_20220801-909e8939.pth)" +msgstr "" + +#: ../../papers/edgenext.md +msgid "EdgeNeXt-Base\\*" +msgstr "" + +#: ../../papers/edgenext.md +msgid "82.48" +msgstr "" + +#: ../../papers/edgenext.md +msgid "96.2" +msgstr "" + +#: ../../papers/edgenext.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/edgenext/edgenext-base_8xb256_in1k." 
+"py)" +msgstr "" + +#: ../../papers/edgenext.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-" +"base_3rdparty_in1k_20220801-9ade408b.pth)" +msgstr "" + +#: ../../papers/edgenext.md +msgid "EdgeNeXt-Small-usi\\*" +msgstr "" + +#: ../../papers/edgenext.md +msgid "5.59" +msgstr "" + +#: ../../papers/edgenext.md +msgid "1.26" +msgstr "" + +#: ../../papers/edgenext.md ../../papers/hrnet.md +msgid "81.06" +msgstr "" + +#: ../../papers/edgenext.md ../../papers/efficientnet.md ../../papers/resnet.md:76 +msgid "95.34" +msgstr "" + +#: ../../papers/edgenext.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/edgenext/edgenext-small_8xb256-" +"usi_in1k.py)" +msgstr "" + +#: ../../papers/edgenext.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-small_3rdparty-" +"usi_in1k_20220801-ae6d8dd3.pth)" +msgstr "" + +#: ../../papers/edgenext.md +msgid "EdgeNeXt-Small\\*" +msgstr "" + +#: ../../papers/edgenext.md ../../papers/resnet.md:76 +msgid "79.41" +msgstr "" + +#: ../../papers/edgenext.md +msgid "94.53" +msgstr "" + +#: ../../papers/edgenext.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/edgenext/edgenext-" +"small_8xb256_in1k.py)" +msgstr "" + +#: ../../papers/edgenext.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-small_3rdparty_in1k_20220801-" +"d00db5f8.pth)" +msgstr "" + +#: ../../papers/edgenext.md +msgid "EdgeNeXt-X-Small\\*" +msgstr "" + +#: ../../papers/edgenext.md +msgid "2.34" +msgstr "" + +#: ../../papers/edgenext.md +msgid "0.538" +msgstr "" + +#: ../../papers/edgenext.md +msgid "74.86" +msgstr "" + +#: ../../papers/edgenext.md +msgid "92.31" +msgstr "" + +#: ../../papers/edgenext.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/edgenext/edgenext-" +"xsmall_8xb256_in1k.py)" +msgstr "" + +#: ../../papers/edgenext.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-" +"xsmall_3rdparty_in1k_20220801-974f9fe7.pth)" +msgstr "" + +#: ../../papers/edgenext.md +msgid "EdgeNeXt-XX-Small\\*" +msgstr "" + +#: ../../papers/edgenext.md +msgid "1.33" +msgstr "" + +#: ../../papers/edgenext.md +msgid "0.261" +msgstr "" + +#: ../../papers/edgenext.md +msgid "71.2" +msgstr "" + +#: ../../papers/edgenext.md +msgid "89.91" +msgstr "" + +#: ../../papers/edgenext.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/edgenext/edgenext-" +"xxsmall_8xb256_in1k.py)" +msgstr "" + +#: ../../papers/edgenext.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/edgenext/edgenext-" +"xxsmall_3rdparty_in1k_20220801-7ca8a81d.pth)" +msgstr "" + +#: ../../papers/edgenext.md:35 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/mmaaz60/EdgeNeXt). The config " +"files of these models are only for inference. We don't ensure these config files' training accuracy and " +"welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/efficientformer.md:4 +msgid "EfficientFormer" +msgstr "" + +#: ../../papers/efficientformer.md:6 +msgid "[EfficientFormer: Vision Transformers at MobileNet Speed](https://arxiv.org/abs/2206.01191)" +msgstr "" + +#: ../../papers/efficientformer.md:12 +msgid "" +"Vision Transformers (ViT) have shown rapid progress in computer vision tasks, achieving promising results " +"on various benchmarks. 
However, due to the massive number of parameters and model design, e.g., attention " +"mechanism, ViT-based models are generally times slower than lightweight convolutional networks. Therefore, " +"the deployment of ViT for real-time applications is particularly challenging, especially on resource-" +"constrained hardware such as mobile devices. Recent efforts try to reduce the computation complexity of ViT " +"through network architecture search or hybrid design with MobileNet block, yet the inference speed is still " +"unsatisfactory. This leads to an important question: can transformers run as fast as MobileNet while " +"obtaining high performance? To answer this, we first revisit the network architecture and operators used in " +"ViT-based models and identify inefficient designs. Then we introduce a dimension-consistent pure " +"transformer (without MobileNet blocks) as a design paradigm. Finally, we perform latency-driven slimming " +"to get a series of final models dubbed EfficientFormer. Extensive experiments show the superiority of " +"EfficientFormer in performance and speed on mobile devices. Our fastest model, EfficientFormer-L1, achieves " +"79.2% top-1 accuracy on ImageNet-1K with only 1.6 ms inference latency on iPhone 12 (compiled with CoreML), " +"which runs as fast as MobileNetV2×1.4 (1.6 ms, 74.7% top-1), and our largest model, EfficientFormer-L7, " +"obtains 83.3% accuracy with only 7.0 ms latency. Our work proves that properly designed transformers can " +"reach extremely low latency on mobile devices while maintaining high performance." +msgstr "" + +#: ../../papers/efficientformer.md +msgid "EfficientFormer-l1\\*" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "12.19" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "1.30" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "80.46" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "94.99" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientformer/efficientformer-" +"l1_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-" +"l1_3rdparty_in1k_20220915-cc3e1ac6.pth)" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "EfficientFormer-l3\\*" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "31.41" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "3.93" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "82.45" +msgstr "" + +#: ../../papers/efficientformer.md ../../papers/t2t_vit.md +msgid "96.18" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientformer/efficientformer-" +"l3_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-" +"l3_3rdparty_in1k_20220915-466793d6.pth)" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "EfficientFormer-l7\\*" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "82.23" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "10.16" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "83.40" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "96.60" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "" 
+"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientformer/efficientformer-" +"l7_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/efficientformer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientformer/efficientformer-" +"l7_3rdparty_in1k_20220915-185e30af.pth)" +msgstr "" + +#: ../../papers/efficientformer.md:28 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/snap-research/EfficientFormer). " +"The config files of these models are only for inference. We don't ensure these config files' training " +"accuracy and welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/efficientnet.md:4 +msgid "EfficientNet" +msgstr "" + +#: ../../papers/efficientnet.md:6 +msgid "[Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946v5)" +msgstr "" + +#: ../../papers/efficientnet.md:12 +msgid "" +"EfficientNets are a family of image classification models, which achieve state-of-the-art accuracy, yet " +"being an order-of-magnitude smaller and faster than previous models." +msgstr "" + +#: ../../papers/efficientnet.md:14 +msgid "" +"EfficientNets are based on AutoML and Compound Scaling. In particular, we first use [AutoML MNAS Mobile " +"framework](https://ai.googleblog.com/2018/08/mnasnet-towards-automating-design-of.html) to develop a mobile-" +"size baseline network, named as EfficientNet-B0; Then, we use the compound scaling method to scale up this " +"baseline to obtain EfficientNet-B1 to B7." +msgstr "" + +#: ../../papers/efficientnet.md:35 +msgid "" +"In the result table, AA means trained with AutoAugment pre-processing, more details can be found in the " +"[paper](https://arxiv.org/abs/1805.09501); AdvProp is a method to train with adversarial examples, more " +"details can be found in the [paper](https://arxiv.org/abs/1911.09665); RA means trained with RandAugment " +"pre-processing, more details can be found in the [paper](https://arxiv.org/abs/1909.13719); NoisyStudent " +"means trained with extra JFT-300M unlabeled data, more details can be found in the [paper](https://arxiv." +"org/abs/1911.04252); L2-475 means the same L2 architecture with input image size 475." +msgstr "" + +#: ../../papers/efficientnet.md:37 +msgid "Note: In MMClassification, we support training with AutoAugment, don't support AdvProp by now." 
+msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B0\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "5.29" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/mobilevit.md:71 +msgid "0.42" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "76.74" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "93.17" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b0_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-" +"b0_3rdparty_8xb32_in1k_20220119-a7e2a0b1.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B0 (AA)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "77.26" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "93.41" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-" +"aa_in1k_20220119-8d939117.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B0 (AA + AdvProp)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "77.53" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "93.61" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b0_8xb32-01norm_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty_8xb32-aa-" +"advprop_in1k_20220119-26434485.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B0 (RA + NoisyStudent)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "77.63" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "94.00" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b0_3rdparty-ra-" +"noisystudent_in1k_20221103-75cd08d3.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B1\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "7.79" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "0.74" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "78.68" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/resnet.md:76 ../../papers/wrn.md +msgid "94.28" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b1_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-" +"b1_3rdparty_8xb32_in1k_20220119-002556d9.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B1 (AA)\\*" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/res2net.md +msgid "79.20" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/repvgg.md +msgid "94.42" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-" +"aa_in1k_20220119-619d8ae3.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B1 (AA + AdvProp)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "79.52" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "94.43" +msgstr "" + +#: 
../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b1_8xb32-01norm_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty_8xb32-aa-" +"advprop_in1k_20220119-5715267d.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B1 (RA + NoisyStudent)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "81.44" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "95.83" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b1_3rdparty-ra-" +"noisystudent_in1k_20221103-756bcbc0.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B2\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "9.11" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "1.07" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "79.64" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "94.80" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b2_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-" +"b2_3rdparty_8xb32_in1k_20220119-ea374a30.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B2 (AA)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "80.21" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-" +"aa_in1k_20220119-dd61e80b.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B2 (AA + AdvProp)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "80.45" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b2_8xb32-01norm_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b2_3rdparty_8xb32-aa-" +"advprop_in1k_20220119-1655338a.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B2 (RA + NoisyStudent)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "82.47" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/swin_transformer_v2.md:76 +msgid "96.23" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B3\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "12.23" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/van.md +msgid "81.01" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b3_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-" +"b3_3rdparty_8xb32_in1k_20220119-4b4d7487.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B3 (AA)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "81.58" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "95.67" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" 
+"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-" +"aa_in1k_20220119-5b4887a0.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B3 (AA + AdvProp)\\*" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/repvgg.md +msgid "81.81" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/twins.md +msgid "95.69" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b3_8xb32-01norm_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa-" +"advprop_in1k_20220119-53b41118.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B3 (RA + NoisyStudent)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "84.02" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty-ra-" +"noisystudent_in1k_20221103-a4ab5fd6.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B4\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "19.34" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "1.95" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "82.57" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/t2t_vit.md +msgid "96.09" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b4_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-" +"b4_3rdparty_8xb32_in1k_20220119-81fd4077.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B4 (AA)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "82.95" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/twins.md +msgid "96.26" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-" +"aa_in1k_20220119-45b8bd2b.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B4 (AA + AdvProp)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "83.25" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b4_8xb32-01norm_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty_8xb32-aa-" +"advprop_in1k_20220119-38c2238c.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B4 (RA + NoisyStudent)\\*" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/mvit.md +msgid "85.25" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "97.52" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b4_3rdparty-ra-" +"noisystudent_in1k_20221103-16ba8a2d.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B5\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "30.39" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "10.1" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "83.18" +msgstr "" + +#: 
../../papers/efficientnet.md +msgid "96.47" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b5_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-" +"b5_3rdparty_8xb32_in1k_20220119-e9814430.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B5 (AA)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "96.76" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-" +"aa_in1k_20220119-2cab8b78.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B5 (AA + AdvProp)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "84.21" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "96.98" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b5_8xb32-01norm_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty_8xb32-aa-" +"advprop_in1k_20220119-f57a895a.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B5 (RA + NoisyStudent)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "86.08" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b5_3rdparty-ra-" +"noisystudent_in1k_20221103-111a185f.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B6 (AA)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "43.04" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "20.0" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "84.05" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b6_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-" +"aa_in1k_20220119-45b03310.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B6 (AA + AdvProp)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "84.74" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/mvit.md +msgid "97.14" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b6_8xb32-01norm_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty_8xb32-aa-" +"advprop_in1k_20220119-bfe3485e.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B6 (RA + NoisyStudent)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "86.47" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "97.87" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b6_3rdparty-ra-" +"noisystudent_in1k_20221103-7de7d2cc.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B7 (AA)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "66.35" +msgstr "" + 
+#: ../../papers/efficientnet.md +msgid "39.3" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "84.38" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "96.88" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b7_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-" +"aa_in1k_20220119-bf03951c.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B7 (AA + AdvProp)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "85.14" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "97.23" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b7_8xb32-01norm_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty_8xb32-aa-" +"advprop_in1k_20220119-c6dbff10.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B7 (RA + NoisyStudent)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "65.0" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "86.83" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "98.08" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b7_3rdparty-ra-" +"noisystudent_in1k_20221103-a82894bc.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-B8 (AA + AdvProp)\\*" +msgstr "" + +#: ../../papers/efficientnet.md ../../papers/mobilenet_v3.md +msgid "87.41" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "85.38" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "97.28" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"b8_8xb32-01norm_in1k.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b8_3rdparty_8xb32-aa-" +"advprop_in1k_20220119-297ce1b7.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-L2-475 (RA + NoisyStudent)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "480.30" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "174.20" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "88.18" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "98.55" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"l2_8xb32_in1k-475px.py)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-l2_3rdparty-ra-" +"noisystudent_in1k-475px_20221103-5a0d8058.pth)" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "EfficientNet-L2 (RA + NoisyStudent)\\*" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "484.98" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "88.33" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "98.65" +msgstr "" + +#: ../../papers/efficientnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/efficientnet/efficientnet-" +"l2_8xb8_in1k-800px.py)" +msgstr "" + +#: 
../../papers/efficientnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-l2_3rdparty-ra-" +"noisystudent_in1k_20221103-be73be13.pth)" +msgstr "" + +#: ../../papers/efficientnet.md:75 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/tensorflow/tpu/tree/master/models/" +"official/efficientnet). The config files of these models are only for inference. We don't ensure these " +"config files' training accuracy and welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/efficientnet.md:121 ../../papers/mobilevit.md:80 ../../papers/resnet.md:85 +#: ../../papers/swin_transformer.md:75 ../../papers/swin_transformer_v2.md:85 +#: ../../papers/vision_transformer.md:80 +msgid "Test:" +msgstr "测试:" + +#: ../../papers/efficientnet.md:128 +msgid "" +"For more configurable parameters, please refer to the [API](https://mmclassification.readthedocs.io/en/1.x/" +"api/generated/mmcls.models.backbones.EfficientNet.html#mmcls.models.backbones.EfficientNet)." +msgstr "" + +#: ../../papers/hornet.md:4 +msgid "HorNet" +msgstr "" + +#: ../../papers/hornet.md:6 +msgid "" +"[HorNet: Efficient High-Order Spatial Interactions with Recursive Gated Convolutions](https://arxiv.org/" +"pdf/2207.14284v2.pdf)" +msgstr "" + +#: ../../papers/hornet.md:12 +msgid "" +"Recent progress in vision Transformers exhibits great success in various tasks driven by the new spatial " +"modeling mechanism based on dot-product self-attention. In this paper, we show that the key ingredients " +"behind the vision Transformers, namely input-adaptive, long-range and high-order spatial interactions, can " +"also be efficiently implemented with a convolution-based framework. We present the Recursive Gated " +"Convolution (g nConv) that performs high-order spatial interactions with gated convolutions and recursive " +"designs. The new operation is highly flexible and customizable, which is compatible with various variants " +"of convolution and extends the two-order interactions in self-attention to arbitrary orders without " +"introducing significant extra computation. g nConv can serve as a plug-and-play module to improve various " +"vision Transformers and convolution-based models. Based on the operation, we construct a new family of " +"generic vision backbones named HorNet. Extensive experiments on ImageNet classification, COCO object " +"detection and ADE20K semantic segmentation show HorNet outperform Swin Transformers and ConvNeXt by a " +"significant margin with similar overall architecture and training configurations. HorNet also shows " +"favorable scalability to more training data and a larger model size. Apart from the effectiveness in visual " +"encoders, we also show g nConv can be applied to task-specific decoders and consistently improve dense " +"prediction performance with less computation. Our results demonstrate that g nConv can be a new basic " +"module for visual modeling that effectively combines the merits of both vision Transformers and CNNs. Code " +"is available at https://github.com/raoyongming/HorNet." 
+msgstr "" + +#: ../../papers/hornet.md +msgid "HorNet-T\\*" +msgstr "" + +#: ../../papers/hornet.md +msgid "22.41" +msgstr "" + +#: ../../papers/hornet.md +msgid "3.98" +msgstr "" + +#: ../../papers/hornet.md +msgid "82.84" +msgstr "" + +#: ../../papers/hornet.md +msgid "96.24" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-tiny_8xb128_in1k." +"py)" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-" +"tiny_3rdparty_in1k_20220915-0e8eedff.pth)" +msgstr "" + +#: ../../papers/hornet.md +msgid "HorNet-T-GF\\*" +msgstr "" + +#: ../../papers/hornet.md +msgid "22.99" +msgstr "" + +#: ../../papers/hornet.md +msgid "3.9" +msgstr "" + +#: ../../papers/hornet.md +msgid "82.98" +msgstr "" + +#: ../../papers/hornet.md +msgid "96.38" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-tiny-" +"gf_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-tiny-" +"gf_3rdparty_in1k_20220915-4c35a66b.pth)" +msgstr "" + +#: ../../papers/hornet.md +msgid "HorNet-S\\*" +msgstr "" + +#: ../../papers/hornet.md +msgid "49.53" +msgstr "" + +#: ../../papers/hornet.md +msgid "8.83" +msgstr "" + +#: ../../papers/hornet.md +msgid "83.79" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-small_8xb64_in1k." +"py)" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-" +"small_3rdparty_in1k_20220915-5935f60f.pth)" +msgstr "" + +#: ../../papers/hornet.md +msgid "HorNet-S-GF\\*" +msgstr "" + +#: ../../papers/hornet.md +msgid "50.4" +msgstr "" + +#: ../../papers/hornet.md +msgid "8.71" +msgstr "" + +#: ../../papers/hornet.md +msgid "83.98" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-small-" +"gf_8xb64_in1k.py)" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-small-" +"gf_3rdparty_in1k_20220915-649ca492.pth)" +msgstr "" + +#: ../../papers/hornet.md +msgid "HorNet-B\\*" +msgstr "" + +#: ../../papers/hornet.md +msgid "87.26" +msgstr "" + +#: ../../papers/hornet.md +msgid "15.59" +msgstr "" + +#: ../../papers/hornet.md +msgid "84.24" +msgstr "" + +#: ../../papers/hornet.md +msgid "96.94" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-base_8xb64_in1k." 
+"py)" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base_3rdparty_in1k_20220915-" +"a06176bb.pth)" +msgstr "" + +#: ../../papers/hornet.md +msgid "HorNet-B-GF\\*" +msgstr "" + +#: ../../papers/hornet.md +msgid "88.42" +msgstr "" + +#: ../../papers/hornet.md +msgid "15.42" +msgstr "" + +#: ../../papers/hornet.md +msgid "84.32" +msgstr "" + +#: ../../papers/hornet.md ../../papers/swin_transformer.md:66 +msgid "96.95" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/master/configs/hornet/hornet-base-" +"gf_8xb64_in1k.py)" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-base-" +"gf_3rdparty_in1k_20220915-82c06fa7.pth)" +msgstr "" + +#: ../../papers/hornet.md:31 +msgid "" +"\\*Models with * are converted from [the official repo](https://github.com/raoyongming/HorNet). The config " +"files of these models are only for validation. We don't ensure these config files' training accuracy and " +"welcome you to contribute your reproduction results." +msgstr "" + +#: ../../papers/hornet.md:35 +msgid "The pre-trained models on ImageNet-21k are used to fine-tune on the downstream tasks." +msgstr "" + +#: ../../papers/hornet.md +msgid "HorNet-L\\*" +msgstr "" + +#: ../../papers/hornet.md +msgid "194.54" +msgstr "" + +#: ../../papers/hornet.md +msgid "34.83" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-" +"large_3rdparty_in21k_20220909-9ccef421.pth)" +msgstr "" + +#: ../../papers/hornet.md +msgid "HorNet-L-GF\\*" +msgstr "" + +#: ../../papers/hornet.md +msgid "196.29" +msgstr "" + +#: ../../papers/hornet.md +msgid "34.58" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-large-" +"gf_3rdparty_in21k_20220909-3aea3b61.pth)" +msgstr "" + +#: ../../papers/hornet.md +msgid "HorNet-L-GF384\\*" +msgstr "" + +#: ../../papers/hornet.md +msgid "201.23" +msgstr "" + +#: ../../papers/hornet.md +msgid "101.63" +msgstr "" + +#: ../../papers/hornet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hornet/hornet-large-" +"gf384_3rdparty_in21k_20220909-80894290.pth)" +msgstr "" + +#: ../../papers/hornet.md:43 +msgid "\\*Models with * are converted from [the official repo](https://github.com/raoyongming/HorNet)." +msgstr "" + +#: ../../papers/hrnet.md:4 +msgid "HRNet" +msgstr "" + +#: ../../papers/hrnet.md:6 +msgid "" +"[Deep High-Resolution Representation Learning for Visual Recognition](https://arxiv.org/abs/1908.07919v2)" +msgstr "" + +#: ../../papers/hrnet.md:12 +msgid "" +"High-resolution representations are essential for position-sensitive vision problems, such as human pose " +"estimation, semantic segmentation, and object detection. Existing state-of-the-art frameworks first encode " +"the input image as a low-resolution representation through a subnetwork that is formed by connecting high-" +"to-low resolution convolutions *in series* (e.g., ResNet, VGGNet), and then recover the high-resolution " +"representation from the encoded low-resolution representation. Instead, our proposed network, named as High-" +"Resolution Network (HRNet), maintains high-resolution representations through the whole process. 
There are " +"two key characteristics: (i) Connect the high-to-low resolution convolution streams *in parallel*; (ii) " +"Repeatedly exchange the information across resolutions. The benefit is that the resulting representation is " +"semantically richer and spatially more precise. We show the superiority of the proposed HRNet in a wide " +"range of applications, including human pose estimation, semantic segmentation, and object detection, " +"suggesting that the HRNet is a stronger backbone for computer vision problems." +msgstr "" + +#: ../../papers/hrnet.md +msgid "HRNet-W18\\*" +msgstr "" + +#: ../../papers/hrnet.md +msgid "21.30" +msgstr "" + +#: ../../papers/hrnet.md +msgid "4.33" +msgstr "" + +#: ../../papers/hrnet.md +msgid "76.75" +msgstr "" + +#: ../../papers/hrnet.md +msgid "93.44" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/hrnet/hrnet-w18_4xb32_in1k.py)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-" +"w18_3rdparty_8xb32_in1k_20220120-0c10b180.pth)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "HRNet-W30\\*" +msgstr "" + +#: ../../papers/hrnet.md +msgid "37.71" +msgstr "" + +#: ../../papers/hrnet.md +msgid "8.17" +msgstr "" + +#: ../../papers/hrnet.md +msgid "78.19" +msgstr "" + +#: ../../papers/hrnet.md ../../papers/regnet.md +msgid "94.22" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/hrnet/hrnet-w30_4xb32_in1k.py)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-" +"w30_3rdparty_8xb32_in1k_20220120-8aa3832f.pth)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "HRNet-W32\\*" +msgstr "" + +#: ../../papers/hrnet.md +msgid "41.23" +msgstr "" + +#: ../../papers/hrnet.md ../../papers/van.md +msgid "8.99" +msgstr "" + +#: ../../papers/hrnet.md +msgid "78.44" +msgstr "" + +#: ../../papers/hrnet.md +msgid "94.19" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/hrnet/hrnet-w32_4xb32_in1k.py)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w32_3rdparty_8xb32_in1k_20220120-" +"c394f1ab.pth)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "HRNet-W40\\*" +msgstr "" + +#: ../../papers/hrnet.md +msgid "57.55" +msgstr "" + +#: ../../papers/hrnet.md +msgid "12.77" +msgstr "" + +#: ../../papers/hrnet.md +msgid "78.94" +msgstr "" + +#: ../../papers/hrnet.md +msgid "94.47" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/hrnet/hrnet-w40_4xb32_in1k.py)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-" +"w40_3rdparty_8xb32_in1k_20220120-9a2dbfc5.pth)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "HRNet-W44\\*" +msgstr "" + +#: ../../papers/hrnet.md +msgid "67.06" +msgstr "" + +#: ../../papers/hrnet.md +msgid "14.96" +msgstr "" + +#: ../../papers/hrnet.md ../../papers/resnext.md +msgid "78.88" +msgstr "" + +#: ../../papers/hrnet.md ../../papers/resnet.md:76 +msgid "94.37" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/hrnet/hrnet-w44_4xb32_in1k.py)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" 
+"[model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-" +"w44_3rdparty_8xb32_in1k_20220120-35d07f73.pth)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "HRNet-W48\\*" +msgstr "" + +#: ../../papers/hrnet.md +msgid "77.47" +msgstr "" + +#: ../../papers/hrnet.md +msgid "17.36" +msgstr "" + +#: ../../papers/hrnet.md +msgid "79.32" +msgstr "" + +#: ../../papers/hrnet.md +msgid "94.52" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/hrnet/hrnet-w48_4xb32_in1k.py)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32_in1k_20220120-" +"e555ef50.pth)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "HRNet-W64\\*" +msgstr "" + +#: ../../papers/hrnet.md +msgid "128.06" +msgstr "" + +#: ../../papers/hrnet.md +msgid "29.00" +msgstr "" + +#: ../../papers/hrnet.md +msgid "79.46" +msgstr "" + +#: ../../papers/hrnet.md ../../papers/regnet.md +msgid "94.65" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/hrnet/hrnet-w64_4xb32_in1k.py)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-" +"w64_3rdparty_8xb32_in1k_20220120-19126642.pth)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "HRNet-W18 (ssld)\\*" +msgstr "" + +#: ../../papers/hrnet.md +msgid "95.70" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w18_3rdparty_8xb32-" +"ssld_in1k_20220120-455f69ea.pth)" +msgstr "" + +#: ../../papers/hrnet.md +msgid "HRNet-W48 (ssld)\\*" +msgstr "" + +#: ../../papers/hrnet.md ../../papers/mvit.md +msgid "83.63" +msgstr "" + +#: ../../papers/hrnet.md +msgid "96.79" +msgstr "" + +#: ../../papers/hrnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/hrnet/hrnet-w48_3rdparty_8xb32-" +"ssld_in1k_20220120-d0459c38.pth)" +msgstr "" + +#: ../../papers/hrnet.md:34 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/HRNet/HRNet-Image-Classification). " +"The config files of these models are only for inference. We don't ensure these config files' training " +"accuracy and welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/inception_v3.md:4 +msgid "Inception V3" +msgstr "" + +#: ../../papers/inception_v3.md:6 +msgid "[Rethinking the Inception Architecture for Computer Vision](http://arxiv.org/abs/1512.00567)" +msgstr "" + +#: ../../papers/inception_v3.md:12 +#, python-format +msgid "" +"Convolutional networks are at the core of most state-of-the-art computer vision solutions for a wide " +"variety of tasks. Since 2014 very deep convolutional networks started to become mainstream, yielding " +"substantial gains in various benchmarks. Although increased model size and computational cost tend to " +"translate to immediate quality gains for most tasks (as long as enough labeled data is provided for " +"training), computational efficiency and low parameter count are still enabling factors for various use " +"cases such as mobile vision and big-data scenarios. Here we explore ways to scale up networks in ways that " +"aim at utilizing the added computation as efficiently as possible by suitably factorized convolutions and " +"aggressive regularization. 
We benchmark our methods on the ILSVRC 2012 classification challenge validation " +"set demonstrate substantial gains over the state of the art: 21.2% top-1 and 5.6% top-5 error for single " +"frame evaluation using a network with a computational cost of 5 billion multiply-adds per inference and " +"with using less than 25 million parameters. With an ensemble of 4 models and multi-crop evaluation, we " +"report 3.5% top-5 error on the validation set (3.6% error on the test set) and 17.3% top-1 error on the " +"validation set." +msgstr "" + +#: ../../papers/inception_v3.md +msgid "Inception V3\\*" +msgstr "" + +#: ../../papers/inception_v3.md +msgid "23.83" +msgstr "" + +#: ../../papers/inception_v3.md +msgid "5.75" +msgstr "" + +#: ../../papers/inception_v3.md +msgid "77.57" +msgstr "" + +#: ../../papers/inception_v3.md ../../papers/resnet.md:76 +msgid "93.58" +msgstr "" + +#: ../../papers/inception_v3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/inception_v3/inception-" +"v3_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/inception_v3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/inception-v3/inception-" +"v3_3rdparty_8xb32_in1k_20220615-dcd4d910.pth)" +msgstr "" + +#: ../../papers/inception_v3.md:26 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/pytorch/vision/blob/main/" +"torchvision/models/inception.py#L28). The config files of these models are only for inference. We don't " +"ensure these config files' training accuracy and welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/mlp_mixer.md:4 +msgid "Mlp-Mixer" +msgstr "" + +#: ../../papers/mlp_mixer.md:6 +msgid "[MLP-Mixer: An all-MLP Architecture for Vision](https://arxiv.org/abs/2105.01601)" +msgstr "" + +#: ../../papers/mlp_mixer.md:12 +msgid "" +"Convolutional Neural Networks (CNNs) are the go-to model for computer vision. Recently, attention-based " +"networks, such as the Vision Transformer, have also become popular. In this paper we show that while " +"convolutions and attention are both sufficient for good performance, neither of them are necessary. We " +"present MLP-Mixer, an architecture based exclusively on multi-layer perceptrons (MLPs). MLP-Mixer contains " +"two types of layers: one with MLPs applied independently to image patches (i.e. \"mixing\" the per-location " +"features), and one with MLPs applied across patches (i.e. \"mixing\" spatial information). When trained on " +"large datasets, or with modern regularization schemes, MLP-Mixer attains competitive scores on image " +"classification benchmarks, with pre-training and inference cost comparable to state-of-the-art models. We " +"hope that these results spark further research beyond the realms of well established CNNs and Transformers." 
+msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "Mixer-B/16\\*" +msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "59.88" +msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "12.61" +msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "76.68" +msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "92.25" +msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mlp_mixer/mlp-mixer-base-" +"p16_64xb64_in1k.py)" +msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-base-" +"p16_3rdparty_64xb64_in1k_20211124-1377e3e0.pth)" +msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "Mixer-L/16\\*" +msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "208.2" +msgstr "" + +#: ../../papers/mlp_mixer.md ../../papers/resnet.md:76 +msgid "44.57" +msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "72.34" +msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "88.02" +msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mlp_mixer/mlp-mixer-large-" +"p16_64xb64_in1k.py)" +msgstr "" + +#: ../../papers/mlp_mixer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mlp-mixer/mixer-large-" +"p16_3rdparty_64xb64_in1k_20211124-5a2519d2.pth)" +msgstr "" + +#: ../../papers/mlp_mixer.md:27 +msgid "" +"*Models with * are converted from [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/" +"models/mlp_mixer.py). The config files of these models are only for validation. We don't ensure these " +"config files' training accuracy and welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/mobilenet_v2.md ../../papers/mobilenet_v2.md:4 +msgid "MobileNet V2" +msgstr "" + +#: ../../papers/mobilenet_v2.md:6 +msgid "[MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381)" +msgstr "" + +#: ../../papers/mobilenet_v2.md:12 +msgid "" +"In this paper we describe a new mobile architecture, MobileNetV2, that improves the state of the art " +"performance of mobile models on multiple tasks and benchmarks as well as across a spectrum of different " +"model sizes. We also describe efficient ways of applying these mobile models to object detection in a novel " +"framework we call SSDLite. Additionally, we demonstrate how to build mobile semantic segmentation models " +"through a reduced form of DeepLabv3 which we call Mobile DeepLabv3." +msgstr "" + +#: ../../papers/mobilenet_v2.md:14 +msgid "" +"The MobileNetV2 architecture is based on an inverted residual structure where the input and output of the " +"residual block are thin bottleneck layers opposite to traditional residual models which use expanded " +"representations in the input an MobileNetV2 uses lightweight depthwise convolutions to filter features in " +"the intermediate expansion layer. Additionally, we find that it is important to remove non-linearities in " +"the narrow layers in order to maintain representational power. We demonstrate that this improves " +"performance and provide an intuition that led to this design. Finally, our approach allows decoupling of " +"the input/output domains from the expressiveness of the transformation, which provides a convenient " +"framework for further analysis. We measure our performance on Imagenet classification, COCO object " +"detection, VOC image segmentation. 
We evaluate the trade-offs between accuracy, and number of operations " +"measured by multiply-adds (MAdd), as well as the number of parameters" +msgstr "" + +#: ../../papers/mobilenet_v2.md +msgid "3.5" +msgstr "" + +#: ../../papers/mobilenet_v2.md +msgid "0.319" +msgstr "" + +#: ../../papers/mobilenet_v2.md +msgid "71.86" +msgstr "" + +#: ../../papers/mobilenet_v2.md +msgid "90.42" +msgstr "" + +#: ../../papers/mobilenet_v2.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilenet_v2/mobilenet-" +"v2_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/mobilenet_v2.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/" +"mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth) | [log](https://download.openmmlab.com/" +"mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.log.json)" +msgstr "" + +#: ../../papers/mobilenet_v3.md:4 +msgid "MobileNet V3" +msgstr "" + +#: ../../papers/mobilenet_v3.md:6 +msgid "[Searching for MobileNetV3](https://arxiv.org/abs/1905.02244)" +msgstr "" + +#: ../../papers/mobilenet_v3.md:12 +#, python-format +msgid "" +"We present the next generation of MobileNets based on a combination of complementary search techniques as " +"well as a novel architecture design. MobileNetV3 is tuned to mobile phone CPUs through a combination of " +"hardware-aware network architecture search (NAS) complemented by the NetAdapt algorithm and then " +"subsequently improved through novel architecture advances. This paper starts the exploration of how " +"automated search algorithms and network design can work together to harness complementary approaches " +"improving the overall state of the art. Through this process we create two new MobileNet models for " +"release: MobileNetV3-Large and MobileNetV3-Small which are targeted for high and low resource use cases. " +"These models are then adapted and applied to the tasks of object detection and semantic segmentation. For " +"the task of semantic segmentation (or any dense pixel prediction), we propose a new efficient segmentation " +"decoder Lite Reduced Atrous Spatial Pyramid Pooling (LR-ASPP). We achieve new state of the art results for " +"mobile classification, detection and segmentation. MobileNetV3-Large is 3.2% more accurate on ImageNet " +"classification while reducing latency by 15% compared to MobileNetV2. MobileNetV3-Small is 4.6% more " +"accurate while reducing latency by 5% compared to MobileNetV2. MobileNetV3-Large detection is 25% faster at " +"roughly the same accuracy as MobileNetV2 on COCO detection. MobileNetV3-Large LR-ASPP is 30% faster than " +"MobileNetV2 R-ASPP at similar accuracy for Cityscapes segmentation." 
+msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "MobileNetV3-Small-050\\*\\*" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "1.59" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "0.02" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "57.91" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "80.19" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilenet_v3/mobilenet-v3-" +"small-050_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-" +"small-050_3rdparty_in1k_20221114-e0b86be1.pth)" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "MobileNetV3-Small-075\\*\\*" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "2.04" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "0.04" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "65.23" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "85.44" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilenet_v3/mobilenet-v3-" +"small-075_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-" +"small-075_3rdparty_in1k_20221114-2011fa76.pth)" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "MobileNetV3-Small" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "2.54" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "0.06" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "66.68" +msgstr "" + +#: ../../papers/mobilenet_v3.md ../../papers/resnet.md:76 ../../papers/swin_transformer.md:66 +msgid "86.74" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilenet_v3/mobilenet-v3-" +"small_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-" +"small_8xb128_in1k_20221114-bd1bfcde.pth) | [log](https://download.openmmlab.com/mmclassification/v0/" +"mobilenet_v3/mobilenet-v3-small_8xb128_in1k_20221114-bd1bfcde.log.json)" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "MobileNetV3-Small\\*" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "67.66" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0." 
+"pth)" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "MobileNetV3-Large" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "5.48" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "0.23" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "73.49" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "91.31" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilenet_v3/mobilenet-v3-" +"large_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-" +"large_8xb128_in1k_20221114-0ed9ed9a.pth) | [log](https://download.openmmlab.com/mmclassification/v0/" +"mobilenet_v3/mobilenet-v3-large_8xb128_in1k_20221114-0ed9ed9a.log.json)" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "MobileNetV3-Large\\*" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "74.04" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "91.34" +msgstr "" + +#: ../../papers/mobilenet_v3.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186." +"pth)" +msgstr "" + +#: ../../papers/mobilenet_v3.md:31 +#, python-format +msgid "" +"We cannot reproduce the performances provided by TorchVision with the [training script](https://github.com/" +"pytorch/vision/tree/master/references/classification#mobilenetv3-large--small), and the accuracy results we " +"got are 65.5±0.5% and 73.39% for small and large model respectively. Here we provide checkpoints trained by " +"MMClassification that outperform the aforementioned results, and the original checkpoints provided by " +"TorchVision." +msgstr "" + +#: ../../papers/mobilenet_v3.md:33 +msgid "" +"*Models with * are converted from [torchvision](https://pytorch.org/vision/stable/_modules/torchvision/" +"models/mobilenetv3.html). Models with \\*\\* are converted from [timm](https://github.com/rwightman/pytorch-" +"image-models/blob/main/timm/models/mobilenetv3.py). The config files of these models are only for " +"validation. We don't ensure these config files' training accuracy and welcome you to contribute your " +"reproduction results.*" +msgstr "" + +#: ../../papers/mobileone.md:4 +msgid "MobileOne" +msgstr "" + +#: ../../papers/mobileone.md:6 +msgid "[An Improved One millisecond Mobile Backbone](https://arxiv.org/abs/2206.04040)" +msgstr "" + +#: ../../papers/mobileone.md:12 +msgid "" +"Mobileone is proposed by apple and based on reparameterization. On the apple chips, the accuracy of the " +"model is close to 0.76 on the ImageNet dataset when the latency is less than 1ms. Its main improvements " +"based on [RepVGG](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobileone/../repvgg) are " +"fllowing:" +msgstr "" + +#: ../../papers/mobileone.md:14 +msgid "" +"Reparameterization using Depthwise convolution and Pointwise convolution instead of normal convolution." +msgstr "" + +#: ../../papers/mobileone.md:15 +msgid "Removal of the residual structure which is not friendly to access memory." +msgstr "" + +#: ../../papers/mobileone.md:33 ../../papers/replknet.md:33 ../../papers/repmlp.md:29 +#: ../../papers/repvgg.md:39 +msgid "How to use" +msgstr "" + +#: ../../papers/mobileone.md:35 +msgid "" +"The checkpoints provided are all `training-time` models. 
Use the reparameterize tool or `switch_to_deploy` " +"interface to switch them to more efficient `inference-time` architecture, which not only has fewer " +"parameters but also fewer calculations." +msgstr "" + +#: ../../papers/mobileone.md:40 +msgid "Use the `classifier.backbone.switch_to_deploy()` interface to switch MobileOne to the inference mode." +msgstr "" + +#: ../../papers/mobileone.md:95 +msgid "Download Checkpoint:" +msgstr "" + +#: ../../papers/mobileone.md:101 +msgid "Test with the unfused model:" +msgstr "" + +#: ../../papers/mobileone.md:107 +msgid "Reparameterize checkpoint:" +msgstr "" + +#: ../../papers/mobileone.md:113 +msgid "Test with the fused model:" +msgstr "" + +#: ../../papers/mobileone.md:120 +msgid "Reparameterize Tool" +msgstr "" + +#: ../../papers/mobileone.md:122 ../../papers/replknet.md:39 ../../papers/repmlp.md:35 +#: ../../papers/repvgg.md:45 +msgid "Use the provided tool to reparameterize the given model and save the checkpoint:" +msgstr "" + +#: ../../papers/mobileone.md:128 +msgid "" +"`${CFG_PATH}` is the config file path, `${SRC_CKPT_PATH}` is the source checkpoint file path, `" +"${TARGET_CKPT_PATH}` is the target deploy weight file path." +msgstr "" + +#: ../../papers/mobileone.md:130 +msgid "For example:" +msgstr "" + +#: ../../papers/mobileone.md:137 +msgid "" +"To use reparameterized weights, the config file must switch to [**the deploy config files**](https://github." +"com/open-mmlab/mmclassification/blob/1.x/configs/mobileone/deploy)." +msgstr "" + +#: ../../papers/mobileone.md:143 +msgid "For example, to use the reparameterized weights above:" +msgstr "" + +#: ../../papers/mobileone.md:149 +msgid "" +"For more configurable parameters, please refer to the [API](https://mmclassification.readthedocs.io/en/1.x/" +"api/generated/mmcls.models.backbones.MobileOne.html#mmcls.models.backbones.MobileOne)."
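As a rough illustration of the `switch_to_deploy` workflow described above, here is a minimal sketch (not extracted from the docs themselves): the config and checkpoint names are the MobileOne-s0 ones listed in the table that follows, and `mmcls.apis.init_model` is assumed to be available in the 1.x API.

```python
# Minimal sketch: load a training-time MobileOne-s0 checkpoint and fuse its
# re-parameterizable branches for inference. Paths are the ones listed in the
# MobileOne table; adjust them to your local copies.
import torch
from mmcls.apis import init_model  # assumed available in the 1.x API

cfg = 'configs/mobileone/mobileone-s0_8xb32_in1k.py'      # train-time config
ckpt = 'mobileone-s0_8xb32_in1k_20221110-0bc94952.pth'    # downloaded checkpoint

model = init_model(cfg, ckpt, device='cpu')
model.backbone.switch_to_deploy()  # fuse branches in place: fewer params, fewer calculations
model.eval()

with torch.no_grad():
    feats = model.backbone(torch.rand(1, 3, 224, 224))    # quick sanity check on the fused backbone
```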
+msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "MobileOne-s0" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "5.29(train) | 2.08 (deploy)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "1.09 (train) | 0.28 (deploy)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "71.34" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "89.87" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobileone/mobileone-" +"s0_8xb32_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"mobileone/deploy/mobileone-s0_deploy_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-" +"s0_8xb32_in1k_20221110-0bc94952.pth) | [log](https://download.openmmlab.com/mmclassification/v0/mobileone/" +"mobileone-s0_8xb32_in1k_20221110-0bc94952.json)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "MobileOne-s1" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "4.83 (train) | 4.76 (deploy)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "0.86 (train) | 0.84 (deploy)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "75.72" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "92.54" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobileone/mobileone-" +"s1_8xb32_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"mobileone/deploy/mobileone-s1_deploy_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s1_8xb32_in1k_20221110-" +"ceeef467.pth) | [log](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-" +"s1_8xb32_in1k_20221110-ceeef467.json)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "MobileOne-s2" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "7.88 (train) | 7.88 (deploy)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "1.34 (train) | 1.31 (deploy)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "77.37" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "93.34" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobileone/mobileone-" +"s2_8xb32_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"mobileone/deploy/mobileone-s2_deploy_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-" +"s2_8xb32_in1k_20221110-9c7ecb97.pth) | [log](https://download.openmmlab.com/mmclassification/v0/mobileone/" +"mobileone-s2_8xb32_in1k_20221110-9c7ecb97.json)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "MobileOne-s3" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "10.17 (train) | 10.08 (deploy)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "1.95 (train) | 1.91 (deploy)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "78.06" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobileone/mobileone-" +"s3_8xb32_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" 
+"mobileone/deploy/mobileone-s3_deploy_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-s3_8xb32_in1k_20221110-" +"c95eb3bf.pth) | [log](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-" +"s3_8xb32_in1k_20221110-c95eb3bf.pth)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "MobileOne-s4" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "14.95 (train) | 14.84 (deploy)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "3.05 (train) | 3.00 (deploy)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "79.69" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "94.46" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobileone/mobileone-" +"s4_8xb32_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"mobileone/deploy/mobileone-s4_deploy_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/mobileone.md:86 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobileone/mobileone-" +"s4_8xb32_in1k_20221110-28d888cb.pth) | [log](https://download.openmmlab.com/mmclassification/v0/mobileone/" +"mobileone-s4_8xb32_in1k_20221110-28d888cb.pth)" +msgstr "" + +#: ../../papers/mobilevit.md:4 +msgid "MobileVit" +msgstr "" + +#: ../../papers/mobilevit.md:6 +msgid "" +"[MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/" +"abs/2110.02178)" +msgstr "" + +#: ../../papers/mobilevit.md:12 +msgid "" +"**MobileViT** aims at introducing a light-weight network, which takes the advantages of both ViTs and CNNs, " +"uses the `InvertedResidual` blocks in [MobileNetV2](https://github.com/open-mmlab/mmclassification/blob/1.x/" +"configs/mobilevit/../mobilenet_v2/README.md) and `MobileViTBlock` which refers to [ViT](https://github.com/" +"open-mmlab/mmclassification/blob/1.x/configs/mobilevit/../vision_transformer/README.md) transformer blocks " +"to build a standard 5-stage model structure." +msgstr "" + +#: ../../papers/mobilevit.md:14 +msgid "" +"The MobileViTBlock reckons transformers as convolutions to perform a global representation, meanwhile " +"conbined with original convolution layers for local representation to build a block with global receptive " +"field. This is different from ViT, which adds an extra class token and position embeddings for learning " +"relative relationship. Without any position embeddings, MobileViT can benfit from multi-scale inputs during " +"training." +msgstr "" + +#: ../../papers/mobilevit.md:16 +msgid "" +"Also, this paper puts forward a strategy for multi-scale training to dynamically adjust batch size based on " +"the image size to both improve training efficiency and final performance." +msgstr "" + +#: ../../papers/mobilevit.md:18 +msgid "It is also proven effective in downstream tasks such as object detection and segmentation." +msgstr "" + +#: ../../papers/mobilevit.md:32 +msgid "" +"Light-weight convolutional neural networks (CNNs) are the de-facto for mobile vision tasks. Their spatial " +"inductive biases allow them to learn representations with fewer parameters across different vision tasks. " +"However, these networks are spatially local. To learn global representations, self-attention-based vision " +"trans-formers (ViTs) have been adopted. Unlike CNNs, ViTs are heavy-weight. 
In this paper, we ask the " +"following question: is it possible to combine the strengths of CNNs and ViTs to build a light-weight and " +"low latency network for mobile vision tasks? Towards this end, we introduce MobileViT, a light-weight and " +"general-purpose vision transformer for mobile devices. MobileViT presents a different perspective for the " +"global processing of information with transformers, i.e., transformers as convolutions. Our results show " +"that MobileViT significantly outperforms CNN- and ViT-based networks across different tasks and datasets. " +"On the ImageNet-1k dataset, MobileViT achieves top-1 accuracy of 78.4% with about 6 million parameters, " +"which is 3.2% and 6.2% more accurate than MobileNetv3 (CNN-based) and DeIT (ViT-based) for a similar number " +"of parameters. On the MS-COCO object detection task, MobileViT is 5.7% more accurate than MobileNetv3 for a " +"similar number of parameters.
" +msgstr "" + +#: ../../papers/mobilevit.md:87 +msgid "" +"For more configurable parameters, please refer to the [API](https://mmclassification.readthedocs.io/en/1.x/" +"api/generated/mmcls.models.backbones.MobileViT.html#mmcls.models.backbones.MobileViT)." +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "MobileViT-XXSmall\\*" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "1.27" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "69.02" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "88.91" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilevit/mobilevit-" +"xxsmall_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobilevit/mobilevit-" +"xxsmall_3rdparty_in1k_20221018-77835605.pth)" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "MobileViT-XSmall\\*" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "2.32" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "1.05" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "74.75" +msgstr "" + +#: ../../papers/mobilevit.md:71 ../../papers/regnet.md +msgid "92.32" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilevit/mobilevit-" +"xsmall_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobilevit/mobilevit-" +"xsmall_3rdparty_in1k_20221018-be39a6e7.pth)" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "MobileViT-Small\\*" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "5.58" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "2.03" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "78.25" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "94.09" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mobilevit/mobilevit-" +"small_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/mobilevit.md:71 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mobilevit/mobilevit-small_3rdparty_in1k_20221018-" +"cb4f741c.pth)" +msgstr "" + +#: ../../papers/mobilevit.md:99 +msgid "" +"*Models with * are converted from [ml-cvnets](https://github.com/apple/ml-cvnets). The config files of " +"these models are only for validation. We don't ensure these config files' training accuracy and welcome you " +"to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/mvit.md:4 +msgid "MViT V2" +msgstr "" + +#: ../../papers/mvit.md:6 +msgid "" +"[MViTv2: Improved Multiscale Vision Transformers for Classification and Detection](http://openaccess.thecvf." +"com//content/CVPR2022/papers/" +"Li_MViTv2_Improved_Multiscale_Vision_Transformers_for_Classification_and_Detection_CVPR_2022_paper.pdf)" +msgstr "" + +#: ../../papers/mvit.md:12 +#, python-format +msgid "" +"In this paper, we study Multiscale Vision Transformers (MViTv2) as a unified architecture for image and " +"video classification, as well as object detection. We present an improved version of MViT that incorporates " +"decomposed relative positional embeddings and residual pooling connections. We instantiate this " +"architecture in five sizes and evaluate it for ImageNet classification, COCO detection and Kinetics video " +"recognition where it outperforms prior work. 
We further compare MViTv2s' pooling attention to window " +"attention mechanisms where it outperforms the latter in accuracy/compute. Without bells-and-whistles, " +"MViTv2 has state-of-the-art performance in 3 domains: 88.8% accuracy on ImageNet classification, 58.7 boxAP " +"on COCO object detection as well as 86.1% on Kinetics-400 video classification." +msgstr "" + +#: ../../papers/mvit.md +msgid "MViTv2-tiny\\*" +msgstr "" + +#: ../../papers/mvit.md +msgid "24.17" +msgstr "" + +#: ../../papers/mvit.md +msgid "4.70" +msgstr "" + +#: ../../papers/mvit.md +msgid "82.33" +msgstr "" + +#: ../../papers/mvit.md ../../papers/vision_transformer.md:71 +msgid "96.15" +msgstr "" + +#: ../../papers/mvit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mvit/mvitv2-tiny_8xb256_in1k.py)" +msgstr "" + +#: ../../papers/mvit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-tiny_3rdparty_in1k_20220722-db7beeef." +"pth)" +msgstr "" + +#: ../../papers/mvit.md +msgid "MViTv2-small\\*" +msgstr "" + +#: ../../papers/mvit.md +msgid "34.87" +msgstr "" + +#: ../../papers/mvit.md +msgid "7.00" +msgstr "" + +#: ../../papers/mvit.md +msgid "96.51" +msgstr "" + +#: ../../papers/mvit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mvit/mvitv2-small_8xb256_in1k.py)" +msgstr "" + +#: ../../papers/mvit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-" +"small_3rdparty_in1k_20220722-986bd741.pth)" +msgstr "" + +#: ../../papers/mvit.md +msgid "MViTv2-base\\*" +msgstr "" + +#: ../../papers/mvit.md +msgid "51.47" +msgstr "" + +#: ../../papers/mvit.md +msgid "10.20" +msgstr "" + +#: ../../papers/mvit.md +msgid "84.34" +msgstr "" + +#: ../../papers/mvit.md ../../papers/swin_transformer_v2.md:76 +msgid "96.86" +msgstr "" + +#: ../../papers/mvit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mvit/mvitv2-base_8xb256_in1k.py)" +msgstr "" + +#: ../../papers/mvit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-base_3rdparty_in1k_20220722-9c4f0a17." +"pth)" +msgstr "" + +#: ../../papers/mvit.md +msgid "MViTv2-large\\*" +msgstr "" + +#: ../../papers/mvit.md +msgid "217.99" +msgstr "" + +#: ../../papers/mvit.md +msgid "42.10" +msgstr "" + +#: ../../papers/mvit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/mvit/mvitv2-large_8xb256_in1k.py)" +msgstr "" + +#: ../../papers/mvit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/mvit/mvitv2-" +"large_3rdparty_in1k_20220722-2b57b983.pth)" +msgstr "" + +#: ../../papers/mvit.md:36 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/facebookresearch/mvit). The config " +"files of these models are only for inference. We don't ensure these config files' training accuracy and " +"welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/poolformer.md:4 +msgid "PoolFormer" +msgstr "" + +#: ../../papers/poolformer.md:6 +msgid "[MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418)" +msgstr "" + +#: ../../papers/poolformer.md:12 +#, python-format +msgid "" +"Transformers have shown great potential in computer vision tasks. A common belief is their attention-based " +"token mixer module contributes most to their competence. 
However, recent works show the attention-based " +"module in transformers can be replaced by spatial MLPs and the resulted models still perform quite well. " +"Based on this observation, we hypothesize that the general architecture of the transformers, instead of the " +"specific token mixer module, is more essential to the model's performance. To verify this, we deliberately " +"replace the attention module in transformers with an embarrassingly simple spatial pooling operator to " +"conduct only basic token mixing. Surprisingly, we observe that the derived model, termed as PoolFormer, " +"achieves competitive performance on multiple computer vision tasks. For example, on ImageNet-1K, PoolFormer " +"achieves 82.1% top-1 accuracy, surpassing well-tuned vision transformer/MLP-like baselines DeiT-B/ResMLP-" +"B24 by 0.3%/1.1% accuracy with 35%/52% fewer parameters and 49%/61% fewer MACs. The effectiveness of " +"PoolFormer verifies our hypothesis and urges us to initiate the concept of \"MetaFormer\", a general " +"architecture abstracted from transformers without specifying the token mixer. Based on the extensive " +"experiments, we argue that MetaFormer is the key player in achieving superior results for recent " +"transformer and MLP-like models on vision tasks. This work calls for more future research dedicated to " +"improving MetaFormer instead of focusing on the token mixer modules. Additionally, our proposed PoolFormer " +"could serve as a starting baseline for future MetaFormer architecture design." +msgstr "" + +#: ../../papers/poolformer.md +msgid "PoolFormer-S12\\*" +msgstr "" + +#: ../../papers/poolformer.md +msgid "11.92" +msgstr "" + +#: ../../papers/poolformer.md ../../papers/shufflenet_v1.md +msgid "1.87" +msgstr "" + +#: ../../papers/poolformer.md +msgid "77.24" +msgstr "" + +#: ../../papers/poolformer.md ../../papers/regnet.md +msgid "93.51" +msgstr "" + +#: ../../papers/poolformer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/poolformer/poolformer-" +"s12_32xb128_in1k.py)" +msgstr "" + +#: ../../papers/poolformer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-" +"s12_3rdparty_32xb128_in1k_20220414-f8d83051.pth)" +msgstr "" + +#: ../../papers/poolformer.md +msgid "PoolFormer-S24\\*" +msgstr "" + +#: ../../papers/poolformer.md +msgid "21.39" +msgstr "" + +#: ../../papers/poolformer.md +msgid "3.51" +msgstr "" + +#: ../../papers/poolformer.md +msgid "80.33" +msgstr "" + +#: ../../papers/poolformer.md +msgid "95.05" +msgstr "" + +#: ../../papers/poolformer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/poolformer/poolformer-" +"s24_32xb128_in1k.py)" +msgstr "" + +#: ../../papers/poolformer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-" +"s24_3rdparty_32xb128_in1k_20220414-d7055904.pth)" +msgstr "" + +#: ../../papers/poolformer.md +msgid "PoolFormer-S36\\*" +msgstr "" + +#: ../../papers/poolformer.md +msgid "30.86" +msgstr "" + +#: ../../papers/poolformer.md +msgid "5.15" +msgstr "" + +#: ../../papers/poolformer.md +msgid "81.43" +msgstr "" + +#: ../../papers/poolformer.md +msgid "95.45" +msgstr "" + +#: ../../papers/poolformer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/poolformer/poolformer-" +"s36_32xb128_in1k.py)" +msgstr "" + +#: ../../papers/poolformer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-" 
+"s36_3rdparty_32xb128_in1k_20220414-d78ff3e8.pth)" +msgstr "" + +#: ../../papers/poolformer.md +msgid "PoolFormer-M36\\*" +msgstr "" + +#: ../../papers/poolformer.md +msgid "56.17" +msgstr "" + +#: ../../papers/poolformer.md +msgid "8.96" +msgstr "" + +#: ../../papers/poolformer.md +msgid "82.14" +msgstr "" + +#: ../../papers/poolformer.md +msgid "95.71" +msgstr "" + +#: ../../papers/poolformer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/poolformer/poolformer-" +"m36_32xb128_in1k.py)" +msgstr "" + +#: ../../papers/poolformer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-" +"m36_3rdparty_32xb128_in1k_20220414-c55e0949.pth)" +msgstr "" + +#: ../../papers/poolformer.md +msgid "PoolFormer-M48\\*" +msgstr "" + +#: ../../papers/poolformer.md +msgid "73.47" +msgstr "" + +#: ../../papers/poolformer.md +msgid "11.80" +msgstr "" + +#: ../../papers/poolformer.md +msgid "82.51" +msgstr "" + +#: ../../papers/poolformer.md +msgid "95.95" +msgstr "" + +#: ../../papers/poolformer.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/poolformer/poolformer-" +"m48_32xb128_in1k.py)" +msgstr "" + +#: ../../papers/poolformer.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/poolformer/poolformer-" +"m48_3rdparty_32xb128_in1k_20220414-9378f3eb.pth)" +msgstr "" + +#: ../../papers/poolformer.md:30 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/sail-sg/poolformer). The config " +"files of these models are only for inference. We don't ensure these config files' training accuracy and " +"welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/regnet.md:4 +msgid "RegNet" +msgstr "" + +#: ../../papers/regnet.md:6 +msgid "[Designing Network Design Spaces](https://arxiv.org/abs/2003.13678)" +msgstr "" + +#: ../../papers/regnet.md:12 +msgid "" +"In this work, we present a new network design paradigm. Our goal is to help advance the understanding of " +"network design and discover design principles that generalize across settings. Instead of focusing on " +"designing individual network instances, we design network design spaces that parametrize populations of " +"networks. The overall process is analogous to classic manual design of networks, but elevated to the design " +"space level. Using our methodology we explore the structure aspect of network design and arrive at a low-" +"dimensional design space consisting of simple, regular networks that we call RegNet. The core insight of " +"the RegNet parametrization is surprisingly simple: widths and depths of good networks can be explained by a " +"quantized linear function. We analyze the RegNet design space and arrive at interesting findings that do " +"not match the current practice of network design. The RegNet design space provides simple and fast networks " +"that work well across a wide range of flop regimes. Under comparable training settings and flops, the " +"RegNet models outperform the popular EfficientNet models while being up to 5x faster on GPUs." 
+msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-400MF" +msgstr "" + +#: ../../papers/regnet.md +msgid "5.16" +msgstr "" + +#: ../../papers/regnet.md +msgid "0.41" +msgstr "" + +#: ../../papers/regnet.md +msgid "72.56" +msgstr "" + +#: ../../papers/regnet.md +msgid "90.78" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/regnet/regnetx-400mf_8xb128_in1k." +"py)" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/regnet/" +"regnetx-400mf_8xb128_in1k_20211213-89bfc226.pth) | [log](https://download.openmmlab.com/mmclassification/v0/" +"regnet/regnetx-400mf_8xb128_in1k_20211208_143316.log.json)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-800MF" +msgstr "" + +#: ../../papers/regnet.md +msgid "7.26" +msgstr "" + +#: ../../papers/regnet.md +msgid "0.81" +msgstr "" + +#: ../../papers/regnet.md +msgid "74.76" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/regnet/regnetx-800mf_8xb128_in1k." +"py)" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/regnet/" +"regnetx-800mf_8xb128_in1k_20211213-222b0f11.pth) | [log](https://download.openmmlab.com/mmclassification/v0/" +"regnet/regnetx-800mf_8xb128_in1k_20211207_143037.log.json)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-1.6GF" +msgstr "" + +#: ../../papers/regnet.md +msgid "9.19" +msgstr "" + +#: ../../papers/regnet.md +msgid "1.63" +msgstr "" + +#: ../../papers/regnet.md +msgid "76.84" +msgstr "" + +#: ../../papers/regnet.md +msgid "93.31" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/regnet/regnetx-1.6gf_8xb128_in1k." 
+"py)" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-1.6gf_8xb128_in1k_20211213-" +"d1b89758.pth) | [log](https://download.openmmlab.com/mmclassification/v0/regnet/" +"regnetx-1.6gf_8xb128_in1k_20211208_143018.log.json)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-3.2GF" +msgstr "" + +#: ../../papers/regnet.md +msgid "15.3" +msgstr "" + +#: ../../papers/regnet.md +msgid "3.21" +msgstr "" + +#: ../../papers/regnet.md +msgid "78.09" +msgstr "" + +#: ../../papers/regnet.md ../../papers/resnet.md:76 ../../papers/wrn.md +msgid "94.08" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/regnet/regnetx-3.2gf_8xb64_in1k.py)" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/regnet/" +"regnetx-3.2gf_8xb64_in1k_20211213-1fdd82ae.pth) | [log](https://download.openmmlab.com/mmclassification/v0/" +"regnet/regnetx-3.2gf_8xb64_in1k_20211208_142720.log.json)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-4.0GF" +msgstr "" + +#: ../../papers/regnet.md +msgid "22.12" +msgstr "" + +#: ../../papers/regnet.md +msgid "4.0" +msgstr "" + +#: ../../papers/regnet.md +msgid "78.60" +msgstr "" + +#: ../../papers/regnet.md ../../papers/resnext.md +msgid "94.17" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/regnet/regnetx-4.0gf_8xb64_in1k.py)" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-4.0gf_8xb64_in1k_20211213-" +"efed675c.pth) | [log](https://download.openmmlab.com/mmclassification/v0/regnet/" +"regnetx-4.0gf_8xb64_in1k_20211207_150431.log.json)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-6.4GF" +msgstr "" + +#: ../../papers/regnet.md +msgid "26.21" +msgstr "" + +#: ../../papers/regnet.md +msgid "6.51" +msgstr "" + +#: ../../papers/regnet.md ../../papers/repvgg.md +msgid "79.38" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/regnet/regnetx-6.4gf_8xb64_in1k.py)" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/regnet/" +"regnetx-6.4gf_8xb64_in1k_20211215-5c6089da.pth) | [log](https://download.openmmlab.com/mmclassification/v0/" +"regnet/regnetx-6.4gf_8xb64_in1k_20211213_172748.log.json)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-8.0GF" +msgstr "" + +#: ../../papers/regnet.md +msgid "39.57" +msgstr "" + +#: ../../papers/regnet.md ../../papers/resnext.md +msgid "8.03" +msgstr "" + +#: ../../papers/regnet.md +msgid "79.12" +msgstr "" + +#: ../../papers/regnet.md +msgid "94.51" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/regnet/regnetx-8.0gf_8xb64_in1k.py)" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/regnet/" +"regnetx-8.0gf_8xb64_in1k_20211213-9a9fcc76.pth) | [log](https://download.openmmlab.com/mmclassification/v0/" +"regnet/regnetx-8.0gf_8xb64_in1k_20211208_103250.log.json)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-12GF" +msgstr "" + +#: ../../papers/regnet.md +msgid "46.11" +msgstr "" + +#: ../../papers/regnet.md +msgid "12.15" +msgstr "" + +#: ../../papers/regnet.md +msgid "79.67" +msgstr "" + +#: 
../../papers/regnet.md +msgid "95.03" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/regnet/regnetx-12gf_8xb64_in1k.py)" +msgstr "" + +#: ../../papers/regnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/regnet/regnetx-12gf_8xb64_in1k_20211213-5df8c2f8." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/regnet/" +"regnetx-12gf_8xb64_in1k_20211208_143713.log.json)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-400MF\\*" +msgstr "" + +#: ../../papers/regnet.md +msgid "72.55" +msgstr "" + +#: ../../papers/regnet.md +msgid "90.91" +msgstr "" + +#: ../../papers/regnet.md +msgid "[model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-400MF-0db9f35c.pth)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-800MF\\*" +msgstr "" + +#: ../../papers/regnet.md +msgid "75.21" +msgstr "" + +#: ../../papers/regnet.md +msgid "92.37" +msgstr "" + +#: ../../papers/regnet.md +msgid "[model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-800MF-4f9d1e8a.pth)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-1.6GF\\*" +msgstr "" + +#: ../../papers/regnet.md +msgid "77.04" +msgstr "" + +#: ../../papers/regnet.md +msgid "[model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-1.6GF-cfb32375.pth)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-3.2GF\\*" +msgstr "" + +#: ../../papers/regnet.md ../../papers/seresnet.md +msgid "78.26" +msgstr "" + +#: ../../papers/regnet.md +msgid "94.20" +msgstr "" + +#: ../../papers/regnet.md +msgid "[model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-3.2GF-82c43fd5.pth)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-4.0GF\\*" +msgstr "" + +#: ../../papers/regnet.md +msgid "78.72" +msgstr "" + +#: ../../papers/regnet.md +msgid "[model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-4.0GF-ef8bb32c.pth)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-6.4GF\\*" +msgstr "" + +#: ../../papers/regnet.md +msgid "79.22" +msgstr "" + +#: ../../papers/regnet.md +msgid "94.61" +msgstr "" + +#: ../../papers/regnet.md +msgid "[model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-6.4GF-6888c0ea.pth)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-8.0GF\\*" +msgstr "" + +#: ../../papers/regnet.md +msgid "79.31" +msgstr "" + +#: ../../papers/regnet.md +msgid "94.57" +msgstr "" + +#: ../../papers/regnet.md +msgid "[model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-8.0GF-cb4c77ec.pth)" +msgstr "" + +#: ../../papers/regnet.md +msgid "RegNetX-12GF\\*" +msgstr "" + +#: ../../papers/regnet.md +msgid "79.91" +msgstr "" + +#: ../../papers/regnet.md ../../papers/resnet.md:76 +msgid "94.78" +msgstr "" + +#: ../../papers/regnet.md +msgid "[model](https://download.openmmlab.com/mmclassification/v0/regnet/convert/RegNetX-12GF-0574538f.pth)" +msgstr "" + +#: ../../papers/regnet.md:41 +msgid "" +"*Models with * are converted from [pycls](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO." +"md). 
The config files of these models are only for validation.*" +msgstr "" + +#: ../../papers/replknet.md:4 +msgid "RepLKNet" +msgstr "" + +#: ../../papers/replknet.md:6 +msgid "" +"[Scaling Up Your Kernels to 31x31: Revisiting Large Kernel Design in CNNs](https://arxiv.org/abs/2203.06717)" +msgstr "" + +#: ../../papers/replknet.md:12 +msgid "" +"We revisit large kernel design in modern convolutional neural networks (CNNs). Inspired by recent advances " +"in vision transformers (ViTs), in this paper, we demonstrate that using a few large convolutional kernels " +"instead of a stack of small kernels could be a more powerful paradigm. We suggested five guidelines, e.g., " +"applying re-parameterized large depth-wise convolutions, to design efficient highperformance large-kernel " +"CNNs. Following the guidelines, we propose RepLKNet, a pure CNN architecture whose kernel size is as large " +"as 31×31, in contrast to commonly used 3×3. RepLKNet greatly closes the performance gap between CNNs and " +"ViTs, e.g., achieving comparable or superior results than Swin Transformer on ImageNet and a few typical " +"downstream tasks, with lower latency. RepLKNet also shows nice scalability to big data and large models, " +"obtaining 87.8% top-1 accuracy on ImageNet and 56.0% mIoU on ADE20K, which is very competitive among the " +"state-of-the-arts with similar model sizes. Our study further reveals that, in contrast to small-kernel " +"CNNs, large kernel CNNs have much larger effective receptive fields and higher shape bias rather than " +"texture bias." +msgstr "" + +#: ../../papers/replknet.md +msgid "Resolution" +msgstr "" + +#: ../../papers/replknet.md +msgid "Pretrained Dataset" +msgstr "" + +#: ../../papers/replknet.md +msgid "RepLKNet-31B\\*" +msgstr "" + +#: ../../papers/replknet.md +msgid "From Scratch" +msgstr "" + +#: ../../papers/replknet.md +msgid "79.9(train) | 79.5 (deploy)" +msgstr "" + +#: ../../papers/replknet.md +msgid "15.6 (train) | 15.4 (deploy)" +msgstr "" + +#: ../../papers/replknet.md +msgid "83.48" +msgstr "" + +#: ../../papers/replknet.md +msgid "96.57" +msgstr "" + +#: ../../papers/replknet.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/replknet/" +"replknet-31B_32xb64_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/" +"configs/replknet/deploy/replknet-31B-deploy_32xb64_in1k.py)" +msgstr "" + +#: ../../papers/replknet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31B_3rdparty_in1k_20221118-" +"fd08e268.pth)" +msgstr "" + +#: ../../papers/replknet.md +msgid "46.0 (train) | 45.3 (deploy)" +msgstr "" + +#: ../../papers/replknet.md +msgid "97.34" +msgstr "" + +#: ../../papers/replknet.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/replknet/" +"replknet-31B_32xb64_in1k-384px.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/" +"blob/1.x/configs/replknet/deploy/replknet-31B-deploy_32xb64_in1k-384px.py)" +msgstr "" + +#: ../../papers/replknet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/replknet/" +"replknet-31B_3rdparty_in1k-384px_20221118-03a170ce.pth)" +msgstr "" + +#: ../../papers/replknet.md +msgid "ImageNet-21K" +msgstr "" + +#: ../../papers/replknet.md +msgid "85.20" +msgstr "" + +#: ../../papers/replknet.md +msgid "97.56" +msgstr "" + +#: ../../papers/replknet.md +msgid "" 
+"[model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31B_in21k-" +"pre_3rdparty_in1k_20221118-54ed5c46.pth)" +msgstr "" + +#: ../../papers/replknet.md +msgid "85.99" +msgstr "" + +#: ../../papers/replknet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31B_in21k-" +"pre_3rdparty_in1k-384px_20221118-76c92b24.pth)" +msgstr "" + +#: ../../papers/replknet.md +msgid "RepLKNet-31L\\*" +msgstr "" + +#: ../../papers/replknet.md +msgid "172.7(train) | 172.0 (deploy)" +msgstr "" + +#: ../../papers/replknet.md +msgid "97.2 (train) | 97.0 (deploy)" +msgstr "" + +#: ../../papers/replknet.md +msgid "86.63" +msgstr "" + +#: ../../papers/replknet.md +msgid "98.00" +msgstr "" + +#: ../../papers/replknet.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/replknet/" +"replknet-31L_32xb64_in1k-384px.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/" +"blob/1.x/configs/replknet/deploy/replknet-31L-deploy_32xb64_in1k-384px.py)" +msgstr "" + +#: ../../papers/replknet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-31L_in21k-" +"pre_3rdparty_in1k-384px_20221118-dc3fc07c.pth)" +msgstr "" + +#: ../../papers/replknet.md +msgid "RepLKNet-XL\\*" +msgstr "" + +#: ../../papers/replknet.md +msgid "320x320" +msgstr "" + +#: ../../papers/replknet.md +msgid "MegData-73M" +msgstr "" + +#: ../../papers/replknet.md +msgid "335.4(train) | 335.0 (deploy)" +msgstr "" + +#: ../../papers/replknet.md +msgid "129.6 (train) | 129.0 (deploy)" +msgstr "" + +#: ../../papers/replknet.md +msgid "87.57" +msgstr "" + +#: ../../papers/replknet.md +msgid "98.39" +msgstr "" + +#: ../../papers/replknet.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/replknet/replknet-" +"XL_32xb64_in1k-320px.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/" +"configs/replknet/deploy/replknet-XL-deploy_32xb64_in1k-320px.py)" +msgstr "" + +#: ../../papers/replknet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/replknet/replknet-XL_meg73m-" +"pre_3rdparty_in1k-320px_20221118-88259b1d.pth)" +msgstr "" + +#: ../../papers/replknet.md:31 ../../papers/repvgg.md:37 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/DingXiaoH/RepVGG). The config " +"files of these models are only for validation. We don't ensure these config files' training accuracy and " +"welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/replknet.md:35 ../../papers/repmlp.md:31 ../../papers/repvgg.md:41 +msgid "" +"The checkpoints provided are all `training-time` models. Use the reparameterize tool to switch them to more " +"efficient `inference-time` architecture, which not only has fewer parameters but also less calculations." +msgstr "" + +#: ../../papers/replknet.md:37 ../../papers/repmlp.md:33 ../../papers/repvgg.md:43 +msgid "Use tool" +msgstr "" + +#: ../../papers/replknet.md:45 ../../papers/repmlp.md:41 ../../papers/repvgg.md:51 +msgid "" +"`${CFG_PATH}` is the config file, `${SRC_CKPT_PATH}` is the source chenpoint file, `${TARGET_CKPT_PATH}` is " +"the target deploy weight file path." +msgstr "" + +#: ../../papers/replknet.md:47 ../../papers/repmlp.md:43 ../../papers/repvgg.md:53 +msgid "To use reparameterized weights, the config file must switch to the deploy config files." 
+msgstr "" + +#: ../../papers/replknet.md:53 ../../papers/repmlp.md:49 ../../papers/repvgg.md:59 +msgid "In the code" +msgstr "" + +#: ../../papers/replknet.md:55 ../../papers/repmlp.md:51 ../../papers/repvgg.md:61 +msgid "" +"Use `backbone.switch_to_deploy()` or `classificer.backbone.switch_to_deploy()` to switch to the deploy " +"mode. For example:" +msgstr "" + +#: ../../papers/replknet.md:65 ../../papers/repmlp.md:61 ../../papers/repvgg.md:71 +msgid "or" +msgstr "" + +#: ../../papers/repmlp.md:4 +msgid "RepMLP" +msgstr "" + +#: ../../papers/repmlp.md:6 +msgid "" +"[RepMLP: Re-parameterizing Convolutions into Fully-connected Layers forImage Recognition](https://arxiv.org/" +"abs/2105.01883)" +msgstr "" + +#: ../../papers/repmlp.md:12 +#, python-format +msgid "" +"We propose RepMLP, a multi-layer-perceptron-style neural network building block for image recognition, " +"which is composed of a series of fully-connected (FC) layers. Compared to convolutional layers, FC layers " +"are more efficient, better at modeling the long-range dependencies and positional patterns, but worse at " +"capturing the local structures, hence usually less favored for image recognition. We propose a structural " +"re-parameterization technique that adds local prior into an FC to make it powerful for image recognition. " +"Specifically, we construct convolutional layers inside a RepMLP during training and merge them into the FC " +"for inference. On CIFAR, a simple pure-MLP model shows performance very close to CNN. By inserting RepMLP " +"in traditional CNN, we improve ResNets by 1.8% accuracy on ImageNet, 2.9% for face recognition, and 2.3% " +"mIoU on Cityscapes with lower FLOPs. Our intriguing findings highlight that combining the global " +"representational capacity and positional perception of FC with the local prior of convolution can improve " +"the performance of neural network with faster speed on both the tasks with translation invariance (e.g., " +"semantic segmentation) and those with aligned images and positional patterns (e.g., face recognition)." +msgstr "" + +#: ../../papers/repmlp.md +msgid "RepMLP-B224\\*" +msgstr "" + +#: ../../papers/repmlp.md +msgid "68.24" +msgstr "" + +#: ../../papers/repmlp.md +msgid "6.71" +msgstr "" + +#: ../../papers/repmlp.md +msgid "80.41" +msgstr "" + +#: ../../papers/repmlp.md +msgid "95.12" +msgstr "" + +#: ../../papers/repmlp.md +msgid "" +"[train_cfg](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repmlp/repmlp-base_8xb64_in1k." 
+"py) | [deploy_cfg](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repmlp/repmlp-" +"base_delopy_8xb64_in1k.py)" +msgstr "" + +#: ../../papers/repmlp.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-" +"base_3rdparty_8xb64_in1k_20220330-1cb1f11b.pth)" +msgstr "" + +#: ../../papers/repmlp.md +msgid "RepMLP-B256\\*" +msgstr "" + +#: ../../papers/repmlp.md +msgid "96.45" +msgstr "" + +#: ../../papers/repmlp.md +msgid "9.69" +msgstr "" + +#: ../../papers/repmlp.md +msgid "81.11" +msgstr "" + +#: ../../papers/repmlp.md +msgid "95.5" +msgstr "" + +#: ../../papers/repmlp.md +msgid "" +"[train_cfg](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repmlp/repmlp-" +"base_8xb64_in1k-256px.py) | [deploy_cfg](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"repmlp/repmlp-base_deploy_8xb64_in1k-256px.py)" +msgstr "" + +#: ../../papers/repmlp.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repmlp/repmlp-" +"base_3rdparty_8xb64_in1k-256px_20220330-7c5a91ce.pth)" +msgstr "" + +#: ../../papers/repmlp.md:27 +msgid "" +"*Models with * are converted from [the official repo.](https://github.com/DingXiaoH/RepMLP). The config " +"files of these models are only for validation. We don't ensure these config files' training accuracy and " +"welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/repvgg.md:4 +msgid "RepVGG" +msgstr "" + +#: ../../papers/repvgg.md:6 +msgid "[Repvgg: Making vgg-style convnets great again](https://arxiv.org/abs/2101.03697)" +msgstr "" + +#: ../../papers/repvgg.md:12 +#, python-format +msgid "" +"We present a simple but powerful architecture of convolutional neural network, which has a VGG-like " +"inference-time body composed of nothing but a stack of 3x3 convolution and ReLU, while the training-time " +"model has a multi-branch topology. Such decoupling of the training-time and inference-time architecture is " +"realized by a structural re-parameterization technique so that the model is named RepVGG. On ImageNet, " +"RepVGG reaches over 80% top-1 accuracy, which is the first time for a plain model, to the best of our " +"knowledge. On NVIDIA 1080Ti GPU, RepVGG models run 83% faster than ResNet-50 or 101% faster than ResNet-101 " +"with higher accuracy and show favorable accuracy-speed trade-off compared to the state-of-the-art models " +"like EfficientNet and RegNet." 
+msgstr "" + +#: ../../papers/repvgg.md +msgid "Epochs" +msgstr "" + +#: ../../papers/repvgg.md +msgid "RepVGG-A0\\*" +msgstr "" + +#: ../../papers/repvgg.md +msgid "120" +msgstr "" + +#: ../../papers/repvgg.md +msgid "9.11(train) | 8.31 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "1.52 (train) | 1.36 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md ../../papers/vgg.md +msgid "72.41" +msgstr "" + +#: ../../papers/repvgg.md +msgid "90.50" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repvgg/repvgg-A0_4xb64-" +"coslr-120e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"repvgg/deploy/repvgg-A0_deploy_4xb64-coslr-120e_in1k.py)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A0_3rdparty_4xb64-" +"coslr-120e_in1k_20210909-883ab98c.pth)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "RepVGG-A1\\*" +msgstr "" + +#: ../../papers/repvgg.md +msgid "14.09 (train) | 12.79 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "2.64 (train) | 2.37 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "74.47" +msgstr "" + +#: ../../papers/repvgg.md +msgid "91.85" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repvgg/repvgg-A1_4xb64-" +"coslr-120e_in1k.py) | [config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"repvgg/deploy/repvgg-A1_deploy_4xb64-coslr-120e_in1k.py)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A1_3rdparty_4xb64-" +"coslr-120e_in1k_20210909-24003a24.pth)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "RepVGG-A2\\*" +msgstr "" + +#: ../../papers/repvgg.md +msgid "28.21 (train) | 25.5 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "5.7 (train) | 5.12 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "76.48" +msgstr "" + +#: ../../papers/repvgg.md +msgid "93.01" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repvgg/repvgg-A2_4xb64-" +"coslr-120e_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"repvgg/deploy/repvgg-A2_deploy_4xb64-coslr-120e_in1k.py)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-A2_3rdparty_4xb64-" +"coslr-120e_in1k_20210909-97d7695a.pth)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "RepVGG-B0\\*" +msgstr "" + +#: ../../papers/repvgg.md +msgid "15.82 (train) | 14.34 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "3.42 (train) | 3.06 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "75.14" +msgstr "" + +#: ../../papers/repvgg.md +msgid "92.42" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repvgg/repvgg-B0_4xb64-" +"coslr-120e_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"repvgg/deploy/repvgg-B0_deploy_4xb64-coslr-120e_in1k.py)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B0_3rdparty_4xb64-" +"coslr-120e_in1k_20210909-446375f4.pth)" +msgstr "" + +#: ../../papers/repvgg.md +msgid 
"RepVGG-B1\\*" +msgstr "" + +#: ../../papers/repvgg.md +msgid "57.42 (train) | 51.83 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "13.16 (train) | 11.82 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "78.37" +msgstr "" + +#: ../../papers/repvgg.md +msgid "94.11" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repvgg/repvgg-B1_4xb64-" +"coslr-120e_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"repvgg/deploy/repvgg-B1_deploy_4xb64-coslr-120e_in1k.py)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1_3rdparty_4xb64-" +"coslr-120e_in1k_20210909-750cdf67.pth)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "RepVGG-B1g2\\*" +msgstr "" + +#: ../../papers/repvgg.md +msgid "45.78 (train) | 41.36 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "9.82 (train) | 8.82 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "77.79" +msgstr "" + +#: ../../papers/repvgg.md +msgid "93.88" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repvgg/repvgg-B1g2_4xb64-" +"coslr-120e_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"repvgg/deploy/repvgg-B1g2_deploy_4xb64-coslr-120e_in1k.py)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g2_3rdparty_4xb64-" +"coslr-120e_in1k_20210909-344f6422.pth)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "RepVGG-B1g4\\*" +msgstr "" + +#: ../../papers/repvgg.md +msgid "39.97 (train) | 36.13 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "8.15 (train) | 7.32 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "77.58" +msgstr "" + +#: ../../papers/repvgg.md ../../papers/seresnet.md +msgid "93.84" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repvgg/repvgg-B1g4_4xb64-" +"coslr-120e_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"repvgg/deploy/repvgg-B1g4_deploy_4xb64-coslr-120e_in1k.py)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B1g4_3rdparty_4xb64-" +"coslr-120e_in1k_20210909-d4c1a642.pth)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "RepVGG-B2\\*" +msgstr "" + +#: ../../papers/repvgg.md +msgid "89.02 (train) | 80.32 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "20.46 (train) | 18.39 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "78.78" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repvgg/repvgg-B2_4xb64-" +"coslr-120e_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/" +"repvgg/deploy/repvgg-B2_deploy_4xb64-coslr-120e_in1k.py)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2_3rdparty_4xb64-" +"coslr-120e_in1k_20210909-bd6b937c.pth)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "RepVGG-B2g4\\*" +msgstr "" + +#: ../../papers/repvgg.md +msgid "200" +msgstr "" + +#: ../../papers/repvgg.md +msgid "61.76 (train) | 55.78 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid 
"12.63 (train) | 11.34 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repvgg/repvgg-B2g4_4xb64-" +"autoaug-lbs-mixup-coslr-200e_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/" +"blob/1.x/configs/repvgg/deploy/repvgg-B2g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B2g4_3rdparty_4xb64-autoaug-lbs-" +"mixup-coslr-200e_in1k_20210909-7b7955f0.pth)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "RepVGG-B3\\*" +msgstr "" + +#: ../../papers/repvgg.md +msgid "123.09 (train) | 110.96 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "29.17 (train) | 26.22 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "80.52" +msgstr "" + +#: ../../papers/repvgg.md +msgid "95.26" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repvgg/repvgg-B3_4xb64-" +"autoaug-lbs-mixup-coslr-200e_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/" +"blob/1.x/configs/repvgg/deploy/repvgg-B3_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3_3rdparty_4xb64-autoaug-lbs-" +"mixup-coslr-200e_in1k_20210909-dda968bf.pth)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "RepVGG-B3g4\\*" +msgstr "" + +#: ../../papers/repvgg.md +msgid "83.83 (train) | 75.63 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "17.9 (train) | 16.08 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "80.22" +msgstr "" + +#: ../../papers/repvgg.md +msgid "95.10" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repvgg/repvgg-B3g4_4xb64-" +"autoaug-lbs-mixup-coslr-200e_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/" +"blob/1.x/configs/repvgg/deploy/repvgg-B3g4_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-B3g4_3rdparty_4xb64-autoaug-lbs-" +"mixup-coslr-200e_in1k_20210909-4e54846a.pth)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "RepVGG-D2se\\*" +msgstr "" + +#: ../../papers/repvgg.md +msgid "133.33 (train) | 120.39 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "36.56 (train) | 32.85 (deploy)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "95.94" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[config (train)](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/repvgg/repvgg-D2se_4xb64-" +"autoaug-lbs-mixup-coslr-200e_in1k.py) |[config (deploy)](https://github.com/open-mmlab/mmclassification/" +"blob/1.x/configs/repvgg/deploy/repvgg-D2se_deploy_4xb64-autoaug-lbs-mixup-coslr-200e_in1k.py)" +msgstr "" + +#: ../../papers/repvgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/repvgg/repvgg-D2se_3rdparty_4xb64-autoaug-lbs-" +"mixup-coslr-200e_in1k_20210909-cf3139b7.pth)" +msgstr "" + +#: ../../papers/res2net.md:4 +msgid "Res2Net" +msgstr "" + +#: ../../papers/res2net.md:6 +msgid "[Res2Net: A New Multi-scale Backbone Architecture](https://arxiv.org/pdf/1904.01169.pdf)" +msgstr "" + +#: ../../papers/res2net.md:12 +msgid "" +"Representing features at multiple 
scales is of great importance for numerous vision tasks. Recent advances " +"in backbone convolutional neural networks (CNNs) continually demonstrate stronger multi-scale " +"representation ability, leading to consistent performance gains on a wide range of applications. However, " +"most existing methods represent the multi-scale features in a layer-wise manner. In this paper, we propose " +"a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections " +"within one single residual block. The Res2Net represents multi-scale features at a granular level and " +"increases the range of receptive fields for each network layer. The proposed Res2Net block can be plugged " +"into the state-of-the-art backbone CNN models, e.g., ResNet, ResNeXt, and DLA. We evaluate the Res2Net " +"block on all these models and demonstrate consistent performance gains over baseline models on widely-used " +"datasets, e.g., CIFAR-100 and ImageNet. Further ablation studies and experimental results on representative " +"computer vision tasks, i.e., object detection, class activation mapping, and salient object detection, " +"further verify the superiority of the Res2Net over the state-of-the-art baseline methods." +msgstr "" + +#: ../../papers/res2net.md +msgid "Res2Net-50-14w-8s\\*" +msgstr "" + +#: ../../papers/res2net.md +msgid "25.06" +msgstr "" + +#: ../../papers/res2net.md +msgid "4.22" +msgstr "" + +#: ../../papers/res2net.md +msgid "78.14" +msgstr "" + +#: ../../papers/res2net.md +msgid "93.85" +msgstr "" + +#: ../../papers/res2net.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/res2net/res2net50-w14-" +"s8_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/res2net.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w14-" +"s8_3rdparty_8xb32_in1k_20210927-bc967bf1.pth)" +msgstr "" + +#: ../../papers/res2net.md +msgid "Res2Net-50-26w-8s\\*" +msgstr "" + +#: ../../papers/res2net.md +msgid "48.40" +msgstr "" + +#: ../../papers/res2net.md +msgid "8.39" +msgstr "" + +#: ../../papers/res2net.md +msgid "94.36" +msgstr "" + +#: ../../papers/res2net.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/res2net/res2net50-w26-" +"s8_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/res2net.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net50-w26-" +"s8_3rdparty_8xb32_in1k_20210927-f547a94b.pth)" +msgstr "" + +#: ../../papers/res2net.md +msgid "Res2Net-101-26w-4s\\*" +msgstr "" + +#: ../../papers/res2net.md +msgid "45.21" +msgstr "" + +#: ../../papers/res2net.md +msgid "8.12" +msgstr "" + +#: ../../papers/res2net.md +msgid "79.19" +msgstr "" + +#: ../../papers/res2net.md +msgid "94.44" +msgstr "" + +#: ../../papers/res2net.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/res2net/res2net101-w26-" +"s4_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/res2net.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/res2net/res2net101-w26-" +"s4_3rdparty_8xb32_in1k_20210927-870b6c36.pth)" +msgstr "" + +#: ../../papers/res2net.md:28 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/Res2Net/Res2Net-PretrainedModels). " +"The config files of these models are only for validation. 
We don't ensure these config files' training "
+"accuracy and welcome you to contribute your reproduction results.*"
+msgstr ""
+
+#: ../../papers/resnet.md:4
+msgid "ResNet"
+msgstr ""
+
+#: ../../papers/resnet.md:6
+msgid ""
+"[Deep Residual Learning for Image Recognition](https://openaccess.thecvf.com/content_cvpr_2016/html/"
+"He_Deep_Residual_Learning_CVPR_2016_paper.html)"
+msgstr ""
+
+#: ../../papers/resnet.md:12
+msgid ""
+"**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead "
+"of learning unreferenced functions. In previous mainstream works, such as VGG, the neural network is a "
+"stack of layers, and every layer attempts to fit a desired underlying mapping. In ResNets, a few stacked "
+"layers are grouped as a block, and the layers in a block attempt to learn a residual mapping."
+msgstr ""
+
+#: ../../papers/resnet.md:17
+msgid ""
+"Formally, denoting the desired underlying mapping of a block as $\\mathcal{H}(x)$, split the underlying "
+"mapping into the sum of the identity and the residual mapping as $\\mathcal{H}(x) = x + \\mathcal{F}(x)$, "
+"and let the stacked non-linear layers fit the residual mapping $\\mathcal{F}(x)$."
+msgstr ""
+
+#: ../../papers/resnet.md:21
+msgid ""
+"Many works have shown that this method makes deep neural networks easier to optimize and lets them gain "
+"accuracy from considerably increased depth. The residual structure is now widely used in various models."
+msgstr ""
+
+#: ../../papers/resnet.md:37
+#, python-format
+msgid ""
+"The depth of representations is of central importance for many visual recognition tasks. Solely due to our "
+"extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. "
+"Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions, where we also won "
+"the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO "
+"segmentation.
" +msgstr "" + +#: ../../papers/resnet.md:92 +msgid "" +"For more configurable parameters, please refer to the [API](https://mmclassification.readthedocs.io/en/1.x/" +"api/generated/mmcls.models.backbones.ResNet.html#mmcls.models.backbones.ResNet)." +msgstr "" + +#: ../../papers/resnet.md:96 ../../papers/swin_transformer.md:88 ../../papers/swin_transformer_v2.md:98 +#: ../../papers/vision_transformer.md:99 +msgid "" +"The pre-trained models on ImageNet-21k are used to fine-tune, and therefore don't have evaluation results." +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNet-50-mill" +msgstr "" + +#: ../../papers/resnet.md:76 ../../papers/swin_transformer.md:66 ../../papers/swin_transformer_v2.md:76 +msgid "15.14" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-mill_in21k_20220331-" +"faac000b.pth)" +msgstr "" + +#: ../../papers/resnet.md:102 +msgid "" +"*The \"mill\" means using the mutil-label pretrain weight from [ImageNet-21K Pretraining for the Masses]" +"(https://github.com/Alibaba-MIIL/ImageNet21K).*" +msgstr "" + +#: ../../papers/resnet.md:104 +msgid "Cifar10" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNet-18" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "11.17" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "0.56" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "94.82" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "99.87" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet18_8xb16_cifar10.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-" +"bd6371c8.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNet-34" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "21.28" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "1.16" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet34_8xb16_cifar10.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-" +"a8aa36a6.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNet-50" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "1.31" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "95.55" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "99.91" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet50_8xb16_cifar10.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9." 
+"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-" +"f54bfad9.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNet-101" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "42.51" +msgstr "" + +#: ../../papers/resnet.md:76 ../../papers/van.md +msgid "2.52" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "95.58" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet101_8xb16_cifar10.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnet101_b16x8_cifar10_20210528-2d29e936.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNet-152" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "58.16" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "3.74" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "95.76" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "99.89" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet152_8xb16_cifar10.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnet152_b16x8_cifar10_20210528-3e8e9178.log.json)" +msgstr "" + +#: ../../papers/resnet.md:114 +msgid "Cifar100" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "23.71" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "79.90" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "95.19" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet50_8xb16_cifar100.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnet50_b16x8_cifar100_20210528-67b58a1b.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "11.69" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "1.82" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "69.90" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "89.43" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet18_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-" +"fbbb1da6.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "21.8" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "3.68" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "73.62" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "91.59" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet34_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6." 
+"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-" +"f257d4e6.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "25.56" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "76.55" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "93.06" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet50_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-" +"ea4938fc.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "44.55" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "7.85" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "77.97" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "94.06" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet101_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnet101_8xb32_in1k_20210831-539c63f8.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "60.19" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "11.58" +msgstr "" + +#: ../../papers/resnet.md:76 ../../papers/wrn.md +msgid "78.48" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "94.13" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet152_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnet152_8xb32_in1k_20210901-4d7582fa.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNetV1C-50" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "25.58" +msgstr "" + +#: ../../papers/resnet.md:76 ../../papers/swin_transformer.md:66 +msgid "4.36" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "77.01" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnetv1c50_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c50_8xb32_in1k_20220214-3343eccd." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnetv1c50_8xb32_in1k_20220214-3343eccd.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNetV1C-101" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "8.09" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "78.30" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "94.27" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnetv1c101_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c101_8xb32_in1k_20220214-434fe45f." 
+"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnetv1c101_8xb32_in1k_20220214-434fe45f.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNetV1C-152" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "60.21" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "11.82" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "78.76" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "94.41" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnetv1c152_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-c013291f." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-" +"c013291f.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNetV1D-50" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "77.54" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "93.57" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnetv1d50_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-" +"db14775a.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnetv1d50_b32x8_imagenet_20210531-db14775a.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNetV1D-101" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "78.93" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "94.48" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnetv1d101_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth) | [log](https://download.openmmlab.com/mmclassification/" +"v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNetV1D-152" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "94.70" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnetv1d152_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth) | [log](https://download.openmmlab.com/mmclassification/" +"v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNet-50 (fp16)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "76.30" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "93.07" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet50_8xb32-fp16_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-" +"b3964210.pth) | [log](https://download.openmmlab.com/mmclassification/v0/fp16/" +"resnet50_batch256_fp16_imagenet_20210320-b3964210.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "Wide-ResNet-50\\*" +msgstr "" + +#: ../../papers/resnet.md:76 
../../papers/wrn.md +msgid "68.88" +msgstr "" + +#: ../../papers/resnet.md:76 ../../papers/wrn.md +msgid "11.44" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/../wrn/wide-" +"resnet50_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/wide-" +"resnet50_3rdparty_8xb32_in1k_20220304-66678344.pth)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "Wide-ResNet-101\\*" +msgstr "" + +#: ../../papers/resnet.md:76 ../../papers/wrn.md +msgid "126.89" +msgstr "" + +#: ../../papers/resnet.md:76 ../../papers/wrn.md +msgid "22.81" +msgstr "" + +#: ../../papers/resnet.md:76 ../../papers/wrn.md +msgid "78.84" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/../wrn/wide-" +"resnet101_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/wide-" +"resnet101_3rdparty_8xb32_in1k_20220304-8d5f9d61.pth)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNet-50 (rsb-a1)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "80.12" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet50_8xb256-rsb-" +"a1-600e_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-" +"a1-600e_in1k_20211228-20e21305.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNet-50 (rsb-a2)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet50_8xb256-rsb-" +"a2-300e_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-" +"a2-300e_in1k_20211228-0fd8be6e.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnet50_8xb256-rsb-a2-300e_in1k_20211228-0fd8be6e.log.json)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "ResNet-50 (rsb-a3)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "93.80" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet50_8xb256-rsb-" +"a3-100e_in1k.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-" +"a3-100e_in1k_20211228-3493673c.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnet/" +"resnet50_8xb256-rsb-a3-100e_in1k_20211228-3493673c.log.json)" +msgstr "" + +#: ../../papers/resnet.md:142 +msgid "" +"*The \"rsb\" means using the training settings from [ResNet strikes back: An improved training procedure in " +"timm](https://arxiv.org/abs/2110.00476).*" +msgstr "" + +#: ../../papers/resnet.md:144 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/pytorch/vision). The config files " +"of these models are only for validation. 
We don't ensure these config files' training accuracy and welcome " +"you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/resnet.md:146 ../../papers/swin_transformer.md:114 +msgid "CUB-200-2011" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[ImageNet-21k-mill](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_3rdparty-" +"mill_in21k_20220331-faac000b.pth)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "448x448" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "23.92" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "16.48" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "88.45" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnet/resnet50_8xb8_cub.py)" +msgstr "" + +#: ../../papers/resnet.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.pth) " +"| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.log." +"json)" +msgstr "" + +#: ../../papers/resnext.md:4 +msgid "ResNeXt" +msgstr "" + +#: ../../papers/resnext.md:6 +msgid "" +"[Aggregated Residual Transformations for Deep Neural Networks](https://openaccess.thecvf.com/" +"content_cvpr_2017/html/Xie_Aggregated_Residual_Transformations_CVPR_2017_paper.html)" +msgstr "" + +#: ../../papers/resnext.md:12 +msgid "" +"We present a simple, highly modularized network architecture for image classification. Our network is " +"constructed by repeating a building block that aggregates a set of transformations with the same topology. " +"Our simple design results in a homogeneous, multi-branch architecture that has only a few hyper-parameters " +"to set. This strategy exposes a new dimension, which we call \"cardinality\" (the size of the set of " +"transformations), as an essential factor in addition to the dimensions of depth and width. On the " +"ImageNet-1K dataset, we empirically show that even under the restricted condition of maintaining " +"complexity, increasing cardinality is able to improve classification accuracy. Moreover, increasing " +"cardinality is more effective than going deeper or wider when we increase the capacity. Our models, named " +"ResNeXt, are the foundations of our entry to the ILSVRC 2016 classification task in which we secured 2nd " +"place. We further investigate ResNeXt on an ImageNet-5K set and the COCO detection set, also showing better " +"results than its ResNet counterpart. The code and models are publicly available online." +msgstr "" + +#: ../../papers/resnext.md +msgid "ResNeXt-32x4d-50" +msgstr "" + +#: ../../papers/resnext.md +msgid "25.03" +msgstr "" + +#: ../../papers/resnext.md +msgid "4.27" +msgstr "" + +#: ../../papers/resnext.md +msgid "77.90" +msgstr "" + +#: ../../papers/resnext.md +msgid "93.66" +msgstr "" + +#: ../../papers/resnext.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnext/resnext50-32x4d_8xb32_in1k." 
+"py)" +msgstr "" + +#: ../../papers/resnext.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnext/" +"resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth) | [log](https://download.openmmlab.com/" +"mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.log.json)" +msgstr "" + +#: ../../papers/resnext.md +msgid "ResNeXt-32x4d-101" +msgstr "" + +#: ../../papers/resnext.md +msgid "44.18" +msgstr "" + +#: ../../papers/resnext.md +msgid "78.61" +msgstr "" + +#: ../../papers/resnext.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnext/" +"resnext101-32x4d_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnext.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-" +"e0fa3dd5.pth) | [log](https://download.openmmlab.com/mmclassification/v0/resnext/" +"resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.log.json)" +msgstr "" + +#: ../../papers/resnext.md +msgid "ResNeXt-32x8d-101" +msgstr "" + +#: ../../papers/resnext.md +msgid "88.79" +msgstr "" + +#: ../../papers/resnext.md +msgid "16.5" +msgstr "" + +#: ../../papers/resnext.md +msgid "79.27" +msgstr "" + +#: ../../papers/resnext.md +msgid "94.58" +msgstr "" + +#: ../../papers/resnext.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnext/" +"resnext101-32x8d_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnext.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnext/" +"resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth) | [log](https://download.openmmlab.com/" +"mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.log.json)" +msgstr "" + +#: ../../papers/resnext.md +msgid "ResNeXt-32x4d-152" +msgstr "" + +#: ../../papers/resnext.md +msgid "59.95" +msgstr "" + +#: ../../papers/resnext.md +msgid "11.8" +msgstr "" + +#: ../../papers/resnext.md +msgid "94.33" +msgstr "" + +#: ../../papers/resnext.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/resnext/" +"resnext152-32x4d_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/resnext.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/resnext/" +"resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth) | [log](https://download.openmmlab.com/" +"mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.log.json)" +msgstr "" + +#: ../../papers/seresnet.md:4 +msgid "SE-ResNet" +msgstr "" + +#: ../../papers/seresnet.md:6 +msgid "" +"[Squeeze-and-Excitation Networks](https://openaccess.thecvf.com/content_cvpr_2018/html/Hu_Squeeze-and-" +"Excitation_Networks_CVPR_2018_paper.html)" +msgstr "" + +#: ../../papers/seresnet.md:12 +msgid "" +"The central building block of convolutional neural networks (CNNs) is the convolution operator, which " +"enables networks to construct informative features by fusing both spatial and channel-wise information " +"within local receptive fields at each layer. A broad range of prior research has investigated the spatial " +"component of this relationship, seeking to strengthen the representational power of a CNN by enhancing the " +"quality of spatial encodings throughout its feature hierarchy. 
In this work, we focus instead on the " +"channel relationship and propose a novel architectural unit, which we term the \"Squeeze-and-Excitation" +"\" (SE) block, that adaptively recalibrates channel-wise feature responses by explicitly modelling " +"interdependencies between channels. We show that these blocks can be stacked together to form SENet " +"architectures that generalise extremely effectively across different datasets. We further demonstrate that " +"SE blocks bring significant improvements in performance for existing state-of-the-art CNNs at slight " +"additional computational cost. Squeeze-and-Excitation Networks formed the foundation of our ILSVRC 2017 " +"classification submission which won first place and reduced the top-5 error to 2.251%, surpassing the " +"winning entry of 2016 by a relative improvement of ~25%." +msgstr "" + +#: ../../papers/seresnet.md +msgid "SE-ResNet-50" +msgstr "" + +#: ../../papers/seresnet.md +msgid "28.09" +msgstr "" + +#: ../../papers/seresnet.md +msgid "4.13" +msgstr "" + +#: ../../papers/seresnet.md +msgid "77.74" +msgstr "" + +#: ../../papers/seresnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/seresnet/seresnet50_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/seresnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-" +"ae206104.pth) | [log](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-" +"resnet50_batch256_imagenet_20200708-657b3c36.log.json)" +msgstr "" + +#: ../../papers/seresnet.md +msgid "SE-ResNet-101" +msgstr "" + +#: ../../papers/seresnet.md +msgid "49.33" +msgstr "" + +#: ../../papers/seresnet.md +msgid "7.86" +msgstr "" + +#: ../../papers/seresnet.md +msgid "94.07" +msgstr "" + +#: ../../papers/seresnet.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/seresnet/seresnet101_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/seresnet.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/se-resnet/se-" +"resnet101_batch256_imagenet_20200804-ba5b51d4.pth) | [log](https://download.openmmlab.com/mmclassification/" +"v0/se-resnet/se-resnet101_batch256_imagenet_20200708-038a4d04.log.json)" +msgstr "" + +#: ../../papers/shufflenet_v1.md:4 +msgid "ShuffleNet V1" +msgstr "" + +#: ../../papers/shufflenet_v1.md:6 +msgid "" +"[ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices](https://openaccess." +"thecvf.com/content_cvpr_2018/html/Zhang_ShuffleNet_An_Extremely_CVPR_2018_paper.html)" +msgstr "" + +#: ../../papers/shufflenet_v1.md:12 +msgid "" +"We introduce an extremely computation-efficient CNN architecture named ShuffleNet, which is designed " +"specially for mobile devices with very limited computing power (e.g., 10-150 MFLOPs). The new architecture " +"utilizes two new operations, pointwise group convolution and channel shuffle, to greatly reduce computation " +"cost while maintaining accuracy. Experiments on ImageNet classification and MS COCO object detection " +"demonstrate the superior performance of ShuffleNet over other structures, e.g. lower top-1 error (absolute " +"7.8%) than recent MobileNet on ImageNet classification task, under the computation budget of 40 MFLOPs. On " +"an ARM-based mobile device, ShuffleNet achieves ~13x actual speedup over AlexNet while maintaining " +"comparable accuracy." 
+msgstr "" + +#: ../../papers/shufflenet_v1.md +msgid "ShuffleNetV1 1.0x (group=3)" +msgstr "" + +#: ../../papers/shufflenet_v1.md +msgid "0.146" +msgstr "" + +#: ../../papers/shufflenet_v1.md +msgid "68.13" +msgstr "" + +#: ../../papers/shufflenet_v1.md +msgid "87.81" +msgstr "" + +#: ../../papers/shufflenet_v1.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/shufflenet_v1/shufflenet-" +"v1-1x_16xb64_in1k.py)" +msgstr "" + +#: ../../papers/shufflenet_v1.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/" +"shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth) | [log](https://download.openmmlab.com/" +"mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.log.json)" +msgstr "" + +#: ../../papers/shufflenet_v2.md:4 +msgid "ShuffleNet V2" +msgstr "" + +#: ../../papers/shufflenet_v2.md:6 +msgid "" +"[Shufflenet v2: Practical guidelines for efficient cnn architecture design](https://openaccess.thecvf.com/" +"content_ECCV_2018/papers/Ningning_Light-weight_CNN_Architecture_ECCV_2018_paper.pdf)" +msgstr "" + +#: ../../papers/shufflenet_v2.md:12 +msgid "" +"Currently, the neural network architecture design is mostly guided by the *indirect* metric of computation " +"complexity, i.e., FLOPs. However, the *direct* metric, e.g., speed, also depends on the other factors such " +"as memory access cost and platform characterics. Thus, this work proposes to evaluate the direct metric on " +"the target platform, beyond only considering FLOPs. Based on a series of controlled experiments, this work " +"derives several practical *guidelines* for efficient network design. Accordingly, a new architecture is " +"presented, called *ShuffleNet V2*. Comprehensive ablation experiments verify that our model is the state-of-" +"the-art in terms of speed and accuracy tradeoff." +msgstr "" + +#: ../../papers/shufflenet_v2.md +msgid "ShuffleNetV2 1.0x" +msgstr "" + +#: ../../papers/shufflenet_v2.md +msgid "2.28" +msgstr "" + +#: ../../papers/shufflenet_v2.md +msgid "0.149" +msgstr "" + +#: ../../papers/shufflenet_v2.md +msgid "69.55" +msgstr "" + +#: ../../papers/shufflenet_v2.md +msgid "88.92" +msgstr "" + +#: ../../papers/shufflenet_v2.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/shufflenet_v2/shufflenet-" +"v2-1x_16xb64_in1k.py)" +msgstr "" + +#: ../../papers/shufflenet_v2.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/" +"shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth) | [log](https://download.openmmlab.com/" +"mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200804-8860eec9.log.json)" +msgstr "" + +#: ../../papers/swin_transformer.md:4 +msgid "Swin Transformer" +msgstr "" + +#: ../../papers/swin_transformer.md:6 +msgid "" +"[Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/pdf/2103.14030." +"pdf)" +msgstr "" + +#: ../../papers/swin_transformer.md:12 +msgid "" +"**Swin Transformer** (the name **Swin** stands for Shifted window) is initially described in [the paper]" +"(https://arxiv.org/pdf/2103.14030.pdf), which capably serves as a general-purpose backbone for computer " +"vision. It is basically a hierarchical Transformer whose representation is computed with shifted windows. 
" +"The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-" +"overlapping local windows while also allowing for cross-window connection." +msgstr "" + +#: ../../papers/swin_transformer.md:14 +msgid "" +"Swin Transformer achieves strong performance on COCO object detection (58.7 box AP and 51.1 mask AP on test-" +"dev) and ADE20K semantic segmentation (53.5 mIoU on val), surpassing previous models by a large margin." +msgstr "" + +#: ../../papers/swin_transformer.md:82 +msgid "" +"For more configurable parameters, please refer to the [API](https://mmclassification.readthedocs.io/en/1.x/" +"api/generated/mmcls.models.backbones.SwinTransformer.html#mmcls.models.backbones.SwinTransformer)." +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "Swin-B" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-" +"base_3rdparty_in21k.pth)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "44.49" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-" +"base_3rdparty_in21k-384px.pth)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "Swin-L" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "195.00" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "34.04" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-" +"large_3rdparty_in21k.pth)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "195.20" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "100.04" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "Swin-T" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "28.29" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "81.18" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer/swin-" +"tiny_16xb64_in1k.py)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/" +"swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth) | [log](https://download.openmmlab.com/" +"mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925.log.json)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "Swin-S" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "49.61" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "8.52" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "83.02" +msgstr "" + +#: ../../papers/swin_transformer.md:66 ../../papers/twins.md +msgid "96.29" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer/swin-" +"small_16xb64_in1k.py)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/" +"swin_small_224_b16x64_300e_imagenet_20210615_110219-7f9d988b.pth) | [log](https://download.openmmlab.com/" +"mmclassification/v0/swin-transformer/swin_small_224_b16x64_300e_imagenet_20210615_110219.log.json)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "87.77" +msgstr "" + +#: ../../papers/swin_transformer.md:66 
+msgid "83.36" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer/swin-" +"base_16xb64_in1k.py)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/" +"swin_base_224_b16x64_300e_imagenet_20210616_190742-93230b0d.pth) | [log](https://download.openmmlab.com/" +"mmclassification/v0/swin-transformer/swin_base_224_b16x64_300e_imagenet_20210616_190742.log.json)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 ../../papers/swin_transformer_v2.md:76 +msgid "Swin-S\\*" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "83.21" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "96.25" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/" +"swin_small_patch4_window7_224-cc7a01c9.pth)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 ../../papers/swin_transformer_v2.md:76 +msgid "Swin-B\\*" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "83.42" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/" +"swin_base_patch4_window7_224-4670dd19.pth)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "87.90" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "84.49" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer/swin-" +"base_16xb64_in1k-384px.py)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/" +"swin_base_patch4_window12_384-02c598a4.pth)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "85.16" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "97.50" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/" +"swin_base_patch4_window7_224_22kto1k-f967f799.pth)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "86.44" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "98.05" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/" +"swin_base_patch4_window12_384_22kto1k-d59b0d1d.pth)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 ../../papers/swin_transformer_v2.md:76 +msgid "Swin-L\\*" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "196.53" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "86.24" +msgstr "" + +#: ../../papers/swin_transformer.md:66 ../../papers/swin_transformer_v2.md:76 +msgid "97.88" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer/swin-" +"large_16xb64_in1k.py)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/" +"swin_large_patch4_window7_224_22kto1k-5f0996db.pth)" +msgstr "" + +#: ../../papers/swin_transformer.md:66 ../../papers/swin_transformer_v2.md:76 +msgid "196.74" +msgstr "" + +#: ../../papers/swin_transformer.md:66 +msgid "87.25" +msgstr "" + +#: 
../../papers/swin_transformer.md:66
+msgid "98.25"
+msgstr ""
+
+#: ../../papers/swin_transformer.md:66
+msgid ""
+"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer/swin-"
+"large_16xb64_in1k-384px.py)"
+msgstr ""
+
+#: ../../papers/swin_transformer.md:66
+msgid ""
+"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/"
+"swin_large_patch4_window12_384_22kto1k-0a40944b.pth)"
+msgstr ""
+
+#: ../../papers/swin_transformer.md:112 ../../papers/swin_transformer_v2.md:120
+msgid ""
+"*Models with * are converted from the [official repo](https://github.com/microsoft/Swin-Transformer#main-"
+"results-on-imagenet-with-pretrained-models). The config files of these models are only for validation. We "
+"don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.*"
+msgstr ""
+
+#: ../../papers/swin_transformer.md:66
+msgid ""
+"[ImageNet-21k](https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-"
+"base_3rdparty_in21k-384px.pth)"
+msgstr ""
+
+#: ../../papers/swin_transformer.md:66
+msgid "195.51"
+msgstr ""
+
+#: ../../papers/swin_transformer.md:66
+msgid "91.87"
+msgstr ""
+
+#: ../../papers/swin_transformer.md:66
+msgid ""
+"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer/swin-"
+"large_8xb8_cub_384px.py)"
+msgstr ""
+
+#: ../../papers/swin_transformer.md:66
+msgid ""
+"[model](https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin-"
+"large_8xb8_cub_384px_20220307-1bbaee6a.pth) | [log](https://download.openmmlab.com/mmclassification/v0/swin-"
+"transformer/swin-large_8xb8_cub_384px_20220307-1bbaee6a.log.json)"
+msgstr ""
+
+#: ../../papers/swin_transformer_v2.md:4
+msgid "Swin Transformer V2"
+msgstr ""
+
+#: ../../papers/swin_transformer_v2.md:6
+msgid "[Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883.pdf)"
+msgstr ""
+
+#: ../../papers/swin_transformer_v2.md:12
+msgid ""
+"**Swin Transformer V2** is a work on scaling up vision models based on [Swin Transformer](https://github."
+"com/open-mmlab/mmclassification/tree/1.x/configs/swin_transformer). In the vision field, we cannot improve "
+"performance simply by scaling up the model as in NLP. The possible reasons mentioned in the article are:"
+msgstr ""
+
+#: ../../papers/swin_transformer_v2.md:14
+msgid "Training instability when scaling up the vision model"
+msgstr ""
+
+#: ../../papers/swin_transformer_v2.md:15
+msgid "Migrating a model trained at low resolution to higher-resolution tasks"
+msgstr ""
+
+#: ../../papers/swin_transformer_v2.md:16
+msgid "Too much GPU memory"
+msgstr ""
+
+#: ../../papers/swin_transformer_v2.md:18
+msgid "To solve these issues, the following improvements are proposed in the paper:"
+msgstr ""
+
+#: ../../papers/swin_transformer_v2.md:20
+msgid "post normalization: layer normalization after the self-attention layer and MLP block"
+msgstr ""
+
+#: ../../papers/swin_transformer_v2.md:21
+msgid ""
+"scaled cosine attention approach: use cosine similarity to calculate the relationship between token pairs"
+msgstr ""
+
+#: ../../papers/swin_transformer_v2.md:22
+msgid "log-spaced continuous position bias: redefine relative position encoding"
+msgstr ""
+
+#: ../../papers/swin_transformer_v2.md:36
+msgid ""
+"Large-scale NLP models have been shown to significantly improve the performance on language tasks with no "
+"signs of saturation. 
They also demonstrate amazing few-shot capabilities like that of human beings. This " +"paper aims to explore large-scale models in computer vision. We tackle three major issues in training and " +"application of large vision models, including training instability, resolution gaps between pre-training " +"and fine-tuning, and hunger on labelled data. Three main techniques are proposed: 1) a residual-post-norm " +"method combined with cosine attention to improve training stability; 2) A log-spaced continuous position " +"bias method to effectively transfer models pre-trained using low-resolution images to downstream tasks with " +"high-resolution inputs; 3) A self-supervised pre-training method, SimMIM, to reduce the needs of vast " +"labeled images. Through these techniques, this paper successfully trained a 3 billion-parameter Swin " +"Transformer V2 model, which is the largest dense vision model to date, and makes it capable of training " +"with images of up to 1,536×1,536 resolution. It set new performance records on 4 representative vision " +"tasks, including ImageNet-V2 image classification, COCO object detection, ADE20K semantic segmentation, and " +"Kinetics-400 video action classification. Also note our training is much more efficient than that in " +"Google's billion-level visual models, which consumes 40 times less labelled data and 40 times less training " +"time." +msgstr "" + +#: ../../papers/swin_transformer_v2.md:92 +msgid "" +"For more configurable parameters, please refer to the [API](https://mmclassification.readthedocs.io/en/1.x/" +"api/generated/mmcls.models.backbones.SwinTransformerV2.html#mmcls.models.backbones.SwinTransformerV2)." +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "192x192" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "87.92" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "8.51" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-base-" +"w12_3rdparty_in21k-192px_20220803-f7dc9763.pth)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "19.04" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-v2/pretrain/swinv2-large-" +"w12_3rdparty_in21k-192px_20220803-d9073fee.pth)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "window" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "Swin-T\\*" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "256x256" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "8x8" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "28.35" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "4.35" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "95.87" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer_v2/swinv2-tiny-" +"w8_16xb64_in1k-256px.py)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-" +"w8_3rdparty_in1k-256px_20220803-e318968f.pth)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "16x16" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "4.4" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "82.81" +msgstr "" + +#: 
../../papers/swin_transformer_v2.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer_v2/swinv2-tiny-" +"w16_16xb64_in1k-256px.py)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-tiny-" +"w16_3rdparty_in1k-256px_20220803-9651cdd7.pth)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "49.73" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "8.45" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "83.74" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "96.6" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer_v2/swinv2-small-" +"w8_16xb64_in1k-256px.py)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-" +"w8_3rdparty_in1k-256px_20220803-b01a4332.pth)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "8.57" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "84.13" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "96.83" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer_v2/swinv2-small-" +"w16_16xb64_in1k-256px.py)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-small-" +"w16_3rdparty_in1k-256px_20220803-b707d206.pth)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "14.99" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "84.2" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer_v2/swinv2-base-" +"w8_16xb64_in1k-256px.py)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-" +"w8_3rdparty_in1k-256px_20220803-8ff28f2b.pth)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "84.6" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "97.05" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer_v2/swinv2-base-" +"w16_16xb64_in1k-256px.py)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-" +"w16_3rdparty_in1k-256px_20220803-5a1886b7.pth)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "86.17" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer_v2/swinv2-base-" +"w16_in21k-pre_16xb64_in1k-256px.py)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w16_in21k-" +"pre_3rdparty_in1k-256px_20220803-8d7aa8ad.pth)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "24x24" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "34.07" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "87.14" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "98.23" 
+msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer_v2/swinv2-base-" +"w24_in21k-pre_16xb64_in1k-384px.py)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-base-w24_in21k-" +"pre_3rdparty_in1k-384px_20220803-44eb70f8.pth)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "256X256" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "196.75" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "33.86" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "86.93" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "98.06" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer_v2/swinv2-large-" +"w16_in21k-pre_16xb64_in1k-256px.py)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w16_in21k-" +"pre_3rdparty_in1k-256px_20220803-c40cbed7.pth)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "76.2" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "87.59" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "98.27" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/swin_transformer_v2/swinv2-large-" +"w24_in21k-pre_16xb64_in1k-384px.py)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:76 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/swin-v2/swinv2-large-w24_in21k-" +"pre_3rdparty_in1k-384px_20220803-3b36c165.pth)" +msgstr "" + +#: ../../papers/swin_transformer_v2.md:122 +msgid "" +"*ImageNet-21k pretrained models with input resolution of 256x256 and 384x384 both fine-tuned from the same " +"pre-training model using a smaller input resolution of 192x192.*" +msgstr "" + +#: ../../papers/t2t_vit.md:4 +msgid "Tokens-to-Token ViT" +msgstr "" + +#: ../../papers/t2t_vit.md:6 +msgid "" +"[Tokens-to-Token ViT: Training Vision Transformers from Scratch on ImageNet](https://arxiv.org/" +"abs/2101.11986)" +msgstr "" + +#: ../../papers/t2t_vit.md:12 +#, python-format +msgid "" +"Transformers, which are popular for language modeling, have been explored for solving vision tasks " +"recently, e.g., the Vision Transformer (ViT) for image classification. The ViT model splits each image into " +"a sequence of tokens with fixed length and then applies multiple Transformer layers to model their global " +"relation for classification. However, ViT achieves inferior performance to CNNs when trained from scratch " +"on a midsize dataset like ImageNet. We find it is because: 1) the simple tokenization of input images fails " +"to model the important local structure such as edges and lines among neighboring pixels, leading to low " +"training sample efficiency; 2) the redundant attention backbone design of ViT leads to limited feature " +"richness for fixed computation budgets and limited training samples. 
To overcome such limitations, we " +"propose a new Tokens-To-Token Vision Transformer (T2T-ViT), which incorporates 1) a layer-wise Tokens-to-" +"Token (T2T) transformation to progressively structurize the image to tokens by recursively aggregating " +"neighboring Tokens into one Token (Tokens-to-Token), such that local structure represented by surrounding " +"tokens can be modeled and tokens length can be reduced; 2) an efficient backbone with a deep-narrow " +"structure for vision transformer motivated by CNN architecture design after empirical study. Notably, T2T-" +"ViT reduces the parameter count and MACs of vanilla ViT by half, while achieving more than 3.0% improvement " +"when trained from scratch on ImageNet. It also outperforms ResNets and achieves comparable performance with " +"MobileNets by directly training on ImageNet. For example, T2T-ViT with comparable size to ResNet50 (21.5M " +"parameters) can achieve 83.3% top1 accuracy in image resolution 384×384 on ImageNet." +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "T2T-ViT_t-14" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "21.47" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "4.34" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "81.83" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "95.84" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py)" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_8xb64_in1k_20211220-" +"f7378dd5.pth) | [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-" +"t-14_8xb64_in1k_20211220-f7378dd5.log.json)" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "T2T-ViT_t-19" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "39.08" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "7.80" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "82.63" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/t2t_vit/t2t-vit-t-19_8xb64_in1k.py)" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-" +"t-19_8xb64_in1k_20211214-7f5e3aaf.pth) | [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/" +"t2t-vit-t-19_8xb64_in1k_20211214-7f5e3aaf.log.json)" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "T2T-ViT_t-24" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "64.00" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "12.69" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "82.71" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/t2t_vit/t2t-vit-t-24_8xb64_in1k.py)" +msgstr "" + +#: ../../papers/t2t_vit.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-24_8xb64_in1k_20211214-" +"b2a68ae3.pth) | [log](https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-" +"t-24_8xb64_in1k_20211214-b2a68ae3.log.json)" +msgstr "" + +#: ../../papers/t2t_vit.md:28 +msgid "" +"*In consistent with the [official repo](https://github.com/yitu-opensource/T2T-ViT), we adopt the best " +"checkpoints during training.*" +msgstr "" + +#: ../../papers/tnt.md:4 +msgid "TNT" +msgstr "" + +#: ../../papers/tnt.md:6 +msgid "[Transformer in Transformer](https://arxiv.org/abs/2103.00112)" +msgstr "" + +#: ../../papers/tnt.md:12 +#, python-format +msgid "" 
+"Transformer is a new kind of neural architecture which encodes the input data as powerful features via the " +"attention mechanism. Basically, the visual transformers first divide the input images into several local " +"patches and then calculate both representations and their relationship. Since natural images are of high " +"complexity with abundant detail and color information, the granularity of the patch dividing is not fine " +"enough for excavating features of objects in different scales and locations. In this paper, we point out " +"that the attention inside these local patches are also essential for building visual transformers with high " +"performance and we explore a new architecture, namely, Transformer iN Transformer (TNT). Specifically, we " +"regard the local patches (e.g., 16×16) as \"visual sentences\" and present to further divide them into " +"smaller patches (e.g., 4×4) as \"visual words\". The attention of each word will be calculated with other " +"words in the given visual sentence with negligible computational costs. Features of both words and " +"sentences will be aggregated to enhance the representation ability. Experiments on several benchmarks " +"demonstrate the effectiveness of the proposed TNT architecture, e.g., we achieve an 81.5% top-1 accuracy on " +"the ImageNet, which is about 1.7% higher than that of the state-of-the-art visual transformer with similar " +"computational cost." +msgstr "" + +#: ../../papers/tnt.md +msgid "TNT-small\\*" +msgstr "" + +#: ../../papers/tnt.md +msgid "23.76" +msgstr "" + +#: ../../papers/tnt.md +msgid "3.36" +msgstr "" + +#: ../../papers/tnt.md +msgid "81.52" +msgstr "" + +#: ../../papers/tnt.md +msgid "95.73" +msgstr "" + +#: ../../papers/tnt.md +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/tnt/tnt-s-p16_16xb64_in1k.py)" +msgstr "" + +#: ../../papers/tnt.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/tnt/tnt-small-p16_3rdparty_in1k_20210903-" +"c56ee7df.pth)" +msgstr "" + +#: ../../papers/tnt.md:26 +msgid "" +"*Models with * are converted from [timm](https://github.com/rwightman/pytorch-image-models/). The config " +"files of these models are only for validation. We don't ensure these config files' training accuracy and " +"welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/twins.md:4 +msgid "Twins" +msgstr "" + +#: ../../papers/twins.md:6 +msgid "" +"[Twins: Revisiting the Design of Spatial Attention in Vision Transformers](http://arxiv-export-lb.library." +"cornell.edu/abs/2104.13840)" +msgstr "" + +#: ../../papers/twins.md:12 +msgid "" +"Very recently, a variety of vision transformer architectures for dense prediction tasks have been proposed " +"and they show that the design of spatial attention is critical to their success in these tasks. In this " +"work, we revisit the design of the spatial attention and demonstrate that a carefully-devised yet simple " +"spatial attention mechanism performs favourably against the state-of-the-art schemes. As a result, we " +"propose two vision transformer architectures, namely, Twins-PCPVT and Twins-SVT. Our proposed architectures " +"are highly-efficient and easy to implement, only involving matrix multiplications that are highly optimized " +"in modern deep learning frameworks. More importantly, the proposed architectures achieve excellent " +"performance on a wide range of visual tasks, including image level classification as well as dense " +"detection and segmentation. 
The simplicity and strong performance suggest that our proposed architectures " +"may serve as stronger backbones for many vision tasks. Our code is released at [this https URL](https://" +"github.com/Meituan-AutoML/Twins)." +msgstr "" + +#: ../../papers/twins.md +msgid "PCPVT-small\\*" +msgstr "" + +#: ../../papers/twins.md +msgid "24.11" +msgstr "" + +#: ../../papers/twins.md +msgid "3.67" +msgstr "" + +#: ../../papers/twins.md +msgid "81.14" +msgstr "" + +#: ../../papers/twins.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/twins/twins-pcpvt-" +"small_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/twins.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-" +"small_3rdparty_8xb128_in1k_20220126-ef23c132.pth)" +msgstr "" + +#: ../../papers/twins.md +msgid "PCPVT-base\\*" +msgstr "" + +#: ../../papers/twins.md +msgid "43.83" +msgstr "" + +#: ../../papers/twins.md +msgid "6.45" +msgstr "" + +#: ../../papers/twins.md +msgid "82.66" +msgstr "" + +#: ../../papers/twins.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/twins/twins-pcpvt-base_8xb128_in1k." +"py)" +msgstr "" + +#: ../../papers/twins.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-" +"base_3rdparty_8xb128_in1k_20220126-f8c4b0d5.pth)" +msgstr "" + +#: ../../papers/twins.md +msgid "PCPVT-large\\*" +msgstr "" + +#: ../../papers/twins.md +msgid "60.99" +msgstr "" + +#: ../../papers/twins.md +msgid "9.51" +msgstr "" + +#: ../../papers/twins.md +msgid "83.09" +msgstr "" + +#: ../../papers/twins.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/twins/twins-pcpvt-" +"large_16xb64_in1k.py)" +msgstr "" + +#: ../../papers/twins.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/twins/twins-pcpvt-" +"large_3rdparty_16xb64_in1k_20220126-c1ef8d80.pth)" +msgstr "" + +#: ../../papers/twins.md +msgid "SVT-small\\*" +msgstr "" + +#: ../../papers/twins.md +msgid "24.06" +msgstr "" + +#: ../../papers/twins.md +msgid "2.82" +msgstr "" + +#: ../../papers/twins.md +msgid "81.77" +msgstr "" + +#: ../../papers/twins.md +msgid "95.57" +msgstr "" + +#: ../../papers/twins.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/twins/twins-svt-small_8xb128_in1k." +"py)" +msgstr "" + +#: ../../papers/twins.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-" +"small_3rdparty_8xb128_in1k_20220126-8fe5205b.pth)" +msgstr "" + +#: ../../papers/twins.md +msgid "SVT-base\\*" +msgstr "" + +#: ../../papers/twins.md +msgid "56.07" +msgstr "" + +#: ../../papers/twins.md +msgid "8.35" +msgstr "" + +#: ../../papers/twins.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/twins/twins-svt-base_8xb128_in1k." +"py)" +msgstr "" + +#: ../../papers/twins.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-" +"base_3rdparty_8xb128_in1k_20220126-e31cc8e9.pth)" +msgstr "" + +#: ../../papers/twins.md +msgid "SVT-large\\*" +msgstr "" + +#: ../../papers/twins.md +msgid "99.27" +msgstr "" + +#: ../../papers/twins.md +msgid "14.82" +msgstr "" + +#: ../../papers/twins.md +msgid "83.60" +msgstr "" + +#: ../../papers/twins.md +msgid "96.50" +msgstr "" + +#: ../../papers/twins.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/twins/twins-svt-large_16xb64_in1k." 
+"py)" +msgstr "" + +#: ../../papers/twins.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/twins/twins-svt-" +"large_3rdparty_16xb64_in1k_20220126-4817645f.pth)" +msgstr "" + +#: ../../papers/twins.md:31 +msgid "" +"*Models with * are converted from [the official repo](https://github.com/Meituan-AutoML/Twins). The config " +"files of these models are only for validation. We don't ensure these config files' training accuracy and " +"welcome you to contribute your reproduction results. The validation accuracy is a little different from the " +"official paper because of the PyTorch version. This result is get in PyTorch=1.9 while the official result " +"is get in PyTorch=1.7*" +msgstr "" + +#: ../../papers/van.md:4 +msgid "Visual Attention Network" +msgstr "" + +#: ../../papers/van.md:6 +msgid "[Visual Attention Network](https://arxiv.org/pdf/2202.09741v2.pdf)" +msgstr "" + +#: ../../papers/van.md:12 +msgid "" +"While originally designed for natural language processing (NLP) tasks, the self-attention mechanism has " +"recently taken various computer vision areas by storm. However, the 2D nature of images brings three " +"challenges for applying self-attention in computer vision. (1) Treating images as 1D sequences neglects " +"their 2D structures. (2) The quadratic complexity is too expensive for high-resolution images. (3) It only " +"captures spatial adaptability but ignores channel adaptability. In this paper, we propose a novel large " +"kernel attention (LKA) module to enable self-adaptive and long-range correlations in self-attention while " +"avoiding the above issues. We further introduce a novel neural network based on LKA, namely Visual " +"Attention Network (VAN). While extremely simple and efficient, VAN outperforms the state-of-the-art vision " +"transformers and convolutional neural networks with a large margin in extensive experiments, including " +"image classification, object detection, semantic segmentation, instance segmentation, etc." 
+msgstr "" + +#: ../../papers/van.md +msgid "VAN-T\\*" +msgstr "" + +#: ../../papers/van.md +msgid "4.11" +msgstr "" + +#: ../../papers/van.md +msgid "0.88" +msgstr "" + +#: ../../papers/van.md +msgid "75.41" +msgstr "" + +#: ../../papers/van.md +msgid "93.02" +msgstr "" + +#: ../../papers/van.md +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/van/van-tiny_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/van.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/van/van-tiny_8xb128_in1k_20220501-385941af.pth)" +msgstr "" + +#: ../../papers/van.md +msgid "VAN-S\\*" +msgstr "" + +#: ../../papers/van.md +msgid "13.86" +msgstr "" + +#: ../../papers/van.md +msgid "95.63" +msgstr "" + +#: ../../papers/van.md +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/van/van-small_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/van.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/van/van-small_8xb128_in1k_20220501-17bc91aa.pth)" +msgstr "" + +#: ../../papers/van.md +msgid "VAN-B\\*" +msgstr "" + +#: ../../papers/van.md +msgid "26.58" +msgstr "" + +#: ../../papers/van.md +msgid "5.03" +msgstr "" + +#: ../../papers/van.md +msgid "82.80" +msgstr "" + +#: ../../papers/van.md +msgid "96.21" +msgstr "" + +#: ../../papers/van.md +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/van/van-base_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/van.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/van/van-base_8xb128_in1k_20220501-6a4cc31b.pth)" +msgstr "" + +#: ../../papers/van.md +msgid "VAN-L\\*" +msgstr "" + +#: ../../papers/van.md +msgid "44.77" +msgstr "" + +#: ../../papers/van.md +msgid "83.86" +msgstr "" + +#: ../../papers/van.md +msgid "96.73" +msgstr "" + +#: ../../papers/van.md +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/van/van-large_8xb128_in1k.py)" +msgstr "" + +#: ../../papers/van.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/van/van-large_8xb128_in1k_20220501-f212ba21.pth)" +msgstr "" + +#: ../../papers/van.md:29 +msgid "" +"\\*Models with * are converted from [the official repo](https://github.com/Visual-Attention-Network/VAN-" +"Classification). The config files of these models are only for validation. We don't ensure these config " +"files' training accuracy and welcome you to contribute your reproduction results." +msgstr "" + +#: ../../papers/vgg.md:4 +msgid "VGG" +msgstr "" + +#: ../../papers/vgg.md:6 +msgid "[Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)" +msgstr "" + +#: ../../papers/vgg.md:12 +msgid "" +"In this work we investigate the effect of the convolutional network depth on its accuracy in the large-" +"scale image recognition setting. Our main contribution is a thorough evaluation of networks of increasing " +"depth using an architecture with very small (3x3) convolution filters, which shows that a significant " +"improvement on the prior-art configurations can be achieved by pushing the depth to 16-19 weight layers. " +"These findings were the basis of our ImageNet Challenge 2014 submission, where our team secured the first " +"and the second places in the localisation and classification tracks respectively. We also show that our " +"representations generalise well to other datasets, where they achieve state-of-the-art results. 
We have " +"made our two best-performing ConvNet models publicly available to facilitate further research on the use of " +"deep visual representations in computer vision." +msgstr "" + +#: ../../papers/vgg.md +msgid "VGG-11" +msgstr "" + +#: ../../papers/vgg.md +msgid "132.86" +msgstr "" + +#: ../../papers/vgg.md +msgid "7.63" +msgstr "" + +#: ../../papers/vgg.md +msgid "68.75" +msgstr "" + +#: ../../papers/vgg.md +msgid "88.87" +msgstr "" + +#: ../../papers/vgg.md +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/vgg/vgg11_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/vgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/" +"vgg11_batch256_imagenet_20210208-4271cd6c.log.json)" +msgstr "" + +#: ../../papers/vgg.md +msgid "VGG-13" +msgstr "" + +#: ../../papers/vgg.md +msgid "133.05" +msgstr "" + +#: ../../papers/vgg.md +msgid "11.34" +msgstr "" + +#: ../../papers/vgg.md +msgid "70.02" +msgstr "" + +#: ../../papers/vgg.md +msgid "89.46" +msgstr "" + +#: ../../papers/vgg.md +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/vgg/vgg13_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/vgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/" +"vgg13_batch256_imagenet_20210208-4d1d6080.log.json)" +msgstr "" + +#: ../../papers/vgg.md +msgid "VGG-16" +msgstr "" + +#: ../../papers/vgg.md +msgid "138.36" +msgstr "" + +#: ../../papers/vgg.md +msgid "71.62" +msgstr "" + +#: ../../papers/vgg.md +msgid "90.49" +msgstr "" + +#: ../../papers/vgg.md +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/vgg/vgg16_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/vgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-" +"db26f1a5.log.json)" +msgstr "" + +#: ../../papers/vgg.md +msgid "VGG-19" +msgstr "" + +#: ../../papers/vgg.md +msgid "143.67" +msgstr "" + +#: ../../papers/vgg.md +msgid "19.67" +msgstr "" + +#: ../../papers/vgg.md +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/vgg/vgg19_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/vgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-" +"e6920e4a.log.json)" +msgstr "" + +#: ../../papers/vgg.md +msgid "VGG-11-BN" +msgstr "" + +#: ../../papers/vgg.md +msgid "132.87" +msgstr "" + +#: ../../papers/vgg.md +msgid "7.64" +msgstr "" + +#: ../../papers/vgg.md +msgid "70.67" +msgstr "" + +#: ../../papers/vgg.md +msgid "90.16" +msgstr "" + +#: ../../papers/vgg.md +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/vgg/vgg11bn_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/vgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c." 
+"pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-" +"f244902c.log.json)" +msgstr "" + +#: ../../papers/vgg.md +msgid "VGG-13-BN" +msgstr "" + +#: ../../papers/vgg.md +msgid "11.36" +msgstr "" + +#: ../../papers/vgg.md +msgid "72.12" +msgstr "" + +#: ../../papers/vgg.md +msgid "90.66" +msgstr "" + +#: ../../papers/vgg.md +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/vgg/vgg13bn_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/vgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/" +"vgg13_bn_batch256_imagenet_20210207-1a8b7864.log.json)" +msgstr "" + +#: ../../papers/vgg.md +msgid "VGG-16-BN" +msgstr "" + +#: ../../papers/vgg.md +msgid "138.37" +msgstr "" + +#: ../../papers/vgg.md +msgid "15.53" +msgstr "" + +#: ../../papers/vgg.md +msgid "73.74" +msgstr "" + +#: ../../papers/vgg.md +msgid "91.66" +msgstr "" + +#: ../../papers/vgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/" +"vgg16_bn_batch256_imagenet_20210208-7e55cd29.log.json)" +msgstr "" + +#: ../../papers/vgg.md +msgid "VGG-19-BN" +msgstr "" + +#: ../../papers/vgg.md +msgid "143.68" +msgstr "" + +#: ../../papers/vgg.md +msgid "19.7" +msgstr "" + +#: ../../papers/vgg.md +msgid "74.68" +msgstr "" + +#: ../../papers/vgg.md +msgid "92.27" +msgstr "" + +#: ../../papers/vgg.md +msgid "[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/vgg/vgg19bn_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/vgg.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f." +"pth) | [log](https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-" +"da620c4f.log.json)" +msgstr "" + +#: ../../papers/vision_transformer.md:4 +msgid "Vision Transformer" +msgstr "" + +#: ../../papers/vision_transformer.md:6 +msgid "" +"[An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/" +"pdf/2010.11929.pdf)" +msgstr "" + +#: ../../papers/vision_transformer.md:12 +msgid "" +"**Vision Transformer**, known as **ViT**, succeeded in using a full transformer to outperform previous " +"works that based on convolutional networks in vision field. ViT splits image into patches to feed the multi-" +"head attentions, concatenates a learnable class token for final prediction and adds a learnable position " +"embeddings for relative positional message between patches. Based on these three techniques with " +"attentions, ViT provides a brand-new pattern to build a basic structure in vision field." +msgstr "" + +#: ../../papers/vision_transformer.md:14 +msgid "" +"The strategy works even better when coupled with large datasets pre-trainings. Because of its simplicity " +"and effectiveness, some after works in classification field are originated from ViT. And even in recent " +"multi-modality field, ViT-based method still plays a role in it." +msgstr "" + +#: ../../papers/vision_transformer.md:28 +msgid "" +"While the Transformer architecture has become the de-facto standard for natural language processing tasks, " +"its applications to computer vision remain limited. 
In vision, attention is either applied in conjunction " +"with convolutional networks, or used to replace certain components of convolutional networks while keeping " +"their overall structure in place. We show that this reliance on CNNs is not necessary and a pure " +"transformer applied directly to sequences of image patches can perform very well on image classification " +"tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image " +"recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent " +"results compared to state-of-the-art convolutional networks while requiring substantially fewer " +"computational resources to train.
" +msgstr "" + +#: ../../papers/vision_transformer.md:87 +msgid "" +"For more configurable parameters, please refer to the [API](https://mmclassification.readthedocs.io/en/1.x/" +"api/generated/mmcls.models.backbones.VisionTransformer.html#mmcls.models.backbones.VisionTransformer)." +msgstr "" + +#: ../../papers/vision_transformer.md:91 +msgid "" +"The training step of Vision Transformers is divided into two steps. The first step is training the model on " +"a large dataset, like ImageNet-21k, and get the pre-trained model. And the second step is training the " +"model on the target dataset, like ImageNet-1k, and get the fine-tuned model. Here, we provide both pre-" +"trained models and fine-tuned models." +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "ViT-B16\\*" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "33.03" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-" +"p16_3rdparty_pt-64xb64_in1k-224_20210928-02284250.pth)" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "ViT-B32\\*" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "88.30" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "8.56" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-base-" +"p32_3rdparty_pt-64xb64_in1k-224_20210928-eee25dd4.pth)" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "ViT-L16\\*" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "304.72" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "116.68" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vit/pretrain/vit-large-" +"p16_3rdparty_pt-64xb64_in1k-224_20210928-0001f9a1.pth)" +msgstr "" + +#: ../../papers/vision_transformer.md:107 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/google-research/" +"vision_transformer#available-vit-models).*" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "ViT-B16" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "82.37" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/vision_transformer/vit-base-" +"p16_pt-32xb128-mae_in1k-224.py)" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_pt-32xb128-" +"mae_in1k_20220623-4c544545.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vit/vit-base-" +"p16_pt-32xb128-mae_in1k_20220623-4c544545.log)" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "85.43" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "97.77" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/vision_transformer/vit-base-" +"p16_ft-64xb64_in1k-384.py)" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-" +"pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth)" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "ViT-B16 (IPU)" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "81.22" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid 
"95.56" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/vision_transformer/vit-base-" +"p16_ft-4xb544-ipu_in1k.py)" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_ft-4xb544-ipu_in1k_20220603-" +"c215811a.pth) | [log](https://download.openmmlab.com/mmclassification/v0/vit/vit-base-p16_ft-4xb544-" +"ipu_in1k.log)" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "84.01" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "97.08" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/vision_transformer/vit-base-" +"p32_ft-64xb64_in1k-384.py)" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p32_in21k-" +"pre-3rdparty_ft-64xb64_in1k-384_20210928-9cea8599.pth)" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "85.63" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "97.63" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/vision_transformer/vit-large-" +"p16_ft-64xb64_in1k-384.py)" +msgstr "" + +#: ../../papers/vision_transformer.md:71 +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-large-p16_in21k-" +"pre-3rdparty_ft-64xb64_in1k-384_20210928-b20ba619.pth)" +msgstr "" + +#: ../../papers/vision_transformer.md:119 +msgid "" +"*Models with * are converted from the [official repo](https://github.com/google-research/" +"vision_transformer#available-vit-models). The config files of these models are only for validation. We " +"don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.*" +msgstr "" + +#: ../../papers/wrn.md:4 +msgid "Wide-ResNet" +msgstr "" + +#: ../../papers/wrn.md:6 +msgid "[Wide Residual Networks](https://arxiv.org/abs/1605.07146)" +msgstr "" + +#: ../../papers/wrn.md:12 +msgid "" +"Deep residual networks were shown to be able to scale up to thousands of layers and still have improving " +"performance. However, each fraction of a percent of improved accuracy costs nearly doubling the number of " +"layers, and so training very deep residual networks has a problem of diminishing feature reuse, which makes " +"these networks very slow to train. To tackle these problems, in this paper we conduct a detailed " +"experimental study on the architecture of ResNet blocks, based on which we propose a novel architecture " +"where we decrease depth and increase width of residual networks. We call the resulting network structures " +"wide residual networks (WRNs) and show that these are far superior over their commonly used thin and very " +"deep counterparts. For example, we demonstrate that even a simple 16-layer-deep wide residual network " +"outperforms in accuracy and efficiency all previous deep residual networks, including thousand-layer-deep " +"networks, achieving new state-of-the-art results on CIFAR, SVHN, COCO, and significant improvements on " +"ImageNet." 
+msgstr "" + +#: ../../papers/wrn.md +msgid "WRN-50\\*" +msgstr "" + +#: ../../papers/wrn.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/wrn/wide-resnet50_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/wrn.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-" +"resnet50_3rdparty_8xb32_in1k_20220304-66678344.pth)" +msgstr "" + +#: ../../papers/wrn.md +msgid "WRN-101\\*" +msgstr "" + +#: ../../papers/wrn.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/wrn/wide-resnet101_8xb32_in1k.py)" +msgstr "" + +#: ../../papers/wrn.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-" +"resnet101_3rdparty_8xb32_in1k_20220304-8d5f9d61.pth)" +msgstr "" + +#: ../../papers/wrn.md +msgid "WRN-50 (timm)\\*" +msgstr "" + +#: ../../papers/wrn.md +msgid "81.45" +msgstr "" + +#: ../../papers/wrn.md +msgid "95.53" +msgstr "" + +#: ../../papers/wrn.md +msgid "" +"[config](https://github.com/open-mmlab/mmclassification/blob/1.x/configs/wrn/wide-resnet50_timm_8xb32_in1k." +"py)" +msgstr "" + +#: ../../papers/wrn.md +msgid "" +"[model](https://download.openmmlab.com/mmclassification/v0/wrn/wide-resnet50_3rdparty-" +"timm_8xb32_in1k_20220304-83ae4399.pth)" +msgstr "" + +#: ../../papers/wrn.md:28 +msgid "" +"*Models with * are converted from the [TorchVision](https://github.com/pytorch/vision/blob/main/torchvision/" +"models/resnet.py) and [TIMM](https://github.com/rwightman/pytorch-image-models/blob/master). The config " +"files of these models are only for inference. We don't ensure these config files' training accuracy and " +"welcome you to contribute your reproduction results.*" +msgstr "" diff --git a/docs/zh_CN/migration.md b/docs/zh_CN/migration.md new file mode 100644 index 0000000..5e4a18d --- /dev/null +++ b/docs/zh_CN/migration.md @@ -0,0 +1,732 @@ +# 迁移文档 + +我们在 MMPretrain 1.x 版本中引入了一些修改,可能会产生兼容性问题。请按照本教程从 MMClassification 0.x 或是 MMSelfSup 0.x 迁移您的项目。 + +## 新的依赖 + +```{warning} +MMPretrain 1.x 版本依赖于一些新的代码包,您应该根据 [安装教程](./get_started.md) 来创建新的环境,尽管你可能已经拥有了一个可以正常运行 MMClassification 0.x 或 MMSelfSup 0.x 的环境。请参考[安装文档](./get_started.md) 对依赖库进行对应的安装。 +``` + +1. [MMEngine](https://github.com/open-mmlab/mmengine):MMEngine 是 OpenMMLab 2.0 架构的核心库,我们将许多与计算机视觉无关的组件从 MMCV 拆分到了 MMEngine。 +2. [MMCV](https://github.com/open-mmlab/mmcv):OpenMMLab 计算机视觉基础库,这不是一个新的依赖,但你需要将其升级到 `2.0.0rc1` 版本以上。 +3. [rich](https://github.com/Textualize/rich):一个命令行美化库,用以在命令行中呈现更美观的输出。 + +# 配置文件的通用改变 + +在这个部分,我们将介绍一些旧版本 (**MMClassification 0.x** 或 **MMSelfSup 0.x**) 和 **MMPretrain 1.x** 之间通用的变化规范。 + +## 训练策略设置 + +| MMCls or MMSelfSup 0.x | MMPretrain 1.x | 备注 | +| ---------------------- | --------------- | -------------------------------------------------------------------------------------------------------- | +| optimizer_config | / | `optimizer_config` 已经被**移除**。 | +| / | optim_wrapper | `optim_wrapper` 提供了参数更新的相关字段。 | +| lr_config | param_scheduler | `param_scheduler` 是一个列表设置学习率或者是其它参数,这将比之前更加灵活。 | +| runner | train_cfg | `train_cfg` 中的循环设置(如 `EpochBasedTrainLoop`,`IterBasedTrainLoop`)将控制模型训练过程中的工作流。 | + +**`optimizer`** 和 **`optimizer_config`** 字段的变化: + +- 现在我们使用 `optim_wrapper` 字段指定与优化过程有关的所有配置。而 `optimizer` 字段是 `optim_wrapper` 的一个 + 子字段。 +- `paramwise_cfg` 字段不再是 `optimizer` 的子字段,而是 `optim_wrapper` 的子字段。 +- `optimizer_config` 字段被移除,其配置项被移入 `optim_wrapper` 字段。 +- `grad_clip` 被重命名为 `clip_grad` + + + + + + + + + +
原配置 + +```python +optimizer = dict( + type='AdamW', + lr=0.0015, + weight_decay=0.3, + paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + )) + +optimizer_config = dict(grad_clip=dict(max_norm=1.0)) +``` + +
新配置 + +```python +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=0.0015, weight_decay=0.3), + paramwise_cfg = dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + ), + clip_grad=dict(max_norm=1.0), +) +``` + +
+ +**`lr_config`** 字段的变化: + +- `lr_config` 字段被移除,我们使用新的 `param_scheduler` 配置取代。 +- `warmup` 相关的字段都被移除,因为学习率预热可以通过多个学习率规划器的组合来实现,因此不再单独实现。 + +新的优化器参数规划器组合机制非常灵活,你可以使用它来设计多种学习率、动量曲线,详见{external+mmengine:doc}`MMEngine 中的教程 `。 + + + + + + + + + +
原配置 + +```python +lr_config = dict( + policy='CosineAnnealing', + min_lr=0, + warmup='linear', + warmup_iters=5, + warmup_ratio=0.01, + warmup_by_epoch=True) +``` + +
新配置 + +```python +param_scheduler = [ + # 学习率预热 + dict( + type='LinearLR', + start_factor=0.01, + by_epoch=True, + end=5, + # 每轮迭代都更新学习率,而不是每个 epoch + convert_to_iter_based=True), + # 主学习率规划器 + dict(type='CosineAnnealingLR', by_epoch=True, begin=5), +] +``` + +
+ +**`runner`** 字段的变化: + +原 `runner` 字段被拆分为 `train_cfg`,`val_cfg` 和 `test_cfg` 三个字段,分别配置训练、验证和测试循环。 + + + + + + + + + +
原配置 + +```python +runner = dict(type='EpochBasedRunner', max_epochs=100) +``` + +
新配置 + +```python +# `val_interval` 字段来自原配置中 `evaluation.interval` 字段 +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +val_cfg = dict() # 空字典表示使用默认验证配置 +test_cfg = dict() # 空字典表示使用默认测试配置 +``` + +
+ +在 OpenMMLab 2.0 中,我们引入了“循环控制器”来控制训练、验证和测试行为,而原先 `Runner` 功能也相应地发生了变化。详细介绍参见 MMEngine 中的{external+mmengine:doc}`执行器教程 `。 + +## 运行设置 + +**`checkpoint_config`** 和 **`log_config`** 字段的变化: + +`checkpoint_config` 被移动至 `default_hooks.checkpoint`,`log_config` 被移动至 `default_hooks.logger`。同时, +我们将很多原先在训练脚本中隐式定义的钩子移动到了 `default_hooks` 字段。 + +```python +default_hooks = dict( + # 记录每轮迭代的耗时 + timer=dict(type='IterTimerHook'), + + # 每 100 轮迭代打印一次日志 + logger=dict(type='LoggerHook', interval=100), + + # 启用优化器参数规划器 + param_scheduler=dict(type='ParamSchedulerHook'), + + # 每个 epoch 保存一次模型权重文件,并且自动保存最优权重文件 + checkpoint=dict(type='CheckpointHook', interval=1, save_best='auto'), + + # 在分布式环境中设置采样器种子 + sampler_seed=dict(type='DistSamplerSeedHook'), + + # 可视化验证结果,将 `enable` 设为 True 来启用这一功能。 + visualization=dict(type='VisualizationHook', enable=False), +) +``` + +此外,我们将原来的日志功能拆分为日志记录和可视化器。日志记录负责按照指定间隔保存日志数据,以及进行数据平滑等处理,可视化器用于在不同的后端记录日志,如终端、TensorBoard 和 WandB。 + + + + + + + + + +
原配置 + +```python +log_config = dict( + interval=100, + hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook'), + ]) +``` + +
新配置 + +```python +default_hooks = dict( + ... + logger=dict(type='LoggerHook', interval=100), +) + +visualizer = dict( + type='UniversalVisualizer', + vis_backends=[dict(type='LocalVisBackend'), dict(type='TensorboardVisBackend')], +) +``` + +
+ +**`load_from`** 和 **`resume_from`** 字段的变动: + +- `resume_from` 字段被移除。我们现在使用 `resume` 和 `load_from` 字段实现以下功能: + - 如 `resume=True` 且 `load_from` 不为 None,从 `load_from` 指定的权重文件恢复训练。 + - 如 `resume=True` 且 `load_from` 为 None,尝试从工作目录中最新的权重文件恢复训练。 + - 如 `resume=False` 且 `load_from` 不为 None,仅加载指定的权重文件,不恢复训练。 + - 如 `resume=False` 且 `load_from` 为 None,不进行任何操作。 + +**`dist_params`** 字段的变动:`dist_params` 字段被移动为 `env_cfg` 字段的一个子字段。以下为 `env_cfg` 字段的所 +有配置项: + +```python +env_cfg = dict( + # 是否启用 cudnn benchmark + cudnn_benchmark=False, + + # 设置多进程相关参数 + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + + # 设置分布式相关参数 + dist_cfg=dict(backend='nccl'), +) +``` + +**`workflow`** 字段的变动:`workflow` 相关的功能现已被移除。 + +新字段 **`visualizer`**:可视化器是 OpenMMLab 2.0 架构中的新设计,我们使用可视化器进行日志、结果的可视化与多后 +端的存储。详见 MMEngine 中的{external+mmengine:doc}`可视化教程 `。 + +```python +visualizer = dict( + type='UniversalVisualizer', + vis_backends=[ + dict(type='LocalVisBackend'), + # 将下行取消注释,即可将日志和可视化结果保存至 TesnorBoard + # dict(type='TensorboardVisBackend') + ] +) +``` + +新字段 **`default_scope`**:指定所有注册器进行模块搜索默认的起点。MMPretrain 中的 `default_scope` 字段为 `mmpretrain`,大部分情况下不需要修改。详见 MMengine 中的{external+mmengine:doc}`注册器教程 `。 + +## 其他变动 + +我们将所有注册器的定义从各个包移动到了 `mmpretrain.registry`。 + +# 从 MMClassification 0.x 迁移 + +## 配置文件 + +在 MMPretrain 1.x 中,我们重构了配置文件的结构,绝大部分原来的配置文件无法直接使用。 + +在本节中,我们将介绍配置文件的所有变化。我们假设您已经对[配置文件](./user_guides/config.md)有所了解。 + +### 模型设置 + +`model.backbone`、`model.neck` 和 `model.head` 字段没有变化。 + +**`model.train_cfg`** 字段的变化: + +- `BatchMixup` 被重命名为 [`Mixup`](mmpretrain.models.utils.batch_augments.Mixup) +- `BatchCutMix` 被重命名为 [`CutMix`](mmpretrain.models.utils.batch_augments.CutMix) +- `BatchResizeMix` 被重命名为 [`ResizeMix`](mmpretrain.models.utils.batch_augments.ResizeMix) +- 以上增强中的 `prob` 参数均被移除,现在在 `train_cfg` 中使用一个统一的 `probs` 字段指定每个增强的概率。如果没 + 有指定 `probs` 字段,现在将均匀地随机选择一种增强。 + + + + + + + + + +
原配置 + +```python +model = dict( + ... + train_cfg=dict(augments=[ + dict(type='BatchMixup', alpha=0.8, num_classes=1000, prob=0.5), + dict(type='BatchCutMix', alpha=1.0, num_classes=1000, prob=0.5) + ] +) +``` + +
新配置 + +```python +model = dict( + ... + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0), + ] +) +``` + +
+ +### 数据设置 + +**`data`** 字段的变化: + +- 原先的 `data` 字段被拆分为 `train_dataloader`,`val_dataloader` 和 `test_dataloader` 字段。这允许我们进行更 + 加细粒度的配置。比如在训练和测试中指定不同的采样器、批次大小等。 +- `samples_per_gpu` 字段被重命名为 `batch_size` +- `workers_per_gpu` 字段被重命名为 `num_workers` + + + + + + + + + +
原配置 + +```python +data = dict( + samples_per_gpu=32, + workers_per_gpu=2, + train=dict(...), + val=dict(...), + test=dict(...), +) +``` + +
新配置 + +```python +train_dataloader = dict( + batch_size=32, + num_workers=2, + dataset=dict(...), + sampler=dict(type='DefaultSampler', shuffle=True) # 必要的 +) + +val_dataloader = dict( + batch_size=32, + num_workers=2, + dataset=dict(...), + sampler=dict(type='DefaultSampler', shuffle=False) # 必要的 +) + +test_dataloader = val_dataloader +``` + +
+ +**`pipeline`** 字段的变化: + +- 原先的 **`ToTensor`**、**`ImageToTensor`** 和 **`Collect`** 被合并为 [`PackInputs`](mmpretrain.datasets.transforms.PackInputs) +- 我们建议去除数据集流水线中的 **`Normalize`** 变换,转而使用 `data_preprocessor` 字段进行归一化预处理。 +- [**`RandomFlip`**](mmcv.transforms.RandomFlip) 中的 `flip_prob` 参数被重命名为 `prob` +- [**`RandomCrop`**](mmpretrain.datasets.transforms.RandomCrop) 中的 `size` 参数被重命名为 `crop_size` +- [**`RandomResizedCrop`**](mmpretrain.datasets.transforms.RandomResizedCrop) 中的 `size` 参数被重命名为 `scale` +- [**`Resize`**](mmcv.transforms.Resize) 中的 `size` 参数被重命名为 `scale`。并且不再支持形如 `(256, -1)` 的尺寸,请使用 [`ResizeEdge`](mmpretrain.datasets.transforms.ResizeEdge) +- [**`AutoAugment`**](mmpretrain.datasets.transforms.AutoAugment) 和 [**`RandAugment`**](mmpretrain.datasets.transforms.RandAugment) 中的 `policies` 参数现在支持使用字符串来指定某些预设的策略集,`AutoAugment` 支持 "imagenet",`RandAugment` 支持 "timm_increasing" +- **`RandomResizedCrop`** 和 **`CenterCrop`** 不再支持 `efficientnet_style` 参数,请使用 [`EfficientNetRandomCrop`](mmpretrain.datasets.transforms.EfficientNetRandomCrop) 和 [`EfficientNetCenterCrop`](mmpretrain.datasets.transforms.EfficientNetCenterCrop) + +```{note} +我们将一些数据变换工作移至数据预处理器进行,如归一化,请参阅[文档](mmpretrain.models.utils.data_preprocessor)了解更多详细信息。 +``` + + + + + + + + + +
原配置 + +```python +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', size=224), + dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='ToTensor', keys=['gt_label']), + dict(type='Collect', keys=['img', 'gt_label']) +] +``` + +
新配置 + +```python +data_preprocessor = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=224), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] +``` + +
+ +**`evaluation`** 字段的变化: + +- 原先的 **`evaluation`** 字段被拆分为 `val_evaluator` 和 `test_evaluator`,并且不再支持 `interval` 和 `save_best` + 参数。`interval` 参数被移动至 `train_cfg.val_interval` 字段,详见[训练策略配置](./user_guides/config.md#训练策略)。而 `save_best` 参数被移动至 `default_hooks.checkpoint.save_best` 字段,详见 [运行设置](./user_guides/config.md#运行设置)。 +- 'accuracy' 指标被重命名为 [`Accuracy`](mmpretrain.evaluation.Accuracy) +- 'precision','recall','f1-score' 和 'support' 指标被组合为 [`SingleLabelMetric`](mmpretrain.evaluation.SingleLabelMetric),并使用 `items` 参数指定具体计算哪些指标。 +- 'mAP' 指标被重命名为 [`AveragePrecision`](mmpretrain.evaluation.AveragePrecision) +- 'CP','CR','CF1','OP','OR' 和 'OF1' 指标被组合为 [`MultiLabelMetric`](mmpretrain.evaluation.MultiLabelMetric),并使用 `items` 和 `average` 参数指定具体计算哪些指标。 + + + + + + + + + +
原配置 + +```python +evaluation = dict( + interval=1, + metric='accuracy', + metric_options=dict(topk=(1, 5)) +) +``` + +
新配置 + +```python +val_evaluator = dict(type='Accuracy', topk=(1, 5)) +test_evaluator = val_evaluator +``` + +
+ + + + + + + + +
原配置 + +```python +evaluation = dict( + interval=1, + metric=['mAP', 'CP', 'OP', 'CR', 'OR', 'CF1', 'OF1'], + metric_options=dict(thr=0.5), +) +``` + +
新配置 + +```python +val_evaluator = [ + dict(type='AveragePrecision'), + dict(type='MultiLabelMetric', + items=['precision', 'recall', 'f1-score'], + average='both', + thr=0.5), +] +test_evaluator = val_evaluator +``` + +
+ +## 模块变动 + +### `mmpretrain.apis` + +详见[包文档](mmpretrain.apis) + +| 函数 | 变动 | +| :------------------: | :-------------------------------------------------------------------------------------------------------------------------------- | +| `init_model` | 无变动 | +| `inference_model` | 无变动,但我们推荐使用功能更强的 [`mmpretrain.ImageClassificationInferencer`](mmpretrain.apis.ImageClassificationInferencer)。 | +| `train_model` | 移除,直接使用 `runner.train` 进行训练。 | +| `multi_gpu_test` | 移除,直接使用 `runner.test` 进行测试。 | +| `single_gpu_test` | 移除,直接使用 `runner.test` 进行测试。 | +| `show_result_pyplot` | 移除,使用 [`mmpretrain.ImageClassificationInferencer`](mmpretrain.apis.ImageClassificationInferencer) 进行模型推理和结果可视化。 | +| `set_random_seed` | 移除,使用 `mmengine.runner.set_random_seed`. | +| `init_random_seed` | 移除,使用 `mmengine.dist.sync_random_seed`. | + +### `mmpretrain.core` + +`mmpretrain.core` 包被重命名为 [`mmpretrain.engine`](mmpretrain.engine) + +| 子包 | 变动 | +| :-------------: | :-------------------------------------------------------------------------------------------------------------------------------- | +| `evaluation` | 移除,使用 [`mmpretrain.evaluation`](mmpretrain.evaluation) | +| `hook` | 移动至 [`mmpretrain.engine.hooks`](mmpretrain.engine.hooks) | +| `optimizers` | 移动至 [`mmpretrain.engine.optimizers`](mmpretrain.engine.optimizers) | +| `utils` | 移除,分布式环境相关的函数统一至 [`mmengine.dist`](api/dist) 包 | +| `visualization` | 移除,其中可视化相关的功能被移动至 [`mmpretrain.visualization.UniversalVisualizer`](mmpretrain.visualization.UniversalVisualizer) | + +`hooks` 包中的 `MMClsWandbHook` 尚未实现。 + +`hooks` 包中的 `CosineAnnealingCooldownLrUpdaterHook` 被移除。我们现在支持使用学习率规划器的组合实现该功能。详见[自定义训练优化策略](./advanced_guides/schedule.md)。 + +### `mmpretrain.datasets` + +详见[包文档](mmpretrain.datasets) + +| 数据集类 | 变动 | +| :---------------------------------------------------------------------------------------: | :----------------------------------------------------------------------- | +| [`CustomDataset`](mmpretrain.datasets.CustomDataset) | 增加了 `data_root` 参数,作为 `data_prefix` 和 `ann_file` 的共同根路径。 | +| [`ImageNet`](mmpretrain.datasets.ImageNet) | 与 `CustomDataset` 相同。 | +| [`ImageNet21k`](mmpretrain.datasets.ImageNet21k) | 与 `CustomDataset` 相同。 | +| [`CIFAR10`](mmpretrain.datasets.CIFAR10) & [`CIFAR100`](mmpretrain.datasets.CIFAR100) | `test_mode` 参数目前是必要参数。 | +| [`MNIST`](mmpretrain.datasets.MNIST) & [`FashionMNIST`](mmpretrain.datasets.FashionMNIST) | `test_mode` 参数目前是必要参数。 | +| [`VOC`](mmpretrain.datasets.VOC) | 现在需要指定 `data_root`,`image_set_path` 和 `test_mode` 参数。 | +| [`CUB`](mmpretrain.datasets.CUB) | 现在需要指定 `data_root` 和 `test_mode` 参数。 | + +`mmpretrain.datasets.pipelines` 包被重命名为 `mmpretrain.datasets.transforms` + +| 数据变换类 | 变动 | +| :-----------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `LoadImageFromFile` | 移除,使用 [`mmcv.transforms.LoadImageFromFile`](mmcv.transforms.LoadImageFromFile) | +| `RandomFlip` | 移除,使用 [`mmcv.transforms.RandomFlip`](mmcv.transforms.RandomFlip),其中 `flip_prob` 参数被重命名为 `prob` | +| `RandomCrop` | `size` 参数被重命名为 `crop_size` | +| `RandomResizedCrop` | `size` 参数被重命名为 `scale`;`scale` 参数被重命名为 `crop_ratio_range`;不再支持 `efficientnet_style`,请使用 [`EfficientNetRandomCrop`](mmpretrain.datasets.transforms.EfficientNetRandomCrop) | +| `CenterCrop` | 移除,使用 [`mmcv.transforms.CenterCrop`](mmcv.transforms.CenterCrop);不再支持 `efficientnet_style`,请使用 
[`EfficientNetCenterCrop`](mmpretrain.datasets.transforms.EfficientNetCenterCrop) | +| `Resize` | 移除,使用 [`mmcv.transforms.Resize`](mmcv.transforms.Resize);`size` 参数被重命名为 `scale`,且不再支持形如 `(256, -1)` 参数,使用 [`ResizeEdge`](mmpretrain.datasets.transforms.ResizeEdge) | +| `AutoAugment` & `RandomAugment` | `policies` 参数现在支持使用字符串指定预设的策略集。 | +| `Compose` | 移除,使用 [`mmcv.transforms.Compose`](mmcv.transforms.Compose) | + +### `mmpretrain.models` + +详见[包文档](mmpretrain.models),**backbones**、**necks** 和 **losses** 的结构没有变动。 + +[`ImageClassifier`](mmpretrain.models.classifiers.ImageClassifier) 的变动: + +| 分类器的方法 | 变动 | +| :-------------: | :---------------------------------------------------------------------------------------------------------------------- | +| `extract_feat` | 无变动 | +| `forward` | 现在需要三个输入:`inputs`、`data_samples` 和 `mode`。详见[文档](mmpretrain.models.classifiers.ImageClassifier.forward) | +| `forward_train` | 变更为 `loss` 方法。 | +| `simple_test` | 变更为 `predict` 方法。 | +| `train_step` | `optimizer` 参数被修改为 `optim_wrapper`,接受 [`OptimWrapper`](mmengine.optim.OptimWrapper) | +| `val_step` | 原先的 `val_step` 与 `train_step` 一致,现在该方法将会调用 `predict` | +| `test_step` | 新方法,与 `val_step` 一致。 | + +[heads](mmpretrain.models.heads) 中的变动: + +| 分类头的方法 | 变动 | +| :-------------: | :--------------------------------------------------------------------------------------------------------------------------------------- | +| `pre_logits` | 无变动 | +| `forward_train` | 变更为 `loss` 方法。 | +| `simple_test` | 变更为 `predict` 方法。 | +| `loss` | 现在接受 `data_samples` 参数,而不是 `gt_labels`,`data_samples` 参数应当接受 [ClsDataSample](mmpretrain.structures.DataSample) 的列表。 | +| `forward` | 新方法,它将返回分类头的输出,不会进行任何后处理(包括 softmax 或 sigmoid)。 | + +### `mmpretrain.utils` + +详见[包文档](mmpretrain.utils) + +| 函数 | 变动 | +| :--------------------------: | :------------------------------------------------------------------------------------------------------------ | +| `collect_env` | 无变动 | +| `get_root_logger` | 移除,使用 [`mmengine.logging.MMLogger.get_current_instance`](mmengine.logging.MMLogger.get_current_instance) | +| `load_json_log` | 输出格式发生变化。 | +| `setup_multi_processes` | 移除,使用 [`mmengine.utils.dl_utils.set_multi_processing`](mmengine.utils.dl_utils.set_multi_processing) | +| `wrap_non_distributed_model` | 移除,现在 runner 会自动包装模型。 | +| `wrap_distributed_model` | 移除,现在 runner 会自动包装模型。 | +| `auto_select_device` | 移除,现在 runner 会自动选择设备。 | + +# 从 MMSelfSup 0.x 迁移 + +## 配置文件 + +本章节将介绍 `_base_` 文件夹中的配置文件的变化,主要包含以下三个部分: + +- 数据集:`configs/_base_/datasets` +- 模型:`configs/_base_/models` +- 优化器及调度:`configs/_base_/schedules` + +### 数据集 + +在 **MMSelfSup 0.x** 中,我们使用字段 `data` 来整合数据相关信息, 例如 `samples_per_gpu`,`train`,`val` 等。 + +在 **MMPretrain 1.x** 中,我们分别使用字段 `train_dataloader`, `val_dataloader` 整理训练和验证的数据相关信息,并且 `data` 字段已经被 **移除**。 + + + + + + + + + + +
旧版本 + +```python +data = dict( + samples_per_gpu=32, # total 32*8(gpu)=256 + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_source=dict( + type=data_source, + data_prefix='data/imagenet/train', + ann_file='data/imagenet/meta/train.txt', + ), + num_views=[1, 1], + pipelines=[train_pipeline1, train_pipeline2], + prefetch=prefetch, + ), + val=...) +``` + +
新版本 + +```python +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='meta/train.txt', + data_prefix=dict(img_path='train/'), + pipeline=train_pipeline)) +val_dataloader = ... +``` + +
+ +另外,我们 **移除** 了字段 `data_source`,以此来保证我们项目和其它 OpenMMLab 项目数据流的一致性。请查阅 [Config](user_guides/config.md) 获取更详细的信息。 + +**`pipeline`** 中的变化: + +以 MAE 的 `pipeline` 作为例子,新的写法如下: + +```python +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + size=224, + scale=(0.2, 1.0), + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5), + dict(type='PackSelfSupInputs', meta_keys=['img_path']) +] +``` + +### 模型 + +在模型的配置文件中,和 MMSeflSup 0.x 版本相比,主要有两点不同。 + +1. 有一个新的字段 `data_preprocessor`,主要负责对数据进行预处理,例如归一化,通道转换等。例子如下: + +```python +data_preprocessor=dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True) +model = dict( + type='MAE', + data_preprocessor=dict( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + bgr_to_rgb=True), + backbone=..., + neck=..., + head=..., + init_cfg=...) +``` + +2. 在新版本的 `head` 字段中,我们新增加了 `loss`,主要负责损失函数的构建。例子如下: + +```python +model = dict( + type='MAE', + backbone=..., + neck=..., + head=dict( + type='MAEPretrainHead', + norm_pix=True, + patch_size=16, + loss=dict(type='MAEReconstructionLoss')), + init_cfg=...) +``` + +## 模块变动 + +下列表格记录了代码模块、文件夹的主要改变。 + +| MMSelfSup 0.x | MMPretrain 1.x | Remark | +| ------------------------ | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| apis | / | 目前 `apis` 文件夹已暂时被**移除**,在未来可能会再添加回来。 | +| core | engine | `core` 文件夹重命名为 `engine`,包含了 `hooks`,`opimizers`。([API link](mmpretrain.engine)) | +| datasets | datasets | 数据集相关类主要基于不同的数据集实现,例如 ImageNet,Places205。([API link](mmpretrain.datasets)) | +| datasets/data_sources | / | `data_sources` 已经被**移除**,并且现在 `datasets` 的逻辑和 OpenMMLab 其它项目保持一致。 | +| datasets/pipelines | datasets/transforms | `pipelines` 文件夹已经重命名为 `transforms`。([API link](mmpretrain.datasets.transforms)) | +| / | evaluation | `evaluation` 主要负责管理一些评测函数或者是类。([API link](mmpretrain.evaluation)) | +| models/algorithms | selfsup | 算法文件移动至 `selfsup` 文件夹。([API link](mmpretrain.models.selfsup)) | +| models/backbones | selfsup | 自监督学习算法对应的,重新实现的主干网络移动到算法的 `.py` 文件中。([API link](mmpretrain.models.selfsup)) | +| models/target_generators | selfsup | 目标生成器的实现移动到算法的 `.py` 文件中。([API link](mmpretrain.models.selfsup)) | +| / | models/losses | `losses` 文件夹提供了各种不同损失函数的实现。([API link](mmpretrain.models.losses)) | +| / | structures | `structures` 文件夹提供了数据结构的实现。在 MMPretrain 中,我们实现了一种新的数据结构,`DataSample`,在训练/验证过程中来传输和接受数据信息。([API link](mmpretrain.structures)) | +| / | visualization | `visualization` 文件夹包含了 visualizer,主要负责一些可视化的工作,例如数据增强的可视化。([API link](mmpretrain.visualization)) | diff --git a/docs/zh_CN/notes/changelog.md b/docs/zh_CN/notes/changelog.md new file mode 100644 index 0000000..6fc371e --- /dev/null +++ b/docs/zh_CN/notes/changelog.md @@ -0,0 +1 @@ +../../en/notes/changelog.md \ No newline at end of file diff --git a/docs/zh_CN/notes/contribution_guide.md b/docs/zh_CN/notes/contribution_guide.md new file mode 100644 index 0000000..2549cc2 --- /dev/null +++ b/docs/zh_CN/notes/contribution_guide.md @@ -0,0 +1,62 @@ +# 参与贡献 OpenMMLab + +欢迎任何类型的贡献,包括但不限于 + +- 修改拼写错误或代码错误 +- 添加文档或将文档翻译成其他语言 +- 添加新功能和新组件 + +## 工作流程 + +1. fork 并 pull 最新的 OpenMMLab 仓库 (MMPreTrain) +2. 签出到一个新分支(不要使用 master 分支提交 PR) +3. 进行修改并提交至 fork 出的自己的远程仓库 +4. 
在我们的仓库中创建一个 PR + +```{note} +如果你计划添加一些新的功能,并引入大量改动,请尽量首先创建一个 issue 来进行讨论。 +``` + +## 代码风格 + +### Python + +我们采用 [PEP8](https://www.python.org/dev/peps/pep-0008/) 作为统一的代码风格。 + +我们使用下列工具来进行代码风格检查与格式化: + +- [flake8](https://github.com/PyCQA/flake8): Python 官方发布的代码规范检查工具,是多个检查工具的封装 +- [isort](https://github.com/timothycrosley/isort): 自动调整模块导入顺序的工具 +- [yapf](https://github.com/google/yapf): 一个 Python 文件的格式化工具。 +- [codespell](https://github.com/codespell-project/codespell): 检查单词拼写是否有误 +- [mdformat](https://github.com/executablebooks/mdformat): 检查 markdown 文件的工具 +- [docformatter](https://github.com/myint/docformatter): 一个 docstring 格式化工具。 + +yapf 和 isort 的格式设置位于 [setup.cfg](https://github.com/open-mmlab/mmpretrain/blob/main/setup.cfg) + +我们使用 [pre-commit hook](https://pre-commit.com/) 来保证每次提交时自动进行代 +码检查和格式化,启用的功能包括 `flake8`, `yapf`, `isort`, `trailing whitespaces`, `markdown files`, 修复 `end-of-files`, `double-quoted-strings`, +`python-encoding-pragma`, `mixed-line-ending`, 对 `requirments.txt`的排序等。 +pre-commit hook 的配置文件位于 [.pre-commit-config](https://github.com/open-mmlab/mmpretrain/blob/main/.pre-commit-config.yaml) + +在你克隆仓库后,你需要按照如下步骤安装并初始化 pre-commit hook。 + +```shell +pip install -U pre-commit +``` + +在仓库文件夹中执行 + +```shell +pre-commit install +``` + +在此之后,每次提交,代码规范检查和格式化工具都将被强制执行。 + +```{important} +在创建 PR 之前,请确保你的代码完成了代码规范检查,并经过了 yapf 的格式化。 +``` + +### C++ 和 CUDA + +C++ 和 CUDA 的代码规范遵从 [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html) diff --git a/docs/zh_CN/notes/faq.md b/docs/zh_CN/notes/faq.md new file mode 100644 index 0000000..9e94cd8 --- /dev/null +++ b/docs/zh_CN/notes/faq.md @@ -0,0 +1,101 @@ +# 常见问题 + +我们在这里列出了一些常见问题及其相应的解决方案。如果您发现任何常见问题并有方法 +帮助解决,欢迎随时丰富列表。如果这里的内容没有涵盖您的问题,请按照 +[提问模板](https://github.com/open-mmlab/mmpretrain/issues/new/choose) +在 GitHub 上提出问题,并补充模板中需要的信息。 + +## 安装 + +- MMEngine, MMCV 与 MMPretrain 的兼容问题 + + 这里我们列举了各版本 MMPretrain 对 MMEngine 和 MMCV 版本的依赖,请选择合适的 MMEngine 和 MMCV 版本来避免安装和使用中的问题。 + + | MMPretrain 版本 | MMEngine 版本 | MMCV 版本 | + | :-------------: | :---------------: | :--------------: | + | 1.2.0 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.1.1 | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.0.0 | mmengine >= 0.8.0 | mmcv >= 2.0.0 | + | 1.0.0rc8 | mmengine >= 0.7.1 | mmcv >= 2.0.0rc4 | + | 1.0.0rc7 | mmengine >= 0.5.0 | mmcv >= 2.0.0rc4 | + + ```{note} + 由于 `dev` 分支处于频繁开发中,MMEngine 和 MMCV 版本依赖可能不准确。如果您在使用 + `dev` 分支时遇到问题,请尝试更新 MMEngine 和 MMCV 到最新版。 + ``` + +- 使用 Albumentations + + 如果你希望使用 `albumentations` 相关的功能,我们建议使用 `pip install -r requirements/optional.txt` 或者 + `pip install -U albumentations>=0.3.2 --no-binary qudida,albumentations` 命令进行安装。 + + 如果你直接使用 `pip install albumentations>=0.3.2` 来安装,它会同时安装 `opencv-python-headless` + (即使你已经安装了 `opencv-python`)。具体细节可参阅 + [官方文档](https://albumentations.ai/docs/getting_started/installation/#note-on-opencv-dependencies)。 + +## 通用问题 + +### 如果我对源码进行了改动,需要重新安装以使改动生效吗? + +如果你遵照[最佳实践](../get_started.md#最佳实践)的指引,从源码安装 mmpretrain,那么任何本地修改都不需要重新安装即可生效。 + +### 如何在多个 MMPretrain 版本下进行开发? + +通常来说,我们推荐通过不同虚拟环境来管理多个开发目录下的 MMPretrain。 +但如果你希望在不同目录(如 mmpretrain-0.21, mmpretrain-0.23 等)使用同一个环境进行开发, +我们提供的训练和测试 shell 脚本会自动使用当前目录的 mmpretrain,其他 Python 脚本 +则可以在命令前添加 `` PYTHONPATH=`pwd` `` 来使用当前目录的代码。 + +反过来,如果你希望 shell 脚本使用环境中安装的 MMPretrain,而不是当前目录的, +则可以去掉 shell 脚本中如下一行代码: + +```shell +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH +``` + +### `load_from` 和 `init_cfg` 之间的关系是什么? 
+ +- `load_from`: 如果`resume=False`,只导入模型权重,主要用于加载训练过的模型; + 如果 `resume=True` ,加载所有的模型权重、优化器状态和其他训练信息,主要用于恢复中断的训练。 + +- `init_cfg`: 你也可以指定`init=dict(type="Pretrained", checkpoint=xxx)`来加载权重, + 表示在模型权重初始化时加载权重,通常在训练的开始阶段执行。 + 主要用于微调预训练模型,你可以在骨干网络的配置中配置它,还可以使用 `prefix` 字段来只加载对应的权重,例如: + + ```python + model = dict( + backbone=dict( + type='ResNet', + depth=50, + init_cfg=dict(type='Pretrained', checkpoints=xxx, prefix='backbone'), + ) + ... + ) + ``` + +参见 [微调模型](./finetune_custom_dataset.md) 以了解更多关于模型微调的细节。 + +### `default_hooks` 和 `custom_hooks` 之间有什么区别? + +几乎没有区别。通常,`default_hooks` 字段用于指定几乎所有实验都会使用的钩子, +而 `custom_hooks` 字段指部分实验特有的钩子。 + +另一个区别是 `default_hooks` 是一个字典,而 `custom_hooks` 是一个列表,请不要混淆。 + +### 在训练期间,我没有收到训练日志,这是什么原因? + +如果你的训练数据集很小,而批处理量却很大,我们默认的日志间隔可能太大,无法记录你的训练日志。 + +你可以缩减日志间隔,再试一次,比如: + +```python +default_hooks = dict( + ... + logger=dict(type='LoggerHook', interval=10), + ... +) +``` + +### 如何基于其它数据集训练,例如我自己的数据集或者是 COCO 数据集? + +我们提供了 [具体示例](./pretrain_custom_dataset.md) 来展示如何在其它数据集上进行训练。 diff --git a/docs/zh_CN/notes/finetune_custom_dataset.md b/docs/zh_CN/notes/finetune_custom_dataset.md new file mode 100644 index 0000000..2b8cbd6 --- /dev/null +++ b/docs/zh_CN/notes/finetune_custom_dataset.md @@ -0,0 +1,328 @@ +# 如何在自定义数据集上微调模型 + +在很多场景下,我们需要快速地将模型应用到新的数据集上,但从头训练模型通常很难快速收敛,这种不确定性会浪费额外的时间。 +通常,已有的、在大数据集上训练好的模型会比随机初始化提供更为有效的先验信息,粗略来讲,在此基础上的学习我们称之为模型微调。 + +已经证明,在 ImageNet 数据集上预训练的模型对于其他数据集和其他下游任务有很好的效果。 +因此,该教程提供了如何将 [Model Zoo](../modelzoo_statistics.md) 中提供的预训练模型用于其他数据集,已获得更好的效果。 + +在本教程中,我们提供了一个实践示例和一些关于如何在自己的数据集上微调模型的技巧。 + +## 第一步:准备你的数据集 + +按照 [准备数据集](../user_guides/dataset_prepare.md) 准备你的数据集。 +假设我们的数据集根文件夹路径为 `data/custom_dataset/` + +假设我们想进行有监督图像分类训练,并使用子文件夹格式的 `CustomDataset` 来组织数据集: + +```text +data/custom_dataset/ +├── train +│   ├── class_x +│   │ ├── x_1.png +│ │ ├── x_2.png +│ │ ├── x_3.png +│ │ └── ... +│ ├── class_y +│   └── ... +└── test +    ├── class_x +    │ ├── test_x_1.png + │ ├── test_x_2.png + │ ├── test_x_3.png + │ └── ... + ├── class_y +    └── ... 
+``` + +## 第二步:选择一个配置文件作为模板 + +在这里,我们使用 `configs/resnet/resnet50_8xb32_in1k.py` 作为示例。 +首先在同一文件夹下复制一份配置文件,并将其重命名为 `resnet50_8xb32-ft_custom.py`。 + +```{tip} +按照惯例,配置名称的最后一个字段是数据集,例如,`in1k` 表示 ImageNet-1k,`coco` 表示 coco 数据集 +``` + +这个配置的内容是: + +```python +_base_ = [ + '../_base_/models/resnet50.py', # 模型设置 + '../_base_/datasets/imagenet_bs32.py', # 数据设置 + '../_base_/schedules/imagenet_bs256.py', # 训练策略设置 + '../_base_/default_runtime.py', # 运行设置 +] +``` + +## 第三步:修改模型设置 + +在进行模型微调时,我们通常希望在主干网络(backbone)加载预训练模型,再用我们的数据集训练一个新的分类头(head)。 + +为了在主干网络加载预训练模型,我们需要修改主干网络的初始化设置,使用 +`Pretrained` 类型的初始化函数。另外,在初始化设置中,我们使用 `prefix='backbone'` +来告诉初始化函数需要加载的子模块的前缀,`backbone`即指加载模型中的主干网络。 +方便起见,我们这里使用一个在线的权重文件链接,它 +会在训练前自动下载对应的文件,你也可以提前下载这个模型,然后使用本地路径。 + +接下来,新的配置文件需要按照新数据集的类别数目来修改分类头的配置。只需要修改分 +类头中的 `num_classes` 设置即可。 + +另外,当新的小数据集和原本预训练的大数据集中的数据分布较为类似的话,我们在进行微调时会希望 +冻结主干网络前面几层的参数,只训练后面层以及分类头的参数,这么做有助于在后续训练中, +保持网络从预训练权重中获得的提取低阶特征的能力。在 MMPretrain 中, +这一功能可以通过简单的一个 `frozen_stages` 参数来实现。比如我们需要冻结前两层网 +络的参数,只需要在上面的配置中添加一行: + +```{note} +注意,目前并非所有的主干网络都支持 `frozen_stages` 参数。请检查[文档](https://mmpretrain.readthedocs.io/en/latest/api.html#module-mmpretrain.models.backbones) +确认使用的主干网络是否支持这一参数。 +``` + +```python +_base_ = [ + '../_base_/models/resnet50.py', # 模型设置 + '../_base_/datasets/imagenet_bs32.py', # 数据设置 + '../_base_/schedules/imagenet_bs256.py', # 训练策略设置 + '../_base_/default_runtime.py', # 运行设置 +] + +# >>>>>>>>>>>>>>> 在这里重载模型相关配置 >>>>>>>>>>>>>>>>>>> +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) +# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< +``` + +```{tip} +这里我们只需要设定我们想要修改的部分配置,其他配置将会自动从我们的基配置文件中获取。 +``` + +## 第四步:修改数据集设置 + +为了在新数据集上进行微调,我们需要覆盖一些数据集设置,例如数据集类型、数据流水线等。 + +```python +_base_ = [ + '../_base_/models/resnet50.py', # 模型设置 + '../_base_/datasets/imagenet_bs32.py', # 数据设置 + '../_base_/schedules/imagenet_bs256.py', # 训练策略设置 + '../_base_/default_runtime.py', # 运行设置 +] + +# 模型设置 +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) + +# >>>>>>>>>>>>>>> 在这里重载数据配置 >>>>>>>>>>>>>>>>>>> +data_root = 'data/custom_dataset' +train_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', # 我们假定使用子文件夹格式,因此需要将标注文件置空 + data_prefix='train', + )) +val_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', # 我们假定使用子文件夹格式,因此需要将标注文件置空 + data_prefix='test', + )) +test_dataloader = val_dataloader +# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< +``` + +## 第五步:修改训练策略设置(可选) + +微调所使用的训练超参数一般与默认的超参数不同,它通常需要更小的学习率和更快的学习率衰减。 + +```python +_base_ = [ + '../_base_/models/resnet50.py', # 模型设置 + '../_base_/datasets/imagenet_bs32.py', # 数据设置 + '../_base_/schedules/imagenet_bs256.py', # 训练策略设置 + '../_base_/default_runtime.py', # 运行设置 +] + +# 模型设置 +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) + +# 数据设置 +data_root = 'data/custom_dataset' +train_dataloader = dict( + dataset=dict( + 
type='CustomDataset', + data_root=data_root, + ann_file='', + data_prefix='train', + )) +val_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', + data_prefix='test', + )) +test_dataloader = val_dataloader + +# >>>>>>>>>>>>>>> 在这里重载训练策略设置 >>>>>>>>>>>>>>>>>>> +# 优化器超参数 +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) +# 学习率策略 +param_scheduler = dict( + type='MultiStepLR', by_epoch=True, milestones=[15], gamma=0.1) +# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< +``` + +```{tip} +更多关于配置文件的信息,请参阅[学习配置文件](../user_guides/config.md) +``` + +## 开始训练 + +现在,我们完成了用于微调的配置文件,完整的文件如下: + +```python +_base_ = [ + '../_base_/models/resnet50.py', # 模型设置 + '../_base_/datasets/imagenet_bs32.py', # 数据设置 + '../_base_/schedules/imagenet_bs256.py', # 训练策略设置 + '../_base_/default_runtime.py', # 运行设置 +] + +# 模型设置 +model = dict( + backbone=dict( + frozen_stages=2, + init_cfg=dict( + type='Pretrained', + checkpoint='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + prefix='backbone', + )), + head=dict(num_classes=10), +) + +# 数据设置 +data_root = 'data/custom_dataset' +train_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', + data_prefix='train', + )) +val_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', + data_prefix='test', + )) +test_dataloader = val_dataloader + +# 训练策略设置 +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)) +param_scheduler = dict( + type='MultiStepLR', by_epoch=True, milestones=[15], gamma=0.1) +``` + +接下来,我们使用一台 8 张 GPU 的电脑来训练我们的模型,指令如下: + +```shell +bash tools/dist_train.sh configs/resnet/resnet50_8xb32-ft_custom.py 8 +``` + +当然,我们也可以使用单张 GPU 来进行训练,使用如下命令: + +```shell +python tools/train.py configs/resnet/resnet50_8xb32-ft_custom.py +``` + +但是如果我们使用单张 GPU 进行训练的话,需要在数据集设置部分作如下修改: + +```python +data_root = 'data/custom_dataset' +train_dataloader = dict( + batch_size=256, + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', + data_prefix='train', + )) +val_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root=data_root, + ann_file='', + data_prefix='test', + )) +test_dataloader = val_dataloader +``` + +这是因为我们的训练策略是针对批次大小(batch size)为 256 设置的。在父配置文件中, +设置了单张 `batch_size=32`,如果使用 8 张 GPU,总的批次大小就是 256。而如果使 +用单张 GPU,就必须手动修改 `batch_size=256` 来匹配训练策略。 + +然而,更大的批次大小需要更大的 GPU 显存,这里有几个简单的技巧来节省显存: + +1. 启用自动混合精度训练 + + ```shell + python tools/train.py configs/resnet/resnet50_8xb32-ft_custom.py --amp + ``` + +2. 使用较小的批次大小,例如仍然使用 `batch_size=32`,而不是 256,并启用学习率自动缩放 + + ```shell + python tools/train.py configs/resnet/resnet50_8xb32-ft_custom.py --auto-scale-lr + ``` + + 学习率自动缩放功能会根据实际的 batch size 和配置文件中的 `auto_scale_lr.base_batch_size` + 字段对学习率进行线性调整(你可以在基配置文件 `configs/_base_/schedules/imagenet_bs256.py` + 中找到这一字段) + +```{note} +以上技巧都有可能对训练效果造成轻微影响。 +``` + +### 在命令行指定预训练模型 + +如果您不想修改配置文件,您可以使用 `--cfg-options` 将您的预训练模型文件添加到 `init_cfg`. 
+ +例如,以下命令也会加载预训练模型: + +```shell +bash tools/dist_train.sh configs/tutorial/resnet50_finetune_cifar.py 8 \ + --cfg-options model.backbone.init_cfg.type='Pretrained' \ + model.backbone.init_cfg.checkpoint='https://download.openmmlab.com/mmselfsup/1.x/mocov3/mocov3_resnet50_8xb512-amp-coslr-100e_in1k/mocov3_resnet50_8xb512-amp-coslr-100e_in1k_20220927-f1144efa.pth' \ + model.backbone.init_cfg.prefix='backbone' \ +``` diff --git a/docs/zh_CN/notes/pretrain_custom_dataset.md b/docs/zh_CN/notes/pretrain_custom_dataset.md new file mode 100644 index 0000000..786ecba --- /dev/null +++ b/docs/zh_CN/notes/pretrain_custom_dataset.md @@ -0,0 +1,247 @@ +# 如何在自定义数据集上进行模型预训练 + +在本教程中,我们提供了一个实践示例和一些有关如何在您自己的数据集上进行训练的技巧。 + +在 MMPretrain 中,我们支持用户直接调用 MMPretrain 的 `CustomDataset` (类似于 `torchvision` 的 `ImageFolder`), 该数据集能自动的读取给的路径下的图片。你只需要准备你的数据集路径,并修改配置文件,即可轻松使用 MMPretrain 进行预训练。 + +## 第一步:准备你的数据集 + +按照 [准备数据集](../user_guides/dataset_prepare.md) 准备你的数据集。 +假设我们的数据集根文件夹路径为 `data/custom_dataset/` + +假设我们想使用 MAE 算法进行图像自监督训练,并使用子文件夹格式的 `CustomDataset` 来组织数据集: + +```text +data/custom_dataset/ +├── sample1.png +├── sample2.png +├── sample3.png +├── sample4.png +└── ... +``` + +## 第二步:选择一个配置文件作为模板 + +在本教程中,我们使用 `configs/mae/mae_vit-base-p16_8xb512-amp-coslr-300e_in1k.py` 作为一个示例进行介绍。 +首先在同一文件夹下复制一份配置文件,并将其重命名为 `mae_vit-base-p16_8xb512-amp-coslr-300e_custom.py`。 + +```{tip} +按照惯例,配置名称的最后一个字段是数据集,例如,`in1k` 表示 ImageNet-1k,`coco` 表示 coco 数据集 +``` + +这个配置文件的内容如下: + +```python +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
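+# 补充说明(示意举例,仅在训练命令中加上 `--auto-scale-lr` 时才会生效):
+# 学习率会按「实际总 batch size / base_batch_size」线性缩放,
+# 例如实际总 batch size 为 2048(8 卡 × 256)时,学习率将乘以 2048 / 4096 = 0.5。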
+auto_scale_lr = dict(base_batch_size=4096) +``` + +## 第三步:修改数据集设置 + +- 重载数据集设置中的 `type` 为 `'CustomDataset'` +- 重载数据集设置中的 `data_root` 为 `data/custom_dataset` +- 重载数据集设置中的 `ann_file` 为空字符串,这是因为我们使用子文件格式的 `CustomDataset`,需要将配置文件置空 +- 重载数据集设置中的 `data_prefix` 为空字符串,这是因为我们希望使用数据集根目录下的所有数据进行训练,并不需要将其拆分为不同子集。 + +修改后的文件应如下: + +```python +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_bs512_mae.py', + '../_base_/default_runtime.py', +] + +# >>>>>>>>>>>>>>> 在此重载数据设置 >>>>>>>>>>>>>>>>>>> +train_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root='data/custom_dataset/', + ann_file='', # 我们假定使用子文件夹格式,因此需要将标注文件置空 + data_prefix='', # 使用 `data_root` 路径下所有数据 + with_label=False, + ) +) +# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) +``` + +使用上述配置文件,你就能够轻松的在自定义数据集上使用 `MAE` 算法来进行预训练了。 + +## 另一个例子:在 COCO 数据集上训练 MAE + +```{note} +你可能需要参考[文档](https://github.com/open-mmlab/mmdetection/blob/3.x/docs/en/get_started.md)安装 MMDetection 来使用 `mmdet.CocoDataset`。 +``` + +与在自定义数据集上进行预训练类似,我们在本教程中也提供了一个使用 COCO 数据集进行预训练的示例。修改后的文件如下: + +```python +# >>>>>>>>>>>>>>>>>>>>> Start of Changed >>>>>>>>>>>>>>>>>>>>>>>>> +_base_ = [ + '../_base_/models/mae_vit-base-p16.py', + '../_base_/datasets/imagenet_mae.py', + '../_base_/default_runtime.py', +] + +# >>>>>>>>>>>>>>> 在这里重载数据配置 >>>>>>>>>>>>>>>>>>> +train_dataloader = dict( + dataset=dict( + type='mmdet.CocoDataset', + data_root='data/coco/', + ann_file='annotations/instances_train2017.json', # 仅用于加载图片,不会使用标签 + data_prefix=dict(img='train2017/'), + ) +) +# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< + +# optimizer wrapper +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) 
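+            # 补充注释:以上 norm/bias/token 类参数设置 decay_mult=0,即不做权重衰减,
+            # 这是 MAE 等自监督预训练中常见的做法。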
+ })) +# learning rate scheduler +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] +# runtime settings +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) +randomness = dict(seed=0, diff_rank_seed=True) +# auto resume +resume = True +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) +``` diff --git a/docs/zh_CN/notes/projects.md b/docs/zh_CN/notes/projects.md new file mode 100644 index 0000000..0843dc4 --- /dev/null +++ b/docs/zh_CN/notes/projects.md @@ -0,0 +1 @@ +# 基于 MMPretrain 的项目列表(待更新) diff --git a/docs/zh_CN/stat.py b/docs/zh_CN/stat.py new file mode 100644 index 0000000..29e5756 --- /dev/null +++ b/docs/zh_CN/stat.py @@ -0,0 +1,249 @@ +#!/usr/bin/env python +import re +import warnings +from collections import defaultdict +from pathlib import Path + +from modelindex.load_model_index import load +from modelindex.models.Result import Result +from tabulate import tabulate + +MMPT_ROOT = Path(__file__).absolute().parents[2] +PAPERS_ROOT = Path('papers') # Path to save generated paper pages. +GITHUB_PREFIX = 'https://github.com/open-mmlab/mmpretrain/blob/main/' +MODELZOO_TEMPLATE = """\ +# 模型库统计 + +在本页面中,我们列举了我们支持的[所有算法](#所有已支持的算法)。你可以点击链接跳转至对应的模型详情页面。 + +另外,我们还列出了我们提供的所有模型权重文件。你可以使用排序和搜索功能找到需要的模型权重,并使用链接跳转至模型详情页面。 + +## 所有已支持的算法 + +* 论文数量:{num_papers} +{type_msg} + +* 模型权重文件数量:{num_ckpts} +{paper_msg} + +""" # noqa: E501 + +METRIC_ALIAS = { + 'Top 1 Accuracy': 'Top-1 (%)', + 'Top 5 Accuracy': 'Top-5 (%)', +} + +model_index = load(str(MMPT_ROOT / 'model-index.yml')) + + +def build_collections(model_index): + col_by_name = {} + for col in model_index.collections: + setattr(col, 'models', []) + col_by_name[col.name] = col + + for model in model_index.models: + col = col_by_name[model.in_collection] + col.models.append(model) + setattr(model, 'collection', col) + if model.results is None: + setattr(model, 'tasks', []) + else: + setattr(model, 'tasks', [result.task for result in model.results]) + + +build_collections(model_index) + + +def count_papers(collections): + total_num_ckpts = 0 + type_count = defaultdict(int) + paper_msgs = [] + + for collection in collections: + with open(MMPT_ROOT / collection.readme) as f: + readme = f.read() + ckpts = set(x.lower().strip() + for x in re.findall(r'\[model\]\((https?.*)\)', readme)) + total_num_ckpts += len(ckpts) + title = collection.paper['Title'] + papertype = collection.data.get('type', 'Algorithm') + type_count[papertype] += 1 + + readme = PAPERS_ROOT / Path( + collection.filepath).parent.with_suffix('.md').name + paper_msgs.append( + f'\t- [{papertype}] [{title}]({readme}) ({len(ckpts)} ckpts)') + + type_msg = '\n'.join( + [f'\t- {type_}: {count}' for type_, count in type_count.items()]) + paper_msg = '\n'.join(paper_msgs) + + modelzoo = MODELZOO_TEMPLATE.format( + num_papers=len(collections), + num_ckpts=total_num_ckpts, + type_msg=type_msg, + paper_msg=paper_msg, + ) + + with open('modelzoo_statistics.md', 'w') as f: + f.write(modelzoo) + + +count_papers(model_index.collections) + + +def generate_paper_page(collection): + PAPERS_ROOT.mkdir(exist_ok=True) + + # Write a copy of README + with 
open(MMPT_ROOT / collection.readme) as f: + readme = f.read() + folder = Path(collection.filepath).parent + copy = PAPERS_ROOT / folder.with_suffix('.md').name + + def replace_link(matchobj): + # Replace relative link to GitHub link. + name = matchobj.group(1) + link = matchobj.group(2) + if not link.startswith('http'): + assert (folder / link).exists(), \ + f'Link not found:\n{collection.readme}: {link}' + rel_link = (folder / link).absolute().relative_to(MMPT_ROOT) + link = GITHUB_PREFIX + str(rel_link) + return f'[{name}]({link})' + + content = re.sub(r'\[([^\]]+)\]\(([^)]+)\)', replace_link, readme) + content = f'---\ngithub_page: /{collection.readme}\n---\n' + content + + def make_tabs(matchobj): + """modify the format from emphasis black symbol to tabs.""" + content = matchobj.group() + content = content.replace('', '') + content = content.replace('', '') + + # split the content by "**{Tab-Name}**"" + splits = re.split(r'^\*\*(.*)\*\*$', content, flags=re.M)[1:] + tabs_list = [] + for title, tab_content in zip(splits[::2], splits[1::2]): + title = ':::{tab} ' + title + '\n' + tab_content = tab_content.strip() + '\n:::\n' + tabs_list.append(title + tab_content) + + return '::::{tabs}\n' + ''.join(tabs_list) + '::::' + + if '' in content and '' in content: + # Make TABS block a selctive tabs + try: + pattern = r'([\d\D]*?)' + content = re.sub(pattern, make_tabs, content) + except Exception as e: + warnings.warn(f'Can not parse the TABS, get an error : {e}') + + with open(copy, 'w') as copy_file: + copy_file.write(content) + + +for collection in model_index.collections: + generate_paper_page(collection) + + +def scatter_results(models): + model_result_pairs = [] + for model in models: + if model.results is None: + result = Result(task=None, dataset=None, metrics={}) + model_result_pairs.append((model, result)) + else: + for result in model.results: + model_result_pairs.append((model, result)) + return model_result_pairs + + +def generate_summary_table(task, model_result_pairs, title=None): + metrics = set() + for model, result in model_result_pairs: + if result.task == task: + metrics = metrics.union(result.metrics.keys()) + metrics = sorted(list(metrics)) + + rows = [] + for model, result in model_result_pairs: + if result.task != task: + continue + name = model.name + params = f'{model.metadata.parameters / 1e6:.2f}' # Params + if model.metadata.flops is not None: + flops = f'{model.metadata.flops / 1e9:.2f}' # Flops + else: + flops = None + readme = Path(model.collection.filepath).parent.with_suffix('.md').name + page = f'[链接]({PAPERS_ROOT / readme})' + model_metrics = [] + for metric in metrics: + model_metrics.append(str(result.metrics.get(metric, ''))) + + rows.append([name, params, flops, *model_metrics, page]) + + with open('modelzoo_statistics.md', 'a') as f: + if title is not None: + f.write(f'\n{title}') + f.write("""\n```{table}\n:class: model-summary\n""") + header = [ + '模型', + '参数量 (M)', + 'Flops (G)', + *[METRIC_ALIAS.get(metric, metric) for metric in metrics], + 'Readme', + ] + table_cfg = dict( + tablefmt='pipe', + floatfmt='.2f', + numalign='right', + stralign='center') + f.write(tabulate(rows, header, **table_cfg)) + f.write('\n```\n') + + +def generate_dataset_wise_table(task, model_result_pairs, title=None): + dataset_rows = defaultdict(list) + for model, result in model_result_pairs: + if result.task == task: + dataset_rows[result.dataset].append((model, result)) + + if title is not None: + with open('modelzoo_statistics.md', 'a') as f: + f.write(f'\n{title}') + 
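+    # 按数据集分组后,在下方循环中为每个数据集分别生成一张汇总表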
for dataset, pairs in dataset_rows.items(): + generate_summary_table(task, pairs, title=f'### {dataset}') + + +model_result_pairs = scatter_results(model_index.models) + +# Generate Pretrain Summary +generate_summary_table( + task=None, + model_result_pairs=model_result_pairs, + title='## 预训练模型', +) + +# Generate Image Classification Summary +generate_dataset_wise_table( + task='Image Classification', + model_result_pairs=model_result_pairs, + title='## 图像分类', +) + +# Generate Multi-Label Classification Summary +generate_dataset_wise_table( + task='Multi-Label Classification', + model_result_pairs=model_result_pairs, + title='## 图像多标签分类', +) + +# Generate Image Retrieval Summary +generate_dataset_wise_table( + task='Image Retrieval', + model_result_pairs=model_result_pairs, + title='## 图像检索', +) diff --git a/docs/zh_CN/useful_tools/cam_visualization.md b/docs/zh_CN/useful_tools/cam_visualization.md new file mode 100644 index 0000000..94d5ed1 --- /dev/null +++ b/docs/zh_CN/useful_tools/cam_visualization.md @@ -0,0 +1,164 @@ +# 类别激活图(CAM)可视化 + +## 类别激活图可视化工具介绍 + +MMPretrain 提供 `tools/visualization/vis_cam.py` 工具来可视化类别激活图。请使用 `pip install "grad-cam>=1.3.6"` 安装依赖的 [pytorch-grad-cam](https://github.com/jacobgil/pytorch-grad-cam)。 + +目前支持的方法有: + +| Method | What it does | +| :----------: | :-----------------------------------------------------------------------------------------------: | +| GradCAM | 使用平均梯度对 2D 激活进行加权 | +| GradCAM++ | 类似 GradCAM,但使用了二阶梯度 | +| XGradCAM | 类似 GradCAM,但通过归一化的激活对梯度进行了加权 | +| EigenCAM | 使用 2D 激活的第一主成分(无法区分类别,但效果似乎不错) | +| EigenGradCAM | 类似 EigenCAM,但支持类别区分,使用了激活 * 梯度的第一主成分,看起来和 GradCAM 差不多,但是更干净 | +| LayerCAM | 使用正梯度对激活进行空间加权,对于浅层有更好的效果 | + +也可以使用新版本 `pytorch-grad-cam` 支持的更多 CAM 方法,但我们尚未验证可用性。 + +**命令行**: + +```bash +python tools/visualization/vis_cam.py \ + ${IMG} \ + ${CONFIG_FILE} \ + ${CHECKPOINT} \ + [--target-layers ${TARGET-LAYERS}] \ + [--preview-model] \ + [--method ${METHOD}] \ + [--target-category ${TARGET-CATEGORY}] \ + [--save-path ${SAVE_PATH}] \ + [--vit-like] \ + [--num-extra-tokens ${NUM-EXTRA-TOKENS}] + [--aug_smooth] \ + [--eigen_smooth] \ + [--device ${DEVICE}] \ + [--cfg-options ${CFG-OPTIONS}] +``` + +**所有参数的说明**: + +- `img`:目标图片路径。 +- `config`:模型配置文件的路径。 +- `checkpoint`:权重路径。 +- `--target-layers`:所查看的网络层名称,可输入一个或者多个网络层,如果不设置,将使用最后一个`block`中的`norm`层。 +- `--preview-model`:是否查看模型所有网络层。 +- `--method`:类别激活图图可视化的方法,目前支持 `GradCAM`, `GradCAM++`, `XGradCAM`, `EigenCAM`, `EigenGradCAM`, `LayerCAM`,不区分大小写。如果不设置,默认为 `GradCAM`。 +- `--target-category`:查看的目标类别,如果不设置,使用模型检测出来的类别做为目标类别。 +- `--save-path`:保存的可视化图片的路径,默认不保存。 +- `--eigen-smooth`:是否使用主成分降低噪音,默认不开启。 +- `--vit-like`: 是否为 `ViT` 类似的 Transformer-based 网络 +- `--num-extra-tokens`: `ViT` 类网络的额外的 tokens 通道数,默认使用主干网络的 `num_extra_tokens`。 +- `--aug-smooth`:是否使用测试时增强 +- `--device`:使用的计算设备,如果不设置,默认为'cpu'。 +- `--cfg-options`:对配置文件的修改,参考[学习配置文件](../user_guides/config.md)。 + +```{note} +在指定 `--target-layers` 时,如果不知道模型有哪些网络层,可使用命令行添加 `--preview-model` 查看所有网络层名称; +``` + +## 如何可视化 CNN 网络的类别激活图(如 ResNet-50) + +`--target-layers` 在 `Resnet-50` 中的一些示例如下: + +- `'backbone.layer4'`,表示第四个 `ResLayer` 层的输出。 +- `'backbone.layer4.2'` 表示第四个 `ResLayer` 层中第三个 `BottleNeck` 块的输出。 +- `'backbone.layer4.2.conv1'` 表示上述 `BottleNeck` 块中 `conv1` 层的输出。 + +1. 
使用不同方法可视化 `ResNet50`,默认 `target-category` 为模型检测的结果,使用默认推导的 `target-layers`。 + + ```shell + python tools/visualization/vis_cam.py \ + demo/bird.JPEG \ + configs/resnet/resnet50_8xb32_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth \ + --method GradCAM + # GradCAM++, XGradCAM, EigenCAM, EigenGradCAM, LayerCAM + ``` + + | Image | GradCAM | GradCAM++ | EigenGradCAM | LayerCAM | + | ------------------------------------ | --------------------------------------- | ----------------------------------------- | -------------------------------------------- | ---------------------------------------- | + |
|
|
|
|
| + +2. 同一张图不同类别的激活图效果图,在 `ImageNet` 数据集中,类别 238 为 'Greater Swiss Mountain dog',类别 281 为 'tabby, tabby cat'。 + + ```shell + python tools/visualization/vis_cam.py \ + demo/cat-dog.png configs/resnet/resnet50_8xb32_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth \ + --target-layers 'backbone.layer4.2' \ + --method GradCAM \ + --target-category 238 + # --target-category 281 + ``` + + | Category | Image | GradCAM | XGradCAM | LayerCAM | + | -------- | ---------------------------------------------- | ------------------------------------------------ | ------------------------------------------------- | ------------------------------------------------- | + | Dog |
|
|
|
| + | Cat |
|
|
|
| + +3. 使用 `--eigen-smooth` 以及 `--aug-smooth` 获取更好的可视化效果。 + + ```shell + python tools/visualization/vis_cam.py \ + demo/dog.jpg \ + configs/mobilenet_v3/mobilenet-v3-large_8xb128_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_large-3ea3c186.pth \ + --target-layers 'backbone.layer16' \ + --method LayerCAM \ + --eigen-smooth --aug-smooth + ``` + + | Image | LayerCAM | eigen-smooth | aug-smooth | eigen&aug | + | ------------------------------------ | --------------------------------------- | ------------------------------------------- | ----------------------------------------- | ----------------------------------------- | + |
|
|
|
|
| + +## 如何可视化 Transformer 类型网络的类别激活图 + +`--target-layers` 在 Transformer-based 网络中的一些示例如下: + +- Swin-Transformer 中:`'backbone.norm3'` +- ViT 中:`'backbone.layers.11.ln1'` + +对于 Transformer-based 的网络,比如 ViT、T2T-ViT 和 Swin-Transformer,特征是被展平的。为了绘制 CAM 图,我们需要指定 `--vit-like` 选项,从而让被展平的特征恢复方形的特征图。 + +除了特征被展平之外,一些类 ViT 的网络还会添加额外的 tokens。比如 ViT 和 T2T-ViT 中添加了分类 token,DeiT 中还添加了蒸馏 token。在这些网络中,分类计算在最后一个注意力模块之后就已经完成了,分类得分也只和这些额外的 tokens 有关,与特征图无关,也就是说,分类得分对这些特征图的导数为 0。因此,我们不能使用最后一个注意力模块的输出作为 CAM 绘制的目标层。 + +另外,为了去除这些额外的 toekns 以获得特征图,我们需要知道这些额外 tokens 的数量。MMPretrain 中几乎所有 Transformer-based 的网络都拥有 `num_extra_tokens` 属性。而如果你希望将此工具应用于新的,或者第三方的网络,而且该网络没有指定 `num_extra_tokens` 属性,那么可以使用 `--num-extra-tokens` 参数手动指定其数量。 + +1. 对 `Swin Transformer` 使用默认 `target-layers` 进行 CAM 可视化: + + ```shell + python tools/visualization/vis_cam.py \ + demo/bird.JPEG \ + configs/swin_transformer/swin-tiny_16xb64_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/swin-transformer/swin_tiny_224_b16x64_300e_imagenet_20210616_090925-66df6be6.pth \ + --vit-like + ``` + +2. 对 `Vision Transformer(ViT)` 进行 CAM 可视化: + + ```shell + python tools/visualization/vis_cam.py \ + demo/bird.JPEG \ + configs/vision_transformer/vit-base-p16_64xb64_in1k-384px.py \ + https://download.openmmlab.com/mmclassification/v0/vit/finetune/vit-base-p16_in21k-pre-3rdparty_ft-64xb64_in1k-384_20210928-98e8652b.pth \ + --vit-like \ + --target-layers 'backbone.layers.11.ln1' + ``` + +3. 对 `T2T-ViT` 进行 CAM 可视化: + + ```shell + python tools/visualization/vis_cam.py \ + demo/bird.JPEG \ + configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py \ + https://download.openmmlab.com/mmclassification/v0/t2t-vit/t2t-vit-t-14_3rdparty_8xb64_in1k_20210928-b7c09b62.pth \ + --vit-like \ + --target-layers 'backbone.encoder.12.ln1' + ``` + +| Image | ResNet50 | ViT | Swin | T2T-ViT | +| --------------------------------------- | ------------------------------------------ | -------------------------------------- | --------------------------------------- | ------------------------------------------ | +|
|
|
|
|
| diff --git a/docs/zh_CN/useful_tools/complexity_analysis.md b/docs/zh_CN/useful_tools/complexity_analysis.md new file mode 100644 index 0000000..83e763252 --- /dev/null +++ b/docs/zh_CN/useful_tools/complexity_analysis.md @@ -0,0 +1,80 @@ +# 模型复杂度分析 + +## 计算 FLOPs 和参数数量(实验性的) + +我们根据 [MMEngine](https://github.com/open-mmlab/mmengine/blob/main/mmengine/analysis/complexity_analysis.py) 提供了一个脚本用于计算给定模型的 FLOPs 和参数量。 + +```shell +python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}] +``` + +所有参数说明: + +- `config` : 配置文件的路径。 +- `--shape`: 输入尺寸,支持单值或者双值, 如: `--shape 256`、`--shape 224 256`。默认为`224 224`。 + +示例: + +```shell +python tools/analysis_tools/get_flops.py configs/resnet/resnet50_8xb32_in1k.py +``` + +你将获得如下结果: + +```text +============================== +Input shape: (3, 224, 224) +Flops: 4.109G +Params: 25.557M +Activation: 11.114M +============================== +``` + +同时,你会得到每层的详细复杂度信息,如下所示: + +```text ++--------------------------+----------------------+-----------+--------------+ +| module | #parameters or shape | #flops | #activations | ++--------------------------+----------------------+-----------+--------------+ +| model | 25.557M | 4.109G | 11.114M | +| backbone | 23.508M | 4.109G | 11.114M | +| backbone.conv1 | 9.408K | 0.118G | 0.803M | +| backbone.conv1.weight | (64, 3, 7, 7) | | | +| backbone.bn1 | 0.128K | 1.606M | 0 | +| backbone.bn1.weight | (64,) | | | +| backbone.bn1.bias | (64,) | | | +| backbone.layer1 | 0.216M | 0.677G | 4.415M | +| backbone.layer1.0 | 75.008K | 0.235G | 2.007M | +| backbone.layer1.1 | 70.4K | 0.221G | 1.204M | +| backbone.layer1.2 | 70.4K | 0.221G | 1.204M | +| backbone.layer2 | 1.22M | 1.034G | 3.111M | +| backbone.layer2.0 | 0.379M | 0.375G | 1.305M | +| backbone.layer2.1 | 0.28M | 0.22G | 0.602M | +| backbone.layer2.2 | 0.28M | 0.22G | 0.602M | +| backbone.layer2.3 | 0.28M | 0.22G | 0.602M | +| backbone.layer3 | 7.098M | 1.469G | 2.158M | +| backbone.layer3.0 | 1.512M | 0.374G | 0.652M | +| backbone.layer3.1 | 1.117M | 0.219G | 0.301M | +| backbone.layer3.2 | 1.117M | 0.219G | 0.301M | +| backbone.layer3.3 | 1.117M | 0.219G | 0.301M | +| backbone.layer3.4 | 1.117M | 0.219G | 0.301M | +| backbone.layer3.5 | 1.117M | 0.219G | 0.301M | +| backbone.layer4 | 14.965M | 0.81G | 0.627M | +| backbone.layer4.0 | 6.04M | 0.373G | 0.326M | +| backbone.layer4.1 | 4.463M | 0.219G | 0.151M | +| backbone.layer4.2 | 4.463M | 0.219G | 0.151M | +| head.fc | 2.049M | | | +| head.fc.weight | (1000, 2048) | | | +| head.fc.bias | (1000,) | | | +| neck.gap | | 0.1M | 0 | ++--------------------------+----------------------+-----------+--------------+ +``` + +```{warning} +警告 + +此工具仍处于试验阶段,我们不保证该数字正确无误。您最好将结果用于简单比较,但在技术报告或论文中采用该结果之前,请仔细检查。 + +- FLOPs 与输入的尺寸有关,而参数量与输入尺寸无关。默认输入尺寸为 (1, 3, 224, 224) +- 一些运算不会被计入 FLOPs 的统计中,例如某些自定义运算。详细信息请参考 [`mmengine.analysis.complexity_analysis._DEFAULT_SUPPORTED_FLOP_OPS`](https://github.com/open-mmlab/mmengine/blob/main/mmengine/analysis/complexity_analysis.py)。 +``` diff --git a/docs/zh_CN/useful_tools/confusion_matrix.md b/docs/zh_CN/useful_tools/confusion_matrix.md new file mode 100644 index 0000000..98c039c --- /dev/null +++ b/docs/zh_CN/useful_tools/confusion_matrix.md @@ -0,0 +1,83 @@ +# 混淆矩阵 + +MMPretrain 提供 `tools/analysis_tools/confusion_matrix.py` 工具来分析预测结果的混淆矩阵。关于混淆矩阵的介绍,可参考[链接](https://zh.wikipedia.org/zh-cn/%E6%B7%B7%E6%B7%86%E7%9F%A9%E9%98%B5)。 + +## 命令行使用 + +**命令行**: + +```shell +python tools/analysis_tools/confusion_matrix.py \ + ${CONFIG_FILE} \ + ${CHECKPOINT} \ + [--show] \ + [--show-path] \ + 
[--include-values] \ + [--cmap ${CMAP}] \ + [--cfg-options ${CFG-OPTIONS}] +``` + +**所有参数的说明**: + +- `config`:模型配置文件的路径。 +- `checkpoint`:权重路径。 +- `--show`:是否展示混淆矩阵的 matplotlib 可视化结果,默认不展示。 +- `--show-path`:如果 `show` 为 True,可视化结果的保存路径。 +- `--include-values`:是否在可视化结果上添加数值。 +- `--cmap`:可视化结果使用的颜色映射图,即 `cmap`,默认为 `viridis`。 +- `--cfg-options`:对配置文件的修改,参考[学习配置文件](../user_guides/config.md)。 + +**使用示例**: + +```shell +python tools/analysis_tools/confusion_matrix.py \ + configs/resnet/resnet50_8xb16_cifar10.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth \ + --show +``` + +**输出图片**: + +
+ +## 基础用法 + +```python +>>> import torch +>>> from mmpretrain.evaluation import ConfusionMatrix +>>> y_pred = [0, 1, 1, 3] +>>> y_true = [0, 2, 1, 3] +>>> ConfusionMatrix.calculate(y_pred, y_true, num_classes=4) +tensor([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 1, 0, 0], + [0, 0, 0, 1]]) +>>> # plot the confusion matrix +>>> import matplotlib.pyplot as plt +>>> y_score = torch.rand((1000, 10)) +>>> y_true = torch.randint(10, (1000, )) +>>> matrix = ConfusionMatrix.calculate(y_score, y_true) +>>> ConfusionMatrix().plot(matrix) +>>> plt.show() +``` + +## 结合评估器使用 + +```python +>>> import torch +>>> from mmpretrain.evaluation import ConfusionMatrix +>>> from mmpretrain.structures import DataSample +>>> from mmengine.evaluator import Evaluator +>>> data_samples = [ +... DataSample().set_gt_label(i%5).set_pred_score(torch.rand(5)) +... for i in range(1000) +... ] +>>> evaluator = Evaluator(metrics=ConfusionMatrix()) +>>> evaluator.process(data_samples) +>>> evaluator.evaluate(1000) +{'confusion_matrix/result': tensor([[37, 37, 48, 43, 35], + [35, 51, 32, 46, 36], + [45, 28, 39, 42, 46], + [42, 40, 40, 35, 43], + [40, 39, 41, 37, 43]])} +``` diff --git a/docs/zh_CN/useful_tools/dataset_visualization.md b/docs/zh_CN/useful_tools/dataset_visualization.md new file mode 100644 index 0000000..aa20251 --- /dev/null +++ b/docs/zh_CN/useful_tools/dataset_visualization.md @@ -0,0 +1,90 @@ +# 数据集可视化 + +## 数据集可视化工具简介 + +```bash +python tools/visualization/browse_dataset.py \ + ${CONFIG_FILE} \ + [-o, --output-dir ${OUTPUT_DIR}] \ + [-p, --phase ${DATASET_PHASE}] \ + [-n, --show-number ${NUMBER_IMAGES_DISPLAY}] \ + [-i, --show-interval ${SHOW_INTERRVAL}] \ + [-m, --mode ${DISPLAY_MODE}] \ + [-r, --rescale-factor ${RESCALE_FACTOR}] \ + [-c, --channel-order ${CHANNEL_ORDER}] \ + [--cfg-options ${CFG_OPTIONS}] +``` + +**所有参数的说明**: + +- `config` : 模型配置文件的路径。 +- `-o, --output-dir`: 保存图片文件夹,如果没有指定,默认为 `''`,表示不保存。 +- **`-p, --phase`**: 可视化数据集的阶段,只能为 `['train', 'val', 'test']` 之一,默认为 `'train'`。 +- **`-n, --show-number`**: 可视化样本数量。如果没有指定,默认展示数据集的所有图片。 +- `-i, --show-interval`: 浏览时,每张图片的停留间隔,单位为秒。 +- **`-m, --mode`**: 可视化的模式,只能为 `['original', 'transformed', 'concat', 'pipeline']` 之一。 默认为`'transformed'`. +- `-r, --rescale-factor`: 在 `mode='original'` 下,可视化图片的放缩倍数,在图片过大或过小时设置。 +- `-c, --channel-order`: 图片的通道顺序,为 `['BGR', 'RGB']` 之一,默认为 `'BGR'`。 +- `--cfg-options` : 对配置文件的修改,参考[学习配置文件](../user_guides/config.md)。 + +```{note} + +1. `-m, --mode` 用于设置可视化的模式,默认设置为 'transformed'。 +- 如果 `--mode` 设置为 'original',则获取原始图片; +- 如果 `--mode` 设置为 'transformed',则获取预处理后的图片; +- 如果 `--mode` 设置为 'concat',获取原始图片和预处理后图片拼接的图片; +- 如果 `--mode` 设置为 'pipeline',则获得数据流水线所有中间过程图片。 + +2. `-r, --rescale-factor` 在数据集中图片的分辨率过大或者过小时设置。比如在可视化 CIFAR 数据集时,由于图片的分辨率非常小,可将 `-r, --rescale-factor` 设置为 10。 +``` + +## 如何可视化原始图像 + +使用 **'original'** 模式 : + +```shell +python ./tools/visualization/browse_dataset.py ./configs/resnet/resnet101_8xb16_cifar10.py --phase val --output-dir tmp --mode original --show-number 100 --rescale-factor 10 --channel-order RGB +``` + +- `--phase val`: 可视化验证集,可简化为 `-p val`; +- `--output-dir tmp`: 可视化结果保存在 "tmp" 文件夹,可简化为 `-o tmp`; +- `--mode original`: 可视化原图,可简化为 `-m original`; +- `--show-number 100`: 可视化 100 张图,可简化为 `-n 100`; +- `--rescale-factor`: 图像放大 10 倍,可简化为 `-r 10`; +- `--channel-order RGB`: 可视化图像的通道顺序为 "RGB", 可简化为 `-c RGB`。 + +
+ +## 如何可视化处理后图像 + +使用 **'transformed'** 模式: + +```shell +python ./tools/visualization/browse_dataset.py ./configs/resnet/resnet50_8xb32_in1k.py -n 100 +``` + +
+ +## 如何同时可视化原始图像与处理后图像 + +使用 **'concat'** 模式: + +```shell +python ./tools/visualization/browse_dataset.py configs/swin_transformer/swin-small_16xb64_in1k.py -n 10 -m concat +``` + +
+ +使用 **'pipeline'** 模式: + +```shell +python ./tools/visualization/browse_dataset.py configs/swin_transformer/swin-small_16xb64_in1k.py -m pipeline +``` + +
+ +```shell +python ./tools/visualization/browse_dataset.py configs/beit/beit_beit-base-p16_8xb256-amp-coslr-300e_in1k.py -m pipeline +``` + +
diff --git a/docs/zh_CN/useful_tools/log_result_analysis.md b/docs/zh_CN/useful_tools/log_result_analysis.md new file mode 100644 index 0000000..748a907 --- /dev/null +++ b/docs/zh_CN/useful_tools/log_result_analysis.md @@ -0,0 +1,223 @@ +# 日志分析工具 + +## 日志分析 + +### 日志分析工具介绍 + +`tools/analysis_tools/analyze_logs.py` 脚本绘制指定键值的变化曲线。 + +
+
+```shell
+python tools/analysis_tools/analyze_logs.py plot_curve \
+    ${JSON_LOGS} \
+    [--keys ${KEYS}] \
+    [--title ${TITLE}] \
+    [--legend ${LEGEND}] \
+    [--backend ${BACKEND}] \
+    [--style ${STYLE}] \
+    [--out ${OUT_FILE}] \
+    [--window-size ${WINDOW_SIZE}]
+```
+
+**所有参数的说明:**
+
+- `json_logs`:训练日志文件(json 格式)的路径,可同时传入多个,使用空格分开。
+- `--keys`:待分析的日志字段,可同时指定多个,绘制的曲线总数为 `len(${JSON_LOGS}) * len(${KEYS})`,默认为 `loss`。
+- `--title`:生成图片的标题,默认为空。
+- `--legend`:图例的名称,其数目必须与 `len(${JSON_LOGS}) * len(${KEYS})` 相等,默认使用 `"${JSON_LOG}-${KEYS}"`。
+- `--backend`:matplotlib 的绘图后端,默认由 matplotlib 自动选择。
+- `--style`:绘图配色风格,默认为 `whitegrid`。
+- `--out`:保存分析图片的路径,如不指定则不保存。
+- `--window-size`:可视化窗口大小,如果没有指定,默认为 `'12*7'`。如果需要指定,需按照格式 `'W*H'`。
+
+```{note}
+`--style` 选项依赖 `seaborn` 包,请在使用前先安装。
+```
+
+### 如何绘制损失/精度曲线
+
+我们将给出一些示例,来展示如何使用 `tools/analysis_tools/analyze_logs.py` 脚本绘制损失曲线和精度曲线。
+
+#### 绘制某日志文件对应的损失曲线图
+
+```shell
+python tools/analysis_tools/analyze_logs.py plot_curve your_log_json --keys loss --legend loss
+```
+
+#### 绘制某日志文件对应的 top-1 和 top-5 准确率曲线图,并将曲线图导出为 results.jpg 文件
+
+```shell
+python tools/analysis_tools/analyze_logs.py plot_curve your_log_json --keys accuracy/top1 accuracy/top5 --legend top1 top5 --out results.jpg
+```
+
+#### 在同一图像内绘制两份日志文件对应的 top-1 准确率曲线图
+
+```shell
+python tools/analysis_tools/analyze_logs.py plot_curve log1.json log2.json --keys accuracy/top1 --legend exp1 exp2
+```
+
+### 如何统计训练时间
+
+`tools/analysis_tools/analyze_logs.py` 也可以根据日志文件统计训练耗时。
+
+```shell
+python tools/analysis_tools/analyze_logs.py cal_train_time \
+    ${JSON_LOGS} \
+    [--include-outliers]
+```
+
+**所有参数的说明:**
+
+- `json_logs`:训练日志文件(json 格式)的路径,可同时传入多个,使用空格分开。
+- `--include-outliers`:如果指定,将不会排除每个轮次中第一个时间记录(有时第一轮迭代会耗时较长)。
+
+**示例:**
+
+```shell
+python tools/analysis_tools/analyze_logs.py cal_train_time work_dirs/your_exp/20230206_181002/vis_data/scalars.json
+```
+
+预计输出结果如下所示:
+
+```text
+-----Analyze train time of work_dirs/your_exp/20230206_181002/vis_data/scalars.json-----
+slowest epoch 68, average time is 0.3818
+fastest epoch 1, average time is 0.3694
+time std over epochs is 0.0020
+average iter time: 0.3777 s/iter
+```
+
+## 结果分析
+
+利用 `tools/test.py` 的 `--out` 参数,我们可以将所有样本的推理结果保存到输出文件中。利用这一文件,我们可以进行进一步的分析。
+
+### 如何进行离线度量评估
+
+我们提供了 `tools/analysis_tools/eval_metric.py` 脚本,使用户能够根据预测结果文件评估模型。
+
+```shell
+python tools/analysis_tools/eval_metric.py \
+    ${RESULT} \
+    [--metric ${METRIC_OPTIONS} ...]
+``` + +**所有参数说明**: + +- `result`:`tools/test.py` 输出的结果文件。 +- `--metric`:用于评估结果的指标,请至少指定一个指标,并且你可以通过指定多个 `--metric` 来同时计算多个指标。 + +请参考[评估文档](mmpretrain.evaluation)选择可用的评估指标和对应的选项。 + +```{note} +在 `tools/test.py` 中,我们支持使用 `--out-item` 选项来选择保存何种结果至输出文件。 +请确保没有额外指定 `--out-item`,或指定了 `--out-item=pred`。 +``` + +**示例**: + +```shell +# 获取结果文件 +python tools/test.py configs/resnet/resnet18_8xb16_cifar10.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth \ + --out results.pkl + +# 计算 top-1 和 top-5 准确率 +python tools/analysis_tools/eval_metric.py results.pkl --metric type=Accuracy topk=1,5 + +# 计算总体准确率,各个类别上的精确度、召回率、F1-score +python tools/analysis_tools/eval_metric.py results.pkl --metric type=Accuracy \ + --metric type=SingleLabelMetric items=precision,recall,f1-score average=None +``` + +### 如何绘制测试结果的混淆矩阵 + +我们提供 `tools/analysis_tools/confusion_matrix.py`,帮助用户能够从测试输出文件中绘制混淆矩阵。 + +```shell +python tools/analysis_tools/confusion_matrix.py \ + ${CONFIG} \ + ${RESULT} \ + [--out ${OUT}] \ + [--show] \ + [--show-path ${SHOW_PATH}] \ + [--include-values] \ + [--cmap] \ + [--cfg-options ${CFG_OPTIONS} ...] \ +``` + +**所有参数说明**: + +- `config`:配置文件的路径。 +- `result`:`tools/test.py`的输出结果文件,或是模型权重文件。 +- `--out`:将混淆矩阵保存到指定路径下的 pickle 文件中。 +- `--show`:是否可视化混淆矩阵图。 +- `--show-path`:将可视化混淆矩阵图保存到指定路径下。 +- `--include-values`:是否在可视化混淆矩阵图中显示具体值。 +- `--cmap`:用以可视化混淆矩阵的颜色配置。 +- `--cfg-options`:额外的配置选项,会被合入配置文件,参考[学习配置文件](../user_guides/config.md)。 + +```{note} +在 `tools/test.py` 中,我们支持使用 `--out-item` 选项来选择保存何种结果至输出文件。 +请确保没有额外指定 `--out-item`,或指定了 `--out-item=pred`。 +``` + +**Examples**: + +```shell +# 获取结果文件 +python tools/test.py configs/resnet/resnet18_8xb16_cifar10.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth \ + --out results.pkl + +# 将混淆矩阵计算结果保存至 cm.pkl 中 +python tools/analysis_tools/confusion_matrix.py configs/resnet/resnet18_8xb16_cifar10.py results.pkl --out cm.pkl + +# 可视化混淆矩阵图,并在图形窗口显示 +python tools/analysis_tools/confusion_matrix.py configs/resnet/resnet18_8xb16_cifar10.py results.pkl --show +``` + +### 如何将预测结果可视化 + +我们可以使用脚本 `tools/analysis_tools/analyze_results.py` 来保存预测成功或失败时得分最高的图像。 + +```shell +python tools/analysis_tools/analyze_results.py \ + ${CONFIG} \ + ${RESULT} \ + [--out-dir ${OUT_DIR}] \ + [--topk ${TOPK}] \ + [--rescale-factor ${RESCALE_FACTOR}] \ + [--cfg-options ${CFG_OPTIONS}] +``` + +**所有参数说明:**: + +- `config`:配置文件的路径。 +- `result`:`tools/test.py`的输出结果文件。 +- `--out_dir`:保存结果分析的文件夹路径。 +- `--topk`:分别保存多少张预测成功/失败的图像。如果不指定,默认为 `20`。 +- `--rescale-factor`:图像的缩放系数,如果样本图像过大或过小时可以使用(过小的图像可能导致结果标签非常模糊)。 +- `--cfg-options`:额外的配置选项,会被合入配置文件,参考[学习配置文件](../user_guides/config.md)。 + +```{note} +在 `tools/test.py` 中,我们支持使用 `--out-item` 选项来选择保存何种结果至输出文件。 +请确保没有额外指定 `--out-item`,或指定了 `--out-item=pred`。 +``` + +**示例**: + +```shell +# 获取预测结果文件 +python tools/test.py configs/resnet/resnet18_8xb16_cifar10.py \ + https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth \ + --out results.pkl + +# 保存预测成功/失败的图像中,得分最高的前 10 张,并在可视化时将输出图像放大 10 倍。 +python tools/analysis_tools/analyze_results.py \ + configs/resnet/resnet18_8xb16_cifar10.py \ + results.pkl \ + --out-dir output \ + --topk 10 \ + --rescale-factor 10 +``` diff --git a/docs/zh_CN/useful_tools/model_serving.md b/docs/zh_CN/useful_tools/model_serving.md new file mode 100644 index 0000000..12f6779 --- /dev/null +++ b/docs/zh_CN/useful_tools/model_serving.md @@ -0,0 +1,88 @@ +# TorchServe 部署 + +为了使用 
[`TorchServe`](https://pytorch.org/serve/) 部署一个 `MMPretrain` 模型,需要进行以下几步: + +## 1. 转换 MMPretrain 模型至 TorchServe + +```shell +python tools/torchserve/mmpretrain2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ +--output-folder ${MODEL_STORE} \ +--model-name ${MODEL_NAME} +``` + +```{note} +${MODEL_STORE} 需要是一个文件夹的绝对路径。 +``` + +示例: + +```shell +python tools/torchserve/mmpretrain2torchserve.py \ + configs/resnet/resnet18_8xb32_in1k.py \ + checkpoints/resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + --output-folder ./checkpoints \ + --model-name resnet18_in1k +``` + +## 2. 构建 `mmpretrain-serve` docker 镜像 + +```shell +docker build -t mmpretrain-serve:latest docker/serve/ +``` + +## 3. 运行 `mmpretrain-serve` 镜像 + +请参考官方文档 [基于 docker 运行 TorchServe](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment). + +为了使镜像能够使用 GPU 资源,需要安装 [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)。之后可以传递 `--gpus` 参数以在 GPU 上运。 + +示例: + +```shell +docker run --rm \ +--name mar \ +--cpus 8 \ +--gpus device=0 \ +-p8080:8080 -p8081:8081 -p8082:8082 \ +--mount type=bind,source=`realpath ./checkpoints`,target=/home/model-server/model-store \ +mmpretrain-serve:latest +``` + +```{note} +`realpath ./checkpoints` 是 "./checkpoints" 的绝对路径,你可以将其替换为你保存 TorchServe 模型的目录的绝对路径。 +``` + +参考 [该文档](https://github.com/pytorch/serve/blob/master/docs/rest_api.md) 了解关于推理 (8080),管理 (8081) 和指标 (8082) 等 API 的信息。 + +## 4. 测试部署 + +```shell +curl http://127.0.0.1:8080/predictions/${MODEL_NAME} -T demo/demo.JPEG +``` + +您应该获得类似于以下内容的响应: + +```json +{ + "pred_label": 58, + "pred_score": 0.38102269172668457, + "pred_class": "water snake" +} +``` + +另外,你也可以使用 `test_torchserver.py` 来比较 TorchServe 和 PyTorch 的结果,并进行可视化。 + +```shell +python tools/torchserve/test_torchserver.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} +[--inference-addr ${INFERENCE_ADDR}] [--device ${DEVICE}] +``` + +示例: + +```shell +python tools/torchserve/test_torchserver.py \ + demo/demo.JPEG \ + configs/resnet/resnet18_8xb32_in1k.py \ + checkpoints/resnet18_8xb32_in1k_20210831-fbbb1da6.pth \ + resnet18_in1k +``` diff --git a/docs/zh_CN/useful_tools/print_config.md b/docs/zh_CN/useful_tools/print_config.md new file mode 100644 index 0000000..3ec0d30 --- /dev/null +++ b/docs/zh_CN/useful_tools/print_config.md @@ -0,0 +1,28 @@ +# 打印完整配置文件 + +`print_config.py`脚本脚本会解析所有输入变量,并打印完整配置信息。 + +## 说明: + +`tools/misc/print_config.py` 脚本会逐字打印整个配置文件,并展示所有导入的文件。 + +```shell +python tools/misc/print_config.py ${CONFIG} [--cfg-options ${CFG_OPTIONS}] +``` + +所有参数的说明: + +- `config`:模型配置文件的路径。 +- `--cfg-options`:额外的配置选项,会被合入配置文件,参考[学习配置文件](../user_guides/config.md)。 + +## 示例: + +打印`configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py`文件的完整配置 + +```shell +# 打印完成的配置文件 +python tools/misc/print_config.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py + +# 将完整的配置文件保存为一个独立的配置文件 +python tools/misc/print_config.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py > final_config.py +``` diff --git a/docs/zh_CN/useful_tools/scheduler_visualization.md b/docs/zh_CN/useful_tools/scheduler_visualization.md new file mode 100644 index 0000000..8ac903a --- /dev/null +++ b/docs/zh_CN/useful_tools/scheduler_visualization.md @@ -0,0 +1,44 @@ +# 优化器参数策略可视化 + +该工具旨在帮助用户检查优化器的超参数调度器(无需训练),支持学习率(learning rate)和动量(momentum)。 + +## 工具简介 + +```bash +python tools/visualization/vis_scheduler.py \ + ${CONFIG_FILE} \ + [-p, --parameter ${PARAMETER_NAME}] \ + [-d, --dataset-size ${DATASET_SIZE}] \ + [-n, --ngpus ${NUM_GPUs}] \ + 
[-s, --save-path ${SAVE_PATH}] \ + [--title ${TITLE}] \ + [--style ${STYLE}] \ + [--window-size ${WINDOW_SIZE}] \ + [--cfg-options] +``` + +**所有参数的说明**: + +- `config` : 模型配置文件的路径。 +- **`-p, parameter`**: 可视化参数名,只能为 `["lr", "momentum"]` 之一, 默认为 `"lr"`. +- **`-d, --dataset-size`**: 数据集的大小。如果指定,`build_dataset` 将被跳过并使用这个大小作为数据集大小,默认使用 `build_dataset` 所得数据集的大小。 +- **`-n, --ngpus`**: 使用 GPU 的数量,默认为 1。 +- **`-s, --save-path`**: 保存的可视化图片的路径,默认不保存。 +- `--title`: 可视化图片的标题,默认为配置文件名。 +- `--style`: 可视化图片的风格,默认为 `whitegrid`。 +- `--window-size`: 可视化窗口大小,如果没有指定,默认为 `12*7`。如果需要指定,按照格式 `'W*H'`。 +- `--cfg-options`: 对配置文件的修改,参考[学习配置文件](../user_guides/config.md)。 + +```{note} +部分数据集在解析标注阶段比较耗时,可直接将 `-d, dataset-size` 指定数据集的大小,以节约时间。 +``` + +## 如何在开始训练前可视化学习率曲线 + +你可以使用如下命令来绘制配置文件 `configs/swin_transformer/swin-base_16xb64_in1k.py` 将会使用的变化率曲线: + +```bash +python tools/visualization/vis_scheduler.py configs/swin_transformer/swin-base_16xb64_in1k.py --dataset-size 1281167 --ngpus 16 +``` + +
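+
+如果希望把曲线直接保存为图片而不是在窗口中查看,可以加上 `-s` 参数。下面是一个示意命令(输出文件名为假设值,其余参数含义同上文说明):
+
+```bash
+python tools/visualization/vis_scheduler.py \
+    configs/swin_transformer/swin-base_16xb64_in1k.py \
+    -d 1281167 \
+    -n 16 \
+    -s lr_schedule.png
+```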
diff --git a/docs/zh_CN/useful_tools/shape_bias.md b/docs/zh_CN/useful_tools/shape_bias.md new file mode 100644 index 0000000..f557197 --- /dev/null +++ b/docs/zh_CN/useful_tools/shape_bias.md @@ -0,0 +1,96 @@ +## 形状偏差(Shape Bias)工具用法 + +形状偏差(shape bias)衡量模型与纹理相比,如何依赖形状来感知图像中的语义。关于更多细节,我们向感兴趣的读者推荐这篇[论文](https://arxiv.org/abs/2106.07411) 。MMPretrain提供现成的工具箱来获得分类模型的形状偏差。您可以按照以下步骤操作: + +### 准备数据集 + +首先你应该下载[cue-conflict](https://github.com/bethgelab/model-vs-human/releases/download/v0.1/cue-conflict.tar.gz) 到`data`文件夹,然后解压缩这个数据集。之后,你的`data`文件夹应具有一下结构: + +```text +data +├──cue-conflict +| |──airplane +| |──bear +| ... +| |── truck +``` + +### 修改分类配置 + +我们在使用MAE预训练的ViT-base模型上运行形状偏移工具。它的配置文件为`configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k.py`,它的检查点可从[此链接](https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20220825-cf70aa21.pth) 下载。将原始配置中的test_pipeline, test_dataloader和test_evaluation替换为以下配置: + +```python +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=256, + edge='short', + backend='pillow'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs') +] +test_dataloader = dict( + pin_memory=True, + collate_fn=dict(type='default_collate'), + batch_size=32, + num_workers=4, + dataset=dict( + type='CustomDataset', + data_root='data/cue-conflict', + pipeline=test_pipeline, + _delete_=True), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, + drop_last=False) +test_evaluator = dict( + type='mmpretrain.ShapeBiasMetric', + _delete_=True, + csv_dir='work_dirs/shape_bias', + model_name='mae') +``` + +请注意,你应该对上面的`csv_dir`和`model_name`进行自定义修改。我把修改后的示例配置文件重命名为`configs/mae/benchmarks/`文件夹中的`vit-base-p16_8xb128-coslr-100e_in1k_shape-bias.py`文件。 + +### 用上面修改后的配置文件在你的模型上做推断 + +然后,你应该使用修改后的配置文件在`cue-conflict`数据集上推断你的模型。 + +```shell +# For PyTorch +bash tools/dist_test.sh $CONFIG $CHECKPOINT +``` + +**所有参数的说明**: + +- `$CONFIG`: 修改后的配置文件的路径。 +- `$CHECKPOINT`: 检查点文件的路径或链接。 + +```shell +# Example +bash tools/dist_test.sh configs/mae/benchmarks/vit-base-p16_8xb128-coslr-100e_in1k_shape-bias.py https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-1600e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k/vit-base-p16_ft-8xb128-coslr-100e_in1k_20220825-cf70aa21.pth 1 +``` + +之后,你应该在`csv_dir`文件夹中获得一个名为`cue-conflict_model-name_session-1.csv`的csv文件。除了这个文件以外,你还应该下载这些[csv文件](https://github.com/bethgelab/model-vs-human/tree/master/raw-data/cue-conflict) 到`csv_dir`。 + +### 绘制形状偏差图 + +然后我们可以开始绘制形状偏差图: + +```shell +python tools/analysis_tools/shape_bias.py --csv-dir $CSV_DIR --result-dir $RESULT_DIR --colors $RGB --markers o --plotting-names $YOUR_MODEL_NAME --model-names $YOUR_MODEL_NAME +``` + +**所有参数的说明**: + +- `--csv-dir $CSV_DIR`, 与保存这些csv文件的目录相同。 +- `--result-dir $RESULT_DIR`, 输出名为`cue-conflict_shape-bias_matrixplot.pdf`的结果的目录。 +- `--colors $RGB`, 应该是RGB值,格式为R G B,例如100 100 100,如果你想绘制几个模型的形状偏差,可以是多个RGB值。 +- `--plotting-names $YOUR_MODEL_NAME`, 形状偏移图中图例的名称,您可以将其设置为模型名称。如果要绘制多个模型,plotting_names可以是多个值。 +- `model-names $YOUR_MODEL_NAME`, 应与配置中指定的名称相同,如果要绘制多个模型的形状偏差,则可以是多个名称。 + +请注意,`--colors`的每三个值对应于`--model-names`的一个值。完成以上所有步骤后,你将获得下图。 + +
+ +
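+
+如果想在同一张图中对比多个模型的形状偏差,可以为每个模型分别提供名称和一组 RGB 颜色值(`--colors` 中每三个数对应一个模型)。下面是一个示意命令,其中模型名 `mae`、`simmim` 仅为假设,请替换为你在配置中实际使用的 `model_name`;`--markers` 等参数的取值方式请以脚本的实际帮助信息为准:
+
+```shell
+python tools/analysis_tools/shape_bias.py \
+    --csv-dir work_dirs/shape_bias \
+    --result-dir work_dirs/shape_bias \
+    --colors 31 119 180 255 127 14 \
+    --markers o o \
+    --plotting-names mae simmim \
+    --model-names mae simmim
+```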
diff --git a/docs/zh_CN/useful_tools/t-sne_visualization.md b/docs/zh_CN/useful_tools/t-sne_visualization.md new file mode 100644 index 0000000..124e915 --- /dev/null +++ b/docs/zh_CN/useful_tools/t-sne_visualization.md @@ -0,0 +1,85 @@ +# t-分布随机邻域嵌入(t-SNE)可视化 + +## t-分布随机邻域嵌入可视化工具介绍 + +MMPretrain 提供 `tools/visualization/vis_tsne.py` 工具来用t-SNE可视化图像的特征嵌入。请使用 `pip install scikit-learn` 安装 `sklearn` 来计算t-SNE。 + +**命令**: + +```bash +python tools/visualization/vis_tsne.py \ + CONFIG \ + [--checkpoint CHECKPOINT] \ + [--work-dir WORK_DIR] \ + [--test-cfg TEST_CFG] \ + [--vis-stage {backbone,neck,pre_logits}] + [--class-idx ${CLASS_IDX} [CLASS_IDX ...]] + [--max-num-class MAX_NUM_CLASS] + [--max-num-samples MAX_NUM_SAMPLES] + [--cfg-options CFG_OPTIONS [CFG_OPTIONS ...]] + [--device DEVICE] + [--legend] + [--show] + [--n-components N_COMPONENTS] + [--perplexity PERPLEXITY] + [--early-exaggeration EARLY_EXAGGERATION] + [--learning-rate LEARNING_RATE] + [--n-iter N_ITER] + [--n-iter-without-progress N_ITER_WITHOUT_PROGRESS] + [--init INIT] +``` + +**所有参数的说明**: + +- `CONFIG`: t-SNE 配置文件的路径。 +- `--checkpoint CHECKPOINT`: 模型权重文件的路径。 +- `--work-dir WORK_DIR`: 保存日志和可视化图像的目录。 +- `--test-cfg TEST_CFG`: 用来加载 test_dataloader 配置的 t-SNE 配置文件的路径。 +- `--vis-stage {backbone,neck,pre_logits}`: 模型可视化的阶段。 +- `--class-idx CLASS_IDX [CLASS_IDX ...]`: 用来计算 t-SNE 的类别。 +- `--max-num-class MAX_NUM_CLASS`: 前 N 个被应用 t-SNE 算法的类别,默认为20。 +- `--max-num-samples MAX_NUM_SAMPLES`: 每个类别中最大的样本数,值越高需要的计算时间越长,默认为100。 +- `--cfg-options CFG_OPTIONS [CFG_OPTIONS ...]`: 覆盖被使用的配置中的一些设定,形如 xxx=yyy 格式的关键字-值对会被合并到配置文件中。如果被覆盖的值是一个列表,它应该形如 key="[a,b]" 或者 key=a,b 。它还允许嵌套的列表/元组值,例如 key="[(a,b),(c,d)]" 。注意引号是必需的,而且不允许有空格。 +- `--device DEVICE`: 用于推理的设备。 +- `--legend`: 显示所有类别的图例。 +- `--show`: 在图形窗口中显示结果。 +- `--n-components N_COMPONENTS`: 结果的维数。 +- `--perplexity PERPLEXITY`: 复杂度与其他流形学习算法中使用的最近邻的数量有关。 +- `--early-exaggeration EARLY_EXAGGERATION`: 控制原空间中的自然聚类在嵌入空间中的紧密程度以及它们之间的空间大小。 +- `--learning-rate LEARNING_RATE`: t-SNE 的学习率通常在[10.0, 1000.0]的范围内。如果学习率太高,数据可能看起来像一个球,其中任何一点与它最近的邻居近似等距。如果学习率太低,大多数点可能看起来被压缩在一个几乎没有异常值的密集点云中。 +- `--n-iter N_ITER`: 优化的最大迭代次数。应该至少为250。 +- `--n-iter-without-progress N_ITER_WITHOUT_PROGRESS`: 在我们中止优化之前,最大的没有进展的迭代次数。 +- `--init INIT`: 初始化方法。 + +## 如何可视化分类模型的t-SNE(如 ResNet) + +以下是在CIFAR-10数据集上训练的 ResNet-18 和 ResNet-50 模型上运行 t-SNE 可视化的两个样例: + +```shell +python tools/visualization/vis_tsne.py \ + configs/resnet/resnet18_8xb16_cifar10.py \ + --checkpoint https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth + +python tools/visualization/vis_tsne.py \ + configs/resnet/resnet50_8xb16_cifar10.py \ + --checkpoint https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth +``` + +| ResNet-18 | ResNet-50 | +| ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | +|
|
| + +## 如何可视化自监督模型的t-SNE(如 MAE) + +以下是在ImageNet数据集上训练的 MAE-ViT-base 模型上运行 t-SNE 可视化的一个样例。输入数据来自 ImageNet 验证集。MAE和一些自监督预训练算法配置中没有 test_dataloader 信息。在分析这些自监督算法时,你需要在配置中添加 test_dataloader 信息,或者使用 `--test-cfg` 字段来指定一个配置文件。 + +```shell +python tools/visualization/vis_tsne.py \ + configs/mae/mae_vit-base-p16_8xb512-amp-coslr-800e_in1k.py \ + --checkpoint https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-base-p16_8xb512-fp16-coslr-800e_in1k/mae_vit-base-p16_8xb512-coslr-800e-fp16_in1k_20220825-5d81fbc4.pth \ + --test-cfg configs/_base_/datasets/imagenet_bs32.py +``` + +| MAE-ViT-base | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------- | +|
|
diff --git a/docs/zh_CN/useful_tools/verify_dataset.md b/docs/zh_CN/useful_tools/verify_dataset.md
new file mode 100644
index 0000000..655ce97
--- /dev/null
+++ b/docs/zh_CN/useful_tools/verify_dataset.md
@@ -0,0 +1,28 @@
+# 数据集验证
+
+在 MMPretrain 中,`tools/misc/verify_dataset.py` 脚本会检查数据集的所有图片,查看是否有**已经损坏**的图片。
+
+## 工具介绍
+
+```shell
+python tools/misc/verify_dataset.py \
+    ${CONFIG} \
+    [--out-path ${OUT-PATH}] \
+    [--phase ${PHASE}] \
+    [--num-process ${NUM-PROCESS}] \
+    [--cfg-options ${CFG_OPTIONS}]
+```
+
+**所有参数说明**:
+
+- `config`:配置文件的路径。
+- `--out-path`:输出结果路径,默认为 `brokenfiles.log`。
+- `--phase`:检查哪个阶段的数据集,可用值为 `train`、`test` 或者 `val`,默认为 `train`。
+- `--num-process`:指定的进程数,默认为 1。
+- `--cfg-options`:额外的配置选项,会被合入配置文件,参考[教程 1:如何编写配置文件](https://mmpretrain.readthedocs.io/zh_CN/latest/tutorials/config.html)。
+
+## 示例:
+
+```shell
+python tools/misc/verify_dataset.py configs/t2t_vit/t2t-vit-t-14_8xb64_in1k.py --out-path broken_imgs.log --phase val --num-process 8
+```
diff --git a/docs/zh_CN/user_guides/config.md b/docs/zh_CN/user_guides/config.md
new file mode 100644
index 0000000..c6013f2
--- /dev/null
+++ b/docs/zh_CN/user_guides/config.md
@@ -0,0 +1,412 @@
+# 学习配置文件
+
+为了管理深度学习实验的各种设置,我们使用配置文件来记录所有这些配置。这种配置文件系统具有模块化和继承特性,更多细节可以在 {external+mmengine:doc}`MMEngine 中的教程 ` 中找到。
+
+MMPretrain 主要使用 python 文件作为配置文件,所有配置文件都放置在 [`configs`](https://github.com/open-mmlab/mmpretrain/tree/main/configs) 文件夹下,目录结构如下所示:
+
+```text
+MMPretrain/
+    ├── configs/
+    │   ├── _base_/                     # primitive configuration folder
+    │   │   ├── datasets/               # primitive datasets
+    │   │   ├── models/                 # primitive models
+    │   │   ├── schedules/              # primitive schedules
+    │   │   └── default_runtime.py      # primitive runtime setting
+    │   ├── beit/                       # BEiT Algorithms Folder
+    │   ├── mae/                        # MAE Algorithms Folder
+    │   ├── mocov2/                     # MoCoV2 Algorithms Folder
+    │   ├── resnet/                     # ResNet Algorithms Folder
+    │   ├── swin_transformer/           # Swin Algorithms Folder
+    │   ├── vision_transformer/         # ViT Algorithms Folder
+    │   ├── ...
+    └── ...
+```
+
+可以使用 `python tools/misc/print_config.py /PATH/TO/CONFIG` 命令来查看完整的配置信息,从而方便检查所对应的配置文件。
+
+本文主要讲解 MMPretrain 配置文件的命名和结构,以及如何基于已有的配置文件进行修改,并以 [ResNet50 配置文件](https://github.com/open-mmlab/mmpretrain/blob/main/configs/resnet/resnet50_8xb32_in1k.py) 为例逐行解释。
+
+## 配置文件结构
+
+在 `configs/_base_` 文件夹下有 4 个基本组件类型,分别是:
+
+- [模型(model)](https://github.com/open-mmlab/mmpretrain/tree/main/configs/_base_/models)
+- [数据(data)](https://github.com/open-mmlab/mmpretrain/tree/main/configs/_base_/datasets)
+- [训练策略(schedule)](https://github.com/open-mmlab/mmpretrain/tree/main/configs/_base_/schedules)
+- [运行设置(runtime)](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/default_runtime.py)
+
+你可以通过继承一些基本配置文件轻松构建自己的训练配置文件。我们称这些被继承的配置文件为 _原始配置文件_,`_base_` 文件夹中的文件一般仅作为原始配置文件使用。
+
+下面使用 [ResNet50 配置文件](https://github.com/open-mmlab/mmpretrain/blob/main/configs/resnet/resnet50_8xb32_in1k.py) 作为案例进行说明,并注释每一行的含义。
+
+```python
+_base_ = [                                    # 此配置文件将继承所有 `_base_` 中的配置
+    '../_base_/models/resnet50.py',           # 模型配置
+    '../_base_/datasets/imagenet_bs32.py',    # 数据配置
+    '../_base_/schedules/imagenet_bs256.py',  # 训练策略配置
+    '../_base_/default_runtime.py'            # 默认运行设置
+]
+```
+
+我们将在下面分别解释这四个原始配置文件。
+
+### 模型配置
+
+模型原始配置文件包含一个 `model` 字典数据结构,主要包括网络结构、损失函数等信息:
+
+- `type`:算法类型,我们支持了多种任务
+  - 对于图像分类任务,通常为 `ImageClassifier`,更多细节请参考 [API 文档](mmpretrain.models.classifiers)。
+  - 对于自监督任务,有多种类型的算法,例如 `MoCoV2`, `BEiT`, `MAE` 等。更多细节请参考 [API 文档](mmpretrain.models.selfsup)。
+  - 对于图像检索任务,通常为 `ImageToImageRetriever`,更多细节请参考 [API 文档](mmpretrain.models.retrievers)。
+ +通常,我们使用 **`type`字段** 来指定组件的类,并使用其他字段来传递类的初始化参数。{external+mmengine:doc}`注册器教程 ` 对其进行了详细描述。 + +这里我们以 [`ImageClassifier`](mmpretrain.models.classifiers.ImageClassifier) 的配置字段为例,对初始化参数进行说明: + +- `backbone`: 主干网络设置,主干网络为主要的特征提取网络,比如 `ResNet`, `Swin Transformer`, `Vision Transformer` 等等。更多可用选项请参考 [API 文档](mmpretrain.models.backbones)。 + - 对于自监督学习,有些主干网络需要重新实现,您可以在 [API 文档](mmpretrain.models.selfsup) 中获取更多细节。 +- `neck`: 颈网络设置,颈网络主要是连接主干网和头网络的中间部分,比如 `GlobalAveragePooling` 等,更多可用选项请参考 [API 文档](mmpretrain.models.necks)。 +- `head`: 头网络设置,头网络主要是与具体任务关联的部件,如图像分类、自监督训练等,更多可用选项请参考 [API 文档](mmpretrain.models.heads)。 + - `loss`: 损失函数设置, 支持 `CrossEntropyLoss`, `LabelSmoothLoss`, `PixelReconstructionLoss` 等,更多可用选项参考 [API 文档](mmpretrain.models.losses)。 +- `data_preprocessor`: 图像输入的预处理模块,输入在进入模型前的预处理操作,例如 `ClsDataPreprocessor`, 有关详细信息,请参阅 [API 文档](mmpretrain.models.utils.data_preprocessor)。 +- `train_cfg`: `ImageClassifier` 的额外训练配置。在 `ImageClassifier` 中,我们使用这一参数指定批数据增强设置,比如 `Mixup` 和 `CutMix`。详见[文档](mmpretrain.models.utils.batch_augments)。 + +以下是 ResNet50 的模型配置['configs/_base_/models/resnet50.py'](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/models/resnet50.py): + +```python +model = dict( + type='ImageClassifier', # 主模型类型(对于图像分类任务,使用 `ImageClassifier`) + backbone=dict( + type='ResNet', # 主干网络类型 + # 除了 `type` 之外的所有字段都来自 `ResNet` 类的 __init__ 方法 + # 可查阅 https://mmpretrain.readthedocs.io/zh_CN/latest/api/generated/mmpretrain.models.backbones.ResNet.html + depth=50, + num_stages=4, # 主干网络状态(stages)的数目,这些状态产生的特征图作为后续的 head 的输入。 + out_indices=(3, ), # 输出的特征图输出索引。 + frozen_stages=-1, # 冻结主干网的层数 + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), # 颈网络类型 + head=dict( + type='LinearClsHead', # 分类颈网络类型 + # 除了 `type` 之外的所有字段都来自 `LinearClsHead` 类的 __init__ 方法 + # 可查阅 https://mmpretrain.readthedocs.io/zh_CN/latest/api/generated/mmpretrain.models.heads.LinearClsHead.html + num_classes=1000, + in_channels=2048, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), # 损失函数配置信息 + topk=(1, 5), # 评估指标,Top-k 准确率, 这里为 top1 与 top5 准确率 + )) +``` + +### 数据 + +数据原始配置文件主要包括预处理设置、dataloader 以及 评估器等设置: + +- `data_preprocessor`: 模型输入预处理配置,与 `model.data_preprocessor` 相同,但优先级更低。 +- `train_evaluator | val_evaluator | test_evaluator`: 构建评估器,参考 [API 文档](mmpretrain.evaluation)。 +- `train_dataloader | val_dataloader | test_dataloader`: 构建 dataloader + - `batch_size`: 每个 GPU 的 batch size + - `num_workers`: 每个 GPU 的线程数 + - `sampler`: 采样器配置 + - `dataset`: 数据集配置 + - `type`: 数据集类型, MMPretrain 支持 `ImageNet`、 `Cifar` 等数据集 ,参考 [API 文档](mmpretrain.datasets) + - `pipeline`: 数据处理流水线,参考相关教程文档 [如何设计数据处理流水线](../advanced_guides/pipeline.md) + +以下是 ResNet50 的数据配置 ['configs/_base_/datasets/imagenet_bs32.py'](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/datasets/imagenet_bs32.py): + +```python +dataset_type = 'ImageNet' +# 预处理配置 +data_preprocessor = dict( + # 输入的图片数据通道以 'RGB' 顺序 + mean=[123.675, 116.28, 103.53], # 输入图像归一化的 RGB 通道均值 + std=[58.395, 57.12, 57.375], # 输入图像归一化的 RGB 通道标准差 + to_rgb=True, # 是否将通道翻转,从 BGR 转为 RGB 或者 RGB 转为 BGR +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), # 读取图像 + dict(type='RandomResizedCrop', scale=224), # 随机放缩裁剪 + dict(type='RandomFlip', prob=0.5, direction='horizontal'), # 随机水平翻转 + dict(type='PackInputs'), # 准备图像以及标签 +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), # 读取图像 + dict(type='ResizeEdge', scale=256, edge='short'), # 缩放短边尺寸至 256px + dict(type='CenterCrop', crop_size=224), # 中心裁剪 + dict(type='PackInputs'), # 准备图像以及标签 +] + +# 构造训练集 dataloader 
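+# 补充说明:batch_size 为单张 GPU 上的批大小,总批大小 = batch_size × GPU 数,
+# 例如 32 × 8 = 256,与训练策略配置 imagenet_bs256 的默认假设保持一致。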
+train_dataloader = dict( + batch_size=32, # 每张 GPU 的 batchsize + num_workers=5, # 每个 GPU 的线程数 + dataset=dict( # 训练数据集 + type=dataset_type, + data_root='data/imagenet', + ann_file='meta/train.txt', + data_prefix='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), # 默认采样器 + persistent_workers=True, # 是否保持进程,可以缩短每个 epoch 的准备时间 +) + +# 构造验证集 dataloader +val_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + ann_file='meta/val.txt', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), + persistent_workers=True, +) +# 验证集评估设置,使用准确率为指标, 这里使用 topk1 以及 top5 准确率 +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +test_dataloader = val_dataloader # test dataloader 配置,这里直接与 val_dataloader 相同 +test_evaluator = val_evaluator # 测试集的评估配置,这里直接与 val_evaluator 相同 +``` + +```{note} +预处理配置(`data_preprocessor`)既可以作为 `model` 的一个子字段,也可以定义在外部的 `data_preprocessor` 字段, +同时配置时,优先使用 `model.data_preprocessor` 的配置。 +``` + +### 训练策略 + +训练策略原始配置文件主要包括预优化器设置和训练、验证及测试的循环控制器(LOOP): + +- `optim_wrapper`: 优化器装饰器配置信息,我们使用优化器装饰配置优化进程。 + - `optimizer`: 支持 `pytorch` 所有的优化器,参考相关 {external+mmengine:doc}`MMEngine ` 文档。 + - `paramwise_cfg`: 根据参数的类型或名称设置不同的优化参数,参考相关 [学习策略文档](../advanced_guides/schedule.md) 文档。 + - `accumulative_counts`: 积累几个反向传播后再优化参数,你可以用它通过小批量来模拟大批量。 +- `param_scheduler` : 学习率策略,你可以指定训练期间的学习率和动量曲线。有关详细信息,请参阅 MMEngine 中的 {external+mmengine:doc}`文档 `。 +- `train_cfg | val_cfg | test_cfg`: 训练、验证以及测试的循环执行器配置,请参考相关的{external+mmengine:doc}`MMEngine 文档 `。 + +以下是 ResNet50 的训练策略配置['configs/_base_/schedules/imagenet_bs256.py'](https://github.com/open-mmlab/mmpretrain/blob/main/configs/_base_/schedules/imagenet_bs256.py): + +```python +optim_wrapper = dict( + # 使用 SGD 优化器来优化参数 + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)) + +# 学习率参数的调整策略 +# 'MultiStepLR' 表示使用多步策略来调度学习率(LR)。 +param_scheduler = dict( + type='MultiStepLR', by_epoch=True, milestones=[30, 60, 90], gamma=0.1) + +# 训练的配置,迭代 100 个 epoch,每一个训练 epoch 后都做验证集评估 +# 'by_epoch=True' 默认使用 `EpochBaseLoop`, 'by_epoch=False' 默认使用 `IterBaseLoop` +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +# 使用默认的验证循环控制器 +val_cfg = dict() +# 使用默认的测试循环控制器 +test_cfg = dict() + +# 通过默认策略自动缩放学习率,此策略适用于总批次大小 256 +# 如果你使用不同的总批量大小,比如 512 并启用自动学习率缩放 +# 我们将学习率扩大到 2 倍 +auto_scale_lr = dict(base_batch_size=256) +``` + +### 运行设置 + +本部分主要包括保存权重策略、日志配置、训练参数、断点权重路径和工作目录等等。 + +以下是几乎所有算法都使用的运行配置['configs/_base_/default_runtime.py'](https://github.com/open-mmlab/mmpretrain/blob/main//configs/_base_/default_runtime.py): + +```python +# 默认所有注册器使用的域 +default_scope = 'mmpretrain' + +# 配置默认的 hook +default_hooks = dict( + # 记录每次迭代的时间。 + timer=dict(type='IterTimerHook'), + + # 每 100 次迭代打印一次日志。 + logger=dict(type='LoggerHook', interval=100), + + # 启用默认参数调度 hook。 + param_scheduler=dict(type='ParamSchedulerHook'), + + # 每个 epoch 保存检查点。 + checkpoint=dict(type='CheckpointHook', interval=1), + + # 在分布式环境中设置采样器种子。 + sampler_seed=dict(type='DistSamplerSeedHook'), + + # 验证结果可视化,默认不启用,设置 True 时启用。 + visualization=dict(type='VisualizationHook', enable=False), +) + +# 配置环境 +env_cfg = dict( + # 是否开启 cudnn benchmark + cudnn_benchmark=False, + + # 设置多进程参数 + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + + # 设置分布式参数 + dist_cfg=dict(backend='nccl'), +) + +# 设置可视化工具 +vis_backends = [dict(type='LocalVisBackend')] # 使用磁盘(HDD)后端 +visualizer = dict( + type='UniversalVisualizer', vis_backends=vis_backends, name='visualizer') + 
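+# 补充说明(可选):如需将训练过程记录到 TensorBoard,可在 vis_backends 中追加
+# dict(type='TensorboardVisBackend'),使用前需要先安装 tensorboard。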
+# 设置日志级别
+log_level = 'INFO'
+
+# 从哪个检查点加载
+load_from = None
+
+# 是否从加载的检查点恢复训练
+resume = False
+```
+
+## 继承并修改配置文件
+
+为了精简代码、更快地修改配置文件以及便于理解,我们建议继承现有配置文件。
+
+对于在同一算法文件夹下的所有配置文件,MMPretrain 推荐只存在 **一个** 对应的 _原始配置_ 文件。
+所有其他的配置文件都应该继承 _原始配置_ 文件,这样就能保证配置文件的最大继承深度为 3。
+
+例如,如果要在 ResNet 的基础上做一些修改,用户首先可以通过指定 `_base_ = './resnet50_8xb32_in1k.py'`(相对于你的配置文件的路径)来继承基础的 ResNet 结构、数据集以及其他训练配置信息,然后只需在新配置文件中修改必要的字段。比如想在 ResNet50 的基础上使用 `CutMix` 训练增强,将训练轮数由 100 改为 300,修改学习率衰减节点,并使用自己的数据集路径,可以建立新的配置文件 `configs/resnet/resnet50_8xb32-300e_in1k.py`,在文件中写入以下内容:
+
+```python
+# 在 'configs/resnet/' 创建此文件
+_base_ = './resnet50_8xb32_in1k.py'
+
+# 模型在之前的基础上使用 CutMix 训练增强
+model = dict(
+    train_cfg=dict(
+        augments=dict(type='CutMix', alpha=1.0)
+    )
+)
+
+# 训练策略在之前的基础上训练更多的 epoch
+train_cfg = dict(max_epochs=300, val_interval=10)  # 训练 300 个 epoch,每 10 个 epoch 评估一次
+param_scheduler = dict(milestones=[150, 200, 250])  # 学习率衰减节点也相应变动
+
+# 使用自己的数据集目录
+train_dataloader = dict(
+    dataset=dict(data_root='mydata/imagenet/train'),
+)
+val_dataloader = dict(
+    batch_size=64,  # 验证时没有反向传播,可以使用更大的 batchsize
+    dataset=dict(data_root='mydata/imagenet/val'),
+)
+test_dataloader = dict(
+    batch_size=64,  # 测试时没有反向传播,可以使用更大的 batchsize
+    dataset=dict(data_root='mydata/imagenet/val'),
+)
+```
+
+### 使用配置文件里的中间变量
+
+使用一些中间变量可以让配置文件更加清晰,也更容易修改。
+
+例如数据集里的 `train_pipeline` / `test_pipeline` 是作为数据流水线的中间变量。我们首先要定义它们,然后将它们传递到 `train_dataloader` / `test_dataloader` 中。如果想修改训练或测试时输入图片的大小,就需要修改 `train_pipeline` / `test_pipeline` 这些中间变量。
+
+```python
+bgr_mean = [103.53, 116.28, 123.675]
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='RandomResizedCrop', scale=224, backend='pillow', interpolation='bicubic'),
+    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
+    dict(
+        type='RandAugment',
+        policies='timm_increasing',
+        num_policies=2,
+        total_level=10,
+        magnitude_level=6,
+        magnitude_std=0.5,
+        hparams=dict(pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')),
+    dict(type='PackInputs'),
+]
+
+test_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(type='ResizeEdge', scale=236, edge='short', backend='pillow', interpolation='bicubic'),
+    dict(type='CenterCrop', crop_size=224),
+    dict(type='PackInputs')
+]
+
+train_dataloader = dict(dataset=dict(pipeline=train_pipeline))
+val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
+test_dataloader = dict(dataset=dict(pipeline=test_pipeline))
+```
+
+### 忽略基础配置文件里的部分内容
+
+有时,您需要设置 `_delete_=True` 去忽略基础配置文件里的一些域内容。可以查看 {external+mmengine:doc}`MMEngine 文档 ` 进一步了解该设计。
+
+以下是一个简单应用案例。如果想在上述 ResNet50 配置的基础上改用余弦学习率调度(`CosineAnnealingLR`),直接继承并修改会报 `got an unexpected keyword argument 'milestones'` 这样的错误,因为基础配置文件中 `param_scheduler` 域的 `'milestones'` 等字段被保留了下来,而 `CosineAnnealingLR` 并不接受这些参数,因此需要加入 `_delete_=True` 去忽略基础配置文件里的 `param_scheduler` 相关域内容:
+
+```python
+_base_ = '../../configs/resnet/resnet50_8xb32_in1k.py'
+
+# 学习率调整策略
+param_scheduler = dict(type='CosineAnnealingLR', by_epoch=True, _delete_=True)
+```
+
+### 引用基础配置文件里的变量
+
+有时,您可以引用 `_base_` 配置文件中的一些字段,从而避免重复定义。可以查看 {external+mmengine:doc}`MMEngine 文档 ` 进一步了解该设计。
+
+以下是一个简单应用案例,在训练数据预处理流水线中使用 `auto augment` 数据增强,参考配置文件 [`configs/resnest/resnest50_32xb64_in1k.py`](https://github.com/open-mmlab/mmpretrain/blob/main/configs/resnest/resnest50_32xb64_in1k.py)。在定义 `train_pipeline` 时,可以直接在 `_base_` 中加入定义 auto augment 数据增强策略的文件名,再通过 `{{_base_.policies}}` 引用其中的变量:
+
+```python
+_base_ = [
+    '../_base_/models/resnest50.py', '../_base_/datasets/imagenet_bs64.py',
+    '../_base_/default_runtime.py', './_randaug_policies.py',
+]
+
+train_pipeline = [
+    dict(type='LoadImageFromFile'),
+    dict(
+ type='RandAugment', + policies={{_base_.policies}}, # 这里使用了 _base_ 里的 `policies` 参数。 + num_policies=2, + magnitude_level=12), + dict(type='EfficientNetRandomCrop', scale=224, backend='pillow'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='ColorJitter', brightness=0.4, contrast=0.4, saturation=0.4), + dict( + type='Lighting', + eigval=EIGVAL, + eigvec=EIGVEC, + alphastd=0.1, + to_rgb=False), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +``` + +## 通过命令行参数修改配置信息 + +当用户使用脚本 "tools/train.py" 或者 "tools/test.py" 提交任务,以及使用一些工具脚本时,可以通过指定 `--cfg-options` 参数来直接修改所使用的配置文件内容。 + +- 更新配置文件内的字典 + + 可以按照原始配置文件中字典的键的顺序指定配置选项。 + 例如,`--cfg-options model.backbone.norm_eval=False` 将主干网络中的所有 BN 模块更改为 `train` 模式。 + +- 更新配置文件内列表的键 + + 一些配置字典在配置文件中会形成一个列表。例如,训练流水线 `data.train.pipeline` 通常是一个列表。 + 例如,`[dict(type='LoadImageFromFile'), dict(type='TopDownRandomFlip', flip_prob=0.5), ...]` 。如果要将流水线中的 `'flip_prob=0.5'` 更改为 `'flip_prob=0.0'`,您可以这样指定 `--cfg-options data.train.pipeline.1.flip_prob=0.0` 。 + +- 更新列表/元组的值。 + + 当配置文件中需要更新的是一个列表或者元组,例如,配置文件通常会设置 `val_evaluator = dict(type='Accuracy', topk=(1, 5))`,用户如果想更改 `topk`, + 需要指定 `--cfg-options val_evaluator.topk="(1,3)"`。注意这里的引号 " 对于列表以及元组数据类型的修改是必要的, + 并且 **不允许** 引号内所指定的值的书写存在空格。 diff --git a/docs/zh_CN/user_guides/dataset_prepare.md b/docs/zh_CN/user_guides/dataset_prepare.md new file mode 100644 index 0000000..aa1e1fd --- /dev/null +++ b/docs/zh_CN/user_guides/dataset_prepare.md @@ -0,0 +1,351 @@ +# 准备数据集 + +## CustomDataset + +[`CustomDataset`](mmpretrain.datasets.CustomDataset) 是一个通用的数据集类,供您使用自己的数据集。目前 `CustomDataset` 支持以下两种方式组织你的数据集文件: + +### 子文件夹方式 + +在这种格式下,您只需要重新组织您的数据集文件夹并将所有样本放在一个文件夹中,而无需创建任何标注文件。 + +对于监督任务(使用 `with_label=true`),我们使用子文件夹的名称作为类别名称,如下例所示,`class_x` 和 `class_y` 将被识别为类别名称。 + +```text +data_prefix/ +├── class_x +│ ├── xxx.png +│ ├── xxy.png +│ └── ... +│ └── xxz.png +└── class_y + ├── 123.png + ├── nsdf3.png + ├── ... + └── asd932_.png +``` + +对于无监督任务(使用 `with_label=false`),我们直接加载指定文件夹下的所有样本文件: + +``` +data_prefix/ +├── folder_1 +│ ├── xxx.png +│ ├── xxy.png +│ └── ... +├── 123.png +├── nsdf3.png +└── ... +``` + +假如你希望将之用于训练,那么配置文件中需要添加以下配置: + +```python +train_dataloader = dict( + ... + # 训练数据集配置 + dataset=dict( + type='CustomDataset', + data_prefix='path/to/data_prefix', + with_label=True, # 对于无监督任务,使用 False + pipeline=... + ) +) +``` + +```{note} +如果要使用此格式,请不要指定 `ann_file`,或指定 `ann_file=''`。 + +请注意,子文件夹格式需要对文件夹进行扫描,这可能会导致初始化速度变慢,尤其是对于大型数据集或慢速文件 IO。 +``` + +### 标注文件方式 + +标注文件格式主要使用文本文件来保存类别信息,`data_prefix` 存放图片,`ann_file` 存放标注类别信息。 + +如下案例,dataset 目录如下: + +在这种格式中,我们使用文本标注文件来存储图像文件路径和对应的类别索引。 + +对于监督任务(`with_label=true`),注释文件应在一行中包含一个样本的文件路径和类别索引,并用空格分隔,如下所示: + +所有这些文件路径都可以是绝对路径,也可以是相对于 `data_prefix` 的相对路径。 + +```text +folder_1/xxx.png 0 +folder_1/xxy.png 1 +123.png 4 +nsdf3.png 3 +... +``` + +```{note} +类别的索引号从 0 开始。真实标签的值应在`[0, num_classes - 1]`范围内。 + +此外,请使用数据集设置中的 `classes` 字段来指定每个类别的名称 +``` + +对于无监督任务(`with_label=false`),标注文件只需要在一行中包含一个样本的文件路径,如下: + +```text +folder_1/xxx.png +folder_1/xxy.png +123.png +nsdf3.png +... 
+``` + +假设整个数据集文件夹如下: + +```text +data_root +├── meta +│   ├── test.txt # 测试数据集的标注文件 +│   ├── train.txt # 训练数据集的标注文件 +│   └── val.txt # 验证数据集的标注文件 + +├── train +│   ├── 123.png +│   ├── folder_1 +│   │   ├── xxx.png +│   │   └── xxy.png +│   └── nsdf3.png +├── test +└── val +``` + +这是配置文件中的数据集设置的示例: + +```python +# 训练数据设置 +train_dataloader = dict( + dataset=dict( + type='CustomDataset', + data_root='path/to/data_root', # `ann_flie` 和 `data_prefix` 共同的文件路径前缀 + ann_file='meta/train.txt', # 相对于 `data_root` 的标注文件路径 + data_prefix='train', # `ann_file` 中文件路径的前缀,相对于 `data_root` + classes=['A', 'B', 'C', 'D', ...], # 每个类别的名称 + pipeline=..., # 处理数据集样本的一系列变换操作 + ) + ... +) +``` + +```{note} +有关如何使用 `CustomDataset` 的完整示例,请参阅[如何使用自定义数据集进行预训练](../notes/pretrain_custom_dataset.md) +``` + +## ImageNet + +ImageNet 有多个版本,但最常用的一个是 [ILSVRC 2012](http://www.image-net.org/challenges/LSVRC/2012/)。 可以通过以下步骤使用它。 + +`````{tabs} + +````{group-tab} MIM 下载 + +MIM支持使用一条命令行从 [OpenXLab](https://openxlab.org.cn/datasets?lang=zh-CN) 下载并预处理 ImageNet 数据集。 + +_需要在 [OpenXLab 官网](https://openxlab.org.cn/datasets?lang=zh-CN) 注册账号并命令行登录_。 + +```Bash +# 安装 OpenXLab CLI 工具 +pip install -U openxlab +# 登录 OpenXLab +openxlab login +# 使用 MIM 下载数据集, 最好在 $MMPreTrain 目录执行 +mim download mmpretrain --dataset imagenet1k +``` + +```` + +````{group-tab} 从官网下载 + + +1. 注册一个帐户并登录到[下载页面](http://www.image-net.org/download-images)。 +2. 找到 ILSVRC2012 的下载链接,下载以下两个文件: + - ILSVRC2012_img_train.tar (~138GB) + - ILSVRC2012_img_val.tar (~6.3GB) +3. 解压已下载的图片。 + +```` +````` + +### ImageNet数据集目录结构 + +我们支持两种方式组织ImageNet数据集,子目录格式和文本注释文件格式。 + +#### 子文件夹格式 + +我们提供了一个样例,您可以从这个[链接](https://download.openmmlab.com/mmpretrain/datasets/imagenet_1k.zip)下载和解压。数据集的目录结构应如下所示: + +```text +data/imagenet/ +├── train/ +│ ├── n01440764 +│ │ ├── n01440764_10026.JPEG +│ │ ├── n01440764_10027.JPEG +│ │ ├── n01440764_10029.JPEG +│ │ ├── n01440764_10040.JPEG +│ │ ├── n01440764_10042.JPEG +│ │ ├── n01440764_10043.JPEG +│ │ └── n01440764_10048.JPEG +│ ├── ... +├── val/ +│ ├── n01440764 +│ │ ├── ILSVRC2012_val_00000293.JPEG +│ │ ├── ILSVRC2012_val_00002138.JPEG +│ │ ├── ILSVRC2012_val_00003014.JPEG +│ │ └── ... +│ ├── ... +``` + +#### 文本标注文件格式 + +您可以从[此链接](https://download.openmmlab.com/mmclassification/datasets/imagenet/meta/caffe_ilsvrc12.tar.gz)下载并解压元数据,然后组织文件夹如下: + +```text +data/imagenet/ +├── meta/ +│ ├── train.txt +│ ├── test.txt +│ └── val.txt +├── train/ +│ ├── n01440764 +│ │ ├── n01440764_10026.JPEG +│ │ ├── n01440764_10027.JPEG +│ │ ├── n01440764_10029.JPEG +│ │ ├── n01440764_10040.JPEG +│ │ ├── n01440764_10042.JPEG +│ │ ├── n01440764_10043.JPEG +│ │ └── n01440764_10048.JPEG +│ ├── ... +├── val/ +│ ├── ILSVRC2012_val_00000001.JPEG +│ ├── ILSVRC2012_val_00000002.JPEG +│ ├── ILSVRC2012_val_00000003.JPEG +│ ├── ILSVRC2012_val_00000004.JPEG +│ ├── ... +``` + +### 配置 + +当您的数据集以上述方式组织时,您可以使用具有以下配置的 [`ImageNet`](mmpretrain.datasets.ImageNet) 数据集: + +```python +train_dataloader = dict( + ... + # 训练数据集配置 + dataset=dict( + type='ImageNet', + data_root='data/imagenet/', + split='train', + pipeline=..., + ) +) + +val_dataloader = dict( + ... 
+ # 验证数据集配置 + dataset=dict( + type='ImageNet', + data_root='data/imagenet/', + split='val', + pipeline=..., + ) +) + +test_dataloader = val_dataloader +``` + +## 支持的图像分类数据集 + +| 数据集 | split | 主页 | +| ----------------------------------------------------------------------------------- | ----------------------------------- | ---------------------------------------------------------------------------------- | +| [`Calthch101`](mmpretrain.datasets.Caltech101)(data_root[, split, pipeline, ...]) | ["train", "test"] | [Caltech 101](https://data.caltech.edu/records/mzrjq-6wc02) 数据集 | +| [`CIFAR10`](mmpretrain.datasets.CIFAR10)(data_root[, split, pipeline, ...]) | ["train", "test"] | [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html) 数据集 | +| [`CIFAR100`](mmpretrain.datasets.CIFAR100)(data_root[, split, pipeline, ...]) | ["train", "test"] | [CIFAR100](https://www.cs.toronto.edu/~kriz/cifar.html) 数据集 | +| [`CUB`](mmpretrain.datasets.CUB)(data_root[, split, pipeline, ...]) | ["train", "test"] | [CUB-200-2011](http://www.vision.caltech.edu/datasets/cub_200_2011/) 数据集 | +| [`DTD`](mmpretrain.datasets.DTD)(data_root[, split, pipeline, ...]) | ["train", "val", "tranval", "test"] | [Describable Texture Dataset (DTD)](https://www.robots.ox.ac.uk/~vgg/data/dtd/) 数据集 | +| [`FashionMNIST`](mmpretrain.datasets.FashionMNIST) (data_root[, split, pipeline, ...]) | ["train", "test"] | [Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) 数据集 | +| [`FGVCAircraft`](mmpretrain.datasets.FGVCAircraft)(data_root[, split, pipeline, ...]) | ["train", "val", "tranval", "test"] | [FGVC Aircraft](https://www.robots.ox.ac.uk/~vgg/data/fgvc-aircraft/) 数据集 | +| [`Flowers102`](mmpretrain.datasets.Flowers102)(data_root[, split, pipeline, ...]) | ["train", "val", "tranval", "test"] | [Oxford 102 Flower](https://www.robots.ox.ac.uk/~vgg/data/flowers/102/) 数据集 | +| [`Food101`](mmpretrain.datasets.Food101)(data_root[, split, pipeline, ...]) | ["train", "test"] | [Food101](https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/) 数据集 | +| [`MNIST`](mmpretrain.datasets.MNIST) (data_root[, split, pipeline, ...]) | ["train", "test"] | [MNIST](http://yann.lecun.com/exdb/mnist/) 数据集 | +| [`OxfordIIITPet`](mmpretrain.datasets.OxfordIIITPet)(data_root[, split, pipeline, ...]) | ["tranval", test"] | [Oxford-IIIT Pets](https://www.robots.ox.ac.uk/~vgg/data/pets/) 数据集 | +| [`Places205`](mmpretrain.datasets.Places205)(data_root[, pipeline, ...]) | - | [Places205](http://places.csail.mit.edu/downloadData.html) 数据集 | +| [`StanfordCars`](mmpretrain.datasets.StanfordCars)(data_root[, split, pipeline, ...]) | ["train", "test"] | [StanfordCars](https://ai.stanford.edu/~jkrause/cars/car_dataset.html) 数据集 | +| [`SUN397`](mmpretrain.datasets.SUN397)(data_root[, split, pipeline, ...]) | ["train", "test"] | [SUN397](https://vision.princeton.edu/projects/2010/SUN/) 数据集 | +| [`VOC`](mmpretrain.datasets.VOC)(data_root[, image_set_path, pipeline, ...]) | ["train", "val", "tranval", "test"] | [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/) 数据集 | + +有些数据集主页链接可能已经失效,您可以通过[OpenXLab](https://openxlab.org.cn/datasets?lang=zh-CN)下载数据集,例如 [Stanford Cars](https://openxlab.org.cn/datasets/OpenDataLab/Stanford_Cars)数据集。 + +## OpenMMLab 2.0 标准数据集 + +为了统一不同任务的数据集接口,便于多任务的算法模型训练,OpenMMLab 制定了 **OpenMMLab 2.0 数据集格式规范**, 数据集标注文件需符合该规范,数据集基类基于该规范去读取与解析数据标注文件。如果用户提供的数据标注文件不符合规定格式,用户可以选择将其转化为规定格式,并使用 OpenMMLab 的算法库基于该数据标注文件进行算法训练和测试。 + +OpenMMLab 2.0 数据集格式规范规定,标注文件必须为 `json` 或 `yaml`,`yml` 或 `pickle`,`pkl` 格式;标注文件中存储的字典必须包含 `metainfo` 和 `data_list` 
两个字段。其中 `metainfo` 是一个字典,里面包含数据集的元信息;`data_list` 是一个列表,列表中每个元素是一个字典,该字典定义了一个原始数据(raw data),每个原始数据包含一个或若干个训练/测试样本。 + +假设您要使用训练数据集,那么配置文件如下所示: + +``` + +{ + 'metainfo': + { + 'classes': ('cat', 'dog'), # 'cat' 的类别序号为 0,'dog' 为 1。 + ... + }, + 'data_list': + [ + { + 'img_path': "xxx/xxx_0.jpg", + 'gt_label': 0, + ... + }, + { + 'img_path': "xxx/xxx_1.jpg", + 'gt_label': 1, + ... + }, + ... + ] +} +``` + +同时假设数据集存放路径如下: + +```text +data +├── annotations +│ ├── train.json +│ └── ... +├── train +│ ├── xxx/xxx_0.jpg +│ ├── xxx/xxx_1.jpg +│ ├── ... +``` + +通过以下字典构建: + +```python +dataset_cfg=dict( + type='CustomDataset', + ann_file='path/to/ann_file_path', + data_prefix='path/to/images_folder', + pipeline=transfrom_list) +``` + +## 其他数据集 + +MMPretrain 还支持更多其他的数据集,可以通过查阅[数据集文档](mmpretrain.datasets)获取它们的配置信息。 + +如果需要使用一些特殊格式的数据集,您需要实现您自己的数据集类,请参阅[添加新数据集](../advanced_guides/datasets.md)。 + +## 数据集包装 + +MMEngine 中支持以下数据包装器,您可以参考 {external+mmengine:doc}`MMEngine 教程 ` 了解如何使用它。 + +- {external:py:class}`~mmengine.dataset.ConcatDataset` +- {external:py:class}`~mmengine.dataset.RepeatDataset` +- {external:py:class}`~mmengine.dataset.ClassBalancedDataset` + +除上述之外,MMPretrain 还支持了[KFoldDataset](mmpretrain.datasets.KFoldDataset),需用通过使用 `tools/kfold-cross-valid.py` 来使用它。 diff --git a/docs/zh_CN/user_guides/downstream.md b/docs/zh_CN/user_guides/downstream.md new file mode 100644 index 0000000..0744f1e --- /dev/null +++ b/docs/zh_CN/user_guides/downstream.md @@ -0,0 +1,125 @@ +# 下游任务 + +## 检测 + +我们使用 MMDetection 进行图像检测。首先确保您已经安装了 [MIM](https://github.com/open-mmlab/mim),这也是 OpenMMLab 的一个项目。 + +```shell +pip install openmim +mim install 'mmdet>=3.0.0rc0' +``` + +此外,请参考 MMDetection 的[安装](https://mmdetection.readthedocs.io/en/dev-3.x/get_started.html)和[数据准备](https://mmdetection.readthedocs.io/en/dev-3.x/user_guides/dataset_prepare.html) + +### 训练 + +安装完后,您可以使用如下的简单命令运行 MMDetection。 + +```shell +# distributed version +bash tools/benchmarks/mmdetection/mim_dist_train_c4.sh ${CONFIG} ${PRETRAIN} ${GPUS} +bash tools/benchmarks/mmdetection/mim_dist_train_fpn.sh ${CONFIG} ${PRETRAIN} ${GPUS} + +# slurm version +bash tools/benchmarks/mmdetection/mim_slurm_train_c4.sh ${PARTITION} ${CONFIG} ${PRETRAIN} +bash tools/benchmarks/mmdetection/mim_slurm_train_fpn.sh ${PARTITION} ${CONFIG} ${PRETRAIN} +``` + +- `${CONFIG}`:直接用 MMDetection 中的配置文件路径即可。对于一些算法,我们有一些修改过的配置文件, + 可以在相应算法文件夹下的 `benchmarks` 文件夹中找到。另外,您也可以从头开始编写配置文件。 +- `${PRETRAIN}`:预训练模型文件 +- `${GPUS}`:使用多少 GPU 进行训练,对于检测任务,我们默认使用 8 个 GPU。 + +例子: + +```shell +bash ./tools/benchmarks/mmdetection/mim_dist_train_c4.sh \ + configs/byol/benchmarks/mask-rcnn_r50-c4_ms-1x_coco.py \ + https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth 8 +``` + +### 测试 + +在训练之后,您可以运行如下命令测试您的模型。 + +```shell +# distributed version +bash tools/benchmarks/mmdetection/mim_dist_test.sh ${CONFIG} ${CHECKPOINT} ${GPUS} + +# slurm version +bash tools/benchmarks/mmdetection/mim_slurm_test.sh ${PARTITION} ${CONFIG} ${CHECKPOINT} +``` + +备注: + +- `${CHECKPOINT}`:您想测试的训练好的检测模型。 + +例子: + +```shell +bash ./tools/benchmarks/mmdetection/mim_dist_test.sh \ +configs/benchmarks/mmdetection/coco/mask-rcnn_r50_fpn_ms-1x_coco.py \ +https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth 8 +``` + +## 分割 + +我们使用 MMSegmentation 进行图像分割。首先确保您已经安装了 [MIM](https://github.com/open-mmlab/mim),这也是 OpenMMLab 的一个项目。 + +```shell +pip install openmim 
+mim install 'mmsegmentation>=1.0.0rc0' +``` + +此外,请参考 MMSegmentation 的[安装](https://mmsegmentation.readthedocs.io/en/dev-1.x/get_started.html)和[数据准备](https://mmsegmentation.readthedocs.io/en/dev-1.x/user_guides/2_dataset_prepare.html)。 + +### 训练 + +在安装完后,可以使用如下简单命令运行 MMSegmentation。 + +```shell +# distributed version +bash tools/benchmarks/mmsegmentation/mim_dist_train.sh ${CONFIG} ${PRETRAIN} ${GPUS} + +# slurm version +bash tools/benchmarks/mmsegmentation/mim_slurm_train.sh ${PARTITION} ${CONFIG} ${PRETRAIN} +``` + +备注: + +- `${CONFIG}`:直接用 MMSegmentation 中的配置文件路径即可。对于一些算法,我们有一些修改过的配置文件, + 可以在相应算法文件夹下的 `benchmarks` 文件夹中找到。另外,您也可以从头开始编写配置文件。 +- `${PRETRAIN}`:预训练模型文件 +- `${GPUS}`:使用多少 GPU 进行训练,对于检测任务,我们默认使用 8 个 GPU。 + +例子: + +```shell +bash ./tools/benchmarks/mmsegmentation/mim_dist_train.sh \ +configs/benchmarks/mmsegmentation/voc12aug/fcn_r50-d8_4xb4-20k_voc12aug-512x512.py \ +https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth 4 +``` + +### 测试 + +在训练之后,您可以运行如下命令测试您的模型。 + +```shell +# distributed version +bash tools/benchmarks/mmsegmentation/mim_dist_test.sh ${CONFIG} ${CHECKPOINT} ${GPUS} + +# slurm version +bash tools/benchmarks/mmsegmentation/mim_slurm_test.sh ${PARTITION} ${CONFIG} ${CHECKPOINT} +``` + +备注: + +- `${CHECKPOINT}`:您想测试的训练好的分割模型。 + +例子: + +```shell +bash ./tools/benchmarks/mmsegmentation/mim_dist_test.sh \ +configs/benchmarks/mmsegmentation/voc12aug/fcn_r50-d8_4xb4-20k_voc12aug-512x512.py \ +https://download.openmmlab.com/mmselfsup/1.x/byol/byol_resnet50_16xb256-coslr-200e_in1k/byol_resnet50_16xb256-coslr-200e_in1k_20220825-de817331.pth 4 +``` diff --git a/docs/zh_CN/user_guides/inference.md b/docs/zh_CN/user_guides/inference.md new file mode 100644 index 0000000..068e42e --- /dev/null +++ b/docs/zh_CN/user_guides/inference.md @@ -0,0 +1,176 @@ +# 使用现有模型进行推理 + +本文将展示如何使用以下API: + +- [**`list_models`**](mmpretrain.apis.list_models): 列举 MMPretrain 中所有可用模型名称 +- [**`get_model`**](mmpretrain.apis.get_model): 通过模型名称或模型配置文件获取模型 +- [**`inference_model`**](mmpretrain.apis.inference_model): 使用与模型相对应任务的推理器进行推理。主要用作快速 + 展示。如需配置进阶用法,还需要直接使用下列推理器。 +- 推理器: + 1. [**`ImageClassificationInferencer`**](mmpretrain.apis.ImageClassificationInferencer): + 对给定图像执行图像分类。 + 2. [**`ImageRetrievalInferencer`**](mmpretrain.apis.ImageRetrievalInferencer): + 从给定的一系列图像中,检索与给定图像最相似的图像。 + 3. [**`ImageCaptionInferencer`**](mmpretrain.apis.ImageCaptionInferencer): + 生成给定图像的一段描述。 + 4. [**`VisualQuestionAnsweringInferencer`**](mmpretrain.apis.VisualQuestionAnsweringInferencer): + 根据给定的图像回答问题。 + 5. [**`VisualGroundingInferencer`**](mmpretrain.apis.VisualGroundingInferencer): + 根据一段描述,从给定图像中找到一个与描述对应的对象。 + 6. [**`TextToImageRetrievalInferencer`**](mmpretrain.apis.TextToImageRetrievalInferencer): + 从给定的一系列图像中,检索与给定文本最相似的图像。 + 7. [**`ImageToTextRetrievalInferencer`**](mmpretrain.apis.ImageToTextRetrievalInferencer): + 从给定的一系列文本中,检索与给定图像最相似的文本。 + 8. [**`NLVRInferencer`**](mmpretrain.apis.NLVRInferencer): + 对给定的一对图像和一段文本进行自然语言视觉推理(NLVR 任务)。 + 9. [**`FeatureExtractor`**](mmpretrain.apis.FeatureExtractor): + 通过视觉主干网络从图像文件提取特征。 + +## 列举可用模型 + +列出 MMPreTrain 中的所有已支持的模型。 + +```python +>>> from mmpretrain import list_models +>>> list_models() +['barlowtwins_resnet50_8xb256-coslr-300e_in1k', + 'beit-base-p16_beit-in21k-pre_3rdparty_in1k', + ...] 
+``` + +`list_models` 支持 Unix 文件名风格的模式匹配,你可以使用 \*\* * \*\* 匹配任意字符。 + +```python +>>> from mmpretrain import list_models +>>> list_models("*convnext-b*21k") +['convnext-base_3rdparty_in21k', + 'convnext-base_in21k-pre-3rdparty_in1k-384px', + 'convnext-base_in21k-pre_3rdparty_in1k'] +``` + +你还可以使用推理器的 `list_models` 方法获取对应任务可用的所有模型。 + +```python +>>> from mmpretrain import ImageCaptionInferencer +>>> ImageCaptionInferencer.list_models() +['blip-base_3rdparty_caption', + 'blip2-opt2.7b_3rdparty-zeroshot_caption', + 'flamingo_3rdparty-zeroshot_caption', + 'ofa-base_3rdparty-finetuned_caption'] +``` + +## 获取模型 + +选定需要的模型后,你可以使用 `get_model` 获取特定模型。 + +```python +>>> from mmpretrain import get_model + +# 不加载预训练权重的模型 +>>> model = get_model("convnext-base_in21k-pre_3rdparty_in1k") + +# 加载默认的权重文件 +>>> model = get_model("convnext-base_in21k-pre_3rdparty_in1k", pretrained=True) + +# 加载制定的权重文件 +>>> model = get_model("convnext-base_in21k-pre_3rdparty_in1k", pretrained="your_local_checkpoint_path") + +# 指定额外的模型初始化参数,例如修改 head 中的 num_classes。 +>>> model = get_model("convnext-base_in21k-pre_3rdparty_in1k", head=dict(num_classes=10)) + +# 另外一个例子:移除模型的 neck,head 模块,直接从 backbone 中的 stage 1, 2, 3 输出 +>>> model_headless = get_model("resnet18_8xb32_in1k", head=None, neck=None, backbone=dict(out_indices=(1, 2, 3))) +``` + +获得的模型是一个通常的 PyTorch Module + +```python +>>> import torch +>>> from mmpretrain import get_model +>>> model = get_model('convnext-base_in21k-pre_3rdparty_in1k', pretrained=True) +>>> x = torch.rand((1, 3, 224, 224)) +>>> y = model(x) +>>> print(type(y), y.shape) + torch.Size([1, 1000]) +``` + +## 在给定图像上进行推理 + +这里是一个例子,我们将使用 ResNet-50 预训练模型对给定的 [图像](https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG) 进行分类。 + +```python +>>> from mmpretrain import inference_model +>>> image = 'https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG' +>>> # 如果你没有图形界面,请设置 `show=False` +>>> result = inference_model('resnet50_8xb32_in1k', image, show=True) +>>> print(result['pred_class']) +sea snake +``` + +上述 `inference_model` 接口可以快速进行模型推理,但它每次调用都需要重新初始化模型,也无法进行多个样本的推理。 +因此我们需要使用推理器来进行多次调用。 + +```python +>>> from mmpretrain import ImageClassificationInferencer +>>> image = 'https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG' +>>> inferencer = ImageClassificationInferencer('resnet50_8xb32_in1k') +>>> # 注意推理器的输出始终为一个结果列表,即使输入只有一个样本 +>>> result = inferencer('https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG')[0] +>>> print(result['pred_class']) +sea snake +>>> +>>> # 你可以对多张图像进行批量推理 +>>> image_list = ['demo/demo.JPEG', 'demo/bird.JPEG'] * 16 +>>> results = inferencer(image_list, batch_size=8) +>>> print(len(results)) +32 +>>> print(results[1]['pred_class']) +house finch, linnet, Carpodacus mexicanus +``` + +通常,每个样本的结果都是一个字典。比如图像分类的结果是一个包含了 `pred_label`、`pred_score`、`pred_scores`、`pred_class` 等字段的字典: + +```python +{ + "pred_label": 65, + "pred_score": 0.6649366617202759, + "pred_class":"sea snake", + "pred_scores": array([..., 0.6649366617202759, ...], dtype=float32) +} +``` + +你可以为推理器配置额外的参数,比如使用你自己的配置文件和权重文件,在 CUDA 上进行推理: + +```python +>>> from mmpretrain import ImageClassificationInferencer +>>> image = 'https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG' +>>> config = 'configs/resnet/resnet50_8xb32_in1k.py' +>>> checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth' +>>> inferencer = ImageClassificationInferencer(model=config, pretrained=checkpoint, device='cuda') +>>> result = 
inferencer(image)[0] +>>> print(result['pred_class']) +sea snake +``` + +## 使用 Gradio 推理示例 + +我们还提供了一个基于 gradio 的推理示例,提供了 MMPretrain 所支持的所有任务的推理展示功能,你可以在 [projects/gradio_demo/launch.py](https://github.com/open-mmlab/mmpretrain/blob/main/projects/gradio_demo/launch.py) 找到这一例程。 + +请首先使用 `pip install -U gradio` 安装 `gradio` 库。 + +这里是界面效果预览: + + + +## 从图像中提取特征 + +与 `model.extract_feat` 相比,`FeatureExtractor` 用于直接从图像文件中提取特征,而不是从一批张量中提取特征。简单说,`model.extract_feat` 的输入是 `torch.Tensor`,`FeatureExtractor` 的输入是图像。 + +``` +>>> from mmpretrain import FeatureExtractor, get_model +>>> model = get_model('resnet50_8xb32_in1k', backbone=dict(out_indices=(0, 1, 2, 3))) +>>> extractor = FeatureExtractor(model) +>>> features = extractor('https://github.com/open-mmlab/mmpretrain/raw/main/demo/demo.JPEG')[0] +>>> features[0].shape, features[1].shape, features[2].shape, features[3].shape +(torch.Size([256]), torch.Size([512]), torch.Size([1024]), torch.Size([2048])) +``` diff --git a/docs/zh_CN/user_guides/test.md b/docs/zh_CN/user_guides/test.md new file mode 100644 index 0000000..054e1e4 --- /dev/null +++ b/docs/zh_CN/user_guides/test.md @@ -0,0 +1,117 @@ +# 测试 + +## 单机单卡测试 + +你可以使用 `tools/test.py` 在电脑上用 CPU 或是 GPU 进行模型的测试。 + +以下是测试脚本的完整用法: + +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ARGS] +``` + +````{note} +默认情况下,MMPretrain 会自动调用你的 GPU 进行测试。如果你有 GPU 但仍想使用 CPU 进行测试,请设置环境变量 `CUDA_VISIBLE_DEVICES` 为空或者 -1 来对禁用 GPU。 + +```bash +CUDA_VISIBLE_DEVICES=-1 python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ARGS] +``` +```` + +| 参数 | 描述 | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `CONFIG_FILE` | 配置文件的路径。 | +| `CHECKPOINT_FILE` | 权重文件路径(支持 http 链接,你可以在[这里](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html)寻找需要的权重文件)。 | +| `--work-dir WORK_DIR` | 用来保存测试指标结果的文件夹。 | +| `--out OUT` | 用来保存测试输出的文件。 | +| `--out-item OUT_ITEM` | 指定测试输出文件的内容,可以为 "pred" 或 "metrics",其中 "pred" 表示保存所有模型输出,这些数据可以用于离线测评;"metrics" 表示输出测试指标。默认为 "pred"。 | +| `--cfg-options CFG_OPTIONS` | 重载配置文件中的一些设置。使用类似 `xxx=yyy` 的键值对形式指定,这些设置会被融合入从配置文件读取的配置。你可以使用 `key="[a,b]"` 或者 `key=a,b` 的格式来指定列表格式的值,且支持嵌套,例如 \`key="[(a,b),(c,d)]",这里的引号是不可省略的。另外每个重载项内部不可出现空格。 | +| `--show-dir SHOW_DIR` | 用于保存可视化预测结果图像的文件夹。 | +| `--show` | 在窗口中显示预测结果图像。 | +| `--interval INTERVAL` | 每隔多少样本进行一次预测结果可视化。 | +| `--wait-time WAIT_TIME` | 每个窗口的显示时间(单位为秒)。 | +| `--no-pin-memory` | 是否在 dataloaders 中关闭 `pin_memory` 选项 | +| `--tta` | 是否开启 Test-Time-Aug (TTA). 
如果配置文件有 `tta_pipeline` 和 `tta_model`,将使用这些配置指定 TTA transforms,并且决定如何融合 TTA 的结果。 否则,通过平均分类分数使用 flip TTA。 | +| `--launcher {none,pytorch,slurm,mpi}` | 启动器,默认为 "none"。 | + +## 单机多卡测试 + +我们提供了一个 shell 脚本,可以使用 `torch.distributed.launch` 启动多 GPU 任务。 + +```shell +bash ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS] +``` + +| 参数 | 描述 | +| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| `CONFIG_FILE` | 配置文件的路径。 | +| `CHECKPOINT_FILE` | 权重文件路径(支持 http 链接,你可以在[这里](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html)寻找需要的权重文件)。 | +| `GPU_NUM` | 使用的 GPU 数量。 | +| `[PY_ARGS]` | `tools/test.py` 支持的其他可选参数,参见[上文](#单机单卡测试)。 | + +你还可以使用环境变量来指定启动器的额外参数,比如用如下命令将启动器的通讯端口变更为 29666: + +```shell +PORT=29666 bash ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS] +``` + +如果你希望使用不同的 GPU 进行多项测试任务,可以在启动时指定不同的通讯端口和不同的可用设备。 + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash ./tools/dist_test.sh ${CONFIG_FILE1} ${CHECKPOINT_FILE} 4 [PY_ARGS] +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 bash ./tools/dist_test.sh ${CONFIG_FILE2} ${CHECKPOINT_FILE} 4 [PY_ARGS] +``` + +## 多机测试 + +### 同一网络下的多机 + +如果你希望使用同一局域网下连接的多台电脑进行一个测试任务,可以使用如下命令: + +在第一台机器上: + +```shell +NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_test.sh $CONFIG $CHECKPOINT_FILE $GPUS +``` + +在第二台机器上: + +```shell +NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_test.sh $CONFIG $CHECKPOINT_FILE $GPUS +``` + +和单机多卡相比,你需要指定一些额外的环境变量: + +| 环境变量 | 描述 | +| ------------- | ---------------------------------------------- | +| `NNODES` | 机器总数。 | +| `NODE_RANK` | 本机的序号 | +| `PORT` | 通讯端口,它在所有机器上都应当是一致的。 | +| `MASTER_ADDR` | 主机的 IP 地址,它在所有机器上都应当是一致的。 | + +### Slurm 管理下的多机集群 + +如果你在 [slurm](https://slurm.schedmd.com/) 集群上,可以使用 `tools/slurm_test.sh` 脚本启动任务。 + +```shell +[ENV_VARS] ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${CHECKPOINT_FILE} [PY_ARGS] +``` + +这里是该脚本的一些参数: + +| 参数 | 描述 | +| ----------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| `PARTITION` | 使用的集群分区。 | +| `JOB_NAME` | 任务的名称,你可以随意起一个名字。 | +| `CONFIG_FILE` | 配置文件路径。 | +| `CHECKPOINT_FILE` | 权重文件路径(支持 http 链接,你可以在[这里](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html)寻找需要的权重文件)。 | +| `[PY_ARGS]` | `tools/test.py` 支持的其他可选参数,参见[上文](#单机单卡测试)。 | + +这里是一些你可以用来配置 slurm 任务的环境变量: + +| 环境变量 | 描述 | +| --------------- | ------------------------------------------------------------------------------------------ | +| `GPUS` | 使用的 GPU 总数,默认为 8。 | +| `GPUS_PER_NODE` | 每个节点分配的 GPU 数,你可以根据节点情况指定。默认为 8。 | +| `CPUS_PER_TASK` | 每个任务分配的 CPU 数(通常一个 GPU 对应一个任务)。默认为 5。 | +| `SRUN_ARGS` | `srun` 命令支持的其他参数。可用的选项参见[官方文档](https://slurm.schedmd.com/srun.html)。 | diff --git a/docs/zh_CN/user_guides/train.md b/docs/zh_CN/user_guides/train.md new file mode 100644 index 0000000..841edab --- /dev/null +++ b/docs/zh_CN/user_guides/train.md @@ -0,0 +1,118 @@ +# 训练 + +在本教程中,我们将介绍如何使用 MMPretrain 中提供的脚本启动训练任务。 +如果你需要了解一些具体的训练例子,可以查阅 [如何在自定义数据集上进行模型预训练](../notes/pretrain_custom_dataset.md) 和 [如何在自定义数据集上微调模型](../notes/finetune_custom_dataset.md). 
+ +## 单机单卡训练 + +你可以使用 `tools/train.py` 在电脑上用 CPU 或是 GPU 进行模型的训练。 + +以下是训练脚本的完整用法: + +```shell +python tools/train.py ${CONFIG_FILE} [ARGS] +``` + +````{note} +默认情况下,MMPretrain 会自动调用你的 GPU 进行训练。如果你有 GPU 但仍想使用 CPU 进行训练,请设置环境变量 `CUDA_VISIBLE_DEVICES` 为空或者 -1 来对禁用 GPU。 + +```bash +CUDA_VISIBLE_DEVICES=-1 python tools/train.py ${CONFIG_FILE} [ARGS] +``` +```` + +| 参数 | 描述 | +| ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `CONFIG_FILE` | 配置文件的路径。 | +| `--work-dir WORK_DIR` | 用来保存训练日志和权重文件的文件夹,默认是 `./work_dirs` 目录下,与配置文件同名的文件夹。 | +| `--resume [RESUME]` | 恢复训练。如果指定了权重文件路径,则从指定的权重文件恢复;如果没有指定,则尝试从最新的权重文件进行恢复。 | +| `--amp` | 启用混合精度训练。 | +| `--no-validate` | **不建议** 在训练过程中不进行验证集上的精度验证。 | +| `--auto-scale-lr` | 自动根据实际的批次大小(batch size)和预设的批次大小对学习率进行缩放。 | +| `--no-pin-memory` | 是否在 dataloaders 中关闭 `pin_memory` 选项 | +| `--no-persistent-workers` | 是否在 dataloaders 中关闭 `persistent_workers` 选项 | +| `--cfg-options CFG_OPTIONS` | 重载配置文件中的一些设置。使用类似 `xxx=yyy` 的键值对形式指定,这些设置会被融合入从配置文件读取的配置。你可以使用 `key="[a,b]"` 或者 `key=a,b` 的格式来指定列表格式的值,且支持嵌套,例如 \`key="[(a,b),(c,d)]",这里的引号是不可省略的。另外每个重载项内部不可出现空格。 | +| `--launcher {none,pytorch,slurm,mpi}` | 启动器,默认为 "none"。 | + +## 单机多卡训练 + +我们提供了一个 shell 脚本,可以使用 `torch.distributed.launch` 启动多 GPU 任务。 + +```shell +bash ./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [PY_ARGS] +``` + +| 参数 | 描述 | +| ------------- | ---------------------------------------------------------------- | +| `CONFIG_FILE` | 配置文件的路径。 | +| `GPU_NUM` | 使用的 GPU 数量。 | +| `[PY_ARGS]` | `tools/train.py` 支持的其他可选参数,参见[上文](#单机单卡训练)。 | + +你还可以使用环境变量来指定启动器的额外参数,比如用如下命令将启动器的通讯端口变更为 29666: + +```shell +PORT=29666 bash ./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [PY_ARGS] +``` + +如果你希望使用不同的 GPU 进行多项训练任务,可以在启动时指定不同的通讯端口和不同的可用设备。 + +```shell +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash ./tools/dist_train.sh ${CONFIG_FILE1} 4 [PY_ARGS] +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 bash ./tools/dist_train.sh ${CONFIG_FILE2} 4 [PY_ARGS] +``` + +## 多机训练 + +### 同一网络下的多机 + +如果你希望使用同一局域网下连接的多台电脑进行一个训练任务,可以使用如下命令: + +在第一台机器上: + +```shell +NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_train.sh $CONFIG $GPUS +``` + +在第二台机器上: + +```shell +NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_train.sh $CONFIG $GPUS +``` + +和单机多卡相比,你需要指定一些额外的环境变量: + +| 环境变量 | 描述 | +| ------------- | ---------------------------------------------- | +| `NNODES` | 机器总数。 | +| `NODE_RANK` | 本机的序号 | +| `PORT` | 通讯端口,它在所有机器上都应当是一致的。 | +| `MASTER_ADDR` | 主机的 IP 地址,它在所有机器上都应当是一致的。 | + +通常来说,如果这几台机器之间不是高速网络连接,训练速度会非常慢。 + +### Slurm 管理下的多机集群 + +如果你在 [slurm](https://slurm.schedmd.com/) 集群上,可以使用 `tools/slurm_train.sh` 脚本启动任务。 + +```shell +[ENV_VARS] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} [PY_ARGS] +``` + +这里是该脚本的一些参数: + +| 参数 | 描述 | +| ------------- | ---------------------------------------------------------------- | +| `PARTITION` | 使用的集群分区。 | +| `JOB_NAME` | 任务的名称,你可以随意起一个名字。 | +| `CONFIG_FILE` | 配置文件路径。 | +| `WORK_DIR` | 用以保存日志和权重文件的文件夹。 | +| `[PY_ARGS]` | `tools/train.py` 支持的其他可选参数,参见[上文](#单机单卡训练)。 | + +这里是一些你可以用来配置 slurm 任务的环境变量: + +| 环境变量 | 描述 | +| --------------- | ------------------------------------------------------------------------------------------ | +| `GPUS` | 使用的 GPU 总数,默认为 8。 | +| `GPUS_PER_NODE` | 每个节点分配的 GPU 数,你可以根据节点情况指定。默认为 8。 | +| `CPUS_PER_TASK` | 每个任务分配的 CPU 数(通常一个 GPU 对应一个任务)。默认为 5。 | +| 
`SRUN_ARGS` | `srun` 命令支持的其他参数。可用的选项参见[官方文档](https://slurm.schedmd.com/srun.html)。 | diff --git a/inception-v3_8xb32_in1k.py b/inception-v3_8xb32_in1k.py new file mode 100644 index 0000000..e8a7cff --- /dev/null +++ b/inception-v3_8xb32_in1k.py @@ -0,0 +1,46 @@ +_base_ = [ + 'configs/_base_/models/inception_v3.py', + 'configs/_base_/datasets/tiny_imagenet_bs32.py', + 'configs/_base_/schedules/imagenet_bs256_coslr.py', + 'configs/_base_/default_runtime.py', +] + +import os +import torch + +torch.backends.cuda.matmul.allow_tf32=True +torch.backends.cudnn.allow_tf32=True + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='RandomResizedCrop', scale=299), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='ResizeEdge', scale=342, edge='short'), + dict(type='CenterCrop', crop_size=299), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline)) + +# optimizer +optim_wrapper = dict( + #type='AmpOptimWrapper', + #dtype='bfloat16', + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)) + +# 自定义hooks,添加ProfilerHook, 只在rank0启用 +custom_hooks = [ + dict(type='ProfilerHook', by_epoch=False, + profile_times=5, + on_trace_ready=dict(type="log_trace", sort_by="self_cuda_time_total"), + json_trace_path=f"trace_inceptionv3_tf32.json", + activity_with_cuda=True, + schedule=dict(wait=3, warmup=1, active=1, repeat=1)) # 这样的设置是10次 +] if os.environ['LOCAL_RANK'] == '0' else [] diff --git a/mmpretrain.egg-info/PKG-INFO b/mmpretrain.egg-info/PKG-INFO new file mode 100644 index 0000000..6849445 --- /dev/null +++ b/mmpretrain.egg-info/PKG-INFO @@ -0,0 +1,399 @@ +Metadata-Version: 2.1 +Name: mmpretrain +Version: 1.2.0 +Summary: OpenMMLab Model Pretraining Toolbox and Benchmark +Home-page: https://github.com/open-mmlab/mmpretrain +Author: MMPretrain Contributors +Author-email: openmmlab@gmail.com +License: Apache License 2.0 +Keywords: computer vision,image classification,unsupervised learning,self-supervised learning +Classifier: Development Status :: 4 - Beta +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: einops +Requires-Dist: importlib-metadata +Requires-Dist: mat4py +Requires-Dist: matplotlib +Requires-Dist: modelindex +Requires-Dist: numpy +Requires-Dist: rich +Provides-Extra: all +Requires-Dist: albumentations>=0.3.2; extra == "all" +Requires-Dist: grad-cam<1.5.0,>=1.3.7; extra == "all" +Requires-Dist: requests; extra == "all" +Requires-Dist: scikit-learn; extra == "all" +Requires-Dist: einops; extra == "all" +Requires-Dist: importlib-metadata; extra == "all" +Requires-Dist: mat4py; extra == "all" +Requires-Dist: matplotlib; extra == "all" +Requires-Dist: modelindex; extra == "all" +Requires-Dist: numpy; extra == "all" +Requires-Dist: rich; extra == "all" +Requires-Dist: 
coverage; extra == "all" +Requires-Dist: interrogate; extra == "all" +Requires-Dist: pytest; extra == "all" +Provides-Extra: tests +Requires-Dist: coverage; extra == "tests" +Requires-Dist: interrogate; extra == "tests" +Requires-Dist: pytest; extra == "tests" +Provides-Extra: optional +Requires-Dist: albumentations>=0.3.2; extra == "optional" +Requires-Dist: grad-cam<1.5.0,>=1.3.7; extra == "optional" +Requires-Dist: requests; extra == "optional" +Requires-Dist: scikit-learn; extra == "optional" +Provides-Extra: mim +Requires-Dist: mmcv<2.4.0,>=2.0.0; extra == "mim" +Requires-Dist: mmengine<1.0.0,>=0.8.3; extra == "mim" +Provides-Extra: multimodal +Requires-Dist: pycocotools; extra == "multimodal" +Requires-Dist: transformers>=4.28.0; extra == "multimodal" + +
+<!-- OpenMMLab banner (HTML block omitted): logo with links to the OpenMMLab website (HOT) and the OpenMMLab platform (TRY IT OUT) -->
+ +[![PyPI](https://img.shields.io/pypi/v/mmpretrain)](https://pypi.org/project/mmpretrain) +[![Docs](https://img.shields.io/badge/docs-latest-blue)](https://mmpretrain.readthedocs.io/en/latest/) +[![Build Status](https://github.com/open-mmlab/mmpretrain/workflows/build/badge.svg)](https://github.com/open-mmlab/mmpretrain/actions) +[![codecov](https://codecov.io/gh/open-mmlab/mmpretrain/branch/main/graph/badge.svg)](https://codecov.io/gh/open-mmlab/mmpretrain) +[![license](https://img.shields.io/github/license/open-mmlab/mmpretrain.svg)](https://github.com/open-mmlab/mmpretrain/blob/main/LICENSE) +[![open issues](https://isitmaintained.com/badge/open/open-mmlab/mmpretrain.svg)](https://github.com/open-mmlab/mmpretrain/issues) +[![issue resolution](https://isitmaintained.com/badge/resolution/open-mmlab/mmpretrain.svg)](https://github.com/open-mmlab/mmpretrain/issues) + +[📘 Documentation](https://mmpretrain.readthedocs.io/en/latest/) | +[🛠️ Installation](https://mmpretrain.readthedocs.io/en/latest/get_started.html#installation) | +[👀 Model Zoo](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html) | +[🆕 Update News](https://mmpretrain.readthedocs.io/en/latest/notes/changelog.html) | +[🤔 Reporting Issues](https://github.com/open-mmlab/mmpretrain/issues/new/choose) + + + +English | [简体中文](/README_zh-CN.md) + +
+ +## Introduction + +MMPreTrain is an open source pre-training toolbox based on PyTorch. It is a part of the [OpenMMLab](https://openmmlab.com/) project. + +The `main` branch works with **PyTorch 1.8+**. + +### Major features + +- Various backbones and pretrained models +- Rich training strategies (supervised learning, self-supervised learning, multi-modality learning etc.) +- Bag of training tricks +- Large-scale training configs +- High efficiency and extensibility +- Powerful toolkits for model analysis and experiments +- Various out-of-box inference tasks. + - Image Classification + - Image Caption + - Visual Question Answering + - Visual Grounding + - Retrieval (Image-To-Image, Text-To-Image, Image-To-Text) + +https://github.com/open-mmlab/mmpretrain/assets/26739999/e4dcd3a2-f895-4d1b-a351-fbc74a04e904 + +## What's new + +🌟 v1.2.0 was released in 04/01/2023 + +- Support LLaVA 1.5. +- Implement of RAM with a gradio interface. + +🌟 v1.1.0 was released in 12/10/2023 + +- Support Mini-GPT4 training and provide a Chinese model (based on Baichuan-7B) +- Support zero-shot classification based on CLIP. + +🌟 v1.0.0 was released in 04/07/2023 + +- Support inference of more **multi-modal** algorithms, such as [**LLaVA**](./configs/llava/), [**MiniGPT-4**](./configs/minigpt4), [**Otter**](./configs/otter/), etc. +- Support around **10 multi-modal** datasets! +- Add [**iTPN**](./configs/itpn/), [**SparK**](./configs/spark/) self-supervised learning algorithms. +- Provide examples of [New Config](./mmpretrain/configs/) and [DeepSpeed/FSDP with FlexibleRunner](./configs/mae/benchmarks/). Here are the documentation links of [New Config](https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta) and [DeepSpeed/FSDP with FlexibleRunner](https://mmengine.readthedocs.io/en/latest/api/generated/mmengine.runner.FlexibleRunner.html#mmengine.runner.FlexibleRunner). + +🌟 Upgrade from MMClassification to MMPreTrain + +- Integrated Self-supervised learning algorithms from **MMSelfSup**, such as **MAE**, **BEiT**, etc. +- Support **RIFormer**, a simple but effective vision backbone by removing token mixer. +- Refactor dataset pipeline visualization. +- Support **LeViT**, **XCiT**, **ViG**, **ConvNeXt-V2**, **EVA**, **RevViT**, **EfficientnetV2**, **CLIP**, **TinyViT** and **MixMIM** backbones. + +This release introduced a brand new and flexible training & test engine, but it's still in progress. Welcome +to try according to [the documentation](https://mmpretrain.readthedocs.io/en/latest/). + +And there are some BC-breaking changes. Please check [the migration tutorial](https://mmpretrain.readthedocs.io/en/latest/migration.html). + +Please refer to [changelog](https://mmpretrain.readthedocs.io/en/latest/notes/changelog.html) for more details and other release history. + +## Installation + +Below are quick steps for installation: + +```shell +conda create -n open-mmlab python=3.8 pytorch==1.10.1 torchvision==0.11.2 cudatoolkit=11.3 -c pytorch -y +conda activate open-mmlab +pip install openmim +git clone https://github.com/open-mmlab/mmpretrain.git +cd mmpretrain +mim install -e . +``` + +Please refer to [installation documentation](https://mmpretrain.readthedocs.io/en/latest/get_started.html) for more detailed installation and dataset preparation. 
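+To quickly verify the installation, you can run a minimal inference sketch with the Python API (the model name and demo image below are just examples shipped with this repository; the pretrained checkpoint is downloaded on first use):
+
+```python
+from mmpretrain import get_model, inference_model
+
+# Build a classifier and load its pretrained weights.
+model = get_model('resnet18_8xb32_in1k', pretrained=True)
+
+# Classify the bundled demo image and print the predicted class name.
+result = inference_model('resnet18_8xb32_in1k', 'demo/demo.JPEG')
+print(result['pred_class'])
+```
+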
+ +For multi-modality models support, please install the extra dependencies by: + +```shell +mim install -e ".[multimodal]" +``` + +## User Guides + +We provided a series of tutorials about the basic usage of MMPreTrain for new users: + +- [Learn about Configs](https://mmpretrain.readthedocs.io/en/latest/user_guides/config.html) +- [Prepare Dataset](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html) +- [Inference with existing models](https://mmpretrain.readthedocs.io/en/latest/user_guides/inference.html) +- [Train](https://mmpretrain.readthedocs.io/en/latest/user_guides/train.html) +- [Test](https://mmpretrain.readthedocs.io/en/latest/user_guides/test.html) +- [Downstream tasks](https://mmpretrain.readthedocs.io/en/latest/user_guides/downstream.html) + +For more information, please refer to [our documentation](https://mmpretrain.readthedocs.io/en/latest/). + +## Model zoo + +Results and models are available in the [model zoo](https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html). + +
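+You can also query the model zoo programmatically; below is a minimal sketch using the `list_models` API (the wildcard pattern is only an example):
+
+```python
+from mmpretrain import list_models
+
+# All available model names, optionally filtered by a Unix-style wildcard pattern.
+print(len(list_models()))
+print(list_models('*convnext-b*21k'))
+```
+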
+<!-- Model zoo overview (HTML table omitted): Supported Backbones | Self-supervised Learning | Multi-Modality Algorithms | Others (Image Retrieval Task, Training&Test Tips) -->
+ +## Contributing + +We appreciate all contributions to improve MMPreTrain. +Please refer to [CONTRUBUTING](https://mmpretrain.readthedocs.io/en/latest/notes/contribution_guide.html) for the contributing guideline. + +## Acknowledgement + +MMPreTrain is an open source project that is contributed by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedbacks. +We wish that the toolbox and benchmark could serve the growing research community by providing a flexible toolkit to reimplement existing methods and supporting their own academic research. + +## Citation + +If you find this project useful in your research, please consider cite: + +```BibTeX +@misc{2023mmpretrain, + title={OpenMMLab's Pre-training Toolbox and Benchmark}, + author={MMPreTrain Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmpretrain}}, + year={2023} +} +``` + +## License + +This project is released under the [Apache 2.0 license](LICENSE). + +## Projects in OpenMMLab + +- [MMEngine](https://github.com/open-mmlab/mmengine): OpenMMLab foundational library for training deep learning models. +- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision. +- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages. +- [MMEval](https://github.com/open-mmlab/mmeval): A unified evaluation library for multiple machine learning libraries. +- [MMPreTrain](https://github.com/open-mmlab/mmpretrain): OpenMMLab pre-training toolbox and benchmark. +- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark. +- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. +- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark. +- [MMYOLO](https://github.com/open-mmlab/mmyolo): OpenMMLab YOLO series toolbox and benchmark. +- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark. +- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox. +- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark. +- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark. +- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark. +- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark. +- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark. +- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark. +- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark. +- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark. +- [MMagic](https://github.com/open-mmlab/mmagic): Open**MM**Lab **A**dvanced, **G**enerative and **I**ntelligent **C**reation toolbox. +- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox. +- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab model deployment framework. 
+- [Playground](https://github.com/open-mmlab/playground): A central hub for gathering and showcasing amazing projects built upon OpenMMLab. diff --git a/mmpretrain.egg-info/SOURCES.txt b/mmpretrain.egg-info/SOURCES.txt new file mode 100644 index 0000000..debd679 --- /dev/null +++ b/mmpretrain.egg-info/SOURCES.txt @@ -0,0 +1,371 @@ +LICENSE +MANIFEST.in +README.md +setup.cfg +setup.py +mmpretrain/__init__.py +mmpretrain/registry.py +mmpretrain/version.py +mmpretrain.egg-info/PKG-INFO +mmpretrain.egg-info/SOURCES.txt +mmpretrain.egg-info/dependency_links.txt +mmpretrain.egg-info/not-zip-safe +mmpretrain.egg-info/requires.txt +mmpretrain.egg-info/top_level.txt +mmpretrain/apis/__init__.py +mmpretrain/apis/base.py +mmpretrain/apis/feature_extractor.py +mmpretrain/apis/image_caption.py +mmpretrain/apis/image_classification.py +mmpretrain/apis/image_retrieval.py +mmpretrain/apis/model.py +mmpretrain/apis/multimodal_retrieval.py +mmpretrain/apis/nlvr.py +mmpretrain/apis/utils.py +mmpretrain/apis/visual_grounding.py +mmpretrain/apis/visual_question_answering.py +mmpretrain/datasets/__init__.py +mmpretrain/datasets/base_dataset.py +mmpretrain/datasets/builder.py +mmpretrain/datasets/caltech101.py +mmpretrain/datasets/categories.py +mmpretrain/datasets/cifar.py +mmpretrain/datasets/coco_caption.py +mmpretrain/datasets/coco_retrieval.py +mmpretrain/datasets/coco_vqa.py +mmpretrain/datasets/cub.py +mmpretrain/datasets/custom.py +mmpretrain/datasets/dataset_wrappers.py +mmpretrain/datasets/dtd.py +mmpretrain/datasets/fgvcaircraft.py +mmpretrain/datasets/flamingo.py +mmpretrain/datasets/flickr30k_caption.py +mmpretrain/datasets/flickr30k_retrieval.py +mmpretrain/datasets/flowers102.py +mmpretrain/datasets/food101.py +mmpretrain/datasets/gqa_dataset.py +mmpretrain/datasets/iconqa.py +mmpretrain/datasets/imagenet.py +mmpretrain/datasets/infographic_vqa.py +mmpretrain/datasets/inshop.py +mmpretrain/datasets/minigpt4_dataset.py +mmpretrain/datasets/mnist.py +mmpretrain/datasets/multi_label.py +mmpretrain/datasets/multi_task.py +mmpretrain/datasets/nlvr2.py +mmpretrain/datasets/nocaps.py +mmpretrain/datasets/ocr_vqa.py +mmpretrain/datasets/oxfordiiitpet.py +mmpretrain/datasets/places205.py +mmpretrain/datasets/refcoco.py +mmpretrain/datasets/scienceqa.py +mmpretrain/datasets/stanfordcars.py +mmpretrain/datasets/sun397.py +mmpretrain/datasets/textvqa.py +mmpretrain/datasets/utils.py +mmpretrain/datasets/vg_vqa.py +mmpretrain/datasets/visual_genome.py +mmpretrain/datasets/vizwiz.py +mmpretrain/datasets/voc.py +mmpretrain/datasets/vsr.py +mmpretrain/datasets/samplers/__init__.py +mmpretrain/datasets/samplers/repeat_aug.py +mmpretrain/datasets/samplers/sequential.py +mmpretrain/datasets/transforms/__init__.py +mmpretrain/datasets/transforms/auto_augment.py +mmpretrain/datasets/transforms/formatting.py +mmpretrain/datasets/transforms/processing.py +mmpretrain/datasets/transforms/utils.py +mmpretrain/datasets/transforms/wrappers.py +mmpretrain/engine/__init__.py +mmpretrain/engine/hooks/__init__.py +mmpretrain/engine/hooks/class_num_check_hook.py +mmpretrain/engine/hooks/densecl_hook.py +mmpretrain/engine/hooks/ema_hook.py +mmpretrain/engine/hooks/margin_head_hooks.py +mmpretrain/engine/hooks/precise_bn_hook.py +mmpretrain/engine/hooks/retriever_hooks.py +mmpretrain/engine/hooks/simsiam_hook.py +mmpretrain/engine/hooks/swav_hook.py +mmpretrain/engine/hooks/switch_recipe_hook.py +mmpretrain/engine/hooks/visualization_hook.py +mmpretrain/engine/hooks/warmup_param_hook.py +mmpretrain/engine/optimizers/__init__.py 
+mmpretrain/engine/optimizers/adan_t.py +mmpretrain/engine/optimizers/lamb.py +mmpretrain/engine/optimizers/lars.py +mmpretrain/engine/optimizers/layer_decay_optim_wrapper_constructor.py +mmpretrain/engine/runners/__init__.py +mmpretrain/engine/runners/retrieval_loop.py +mmpretrain/engine/schedulers/__init__.py +mmpretrain/engine/schedulers/weight_decay_scheduler.py +mmpretrain/evaluation/__init__.py +mmpretrain/evaluation/functional/__init__.py +mmpretrain/evaluation/metrics/ANLS.py +mmpretrain/evaluation/metrics/__init__.py +mmpretrain/evaluation/metrics/caption.py +mmpretrain/evaluation/metrics/gqa.py +mmpretrain/evaluation/metrics/multi_label.py +mmpretrain/evaluation/metrics/multi_task.py +mmpretrain/evaluation/metrics/nocaps.py +mmpretrain/evaluation/metrics/retrieval.py +mmpretrain/evaluation/metrics/scienceqa.py +mmpretrain/evaluation/metrics/shape_bias_label.py +mmpretrain/evaluation/metrics/single_label.py +mmpretrain/evaluation/metrics/visual_grounding_eval.py +mmpretrain/evaluation/metrics/voc_multi_label.py +mmpretrain/evaluation/metrics/vqa.py +mmpretrain/models/__init__.py +mmpretrain/models/builder.py +mmpretrain/models/backbones/__init__.py +mmpretrain/models/backbones/alexnet.py +mmpretrain/models/backbones/base_backbone.py +mmpretrain/models/backbones/beit.py +mmpretrain/models/backbones/conformer.py +mmpretrain/models/backbones/convmixer.py +mmpretrain/models/backbones/convnext.py +mmpretrain/models/backbones/cspnet.py +mmpretrain/models/backbones/davit.py +mmpretrain/models/backbones/deit.py +mmpretrain/models/backbones/deit3.py +mmpretrain/models/backbones/densenet.py +mmpretrain/models/backbones/edgenext.py +mmpretrain/models/backbones/efficientformer.py +mmpretrain/models/backbones/efficientnet.py +mmpretrain/models/backbones/efficientnet_v2.py +mmpretrain/models/backbones/hivit.py +mmpretrain/models/backbones/hornet.py +mmpretrain/models/backbones/hrnet.py +mmpretrain/models/backbones/inception_v3.py +mmpretrain/models/backbones/lenet.py +mmpretrain/models/backbones/levit.py +mmpretrain/models/backbones/mixmim.py +mmpretrain/models/backbones/mlp_mixer.py +mmpretrain/models/backbones/mobilenet_v2.py +mmpretrain/models/backbones/mobilenet_v3.py +mmpretrain/models/backbones/mobileone.py +mmpretrain/models/backbones/mobilevit.py +mmpretrain/models/backbones/mvit.py +mmpretrain/models/backbones/poolformer.py +mmpretrain/models/backbones/regnet.py +mmpretrain/models/backbones/replknet.py +mmpretrain/models/backbones/repmlp.py +mmpretrain/models/backbones/repvgg.py +mmpretrain/models/backbones/res2net.py +mmpretrain/models/backbones/resnest.py +mmpretrain/models/backbones/resnet.py +mmpretrain/models/backbones/resnet_cifar.py +mmpretrain/models/backbones/resnext.py +mmpretrain/models/backbones/revvit.py +mmpretrain/models/backbones/riformer.py +mmpretrain/models/backbones/seresnet.py +mmpretrain/models/backbones/seresnext.py +mmpretrain/models/backbones/shufflenet_v1.py +mmpretrain/models/backbones/shufflenet_v2.py +mmpretrain/models/backbones/sparse_convnext.py +mmpretrain/models/backbones/sparse_resnet.py +mmpretrain/models/backbones/swin_transformer.py +mmpretrain/models/backbones/swin_transformer_v2.py +mmpretrain/models/backbones/t2t_vit.py +mmpretrain/models/backbones/timm_backbone.py +mmpretrain/models/backbones/tinyvit.py +mmpretrain/models/backbones/tnt.py +mmpretrain/models/backbones/twins.py +mmpretrain/models/backbones/van.py +mmpretrain/models/backbones/vgg.py +mmpretrain/models/backbones/vig.py +mmpretrain/models/backbones/vision_transformer.py 
+mmpretrain/models/backbones/vit_eva02.py +mmpretrain/models/backbones/vit_sam.py +mmpretrain/models/backbones/xcit.py +mmpretrain/models/classifiers/__init__.py +mmpretrain/models/classifiers/base.py +mmpretrain/models/classifiers/hugging_face.py +mmpretrain/models/classifiers/image.py +mmpretrain/models/classifiers/timm.py +mmpretrain/models/heads/__init__.py +mmpretrain/models/heads/beitv1_head.py +mmpretrain/models/heads/beitv2_head.py +mmpretrain/models/heads/cae_head.py +mmpretrain/models/heads/cls_head.py +mmpretrain/models/heads/conformer_head.py +mmpretrain/models/heads/contrastive_head.py +mmpretrain/models/heads/deit_head.py +mmpretrain/models/heads/efficientformer_head.py +mmpretrain/models/heads/grounding_head.py +mmpretrain/models/heads/itc_head.py +mmpretrain/models/heads/itm_head.py +mmpretrain/models/heads/itpn_clip_head.py +mmpretrain/models/heads/latent_heads.py +mmpretrain/models/heads/levit_head.py +mmpretrain/models/heads/linear_head.py +mmpretrain/models/heads/mae_head.py +mmpretrain/models/heads/margin_head.py +mmpretrain/models/heads/mim_head.py +mmpretrain/models/heads/mixmim_head.py +mmpretrain/models/heads/mocov3_head.py +mmpretrain/models/heads/multi_label_cls_head.py +mmpretrain/models/heads/multi_label_csra_head.py +mmpretrain/models/heads/multi_label_linear_head.py +mmpretrain/models/heads/multi_task_head.py +mmpretrain/models/heads/seq_gen_head.py +mmpretrain/models/heads/simmim_head.py +mmpretrain/models/heads/spark_head.py +mmpretrain/models/heads/stacked_head.py +mmpretrain/models/heads/swav_head.py +mmpretrain/models/heads/vig_head.py +mmpretrain/models/heads/vision_transformer_head.py +mmpretrain/models/heads/vqa_head.py +mmpretrain/models/losses/__init__.py +mmpretrain/models/losses/asymmetric_loss.py +mmpretrain/models/losses/cae_loss.py +mmpretrain/models/losses/cosine_similarity_loss.py +mmpretrain/models/losses/cross_correlation_loss.py +mmpretrain/models/losses/cross_entropy_loss.py +mmpretrain/models/losses/focal_loss.py +mmpretrain/models/losses/label_smooth_loss.py +mmpretrain/models/losses/reconstruction_loss.py +mmpretrain/models/losses/seesaw_loss.py +mmpretrain/models/losses/swav_loss.py +mmpretrain/models/losses/utils.py +mmpretrain/models/multimodal/__init__.py +mmpretrain/models/multimodal/blip/__init__.py +mmpretrain/models/multimodal/blip/blip_caption.py +mmpretrain/models/multimodal/blip/blip_grounding.py +mmpretrain/models/multimodal/blip/blip_nlvr.py +mmpretrain/models/multimodal/blip/blip_retrieval.py +mmpretrain/models/multimodal/blip/blip_vqa.py +mmpretrain/models/multimodal/blip/language_model.py +mmpretrain/models/multimodal/blip2/Qformer.py +mmpretrain/models/multimodal/blip2/__init__.py +mmpretrain/models/multimodal/blip2/blip2_caption.py +mmpretrain/models/multimodal/blip2/blip2_opt_vqa.py +mmpretrain/models/multimodal/blip2/blip2_retriever.py +mmpretrain/models/multimodal/blip2/modeling_opt.py +mmpretrain/models/multimodal/chinese_clip/__init__.py +mmpretrain/models/multimodal/chinese_clip/bert.py +mmpretrain/models/multimodal/chinese_clip/chinese_clip.py +mmpretrain/models/multimodal/chinese_clip/utils.py +mmpretrain/models/multimodal/clip/__init__.py +mmpretrain/models/multimodal/clip/clip.py +mmpretrain/models/multimodal/clip/clip_transformer.py +mmpretrain/models/multimodal/clip/utils.py +mmpretrain/models/multimodal/flamingo/__init__.py +mmpretrain/models/multimodal/flamingo/adapter.py +mmpretrain/models/multimodal/flamingo/flamingo.py +mmpretrain/models/multimodal/flamingo/modules.py 
+mmpretrain/models/multimodal/flamingo/utils.py +mmpretrain/models/multimodal/llava/__init__.py +mmpretrain/models/multimodal/llava/llava.py +mmpretrain/models/multimodal/llava/modules.py +mmpretrain/models/multimodal/minigpt4/__init__.py +mmpretrain/models/multimodal/minigpt4/minigpt4.py +mmpretrain/models/multimodal/ofa/__init__.py +mmpretrain/models/multimodal/ofa/ofa.py +mmpretrain/models/multimodal/ofa/ofa_modules.py +mmpretrain/models/multimodal/otter/__init__.py +mmpretrain/models/multimodal/otter/otter.py +mmpretrain/models/multimodal/ram/__init__.py +mmpretrain/models/multimodal/ram/bert.py +mmpretrain/models/multimodal/ram/gradio_demo.py +mmpretrain/models/multimodal/ram/openset_utils.py +mmpretrain/models/multimodal/ram/ram.py +mmpretrain/models/multimodal/ram/utils.py +mmpretrain/models/multimodal/ram/config/__init__.py +mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py +mmpretrain/models/multimodal/ram/run/__init__.py +mmpretrain/models/multimodal/ram/run/inference.py +mmpretrain/models/necks/__init__.py +mmpretrain/models/necks/beitv2_neck.py +mmpretrain/models/necks/cae_neck.py +mmpretrain/models/necks/densecl_neck.py +mmpretrain/models/necks/gap.py +mmpretrain/models/necks/gem.py +mmpretrain/models/necks/hr_fuse.py +mmpretrain/models/necks/itpn_neck.py +mmpretrain/models/necks/linear_neck.py +mmpretrain/models/necks/mae_neck.py +mmpretrain/models/necks/milan_neck.py +mmpretrain/models/necks/mixmim_neck.py +mmpretrain/models/necks/mocov2_neck.py +mmpretrain/models/necks/nonlinear_neck.py +mmpretrain/models/necks/simmim_neck.py +mmpretrain/models/necks/spark_neck.py +mmpretrain/models/necks/swav_neck.py +mmpretrain/models/peft/__init__.py +mmpretrain/models/peft/lora.py +mmpretrain/models/retrievers/__init__.py +mmpretrain/models/retrievers/base.py +mmpretrain/models/retrievers/image2image.py +mmpretrain/models/selfsup/__init__.py +mmpretrain/models/selfsup/barlowtwins.py +mmpretrain/models/selfsup/base.py +mmpretrain/models/selfsup/beit.py +mmpretrain/models/selfsup/byol.py +mmpretrain/models/selfsup/cae.py +mmpretrain/models/selfsup/densecl.py +mmpretrain/models/selfsup/eva.py +mmpretrain/models/selfsup/itpn.py +mmpretrain/models/selfsup/mae.py +mmpretrain/models/selfsup/maskfeat.py +mmpretrain/models/selfsup/mff.py +mmpretrain/models/selfsup/milan.py +mmpretrain/models/selfsup/mixmim.py +mmpretrain/models/selfsup/moco.py +mmpretrain/models/selfsup/mocov3.py +mmpretrain/models/selfsup/simclr.py +mmpretrain/models/selfsup/simmim.py +mmpretrain/models/selfsup/simsiam.py +mmpretrain/models/selfsup/spark.py +mmpretrain/models/selfsup/swav.py +mmpretrain/models/tta/__init__.py +mmpretrain/models/tta/score_tta.py +mmpretrain/models/utils/__init__.py +mmpretrain/models/utils/attention.py +mmpretrain/models/utils/batch_shuffle.py +mmpretrain/models/utils/box_utils.py +mmpretrain/models/utils/channel_shuffle.py +mmpretrain/models/utils/clip_generator_helper.py +mmpretrain/models/utils/data_preprocessor.py +mmpretrain/models/utils/ema.py +mmpretrain/models/utils/embed.py +mmpretrain/models/utils/helpers.py +mmpretrain/models/utils/huggingface.py +mmpretrain/models/utils/inverted_residual.py +mmpretrain/models/utils/layer_scale.py +mmpretrain/models/utils/make_divisible.py +mmpretrain/models/utils/norm.py +mmpretrain/models/utils/position_encoding.py +mmpretrain/models/utils/res_layer_extra_norm.py +mmpretrain/models/utils/se_layer.py +mmpretrain/models/utils/sparse_modules.py +mmpretrain/models/utils/swiglu_ffn.py +mmpretrain/models/utils/tokenizer.py 
+mmpretrain/models/utils/vector_quantizer.py +mmpretrain/models/utils/batch_augments/__init__.py +mmpretrain/models/utils/batch_augments/cutmix.py +mmpretrain/models/utils/batch_augments/mixup.py +mmpretrain/models/utils/batch_augments/resizemix.py +mmpretrain/models/utils/batch_augments/wrapper.py +mmpretrain/structures/__init__.py +mmpretrain/structures/data_sample.py +mmpretrain/structures/multi_task_data_sample.py +mmpretrain/structures/utils.py +mmpretrain/utils/__init__.py +mmpretrain/utils/analyze.py +mmpretrain/utils/collect_env.py +mmpretrain/utils/dependency.py +mmpretrain/utils/misc.py +mmpretrain/utils/progress.py +mmpretrain/utils/setup_env.py +mmpretrain/visualization/__init__.py +mmpretrain/visualization/utils.py +mmpretrain/visualization/visualizer.py +requirements/docs.txt +requirements/mminstall.txt +requirements/multimodal.txt +requirements/optional.txt +requirements/readthedocs.txt +requirements/runtime.txt +requirements/tests.txt +tests/test_tools.py \ No newline at end of file diff --git a/README.md b/mmpretrain.egg-info/dependency_links.txt similarity index 100% rename from README.md rename to mmpretrain.egg-info/dependency_links.txt diff --git a/mmpretrain.egg-info/not-zip-safe b/mmpretrain.egg-info/not-zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/mmpretrain.egg-info/not-zip-safe @@ -0,0 +1 @@ + diff --git a/mmpretrain.egg-info/requires.txt b/mmpretrain.egg-info/requires.txt new file mode 100644 index 0000000..5571166 --- /dev/null +++ b/mmpretrain.egg-info/requires.txt @@ -0,0 +1,42 @@ +einops +importlib-metadata +mat4py +matplotlib +modelindex +numpy +rich + +[all] +albumentations>=0.3.2 +grad-cam<1.5.0,>=1.3.7 +requests +scikit-learn +einops +importlib-metadata +mat4py +matplotlib +modelindex +numpy +rich +coverage +interrogate +pytest + +[mim] +mmcv<2.4.0,>=2.0.0 +mmengine<1.0.0,>=0.8.3 + +[multimodal] +pycocotools +transformers>=4.28.0 + +[optional] +albumentations>=0.3.2 +grad-cam<1.5.0,>=1.3.7 +requests +scikit-learn + +[tests] +coverage +interrogate +pytest diff --git a/mmpretrain.egg-info/top_level.txt b/mmpretrain.egg-info/top_level.txt new file mode 100644 index 0000000..2a6a347 --- /dev/null +++ b/mmpretrain.egg-info/top_level.txt @@ -0,0 +1 @@ +mmpretrain diff --git a/mmpretrain/__init__.py b/mmpretrain/__init__.py new file mode 100644 index 0000000..66866a8 --- /dev/null +++ b/mmpretrain/__init__.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import mmengine +from mmengine.utils import digit_version + +from .apis import * # noqa: F401, F403 +from .version import __version__ + +mmcv_minimum_version = '2.0.0' +mmcv_maximum_version = '2.4.0' +mmcv_version = digit_version(mmcv.__version__) + +mmengine_minimum_version = '0.8.3' +mmengine_maximum_version = '1.0.0' +mmengine_version = digit_version(mmengine.__version__) + +assert (mmcv_version >= digit_version(mmcv_minimum_version) + and mmcv_version < digit_version(mmcv_maximum_version)), \ + f'MMCV=={mmcv.__version__} is used but incompatible. ' \ + f'Please install mmcv>={mmcv_minimum_version}, <{mmcv_maximum_version}.' + +assert (mmengine_version >= digit_version(mmengine_minimum_version) + and mmengine_version < digit_version(mmengine_maximum_version)), \ + f'MMEngine=={mmengine.__version__} is used but incompatible. ' \ + f'Please install mmengine>={mmengine_minimum_version}, ' \ + f'<{mmengine_maximum_version}.' 
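A quick note on the version gate that the ``mmpretrain/__init__.py`` hunk above sets up: ``digit_version`` (imported there from ``mmengine.utils``) turns a version string into a comparable tuple, so the two asserts are plain range checks. A minimal illustration, with arbitrary version strings that are not taken from this patch:

>>> from mmengine.utils import digit_version
>>> digit_version('2.1.0') >= digit_version('2.0.0')
True
>>> digit_version('2.1.0') < digit_version('2.4.0')
True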
+ +__all__ = ['__version__'] diff --git a/mmpretrain/apis/__init__.py b/mmpretrain/apis/__init__.py new file mode 100644 index 0000000..6fbf443 --- /dev/null +++ b/mmpretrain/apis/__init__.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved.
+from .base import BaseInferencer +from .feature_extractor import FeatureExtractor +from .image_caption import ImageCaptionInferencer +from .image_classification import ImageClassificationInferencer +from .image_retrieval import ImageRetrievalInferencer +from .model import (ModelHub, get_model, inference_model, init_model, + list_models) +from .multimodal_retrieval import (ImageToTextRetrievalInferencer, + TextToImageRetrievalInferencer) +from .nlvr import NLVRInferencer +from .visual_grounding import VisualGroundingInferencer +from .visual_question_answering import VisualQuestionAnsweringInferencer + +__all__ = [ + 'init_model', 'inference_model', 'list_models', 'get_model', 'ModelHub', + 'ImageClassificationInferencer', 'ImageRetrievalInferencer', + 'FeatureExtractor', 'ImageCaptionInferencer', + 'TextToImageRetrievalInferencer', 'VisualGroundingInferencer', + 'VisualQuestionAnsweringInferencer', 'ImageToTextRetrievalInferencer', + 'BaseInferencer', 'NLVRInferencer' +]
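For orientation, the entry points re-exported by ``mmpretrain/apis/__init__.py`` above are typically used along the following lines. This is a usage sketch rather than part of the patch; the model name and image path are the same placeholders used in the ``FeatureExtractor`` docstring further below.

>>> from mmpretrain.apis import list_models, get_model, ImageClassificationInferencer
>>> list_models('*resnet50*')   # wildcard query over the models defined in metafiles
>>> model = get_model('resnet50_8xb32_in1k', pretrained=True)
>>> inferencer = ImageClassificationInferencer('resnet50_8xb32_in1k')
>>> inferencer('demo/demo.JPEG')   # returns one prediction dict per input image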
zK1v1U5`z3Lo~L4&3W}*hr{W3~>r@aTM2CuBpvW8YQq>D_R*#u9O5PkL-X5nce#&ka zXcV!4IEI1@n7o6+QVezBlxETnH$H2r+5=0|{;TQQ=bEAY$1=6gv`XvK#Y<=Aw1t8N zj87}*1y&gMs+iVi+C}4YYOl$5>vL`4Nmb6&UXn=P3jEE2G%M+TADMlzG`VKEda_h* zf^(7a_fYRxVd(9y7K}zTk# literal 0 HcmV?d00001 diff --git a/mmpretrain/apis/base.py b/mmpretrain/apis/base.py new file mode 100644 index 0000000..7bff6bd --- /dev/null +++ b/mmpretrain/apis/base.py @@ -0,0 +1,390 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import abstractmethod +from math import ceil +from typing import Callable, Iterable, List, Optional, Tuple, Union + +import numpy as np +import torch +from mmengine.config import Config +from mmengine.dataset import default_collate +from mmengine.fileio import get_file_backend +from mmengine.model import BaseModel +from mmengine.runner import load_checkpoint + +from mmpretrain.structures import DataSample +from mmpretrain.utils import track +from .model import get_model, list_models + +ModelType = Union[BaseModel, str, Config] +InputType = Union[str, np.ndarray, list] + + +class BaseInferencer: + """Base inferencer for various tasks. + + The BaseInferencer provides the standard workflow for inference as follows: + + 1. Preprocess the input data by :meth:`preprocess`. + 2. Forward the data to the model by :meth:`forward`. ``BaseInferencer`` + assumes the model inherits from :class:`mmengine.models.BaseModel` and + will call `model.test_step` in :meth:`forward` by default. + 3. Visualize the results by :meth:`visualize`. + 4. Postprocess and return the results by :meth:`postprocess`. + + When we call the subclasses inherited from BaseInferencer (not overriding + ``__call__``), the workflow will be executed in order. + + All subclasses of BaseInferencer could define the following class + attributes for customization: + + - ``preprocess_kwargs``: The keys of the kwargs that will be passed to + :meth:`preprocess`. + - ``forward_kwargs``: The keys of the kwargs that will be passed to + :meth:`forward` + - ``visualize_kwargs``: The keys of the kwargs that will be passed to + :meth:`visualize` + - ``postprocess_kwargs``: The keys of the kwargs that will be passed to + :meth:`postprocess` + + All attributes mentioned above should be a ``set`` of keys (strings), + and each key should not be duplicated. Actually, :meth:`__call__` will + dispatch all the arguments to the corresponding methods according to the + ``xxx_kwargs`` mentioned above. + + Subclasses inherited from ``BaseInferencer`` should implement + :meth:`_init_pipeline`, :meth:`visualize` and :meth:`postprocess`: + + - _init_pipeline: Return a callable object to preprocess the input data. + - visualize: Visualize the results returned by :meth:`forward`. + - postprocess: Postprocess the results returned by :meth:`forward` and + :meth:`visualize`. + + Args: + model (BaseModel | str | Config): A model name or a path to the config + file, or a :obj:`BaseModel` object. The model name can be found + by ``cls.list_models()`` and you can also query it in + :doc:`/modelzoo_statistics`. + pretrained (str, optional): Path to the checkpoint. If None, it will + try to find a pre-defined weight from the model you specified + (only work if the ``model`` is a model name). Defaults to None. + device (str | torch.device | None): Transfer the model to the target + device. Defaults to None. + device_map (str | dict | None): A map that specifies where each + submodule should go. 
It doesn't need to be refined to each + parameter/buffer name, once a given module name is inside, every + submodule of it will be sent to the same device. You can use + `device_map="auto"` to automatically generate the device map. + Defaults to None. + offload_folder (str | None): If the `device_map` contains any value + `"disk"`, the folder where we will offload weights. + **kwargs: Other keyword arguments to initialize the model (only work if + the ``model`` is a model name). + """ + + preprocess_kwargs: set = set() + forward_kwargs: set = set() + visualize_kwargs: set = set() + postprocess_kwargs: set = set() + + def __init__(self, + model: ModelType, + pretrained: Union[bool, str] = True, + device: Union[str, torch.device, None] = None, + device_map=None, + offload_folder=None, + **kwargs) -> None: + + if isinstance(model, BaseModel): + if isinstance(pretrained, str): + load_checkpoint(model, pretrained, map_location='cpu') + if device_map is not None: + from .utils import dispatch_model + model = dispatch_model( + model, + device_map=device_map, + offload_folder=offload_folder) + elif device is not None: + model.to(device) + else: + model = get_model( + model, + pretrained, + device=device, + device_map=device_map, + offload_folder=offload_folder, + **kwargs) + + model.eval() + + self.config = model._config + self.model = model + self.pipeline = self._init_pipeline(self.config) + self.visualizer = None + + def __call__( + self, + inputs, + return_datasamples: bool = False, + batch_size: int = 1, + **kwargs, + ) -> dict: + """Call the inferencer. + + Args: + inputs (InputsType): Inputs for the inferencer. + return_datasamples (bool): Whether to return results as + :obj:`BaseDataElement`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. + **kwargs: Key words arguments passed to :meth:`preprocess`, + :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. + Each key in kwargs should be in the corresponding set of + ``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs`` + and ``postprocess_kwargs``. + + Returns: + dict: Inference and visualization results. + """ + ( + preprocess_kwargs, + forward_kwargs, + visualize_kwargs, + postprocess_kwargs, + ) = self._dispatch_kwargs(**kwargs) + + ori_inputs = self._inputs_to_list(inputs) + inputs = self.preprocess( + ori_inputs, batch_size=batch_size, **preprocess_kwargs) + preds = [] + for data in track( + inputs, 'Inference', total=ceil(len(ori_inputs) / batch_size)): + preds.extend(self.forward(data, **forward_kwargs)) + visualization = self.visualize(ori_inputs, preds, **visualize_kwargs) + results = self.postprocess(preds, visualization, return_datasamples, + **postprocess_kwargs) + return results + + def _inputs_to_list(self, inputs: InputType) -> list: + """Preprocess the inputs to a list. + + Cast the input data to a list of data. + + - list or tuple: return inputs + - str: + - Directory path: return all files in the directory + - other cases: return a list containing the string. The string + could be a path to file, a url or other types of string according + to the task. + - other: return a list with one item. + + Args: + inputs (str | array | list): Inputs for the inferencer. + + Returns: + list: List of input for the :meth:`preprocess`. 
+ """ + if isinstance(inputs, str): + backend = get_file_backend(inputs) + if hasattr(backend, 'isdir') and backend.isdir(inputs): + # Backends like HttpsBackend do not implement `isdir`, so only + # those backends that implement `isdir` could accept the inputs + # as a directory + file_list = backend.list_dir_or_file(inputs, list_dir=False) + inputs = [ + backend.join_path(inputs, file) for file in file_list + ] + + if not isinstance(inputs, (list, tuple)): + inputs = [inputs] + + return list(inputs) + + def preprocess(self, inputs: InputType, batch_size: int = 1, **kwargs): + """Process the inputs into a model-feedable format. + + Customize your preprocess by overriding this method. Preprocess should + return an iterable object, of which each item will be used as the + input of ``model.test_step``. + + ``BaseInferencer.preprocess`` will return an iterable chunked data, + which will be used in __call__ like this: + + .. code-block:: python + + def __call__(self, inputs, batch_size=1, **kwargs): + chunked_data = self.preprocess(inputs, batch_size, **kwargs) + for batch in chunked_data: + preds = self.forward(batch, **kwargs) + + Args: + inputs (InputsType): Inputs given by user. + batch_size (int): batch size. Defaults to 1. + + Yields: + Any: Data processed by the ``pipeline`` and ``default_collate``. + """ + chunked_data = self._get_chunk_data( + map(self.pipeline, inputs), batch_size) + yield from map(default_collate, chunked_data) + + @torch.no_grad() + def forward(self, inputs: Union[dict, tuple], **kwargs): + """Feed the inputs to the model.""" + return self.model.test_step(inputs) + + def visualize(self, + inputs: list, + preds: List[DataSample], + show: bool = False, + **kwargs) -> List[np.ndarray]: + """Visualize predictions. + + Customize your visualization by overriding this method. visualize + should return visualization results, which could be np.ndarray or any + other objects. + + Args: + inputs (list): Inputs preprocessed by :meth:`_inputs_to_list`. + preds (Any): Predictions of the model. + show (bool): Whether to display the image in a popup window. + Defaults to False. + + Returns: + List[np.ndarray]: Visualization results. + """ + if show: + raise NotImplementedError( + f'The `visualize` method of {self.__class__.__name__} ' + 'is not implemented.') + + @abstractmethod + def postprocess( + self, + preds: List[DataSample], + visualization: List[np.ndarray], + return_datasample=False, + **kwargs, + ) -> dict: + """Process the predictions and visualization results from ``forward`` + and ``visualize``. + + This method should be responsible for the following tasks: + + 1. Convert datasamples into a json-serializable dict if needed. + 2. Pack the predictions and visualization results and return them. + 3. Dump or log the predictions. + + Customize your postprocess by overriding this method. Make sure + ``postprocess`` will return a dict with visualization results and + inference results. + + Args: + preds (List[Dict]): Predictions of the model. + visualization (np.ndarray): Visualized predictions. + return_datasample (bool): Whether to return results as datasamples. + Defaults to False. + + Returns: + dict: Inference and visualization results with key ``predictions`` + and ``visualization`` + + - ``visualization (Any)``: Returned by :meth:`visualize` + - ``predictions`` (dict or DataSample): Returned by + :meth:`forward` and processed in :meth:`postprocess`. 
+ If ``return_datasample=False``, it usually should be a + json-serializable dict containing only basic data elements such + as strings and numbers. + """ + + @abstractmethod + def _init_pipeline(self, cfg: Config) -> Callable: + """Initialize the test pipeline. + + Return a pipeline to handle various input data, such as ``str``, + ``np.ndarray``. It is an abstract method in BaseInferencer, and should + be implemented in subclasses. + + The returned pipeline will be used to process a single data. + It will be used in :meth:`preprocess` like this: + + .. code-block:: python + def preprocess(self, inputs, batch_size, **kwargs): + ... + dataset = map(self.pipeline, dataset) + ... + """ + + def _get_chunk_data(self, inputs: Iterable, chunk_size: int): + """Get batch data from dataset. + + Args: + inputs (Iterable): An iterable dataset. + chunk_size (int): Equivalent to batch size. + + Yields: + list: batch data. + """ + inputs_iter = iter(inputs) + while True: + try: + chunk_data = [] + for _ in range(chunk_size): + processed_data = next(inputs_iter) + chunk_data.append(processed_data) + yield chunk_data + except StopIteration: + if chunk_data: + yield chunk_data + break + + def _dispatch_kwargs(self, **kwargs) -> Tuple[dict, dict, dict, dict]: + """Dispatch kwargs to preprocess(), forward(), visualize() and + postprocess() according to the actual demands. + + Returns: + Tuple[Dict, Dict, Dict, Dict]: kwargs passed to preprocess, + forward, visualize and postprocess respectively. + """ + # Ensure each argument only matches one function + method_kwargs = self.preprocess_kwargs | self.forward_kwargs | \ + self.visualize_kwargs | self.postprocess_kwargs + + union_kwargs = method_kwargs | set(kwargs.keys()) + if union_kwargs != method_kwargs: + unknown_kwargs = union_kwargs - method_kwargs + raise ValueError( + f'unknown argument {unknown_kwargs} for `preprocess`, ' + '`forward`, `visualize` and `postprocess`') + + preprocess_kwargs = {} + forward_kwargs = {} + visualize_kwargs = {} + postprocess_kwargs = {} + + for key, value in kwargs.items(): + if key in self.preprocess_kwargs: + preprocess_kwargs[key] = value + if key in self.forward_kwargs: + forward_kwargs[key] = value + if key in self.visualize_kwargs: + visualize_kwargs[key] = value + if key in self.postprocess_kwargs: + postprocess_kwargs[key] = value + + return ( + preprocess_kwargs, + forward_kwargs, + visualize_kwargs, + postprocess_kwargs, + ) + + @staticmethod + def list_models(pattern: Optional[str] = None): + """List models defined in metafile of corresponding packages. + + Args: + pattern (str | None): A wildcard pattern to match model names. + + Returns: + List[str]: a list of model names. + """ + return list_models(pattern=pattern) diff --git a/mmpretrain/apis/feature_extractor.py b/mmpretrain/apis/feature_extractor.py new file mode 100644 index 0000000..ee14f92 --- /dev/null +++ b/mmpretrain/apis/feature_extractor.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Callable, List, Optional, Union + +import torch +from mmcv.image import imread +from mmengine.config import Config +from mmengine.dataset import Compose, default_collate + +from mmpretrain.registry import TRANSFORMS +from .base import BaseInferencer, InputType +from .model import list_models + + +class FeatureExtractor(BaseInferencer): + """The inferencer for extract features. + + Args: + model (BaseModel | str | Config): A model name or a path to the config + file, or a :obj:`BaseModel` object. 
The model name can be found + by ``FeatureExtractor.list_models()`` and you can also query it in + :doc:`/modelzoo_statistics`. + pretrained (str, optional): Path to the checkpoint. If None, it will + try to find a pre-defined weight from the model you specified + (only work if the ``model`` is a model name). Defaults to None. + device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + **kwargs: Other keyword arguments to initialize the model (only work if + the ``model`` is a model name). + + Example: + >>> from mmpretrain import FeatureExtractor + >>> inferencer = FeatureExtractor('resnet50_8xb32_in1k', backbone=dict(out_indices=(0, 1, 2, 3))) + >>> feats = inferencer('demo/demo.JPEG', stage='backbone')[0] + >>> for feat in feats: + >>> print(feat.shape) + torch.Size([256, 56, 56]) + torch.Size([512, 28, 28]) + torch.Size([1024, 14, 14]) + torch.Size([2048, 7, 7]) + """ # noqa: E501 + + def __call__(self, + inputs: InputType, + batch_size: int = 1, + **kwargs) -> dict: + """Call the inferencer. + + Args: + inputs (str | array | list): The image path or array, or a list of + images. + batch_size (int): Batch size. Defaults to 1. + **kwargs: Other keyword arguments accepted by the `extract_feat` + method of the model. + + Returns: + tensor | Tuple[tensor]: The extracted features. + """ + ori_inputs = self._inputs_to_list(inputs) + inputs = self.preprocess(ori_inputs, batch_size=batch_size) + preds = [] + for data in inputs: + preds.extend(self.forward(data, **kwargs)) + + return preds + + @torch.no_grad() + def forward(self, inputs: Union[dict, tuple], **kwargs): + inputs = self.model.data_preprocessor(inputs, False)['inputs'] + outputs = self.model.extract_feat(inputs, **kwargs) + + def scatter(feats, index): + if isinstance(feats, torch.Tensor): + return feats[index] + else: + # Sequence of tensor + return type(feats)([scatter(item, index) for item in feats]) + + results = [] + for i in range(inputs.shape[0]): + results.append(scatter(outputs, i)) + + return results + + def _init_pipeline(self, cfg: Config) -> Callable: + test_pipeline_cfg = cfg.test_dataloader.dataset.pipeline + from mmpretrain.datasets import remove_transform + + # Image loading is finished in `self.preprocess`. + test_pipeline_cfg = remove_transform(test_pipeline_cfg, + 'LoadImageFromFile') + test_pipeline = Compose( + [TRANSFORMS.build(t) for t in test_pipeline_cfg]) + return test_pipeline + + def preprocess(self, inputs: List[InputType], batch_size: int = 1): + + def load_image(input_): + img = imread(input_) + if img is None: + raise ValueError(f'Failed to read image {input_}.') + return dict( + img=img, + img_shape=img.shape[:2], + ori_shape=img.shape[:2], + ) + + pipeline = Compose([load_image, self.pipeline]) + + chunked_data = self._get_chunk_data(map(pipeline, inputs), batch_size) + yield from map(default_collate, chunked_data) + + def visualize(self): + raise NotImplementedError( + "The FeatureExtractor doesn't support visualization.") + + def postprocess(self): + raise NotImplementedError( + "The FeatureExtractor doesn't need postprocessing.") + + @staticmethod + def list_models(pattern: Optional[str] = None): + """List all available model names. + + Args: + pattern (str | None): A wildcard pattern to match model names. + + Returns: + List[str]: a list of model names. 
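+
+        For example, the following lists every ResNet model trained on
+        ImageNet-1k (a trailing wildcard is implied, so partial names also
+        match):
+
+        .. code-block:: python
+
+            FeatureExtractor.list_models('resnet*in1k')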
+ """ + return list_models(pattern=pattern) diff --git a/mmpretrain/apis/image_caption.py b/mmpretrain/apis/image_caption.py new file mode 100644 index 0000000..c11c0d3 --- /dev/null +++ b/mmpretrain/apis/image_caption.py @@ -0,0 +1,166 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from pathlib import Path +from typing import Callable, List, Optional + +import numpy as np +from mmcv.image import imread +from mmengine.config import Config +from mmengine.dataset import Compose, default_collate + +from mmpretrain.registry import TRANSFORMS +from mmpretrain.structures import DataSample +from .base import BaseInferencer, InputType +from .model import list_models + + +class ImageCaptionInferencer(BaseInferencer): + """The inferencer for image caption. + + Args: + model (BaseModel | str | Config): A model name or a path to the config + file, or a :obj:`BaseModel` object. The model name can be found + by ``ImageCaptionInferencer.list_models()`` and you can also + query it in :doc:`/modelzoo_statistics`. + pretrained (str, optional): Path to the checkpoint. If None, it will + try to find a pre-defined weight from the model you specified + (only work if the ``model`` is a model name). Defaults to None. + device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + **kwargs: Other keyword arguments to initialize the model (only work if + the ``model`` is a model name). + + Example: + >>> from mmpretrain import ImageCaptionInferencer + >>> inferencer = ImageCaptionInferencer('blip-base_3rdparty_caption') + >>> inferencer('demo/cat-dog.png')[0] + {'pred_caption': 'a puppy and a cat sitting on a blanket'} + """ # noqa: E501 + + visualize_kwargs: set = {'resize', 'show', 'show_dir', 'wait_time'} + + def __call__(self, + images: InputType, + return_datasamples: bool = False, + batch_size: int = 1, + **kwargs) -> dict: + """Call the inferencer. + + Args: + images (str | array | list): The image path or array, or a list of + images. + return_datasamples (bool): Whether to return results as + :obj:`DataSample`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. + resize (int, optional): Resize the short edge of the image to the + specified length before visualization. Defaults to None. + draw_score (bool): Whether to draw the prediction scores + of prediction categories. Defaults to True. + show (bool): Whether to display the visualization result in a + window. Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + show_dir (str, optional): If not None, save the visualization + results in the specified directory. Defaults to None. + + Returns: + list: The inference results. + """ + return super().__call__(images, return_datasamples, batch_size, + **kwargs) + + def _init_pipeline(self, cfg: Config) -> Callable: + test_pipeline_cfg = cfg.test_dataloader.dataset.pipeline + from mmpretrain.datasets import remove_transform + + # Image loading is finished in `self.preprocess`. 
+ test_pipeline_cfg = remove_transform(test_pipeline_cfg, + 'LoadImageFromFile') + test_pipeline = Compose( + [TRANSFORMS.build(t) for t in test_pipeline_cfg]) + return test_pipeline + + def preprocess(self, inputs: List[InputType], batch_size: int = 1): + + def load_image(input_): + img = imread(input_) + if img is None: + raise ValueError(f'Failed to read image {input_}.') + return dict( + img=img, + img_shape=img.shape[:2], + ori_shape=img.shape[:2], + ) + + pipeline = Compose([load_image, self.pipeline]) + + chunked_data = self._get_chunk_data(map(pipeline, inputs), batch_size) + yield from map(default_collate, chunked_data) + + def visualize(self, + ori_inputs: List[InputType], + preds: List[DataSample], + show: bool = False, + wait_time: int = 0, + resize: Optional[int] = None, + show_dir=None): + if not show and show_dir is None: + return None + + if self.visualizer is None: + from mmpretrain.visualization import UniversalVisualizer + self.visualizer = UniversalVisualizer() + + visualization = [] + for i, (input_, data_sample) in enumerate(zip(ori_inputs, preds)): + image = imread(input_) + if isinstance(input_, str): + # The image loaded from path is BGR format. + image = image[..., ::-1] + name = Path(input_).stem + else: + name = str(i) + + if show_dir is not None: + show_dir = Path(show_dir) + show_dir.mkdir(exist_ok=True) + out_file = str((show_dir / name).with_suffix('.png')) + else: + out_file = None + + self.visualizer.visualize_image_caption( + image, + data_sample, + resize=resize, + show=show, + wait_time=wait_time, + name=name, + out_file=out_file) + visualization.append(self.visualizer.get_image()) + if show: + self.visualizer.close() + return visualization + + def postprocess(self, + preds: List[DataSample], + visualization: List[np.ndarray], + return_datasamples=False) -> dict: + if return_datasamples: + return preds + + results = [] + for data_sample in preds: + results.append({'pred_caption': data_sample.get('pred_caption')}) + + return results + + @staticmethod + def list_models(pattern: Optional[str] = None): + """List all available model names. + + Args: + pattern (str | None): A wildcard pattern to match model names. + + Returns: + List[str]: a list of model names. + """ + return list_models(pattern=pattern, task='Image Caption') diff --git a/mmpretrain/apis/image_classification.py b/mmpretrain/apis/image_classification.py new file mode 100644 index 0000000..a202180 --- /dev/null +++ b/mmpretrain/apis/image_classification.py @@ -0,0 +1,223 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from pathlib import Path +from typing import Callable, List, Optional, Union + +import numpy as np +import torch +from mmcv.image import imread +from mmengine.config import Config +from mmengine.dataset import Compose, default_collate + +from mmpretrain.registry import TRANSFORMS +from mmpretrain.structures import DataSample +from .base import BaseInferencer, InputType, ModelType +from .model import list_models + + +class ImageClassificationInferencer(BaseInferencer): + """The inferencer for image classification. + + Args: + model (BaseModel | str | Config): A model name or a path to the config + file, or a :obj:`BaseModel` object. The model name can be found + by ``ImageClassificationInferencer.list_models()`` and you can also + query it in :doc:`/modelzoo_statistics`. + pretrained (str, optional): Path to the checkpoint. If None, it will + try to find a pre-defined weight from the model you specified + (only work if the ``model`` is a model name). Defaults to None. 
+ device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + **kwargs: Other keyword arguments to initialize the model (only work if + the ``model`` is a model name). + + Example: + 1. Use a pre-trained model in MMPreTrain to inference an image. + + >>> from mmpretrain import ImageClassificationInferencer + >>> inferencer = ImageClassificationInferencer('resnet50_8xb32_in1k') + >>> inferencer('demo/demo.JPEG') + [{'pred_score': array([...]), + 'pred_label': 65, + 'pred_score': 0.6649367809295654, + 'pred_class': 'sea snake'}] + + 2. Use a config file and checkpoint to inference multiple images on GPU, + and save the visualization results in a folder. + + >>> from mmpretrain import ImageClassificationInferencer + >>> inferencer = ImageClassificationInferencer( + model='configs/resnet/resnet50_8xb32_in1k.py', + pretrained='https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth', + device='cuda') + >>> inferencer(['demo/dog.jpg', 'demo/bird.JPEG'], show_dir="./visualize/") + """ # noqa: E501 + + visualize_kwargs: set = { + 'resize', 'rescale_factor', 'draw_score', 'show', 'show_dir', + 'wait_time' + } + + def __init__(self, + model: ModelType, + pretrained: Union[bool, str] = True, + device: Union[str, torch.device, None] = None, + classes=None, + **kwargs) -> None: + super().__init__( + model=model, pretrained=pretrained, device=device, **kwargs) + + if classes is not None: + self.classes = classes + else: + self.classes = getattr(self.model, '_dataset_meta', + {}).get('classes') + + def __call__(self, + inputs: InputType, + return_datasamples: bool = False, + batch_size: int = 1, + **kwargs) -> dict: + """Call the inferencer. + + Args: + inputs (str | array | list): The image path or array, or a list of + images. + return_datasamples (bool): Whether to return results as + :obj:`DataSample`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. + resize (int, optional): Resize the short edge of the image to the + specified length before visualization. Defaults to None. + rescale_factor (float, optional): Rescale the image by the rescale + factor for visualization. This is helpful when the image is too + large or too small for visualization. Defaults to None. + draw_score (bool): Whether to draw the prediction scores + of prediction categories. Defaults to True. + show (bool): Whether to display the visualization result in a + window. Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + show_dir (str, optional): If not None, save the visualization + results in the specified directory. Defaults to None. + + Returns: + list: The inference results. + """ + return super().__call__( + inputs, + return_datasamples=return_datasamples, + batch_size=batch_size, + **kwargs) + + def _init_pipeline(self, cfg: Config) -> Callable: + test_pipeline_cfg = cfg.test_dataloader.dataset.pipeline + from mmpretrain.datasets import remove_transform + + # Image loading is finished in `self.preprocess`. 
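+        # Only the loading step is removed, so `preprocess` can feed both
+        # file paths and in-memory arrays through `load_image` into the
+        # remaining transforms of the configured test pipeline.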
+ test_pipeline_cfg = remove_transform(test_pipeline_cfg, + 'LoadImageFromFile') + test_pipeline = Compose( + [TRANSFORMS.build(t) for t in test_pipeline_cfg]) + return test_pipeline + + def preprocess(self, inputs: List[InputType], batch_size: int = 1): + + def load_image(input_): + img = imread(input_) + if img is None: + raise ValueError(f'Failed to read image {input_}.') + return dict( + img=img, + img_shape=img.shape[:2], + ori_shape=img.shape[:2], + ) + + pipeline = Compose([load_image, self.pipeline]) + + chunked_data = self._get_chunk_data(map(pipeline, inputs), batch_size) + yield from map(default_collate, chunked_data) + + def visualize(self, + ori_inputs: List[InputType], + preds: List[DataSample], + show: bool = False, + wait_time: int = 0, + resize: Optional[int] = None, + rescale_factor: Optional[float] = None, + draw_score=True, + show_dir=None): + if not show and show_dir is None: + return None + + if self.visualizer is None: + from mmpretrain.visualization import UniversalVisualizer + self.visualizer = UniversalVisualizer() + + visualization = [] + for i, (input_, data_sample) in enumerate(zip(ori_inputs, preds)): + image = imread(input_) + if isinstance(input_, str): + # The image loaded from path is BGR format. + image = image[..., ::-1] + name = Path(input_).stem + else: + name = str(i) + + if show_dir is not None: + show_dir = Path(show_dir) + show_dir.mkdir(exist_ok=True) + out_file = str((show_dir / name).with_suffix('.png')) + else: + out_file = None + + self.visualizer.visualize_cls( + image, + data_sample, + classes=self.classes, + resize=resize, + show=show, + wait_time=wait_time, + rescale_factor=rescale_factor, + draw_gt=False, + draw_pred=True, + draw_score=draw_score, + name=name, + out_file=out_file) + visualization.append(self.visualizer.get_image()) + if show: + self.visualizer.close() + return visualization + + def postprocess(self, + preds: List[DataSample], + visualization: List[np.ndarray], + return_datasamples=False) -> dict: + if return_datasamples: + return preds + + results = [] + for data_sample in preds: + pred_scores = data_sample.pred_score + pred_score = float(torch.max(pred_scores).item()) + pred_label = torch.argmax(pred_scores).item() + result = { + 'pred_scores': pred_scores.detach().cpu().numpy(), + 'pred_label': pred_label, + 'pred_score': pred_score, + } + if self.classes is not None: + result['pred_class'] = self.classes[pred_label] + results.append(result) + + return results + + @staticmethod + def list_models(pattern: Optional[str] = None): + """List all available model names. + + Args: + pattern (str | None): A wildcard pattern to match model names. + + Returns: + List[str]: a list of model names. + """ + return list_models(pattern=pattern, task='Image Classification') diff --git a/mmpretrain/apis/image_retrieval.py b/mmpretrain/apis/image_retrieval.py new file mode 100644 index 0000000..27919b2 --- /dev/null +++ b/mmpretrain/apis/image_retrieval.py @@ -0,0 +1,288 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from pathlib import Path +from typing import Callable, List, Optional, Union + +import numpy as np +import torch +from mmcv.image import imread +from mmengine.config import Config +from mmengine.dataset import BaseDataset, Compose, default_collate + +from mmpretrain.registry import TRANSFORMS +from mmpretrain.structures import DataSample +from .base import BaseInferencer, InputType, ModelType +from .model import list_models + + +class ImageRetrievalInferencer(BaseInferencer): + """The inferencer for image to image retrieval. + + Args: + model (BaseModel | str | Config): A model name or a path to the config + file, or a :obj:`BaseModel` object. The model name can be found + by ``ImageRetrievalInferencer.list_models()`` and you can also + query it in :doc:`/modelzoo_statistics`. + prototype (str | list | dict | DataLoader, BaseDataset): The images to + be retrieved. It can be the following types: + + - str: The directory of the the images. + - list: A list of path of the images. + - dict: A config dict of the a prototype dataset. + - BaseDataset: A prototype dataset. + - DataLoader: A data loader to load the prototype data. + + prototype_cache (str, optional): The path of the generated prototype + features. If exists, directly load the cache instead of re-generate + the prototype features. If not exists, save the generated features + to the path. Defaults to None. + pretrained (str, optional): Path to the checkpoint. If None, it will + try to find a pre-defined weight from the model you specified + (only work if the ``model`` is a model name). Defaults to None. + device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + **kwargs: Other keyword arguments to initialize the model (only work if + the ``model`` is a model name). + + Example: + >>> from mmpretrain import ImageRetrievalInferencer + >>> inferencer = ImageRetrievalInferencer( + ... 'resnet50-arcface_inshop', + ... prototype='./demo/', + ... 
prototype_cache='img_retri.pth') + >>> inferencer('demo/cat-dog.png', topk=2)[0][1] + {'match_score': tensor(0.4088, device='cuda:0'), + 'sample_idx': 3, + 'sample': {'img_path': './demo/dog.jpg'}} + """ # noqa: E501 + + visualize_kwargs: set = { + 'draw_score', 'resize', 'show_dir', 'show', 'wait_time', 'topk' + } + postprocess_kwargs: set = {'topk'} + + def __init__( + self, + model: ModelType, + prototype, + prototype_cache=None, + prepare_batch_size=8, + pretrained: Union[bool, str] = True, + device: Union[str, torch.device, None] = None, + **kwargs, + ) -> None: + super().__init__( + model=model, pretrained=pretrained, device=device, **kwargs) + + self.prototype_dataset = self._prepare_prototype( + prototype, prototype_cache, prepare_batch_size) + + def _prepare_prototype(self, prototype, cache=None, batch_size=8): + from mmengine.dataset import DefaultSampler + from torch.utils.data import DataLoader + + def build_dataloader(dataset): + return DataLoader( + dataset, + batch_size=batch_size, + collate_fn=default_collate, + sampler=DefaultSampler(dataset, shuffle=False), + persistent_workers=False, + ) + + if isinstance(prototype, str): + # A directory path of images + prototype = dict( + type='CustomDataset', with_label=False, data_root=prototype) + + if isinstance(prototype, list): + test_pipeline = [dict(type='LoadImageFromFile'), self.pipeline] + dataset = BaseDataset( + lazy_init=True, serialize_data=False, pipeline=test_pipeline) + dataset.data_list = [{ + 'sample_idx': i, + 'img_path': file + } for i, file in enumerate(prototype)] + dataset._fully_initialized = True + dataloader = build_dataloader(dataset) + elif isinstance(prototype, dict): + # A config of dataset + from mmpretrain.registry import DATASETS + test_pipeline = [dict(type='LoadImageFromFile'), self.pipeline] + prototype.setdefault('pipeline', test_pipeline) + dataset = DATASETS.build(prototype) + dataloader = build_dataloader(dataset) + elif isinstance(prototype, DataLoader): + dataset = prototype.dataset + dataloader = prototype + elif isinstance(prototype, BaseDataset): + dataset = prototype + dataloader = build_dataloader(dataset) + else: + raise TypeError(f'Unsupported prototype type {type(prototype)}.') + + if cache is not None and Path(cache).exists(): + self.model.prototype = cache + else: + self.model.prototype = dataloader + self.model.prepare_prototype() + + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + if cache is None: + logger.info('The prototype has been prepared, you can use ' + '`save_prototype` to dump it into a pickle ' + 'file for the future usage.') + elif not Path(cache).exists(): + self.save_prototype(cache) + logger.info(f'The prototype has been saved at {cache}.') + + return dataset + + def save_prototype(self, path): + self.model.dump_prototype(path) + + def __call__(self, + inputs: InputType, + return_datasamples: bool = False, + batch_size: int = 1, + **kwargs) -> dict: + """Call the inferencer. + + Args: + inputs (str | array | list): The image path or array, or a list of + images. + return_datasamples (bool): Whether to return results as + :obj:`DataSample`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. + resize (int, optional): Resize the long edge of the image to the + specified length before visualization. Defaults to None. + draw_score (bool): Whether to draw the match scores. + Defaults to True. + show (bool): Whether to display the visualization result in a + window. Defaults to False. 
+ wait_time (float): The display time (s). Defaults to 0, which means + "forever". + show_dir (str, optional): If not None, save the visualization + results in the specified directory. Defaults to None. + + Returns: + list: The inference results. + """ + return super().__call__(inputs, return_datasamples, batch_size, + **kwargs) + + def _init_pipeline(self, cfg: Config) -> Callable: + test_pipeline_cfg = cfg.test_dataloader.dataset.pipeline + from mmpretrain.datasets import remove_transform + + # Image loading is finished in `self.preprocess`. + test_pipeline_cfg = remove_transform(test_pipeline_cfg, + 'LoadImageFromFile') + test_pipeline = Compose( + [TRANSFORMS.build(t) for t in test_pipeline_cfg]) + return test_pipeline + + def preprocess(self, inputs: List[InputType], batch_size: int = 1): + + def load_image(input_): + img = imread(input_) + if img is None: + raise ValueError(f'Failed to read image {input_}.') + return dict( + img=img, + img_shape=img.shape[:2], + ori_shape=img.shape[:2], + ) + + pipeline = Compose([load_image, self.pipeline]) + + chunked_data = self._get_chunk_data(map(pipeline, inputs), batch_size) + yield from map(default_collate, chunked_data) + + def visualize(self, + ori_inputs: List[InputType], + preds: List[DataSample], + topk: int = 3, + resize: Optional[int] = 224, + show: bool = False, + wait_time: int = 0, + draw_score=True, + show_dir=None): + if not show and show_dir is None: + return None + + if self.visualizer is None: + from mmpretrain.visualization import UniversalVisualizer + self.visualizer = UniversalVisualizer() + + visualization = [] + for i, (input_, data_sample) in enumerate(zip(ori_inputs, preds)): + image = imread(input_) + if isinstance(input_, str): + # The image loaded from path is BGR format. + image = image[..., ::-1] + name = Path(input_).stem + else: + name = str(i) + + if show_dir is not None: + show_dir = Path(show_dir) + show_dir.mkdir(exist_ok=True) + out_file = str((show_dir / name).with_suffix('.png')) + else: + out_file = None + + self.visualizer.visualize_image_retrieval( + image, + data_sample, + self.prototype_dataset, + topk=topk, + resize=resize, + draw_score=draw_score, + show=show, + wait_time=wait_time, + name=name, + out_file=out_file) + visualization.append(self.visualizer.get_image()) + if show: + self.visualizer.close() + return visualization + + def postprocess( + self, + preds: List[DataSample], + visualization: List[np.ndarray], + return_datasamples=False, + topk=1, + ) -> dict: + if return_datasamples: + return preds + + results = [] + for data_sample in preds: + match_scores, indices = torch.topk(data_sample.pred_score, k=topk) + matches = [] + for match_score, sample_idx in zip(match_scores, indices): + sample = self.prototype_dataset.get_data_info( + sample_idx.item()) + sample_idx = sample.pop('sample_idx') + matches.append({ + 'match_score': match_score, + 'sample_idx': sample_idx, + 'sample': sample + }) + results.append(matches) + + return results + + @staticmethod + def list_models(pattern: Optional[str] = None): + """List all available model names. + + Args: + pattern (str | None): A wildcard pattern to match model names. + + Returns: + List[str]: a list of model names. + """ + return list_models(pattern=pattern, task='Image Retrieval') diff --git a/mmpretrain/apis/model.py b/mmpretrain/apis/model.py new file mode 100644 index 0000000..eba475e --- /dev/null +++ b/mmpretrain/apis/model.py @@ -0,0 +1,408 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy +import fnmatch +import os.path as osp +import re +import warnings +from os import PathLike +from pathlib import Path +from typing import List, Tuple, Union + +from mmengine.config import Config +from modelindex.load_model_index import load +from modelindex.models.Model import Model + + +class ModelHub: + """A hub to host the meta information of all pre-defined models.""" + _models_dict = {} + __mmpretrain_registered = False + + @classmethod + def register_model_index(cls, + model_index_path: Union[str, PathLike], + config_prefix: Union[str, PathLike, None] = None): + """Parse the model-index file and register all models. + + Args: + model_index_path (str | PathLike): The path of the model-index + file. + config_prefix (str | PathLike | None): The prefix of all config + file paths in the model-index file. + """ + model_index = load(str(model_index_path)) + model_index.build_models_with_collections() + + for metainfo in model_index.models: + model_name = metainfo.name.lower() + if metainfo.name in cls._models_dict: + raise ValueError( + 'The model name {} is conflict in {} and {}.'.format( + model_name, osp.abspath(metainfo.filepath), + osp.abspath(cls._models_dict[model_name].filepath))) + metainfo.config = cls._expand_config_path(metainfo, config_prefix) + cls._models_dict[model_name] = metainfo + + @classmethod + def get(cls, model_name): + """Get the model's metainfo by the model name. + + Args: + model_name (str): The name of model. + + Returns: + modelindex.models.Model: The metainfo of the specified model. + """ + cls._register_mmpretrain_models() + # lazy load config + metainfo = copy.deepcopy(cls._models_dict.get(model_name.lower())) + if metainfo is None: + raise ValueError( + f'Failed to find model "{model_name}". please use ' + '`mmpretrain.list_models` to get all available names.') + if isinstance(metainfo.config, str): + metainfo.config = Config.fromfile(metainfo.config) + return metainfo + + @staticmethod + def _expand_config_path(metainfo: Model, + config_prefix: Union[str, PathLike] = None): + if config_prefix is None: + config_prefix = osp.dirname(metainfo.filepath) + + if metainfo.config is None or osp.isabs(metainfo.config): + config_path: str = metainfo.config + else: + config_path = osp.abspath(osp.join(config_prefix, metainfo.config)) + + return config_path + + @classmethod + def _register_mmpretrain_models(cls): + # register models in mmpretrain + if not cls.__mmpretrain_registered: + from importlib_metadata import distribution + root = distribution('mmpretrain').locate_file('mmpretrain') + model_index_path = root / '.mim' / 'model-index.yml' + ModelHub.register_model_index( + model_index_path, config_prefix=root / '.mim') + cls.__mmpretrain_registered = True + + @classmethod + def has(cls, model_name): + """Whether a model name is in the ModelHub.""" + return model_name in cls._models_dict + + +def get_model(model: Union[str, Config], + pretrained: Union[str, bool] = False, + device=None, + device_map=None, + offload_folder=None, + url_mapping: Tuple[str, str] = None, + **kwargs): + """Get a pre-defined model or create a model from config. + + Args: + model (str | Config): The name of model, the config file path or a + config instance. + pretrained (bool | str): When use name to specify model, you can + use ``True`` to load the pre-defined pretrained weights. And you + can also use a string to specify the path or link of weights to + load. Defaults to False. + device (str | torch.device | None): Transfer the model to the target + device. 
Defaults to None. + device_map (str | dict | None): A map that specifies where each + submodule should go. It doesn't need to be refined to each + parameter/buffer name, once a given module name is inside, every + submodule of it will be sent to the same device. You can use + `device_map="auto"` to automatically generate the device map. + Defaults to None. + offload_folder (str | None): If the `device_map` contains any value + `"disk"`, the folder where we will offload weights. + url_mapping (Tuple[str, str], optional): The mapping of pretrained + checkpoint link. For example, load checkpoint from a local dir + instead of download by ``('https://.*/', './checkpoint')``. + Defaults to None. + **kwargs: Other keyword arguments of the model config. + + Returns: + mmengine.model.BaseModel: The result model. + + Examples: + Get a ResNet-50 model and extract images feature: + + >>> import torch + >>> from mmpretrain import get_model + >>> inputs = torch.rand(16, 3, 224, 224) + >>> model = get_model('resnet50_8xb32_in1k', pretrained=True, backbone=dict(out_indices=(0, 1, 2, 3))) + >>> feats = model.extract_feat(inputs) + >>> for feat in feats: + ... print(feat.shape) + torch.Size([16, 256]) + torch.Size([16, 512]) + torch.Size([16, 1024]) + torch.Size([16, 2048]) + + Get Swin-Transformer model with pre-trained weights and inference: + + >>> from mmpretrain import get_model, inference_model + >>> model = get_model('swin-base_16xb64_in1k', pretrained=True) + >>> result = inference_model(model, 'demo/demo.JPEG') + >>> print(result['pred_class']) + 'sea snake' + """ # noqa: E501 + if device_map is not None: + from .utils import dispatch_model + dispatch_model._verify_require() + + metainfo = None + if isinstance(model, Config): + config = copy.deepcopy(model) + if pretrained is True and 'load_from' in config: + pretrained = config.load_from + elif isinstance(model, (str, PathLike)) and Path(model).suffix == '.py': + config = Config.fromfile(model) + if pretrained is True and 'load_from' in config: + pretrained = config.load_from + elif isinstance(model, str): + metainfo = ModelHub.get(model) + config = metainfo.config + if pretrained is True and metainfo.weights is not None: + pretrained = metainfo.weights + else: + raise TypeError('model must be a name, a path or a Config object, ' + f'but got {type(config)}') + + if pretrained is True: + warnings.warn('Unable to find pre-defined checkpoint of the model.') + pretrained = None + elif pretrained is False: + pretrained = None + + if kwargs: + config.merge_from_dict({'model': kwargs}) + config.model.setdefault('data_preprocessor', + config.get('data_preprocessor', None)) + + from mmengine.registry import DefaultScope + + from mmpretrain.registry import MODELS + with DefaultScope.overwrite_default_scope('mmpretrain'): + model = MODELS.build(config.model) + + dataset_meta = {} + if pretrained: + # Mapping the weights to GPU may cause unexpected video memory leak + # which refers to https://github.com/open-mmlab/mmdetection/pull/6405 + from mmengine.runner import load_checkpoint + if url_mapping is not None: + pretrained = re.sub(url_mapping[0], url_mapping[1], pretrained) + checkpoint = load_checkpoint(model, pretrained, map_location='cpu') + if 'dataset_meta' in checkpoint.get('meta', {}): + # mmpretrain 1.x + dataset_meta = checkpoint['meta']['dataset_meta'] + elif 'CLASSES' in checkpoint.get('meta', {}): + # mmcls 0.x + dataset_meta = {'classes': checkpoint['meta']['CLASSES']} + + if len(dataset_meta) == 0 and 'test_dataloader' in config: + from 
mmpretrain.registry import DATASETS + dataset_class = DATASETS.get(config.test_dataloader.dataset.type) + dataset_meta = getattr(dataset_class, 'METAINFO', {}) + + if device_map is not None: + model = dispatch_model( + model, device_map=device_map, offload_folder=offload_folder) + elif device is not None: + model.to(device) + + model._dataset_meta = dataset_meta # save the dataset meta + model._config = config # save the config in the model + model._metainfo = metainfo # save the metainfo in the model + model.eval() + return model + + +def init_model(config, checkpoint=None, device=None, **kwargs): + """Initialize a classifier from config file (deprecated). + + It's only for compatibility, please use :func:`get_model` instead. + + Args: + config (str | :obj:`mmengine.Config`): Config file path or the config + object. + checkpoint (str, optional): Checkpoint path. If left as None, the model + will not load any weights. + device (str | torch.device | None): Transfer the model to the target + device. Defaults to None. + **kwargs: Other keyword arguments of the model config. + + Returns: + nn.Module: The constructed model. + """ + return get_model(config, checkpoint, device, **kwargs) + + +def list_models(pattern=None, exclude_patterns=None, task=None) -> List[str]: + """List all models available in MMPretrain. + + Args: + pattern (str | None): A wildcard pattern to match model names. + Defaults to None. + exclude_patterns (list | None): A list of wildcard patterns to + exclude names from the matched names. Defaults to None. + task (str | none): The evaluation task of the model. + + Returns: + List[str]: a list of model names. + + Examples: + List all models: + + >>> from mmpretrain import list_models + >>> list_models() + + List ResNet-50 models on ImageNet-1k dataset: + + >>> from mmpretrain import list_models + >>> list_models('resnet*in1k') + ['resnet50_8xb32_in1k', + 'resnet50_8xb32-fp16_in1k', + 'resnet50_8xb256-rsb-a1-600e_in1k', + 'resnet50_8xb256-rsb-a2-300e_in1k', + 'resnet50_8xb256-rsb-a3-100e_in1k'] + + List Swin-Transformer models trained from stratch and exclude + Swin-Transformer-V2 models: + + >>> from mmpretrain import list_models + >>> list_models('swin', exclude_patterns=['swinv2', '*-pre']) + ['swin-base_16xb64_in1k', + 'swin-base_3rdparty_in1k', + 'swin-base_3rdparty_in1k-384', + 'swin-large_8xb8_cub-384px', + 'swin-small_16xb64_in1k', + 'swin-small_3rdparty_in1k', + 'swin-tiny_16xb64_in1k', + 'swin-tiny_3rdparty_in1k'] + + List all EVA models for image classification task. + + >>> from mmpretrain import list_models + >>> list_models('eva', task='Image Classification') + ['eva-g-p14_30m-in21k-pre_3rdparty_in1k-336px', + 'eva-g-p14_30m-in21k-pre_3rdparty_in1k-560px', + 'eva-l-p14_mim-in21k-pre_3rdparty_in1k-196px', + 'eva-l-p14_mim-in21k-pre_3rdparty_in1k-336px', + 'eva-l-p14_mim-pre_3rdparty_in1k-196px', + 'eva-l-p14_mim-pre_3rdparty_in1k-336px'] + """ + ModelHub._register_mmpretrain_models() + matches = set(ModelHub._models_dict.keys()) + + if pattern is not None: + # Always match keys with any postfix. 
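+        # e.g. the pattern 'resnet50' becomes 'resnet50*' and therefore also
+        # matches names such as 'resnet50_8xb32_in1k'.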
+ matches = set(fnmatch.filter(matches, pattern + '*')) + + exclude_patterns = exclude_patterns or [] + for exclude_pattern in exclude_patterns: + exclude = set(fnmatch.filter(matches, exclude_pattern + '*')) + matches = matches - exclude + + if task is not None: + task_matches = [] + for key in matches: + metainfo = ModelHub._models_dict[key] + if metainfo.results is None and task == 'null': + task_matches.append(key) + elif metainfo.results is None: + continue + elif task in [result.task for result in metainfo.results]: + task_matches.append(key) + matches = task_matches + + return sorted(list(matches)) + + +def inference_model(model, *args, **kwargs): + """Inference an image with the inferencer. + + Automatically select inferencer to inference according to the type of + model. It's a shortcut for a quick start, and for advanced usage, please + use the correspondding inferencer class. + + Here is the mapping from task to inferencer: + + - Image Classification: :class:`ImageClassificationInferencer` + - Image Retrieval: :class:`ImageRetrievalInferencer` + - Image Caption: :class:`ImageCaptionInferencer` + - Visual Question Answering: :class:`VisualQuestionAnsweringInferencer` + - Visual Grounding: :class:`VisualGroundingInferencer` + - Text-To-Image Retrieval: :class:`TextToImageRetrievalInferencer` + - Image-To-Text Retrieval: :class:`ImageToTextRetrievalInferencer` + - NLVR: :class:`NLVRInferencer` + + Args: + model (BaseModel | str | Config): The loaded model, the model + name or the config of the model. + *args: Positional arguments to call the inferencer. + **kwargs: Other keyword arguments to initialize and call the + correspondding inferencer. + + Returns: + result (dict): The inference results. + """ # noqa: E501 + from mmengine.model import BaseModel + + if isinstance(model, BaseModel): + metainfo = getattr(model, '_metainfo', None) + else: + metainfo = ModelHub.get(model) + + from inspect import signature + + from .image_caption import ImageCaptionInferencer + from .image_classification import ImageClassificationInferencer + from .image_retrieval import ImageRetrievalInferencer + from .multimodal_retrieval import (ImageToTextRetrievalInferencer, + TextToImageRetrievalInferencer) + from .nlvr import NLVRInferencer + from .visual_grounding import VisualGroundingInferencer + from .visual_question_answering import VisualQuestionAnsweringInferencer + task_mapping = { + 'Image Classification': ImageClassificationInferencer, + 'Image Retrieval': ImageRetrievalInferencer, + 'Image Caption': ImageCaptionInferencer, + 'Visual Question Answering': VisualQuestionAnsweringInferencer, + 'Visual Grounding': VisualGroundingInferencer, + 'Text-To-Image Retrieval': TextToImageRetrievalInferencer, + 'Image-To-Text Retrieval': ImageToTextRetrievalInferencer, + 'NLVR': NLVRInferencer, + } + + inferencer_type = None + + if metainfo is not None and metainfo.results is not None: + tasks = set(result.task for result in metainfo.results) + inferencer_type = [ + task_mapping.get(task) for task in tasks if task in task_mapping + ] + if len(inferencer_type) > 1: + inferencer_names = [cls.__name__ for cls in inferencer_type] + warnings.warn('The model supports multiple tasks, auto select ' + f'{inferencer_names[0]}, you can also use other ' + f'inferencer {inferencer_names} directly.') + inferencer_type = inferencer_type[0] + + if inferencer_type is None: + raise NotImplementedError('No available inferencer for the model') + + init_kwargs = { + k: kwargs.pop(k) + for k in list(kwargs) + if k in 
signature(inferencer_type).parameters.keys() + } + + inferencer = inferencer_type(model, **init_kwargs) + return inferencer(*args, **kwargs)[0] diff --git a/mmpretrain/apis/multimodal_retrieval.py b/mmpretrain/apis/multimodal_retrieval.py new file mode 100644 index 0000000..5eb9c85 --- /dev/null +++ b/mmpretrain/apis/multimodal_retrieval.py @@ -0,0 +1,603 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from pathlib import Path +from typing import Callable, List, Optional, Tuple, Union + +import mmengine +import numpy as np +import torch +from mmcv.image import imread +from mmengine.config import Config +from mmengine.dataset import BaseDataset, Compose, default_collate + +from mmpretrain.registry import TRANSFORMS +from mmpretrain.structures import DataSample +from mmpretrain.utils import track +from .base import BaseInferencer +from .base import InputType as ImageType +from .base import ModelType +from .model import list_models + + +def filter_transforms(transforms: list, data_info: dict): + """Filter pipeline to avoid KeyError with partial data info.""" + data_info = deepcopy(data_info) + filtered_transforms = [] + for t in transforms: + try: + data_info = t(data_info) + filtered_transforms.append(t) + except KeyError: + pass + return filtered_transforms + + +class TextToImageRetrievalInferencer(BaseInferencer): + """The inferencer for text to image retrieval. + + Args: + model (BaseModel | str | Config): A model name or a path to the config + file, or a :obj:`BaseModel` object. The model name can be found + by ``TextToImageRetrievalInferencer.list_models()`` and you can also + query it in :doc:`/modelzoo_statistics`. + prototype (str | list | dict | DataLoader | BaseDataset): The images to + be retrieved. It can be the following types: + + - str: The directory of the the images. + - list: A list of path of the images. + - dict: A config dict of the a prototype dataset. + - BaseDataset: A prototype dataset. + - DataLoader: A data loader to load the prototype data. + + prototype_cache (str, optional): The path of the generated prototype + features. If exists, directly load the cache instead of re-generate + the prototype features. If not exists, save the generated features + to the path. Defaults to None. + fast_match (bool): Some algorithms will record extra image features for + further matching, which may consume large memory, set True to avoid + this behavior. Defaults to True. + pretrained (str, optional): Path to the checkpoint. If None, it will + try to find a pre-defined weight from the model you specified + (only work if the ``model`` is a model name). Defaults to None. + device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + **kwargs: Other keyword arguments to initialize the model (only work if + the ``model`` is a model name). + + Example: + >>> from mmpretrain import TextToImageRetrievalInferencer + >>> inferencer = TextToImageRetrievalInferencer( + ... 'blip-base_3rdparty_retrieval', + ... prototype='./demo/', + ... 
prototype_cache='t2i_retri.pth') + >>> inferencer('A cat and a dog.')[0] + {'match_score': tensor(0.3855, device='cuda:0'), + 'sample_idx': 1, + 'sample': {'img_path': './demo/cat-dog.png'}} + """ # noqa: E501 + + visualize_kwargs: set = { + 'draw_score', 'show_dir', 'show', 'wait_time', 'figsize', 'topk' + } + postprocess_kwargs: set = {'topk'} + + def __init__(self, + model: ModelType, + prototype, + prototype_cache=None, + fast_match=True, + prepare_batch_size=8, + pretrained: Union[bool, str] = True, + device: Union[str, torch.device, None] = None, + **kwargs) -> None: + super().__init__( + model=model, pretrained=pretrained, device=device, **kwargs) + + self.img_pipeline, self.text_pipeline = self.pipeline + + if hasattr(self.model, 'fast_match'): + self.model.fast_match = fast_match + + self.prototype_dataset = self._prepare_prototype( + prototype, prototype_cache, batch_size=prepare_batch_size) + + def _prepare_prototype(self, prototype, cache=None, batch_size=8): + from mmengine.dataset import DefaultSampler + from torch.utils.data import DataLoader + + def build_dataloader(dataset): + return DataLoader( + dataset, + batch_size=batch_size, + collate_fn=default_collate, + sampler=DefaultSampler(dataset, shuffle=False), + persistent_workers=False, + ) + + if isinstance(prototype, str): + # A directory path of images + prototype = dict( + type='CustomDataset', with_label=False, data_root=prototype) + + if isinstance(prototype, list): + test_pipeline = [dict(type='LoadImageFromFile'), self.img_pipeline] + dataset = BaseDataset( + lazy_init=True, serialize_data=False, pipeline=test_pipeline) + dataset.data_list = [{ + 'sample_idx': i, + 'img_path': file + } for i, file in enumerate(prototype)] + dataset._fully_initialized = True + dataloader = build_dataloader(dataset) + elif isinstance(prototype, dict): + # A config of dataset + from mmpretrain.registry import DATASETS + test_pipeline = [dict(type='LoadImageFromFile'), self.img_pipeline] + prototype.setdefault('pipeline', test_pipeline) + dataset = DATASETS.build(prototype) + dataloader = build_dataloader(dataset) + elif isinstance(prototype, list): + test_pipeline = [dict(type='LoadImageFromFile'), self.img_pipeline] + dataset = BaseDataset( + lazy_init=True, serialize_data=False, pipeline=test_pipeline) + dataset.data_list = [{ + 'sample_idx': i, + 'img_path': file + } for i, file in enumerate(prototype)] + dataset._fully_initialized = True + dataloader = build_dataloader(dataset) + elif isinstance(prototype, DataLoader): + dataset = prototype.dataset + dataloader = prototype + elif isinstance(prototype, BaseDataset): + dataset = prototype + dataloader = build_dataloader(dataset) + else: + raise TypeError(f'Unsupported prototype type {type(prototype)}.') + + if cache is not None and Path(cache).exists(): + self.prototype = torch.load(cache) + else: + prototype = [] + for data_batch in track(dataloader, 'Prepare prototype...'): + with torch.no_grad(): + data_batch = self.model.data_preprocessor( + data_batch, False) + feats = self.model._run_forward(data_batch, mode='tensor') + prototype.append(feats) + prototype = { + k: torch.cat([d[k] for d in prototype]) + for k in prototype[0] + } + self.prototype = prototype + + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + if cache is None: + logger.info('The prototype has been prepared, you can use ' + '`save_prototype` to dump it into a pickle ' + 'file for the future usage.') + elif not Path(cache).exists(): + self.save_prototype(cache) + 
logger.info(f'The prototype has been saved at {cache}.') + + return dataset + + def save_prototype(self, path): + torch.save(self.prototype, path) + + def __call__(self, + inputs: ImageType, + return_datasamples: bool = False, + batch_size: int = 1, + **kwargs) -> dict: + """Call the inferencer. + + Args: + inputs (str | array | list): The image path or array, or a list of + images. + return_datasamples (bool): Whether to return results as + :obj:`DataSample`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. + resize (int, optional): Resize the long edge of the image to the + specified length before visualization. Defaults to None. + draw_score (bool): Whether to draw the match scores. + Defaults to True. + show (bool): Whether to display the visualization result in a + window. Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + show_dir (str, optional): If not None, save the visualization + results in the specified directory. Defaults to None. + + Returns: + list: The inference results. + """ + return super().__call__(inputs, return_datasamples, batch_size, + **kwargs) + + @torch.no_grad() + def forward(self, data: dict, **kwargs): + """Feed the inputs to the model.""" + data = self.model.data_preprocessor(data, False) + data_samples = data['data_samples'] + feats = self.prototype.copy() + feats.update(self.model.extract_feat(data_samples=data_samples)) + return self.model.predict_all(feats, data_samples, cal_i2t=False)[0] + + def _init_pipeline(self, cfg: Config) -> Callable: + test_pipeline_cfg = cfg.test_dataloader.dataset.pipeline + test_transfroms = [TRANSFORMS.build(t) for t in test_pipeline_cfg] + img_info = {'img': np.zeros((224, 224, 3), dtype=np.uint8)} + text_info = {'text': 'example'} + img_pipeline = Compose(filter_transforms(test_transfroms, img_info)) + text_pipeline = Compose(filter_transforms(test_transfroms, text_info)) + return img_pipeline, text_pipeline + + def preprocess(self, inputs: List[str], batch_size: int = 1): + + def process_text(input_: str): + return self.text_pipeline({'text': input_}) + + chunked_data = self._get_chunk_data( + map(process_text, inputs), batch_size) + yield from map(default_collate, chunked_data) + + def visualize(self, + ori_inputs: List[str], + preds: List[DataSample], + topk: int = 3, + figsize: Tuple[int, int] = (16, 9), + show: bool = False, + wait_time: int = 0, + draw_score=True, + show_dir=None): + if not show and show_dir is None: + return None + + if self.visualizer is None: + from mmpretrain.visualization import UniversalVisualizer + self.visualizer = UniversalVisualizer() + + visualization = [] + for i, (text, data_sample) in enumerate(zip(ori_inputs, preds)): + name = str(i) + + if show_dir is not None: + show_dir = Path(show_dir) + show_dir.mkdir(exist_ok=True) + out_file = str((show_dir / name).with_suffix('.png')) + else: + out_file = None + + self.visualizer.visualize_t2i_retrieval( + text, + data_sample, + self.prototype_dataset, + topk=topk, + fig_cfg=dict(figsize=figsize), + draw_score=draw_score, + show=show, + wait_time=wait_time, + name=name, + out_file=out_file) + visualization.append(self.visualizer.get_image()) + if show: + self.visualizer.close() + return visualization + + def postprocess( + self, + preds: List[DataSample], + visualization: List[np.ndarray], + return_datasamples=False, + topk=1, + ) -> dict: + if return_datasamples: + return preds + + results = [] + for data_sample in preds: + match_scores, indices = 
torch.topk(data_sample.pred_score, k=topk) + matches = [] + for match_score, sample_idx in zip(match_scores, indices): + sample = self.prototype_dataset.get_data_info( + sample_idx.item()) + sample_idx = sample.pop('sample_idx') + matches.append({ + 'match_score': match_score, + 'sample_idx': sample_idx, + 'sample': sample + }) + results.append(matches) + + return results + + @staticmethod + def list_models(pattern: Optional[str] = None): + """List all available model names. + + Args: + pattern (str | None): A wildcard pattern to match model names. + + Returns: + List[str]: a list of model names. + """ + return list_models(pattern=pattern, task='Text-To-Image Retrieval') + + +class ImageToTextRetrievalInferencer(BaseInferencer): + """The inferencer for image to text retrieval. + + Args: + model (BaseModel | str | Config): A model name or a path to the config + file, or a :obj:`BaseModel` object. The model name can be found + by ``ImageToTextRetrievalInferencer.list_models()`` and you can + also query it in :doc:`/modelzoo_statistics`. + prototype (str | list | dict | DataLoader, BaseDataset): The images to + be retrieved. It can be the following types: + + - str: The file path to load the string list. + - list: A list of string. + + prototype_cache (str, optional): The path of the generated prototype + features. If exists, directly load the cache instead of re-generate + the prototype features. If not exists, save the generated features + to the path. Defaults to None. + fast_match (bool): Some algorithms will record extra image features for + further matching, which may consume large memory, set True to avoid + this behavior. Defaults to True. + pretrained (str, optional): Path to the checkpoint. If None, it will + try to find a pre-defined weight from the model you specified + (only work if the ``model`` is a model name). Defaults to None. + device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + **kwargs: Other keyword arguments to initialize the model (only work if + the ``model`` is a model name). + + Example: + >>> from mmpretrain import ImageToTextRetrievalInferencer + >>> inferencer = ImageToTextRetrievalInferencer( + ... 'blip-base_3rdparty_retrieval', + ... prototype=['cat', 'dog', 'snake', 'bird'], + ... 
prototype_cache='i2t_retri.pth') + >>> inferencer('demo/bird.JPEG')[0] + {'match_score': tensor(0.3855, device='cuda:0'), + 'sample_idx': 1, + 'sample': {'img_path': './demo/cat-dog.png'}} + """ # noqa: E501 + + visualize_kwargs: set = { + 'draw_score', 'resize', 'show_dir', 'show', 'wait_time', 'topk' + } + postprocess_kwargs: set = {'topk'} + + def __init__(self, + model: ModelType, + prototype, + prototype_cache=None, + fast_match=True, + prepare_batch_size=8, + pretrained: Union[bool, str] = True, + device: Union[str, torch.device, None] = None, + **kwargs) -> None: + super().__init__( + model=model, pretrained=pretrained, device=device, **kwargs) + + self.img_pipeline, self.text_pipeline = self.pipeline + + if hasattr(self.model, 'fast_match'): + self.model.fast_match = fast_match + + self.prototype_dataset = self._prepare_prototype( + prototype, cache=prototype_cache, batch_size=prepare_batch_size) + + def _prepare_prototype(self, prototype, cache=None, batch_size=8): + from mmengine.dataset import DefaultSampler + from torch.utils.data import DataLoader + + def build_dataloader(dataset): + return DataLoader( + [ + self.text_pipeline({ + 'sample_idx': i, + 'text': text + }) for i, text in enumerate(dataset) + ], + batch_size=batch_size, + collate_fn=default_collate, + sampler=DefaultSampler(dataset, shuffle=False), + persistent_workers=False, + ) + + if isinstance(prototype, str): + # A file path of a list of string + dataset = mmengine.list_from_file(prototype) + elif mmengine.utils.is_seq_of(prototype, str): + dataset = prototype + else: + raise TypeError(f'Unsupported prototype type {type(prototype)}.') + + dataloader = build_dataloader(dataset) + + if cache is not None and Path(cache).exists(): + self.prototype = torch.load(cache) + else: + prototype = [] + for data_batch in track(dataloader, 'Prepare prototype...'): + with torch.no_grad(): + data_batch = self.model.data_preprocessor( + data_batch, False) + feats = self.model._run_forward(data_batch, mode='tensor') + prototype.append(feats) + prototype = { + k: torch.cat([d[k] for d in prototype]) + for k in prototype[0] + } + self.prototype = prototype + + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + if cache is None: + logger.info('The prototype has been prepared, you can use ' + '`save_prototype` to dump it into a pickle ' + 'file for the future usage.') + elif not Path(cache).exists(): + self.save_prototype(cache) + logger.info(f'The prototype has been saved at {cache}.') + + return dataset + + def save_prototype(self, path): + torch.save(self.prototype, path) + + def __call__(self, + inputs: ImageType, + return_datasamples: bool = False, + batch_size: int = 1, + **kwargs) -> dict: + """Call the inferencer. + + Args: + inputs (str | array | list): The image path or array, or a list of + images. + return_datasamples (bool): Whether to return results as + :obj:`DataSample`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. + resize (int, optional): Resize the long edge of the image to the + specified length before visualization. Defaults to None. + draw_score (bool): Whether to draw the match scores. + Defaults to True. + show (bool): Whether to display the visualization result in a + window. Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + show_dir (str, optional): If not None, save the visualization + results in the specified directory. Defaults to None. + + Returns: + list: The inference results. 
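+
+        A minimal call sketch, reusing the prototype texts from the
+        class-level example (the image path is illustrative):
+
+        .. code-block:: python
+
+            for match in inferencer('demo/bird.JPEG', topk=2)[0]:
+                print(match['text'], float(match['match_score']))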
+ """ + return super().__call__(inputs, return_datasamples, batch_size, + **kwargs) + + @torch.no_grad() + def forward(self, data: dict, **kwargs): + """Feed the inputs to the model.""" + data = self.model.data_preprocessor(data, False) + feats = self.prototype.copy() + feats.update(self.model.extract_feat(images=data['images'])) + return self.model.predict_all( + feats, data['data_samples'], cal_t2i=False)[0] + + def _init_pipeline(self, cfg: Config) -> Callable: + test_pipeline_cfg = cfg.test_dataloader.dataset.pipeline + test_transfroms = [TRANSFORMS.build(t) for t in test_pipeline_cfg] + img_info = {'img': np.zeros((224, 224, 3), dtype=np.uint8)} + text_info = {'text': 'example'} + img_pipeline = Compose(filter_transforms(test_transfroms, img_info)) + text_pipeline = Compose(filter_transforms(test_transfroms, text_info)) + return img_pipeline, text_pipeline + + def preprocess(self, inputs: List[ImageType], batch_size: int = 1): + + def load_image(input_): + img = imread(input_) + if img is None: + raise ValueError(f'Failed to read image {input_}.') + return dict( + img=img, + img_shape=img.shape[:2], + ori_shape=img.shape[:2], + ) + + pipeline = Compose([load_image, self.img_pipeline]) + + chunked_data = self._get_chunk_data(map(pipeline, inputs), batch_size) + yield from map(default_collate, chunked_data) + + def visualize(self, + ori_inputs: List[ImageType], + preds: List[DataSample], + topk: int = 3, + resize: Optional[int] = 224, + show: bool = False, + wait_time: int = 0, + draw_score=True, + show_dir=None): + if not show and show_dir is None: + return None + + if self.visualizer is None: + from mmpretrain.visualization import UniversalVisualizer + self.visualizer = UniversalVisualizer() + + visualization = [] + for i, (input_, data_sample) in enumerate(zip(ori_inputs, preds)): + image = imread(input_) + if isinstance(input_, str): + # The image loaded from path is BGR format. + image = image[..., ::-1] + name = Path(input_).stem + else: + name = str(i) + + if show_dir is not None: + show_dir = Path(show_dir) + show_dir.mkdir(exist_ok=True) + out_file = str((show_dir / name).with_suffix('.png')) + else: + out_file = None + + self.visualizer.visualize_i2t_retrieval( + image, + data_sample, + self.prototype_dataset, + topk=topk, + resize=resize, + draw_score=draw_score, + show=show, + wait_time=wait_time, + name=name, + out_file=out_file) + visualization.append(self.visualizer.get_image()) + if show: + self.visualizer.close() + return visualization + + def postprocess( + self, + preds: List[DataSample], + visualization: List[np.ndarray], + return_datasamples=False, + topk=1, + ) -> dict: + if return_datasamples: + return preds + + results = [] + for data_sample in preds: + match_scores, indices = torch.topk(data_sample.pred_score, k=topk) + matches = [] + for match_score, sample_idx in zip(match_scores, indices): + text = self.prototype_dataset[sample_idx.item()] + matches.append({ + 'match_score': match_score, + 'sample_idx': sample_idx, + 'text': text + }) + results.append(matches) + + return results + + @staticmethod + def list_models(pattern: Optional[str] = None): + """List all available model names. + + Args: + pattern (str | None): A wildcard pattern to match model names. + + Returns: + List[str]: a list of model names. 
+ """ + return list_models(pattern=pattern, task='Image-To-Text Retrieval') diff --git a/mmpretrain/apis/nlvr.py b/mmpretrain/apis/nlvr.py new file mode 100644 index 0000000..9977c3b --- /dev/null +++ b/mmpretrain/apis/nlvr.py @@ -0,0 +1,150 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from typing import Callable, List, Optional, Tuple, Union + +import numpy as np +import torch +from mmcv.image import imread +from mmengine.config import Config +from mmengine.dataset import Compose, default_collate + +from mmpretrain.registry import TRANSFORMS +from mmpretrain.structures import DataSample +from .base import BaseInferencer +from .model import list_models + +InputType = Tuple[Union[str, np.ndarray], Union[str, np.ndarray], str] +InputsType = Union[List[InputType], InputType] + + +class NLVRInferencer(BaseInferencer): + """The inferencer for Natural Language for Visual Reasoning. + + Args: + model (BaseModel | str | Config): A model name or a path to the config + file, or a :obj:`BaseModel` object. The model name can be found + by ``NLVRInferencer.list_models()`` and you can also + query it in :doc:`/modelzoo_statistics`. + pretrained (str, optional): Path to the checkpoint. If None, it will + try to find a pre-defined weight from the model you specified + (only work if the ``model`` is a model name). Defaults to None. + device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + **kwargs: Other keyword arguments to initialize the model (only work if + the ``model`` is a model name). + """ + + visualize_kwargs: set = { + 'resize', 'draw_score', 'show', 'show_dir', 'wait_time' + } + + def __call__(self, + inputs: InputsType, + return_datasamples: bool = False, + batch_size: int = 1, + **kwargs) -> dict: + """Call the inferencer. + + Args: + inputs (tuple, List[tuple]): The input data tuples, every tuple + should include three items (left image, right image, text). + The image can be a path or numpy array. + return_datasamples (bool): Whether to return results as + :obj:`DataSample`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. + resize (int, optional): Resize the short edge of the image to the + specified length before visualization. Defaults to None. + draw_score (bool): Whether to draw the prediction scores + of prediction categories. Defaults to True. + show (bool): Whether to display the visualization result in a + window. Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + show_dir (str, optional): If not None, save the visualization + results in the specified directory. Defaults to None. + + Returns: + list: The inference results. 
+ """ + assert isinstance(inputs, (tuple, list)) + if isinstance(inputs, tuple): + inputs = [inputs] + for input_ in inputs: + assert isinstance(input_, tuple) + assert len(input_) == 3 + + return super().__call__( + inputs, + return_datasamples=return_datasamples, + batch_size=batch_size, + **kwargs) + + def _init_pipeline(self, cfg: Config) -> Callable: + test_pipeline_cfg = cfg.test_dataloader.dataset.pipeline + assert test_pipeline_cfg[0]['type'] == 'ApplyToList' + + list_pipeline = deepcopy(test_pipeline_cfg[0]) + if list_pipeline.scatter_key == 'img_path': + # Remove `LoadImageFromFile` + list_pipeline.transforms.pop(0) + list_pipeline.scatter_key = 'img' + + test_pipeline = Compose( + [TRANSFORMS.build(list_pipeline)] + + [TRANSFORMS.build(t) for t in test_pipeline_cfg[1:]]) + return test_pipeline + + def preprocess(self, inputs: InputsType, batch_size: int = 1): + + def load_image(input_): + img1 = imread(input_[0]) + img2 = imread(input_[1]) + text = input_[2] + if img1 is None: + raise ValueError(f'Failed to read image {input_[0]}.') + if img2 is None: + raise ValueError(f'Failed to read image {input_[1]}.') + return dict( + img=[img1, img2], + img_shape=[img1.shape[:2], img2.shape[:2]], + ori_shape=[img1.shape[:2], img2.shape[:2]], + text=text, + ) + + pipeline = Compose([load_image, self.pipeline]) + + chunked_data = self._get_chunk_data(map(pipeline, inputs), batch_size) + yield from map(default_collate, chunked_data) + + def postprocess(self, + preds: List[DataSample], + visualization: List[np.ndarray], + return_datasamples=False) -> dict: + if return_datasamples: + return preds + + results = [] + for data_sample in preds: + pred_scores = data_sample.pred_score + pred_score = float(torch.max(pred_scores).item()) + pred_label = torch.argmax(pred_scores).item() + result = { + 'pred_scores': pred_scores.detach().cpu().numpy(), + 'pred_label': pred_label, + 'pred_score': pred_score, + } + results.append(result) + + return results + + @staticmethod + def list_models(pattern: Optional[str] = None): + """List all available model names. + + Args: + pattern (str | None): A wildcard pattern to match model names. + + Returns: + List[str]: a list of model names. + """ + return list_models(pattern=pattern, task='NLVR') diff --git a/mmpretrain/apis/utils.py b/mmpretrain/apis/utils.py new file mode 100644 index 0000000..83e763254 --- /dev/null +++ b/mmpretrain/apis/utils.py @@ -0,0 +1,270 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +from collections import defaultdict +from contextlib import contextmanager +from itertools import chain +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn + +from mmpretrain.utils import require + + +@require('torch>=1.9.0', 'https://pytorch.org/get-started/locally/') +@require('accelerate') +def dispatch_model( + model, + device_map: Union[str, dict], + max_memory: Optional[dict] = None, + no_split_module_classes: Optional[List[str]] = None, + offload_folder: str = None, + offload_buffers: bool = False, + preload_module_classes: Optional[List[str]] = None, +): + """Split and dispatch a model across devices. + + The function depends on the `accelerate` package. Refers to + https://huggingface.co/docs/accelerate/main/en/usage_guides/big_modeling + + Args: + model (torch.nn.Module): The model to dispatch. + device_map (str | dict | None): A map that specifies where each + submodule should go. 
It does not need to be specified down
+            to each parameter/buffer name; once a given module name is inside,
+            every submodule of it will be sent to the same device. You can use
+            `device_map="auto"` to automatically generate the device map.
+            Defaults to None.
+        max_memory (dict | None): A dictionary mapping device identifiers to
+            their maximum memory. Will default to the maximum memory available
+            for each GPU and the available CPU RAM if unset. Defaults to None.
+        no_split_module_classes (List[str] | None): A list of layer class names
+            that should never be split across devices (for instance any layer
+            that has a residual connection). If None, try to get the settings
+            from the model class. Defaults to None.
+        offload_folder (str | None): The folder to offload weights to if the
+            `device_map` contains the value `"disk"`. Defaults to None.
+        offload_buffers (bool): In the layers that are offloaded on the CPU
+            or the hard drive, whether or not to offload the buffers as
+            well as the parameters. Defaults to False.
+        preload_module_classes (List[str] | None): A list of classes whose
+            instances should load all their weights (even in the submodules) at
+            the beginning of the forward. This should only be used for classes
+            that have submodules which are registered but not called directly
+            during the forward, for instance if a `dense` linear layer is
+            registered, but at forward, `dense.weight` and `dense.bias` are
+            used in some operations instead of calling `dense` directly.
+            Defaults to None.
+    """
+    from accelerate import dispatch_model, infer_auto_device_map
+
+    # Check valid device_map string.
+    valid_map_option = ['auto', 'balanced', 'balanced_low_0', 'sequential']
+    if isinstance(device_map, str) and device_map not in valid_map_option:
+        raise ValueError('If passing a string for `device_map`, please choose '
+                         f'from {valid_map_option}.')
+
+    # Generate device map automatically
+    if isinstance(device_map, str):
+        if no_split_module_classes is None:
+            no_split_module_classes = getattr(model, '_no_split_modules', None)
+        if no_split_module_classes is None:
+            raise ValueError(f'{model.__class__.__name__} does not support '
+                             f"`device_map='{device_map}'` yet.")
+
+        if device_map != 'sequential':
+            from accelerate.utils import get_balanced_memory
+            max_memory = get_balanced_memory(
+                model,
+                max_memory=max_memory,
+                no_split_module_classes=no_split_module_classes,
+                dtype=None,
+                low_zero=(device_map == 'balanced_low_0'),
+            )
+            max_memory[0] *= 0.9
+        device_map = infer_auto_device_map(
+            model,
+            max_memory=max_memory,
+            no_split_module_classes=no_split_module_classes,
+            dtype=None,
+        )
+
+    if 'disk' in device_map.values():
+        if offload_folder is None:
+            raise ValueError(
+                'The current `device_map` had weights offloaded to the disk. '
+                'Please provide an `offload_folder` for them.')
+        os.makedirs(offload_folder, exist_ok=True)
+
+    main_device = next(
+        (d for d in device_map.values() if d not in ['cpu', 'disk']), 'cpu')
+
+    model = dispatch_model(
+        model,
+        device_map=device_map,
+        main_device=main_device,
+        offload_dir=offload_folder,
+        offload_buffers=offload_buffers,
+        preload_module_classes=preload_module_classes,
+    )
+    if hasattr(model, 'data_preprocessor'):
+        model.data_preprocessor._device = torch.device(main_device)
+    return model
+
+
+@contextmanager
+def init_empty_weights(include_buffers: bool = False):
+    """A context manager under which models are initialized with all parameters
+    on the meta device.
+
+    With this context manager, we can create an empty model. This is useful
+    when just initializing the model would exceed the available RAM.
+
+    Besides moving the parameters to the meta device, this method also avoids
+    loading checkpoints through `mmengine.runner.load_checkpoint` and
+    `transformers.PreTrainedModel.from_pretrained`.
+
+    Modified from https://github.com/huggingface/accelerate
+
+    Args:
+        include_buffers (bool): Whether to put all buffers on the meta device
+            during initialization.
+    """
+    device = torch.device('meta')
+
+    # move parameter and buffer to meta device
+    old_register_parameter = nn.Module.register_parameter
+    if include_buffers:
+        old_register_buffer = nn.Module.register_buffer
+        # See https://github.com/huggingface/accelerate/pull/699
+        tensor_constructors_to_patch = {
+            torch_function_name: getattr(torch, torch_function_name)
+            for torch_function_name in ['empty', 'zeros', 'ones', 'full']
+        }
+
+    def register_parameter(module, name, param):
+        old_register_parameter(module, name, param)
+        if param is not None:
+            param_cls = type(module._parameters[name])
+            kwargs = module._parameters[name].__dict__
+            module._parameters[name] = param_cls(
+                module._parameters[name].to(device), **kwargs)
+
+    def register_buffer(module, name, buffer, *args, **kwargs):
+        old_register_buffer(module, name, buffer, *args, **kwargs)
+        if buffer is not None:
+            module._buffers[name] = module._buffers[name].to(device)
+
+    def patch_tensor_constructor(fn):
+
+        def wrapper(*args, **kwargs):
+            kwargs['device'] = device
+            return fn(*args, **kwargs)
+
+        return wrapper
+
+    # Patch load_checkpoint
+    import mmengine.runner.checkpoint as mmengine_load
+    old_load_checkpoint = mmengine_load.load_checkpoint
+
+    def patch_load_checkpoint(*args, **kwargs):
+        return {}
+
+    # Patch transformers from pretrained
+    try:
+        from transformers import PreTrainedModel
+        from transformers.models.auto.auto_factory import (AutoConfig,
+                                                            _BaseAutoModelClass)
+        with_transformers = True
+    except ImportError:
+        with_transformers = False
+
+    @classmethod
+    def patch_auto_model(cls, pretrained_model_name_or_path, *model_args,
+                         **kwargs):
+        cfg = AutoConfig.from_pretrained(pretrained_model_name_or_path,
+                                         *model_args, **kwargs)
+        return cls.from_config(cfg)
+
+    @classmethod
+    def patch_pretrained_model(cls, pretrained_model_name_or_path, *model_args,
+                               **kwargs):
+        cfg = cls.config_class.from_pretrained(pretrained_model_name_or_path,
+                                               *model_args, **kwargs)
+        return cls(cfg)
+
+    if with_transformers:
+        old_pretrained_model = PreTrainedModel.from_pretrained
+        old_auto_model = _BaseAutoModelClass.from_pretrained
+
+    try:
+        nn.Module.register_parameter = register_parameter
+        mmengine_load.load_checkpoint = patch_load_checkpoint
+        if with_transformers:
+            PreTrainedModel.from_pretrained = patch_pretrained_model
+            _BaseAutoModelClass.from_pretrained = patch_auto_model
+        if include_buffers:
+            nn.Module.register_buffer = register_buffer
+            for func in tensor_constructors_to_patch.keys():
+                tensor_constructor = patch_tensor_constructor(
+                    getattr(torch, func))
+                setattr(torch, func, tensor_constructor)
+        yield
+    finally:
+        nn.Module.register_parameter = old_register_parameter
+        mmengine_load.load_checkpoint = old_load_checkpoint
+        if with_transformers:
+            PreTrainedModel.from_pretrained = old_pretrained_model
+            _BaseAutoModelClass.from_pretrained = old_auto_model
+        if include_buffers:
+            nn.Module.register_buffer = old_register_buffer
+            for func, ori in tensor_constructors_to_patch.items():
+                setattr(torch, func, ori)
+
+
+def compute_module_sizes(
+    model: nn.Module,
+    dtype: Union[str, torch.dtype, None] =
None, + special_dtypes: Optional[Dict[str, Union[str, torch.dtype]]] = None): + """Compute the size of each submodule of a given model.""" + + def get_dtype(dtype): + if isinstance(dtype, str): + dtype = getattr(torch, dtype) + if dtype is not None: + assert issubclass(dtype, torch.dtype) + return dtype + + def dtype_bytes(dtype: torch.dtype): + if dtype is torch.bool: + return 1 + if dtype.is_floating_point: + return torch.finfo(dtype).bits / 8 + else: + return torch.iinfo(dtype).bits / 8 + + if dtype is not None: + dtype = get_dtype(dtype) + dtype_size = dtype_bytes(dtype) + + if special_dtypes is not None: + special_dtypes = { + key: dtype_bytes(dtype) + for key, dtype in special_dtypes.items() + } + + module_sizes = defaultdict(int) + for name, tensor in chain( + model.named_parameters(recurse=True), + model.named_buffers(recurse=True)): + if special_dtypes is not None and name in special_dtypes: + size = tensor.numel() * special_dtypes[name] + elif dtype is None: + size = tensor.numel() * tensor.element_size() + else: + size = tensor.numel() * min(dtype_size, tensor.element_size()) + name_parts = name.split('.') + for idx in range(len(name_parts) + 1): + module_sizes['.'.join(name_parts[:idx])] += size + + return module_sizes diff --git a/mmpretrain/apis/visual_grounding.py b/mmpretrain/apis/visual_grounding.py new file mode 100644 index 0000000..0153d56 --- /dev/null +++ b/mmpretrain/apis/visual_grounding.py @@ -0,0 +1,182 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from pathlib import Path +from typing import Callable, List, Optional, Union + +import numpy as np +from mmcv.image import imread +from mmengine.config import Config +from mmengine.dataset import Compose, default_collate + +from mmpretrain.registry import TRANSFORMS +from mmpretrain.structures import DataSample +from .base import BaseInferencer +from .model import list_models + + +class VisualGroundingInferencer(BaseInferencer): + """The inferencer for visual grounding. + + Args: + model (BaseModel | str | Config): A model name or a path to the config + file, or a :obj:`BaseModel` object. The model name can be found + by ``VisualGroundingInferencer.list_models()`` and you can also + query it in :doc:`/modelzoo_statistics`. + pretrained (str, optional): Path to the checkpoint. If None, it will + try to find a pre-defined weight from the model you specified + (only work if the ``model`` is a model name). Defaults to None. + device (str, optional): Device to run inference. If None, the available + device will be automatically used. Defaults to None. + **kwargs: Other keyword arguments to initialize the model (only work if + the ``model`` is a model name). + + Example: + >>> from mmpretrain import VisualGroundingInferencer + >>> inferencer = VisualGroundingInferencer('ofa-base_3rdparty_refcoco') + >>> inferencer('demo/cat-dog.png', 'dog')[0] + {'pred_bboxes': tensor([[ 36.6000, 29.6000, 355.8000, 395.2000]])} + """ # noqa: E501 + + visualize_kwargs: set = { + 'resize', 'show', 'show_dir', 'wait_time', 'line_width', 'bbox_color' + } + + def __call__(self, + images: Union[str, np.ndarray, list], + texts: Union[str, list], + return_datasamples: bool = False, + batch_size: int = 1, + **kwargs) -> dict: + """Call the inferencer. + + Args: + images (str | array | list): The image path or array, or a list of + images. + texts (str | list): The text to do visual grounding. + return_datasamples (bool): Whether to return results as + :obj:`DataSample`. Defaults to False. + batch_size (int): Batch size. Defaults to 1. 
+ resize (int, optional): Resize the short edge of the image to the + specified length before visualization. Defaults to None. + draw_score (bool): Whether to draw the prediction scores + of prediction categories. Defaults to True. + show (bool): Whether to display the visualization result in a + window. Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + show_dir (str, optional): If not None, save the visualization + results in the specified directory. Defaults to None. + line_width (int): The line width of the bbox. Defaults to 3. + bbox_color (str | tuple): The color of the bbox. + Defaults to 'green'. + + Returns: + list: The inference results. + """ + if not isinstance(images, (list, tuple)): + assert isinstance(texts, str) + inputs = [{'img': images, 'text': texts}] + else: + inputs = [] + for i in range(len(images)): + input_ = {'img': images[i], 'text': texts[i]} + inputs.append(input_) + + return super().__call__(inputs, return_datasamples, batch_size, + **kwargs) + + def _init_pipeline(self, cfg: Config) -> Callable: + test_pipeline_cfg = cfg.test_dataloader.dataset.pipeline + from mmpretrain.datasets import remove_transform + + # Image loading is finished in `self.preprocess`. + test_pipeline_cfg = remove_transform(test_pipeline_cfg, + 'LoadImageFromFile') + test_pipeline = Compose( + [TRANSFORMS.build(t) for t in test_pipeline_cfg]) + return test_pipeline + + def preprocess(self, inputs: List[dict], batch_size: int = 1): + + def load_image(input_: dict): + img = imread(input_['img']) + if img is None: + raise ValueError(f'Failed to read image {input_}.') + return {**input_, 'img': img} + + pipeline = Compose([load_image, self.pipeline]) + + chunked_data = self._get_chunk_data(map(pipeline, inputs), batch_size) + yield from map(default_collate, chunked_data) + + def visualize(self, + ori_inputs: List[dict], + preds: List[DataSample], + show: bool = False, + wait_time: int = 0, + resize: Optional[int] = None, + line_width: int = 3, + bbox_color: Union[str, tuple] = 'green', + show_dir=None): + if not show and show_dir is None: + return None + + if self.visualizer is None: + from mmpretrain.visualization import UniversalVisualizer + self.visualizer = UniversalVisualizer() + + visualization = [] + for i, (input_, data_sample) in enumerate(zip(ori_inputs, preds)): + image = imread(input_['img']) + if isinstance(input_['img'], str): + # The image loaded from path is BGR format. + image = image[..., ::-1] + name = Path(input_['img']).stem + else: + name = str(i) + + if show_dir is not None: + show_dir = Path(show_dir) + show_dir.mkdir(exist_ok=True) + out_file = str((show_dir / name).with_suffix('.png')) + else: + out_file = None + + self.visualizer.visualize_visual_grounding( + image, + data_sample, + resize=resize, + show=show, + wait_time=wait_time, + line_width=line_width, + bbox_color=bbox_color, + name=name, + out_file=out_file) + visualization.append(self.visualizer.get_image()) + if show: + self.visualizer.close() + return visualization + + def postprocess(self, + preds: List[DataSample], + visualization: List[np.ndarray], + return_datasamples=False) -> dict: + if return_datasamples: + return preds + + results = [] + for data_sample in preds: + results.append({'pred_bboxes': data_sample.get('pred_bboxes')}) + + return results + + @staticmethod + def list_models(pattern: Optional[str] = None): + """List all available model names. + + Args: + pattern (str | None): A wildcard pattern to match model names. 
+
+        Returns:
+            List[str]: a list of model names.
+        """
+        return list_models(pattern=pattern, task='Visual Grounding')
diff --git a/mmpretrain/apis/visual_question_answering.py b/mmpretrain/apis/visual_question_answering.py
new file mode 100644
index 0000000..616e1ed
--- /dev/null
+++ b/mmpretrain/apis/visual_question_answering.py
@@ -0,0 +1,183 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from pathlib import Path
+from typing import Callable, List, Optional, Union
+
+import numpy as np
+from mmcv.image import imread
+from mmengine.config import Config
+from mmengine.dataset import Compose, default_collate
+
+from mmpretrain.registry import TRANSFORMS
+from mmpretrain.structures import DataSample
+from .base import BaseInferencer
+from .model import list_models
+
+
+class VisualQuestionAnsweringInferencer(BaseInferencer):
+    """The inferencer for visual question answering.
+
+    Args:
+        model (BaseModel | str | Config): A model name or a path to the config
+            file, or a :obj:`BaseModel` object. The model name can be found
+            by ``VisualQuestionAnsweringInferencer.list_models()`` and you can
+            also query it in :doc:`/modelzoo_statistics`.
+        pretrained (str, optional): Path to the checkpoint. If None, it will
+            try to find a pre-defined weight from the model you specified
+            (only work if the ``model`` is a model name). Defaults to None.
+        device (str, optional): Device to run inference. If None, the available
+            device will be automatically used. Defaults to None.
+        **kwargs: Other keyword arguments to initialize the model (only work if
+            the ``model`` is a model name).
+
+    Example:
+        >>> from mmpretrain import VisualQuestionAnsweringInferencer
+        >>> inferencer = VisualQuestionAnsweringInferencer('ofa-base_3rdparty-zeroshot_vqa')
+        >>> inferencer('demo/cat-dog.png', "What's the animal next to the dog?")[0]
+        {'question': "What's the animal next to the dog?", 'pred_answer': 'cat'}
+    """  # noqa: E501
+
+    visualize_kwargs: set = {'resize', 'show', 'show_dir', 'wait_time'}
+
+    def __call__(self,
+                 images: Union[str, np.ndarray, list],
+                 questions: Union[str, list],
+                 return_datasamples: bool = False,
+                 batch_size: int = 1,
+                 objects: Optional[List[str]] = None,
+                 **kwargs) -> dict:
+        """Call the inferencer.
+
+        Args:
+            images (str | array | list): The image path or array, or a list of
+                images.
+            questions (str | list): The question to the corresponding image.
+            return_datasamples (bool): Whether to return results as
+                :obj:`DataSample`. Defaults to False.
+            batch_size (int): Batch size. Defaults to 1.
+            objects (List[List[str]], optional): Some algorithms, like OFA
+                fine-tuned VQA models, require an extra object description
+                list for every image. Defaults to None.
+            resize (int, optional): Resize the short edge of the image to the
+                specified length before visualization. Defaults to None.
+            show (bool): Whether to display the visualization result in a
+                window. Defaults to False.
+            wait_time (float): The display time (s). Defaults to 0, which means
+                "forever".
+            show_dir (str, optional): If not None, save the visualization
+                results in the specified directory. Defaults to None.
+
+        Returns:
+            list: The inference results.
+ """ + if not isinstance(images, (list, tuple)): + assert isinstance(questions, str) + inputs = [{'img': images, 'question': questions}] + if objects is not None: + assert isinstance(objects[0], str) + inputs[0]['objects'] = objects + else: + inputs = [] + for i in range(len(images)): + input_ = {'img': images[i], 'question': questions[i]} + if objects is not None: + input_['objects'] = objects[i] + inputs.append(input_) + + return super().__call__(inputs, return_datasamples, batch_size, + **kwargs) + + def _init_pipeline(self, cfg: Config) -> Callable: + test_pipeline_cfg = cfg.test_dataloader.dataset.pipeline + from mmpretrain.datasets import remove_transform + + # Image loading is finished in `self.preprocess`. + test_pipeline_cfg = remove_transform(test_pipeline_cfg, + 'LoadImageFromFile') + test_pipeline = Compose( + [TRANSFORMS.build(t) for t in test_pipeline_cfg]) + return test_pipeline + + def preprocess(self, inputs: List[dict], batch_size: int = 1): + + def load_image(input_: dict): + img = imread(input_['img']) + if img is None: + raise ValueError(f'Failed to read image {input_}.') + return {**input_, 'img': img} + + pipeline = Compose([load_image, self.pipeline]) + + chunked_data = self._get_chunk_data(map(pipeline, inputs), batch_size) + yield from map(default_collate, chunked_data) + + def visualize(self, + ori_inputs: List[dict], + preds: List[DataSample], + show: bool = False, + wait_time: int = 0, + resize: Optional[int] = None, + show_dir=None): + if not show and show_dir is None: + return None + + if self.visualizer is None: + from mmpretrain.visualization import UniversalVisualizer + self.visualizer = UniversalVisualizer() + + visualization = [] + for i, (input_, data_sample) in enumerate(zip(ori_inputs, preds)): + image = imread(input_['img']) + if isinstance(input_['img'], str): + # The image loaded from path is BGR format. + image = image[..., ::-1] + name = Path(input_['img']).stem + else: + name = str(i) + + if show_dir is not None: + show_dir = Path(show_dir) + show_dir.mkdir(exist_ok=True) + out_file = str((show_dir / name).with_suffix('.png')) + else: + out_file = None + + self.visualizer.visualize_vqa( + image, + data_sample, + resize=resize, + show=show, + wait_time=wait_time, + name=name, + out_file=out_file) + visualization.append(self.visualizer.get_image()) + if show: + self.visualizer.close() + return visualization + + def postprocess(self, + preds: List[DataSample], + visualization: List[np.ndarray], + return_datasamples=False) -> dict: + if return_datasamples: + return preds + + results = [] + for data_sample in preds: + results.append({ + 'question': data_sample.get('question'), + 'pred_answer': data_sample.get('pred_answer'), + }) + + return results + + @staticmethod + def list_models(pattern: Optional[str] = None): + """List all available model names. + + Args: + pattern (str | None): A wildcard pattern to match model names. + + Returns: + List[str]: a list of model names. + """ + return list_models(pattern=pattern, task='Visual Question Answering') diff --git a/mmpretrain/configs/_base_/datasets/cifar10_bs16.py b/mmpretrain/configs/_base_/datasets/cifar10_bs16.py new file mode 100644 index 0000000..3737dbe --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/cifar10_bs16.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
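+# Usage sketch (an assumption, not part of this config): base files like this
+# one are meant to be pulled into a full new-format config through MMEngine's
+# `read_base()`, for example:
+#
+#   from mmengine.config import read_base
+#
+#   with read_base():
+#       from .._base_.datasets.cifar10_bs16 import *
+#       from .._base_.default_runtime import *
+#       # plus model / schedule base files as needed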
+from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import CIFAR10, PackInputs, RandomCrop, RandomFlip +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = CIFAR10 +data_preprocessor = dict( + num_classes=10, + # RGB format normalization parameters + mean=[125.307, 122.961, 113.8575], + std=[51.5865, 50.847, 51.255], + # loaded images are already RGB format + to_rgb=False) + +train_pipeline = [ + dict(type=RandomCrop, crop_size=32, padding=4), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/cifar10', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=16, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/cifar10/', + split='test', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, )) + +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/datasets/cub_bs8_384.py b/mmpretrain/configs/_base_/datasets/cub_bs8_384.py new file mode 100644 index 0000000..b193bf8 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/cub_bs8_384.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (CUB, CenterCrop, LoadImageFromFile, + PackInputs, RandomCrop, RandomFlip, Resize) +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = CUB +data_preprocessor = dict( + num_classes=200, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=Resize, scale=510), + dict(type=RandomCrop, crop_size=384), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=Resize, scale=510), + dict(type=CenterCrop, crop_size=384), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=8, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/CUB_200_2011', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=8, + num_workers=2, + dataset=dict( + type=dataset_type, + data_root='data/CUB_200_2011', + split='test', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, )) + +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/datasets/imagenet21k_bs128.py b/mmpretrain/configs/_base_/datasets/imagenet21k_bs128.py new file mode 100644 index 0000000..11c4c0a --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet21k_bs128.py @@ -0,0 +1,35 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (ImageNet21k, LoadImageFromFile, PackInputs, + RandomFlip, RandomResizedCrop) + +# dataset settings +dataset_type = ImageNet21k +data_preprocessor = dict( + num_classes=21842, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=RandomResizedCrop, scale=224), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet21k', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs128_mbv3.py b/mmpretrain/configs/_base_/datasets/imagenet_bs128_mbv3.py new file mode 100644 index 0000000..cf0aa62 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs128_mbv3.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (AutoAugment, CenterCrop, ImageNet, + LoadImageFromFile, PackInputs, RandomErasing, + RandomFlip, RandomResizedCrop, ResizeEdge) +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = ImageNet +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=RandomResizedCrop, scale=224, backend='pillow'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=AutoAugment, + policies='imagenet', + hparams=dict(pad_val=[round(x) for x in bgr_mean])), + dict( + type=RandomErasing, + erase_prob=0.2, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=ResizeEdge, scale=256, edge='short', backend='pillow'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=128, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs256_beitv2.py b/mmpretrain/configs/_base_/datasets/imagenet_bs256_beitv2.py new file mode 100644 index 0000000..f89eb17 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs256_beitv2.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
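+# Note (assumed downstream usage): after importing a base like this with
+# `read_base()`, the exported settings are plain Python dicts, so a child
+# config can adjust them in place, e.g.:
+#
+#   with read_base():
+#       from .._base_.datasets.imagenet_bs256_beitv2 import *
+#
+#   train_dataloader.update(batch_size=128, num_workers=4)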
+from mmengine.dataset import DefaultSampler, default_collate + +from mmpretrain.datasets import (BEiTMaskGenerator, ColorJitter, ImageNet, + LoadImageFromFile, PackInputs, RandomFlip, + RandomResizedCropAndInterpolationWithTwoPic) +from mmpretrain.models import TwoNormDataPreprocessor + +dataset_type = ImageNet +data_root = 'data/imagenet/' + +data_preprocessor = dict( + type=TwoNormDataPreprocessor, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + second_mean=[127.5, 127.5, 127.5], + second_std=[127.5, 127.5, 127.5], + to_rgb=True) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=ColorJitter, brightness=0.4, contrast=0.4, saturation=0.4, + hue=0.), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=RandomResizedCropAndInterpolationWithTwoPic, + size=224, + second_size=224, + interpolation='bicubic', + second_interpolation='bicubic', + scale=(0.2, 1.0)), + dict( + type=BEiTMaskGenerator, + input_size=(14, 14), + num_masking_patches=75, + max_num_patches=75, + min_num_patches=16), + dict(type=PackInputs) +] + +train_dataloader = dict( + batch_size=256, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=True), + collate_fn=dict(type=default_collate), + dataset=dict( + type=dataset_type, + data_root=data_root, + split='train', + pipeline=train_pipeline)) diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs32.py b/mmpretrain/configs/_base_/datasets/imagenet_bs32.py new file mode 100644 index 0000000..7d07400 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs32.py @@ -0,0 +1,62 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (CenterCrop, ImageNet, LoadImageFromFile, + PackInputs, RandomFlip, RandomResizedCrop, + ResizeEdge) +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = ImageNet +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=RandomResizedCrop, scale=224), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=ResizeEdge, scale=256, edge='short'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + ann_file='meta/train.txt', + data_prefix='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + ann_file='meta/val.txt', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs32_pil_resize.py b/mmpretrain/configs/_base_/datasets/imagenet_bs32_pil_resize.py new file mode 100644 index 0000000..f911bc2 --- /dev/null +++ 
b/mmpretrain/configs/_base_/datasets/imagenet_bs32_pil_resize.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (CenterCrop, ImageNet, LoadImageFromFile, + PackInputs, RandomFlip, RandomResizedCrop, + ResizeEdge) +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = ImageNet +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=RandomResizedCrop, scale=224, backend='pillow'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=ResizeEdge, scale=256, edge='short', backend='pillow'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=32, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs32_simclr.py b/mmpretrain/configs/_base_/datasets/imagenet_bs32_simclr.py new file mode 100644 index 0000000..29b698f --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs32_simclr.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
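+# Launch sketch (the config path below is a placeholder): configs built on
+# this base are usually run with the standard OpenMMLab scripts, e.g.:
+#
+#   python tools/train.py configs/simclr/<your_simclr_config>.py
+#   bash tools/dist_train.sh configs/simclr/<your_simclr_config>.py 8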
+from mmcv.transforms import (LoadImageFromFile, RandomApply, RandomFlip, + RandomGrayscale) +from mmengine.dataset import DefaultSampler, default_collate + +from mmpretrain.datasets import (ColorJitter, GaussianBlur, ImageNet, + MultiView, PackInputs, RandomResizedCrop) +from mmpretrain.models import SelfSupDataPreprocessor + +# dataset settings +dataset_type = ImageNet +data_root = 'data/imagenet/' +data_preprocessor = dict( + type=SelfSupDataPreprocessor, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + +view_pipeline = [ + dict(type=RandomResizedCrop, scale=224, backend='pillow'), + dict(type=RandomFlip, prob=0.5), + dict( + type=RandomApply, + transforms=[ + dict( + type=ColorJitter, + brightness=0.8, + contrast=0.8, + saturation=0.8, + hue=0.2) + ], + prob=0.8), + dict( + type=RandomGrayscale, + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989)), + dict( + type=GaussianBlur, + magnitude_range=(0.1, 2.0), + magnitude_std='inf', + prob=0.5), +] + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=MultiView, num_views=2, transforms=[view_pipeline]), + dict(type=PackInputs) +] + +train_dataloader = dict( + batch_size=32, + num_workers=4, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=True), + collate_fn=dict(type=default_collate), + dataset=dict( + type=ImageNet, + data_root=data_root, + ann_file='meta/train.txt', + data_prefix=dict(img_path='train/'), + pipeline=train_pipeline)) diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs512_mae.py b/mmpretrain/configs/_base_/datasets/imagenet_bs512_mae.py new file mode 100644 index 0000000..017f5b7 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs512_mae.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmcv.transforms import LoadImageFromFile, RandomFlip +from mmengine.dataset.sampler import DefaultSampler + +from mmpretrain.datasets import ImageNet, PackInputs, RandomResizedCrop +from mmpretrain.models import SelfSupDataPreprocessor + +# dataset settings +dataset_type = ImageNet +data_root = 'data/imagenet/' +data_preprocessor = dict( + type=SelfSupDataPreprocessor, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=RandomResizedCrop, + scale=224, + crop_ratio_range=(0.2, 1.0), + backend='pillow', + interpolation='bicubic'), + dict(type=RandomFlip, prob=0.5), + dict(type=PackInputs) +] + +train_dataloader = dict( + batch_size=512, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + data_root=data_root, + split='train', + pipeline=train_pipeline)) diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs64_pil_resize.py b/mmpretrain/configs/_base_/datasets/imagenet_bs64_pil_resize.py new file mode 100644 index 0000000..a2d8aea --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs64_pil_resize.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
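+# Python API sketch (the config path below is illustrative): a full config
+# that inherits this base can also be run programmatically with MMEngine:
+#
+#   from mmengine.config import Config
+#   from mmengine.runner import Runner
+#
+#   cfg = Config.fromfile('configs/<algo>/<your_config>.py')
+#   cfg.work_dir = 'work_dirs/<your_config>'
+#   Runner.from_cfg(cfg).train()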
+from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (CenterCrop, ImageNet, LoadImageFromFile, + PackInputs, RandomFlip, RandomResizedCrop, + ResizeEdge) +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = ImageNet +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=RandomResizedCrop, scale=224, backend='pillow'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=ResizeEdge, scale=256, edge='short', backend='pillow'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py b/mmpretrain/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py new file mode 100644 index 0000000..a5f0526 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs64_pil_resize_autoaug.py @@ -0,0 +1,78 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (CenterCrop, ImageNet, LoadImageFromFile, + PackInputs, RandomFlip, RandomResizedCrop, + ResizeEdge) +from mmpretrain.datasets.transforms import AutoAugment +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = ImageNet +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=RandomResizedCrop, + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=AutoAugment, + policies='imagenet', + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=ResizeEdge, + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_224.py b/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_224.py new file mode 100644 index 0000000..5a38943 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_224.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (CenterCrop, ImageNet, LoadImageFromFile, + PackInputs, RandAugment, RandomErasing, + RandomFlip, RandomResizedCrop, ResizeEdge) +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = ImageNet +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=RandomResizedCrop, + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=RandAugment, + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type=RandomErasing, + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=ResizeEdge, + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_256.py b/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_256.py new file mode 100644 index 0000000..9690ff8 --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_256.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
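+# Registry sketch (assumed usage): because the `type` fields in these
+# new-format configs are the classes themselves, the dicts can be built
+# directly through the mmpretrain registries, e.g.:
+#
+#   from mmpretrain.registry import DATASETS, TRANSFORMS
+#
+#   flip = TRANSFORMS.build(dict(type=RandomFlip, prob=0.5,
+#                                direction='horizontal'))
+#   dataset = DATASETS.build(train_dataloader['dataset'])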
+from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (CenterCrop, ImageNet, LoadImageFromFile, + PackInputs, RandAugment, RandomErasing, + RandomFlip, RandomResizedCrop, ResizeEdge) +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = ImageNet +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +bgr_mean = data_preprocessor['mean'][::-1] +bgr_std = data_preprocessor['std'][::-1] + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=RandomResizedCrop, + scale=256, + backend='pillow', + interpolation='bicubic'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=RandAugment, + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict( + pad_val=[round(x) for x in bgr_mean], interpolation='bicubic')), + dict( + type=RandomErasing, + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=bgr_mean, + fill_std=bgr_std), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=ResizeEdge, + scale=292, # ( 256 / 224 * 256 ) + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type=CenterCrop, crop_size=256), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + split='val', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_384.py b/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_384.py new file mode 100644 index 0000000..85aeb1e --- /dev/null +++ b/mmpretrain/configs/_base_/datasets/imagenet_bs64_swin_384.py @@ -0,0 +1,64 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.dataset import DefaultSampler + +from mmpretrain.datasets import (ImageNet, LoadImageFromFile, PackInputs, + RandomFlip, RandomResizedCrop, Resize) +from mmpretrain.evaluation import Accuracy + +# dataset settings +dataset_type = ImageNet +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=RandomResizedCrop, + scale=384, + backend='pillow', + interpolation='bicubic'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=Resize, scale=384, backend='pillow', interpolation='bicubic'), + dict(type=PackInputs), +] + +train_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + ann_file='meta/train.txt', + data_prefix='train', + pipeline=train_pipeline), + sampler=dict(type=DefaultSampler, shuffle=True), +) + +val_dataloader = dict( + batch_size=64, + num_workers=5, + dataset=dict( + type=dataset_type, + data_root='data/imagenet', + ann_file='meta/val.txt', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type=DefaultSampler, shuffle=False), +) +val_evaluator = dict(type=Accuracy, topk=(1, 5)) + +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader +test_evaluator = val_evaluator diff --git a/mmpretrain/configs/_base_/default_runtime.py b/mmpretrain/configs/_base_/default_runtime.py new file mode 100644 index 0000000..b5c748e --- /dev/null +++ b/mmpretrain/configs/_base_/default_runtime.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook, + LoggerHook, ParamSchedulerHook) +from mmengine.visualization import LocalVisBackend + +from mmpretrain.engine.hooks import VisualizationHook +from mmpretrain.visualization import UniversalVisualizer + +# configure default hooks +default_hooks = dict( + # record the time of every iteration. + timer=dict(type=IterTimerHook), + + # print log every 100 iterations. + logger=dict(type=LoggerHook, interval=100), + + # enable the parameter scheduler. + param_scheduler=dict(type=ParamSchedulerHook), + + # save checkpoint per epoch. + checkpoint=dict(type=CheckpointHook, interval=1), + + # set sampler seed in distributed evrionment. + sampler_seed=dict(type=DistSamplerSeedHook), + + # validation results visualization, set True to enable it. + visualization=dict(type=VisualizationHook, enable=False), +) + +# configure environment +env_cfg = dict( + # whether to enable cudnn benchmark + cudnn_benchmark=False, + + # set multi process parameters + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + + # set distributed parameters + dist_cfg=dict(backend='nccl'), +) + +# set visualizer +vis_backends = [dict(type=LocalVisBackend)] +visualizer = dict(type=UniversalVisualizer, vis_backends=vis_backends) + +# set log level +log_level = 'INFO' + +# load from which checkpoint +load_from = None + +# whether to resume training from the loaded checkpoint +resume = False + +# Defaults to use random seed and disable `deterministic` +randomness = dict(seed=None, deterministic=False) + +# Do not need to specify default_scope with new config. 
Therefore set it to +# None to avoid BC-breaking. +default_scope = None diff --git a/mmpretrain/configs/_base_/models/convnext_base.py b/mmpretrain/configs/_base_/models/convnext_base.py new file mode 100644 index 0000000..6315b2f --- /dev/null +++ b/mmpretrain/configs/_base_/models/convnext_base.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.model import TruncNormalInit + +from mmpretrain.models import (ConvNeXt, CutMix, ImageClassifier, + LabelSmoothLoss, LinearClsHead, Mixup) + +# Model settings +model = dict( + type=ImageClassifier, + backbone=dict(type=ConvNeXt, arch='base', drop_path_rate=0.5), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1024, + loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + init_cfg=None, + ), + init_cfg=dict( + type=TruncNormalInit, layer=['Conv2d', 'Linear'], std=.02, bias=0.), + train_cfg=dict(augments=[ + dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0), + ]), +) diff --git a/mmpretrain/configs/_base_/models/mae_hivit_base_p16.py b/mmpretrain/configs/_base_/models/mae_hivit_base_p16.py new file mode 100644 index 0000000..975e16b --- /dev/null +++ b/mmpretrain/configs/_base_/models/mae_hivit_base_p16.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmpretrain.models import (MAE, MAEHiViT, MAEPretrainDecoder, + MAEPretrainHead, PixelReconstructionLoss) + +# model settings +model = dict( + type=MAE, + backbone=dict(type=MAEHiViT, patch_size=16, arch='base', mask_ratio=0.75), + neck=dict( + type=MAEPretrainDecoder, + patch_size=16, + in_chans=3, + embed_dim=512, + decoder_embed_dim=512, + decoder_depth=6, + decoder_num_heads=16, + mlp_ratio=4., + ), + head=dict( + type=MAEPretrainHead, + norm_pix=True, + patch_size=16, + loss=dict(type=PixelReconstructionLoss, criterion='L2')), + init_cfg=[ + dict(type='Xavier', layer='Linear', distribution='uniform'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]) diff --git a/mmpretrain/configs/_base_/models/mae_vit_base_p16.py b/mmpretrain/configs/_base_/models/mae_vit_base_p16.py new file mode 100644 index 0000000..9347d1e --- /dev/null +++ b/mmpretrain/configs/_base_/models/mae_vit_base_p16.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmpretrain.models import (MAE, MAEPretrainDecoder, MAEPretrainHead, + MAEViT, PixelReconstructionLoss) + +# model settings +model = dict( + type=MAE, + backbone=dict(type=MAEViT, arch='b', patch_size=16, mask_ratio=0.75), + neck=dict( + type=MAEPretrainDecoder, + patch_size=16, + in_chans=3, + embed_dim=768, + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=16, + mlp_ratio=4., + ), + head=dict( + type=MAEPretrainHead, + norm_pix=True, + patch_size=16, + loss=dict(type=PixelReconstructionLoss, criterion='L2')), + init_cfg=[ + dict(type='Xavier', layer='Linear', distribution='uniform'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]) diff --git a/mmpretrain/configs/_base_/models/mobilenet_v2_1x.py b/mmpretrain/configs/_base_/models/mobilenet_v2_1x.py new file mode 100644 index 0000000..17dbb9f --- /dev/null +++ b/mmpretrain/configs/_base_/models/mobilenet_v2_1x.py @@ -0,0 +1,17 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+# This is a BETA new format config file, and the usage may change recently. +from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, + ImageClassifier, LinearClsHead, MobileNetV2) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict(type=MobileNetV2, widen_factor=1.0), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1280, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5), + )) diff --git a/mmpretrain/configs/_base_/models/mobilenet_v3_small.py b/mmpretrain/configs/_base_/models/mobilenet_v3_small.py new file mode 100644 index 0000000..83edab5 --- /dev/null +++ b/mmpretrain/configs/_base_/models/mobilenet_v3_small.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.model.weight_init import NormalInit +from torch.nn.modules.activation import Hardswish + +from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, + ImageClassifier, MobileNetV3, + StackedLinearClsHead) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict(type=MobileNetV3, arch='small'), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=StackedLinearClsHead, + num_classes=1000, + in_channels=576, + mid_channels=[1024], + dropout_rate=0.2, + act_cfg=dict(type=Hardswish), + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + init_cfg=dict( + type=NormalInit, layer='Linear', mean=0., std=0.01, bias=0.), + topk=(1, 5))) diff --git a/mmpretrain/configs/_base_/models/resnet18.py b/mmpretrain/configs/_base_/models/resnet18.py new file mode 100644 index 0000000..30b8f65 --- /dev/null +++ b/mmpretrain/configs/_base_/models/resnet18.py @@ -0,0 +1,22 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, + ImageClassifier, LinearClsHead, ResNet) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=ResNet, + depth=18, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=512, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5), + )) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_base.py b/mmpretrain/configs/_base_/models/swin_transformer_base.py new file mode 100644 index 0000000..c73c254 --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer_base.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
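The classifier configs above (MobileNetV2, MobileNetV3-Small, ResNet-18) all describe the same backbone -> GlobalAveragePooling -> linear head data flow; the sketch below is a rough illustration with ResNet-18 shapes, using plain PyTorch stand-ins rather than the mmpretrain modules themselves:

import torch

feat = torch.randn(2, 512, 7, 7)              # stage-4 feature map for a 224x224 input
pooled = feat.mean(dim=(2, 3))                # what GlobalAveragePooling computes -> (2, 512)
logits = torch.nn.Linear(512, 1000)(pooled)   # a stand-in for LinearClsHead -> (2, 1000)
assert logits.shape == (2, 1000)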
+from mmpretrain.models import (CrossEntropyLoss, GlobalAveragePooling, + ImageClassifier, LinearClsHead, SwinTransformer) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformer, + arch='base', + img_size=384, + stage_cfgs=dict(block_cfgs=dict(window_size=12))), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1024, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5))) diff --git a/mmpretrain/configs/_base_/models/swin_transformer_v2_base.py b/mmpretrain/configs/_base_/models/swin_transformer_v2_base.py new file mode 100644 index 0000000..c7566b5 --- /dev/null +++ b/mmpretrain/configs/_base_/models/swin_transformer_v2_base.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmpretrain.models import (GlobalAveragePooling, ImageClassifier, + LabelSmoothLoss, LinearClsHead, + SwinTransformerV2) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=SwinTransformerV2, arch='base', img_size=384, drop_path_rate=0.2), + neck=dict(type=GlobalAveragePooling), + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=1024, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + cal_acc=False)) diff --git a/mmpretrain/configs/_base_/models/vit_base_p16.py b/mmpretrain/configs/_base_/models/vit_base_p16.py new file mode 100644 index 0000000..326c50a --- /dev/null +++ b/mmpretrain/configs/_base_/models/vit_base_p16.py @@ -0,0 +1,31 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.model.weight_init import KaimingInit + +from mmpretrain.models import (ImageClassifier, LabelSmoothLoss, + VisionTransformer, VisionTransformerClsHead) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=VisionTransformer, + arch='b', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type=KaimingInit, + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]), + neck=None, + head=dict( + type=VisionTransformerClsHead, + num_classes=1000, + in_channels=768, + loss=dict( + type=LabelSmoothLoss, label_smooth_val=0.1, mode='classy_vision'), + )) diff --git a/mmpretrain/configs/_base_/schedules/cifar10_bs128.py b/mmpretrain/configs/_base_/schedules/cifar10_bs128.py new file mode 100644 index 0000000..8ab749e --- /dev/null +++ b/mmpretrain/configs/_base_/schedules/cifar10_bs128.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.optim import MultiStepLR +from torch.optim import SGD + +# optimizer +optim_wrapper = dict( + optimizer=dict(type=SGD, lr=0.1, momentum=0.9, weight_decay=0.0001)) +# learning policy +param_scheduler = dict( + type=MultiStepLR, by_epoch=True, milestones=[100, 150], gamma=0.1) + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=200, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
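The NOTE above (and the `auto_scale_lr` line that follows) refers to MMEngine's linear LR scaling, which rescales the optimizer learning rate by the ratio of the real total batch size to `base_batch_size` when enabled (for example via the training script's `--auto-scale-lr` option). Illustrative arithmetic only; the batch-size numbers below are assumptions, not values from this config:

base_lr = 0.1
base_batch_size = 128
real_batch_size = 8 * 64                                  # e.g. 8 GPUs x 64 samples per GPU
scaled_lr = base_lr * real_batch_size / base_batch_size   # -> 0.4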
+auto_scale_lr = dict(base_batch_size=128) diff --git a/mmpretrain/configs/_base_/schedules/cub_bs64.py b/mmpretrain/configs/_base_/schedules/cub_bs64.py new file mode 100644 index 0000000..2ca40bf --- /dev/null +++ b/mmpretrain/configs/_base_/schedules/cub_bs64.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.optim import CosineAnnealingLR, LinearLR +from torch.optim import SGD + +# optimizer +optim_wrapper = dict( + optimizer=dict( + type=SGD, lr=0.01, momentum=0.9, weight_decay=0.0005, nesterov=True)) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type=LinearLR, + start_factor=0.01, + by_epoch=True, + begin=0, + end=5, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type=CosineAnnealingLR, + T_max=95, + by_epoch=True, + begin=5, + end=100, + ) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=64) diff --git a/mmpretrain/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py b/mmpretrain/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py new file mode 100644 index 0000000..60ccaa0 --- /dev/null +++ b/mmpretrain/configs/_base_/schedules/imagenet_bs1024_adamw_swin.py @@ -0,0 +1,46 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.optim import CosineAnnealingLR, LinearLR +from torch.optim import AdamW + +# for batch in each gpu is 128, 8 gpu +# lr = 5e-4 * 128 * 8 / 512 = 0.001 +optim_wrapper = dict( + optimizer=dict( + type=AdamW, + lr=5e-4 * 1024 / 512, + weight_decay=0.05, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + flat_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0) + }), +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type=LinearLR, + start_factor=1e-3, + by_epoch=True, + end=20, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict(type=CosineAnnealingLR, eta_min=1e-5, by_epoch=True, begin=20) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=1024) diff --git a/mmpretrain/configs/_base_/schedules/imagenet_bs256.py b/mmpretrain/configs/_base_/schedules/imagenet_bs256.py new file mode 100644 index 0000000..95afa2a --- /dev/null +++ b/mmpretrain/configs/_base_/schedules/imagenet_bs256.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
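The `custom_keys` block in the AdamW/Swin schedule above is what removes weight decay from the position-embedding tables. A rough sketch of the matching rule, assuming MMEngine's default optimizer-wrapper constructor behaviour (substring match on parameter names, longest key first); the parameter name below is illustrative:

custom_keys = {
    '.absolute_pos_embed': dict(decay_mult=0.0),
    '.relative_position_bias_table': dict(decay_mult=0.0),
}
name = 'backbone.stages.0.blocks.0.attn.w_msa.relative_position_bias_table'
decay_mult = 1.0
for key in sorted(custom_keys, key=len, reverse=True):    # longest matching key wins
    if key in name:
        decay_mult = custom_keys[key].get('decay_mult', 1.0)
        break
weight_decay = 0.05 * decay_mult                          # 0.0 for this parameter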
+from mmengine.optim import MultiStepLR +from torch.optim import SGD + +# optimizer +optim_wrapper = dict( + optimizer=dict(type=SGD, lr=0.1, momentum=0.9, weight_decay=0.0001)) + +# learning policy +param_scheduler = dict( + type=MultiStepLR, by_epoch=True, milestones=[30, 60, 90], gamma=0.1) + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=256) diff --git a/mmpretrain/configs/_base_/schedules/imagenet_bs256_epochstep.py b/mmpretrain/configs/_base_/schedules/imagenet_bs256_epochstep.py new file mode 100644 index 0000000..9d245eb --- /dev/null +++ b/mmpretrain/configs/_base_/schedules/imagenet_bs256_epochstep.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.optim import StepLR +from torch.optim import SGD + +# optimizer +optim_wrapper = dict( + optimizer=dict(type=SGD, lr=0.045, momentum=0.9, weight_decay=0.00004)) + +# learning policy +param_scheduler = dict(type=StepLR, by_epoch=True, step_size=1, gamma=0.98) + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=256) diff --git a/mmpretrain/configs/_base_/schedules/imagenet_bs4096_adamw.py b/mmpretrain/configs/_base_/schedules/imagenet_bs4096_adamw.py new file mode 100644 index 0000000..4561f23 --- /dev/null +++ b/mmpretrain/configs/_base_/schedules/imagenet_bs4096_adamw.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.optim import CosineAnnealingLR, LinearLR +from torch.optim import AdamW + +# optimizer +optim_wrapper = dict( + optimizer=dict(type=AdamW, lr=0.003, weight_decay=0.3), + # specific to vit pretrain + paramwise_cfg=dict(custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + }), +) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type=LinearLR, + start_factor=1e-4, + by_epoch=True, + begin=0, + end=30, + # update by iter + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type=CosineAnnealingLR, + T_max=270, + by_epoch=True, + begin=30, + end=300, + ) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/_base_/schedules/imagenet_lars_coslr_200e.py b/mmpretrain/configs/_base_/schedules/imagenet_lars_coslr_200e.py new file mode 100644 index 0000000..0c7e617 --- /dev/null +++ b/mmpretrain/configs/_base_/schedules/imagenet_lars_coslr_200e.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
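The `StepLR(step_size=1, gamma=0.98)` policy in the epoch-step schedule above multiplies the learning rate by 0.98 after every epoch; purely illustrative arithmetic:

base_lr, gamma = 0.045, 0.98
lr_at_epoch = [base_lr * gamma ** e for e in range(300)]
# lr_at_epoch[100] is roughly 0.006 and lr_at_epoch[299] roughly 1e-4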
+from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop + +from mmpretrain.engine.optimizers.lars import LARS + +# optimizer wrapper +optim_wrapper = dict( + type=OptimWrapper, + optimizer=dict(type=LARS, lr=4.8, weight_decay=1e-6, momentum=0.9)) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict(type=CosineAnnealingLR, T_max=190, by_epoch=True, begin=10, end=200) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=200) diff --git a/mmpretrain/configs/beit/beit_beit_base_p16_8xb256_amp_coslr_300e_in1k.py b/mmpretrain/configs/beit/beit_beit_base_p16_8xb256_amp_coslr_300e_in1k.py new file mode 100644 index 0000000..fe9c329 --- /dev/null +++ b/mmpretrain/configs/beit/beit_beit_base_p16_8xb256_amp_coslr_300e_in1k.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.default_runtime import * + +from mmengine.dataset import DefaultSampler, default_collate +from mmengine.hooks import CheckpointHook +from mmengine.model import ConstantInit, PretrainedInit, TruncNormalInit +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from mmengine.runner import EpochBasedTrainLoop +from torch.optim import AdamW + +from mmpretrain.datasets import (BEiTMaskGenerator, ColorJitter, ImageNet, + LoadImageFromFile, PackInputs, RandomFlip, + RandomResizedCropAndInterpolationWithTwoPic) +from mmpretrain.models import (BEiT, BEiTPretrainViT, BEiTV1Head, + CrossEntropyLoss, DALLEEncoder, + TwoNormDataPreprocessor) + +# dataset settings +dataset_type = ImageNet +data_root = 'data/imagenet/' +data_preprocessor = dict( + type=TwoNormDataPreprocessor, + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + second_mean=[-31.875, -31.875, -31.875], + second_std=[318.75, 318.75, 318.75], + to_rgb=True) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=ColorJitter, brightness=0.4, contrast=0.4, saturation=0.4, + hue=0.), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=RandomResizedCropAndInterpolationWithTwoPic, + size=224, + second_size=112, + interpolation='bicubic', + second_interpolation='lanczos', + scale=(0.08, 1.0)), + dict( + type=BEiTMaskGenerator, + input_size=(14, 14), + num_masking_patches=75, + max_num_patches=None, + min_num_patches=16), + dict(type=PackInputs) +] +train_dataloader = dict( + batch_size=256, + num_workers=8, + persistent_workers=True, + sampler=dict(type=DefaultSampler, shuffle=True), + collate_fn=dict(type=default_collate), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='meta/train.txt', + data_prefix=dict(img_path='train/'), + pipeline=train_pipeline)) + +# model settings +model = dict( + type=BEiT, + backbone=dict( + type=BEiTPretrainViT, + arch='base', + patch_size=16, + drop_path_rate=0.1, + final_norm=True, + out_type='raw', + layer_scale_init_value=0.1, + init_cfg=[ + dict(type=TruncNormalInit, std=0.02, layer='Linear'), + dict(type=TruncNormalInit, std=0.02, layer='Conv2d'), + dict(type=ConstantInit, layer='LayerNorm', val=1.0, bias=0.0) + ]), + neck=None, + head=dict( + type=BEiTV1Head, + embed_dims=768, + num_embed=8192, + 
loss=dict(type=CrossEntropyLoss)), + target_generator=dict( + type=DALLEEncoder, + init_cfg=dict( + type=PretrainedInit, + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/dalle_encoder.pth', # noqa: E501 + ))) + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, lr=1.5e-3, betas=(0.9, 0.999), weight_decay=0.05), + clip_grad=dict(max_norm=3.0), + paramwise_cfg=dict( + custom_keys={ + # the following configurations are designed for BEiT + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + 'q_bias': dict(decay_mult=0.0), + 'v_bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.gamma': dict(decay_mult=0.0), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=1e-5, + by_epoch=True, + begin=10, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=300) +default_hooks.update( + # only keeps the latest 3 checkpoints + checkpoint=dict(type=CheckpointHook, interval=1, max_keep_ckpts=3)) + +randomness.update(seed=0, diff_rank_seed=True) + +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/mmpretrain/configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py b/mmpretrain/configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py new file mode 100644 index 0000000..00a76b7 --- /dev/null +++ b/mmpretrain/configs/beit/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py @@ -0,0 +1,139 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base + +with read_base(): + from ..._base_.datasets.imagenet_bs64_swin_224 import * + from ..._base_.schedules.imagenet_bs1024_adamw_swin import * + from ..._base_.default_runtime import * + +from mmengine.hooks import CheckpointHook +from mmengine.model import PretrainedInit, TruncNormalInit +from mmengine.optim import CosineAnnealingLR, LinearLR +from torch.optim import AdamW + +from mmpretrain.datasets import LoadImageFromFile, PackInputs, RandomFlip +from mmpretrain.engine.optimizers import \ + LearningRateDecayOptimWrapperConstructor +from mmpretrain.models import (BEiTViT, ImageClassifier, LabelSmoothLoss, + LinearClsHead) +from mmpretrain.models.utils.batch_augments import CutMix, Mixup + +data_preprocessor = dict( + num_classes=1000, + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + to_rgb=True, +) + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=BEiTViT, + arch='base', + img_size=224, + patch_size=16, + drop_path_rate=0.1, + out_type='avg_featmap', + use_abs_pos_emb=False, + use_rel_pos_bias=True, + use_shared_rel_pos_bias=False, + init_cfg=dict(type=PretrainedInit, checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=768, + loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + init_cfg=[dict(type=TruncNormalInit, layer='Linear', std=0.02)]), + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=RandomResizedCrop, + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=RandAugment, + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type=RandomErasing, + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type=PackInputs) +] +test_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=ResizeEdge, + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs) +] + +train_dataloader = dict(batch_size=128, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=128, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer wrapper +optim_wrapper = dict( + optimizer=dict(type=AdamW, lr=4e-3, weight_decay=0.05, betas=(0.9, 0.999)), + constructor=LearningRateDecayOptimWrapperConstructor, + paramwise_cfg=dict( + _delete_=True, + layer_decay_rate=0.65, + custom_keys={ + # the following configurations are designed for BEiT + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + 'q_bias': dict(decay_mult=0.0), + 'v_bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.gamma': dict(decay_mult=0.0), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-4, + by_epoch=True, + begin=0, + end=20, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + by_epoch=True, + begin=20, + end=100, + eta_min=1e-6, + convert_to_iter_based=True) +] + +# runtime settings +default_hooks = dict( + # save checkpoint per epoch. 
+ checkpoint=dict(type=CheckpointHook, interval=1, max_keep_ckpts=2)) + +train_cfg = dict(by_epoch=True, max_epochs=100) + +randomness = dict(seed=0) diff --git a/mmpretrain/configs/beit/benchmarks/beit-base-p16_8xb64_in1k.py b/mmpretrain/configs/beit/benchmarks/beit-base-p16_8xb64_in1k.py new file mode 100644 index 0000000..b4718af --- /dev/null +++ b/mmpretrain/configs/beit/benchmarks/beit-base-p16_8xb64_in1k.py @@ -0,0 +1,50 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from ..._base_.datasets.imagenet_bs64_swin_224 import * + from ..._base_.schedules.imagenet_bs1024_adamw_swin import * + from ..._base_.default_runtime import * + +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import (BEiTViT, ImageClassifier, LabelSmoothLoss, + LinearClsHead) +from mmpretrain.models.utils.batch_augments import CutMix, Mixup + +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +model = dict( + type=ImageClassifier, + backbone=dict( + type=BEiTViT, + arch='base', + img_size=224, + patch_size=16, + out_type='avg_featmap', + use_abs_pos_emb=False, + use_rel_pos_bias=True, + use_shared_rel_pos_bias=False, + ), + neck=None, + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=768, + loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=.02), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-1600e_in1k.py b/mmpretrain/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-1600e_in1k.py new file mode 100644 index 0000000..6bec16b --- /dev/null +++ b/mmpretrain/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-1600e_in1k.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs256_beitv2 import * + from .._base_.default_runtime import * + +from mmengine.model import ConstantInit, PretrainedInit, TruncNormalInit +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from mmengine.runner import EpochBasedTrainLoop +from torch.optim import AdamW + +from mmpretrain.models import (VQKD, BEiT, BEiTPretrainViT, BEiTV2Head, + BEiTV2Neck, CrossEntropyLoss) + +vqkd_encoder = dict( + arch='base', + img_size=224, + patch_size=16, + in_channels=3, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + out_type='featmap', + with_cls_token=True, + frozen_stages=-1, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + use_shared_rel_pos_bias=False, + layer_scale_init_value=0., + interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None) + +layer_scale_init_value = 0.1 +drop_path_rate = 0.1 # 0. for 300 epochs and 0.1 for 1600 epochs. 
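A hedged aside on the `drop_path_rate` set just above: ViT-style backbones typically expand this single value into per-block stochastic-depth probabilities that grow linearly with depth. The sketch uses illustrative numbers, not code from this patch:

num_layers = 12
drop_path_rate = 0.1
dpr = [drop_path_rate * i / max(num_layers - 1, 1) for i in range(num_layers)]
# dpr[0] == 0.0 for the first block, dpr[-1] == 0.1 for the last one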
+ +model = dict( + type=BEiT, + backbone=dict( + type=BEiTPretrainViT, + arch='base', + patch_size=16, + out_indices=[-4, -1], + drop_path_rate=drop_path_rate, + final_norm=False, + out_type='raw', + layer_scale_init_value=layer_scale_init_value, + init_cfg=[ + dict(type=TruncNormalInit, std=0.02, layer='Linear'), + dict(type=TruncNormalInit, std=0.02, layer='Conv2d'), + dict(type=ConstantInit, layer='LayerNorm', val=1.0, bias=0.0) + ]), + neck=dict( + type=BEiTV2Neck, + num_layers=2, + early_layers=9, + backbone_arch='base', + drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value, + ), + head=dict( + type=BEiTV2Head, + embed_dims=768, + num_embed=8192, + loss=dict(type=CrossEntropyLoss)), + target_generator=dict( + type=VQKD, + encoder_config=vqkd_encoder, + init_cfg=dict( + type=PretrainedInit, + checkpoint= # noqa + 'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/vqkd_encoder.pth' # noqa + ))) + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + # betas: (0.9, 0.98) for 300 epochs and (0.9, 0.999) for 1600 epochs. + optimizer=dict( + type=AdamW, lr=1.5e-3, betas=(0.9, 0.999), weight_decay=0.05), + clip_grad=dict(max_norm=3.0), + paramwise_cfg=dict( + custom_keys={ + # the following configurations are designed for BEiT + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + 'q_bias': dict(decay_mult=0.0), + 'v_bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.gamma': dict(decay_mult=0.0), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=1e-5, + by_epoch=True, + begin=10, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=1600) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type=CheckpointHook, interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=2048) diff --git a/mmpretrain/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k.py b/mmpretrain/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k.py new file mode 100644 index 0000000..3fe9b50 --- /dev/null +++ b/mmpretrain/configs/beitv2/beitv2_beit-base-p16_8xb256-amp-coslr-300e_in1k.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
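The LinearLR warm-up entries in the schedules above scale the base learning rate by a factor that ramps linearly from `start_factor` to 1 over the warm-up epochs (`convert_to_iter_based=True` applies that ramp per iteration rather than per epoch). A hedged, epoch-granularity illustration with the values from the config above:

base_lr, start_factor, warmup_epochs = 1.5e-3, 1e-4, 10
def warmup_factor(epoch):
    return start_factor + (1 - start_factor) * min(epoch, warmup_epochs) / warmup_epochs
# the factor is 1e-4 at epoch 0 and ramps to 1.0 by epoch 10; effective lr is base_lr * factor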
+from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs256_beitv2 import * + from .._base_.default_runtime import * + +from mmengine.model import ConstantInit, PretrainedInit, TruncNormalInit +from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR +from mmengine.runner import EpochBasedTrainLoop +from torch.optim import AdamW + +from mmpretrain.models import (VQKD, BEiT, BEiTPretrainViT, BEiTV2Head, + BEiTV2Neck, CrossEntropyLoss) + +# model settings +vqkd_encoder = dict( + arch='base', + img_size=224, + patch_size=16, + in_channels=3, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + out_type='featmap', + with_cls_token=True, + frozen_stages=-1, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + use_shared_rel_pos_bias=False, + layer_scale_init_value=0., + interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None) + +layer_scale_init_value = 0.1 +drop_path_rate = 0. # 0. for 300 epochs and 0.1 for 1600 epochs. +model = dict( + type=BEiT, + backbone=dict( + type=BEiTPretrainViT, + arch='base', + patch_size=16, + out_indices=[-4, -1], + drop_path_rate=drop_path_rate, + final_norm=False, + out_type='raw', + layer_scale_init_value=layer_scale_init_value, + init_cfg=[ + dict(type=TruncNormalInit, std=0.02, layer='Linear'), + dict(type=TruncNormalInit, std=0.02, layer='Conv2d'), + dict(type=ConstantInit, layer='LayerNorm', val=1.0, bias=0.0) + ]), + neck=dict( + type=BEiTV2Neck, + num_layers=2, + early_layers=9, + backbone_arch='base', + drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value, + ), + head=dict( + type=BEiTV2Head, + embed_dims=768, + num_embed=8192, + loss=dict(type=CrossEntropyLoss)), + target_generator=dict( + type=VQKD, + encoder_config=vqkd_encoder, + init_cfg=dict( + type=PretrainedInit, + checkpoint= # noqa + 'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/vqkd_encoder.pth' # noqa + ))) + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + # betas: (0.9, 0.98) for 300 epochs and (0.9, 0.999) for 1600 epochs. + optimizer=dict( + type=AdamW, lr=1.5e-3, betas=(0.9, 0.98), weight_decay=0.05), + clip_grad=dict(max_norm=3.0), + paramwise_cfg=dict( + custom_keys={ + # the following configurations are designed for BEiT + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + 'q_bias': dict(decay_mult=0.0), + 'v_bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.gamma': dict(decay_mult=0.0), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + eta_min=1e-5, + by_epoch=True, + begin=10, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=300) +default_hooks = dict( + # only keeps the latest 3 checkpoints + checkpoint=dict(type=CheckpointHook, interval=1, max_keep_ckpts=3)) + +randomness = dict(seed=0, diff_rank_seed=True) + +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+auto_scale_lr = dict(base_batch_size=2048) diff --git a/mmpretrain/configs/beitv2/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py b/mmpretrain/configs/beitv2/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py new file mode 100644 index 0000000..ee32d3a --- /dev/null +++ b/mmpretrain/configs/beitv2/benchmarks/beit-base-p16_8xb128-coslr-100e_in1k.py @@ -0,0 +1,132 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from ..._base_.datasets.imagenet_bs64_swin_224 import * + from ..._base_.schedules.imagenet_bs1024_adamw_swin import * + from ..._base_.default_runtime import * + +from mmengine.model import PretrainedInit, TruncNormalInit +from mmengine.optim import CosineAnnealingLR, LinearLR +from torch.optim import AdamW + +from mmpretrain.engine.optimizers import \ + LearningRateDecayOptimWrapperConstructor +from mmpretrain.models import (BEiTViT, ImageClassifier, LabelSmoothLoss, + LinearClsHead) +from mmpretrain.models.utils.batch_augments import CutMix, Mixup + +# model settings +model = dict( + type=ImageClassifier, + backbone=dict( + type=BEiTViT, + arch='base', + img_size=224, + patch_size=16, + # 0.2 for 1600 epochs pretrained models and 0.1 for 300 epochs. + drop_path_rate=0.1, + out_type='avg_featmap', + use_abs_pos_emb=False, + use_rel_pos_bias=True, + use_shared_rel_pos_bias=False, + init_cfg=dict(type=PretrainedInit, checkpoint='', prefix='backbone.')), + neck=None, + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=768, + loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + init_cfg=[dict(type=TruncNormalInit, layer='Linear', std=0.02)]), + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=RandomResizedCrop, + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=RandAugment, + policies='timm_increasing', + num_policies=2, + total_level=10, + magnitude_level=9, + magnitude_std=0.5, + hparams=dict(pad_val=[104, 116, 124], interpolation='bicubic')), + dict( + type=RandomErasing, + erase_prob=0.25, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=0.3333333333333333, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type=PackInputs) +] +test_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=ResizeEdge, + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs) +] + +train_dataloader = dict(batch_size=128, dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(batch_size=128, dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +# optimizer wrapper +optim_wrapper = dict( + optimizer=dict(type=AdamW, lr=5e-4, weight_decay=0.05, betas=(0.9, 0.999)), + constructor=LearningRateDecayOptimWrapperConstructor, + paramwise_cfg=dict( + _delete_=True, + # 0.6 for 1600 epochs pretrained models and 0.65 for 300 epochs + layer_decay_rate=0.65, + custom_keys={ + # the following configurations are designed for BEiT + '.ln': dict(decay_mult=0.0), + '.bias': dict(decay_mult=0.0), + 'q_bias': dict(decay_mult=0.0), + 'v_bias': dict(decay_mult=0.0), + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + '.gamma': dict(decay_mult=0.0), + })) + +# learning rate 
scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-4, + by_epoch=True, + begin=0, + end=20, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + by_epoch=True, + begin=20, + end=100, + eta_min=1e-6, + convert_to_iter_based=True) +] + +# runtime settings +default_hooks = dict( + # save checkpoint per epoch. + checkpoint=dict(type=CheckpointHook, interval=1, max_keep_ckpts=2)) + +train_cfg = dict(by_epoch=True, max_epochs=100) + +randomness = dict(seed=0) diff --git a/mmpretrain/configs/beitv2/benchmarks/beit-base-p16_8xb64_in1k.py b/mmpretrain/configs/beitv2/benchmarks/beit-base-p16_8xb64_in1k.py new file mode 100644 index 0000000..ec20ba9 --- /dev/null +++ b/mmpretrain/configs/beitv2/benchmarks/beit-base-p16_8xb64_in1k.py @@ -0,0 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from ..._base_.datasets.imagenet_bs64_swin_224 import * + from ..._base_.schedules.imagenet_bs1024_adamw_swin import * + from ..._base_.default_runtime import * + +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import (BEiTViT, ImageClassifier, LabelSmoothLoss, + LinearClsHead) +from mmpretrain.models.utils.batch_augments.cutmix import CutMix +from mmpretrain.models.utils.batch_augments.mixup import Mixup + +model = dict( + type=ImageClassifier, + backbone=dict( + type=BEiTViT, + arch='base', + img_size=224, + patch_size=16, + out_type='avg_featmap', + use_abs_pos_emb=False, + use_rel_pos_bias=True, + use_shared_rel_pos_bias=False, + ), + neck=None, + head=dict( + type=LinearClsHead, + num_classes=1000, + in_channels=768, + loss=dict(type=LabelSmoothLoss, label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=.02), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/convnext/convnext-base_32xb128_in1k.py b/mmpretrain/configs/convnext/convnext-base_32xb128_in1k.py new file mode 100644 index 0000000..3e8a10f --- /dev/null +++ b/mmpretrain/configs/convnext/convnext-base_32xb128_in1k.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +from mmpretrain.engine import EMAHook + +# dataset setting +train_dataloader.update(batch_size=128) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=None, +) + +# runtime setting +custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/convnext/convnext-base_32xb128_in21k.py b/mmpretrain/configs/convnext/convnext-base_32xb128_in21k.py new file mode 100644 index 0000000..73fb0a0 --- /dev/null +++ b/mmpretrain/configs/convnext/convnext-base_32xb128_in21k.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet21k_bs128 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model setting +model.update(head=dict(num_classes=21841)) + +# dataset setting +data_preprocessor.update(num_classes=21841) +train_dataloader.update(batch_size=128) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/convnext/convnext-large_64xb64_in1k-384px.py b/mmpretrain/configs/convnext/convnext-large_64xb64_in1k-384px.py new file mode 100644 index 0000000..2da428a --- /dev/null +++ b/mmpretrain/configs/convnext/convnext-large_64xb64_in1k-384px.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.config import read_base + +from mmpretrain.engine import EMAHook + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# dataset setting +train_dataloader.update(batch_size=64) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (64 GPUs) x (64 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/convnext/convnext-large_64xb64_in1k.py b/mmpretrain/configs/convnext/convnext-large_64xb64_in1k.py new file mode 100644 index 0000000..e11e6a9 --- /dev/null +++ b/mmpretrain/configs/convnext/convnext-large_64xb64_in1k.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.config import read_base + +from mmpretrain.engine import EMAHook + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# dataset setting +train_dataloader.update(batch_size=64) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=None, +) + +# runtime setting +custom_hooks = [dict(type=EMAHook, momentum=1e-4, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (64 GPUs) x (64 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/convnext/convnext-large_64xb64_in21k.py b/mmpretrain/configs/convnext/convnext-large_64xb64_in21k.py new file mode 100644 index 0000000..d103dfa --- /dev/null +++ b/mmpretrain/configs/convnext/convnext-large_64xb64_in21k.py @@ -0,0 +1,26 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet21k_bs128 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model setting +model.update(head=dict(num_classes=21841)) + +# dataset setting +data_preprocessor.update(num_classes=21841) +train_dataloader.update(batch_size=64) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/convnext/convnext-small_32xb128_in1k-384px.py b/mmpretrain/configs/convnext/convnext-small_32xb128_in1k-384px.py new file mode 100644 index 0000000..9b7bce7 --- /dev/null +++ b/mmpretrain/configs/convnext/convnext-small_32xb128_in1k-384px.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.config import read_base + +from mmpretrain.engine import EMAHook + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# dataset setting +train_dataloader.update(batch_size=128) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/convnext/convnext-small_32xb128_in1k.py b/mmpretrain/configs/convnext/convnext-small_32xb128_in1k.py new file mode 100644 index 0000000..bd43ec1 --- /dev/null +++ b/mmpretrain/configs/convnext/convnext-small_32xb128_in1k.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.config import read_base + +from mmpretrain.engine import EMAHook + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# dataset setting +train_dataloader.update(batch_size=128) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=None, +) + +# runtime setting +custom_hooks = [dict(type=EMAHook, momentum=1e-4, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/convnext/convnext-tiny_32xb128_in1k-384px.py b/mmpretrain/configs/convnext/convnext-tiny_32xb128_in1k-384px.py new file mode 100644 index 0000000..9b7bce7 --- /dev/null +++ b/mmpretrain/configs/convnext/convnext-tiny_32xb128_in1k-384px.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
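The ConvNeXt recipes above register an `EMAHook` with a very small momentum; in MMEngine's convention the momentum is the weight given to the new value at each update. A hedged sketch of the averaged-weight update, purely illustrative:

momentum = 4e-5
def ema_update(averaged_param, source_param):
    # what the EMA model does to each parameter after a training iteration
    return (1.0 - momentum) * averaged_param + momentum * source_param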
+from mmengine.config import read_base + +from mmpretrain.engine import EMAHook + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# dataset setting +train_dataloader.update(batch_size=128) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/convnext/convnext-tiny_32xb128_in1k.py b/mmpretrain/configs/convnext/convnext-tiny_32xb128_in1k.py new file mode 100644 index 0000000..bd43ec1 --- /dev/null +++ b/mmpretrain/configs/convnext/convnext-tiny_32xb128_in1k.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.config import read_base + +from mmpretrain.engine import EMAHook + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# dataset setting +train_dataloader.update(batch_size=128) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=None, +) + +# runtime setting +custom_hooks = [dict(type=EMAHook, momentum=1e-4, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/convnext/convnext-xlarge_64xb64_in1k-384px.py b/mmpretrain/configs/convnext/convnext-xlarge_64xb64_in1k-384px.py new file mode 100644 index 0000000..2da428a --- /dev/null +++ b/mmpretrain/configs/convnext/convnext-xlarge_64xb64_in1k-384px.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.config import read_base + +from mmpretrain.engine import EMAHook + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# dataset setting +train_dataloader.update(batch_size=64) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (64 GPUs) x (64 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/convnext/convnext-xlarge_64xb64_in1k.py b/mmpretrain/configs/convnext/convnext-xlarge_64xb64_in1k.py new file mode 100644 index 0000000..bdb1157 --- /dev/null +++ b/mmpretrain/configs/convnext/convnext-xlarge_64xb64_in1k.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmengine.config import read_base + +from mmpretrain.engine import EMAHook + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# dataset setting +train_dataloader.update(batch_size=64) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=None, +) + +# runtime setting +custom_hooks = [dict(type=EMAHook, momentum=1e-4, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (64 GPUs) x (64 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/convnext/convnext-xlarge_64xb64_in21k.py b/mmpretrain/configs/convnext/convnext-xlarge_64xb64_in21k.py new file mode 100644 index 0000000..21f10dc --- /dev/null +++ b/mmpretrain/configs/convnext/convnext-xlarge_64xb64_in21k.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.config import read_base + +from mmpretrain.engine import EMAHook + +with read_base(): + from .._base_.datasets.imagenet21k_bs128 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model setting +model.update(head=dict(num_classes=21841)) + +# dataset setting +data_preprocessor.update(num_classes=21841) +train_dataloader.update(batch_size=64) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/convnext/convnext_base_32xb128_in1k_384px.py b/mmpretrain/configs/convnext/convnext_base_32xb128_in1k_384px.py new file mode 100644 index 0000000..6d90e71 --- /dev/null +++ b/mmpretrain/configs/convnext/convnext_base_32xb128_in1k_384px.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.convnext_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +from mmpretrain.engine import EMAHook + +# dataset setting +train_dataloader.update(batch_size=128) + +# schedule setting +optim_wrapper.update( + optimizer=dict(lr=4e-3), + clip_grad=dict(max_norm=5.0), +) + +# runtime setting +custom_hooks = [dict(type=EMAHook, momentum=4e-5, priority='ABOVE_NORMAL')] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/eva/eva_mae_style_vit_base_p16_16xb256_coslr_400e_in1k.py b/mmpretrain/configs/eva/eva_mae_style_vit_base_p16_16xb256_coslr_400e_in1k.py new file mode 100644 index 0000000..a254ac8 --- /dev/null +++ b/mmpretrain/configs/eva/eva_mae_style_vit_base_p16_16xb256_coslr_400e_in1k.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_vit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks import CheckpointHook +from mmengine.optim import CosineAnnealingLR, LinearLR, OptimWrapper +from mmengine.runner import EpochBasedTrainLoop +from torch.optim import AdamW + +from mmpretrain.models import (EVA, CLIPGenerator, CosineSimilarityLoss, + MAEPretrainDecoder, MIMHead) + +# dataset settings +train_dataloader.batch_size = 256 + +# model settings +model.type = EVA +model.init_cfg = None +model.backbone.update(init_cfg=[ + dict(type='Xavier', distribution='uniform', layer='Linear'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) +]) +model.neck.update( + type=MAEPretrainDecoder, + predict_feature_dim=512, + init_cfg=[ + dict(type='Xavier', distribution='uniform', layer='Linear'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]) +model.head = dict( + type=MIMHead, + loss=dict(type=CosineSimilarityLoss, shift_factor=2.0, scale_factor=2.0)) +model.target_generator = dict( + type=CLIPGenerator, + tokenizer_path= # noqa + 'https://download.openmmlab.com/mmselfsup/1.x/target_generator_ckpt/clip_vit_base_16.pth.tar' # noqa +) + +# optimizer wrapper +optim_wrapper = dict( + type=OptimWrapper, + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) +find_unused_parameters = True + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=1e-4, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=400) +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(dict(seed=0, diff_rank_seed=True)) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_1600e_in1k.py b/mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_1600e_in1k.py new file mode 100644 index 0000000..a32cb0c --- /dev/null +++ b/mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_1600e_in1k.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
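The EVA pre-training config above and the MAE HiViT configs that follow all write the learning rate as `1.5e-4 * 4096 / 256`, i.e. the usual base-LR times total-batch-size over 256 rule; a quick illustrative check:

base_lr = 1.5e-4
total_batch_size = 4096                  # matches auto_scale_lr's base_batch_size
lr = base_lr * total_batch_size / 256    # -> 2.4e-3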
+from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_hivit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=1600) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_400e_in1k.py b/mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_400e_in1k.py new file mode 100644 index 0000000..6ffcf6d --- /dev/null +++ b/mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_400e_in1k.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_hivit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=400) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_800e_in1k.py b/mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_800e_in1k.py new file mode 100644 index 0000000..f8a49b5 --- /dev/null +++ b/mmpretrain/configs/mae/mae_hivit_base_p16_8xb512_amp_coslr_800e_in1k.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
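All of these MAE-style schedules pair a 40-epoch linear warmup with a cosine decay over the remaining epochs (here T_max=360 for the 400-epoch run). A rough sketch of the resulting curve, simplified to epoch granularity and assuming CosineAnnealingLR's default eta_min of 0; the configs actually convert both schedulers to per-iteration steps:

import math

def warmup_cosine_lr(epoch: int, peak_lr: float = 2.4e-3, warmup: int = 40,
                     total: int = 400, start_factor: float = 1e-4) -> float:
    # Linear warmup from start_factor * peak_lr up to peak_lr over the first 40 epochs.
    if epoch < warmup:
        t = epoch / warmup
        return peak_lr * (start_factor + (1 - start_factor) * t)
    # Cosine annealing from peak_lr down toward 0 over the remaining total - warmup epochs.
    t = (epoch - warmup) / (total - warmup)
    return 0.5 * peak_lr * (1 + math.cos(math.pi * t))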
+from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_hivit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=760, + by_epoch=True, + begin=40, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=800) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_hivit_large_p16_8xb512_amp_coslr_1600e_in1k.py b/mmpretrain/configs/mae/mae_hivit_large_p16_8xb512_amp_coslr_1600e_in1k.py new file mode 100644 index 0000000..ae1aba5 --- /dev/null +++ b/mmpretrain/configs/mae/mae_hivit_large_p16_8xb512_amp_coslr_1600e_in1k.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_hivit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# model settings +model.update( + backbone=dict(type=MAEHiViT, arch='large'), + neck=dict(type=MAEPretrainDecoder, embed_dim=768)) + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=1600) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_hivit_large_p16_8xb512_amp_coslr_400e_in1k.py b/mmpretrain/configs/mae/mae_hivit_large_p16_8xb512_amp_coslr_400e_in1k.py new file mode 100644 index 0000000..cdc1259 --- /dev/null +++ b/mmpretrain/configs/mae/mae_hivit_large_p16_8xb512_amp_coslr_400e_in1k.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_hivit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# model settings +model.update( + backbone=dict(type=MAEHiViT, arch='large'), + neck=dict(type=MAEPretrainDecoder, embed_dim=768)) + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=400) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_hivit_large_p16_8xb512_amp_coslr_800e_in1k.py b/mmpretrain/configs/mae/mae_hivit_large_p16_8xb512_amp_coslr_800e_in1k.py new file mode 100644 index 0000000..657ee01 --- /dev/null +++ b/mmpretrain/configs/mae/mae_hivit_large_p16_8xb512_amp_coslr_800e_in1k.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_hivit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# model settings +model.update( + backbone=dict(type=MAEHiViT, arch='large'), + neck=dict(type=MAEPretrainDecoder, embed_dim=768)) + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'norm': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=760, + by_epoch=True, + begin=40, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=800) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True +find_unused_parameters = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_1600e_in1k.py b/mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_1600e_in1k.py new file mode 100644 index 0000000..a4b325d --- /dev/null +++ b/mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_1600e_in1k.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_vit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) 
+ })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=1600) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_300e_in1k.py b/mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_300e_in1k.py new file mode 100644 index 0000000..6cee3bc --- /dev/null +++ b/mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_300e_in1k.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_vit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=300) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_400e_in1k.py b/mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_400e_in1k.py new file mode 100644 index 0000000..fb78e2b --- /dev/null +++ b/mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_400e_in1k.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
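Every MAE optimizer wrapper above uses paramwise_cfg custom_keys with decay_mult=0 to keep weight decay off layer norms, biases, and the special tokens. Conceptually this splits the parameters into a decayed and an undecayed group, roughly as in the sketch below; the real name matching is done by mmengine's optimizer wrapper constructor, so treat this purely as an illustration:

from torch import nn

def build_param_groups(model: nn.Module,
                       no_decay_keys=('ln', 'bias', 'pos_embed', 'mask_token', 'cls_token'),
                       weight_decay: float = 0.05):
    decay, no_decay = [], []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # Parameters whose name matches any custom key are effectively given decay_mult=0.
        if any(key in name for key in no_decay_keys):
            no_decay.append(param)
        else:
            decay.append(param)
    return [
        dict(params=decay, weight_decay=weight_decay),
        dict(params=no_decay, weight_decay=0.0),
    ]

# e.g. torch.optim.AdamW(build_param_groups(backbone), lr=2.4e-3, betas=(0.9, 0.95))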
+from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_vit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=400) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_800e_in1k.py b/mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_800e_in1k.py new file mode 100644 index 0000000..f34e1da --- /dev/null +++ b/mmpretrain/configs/mae/mae_vit_base_p16_8xb512_amp_coslr_800e_in1k.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_vit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) 
+ })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=760, + by_epoch=True, + begin=40, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=800) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_vit_huge_p14_8xb512_amp_coslr_1600e_in1k.py b/mmpretrain/configs/mae/mae_vit_huge_p14_8xb512_amp_coslr_1600e_in1k.py new file mode 100644 index 0000000..bc91ee0 --- /dev/null +++ b/mmpretrain/configs/mae/mae_vit_huge_p14_8xb512_amp_coslr_1600e_in1k.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_vit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# model settings +model.update( + backbone=dict(type=MAEViT, arch='h', patch_size=14), + neck=dict( + type=MAEPretrainDecoder, + embed_dim=1280, + patch_size=14, + num_patches=256), + head=dict(patch_size=14)) + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=1600) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_1600e_in1k.py b/mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_1600e_in1k.py new file mode 100644 index 0000000..ef0777a --- /dev/null +++ b/mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_1600e_in1k.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_vit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# model settings +model.update( + backbone=dict(type=MAEViT, arch='l'), + neck=dict(type=MAEPretrainDecoder, embed_dim=1024)) + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=1600) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_300e_in1k.py b/mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_300e_in1k.py new file mode 100644 index 0000000..ea005e4 --- /dev/null +++ b/mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_300e_in1k.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_vit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# model settings +model.update( + backbone=dict(type=MAEViT, arch='l'), + neck=dict(type=MAEPretrainDecoder, embed_dim=1024)) + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.)
+ })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=260, + by_epoch=True, + begin=40, + end=300, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=300) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_400e_in1k.py b/mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_400e_in1k.py new file mode 100644 index 0000000..6f73549 --- /dev/null +++ b/mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_400e_in1k.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_vit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# model settings +model.update( + backbone=dict(type=MAEViT, arch='l'), + neck=dict(type=MAEPretrainDecoder, embed_dim=1024)) + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=360, + by_epoch=True, + begin=40, + end=400, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=400) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_800e_in1k.py b/mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_800e_in1k.py new file mode 100644 index 0000000..a0a5abd --- /dev/null +++ b/mmpretrain/configs/mae/mae_vit_large_p16_8xb512_amp_coslr_800e_in1k.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently.
+from mmengine.config import read_base + +with read_base(): + from .._base_.models.mae_vit_base_p16 import * + from .._base_.datasets.imagenet_bs512_mae import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.amp_optimizer_wrapper import AmpOptimWrapper +from mmengine.optim.scheduler.lr_scheduler import CosineAnnealingLR, LinearLR +from mmengine.runner.loops import EpochBasedTrainLoop +from torch.optim.adamw import AdamW + +# model settings +model.update( + backbone=dict(type=MAEViT, arch='l'), + neck=dict(type=MAEPretrainDecoder, embed_dim=1024)) + +# optimizer wrapper +optim_wrapper = dict( + type=AmpOptimWrapper, + loss_scale='dynamic', + optimizer=dict( + type=AdamW, + lr=1.5e-4 * 4096 / 256, + betas=(0.9, 0.95), + weight_decay=0.05), + paramwise_cfg=dict( + custom_keys={ + 'ln': dict(decay_mult=0.0), + 'bias': dict(decay_mult=0.0), + 'pos_embed': dict(decay_mult=0.), + 'mask_token': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +# learning rate scheduler +param_scheduler = [ + dict( + type=LinearLR, + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type=CosineAnnealingLR, + T_max=760, + by_epoch=True, + begin=40, + end=800, + convert_to_iter_based=True) +] + +# runtime settings +train_cfg = dict(type=EpochBasedTrainLoop, max_epochs=800) +# only keeps the latest 3 checkpoints +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=1, max_keep_ckpts=3) + +randomness.update(seed=0, diff_rank_seed=True) + +# auto resume +resume = True + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=4096) diff --git a/mmpretrain/configs/mobilenet_v2/mobilenet_v2_8xb32_in1k.py b/mmpretrain/configs/mobilenet_v2/mobilenet_v2_8xb32_in1k.py new file mode 100644 index 0000000..79eec63 --- /dev/null +++ b/mmpretrain/configs/mobilenet_v2/mobilenet_v2_8xb32_in1k.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs32_pil_resize import * + from .._base_.default_runtime import * + from .._base_.models.mobilenet_v2_1x import * + from .._base_.schedules.imagenet_bs256_epochstep import * diff --git a/mmpretrain/configs/mobilenet_v3/mobilenet_v3_large_8xb128_in1k.py b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_large_8xb128_in1k.py new file mode 100644 index 0000000..3f1bee1 --- /dev/null +++ b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_large_8xb128_in1k.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently.
+ +# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mobilenet_v3_small import * + from .._base_.datasets.imagenet_bs128_mbv3 import * + from .._base_.default_runtime import * + +from mmengine.optim import StepLR +from torch.optim import RMSprop + +# model settings +model.merge( + dict( + backbone=dict(arch='large'), + head=dict(in_channels=960, mid_channels=[1280]), + )) +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type=RMSprop, + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5)) + +param_scheduler = dict(type=StepLR, by_epoch=True, step_size=2, gamma=0.973) + +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_050_8xb128_in1k.py b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_050_8xb128_in1k.py new file mode 100644 index 0000000..50e1ffc --- /dev/null +++ b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_050_8xb128_in1k.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification + +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mobilenet_v3_small import * + from .._base_.datasets.imagenet_bs128_mbv3 import * + from .._base_.default_runtime import * + +from mmengine.optim import StepLR +from torch.nn.modules.batchnorm import BatchNorm2d +from torch.optim import RMSprop + +# model settings +model.merge( + dict( + backbone=dict( + arch='small_050', + norm_cfg=dict(type=BatchNorm2d, eps=1e-5, momentum=0.1)), + head=dict(in_channels=288), + )) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=RandomResizedCrop, + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=AutoAugment, + policies='imagenet', + hparams=dict(pad_val=[round(x) for x in [103.53, 116.28, 123.675]])), + dict( + type=RandomErasing, + erase_prob=0.2, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=ResizeEdge, + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs), +] + +train_dataloader.merge(dict(dataset=dict(pipeline=train_pipeline))) + +val_dataloader.merge(dict(dataset=dict(pipeline=test_pipeline))) +# If you want standard test, please manually configure the test dataset +test_dataloader = val_dataloader + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type=RMSprop, + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5)) + +param_scheduler = dict(type=StepLR, by_epoch=True, step_size=2, gamma=0.973) + +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=10) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. 
+# base_batch_size = (8 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_075_8xb128_in1k.py b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_075_8xb128_in1k.py new file mode 100644 index 0000000..c8c640c --- /dev/null +++ b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_075_8xb128_in1k.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification + +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mobilenet_v3_small import * + from .._base_.datasets.imagenet_bs128_mbv3 import * + from .._base_.default_runtime import * + +from mmengine.optim import StepLR +from torch.nn.modules.batchnorm import BatchNorm2d +from torch.optim import RMSprop + +# model settings +model.merge( + dict( + backbone=dict( + arch='small_075', + norm_cfg=dict(type=BatchNorm2d, eps=1e-5, momentum=0.1)), + head=dict(in_channels=432), + )) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=RandomResizedCrop, + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict( + type=AutoAugment, + policies='imagenet', + hparams=dict(pad_val=[round(x) for x in [103.53, 116.28, 123.675]])), + dict( + type=RandomErasing, + erase_prob=0.2, + mode='rand', + min_area_ratio=0.02, + max_area_ratio=1 / 3, + fill_color=[103.53, 116.28, 123.675], + fill_std=[57.375, 57.12, 58.395]), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict( + type=ResizeEdge, + scale=256, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type=CenterCrop, crop_size=224), + dict(type=PackInputs), +] + +train_dataloader.merge(dict(dataset=dict(pipeline=train_pipeline))) +val_dataloader.merge(dict(dataset=dict(pipeline=test_pipeline))) +test_dataloader = val_dataloader + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type=RMSprop, + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5)) + +param_scheduler = dict(type=StepLR, by_epoch=True, step_size=2, gamma=0.973) + +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=10) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb128_in1k.py b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb128_in1k.py new file mode 100644 index 0000000..0c220a0 --- /dev/null +++ b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb128_in1k.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
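The MobileNetV3 recipes above all pair RMSprop (lr=0.064) with a StepLR that multiplies the learning rate by 0.973 every 2 epochs across 600 epochs. The implied decay curve is easy to check by hand (a throwaway calculation, not framework code):

def mbv3_lr(epoch: int, base_lr: float = 0.064, gamma: float = 0.973,
            step_size: int = 2) -> float:
    # StepLR: the LR is multiplied by gamma once per completed step_size epochs.
    return base_lr * gamma ** (epoch // step_size)

print(mbv3_lr(0))    # 0.064
print(mbv3_lr(300))  # about 1.1e-3 at the halfway point
print(mbv3_lr(599))  # about 1.8e-5 by the final epoch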
+# Refers to https://pytorch.org/blog/ml-models-torchvision-v0.9/#classification + +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mobilenet_v3_small import * + from .._base_.datasets.imagenet_bs128_mbv3 import * + from .._base_.default_runtime import * + +from mmengine.optim import StepLR +from torch.optim import RMSprop + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + type=RMSprop, + lr=0.064, + alpha=0.9, + momentum=0.9, + eps=0.0316, + weight_decay=1e-5)) + +param_scheduler = dict(type=StepLR, by_epoch=True, step_size=2, gamma=0.973) + +train_cfg = dict(by_epoch=True, max_epochs=600, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (8 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=1024) diff --git a/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb16_cifar10.py b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb16_cifar10.py new file mode 100644 index 0000000..0f91ee3 --- /dev/null +++ b/mmpretrain/configs/mobilenet_v3/mobilenet_v3_small_8xb16_cifar10.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.models.mobilenet_v3_small import * + from .._base_.datasets.cifar10_bs16 import * + from .._base_.schedules.cifar10_bs128 import * + from .._base_.default_runtime import * + +from mmengine.optim import MultiStepLR + +# model settings +model.merge( + dict( + head=dict( + _delete_=True, + type=StackedLinearClsHead, + num_classes=10, + in_channels=576, + mid_channels=[1280], + act_cfg=dict(type=Hardswish), + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5)))) +# schedule settings +param_scheduler.merge( + dict( + type=MultiStepLR, + by_epoch=True, + milestones=[120, 170], + gamma=0.1, + )) + +train_cfg.merge(dict(by_epoch=True, max_epochs=200)) diff --git a/mmpretrain/configs/resnet/resnet18_8xb32_in1k.py b/mmpretrain/configs/resnet/resnet18_8xb32_in1k.py new file mode 100644 index 0000000..f16d248 --- /dev/null +++ b/mmpretrain/configs/resnet/resnet18_8xb32_in1k.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs32 import * + from .._base_.default_runtime import * + from .._base_.models.resnet18 import * + from .._base_.schedules.imagenet_bs256 import * diff --git a/mmpretrain/configs/simclr/simclr_resnet50_16xb256_coslr_200e_in1k.py b/mmpretrain/configs/simclr/simclr_resnet50_16xb256_coslr_200e_in1k.py new file mode 100644 index 0000000..09c738f --- /dev/null +++ b/mmpretrain/configs/simclr/simclr_resnet50_16xb256_coslr_200e_in1k.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs32_simclr import * + from .._base_.schedules.imagenet_lars_coslr_200e import * + from .._base_.default_runtime import * + +from mmengine.hooks.checkpoint_hook import CheckpointHook +from mmengine.optim.optimizer.optimizer_wrapper import OptimWrapper + +from mmpretrain.engine.optimizers.lars import LARS +from mmpretrain.models.backbones.resnet import ResNet +from mmpretrain.models.heads.contrastive_head import ContrastiveHead +from mmpretrain.models.losses.cross_entropy_loss import CrossEntropyLoss +from mmpretrain.models.necks.nonlinear_neck import NonLinearNeck +from mmpretrain.models.selfsup.simclr import SimCLR + +# dataset settings +train_dataloader.merge(dict(batch_size=256)) + +# model settings +model = dict( + type=SimCLR, + backbone=dict( + type=ResNet, + depth=50, + norm_cfg=dict(type='SyncBN'), + zero_init_residual=True), + neck=dict( + type=NonLinearNeck, # SimCLR non-linear neck + in_channels=2048, + hid_channels=2048, + out_channels=128, + num_layers=2, + with_avg_pool=True), + head=dict( + type=ContrastiveHead, + loss=dict(type=CrossEntropyLoss), + temperature=0.1), +) + +# optimizer +optim_wrapper = dict( + type=OptimWrapper, + optimizer=dict(type=LARS, lr=4.8, momentum=0.9, weight_decay=1e-6), + paramwise_cfg=dict( + custom_keys={ + 'bn': dict(decay_mult=0, lars_exclude=True), + 'bias': dict(decay_mult=0, lars_exclude=True), + # bn layer in ResNet block downsample module + 'downsample.1': dict(decay_mult=0, lars_exclude=True) + })) + +# runtime settings +default_hooks.checkpoint = dict( + type=CheckpointHook, interval=10, max_keep_ckpts=3) diff --git a/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py new file mode 100644 index 0000000..09af3d0 --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k.py @@ -0,0 +1,35 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict(img_size=224, drop_path_rate=0.5, stage_cfgs=None), + head=dict( + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type=LabelSmoothLoss, + label_smooth_val=0.1, + mode='original', + loss_weight=0), + topk=None, + cal_acc=False), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py new file mode 100644 index 0000000..aacdc32 --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin_base_16xb64_in1k_384px.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
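The ContrastiveHead in the SimCLR config above applies a cross-entropy loss to temperature-scaled similarities (temperature=0.1). A compact, generic InfoNCE sketch of that idea follows; mmpretrain's own head differs in how it gathers and arranges positives and negatives, so treat this purely as an illustration:

import torch
import torch.nn.functional as F

def info_nce(pos: torch.Tensor, neg: torch.Tensor, temperature: float = 0.1) -> torch.Tensor:
    # pos: (N, 1) similarity of each sample with its positive pair.
    # neg: (N, K) similarities with negatives. Dividing by the temperature sharpens
    # the distribution; index 0 is treated as the correct class for cross-entropy.
    logits = torch.cat([pos, neg], dim=1) / temperature
    labels = torch.zeros(logits.size(0), dtype=torch.long, device=logits.device)
    return F.cross_entropy(logits, labels)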
+from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py new file mode 100644 index 0000000..b8fc279 --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict(arch='large', img_size=224, stage_cfgs=None), + head=dict(in_channels=1536), +) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py new file mode 100644 index 0000000..9a449aa --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin_large_16xb64_in1k_384px.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict(arch='large'), + head=dict(in_channels=1536), +) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py b/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py new file mode 100644 index 0000000..2003cd3 --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin_large_8xb8_cub_384px.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base +from mmengine.hooks import CheckpointHook, LoggerHook +from mmengine.model import PretrainedInit +from torch.optim.adamw import AdamW + +from mmpretrain.models import ImageClassifier + +with read_base(): + from .._base_.datasets.cub_bs8_384 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_base import * + from .._base_.schedules.cub_bs64 import * + +# model settings +checkpoint = 'https://download.openmmlab.com/mmclassification/v0/swin-transformer/convert/swin-large_3rdparty_in21k-384px.pth' # noqa + +model.update( + backbone=dict( + arch='large', + init_cfg=dict( + type=PretrainedInit, checkpoint=checkpoint, prefix='backbone')), + head=dict(num_classes=200, in_channels=1536)) + +# schedule settings +optim_wrapper = dict( + optimizer=dict( + _delete_=True, + type=AdamW, + lr=5e-6, + weight_decay=0.0005, + eps=1e-8, + betas=(0.9, 0.999)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.absolute_pos_embed': dict(decay_mult=0.0), + '.relative_position_bias_table': dict(decay_mult=0.0) + }), + clip_grad=dict(max_norm=5.0), +) + +default_hooks = dict( + # log every 20 intervals + logger=dict(type=LoggerHook, interval=20), + # save last three checkpoints + checkpoint=dict(type=CheckpointHook, interval=1, max_keep_ckpts=3)) diff --git a/mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py new file mode 100644 index 0000000..5979252 --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin_small_16xb64_in1k.py @@ -0,0 +1,37 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict( + arch='small', img_size=224, drop_path_rate=0.3, stage_cfgs=None), + head=dict( + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type=LabelSmoothLoss, + label_smooth_val=0.1, + mode='original', + loss_weight=0), + topk=None, + cal_acc=False), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py b/mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py new file mode 100644 index 0000000..733e1ef --- /dev/null +++ b/mmpretrain/configs/swin_transformer/swin_tiny_16xb64_in1k.py @@ -0,0 +1,37 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
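The Swin classification configs above train with batch augments Mixup(alpha=0.8) and CutMix(alpha=1.0). As a reminder of what the Mixup half does, here is a bare-bones sketch; mmpretrain applies this inside its data preprocessor and also samples CutMix boxes, which is omitted here:

import torch

def mixup(images: torch.Tensor, one_hot_labels: torch.Tensor, alpha: float = 0.8):
    # Draw a mixing ratio from Beta(alpha, alpha) and blend every sample with a
    # randomly permuted partner from the same batch; labels are blended the same way.
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    perm = torch.randperm(images.size(0))
    mixed_images = lam * images + (1 - lam) * images[perm]
    mixed_labels = lam * one_hot_labels + (1 - lam) * one_hot_labels[perm]
    return mixed_images, mixed_labels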
+from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, LabelSmoothLoss, Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict( + arch='tiny', img_size=224, drop_path_rate=0.2, stage_cfgs=None), + head=dict( + in_channels=768, + init_cfg=None, # suppress the default init_cfg of LinearClsHead. + loss=dict( + type=LabelSmoothLoss, + label_smooth_val=0.1, + mode='original', + loss_weight=0), + topk=None, + cal_acc=False), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) + +# schedule settings +optim_wrapper = dict(clip_grad=dict(max_norm=5.0)) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py new file mode 100644 index 0000000..1ecc436 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w12_8xb128_in21k_192px.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup + +with read_base(): + from .._base_.datasets.imagenet21k_bs128 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict( + img_size=192, drop_path_rate=0.5, window_size=[12, 12, 12, 6]), + head=dict(num_classes=21841), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) + +# dataset settings +data_preprocessor = dict(num_classes=21841) + +_base_['train_pipeline'][1]['scale'] = 192 # RandomResizedCrop +_base_['test_pipeline'][1]['scale'] = 219 # ResizeEdge +_base_['test_pipeline'][2]['crop_size'] = 192 # CenterCrop diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py new file mode 100644 index 0000000..103afb4 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_16xb64_in1k_256px.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
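The 219-pixel ResizeEdge paired with a 192-pixel CenterCrop in the SwinV2 IN-21k pipeline above looks like the usual 0.875 test-time crop ratio (224/256) carried over to 192-pixel inputs; this is an inference from the numbers, not something stated in the config:

crop_size = 192
crop_ratio = 224 / 256                       # the conventional 0.875 crop ratio
resize_edge = round(crop_size / crop_ratio)  # 192 / 0.875 = 219.4 -> 219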
+from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict( + img_size=256, drop_path_rate=0.5, window_size=[16, 16, 16, 8]), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py new file mode 100644 index 0000000..6588f50 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w16_in21k_pre_16xb64_in1k_256px.py @@ -0,0 +1,26 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict( + img_size=256, + window_size=[16, 16, 16, 8], + pretrained_window_sizes=[12, 12, 12, 6]), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py new file mode 100644 index 0000000..118c085 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w24_in21k_pre_16xb64_in1k_384px.py @@ -0,0 +1,14 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict( + window_size=[24, 24, 24, 12], pretrained_window_sizes=[12, 12, 12, 6])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py new file mode 100644 index 0000000..d40144c --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_base_w8_16xb64_in1k_256px.py @@ -0,0 +1,23 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base
+from mmengine.model import ConstantInit, TruncNormalInit
+
+from mmpretrain.models import CutMix, Mixup
+
+with read_base():
+    from .._base_.datasets.imagenet_bs64_swin_256 import *
+    from .._base_.default_runtime import *
+    from .._base_.models.swin_transformer_v2_base import *
+    from .._base_.schedules.imagenet_bs1024_adamw_swin import *
+
+# model settings
+model.update(
+    backbone=dict(img_size=256, drop_path_rate=0.5),
+    init_cfg=[
+        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
+        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
+    ],
+    train_cfg=dict(
+        augments=[dict(type=Mixup, alpha=0.8),
+                  dict(type=CutMix, alpha=1.0)]))
diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py
new file mode 100644
index 0000000..1ecc436
--- /dev/null
+++ b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w12_8xb128_in21k_192px.py
@@ -0,0 +1,32 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# This is a BETA new format config file, and the usage may change recently.
+from mmengine.config import read_base
+from mmengine.model import ConstantInit, TruncNormalInit
+
+from mmpretrain.models import CutMix, Mixup
+
+with read_base():
+    from .._base_.datasets.imagenet21k_bs128 import *
+    from .._base_.default_runtime import *
+    from .._base_.models.swin_transformer_v2_base import *
+    from .._base_.schedules.imagenet_bs1024_adamw_swin import *
+
+# model settings
+model.update(
+    backbone=dict(
+        arch='large', img_size=192, drop_path_rate=0.5, window_size=[12, 12, 12, 6]),
+    head=dict(num_classes=21841, in_channels=1536),
+    init_cfg=[
+        dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.),
+        dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.)
+    ],
+    train_cfg=dict(
+        augments=[dict(type=Mixup, alpha=0.8),
+                  dict(type=CutMix, alpha=1.0)]))
+
+# dataset settings
+data_preprocessor = dict(num_classes=21841)
+
+_base_['train_pipeline'][1]['scale'] = 192  # RandomResizedCrop
+_base_['test_pipeline'][1]['scale'] = 219  # ResizeEdge
+_base_['test_pipeline'][2]['crop_size'] = 192  # CenterCrop
diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py
new file mode 100644
index 0000000..0a1b59d
--- /dev/null
+++ b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w16_in21k_pre_16xb64_in1k_256px.py
@@ -0,0 +1,24 @@
+# Only for evaluation
+# Copyright (c) OpenMMLab. All rights reserved.
+# This is a BETA new format config file, and the usage may change recently.
+from mmengine.config import read_base + +from mmpretrain.models import CrossEntropyLoss + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict( + arch='large', + img_size=256, + window_size=[16, 16, 16, 8], + pretrained_window_sizes=[12, 12, 12, 6]), + head=dict( + in_channels=1536, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5))) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py new file mode 100644 index 0000000..b20bcea --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_large_w24_in21k_pre_16xb64_in1k_384px.py @@ -0,0 +1,24 @@ +# Only for evaluation +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +from mmpretrain.models import CrossEntropyLoss + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_384 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict( + arch='large', + img_size=384, + window_size=[24, 24, 24, 12], + pretrained_window_sizes=[12, 12, 12, 6]), + head=dict( + in_channels=1536, + loss=dict(type=CrossEntropyLoss, loss_weight=1.0), + topk=(1, 5))) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py new file mode 100644 index 0000000..dfd15c3 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_small_w16_16xb64_in1k_256px.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict( + arch='small', + img_size=256, + drop_path_rate=0.3, + window_size=[16, 16, 16, 8]), + head=dict(in_channels=768), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_small_w8_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_small_w8_16xb64_in1k_256px.py new file mode 100644 index 0000000..bfec346 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_small_w8_16xb64_in1k_256px.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict(arch='small', img_size=256, drop_path_rate=0.3), + head=dict(in_channels=768), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py new file mode 100644 index 0000000..f2fa160 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w16_16xb64_in1k_256px.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict( + arch='tiny', + img_size=256, + drop_path_rate=0.2, + window_size=[16, 16, 16, 8]), + head=dict(in_channels=768), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w8_16xb64_in1k_256px.py b/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w8_16xb64_in1k_256px.py new file mode 100644 index 0000000..8cca2b3 --- /dev/null +++ b/mmpretrain/configs/swin_transformer_v2/swinv2_tiny_w8_16xb64_in1k_256px.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit + +from mmpretrain.models import CutMix, Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_256 import * + from .._base_.default_runtime import * + from .._base_.models.swin_transformer_v2_base import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +# model settings +model.update( + backbone=dict(arch='tiny', img_size=256, drop_path_rate=0.2), + head=dict(in_channels=768), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=0.02, bias=0.), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.) + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) diff --git a/mmpretrain/configs/vision_transformer/vit_base_p16_32xb128_mae_in1k.py b/mmpretrain/configs/vision_transformer/vit_base_p16_32xb128_mae_in1k.py new file mode 100644 index 0000000..18c2afd --- /dev/null +++ b/mmpretrain/configs/vision_transformer/vit_base_p16_32xb128_mae_in1k.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base +from mmengine.model import ConstantInit, TruncNormalInit +from torch.optim import AdamW + +from mmpretrain.engine import EMAHook +from mmpretrain.models import CutMix, Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_swin_224 import * + from .._base_.default_runtime import * + from .._base_.models.vit_base_p16 import * + from .._base_.schedules.imagenet_bs1024_adamw_swin import * + +model.update( + backbone=dict(drop_rate=0, drop_path_rate=0.1, init_cfg=None), + head=dict(loss=dict(mode='original')), + init_cfg=[ + dict(type=TruncNormalInit, layer='Linear', std=.02), + dict(type=ConstantInit, layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict( + augments=[dict(type=Mixup, alpha=0.8), + dict(type=CutMix, alpha=1.0)])) + +# dataset settings +train_dataloader.update(batch_size=128) + +# schedule settings +optim_wrapper.update( + optimizer=dict( + type=AdamW, + lr=1e-4 * 4096 / 256, + weight_decay=0.3, + eps=1e-8, + betas=(0.9, 0.95)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# runtime settings +custom_hooks = [dict(type=EMAHook, momentum=1e-4)] + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr.update(base_batch_size=4096) diff --git a/mmpretrain/configs/vision_transformer/vit_base_p16_64xb64_in1k.py b/mmpretrain/configs/vision_transformer/vit_base_p16_64xb64_in1k.py new file mode 100644 index 0000000..8f128d1 --- /dev/null +++ b/mmpretrain/configs/vision_transformer/vit_base_p16_64xb64_in1k.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +from mmpretrain.models import Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_pil_resize_autoaug import * + from .._base_.default_runtime import * + from .._base_.models.vit_base_p16 import * + from .._base_.schedules.imagenet_bs4096_adamw import * + +# model setting +model.update( + head=dict(hidden_dim=3072), + train_cfg=dict(augments=dict(type=Mixup, alpha=0.2)), +) + +# schedule setting +optim_wrapper.update(clip_grad=dict(max_norm=1.0)) diff --git a/mmpretrain/configs/vision_transformer/vit_base_p16_64xb64_in1k_384px.py b/mmpretrain/configs/vision_transformer/vit_base_p16_64xb64_in1k_384px.py new file mode 100644 index 0000000..98e01f3 --- /dev/null +++ b/mmpretrain/configs/vision_transformer/vit_base_p16_64xb64_in1k_384px.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base + +from mmpretrain.datasets import (CenterCrop, LoadImageFromFile, PackInputs, + RandomFlip, RandomResizedCrop, ResizeEdge) + +with read_base(): + from .._base_.datasets.imagenet_bs64_pil_resize import * + from .._base_.default_runtime import * + from .._base_.models.vit_base_p16 import * + from .._base_.schedules.imagenet_bs4096_adamw import * + +# model setting +model.update(backbone=dict(img_size=384)) + +# dataset setting +data_preprocessor.update( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=RandomResizedCrop, scale=384, backend='pillow'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=ResizeEdge, scale=384, edge='short', backend='pillow'), + dict(type=CenterCrop, crop_size=384), + dict(type=PackInputs), +] + +train_dataloader.update(dataset=dict(pipeline=train_pipeline)) +val_dataloader.update(dataset=dict(pipeline=test_pipeline)) +test_dataloader.update(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper.update(clip_grad=dict(max_norm=1.0)) diff --git a/mmpretrain/configs/vision_transformer/vit_base_p32_64xb64_in1k.py b/mmpretrain/configs/vision_transformer/vit_base_p32_64xb64_in1k.py new file mode 100644 index 0000000..3651c93 --- /dev/null +++ b/mmpretrain/configs/vision_transformer/vit_base_p32_64xb64_in1k.py @@ -0,0 +1,26 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +from mmpretrain.models import CrossEntropyLoss, Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_pil_resize_autoaug import * + from .._base_.default_runtime import * + from .._base_.models.vit_base_p16 import * + from .._base_.schedules.imagenet_bs4096_adamw import * + +# model setting +model.update( + backbone=dict(patch_size=32), + head=dict( + hidden_dim=3072, + topk=(1, 5), + ), + train_cfg=dict(augments=dict(type=Mixup, alpha=0.2)), +) + +model.head.loss = dict(type=CrossEntropyLoss, loss_weight=1.0) + +# schedule setting +optim_wrapper.update(clip_grad=dict(max_norm=1.0)) diff --git a/mmpretrain/configs/vision_transformer/vit_base_p32_64xb64_in1k_384px.py b/mmpretrain/configs/vision_transformer/vit_base_p32_64xb64_in1k_384px.py new file mode 100644 index 0000000..253740c --- /dev/null +++ b/mmpretrain/configs/vision_transformer/vit_base_p32_64xb64_in1k_384px.py @@ -0,0 +1,48 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base + +from mmpretrain.datasets import (CenterCrop, LoadImageFromFile, PackInputs, + RandomFlip, RandomResizedCrop, ResizeEdge) +from mmpretrain.models import CrossEntropyLoss + +with read_base(): + from .._base_.datasets.imagenet_bs64_pil_resize import * + from .._base_.default_runtime import * + from .._base_.models.vit_base_p16 import * + from .._base_.schedules.imagenet_bs4096_adamw import * + +# model setting +model.update( + backbone=dict(img_size=384, patch_size=32), head=dict(topk=(1, 5))) + +model.head.loss = dict(type=CrossEntropyLoss, loss_weight=1.0) + +# dataset setting +data_preprocessor.update( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=RandomResizedCrop, scale=384, backend='pillow'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=ResizeEdge, scale=384, edge='short', backend='pillow'), + dict(type=CenterCrop, crop_size=384), + dict(type=PackInputs), +] + +train_dataloader.update(dataset=dict(pipeline=train_pipeline)) +val_dataloader.update(dataset=dict(pipeline=test_pipeline)) +test_dataloader.update(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper.update(clip_grad=dict(max_norm=1.0)) diff --git a/mmpretrain/configs/vision_transformer/vit_large_p16_64xb64_in1k.py b/mmpretrain/configs/vision_transformer/vit_large_p16_64xb64_in1k.py new file mode 100644 index 0000000..03f4a74 --- /dev/null +++ b/mmpretrain/configs/vision_transformer/vit_large_p16_64xb64_in1k.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. +from mmengine.config import read_base + +from mmpretrain.models import CrossEntropyLoss, Mixup + +with read_base(): + from .._base_.datasets.imagenet_bs64_pil_resize_autoaug import * + from .._base_.default_runtime import * + from .._base_.models.vit_base_p16 import * + from .._base_.schedules.imagenet_bs4096_adamw import * + +# model setting +model.update( + backbone=dict(arch='l'), + head=dict( + hidden_dim=3072, + in_channels=1024, + topk=(1, 5), + ), + train_cfg=dict(augments=dict(type=Mixup, alpha=0.2)), +) + +model.head.loss = dict(type=CrossEntropyLoss, loss_weight=1.0) + +# schedule setting +optim_wrapper.update(clip_grad=dict(max_norm=1.0)) diff --git a/mmpretrain/configs/vision_transformer/vit_large_p16_64xb64_in1k_384px.py b/mmpretrain/configs/vision_transformer/vit_large_p16_64xb64_in1k_384px.py new file mode 100644 index 0000000..eba4bc4 --- /dev/null +++ b/mmpretrain/configs/vision_transformer/vit_large_p16_64xb64_in1k_384px.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# This is a BETA new format config file, and the usage may change recently. 
+from mmengine.config import read_base
+
+from mmpretrain.datasets import (CenterCrop, LoadImageFromFile, PackInputs,
+                                 RandomFlip, RandomResizedCrop, ResizeEdge)
+from mmpretrain.models import CrossEntropyLoss
+
+with read_base():
+    from .._base_.datasets.imagenet_bs64_pil_resize import *
+    from .._base_.default_runtime import *
+    from .._base_.models.vit_base_p16 import *
+    from .._base_.schedules.imagenet_bs4096_adamw import *
+
+# model setting
+model.update(
+    backbone=dict(arch='l', img_size=384),
+    head=dict(in_channels=1024, topk=(1, 5)))
+
+model.head.loss = dict(type=CrossEntropyLoss, loss_weight=1.0)
+
+# dataset setting
+data_preprocessor.update(
+    mean=[127.5, 127.5, 127.5],
+    std=[127.5, 127.5, 127.5],
+    # convert image from BGR to RGB
+    to_rgb=True,
+)
+
+train_pipeline = [
+    dict(type=LoadImageFromFile),
+    dict(type=RandomResizedCrop, scale=384, backend='pillow'),
+    dict(type=RandomFlip, prob=0.5, direction='horizontal'),
+    dict(type=PackInputs),
+]
+
+test_pipeline = [
+    dict(type=LoadImageFromFile),
+    dict(type=ResizeEdge, scale=384, edge='short', backend='pillow'),
+    dict(type=CenterCrop, crop_size=384),
+    dict(type=PackInputs),
+]
+
+train_dataloader.update(dataset=dict(pipeline=train_pipeline))
+val_dataloader.update(dataset=dict(pipeline=test_pipeline))
+test_dataloader.update(dataset=dict(pipeline=test_pipeline))
+
+# schedule setting
+optim_wrapper.update(clip_grad=dict(max_norm=1.0))
diff --git a/mmpretrain/configs/vision_transformer/vit_large_p32_64xb64_in1k.py b/mmpretrain/configs/vision_transformer/vit_large_p32_64xb64_in1k.py
new file mode 100644
index 0000000..73dae6e
--- /dev/null
+++ b/mmpretrain/configs/vision_transformer/vit_large_p32_64xb64_in1k.py
@@ -0,0 +1,27 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# This is a BETA new format config file, and the usage may change recently.
+from mmengine.config import read_base
+
+from mmpretrain.models import CrossEntropyLoss, Mixup
+
+with read_base():
+    from .._base_.datasets.imagenet_bs64_pil_resize_autoaug import *
+    from .._base_.default_runtime import *
+    from .._base_.models.vit_base_p16 import *
+    from .._base_.schedules.imagenet_bs4096_adamw import *
+
+# model setting
+model.update(
+    backbone=dict(arch='l', patch_size=32),
+    head=dict(
+        hidden_dim=3072,
+        in_channels=1024,
+        topk=(1, 5),
+    ),
+    train_cfg=dict(augments=dict(type=Mixup, alpha=0.2)),
+)
+
+model.head.loss = dict(type=CrossEntropyLoss, loss_weight=1.0)
+
+# schedule setting
+optim_wrapper.update(clip_grad=dict(max_norm=1.0))
diff --git a/mmpretrain/configs/vision_transformer/vit_large_p32_64xb64_in1k_384px.py b/mmpretrain/configs/vision_transformer/vit_large_p32_64xb64_in1k_384px.py
new file mode 100644
index 0000000..82e1619
--- /dev/null
+++ b/mmpretrain/configs/vision_transformer/vit_large_p32_64xb64_in1k_384px.py
@@ -0,0 +1,49 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# This is a BETA new format config file, and the usage may change recently.
+from mmengine.config import read_base + +from mmpretrain.datasets import (CenterCrop, LoadImageFromFile, PackInputs, + RandomFlip, RandomResizedCrop, ResizeEdge) +from mmpretrain.models import CrossEntropyLoss + +with read_base(): + from .._base_.datasets.imagenet_bs64_pil_resize import * + from .._base_.default_runtime import * + from .._base_.models.vit_base_p16 import * + from .._base_.schedules.imagenet_bs4096_adamw import * + +# model setting +model.update( + backbone=dict(arch='l', img_size=384, patch_size=32), + head=dict(in_channels=1024, topk=(1, 5))) + +model.head.loss = dict(type=CrossEntropyLoss, loss_weight=1.0) + +# dataset setting +data_preprocessor.update( + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=RandomResizedCrop, scale=384, backend='pillow'), + dict(type=RandomFlip, prob=0.5, direction='horizontal'), + dict(type=PackInputs), +] + +test_pipeline = [ + dict(type=LoadImageFromFile), + dict(type=ResizeEdge, scale=384, edge='short', backend='pillow'), + dict(type=CenterCrop, crop_size=384), + dict(type=PackInputs), +] + +train_dataloader.update(dataset=dict(pipeline=train_pipeline)) +val_dataloader.update(dataset=dict(pipeline=test_pipeline)) +test_dataloader.update(dataset=dict(pipeline=test_pipeline)) + +# schedule setting +optim_wrapper.update(clip_grad=dict(max_norm=1.0)) diff --git a/mmpretrain/datasets/__init__.py b/mmpretrain/datasets/__init__.py new file mode 100644 index 0000000..e621e15 --- /dev/null +++ b/mmpretrain/datasets/__init__.py @@ -0,0 +1,62 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmpretrain.utils.dependency import WITH_MULTIMODAL +from .base_dataset import BaseDataset +from .builder import build_dataset +from .caltech101 import Caltech101 +from .cifar import CIFAR10, CIFAR100 +from .cub import CUB +from .custom import CustomDataset +from .dataset_wrappers import KFoldDataset +from .dtd import DTD +from .fgvcaircraft import FGVCAircraft +from .flowers102 import Flowers102 +from .food101 import Food101 +from .imagenet import ImageNet, ImageNet21k +from .inshop import InShop +from .mnist import MNIST, FashionMNIST +from .multi_label import MultiLabelDataset +from .multi_task import MultiTaskDataset +from .nlvr2 import NLVR2 +from .oxfordiiitpet import OxfordIIITPet +from .places205 import Places205 +from .samplers import * # noqa: F401,F403 +from .stanfordcars import StanfordCars +from .sun397 import SUN397 +from .transforms import * # noqa: F401,F403 +from .voc import VOC + +__all__ = [ + 'BaseDataset', 'CIFAR10', 'CIFAR100', 'CUB', 'Caltech101', 'CustomDataset', + 'DTD', 'FGVCAircraft', 'FashionMNIST', 'Flowers102', 'Food101', 'ImageNet', + 'ImageNet21k', 'InShop', 'KFoldDataset', 'MNIST', 'MultiLabelDataset', + 'MultiTaskDataset', 'NLVR2', 'OxfordIIITPet', 'Places205', 'SUN397', + 'StanfordCars', 'VOC', 'build_dataset' +] + +if WITH_MULTIMODAL: + from .coco_caption import COCOCaption + from .coco_retrieval import COCORetrieval + from .coco_vqa import COCOVQA + from .flamingo import FlamingoEvalCOCOCaption, FlamingoEvalCOCOVQA + from .flickr30k_caption import Flickr30kCaption + from .flickr30k_retrieval import Flickr30kRetrieval + from .gqa_dataset import GQA + from .iconqa import IconQA + from .infographic_vqa import InfographicVQA + from .minigpt4_dataset import MiniGPT4Dataset + from .nocaps import NoCaps + from .ocr_vqa import OCRVQA + from .refcoco import RefCOCO + from .scienceqa import ScienceQA + 
    from .textvqa import TextVQA
+    from .visual_genome import VisualGenomeQA
+    from .vizwiz import VizWiz
+    from .vsr import VSR
+
+    __all__.extend([
+        'COCOCaption', 'COCORetrieval', 'COCOVQA', 'FlamingoEvalCOCOCaption',
+        'FlamingoEvalCOCOVQA', 'Flickr30kCaption', 'Flickr30kRetrieval',
+        'RefCOCO', 'VisualGenomeQA', 'ScienceQA', 'NoCaps', 'GQA', 'TextVQA',
+        'VSR', 'VizWiz', 'OCRVQA', 'InfographicVQA', 'IconQA',
+        'MiniGPT4Dataset'
+    ])
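The `WITH_MULTIMODAL` guard above means the multimodal datasets are only exported when the optional dependencies are installed. A minimal usage sketch (assuming mmpretrain is installed; `COCOCaption` stands in for any of the guarded exports listed above):

    # Mirror the package's own guard before touching an optional multimodal dataset.
    from mmpretrain.utils.dependency import WITH_MULTIMODAL

    if WITH_MULTIMODAL:
        from mmpretrain.datasets import COCOCaption  # exported only when the multimodal extras are present
        dataset_cls = COCOCaption
    else:
        dataset_cls = None  # fall back to the image-only datasets such as ImageNet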
ztpX-qwHQ)%-qP7X0&bc`#IB4dGg7qDyMYyjKEe}(gk4Th?1dW*$T;N;$UE?$6yOoT zV;<086mwFbF=Te|5Xx=>&KX%^>KXI`@D^|~D+fjm8(-jfju`oA6N0CxkP(G;$%3P2 zZhvvAgw)7+0cUoxp@X3XCg$k1+f(6vCsX0P{Vj;vcV5~6LmtDc35;OENa5!AX#zqK z)lwe@b1)uw2f+bCR+h<;nCZ7q@^fCg{=m2ROCXLF4EzIF1{|w1?0A4Y2qdNO0>Wo$ z<}k0Xyk&gj_U+rVUKuC}HWB84<`cjBF&-CCv(GzjcW4EUkKwd9r6zn5JyT{a>lSSm zD#Ltu>;~)?pb-uLNpKh^1yPEf8!$K~je}zH(2{PyU>26_m$(ax*f^G^l>sEQ_G}Zh z_+(+=ZR^Ci(r)iO*x!D4_u*s0`#BTLt8W(db`xb^Chre_@vjA>_{JoH zC&q-Uq(UAb^Mj-+Z`0tNnBXLfzDYR?ok@KfC}fVG7!$lrDpM3oDpPMrsyS0n%9%ZM zs2-U7sI;JGu%|?Sjs`W69;>n*(&EBcKT(x{r+$_kn0^8}Q} zQv4sO;_G-yxg|T2ETg(5rO9Md;AKQhn1#_=o}bx}S2&s$=yl8^j?Q3!Ogq=eC`NC- zCj+cS30ThQCFC`wfOl~eP_d`(RRHdacZfO3;f&4ly+_-7dk^-GCOp;)qf2=nv;J2Y zBu|kdBV#1iPUShmQ+UUH6sVHTKi+Zrhzb2Z`XX~LZUTqC#sti zAm7>hOt*K4Cvu&;r$mu-yQYjb@^X_b+Hq*R>}B(g7j}?T-EGb$l72WMB9u&2xk-3KuJTtk*7b!33Hv{Gf;S9I(mP)lHk2Yz4UKhQ8J#oR}XlsWH9IR0WMul>E9@cJ4 zJm4s`r)iwbtMdt7b9Z#{2SIk!20_NdKDS0Sl5RFTjS-DXE-Nr5r&LLu?Aj)rl?O{{ zrcjhMYob$MV*b$a2K)iln2Ao*8+xh=nV873P_N$B*OinRMrWM{air?g)Hr<_CgUim?rO3aTEC)-jN%y z0@HB{U;F?;AlB0}`mu~%CvqOs?%?WGaq+Ee^>dOTQC&nGm z@ei1D_wO)E+PS7FV=_#w@JrnhG=gR6}A}|J3+obosTc%n5~J6J7U+3~fUiobpt? z0`EpqAdOC}je!|tfM@!-@^j@wjS7IEDBl+79tK%tIuPR>`3`78ZwNKgv3Og?Y~vgs zWmHA>R-T$DNJ`Iiu`B14Gdg&WGQ>PC$hXG0Fro4M-WaIQD$G0umQjKR9`UW>tUA`| zQ%|Eda-X0OjwRk4IFZ|#DJkg4Yln6NKMO*&^rWWoPLBcIUvH}Xb<3WoQEZRb5~V!nxL zHfrJ~=1^TlzGBANZW`OJdwCax%~&smtO^IB^|K5rka|mt*Lduw5irmZmr_~ zVp)|=eXW?Kb5Pj^hX7ifrA{(4Zh9jfWn!vX26~eUuk;*=sDGX{j#d^vdXNoWdXgUW zPm4CZY!swC)54W_ngGQ6xL=TnXyM&I<4W@}Z3SD`)%p5uCi_NpS6n0_)a>qN#~lv^ zfy-`VUHlOL6Vs+s-hoHDuG#i;96@H9hl25;2`Z=(4GL$%qJ~m;u}ux(4CyfRWHF2* z4}lL?A|49bM`{?qL3eklp97{}OLMMfv@|T0#57)V2R96}A6fDQO>TW$nK-L;FvBFZ~Y~(7$U{ z?N`gS`Y*NXrmE@bWmMLxMy;bZS_)BW-!pJ`^GC0A(G?3{}xmu8=ln}YY?onj~`(#ivn?2IJs oz!#uYc{w|oMiT_wwDT^(P(%T;24`7EHe=xbvSt)Y)iw4103>y+V*mgE literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/__pycache__/builder.cpython-310.pyc b/mmpretrain/datasets/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53d8fe2d075e5f34f71c9e184c5d8e8ffa484949 GIT binary patch literal 1008 zcmcIjKT88K6i<4sO0km;j@jA+xwDId*kb($f)=5jx|FlYm4i#~NK$CQ(GTIMqm$pl z4{@uLUqKLjFCOi6&_U>fS(ymknA$3-zpusDIC_h90L)0jv) zpp@_ucOJ-~#LGS6RzBdAxHQ{3@ANypi+*oJaVHP_2Xb--L!+J{PxPKVQ-IRo6*Nw# zQOp?+OrVAFs#OMYF$-iG3w>l)xYKU8S*Q}pq)e4CDu^atj?pZVX`&3fL#AQAyMmOH zNSmQWvDRu{W0B~29x5>&jZDMLQ_-s5bgz2-`o48H1l=&2wdyF~TSgy_i~wetaM8L9 zVl7;E!H1Sp6-`W|Aiq35|G6=<8Ve8_=uYLGP%H_V&db2q;Q_=iS);&+aiSuq7s4T$ ztEsovITE7xpQOSq7PRO%_X@XKU#0qL`e&g){J(W-uDecdN#VoDDz@98@10RNej%y? 
zo1ea~lEnDS9xeW2#i7NuZ;?WI={&2g^el2{WUub3P55+OC)-()c}k3-kJa2duo+gi YFl_)T+d77RoW`P!k589j+NRsiH@^A@NdN!< literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/__pycache__/caltech101.cpython-310.pyc b/mmpretrain/datasets/__pycache__/caltech101.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3820d20b18ef49c97044e2bd51efe20d185f103e GIT binary patch literal 3901 zcmb_fUvJ~a5$CQbijrm5$mP zHMH6mJe#~4)Iz&$>t`#dhmCfF7{a<-cy7FH@EW(D8r&B4MdgCDo4kH#v>IQ+3Py`$ z=6yd=Et)O&g>rj-Al#1EJrxnpmI9b@dom97caXLBc8+!q_m2+0B)Rg!ue?O;c?w1a z^EY>Q?jP;%zH?{ej=Q^awEy;lhwttmc5$WI3%$fEIeX{!JJ;E(sd%D;LKY0DA(YAR*)P;51=uy7Fn@`Pl<3qzV+6&;+2kxbGo@jNATgg6x~?z zWIY_qlgF>$ebU|7+GR9`~a@^CHeTXodqRlEjarHN7NShtM(Z zgIN{J5lfUDcGXY{<|oWcST7EO_(@X0ok2LKy2ZA(a;FeN0r_=^f_4kEJh?8LFLI1x>ZT<(TX9YaRapvrl z&~r4Sx%sPTMx*})&1m#((Oi$T#JCehvGNpb^LG)|&{N;w?e!?~NL)WxA`FG{=8Tq; zN_u|esI&QHOQT98>IH#v3ihr@*53vKlAA#&1SGqsSmH-rd=z^dW5r$ra%gR_BT&-sQ`RVL z&GW;wkaUQ(jWrezFgd-TuvT)37FlCSPKpUz(XOoM(5~PftW3=zv^~-Dh5@i=T#6Q3 zzklY10|@BUi|y@ghDQmThR7+FO_(2Q5}u-;IFD)?gj-^~H$&#HO=5Fz6(7r3BkQe# zADC44R<-_W>zi#(X7&m?z6J=odoT<;!0U03C53y8{e0u@#zga zvJY`r!Cc{+7JJc;iGmIV?7{yN2~dAA5Hl7hOMQzt+R%)Ek%L+Ck&*^zJs!iS-_|6y3Es&(A^t`4_ zRZWcxdSda#w3?FCN^89NiJ4aZLjFjwg~N8`Kq9!z#OfN~#?sE~G#mosbp(qexO;58 zu*Nz=#h^)Jt6!1TBj~o4MdWp$Ujth02SUYBW+KOo>pC<@&ttnJ>g?s~j{KIH*+4(K zy^~GEv7qA#B-ZoKvN~$JVa&y6Myn zLgrW&ymSU5`6?*B1$XjO7;M@g%f$TJqD$1K%Vdd~)Fw?zN&aL0lU5u57~hyyy;I`v zVxL=Hm`&Ka?!uisgh3gw`{aV0lfH3IQ?h6L?uX}ztEmb3 zVJR!MR4s)pAdls3Fv?n>Ahc@z4v3nVReJxa+zS6D_lUvwyVn?!j9R zvI-pgI-}(pOh^ocTt|_`qEfmd2taMR9X~=5h69$PW;}*4Vy}G1l4qA(u zrA7lF5N$TI!%#$hKN7mmbv9);rsd{H(TAOtBZ+m8H9OGXmB*1pqRkppvd`W4Ualf! k;k90O2C4?lP}P1jkIOdJTmt_c8;~-z35nArR^=7)ZyCya>;M1& literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/__pycache__/categories.cpython-310.pyc b/mmpretrain/datasets/__pycache__/categories.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3937fae49bc0815c581ef481fefa01c02d51f99 GIT binary patch literal 56316 zcmagHdypL0eeWlmz`mc@#hZ^2Nr?i9C7vWm%h&eq;zbfm!n-R%>fD=2?@aH^U}vXC zJu_I$Npf>#Tb5;k!-957am5_^`^X~Kbo!|SMjwdI_-p7Am`oI6D&3|9`rT6`dB7^_^5|Q8FU+XH5 z^814Km47MtrSdNazg&JOc&NND*jL^k>@N=mL*?OMxI7Y!lt+Wn@>nod9uLOL6Tw7z zGMFq+1ykkeV7fdL%#>%>-gn}_iTU1W7$tGD9yX%h@J6lFE!CsmzKvQ^<{5|W-q1$e zZj<)VMyJ(#s1zo8w0rwY)v%^;ywe*m*BZ4(r5$dlTsf}v_E+Pi74=59<93{E)V8AD zP%}zeaicfhq`9c97t%$_V%}vu-52}wr_36jT-e%{LjImrFvM}DsILx zb&8Aijj)}>tyVB|E$p_!#z4yS8fCfz3A0y{IKCAXH){2|>NaY@)N&HFtf9_UJ!%IN z*P^i1E-w1m^z|?ax1uOdm|d;b8gYr~X=P<*mXc^EN{ZGkO;RwwO5;0>o}Yf|^lGO? 
zZ~6&`KNi*N@lLTCh2=dZN0#D^xL#{)1ye6I%C$;dTZW3tgvP%j5K~)==-W z0wc>|!pw*DV0z8ZYH>MiTdyXrBw;s?&8IKlNHktrSgwZcVCs@0Nu#(P@30KP%yPBE zT<9;C#!L*TfAIrl&ZnR^-j{PM`>*4)i`NaH4&HVwbB;dIj~p?%Qbp) zx!wiVOJoK|t}uB_MG8R!IWr!L22u{aRBodtQ1pTR4EHwztCg<)l^#+mX%gbbkn)`gWKE!&k!cPFSxqlx$|N#P#x~4gbKU z4m&<#eEqb!CH8vOFXpYp1_{P5g$ZwE2T0|gGTKdN>iOstn^|l$!s6A=_BK^*368FIHoC=jHDMm9GnjQ?wX>;qNl~NtdMySK zMXcC?YcZWj0psN`ssMA-*JBn_vy>)`*qpu`c7yTD^*E`OLvp&osBcWc>5E~qRgAiH z;pI-F6bG~GtTkJ!UVJ6%>ZI+9JUY7+)zxJsY_joq@r-Y9AZ6Otdmv#}leV|k(HqrT zdteC%N*k7}M-^YA-LCC`X@l_;q^{9tHf30?^@O4w-8;X}epIqdY)$5-OgRyOX@S8J7OyBhA$!m=7FzS?P*1nabL_`c4Q z1(F0=%q)fVb`AU&6<4pY{uY{8*B@8$u{Tp&L(?2m{^Xt zA)>|Ake4%swl0mxpxLL*<_$7)t#+8Sfublb#idgBb})M_W~mYptKwQUfqn;&Q{g|J z-7INx2@Ze)%`g$xPHA<%1+w(1lxAje=uEBIWKr^(^Q=aSv+z=#4YMwHE4Fnn28+pR< zSED*y3nLDbX0|)?`fK4W&`Vg&i%+jXV8Qx%!o-?(byi_wolT#|re2R)gZ(||mnoxH zS*1fh`GIMjUk;m~&pPZ7E5ZuGL{x%_rLeJ8TodkRNr-`uv)JS^Xmc}}*HcYW!%8vjy;t|%XtgVpJ zi*`#CJ5M_F8Z0C$m;rcJ6~tfP&Pz=gSCHjRUq>cLYW+s$uP1T48aHA9exQedc&(w0 znpK=#j~b1r)ndP8NmF3T!6I`v;9!d&wLRlV7dopnpL13>pzH97NvBl}4ql5pwH83H zDd{KAEXCDmTYKR~cw5sPOfE;|5FDLGAI-5YP)*m`F@Pw9-xO|6g1PJPg~mS&BzoGG za)1NrhXutT>@AZh1~LvRBi#f8$uk$(PRv>Vb>mCX4jgasMgo->Uk*!cxD<;_B3ei) zVSLIP^QhQ|SHcAD+`^-)ygsddL&6C|Rr$FT3;4-XC-M* zv^g%vjRN>h;ab>okG0_`ldRB{uny&ml5$)qb#B#=MBKyGP{>b{c(TwB`x@@)s2-Kt z2rLDptdj*;i0!60r;~+7XM4kkkYB+|ac`t0_@Z2IpVq4PP&I0}qj5{ssj${gU^HsH zIoyuwjQ5ERS~FyHvH*+UsIZ5^0>r#s+svZwQ9olglMJ!kfv0F`fIj3-R7E-q#_pdi zu#SzWZIj!K%VCMx-HvXH3E_RyH^nBNEVLS4G;<%WnHXBw@M7r6LK;sS%W|RoCkxkE zET*dw7D`@e-?NsZUi9o}C7{?MXCn{D@0%ggL}MYXrMz0qy8;rfB) zQU@WVv+bS=Z^Ie2>F0K*TxfQhO<-=IV7ips?il^8Zzgf&WMLK51U$4OgfF2fH{S-| z6nv(F3ktel_Y^lx-vGxH+Oa@BscmdT+zoa(;m?R+# zr8}|_-_Gk<7|A}awW8gL4J}x?Pyx@z?#Xn0YLyOi1B9o)FWGMzb(MIAKZn%38}E1xGbCSB9W0#MX0c%HIC40?4>0*}+mqrO8ZB6;SGy0Wt#+ zNh-D78F)8|_HmMybMLh7Zt5mYH-=l{?~BBrOEqYqin_4r-mV+6r44cNF9xjQ3!T z-Eof#EoXP^77&*<#e^Z!W>v0so2?p1k-ZP}e7qB~<35BEmwf~1FhQgObp6O+hWh}^ zuvKVw#a@8{6iK^s-O>uJXd{f=VW}wvNimd)#SCwTg$9q^aX;W~$XB>K+KK>AO}|@2 z7mVV_PYcwNRRWG%keqz7ps~ITq68gGOK7{p#s;_J+>zq;WZ|0h`9xZ^u< zeKUU>n?*cI8t?7njayJ+!~g)fEaYipLE{Qj(HjM`p{hi#gW3c7!m5^YvVbChsVi65 za!YX|Y=s5bA&83hSkp7|;2P;znawOAubdXxge3QdQBNAXv>zz0)p=>wv^$ky6u?b{ zFhy))u~?E;4KgDR2et$S4x!$DCLr8#zuM{`19p;yl2_pZU2q!h7~bcVP=Sa=s;e4M z4KEDs4fB-Vh}3N|4)lglG@?%19o>MS8spg;5nM@s*c-3*Rf4_Yo3w%guQ$|Il}2wr z=#vfc6k3jjOlpJ`OO;qB3({NBbyzCnl6w0qF?a^LZu8&T5fCnSE5fm?uh?Un?zE5` z-1{RYwk%=>h&8$Z9$H1C7Fo#Mz~<8KHakFW8Tn5A?T1*!z1a(1A3Biw8WJD4A*puZ zJJp4TL?_nvuv7@)2GhSG>8P;9rhw;;-KDLV9dZ(az)t}+FxV%{OrvV!R+L8AP;4MH zb08{!A2gS+3wlF4F({?o8@&lEb;Vx0d7&3}07wB~O9Q*(V5FKbQxewQ;Z24s%nzt( z;BXKqYui=#IMrD0G8MCsQ3&BY$cCz5-HI6`_Nj)TZ(pn4srYv)BsRZX<6RVS&ol3B zAP9#>>y>9f!7hSA} ztu5;$ZTd)~@L>KximYUH0YW(vg4%Ak7zrpNbeD3H4fgb`N+vynHPe(8Rw(L6uS&x$ zU}77j63JB$!FdhgSTIkUIkYk68H`}Z3tNN?F7g?-boar2`kt~oU~&`N6exFh$Kgn~ zYi*mI-cYR-BPO|rQ;`#z3C&rq0TDGO%Kzqja&IowI0<{BaQ3ms2O|KXOa!ref;ody zfu=iAf%geyE`h>4ckFdI?sPtbk+L$$#P{UBN+Hq{LJee!bRke#!8{kYs?$`1zlrX4DrbgPQ!?Y-#+X9{&vEXGa zlnC(n25_=7!$}?Tg+(gb((dc=HD4xmw}k_96-f@Qb5u*Q;2`QQ@DkG5beH)gX*o@ll;g>cL z5AzYHvFjjKG&1gCaOX}W39As*x2)>PJme0`iy}!=E$j#44x*CiY|*(&h~fl<<2&|< z8Ku-WbSqiCJlGq%!Lo@xgpxdmF<}$)Dtk@p4k;CKcz7@?U(}5paqKPh3+m>{c{S`> z_Oh%+$l-k{?!Z!11~F8?Cs`>swM180$FxCr;1!^(>+x)sHtCloi^^QS8Q0N3qyVV| zG@;czVfJHSeLw!J%;6no19^aFSwNAO#TR7@2M9R*tmv_|D6Ds(?^z0pqBO-FTT}yv zIkHez%FUN(9kgdZdFp{hkc4P1kdVFMgBN*UEh=2BHKZD3@9&A{<&H0*X=Jjrh+@hu zk~K01WvanNN~cw-!fH5Hf7;9R{e%Oc^9D+65OiMXgrKQET?|hqPeE7Oc9s&k$Jc8c zk#=65174Kx4r-unBq#8WJKn0*w{rAA-EO%7g0)mKv+`~(#g`IQvc%rdR_7K<`ra4-XKG*E 
z$51Yytcd``sNNgdW;ZlY>_G^aHxz6`>_F*{N2GgdusrUO*O0pN@yN~0DBMQ8Ar+lF zwN{PV1~ahgY2Mx>PZ6^;iGA|Hag!;!Nu$p!#bz=9t+oo6)gWw*cABNO_9XYlR@o&x z5$dnRE#{zsej3}u$-;dJ{{3kp!UO6ppA^HV)>IQ&2$b1i)@c7ge{<() z$F9hh!c4F2pgSvI#A`4a4cmkBOK_251hxlKd*h2xDjd1Qo%F^pHQ->EI>t!tN59Hz zCWKjF60ve@Zo27gvu6oYm*M$96e`91?~YjygsmW>YrMN%(0f@zc6}apvzJ<1xLo-0 zB5Q7S-IJ_246UR9LZA+XGJz$_1HQmEwv~LX$HBA+d(4&zX!OLO0ChBlDmJ0+X{cS) z$R2Z(Ky;O3gir@)H+BQzN9IJ^5o|(vV#c72V7L{P>jVoDgJ1 zR=nu}16d8%fmX{si=zzFkaDSFdGer`WaC{CT+zMOJFv9wS&y)N#v)=rCbb7jAwsaL zkP^4uv%-A70AQboeUm7o#9{jgk{Hl=suXwe9A%+W<&v-K)f9VPBz;PwpP0XGNJDdvB0w;6n3>n*Rgo99pKITMI7MQ8wN# z;H4RB+E3cbSdK|A7WxRnb(g+)wM!f(s2fa!I)h)Ylson#L27r@n@<%Szm7wPNEZ<| zzlLU3ID`lQ?IUjDQl;C}7PK-~QJ+EWw0E|leUt?5!9&`2DLRtWLcXwrh!V1dOh%YP zy_FNqj68+hbiLbP4)PY<5o{Ug3E4~6eU(reN$ig930XHWEv4DKsH8oTl_H7?_UW4; z_MgEQ<^^~qef95G*gfw1XfkBHDfLyW4eaF=c$GV3vow%M9O=NlAN&bNgz=Yv{Vp=+ zaie>?aI=H7Nq-+>7w^EenZ^ugm4y0{1Oc+(bz|;w&NX}8U|Q1jgg%@W&($iBW(H@9 zHP$H8wR~7q#aDpDgh?9ql~-U&*wETk>r!?5+K350)jfkZRU6!Hyr^$Xg=~wT?J%Wm_<7510zIzW%+&x`LhSwes#bN{ULF zvpbvQuHtDDTFXe12X8VOdn}9joA;{lopD9HzO7c?^Agd*pHh6M}ieZF!?RJDU?@OMu0nh>tSz9 zWAkrBZ-m8AE-RW`A1css*Yy@Zg0g>@&ktHtq%c;&v>i{uYE+5 zO4gElSZ?BYH-mYy&_J#PsKhIUq?1u>cTAxUoyBT>RN+#ml(Oj%7IsxfOpJSa2hxj; zYXdDG%Ley|um}4(7*4Z$`!H2S?%*Q3Om;c{n`I6ojGCHw4ozRF18>9nRU=F_7&F}& z2tYv#ktFi9utNe^qxPoayKY8mqpWh+z$~xRArU^NvUY-`7)P@P1TBZRq$-U!!fx0b z!r`4}d;494g@S$_l>~%uF!AvY;;}-A2Z)&o$3d`B)!^WP0@bLI@_$^4iLc#?t>-Va_1lvkrDV! zsa#J2CU)ik042mw39Lw6kIARmK|fAQZozBe{5c+Q0T7|`>2FqC>T?vpyx25a?pSN9 zBW-W~M%xaIsD;PSMhNsK9A82L!mpqnyQgt9!?Q(Bbp<0$ScXu9!6%nx_QGQYhxt-l zjt1@|n*`M_qO#Pi1k)d-5WX&Ag)O{-hXkdMu%~zhvx~T`07hN=b&s=!nRrAbAkD%? z@Of8U!AnE>WOvZizr5&b7uy8_80>GDxEBC1Ff(uKr?QhAzO$n$k~pML&lI zJ1DDZ(Ep|qi~Eoi*V+V35TxA8G0^=4^F7QR?o3{{EwMS=KoQy-mT8pD*Bi38B<`?S zE6NGMSRiZr@g~aZxW)OEyxmt(uE+_7v0$YJToB8_31}kR6Zi-rCW>tlp7tnf**ai3 zW;Q&CP$Q*x)=wXGvu)T5wiM6o<&Sx@d0ui6;9?Rv7P5&OQ@7^cpCPZj2(l#|S1qV$(6Bm?|yaZ&EcOg`urxfb7+Zz7&Lp|@j)ZdsBP(*jV5R!as* zgXxd>Ek6W$3b!EyxrqHxX0%OQ%)l0I(QEPcR3wE5c8sj_(Ine*F4S9RvjxXko2mnQ;e3N9jW3b90 zUYuZ+O>v-Ew9;v~UzHW9X~3rtn?`WB)j;oSA;oo%!p3($Ca~Szk3Yg+nRnj1qe1>6B1(s?_>_#}6QD&#&4v607W-K?> z2D%Vqfe7Xy=o3She*z7Nk|sjxQZ0h5^x{NLHS%dB04lJ9xMM!1u5)k~j=VR@6Y@@V zv~=lImaz7_XB3lKi3O5&8{h(4)k7psAJlPB?4BxwB@NoIf%JF-{CV5LN=3}W9Yz|l zmgeColnsmzsDw;jYo}&s;}qMaCURms-T{3)rf|E1w?S4ukBE=jaBEC2db%C&e+akS zW(#G}G!=#iGOFIvOjg|d6$i>&oC^Hn1t)(ZRszpTj(`breNCFSBv1j&R4mdcJw7G@ zeiO8iTE2xyNSm3d# z!%@B`(m$i_fl0KE!A_0p{F&dbqp;F`{8&cu$$jls8c|QbZBpB#yTLuSnHnY-hHG6oLh`nhFYz=z=B>z+igf9eM5UWUcQkq_5fG zmzE?OBfLQakcaA0L7DJ}9}NhU5hx!7srDuGu#3u&6Ze9=tG0}}mzaT7#eTYydNAXw zc&CmCAwm^}2-RSjn;2BObcUL(wmgO?#W76D&w8?HacSC?!dsF#@S@F#6T42z?#@WT z9koG)XoKW1h~e5&guFZqDW6Ru$|u5T^RtaQ9`+!SsDA|EQ`=lvs}0BCpCn2wt;3cx z2AB;CsW2*Lde}y#$R^kc(e4Sfu~UFlVD0wv8{-eckDvj9Kw)(KKHM8&@0cohcZ?9? 
zv3!NxuMpPWU^)n8LDWS_ccBt5m}&)d+t$PTlBk`Z!Ksa4!jYVaP=4HOU!C9jL) zxf+fX-*#QByP?ZYlUV}plw2WyTFw^vmtnHL&q1&5rBtqqs_t?*+9rd z?zk2V*&Z>{&XlB4&Phb=-2=4_trCWA9K z1ysDOH@zQ-YG3kj=5TrE;IBf9EvD7rL>Xq80HeG(RQawI`CL1LwDpgz*y^X zxCNxb(J&sAuiC~y0$ZyEK(~Pix-Fr=usuzTm5?!DcD6eepu)l^8xZ?qtqO7pS}7nR zuA4n-2L%98od0UKcTvb~GfSR+8T)jU6hvaSKL#f0G{C)tnqk?UfI4lW#)H-w8`6&& zzoqStO;@N?Mh!FD85R+Hm&ZM~mq&$&B)5-~!YsAL0ZPKdvjHDifWRYSDO{o*_b?f@ zq+W@^@s4erRM^D|?9#^02G${PJGzE6g5MwcQDZ?qWl&P$Y$5Y^OkU=mlHCa+Y;(C@ zLL%g%hEMe<%1tmGiFG&Cjemh7eXC+>92to#qY_HgpM7s6#aT0HeE;AVL`Lx@U6xQRoL!${f(D z&1bYnbH=i{1K=vor2<6?08jXpo>UMv*K8FEgO&Rch1E=o^4uXxApFQ7*cw6AWX{2l zp&Eq(=!quM2UL$sH*hhBLOp^h$&+mY=s85d(+~uJd4cD{Cyb-vAW#r=RA+7gVC*xG zA;Q{DFppUH5T+E6h8D4>5E0z*GK;Oad2dI-FiN0n(R=iW#eWu-BpV;8kf z1@7X&R|^d2kHu|<>?p}o?4ap9)r&W~b9n-r0&~3_fOqGOa?xwmYs%NKg#zz!tfgO}4+NDvM()qdJ&8o> z8A#%0Qvh)z3>8ni&+ z03j1D1P9guTY}X6K6cM5jQI}KL&y$5rBM(D9-C>ThI0!74{+|OdT*!_i+$T2hLzaj zO}nGG*I-;>Dlmf()aylONK_ELrF=O!uP`av1F8wP(b#PF_DTA72e6#bzRsRQlr2Kv zwNxp!XkdFXWNLh{B{FJl0oDyHg@zYD9-rWzflWkhnj%@XXHjNQDtOPMu``TU>x}`* zU_AJ#$f1O-!Ovk5LLTs86trEXGY)`(!xTfP-{GR{7X;)!ENL#?Abz8ucZC7T!$yX! zu{1pmlkT2T25pxpGH#YAP0f#hbilwTDQsqlrkf=!HdsS~p=QiIOb}vy zSye0?%5lf4LQFZ~@5d52FtJ8mhOxm!B1J$i5K!Gb;U=&wIDw4l6dp@(1h}s`QN=pY z#4D@p4~5(za?BpB3Km{4$f9{PNs`-o`)YVxx_KmYZIf;Grr~MDIULLCEyPbP0Lk>Y z;SSy8q`g*2iY^NxK-ab@XsUhzyzV7pY&V4fnBw&Xxe@aAHe7@ScPDSwDzIK`6X&4F zkKC*^I9me2U~*;m8w?F}Z|+zpuEPMs1TfS;WG<;kwRCBwgv?Ha*hy@`_%N3gER0=Svi~fEoB1Z%1kd zmD!Hn0j)L#*qKN*7IAa+TGOUSWtx&rDyXxs#Uc7V)Y|M(){)C{I%Uw1NQbz#y@5i9 z7gN1o25mbK3MEX7rg5yKai>XdZgM>fQVD{NCC~{o-eF`EQyA_jusg%q3>Ye;5A8-V zF{ISctANRb$_cnROOe4Tok$dd(#jil!H3HSg!vE)TBrwiCovhw_k>1)Gqb~?g;dc5 z-y0z4Hbw?7SnLxP?~OtEOH~g+wA>Zha?_kfHCWid{U4nx%_8<8r;0KaG|qvk z5^n>1Xeq>(?2gD_3%JQf0sn;xhCUNW_r8KYhZ|TFKn;N0HpD3o_|e_*?b>Y(1swm7#6JFTH&_!?ZhQ2VB{NCA=gRmIT!s zYG51%9*%=Y*p|YxStx%?Zy0qph8}$BF#+-VDl#~-Ko-EIysSTL^Ag1di!Gexp z>@Bdo`dV*PJH!B{Hx%C%-rpUMZ+9_}00r!fA*qB}syR@pDO1D@3a<)f9`(Pbtq>|x zqSsw;h6JPvf8HB2A0_i9bB{DR11oX1*-d|MK85OtFkUXG6;qLwVz?S=2n5ho$@j|! zqiY(KeK1ijre@?7Ez=O^V`b$ST)GvcTeUjv(s; zjQ}Sig@h6xvKu7HiRwThR+f1c1L90&LLoT@b5u{M9Y~e`l?=;r?X!jt;d@X)kOiZX z&n+=N%+v#43_`j|mP^34vpa>SLm>^ea%>!Buh3Y4Q;I~iP5Vt*oE`NbOzqU{0~B$W{L0yagz9*(k)&J-KkUo z(z=cX@;(Ccyo4gb7>dRGz;*d^#kf9kMh|5$X6Y>98jzQ!GAAMjS9r=3475R0KAfuB z1^KvcdTl)5BK|&&r9a*SZH12GD!^2seQ_sn5v}JUGjk~~?JSNl5ydeYGih@HAH;V* z?d)q=A$Op$Y`1f3o?rt8!)M|+vO50nea>#cR^zjJtbER;1E&oXNV85 zs6oyCrX&$JgNKbW?8oG~#}s6cAatl6-OPikj!b!6+mclM_$^FG`MQ0^sFi>@$B-sptle=>fgkDo!!BeNk zKn^^;&cA10f3~*|F{{2iX|->su;k{6qD27MXb(0Hj{|&c@M47&ieG66cq9)fE?-{o z2eF z5#r70p;~a7_;c}Wo71>EjM2Ld&hxw`;Z&E1ClnhmM7f1`hIFY@tq_dKBjwk)JwX0v z=obzX(hE5&P#M%3@g8;;I}3hDmkkgGt?Hlz8XV(B+yQCxq^($9T{q2Mge|}{e7$k| zVZTT~nZxiQ?*W0If(q?UrLKFKl4=@(1vwK!1LSM$425&@Jix6ABHUy_ubtGDzVXf4 zE;Z4?Y8|}QaCb6K5Fus`s!(Ux5`MLdWhR`;!dxB$JJfp!+U-Vo7`fh^K-Yy)-Eor5d#u&jkd!2f3=g2$ zqd&4le`LX?@}LAPCE;F=8PBx3ux`UG%v~sz=_$n4Y`v86A=z5-pWDhzt<;ue-D0D3 zIEdqSB0+Saq9cT)^$|weoyDCSjMI{J+fS;S=86Zc;!eyHi39M&^$uKu0)g&lCUv-( z#+Gq^URB}a%%}W!fkrzD4c^SOvG*r{(R~s_6WgifG+J!B0rwjcHW5scZI0w+LGX*I zn=sn<$$@l2CPzt6)6d|rFKPgxZ?mZZO+XiZT@joFYoQ-j2Lkqp$Pr$SOo7s~a4K!n z4BQzGW`=?f`s-LJ=k-Xq#bRr9nfeH+)sTkrnKIP5X+m$9N#a7a$bCTWfnFg`C{@k- zG$D)>=W(M3&T@@NhaBMe^%9zhO(1m7wDI4=izmf^X-u9Q52onIGcAxqf4JNnzuPMF z46WK0i-pVum+v`KI<50P+MYWQUePOF*%ihl0F|e*ds*8=rch&Msp8Z1t7IjBq%{&~ zFtkb!FfErUiF?Gy4L4k-zlHk}SG>R>(LsMW%hbWg<#VOW-_?-Gq0+pU5KhYMQIgm? 
z5-JJdDz-btFB)KUoQ4+M$#i3y?_j=3y||&CLOnQfmS(6uAck3XBj){q{~*`X*i8Y9 zWp|EiN}?T(88F(+Qq~)R!G$$J8$5@SAnmI3VHR|UIxu!<#ZMN}0DBz8usd`M2QJAE z{@09zqM7?RVuLE>e~`VoJ&vn(&~3OWnSW~ltiMyE zHwIS%MiP+fo~BLsse+V;yJJFr-lpU}sIXWyb5O$^;N@(INgyL31SIMh2^*&bG>CF0 z1N|V>lPj}_J3(W`q`fpMWVt(jgBV`oasr_dX`NCrY#)&dEyP01>vOsF8Hj7WvoK26)|^BtdciY#M?Va!)F;&MbpM27bT=lCsgs%UjTKDLs>0 zr9`;tCJ~C{Nx}_;{7Mo-*Fs+c4sl1 zccjGTsQ&DnSlmZ1ZMh(>@Fmhx2UU3(ZYP-eKqp zcoQYV8oJ3U6rFY6LHaP1MJ=4e{U?%y~3RNQ)m@IH;^w|&zWY;IVrgG!&6!kActS>d_keq#xkL`(+0+Jo7!vnLI~{+ zarJwHgR&2CR#Hw=8zRUD%{>E%Mo;XY+@G|m?SzsGF)`Z2WEgdk(NM6neRY$Y(3v@C zqGEm$Ih~1vI0BS{4)7z0RUzr8AQr@7UJWUV_4T+faZf^$A+B;Sf%ZwgO+r7UdjN2W znYbmKP1F1`M$o)ms^lq(lc(`Ez%~e6@s!zdvzQHI_Kg@imVqz=Ly03JNC)g#N$8w! zfOlu6!&)>kRu=;qtYZn2s|ctF$K5PM0Dnxl25!0*8Zc9t^!;oFO%29DBoe1V_ngi`~#mG;uAvHS9LRKZx|yy$ELt)@aT59A<)HEZH+M) z(oSo&gNHaShGiaiz~r_%z$buYoEXV2eACf&@ATB$CmUoiCL?3ftc%zO{n=cy03Ymj z{@&DK9Ptq@`OFYzn*d7|D6S|Pid9Qr7BbdPM@zlegv>O&Gx?5nA6G9cn|zn3kS#M( zPCjW|L;i>! zR|H8q5H5$mMoEShhdg8N6l+DnLtq+oMjUrTUPCXw74U=m6)~N4f-WUMWSoleYtb(jz%f-fxWSl9S5?ntY@8EnMm@Az{9i}G-r_DyNPh8lEF*kC$ zuu$Mmnb-}VAtZKx_$&eZn%(F*1$17^jhrVa2NyT;oI)TFAf&yI9(r}|M z5H!yhH}WDubZ`-G8BZd$p(Tv5V~Y*M6!CHG6aNc48YN-3H-i>YDw85$h}BVbjV$yg?6gGcz0jM| z`NK%QOGTMrKq@IBuuvq)p|?*8l-@Wfu_%W*85qYD@w$uNq--@vs@*VDEd^_kHSJ2WlMaQi=!`bhuU^fr-*hF03Q}2(>vr)p6FXc0GPBWMQ$Cn zVCwnZn}L_-a0B#;QR}<`-2&0U*cbgoj-w@3Oqz(~f7M={&PJzCO(7xhZZ4)b*1N_~ zCsQZtR?#Hn{e~aixIe*h%FKVkqw+(uHlGFk;H%9lV>yQF7r@ zdP+Q%n@uCP_-c_qui+*j9%8;vnTdT`AWF6TUE|)_&bK-iVVu=Pw}Pup_8TR822z-Q<8IwyEUp`>m6f@Xn)4^P45yQK9-QfY50bTOHf!c5s6qKJrC4$qT zv?6ZIr|7IP%%%s(Q7-F_eAgyYC%GaHk4-Ki3W2$_7o!zC_WWXx%Vo_v#H8cv1;34t z0CvEQY{iD5L!@75!yPJ=~wb)|T!lMmV3v85-ep z8c1qFIxu~`VNwt-aCPP$=s4yFr8#E{RFHcJSg=4&+`wdy7Wow3LU>ShreOrw`WB+@4MyR)rhpF*d@sq-L5k>zXz`{0*s z+W!_a425$9NL{-R3x5UcWy04R;Wn`Dw?QY^^UcOv1a&CUFA`RH`JW)33!rsNA{Rvn z`egx!^AH6Bn&C;nn@j;LgKWTE1PZ3tq8o>mE=L9g`pAU=Fh`w+w#9HNr1g-NWZ+718+;Nl zhYvq26W9wz2b+`Ma|$*kB`~i9(LafIsuk%I)S8lnvHpNOFe7!7|3|B$dwhW;L5L@J zSP(!r(!%j#Ji9;`f)lCIAsP-#2xe4(BBPT1l+1_MO;Dy1rC@{MuCu{-G9+<|Rci|m zVW3F0_xfm8e+Q!!&$QkttoEkhV|2(eLmg<2HMmI@vDPSRO*LWMQO@^cHqpRKZzH9P zn?bV-e|$>kmx^fkIKrEzU}gBkfXz*jWbjCQ+Na5PV0M_8)3Hl3gi;C-No zus+&B$*TC=p(ThAAI}`(bf(zWp(R-{x=Bfy+^$9E`OeK6f-NS8S`z5nB>o=NAivV6 zxDs}PL$sr-LhWNbd;$k`lkQdxrm}QiOh1AH@Y%db@avn5%E1&4%M87l-Uk+pa3mEi zFOtIKO}IUUic)ALRrQJ+Tl_s^v^KT^Jm>>vd@Zkk*;^6Og)O8J^YsxqksDVS3}43; zO0za51rbX{xc3nEmj%PCVFme)4$6|!8qM;tJAdhBmUQA2qA*~^ zL=9s%gW_}vum_al)kxg{{%^a%yo>}F~-71HOw4XZ%i%8 z6@GlwlRC2T_yAWp6Bq({aX)5&nZ)l#P4#RH*BiL#rI?=X6hslxKsC=UAhA*7H zuyF3|X#!_2ESx>9z`23Ixh&;;7I-cTJf8(FWPum5z>5|*e_`QF7Vs^T;u|F38+WLX5r3WgSWE}akdEuK4b+MX}i^F=++TFTioZe;P?xzjc@A5x=) z&gW@_`e_=T&t-gS=Y`X5XmNdY;rv1}ub5Xz<&C1wP%=%Oc7tUO8`^E{IPgVFbD8THynUiVGTEn-s-3#-=(Yg`(nc>++= z#PvOi^S&gBOm?)erur{%!?)F6U;F4_14n)8OL*yFx3CI@RaH3Sj`*zL;ztWtg}Cir z2Yc&>7Lno`g%wE&jl%0M6kdnlh7T(&_m4XI{yqMSOTzH^GEeR;1EPGdmL7fYQh6)K z7SYrcR@VyW@9PCa=MHD`7M~+0FF$8#>+5TSiN4_K3lF9-NI&goUVQO9)2;>q$oEdX zA%9J<0woWnuh~0=XVNaMJ>OpoO}JWJQ#bPyqkf8fzbq#oK$>>G-@@6f)qdLP3*1Sj zR@c`xHv{8;WNGC_VG-rWt>|i3QbFOl!Imuh3@Pw$tp_`FShWYT^IloTxvbs*r0&h| zGi1PIZ_+%x$D8Q>wBGbl`sVjRmOuqdU=Pfy#LBtB!TF?t0iAaC{*~wNAIjR1IDi*cbuguHMd*jgJCr8E@rbtM%X!$Rzb>vg&im4@aH)aX zgOe)x@*G`8D#YSaDlD(BExbNh)q3JSr(CJdD4zQ-p^U#;K4>G?_5S7PrFC@Wtg*CeF+C9L7&mY7u_@c z?Ea#kWznx*6!_ZB5*|Vr&e``WY!I?6Z8c-RbQ_;JedsWUs)6arrXU~u!1SzuP#Cxr zRQc}SyW;5x2bSjPU)Y`dxb4J>m0KVK%SZ!I#20p^ZT>W@PVO=r=of)uhn!qI<*_TEovkrzQjt{oV4S zJLI!pM$M=XH1zkNPrt|oNnB;O_q}Pyv%Gp|BYWKici7j{_oZ=Z$mW5$%SPK5HdrHD zqBB`1F6!t2oy$9sc5ZJvGndUNT3Sgh82}H=c9xTZ0R-gOC4F4Bur6EI0OQ1L!^U2I 
zX>q-v30}m6#!+aaE=Tj!y@0@L4qisVfmcA`@?}9|#&tfCB?}G^^yQ28K;691`R86J zyac&_uR@PxE$u0kzv%E~x#cB%<_`7K;P{`oEI+l&3+JC>4~UE46G9wf7+k-z?hz}r zA}^x{cAVhSw}E<)0!a>+o?ciUXa{}0QIbnY37(63(H-!a+6eg)9<{VpUZjOmk5b$R zyJ}ejyvOP1-Em7pjHCY#ppjpkNv{w3np~gN$|r>6hb{TiLdptyfb{5+bv)!N!Hxpt z17I|NWo5art~Ev$$;UFM)HQ9^FeNuMaa4&P>ud?CJW=0Uei3sgE1V}jtn$ldsk|Sz z)7r5se7w~J!##ill%8`(u6|@?<bX691}N@$mUEW(?8y}1U&W{p7G5i(W@U$G zXvRSN7v?c@hkd<8yW(&V%J*gr%-UmDyC^3^ae8>V3fbHz;~mWkV6$h!FAu=U;Vf&t zQ{qrpf4=fM;^w$R#`Uw9aQ{BI1a5cA=RYIWOlMZ&5_|xP@qCRLU*0eygze6=v_|X;*)G2?eDoFmB!1KW!IwF2!=3i11M$tw zVe*hA-w^f{k!n`XR%|DTp`;PqM7+DFgd-+_Z^EoBYVIIw1T^!JNbE`Yw!+ z>Bl?qAid2pmcwmChJm(@rZo_?nfN_uIIDAhm2;tjwQS!%`Z_imEXlfX;EFCax)0j^ zKk@;cGp;aDA6-9+pZi-lLWx^osdMNE_9{r{eGcH5j>7ivoKOAe@{1&UM(l!ngxj7t z61a&`a_2#z{X>5sgYIG6RD&tG!qu@!7E8JWBsoH2a#W9F%Elg>xxiuWN5_#I_sQHg zQH^nAz)wPF)PwhMV=Ja^Y{59!^+wnd<%E$uthOJ~<53=b1wt;Ix1wNP_ZJ|oV|;Go zOyfP^lE?Hm?@(tx1W7?P->=j|cn5(p>4Vw*vnsgjLfblc&vZ6$bxR&sg(p;DLY~0# zKEqe6n>=Z4;9$3nRD`QmNv=m}C*@>Usj$X!j;U=2sT-O(DFid93rc+1r5GWX1L{dX z#pElcn7bZk+jjB+z5auGJf+8n^mt2VUy3AtMouX0X+56d5gfs)t)oLwx3p8)JW|E` zIjcjchLRRuCAX84szr}o7$)fe0(<~1npVZ%4`=vlvY@v-tMViu<^%^ebnj&a_r&~? zA)4ZlPZDPfc1m?l>+#q0J2|7;6Xwm5l{l-USsVb1vhDNZmzu`=N!fy)Q$y!fVTzMq z_ziTkR^%m^Jg1xy+5W{gQAXd)U>-b_CC{t&1wCF+!AXPJY~_;|mGLXez!6X%Nl*d2 zJB#p!2$pp$#1`#vg>@tOuuA``N*^rQ#QD`M${>M{N;}}1Rn2FbY;Gx*jvr!rjjGzm zaUq|opW4FWl^5#pWH6Ka=Vdd}_t?uittQ|;8rdxT8};hf)GJ*1bfhK&SPJrSMXii2 zRxro5!JaL^jhk8f^Lr9AxEUw4UT&GshMLjcooNl+5PdXTpI_Ij$8_r^ektbJWxhJW zG>($I98ZXJAOD6rx~RvJ9?N=MhYS&ar}o%nQ?CM z&R)rNB~Cy^@HXM=4e$lQw0TVN(WkUbkUB)-9@{7fsY7J3?+{6K+3vE*=gLXNvRTi( zC2>#Lj%b@q_AJ%y682z>1V9|LnQZMjrYZi@MoLQl3%%#x z>hbUNkpF1%Un*4O5lrF{DWso{@V`<5quCqEY8)>kzL$mAnIT4i2WDxx?`h4NqR6 zu2x3mB*@}K!KnGA(uK|7@ck~T*#I8!UCkTY*4rA^FUF37%J4suP8i6oH7^&d$P}mL zxDTtAH9Ky;uIchMRbs+!bpvI}voQI%k|sf#4V-V&shvur*<9-IPSVn&thEp;M!9BY^s=jTr}SBz)bcyhJWAp$rW_-) z-lYDt*_)I9T6O;$J^oug{=Hh^gbt2v=IBR@nZIpTCE#i^-I~^K(MPkX{qNL{JbRP> zpvLCy8t7tr+9Ioz`s`-W@yzF!eU^Me)s8nhB2NiUm#_~-F|X~c2gz?)FXh88uRuKR z<~VG`#0hhC^HRU9j{J@upH%sI&X$3z(7T8o$rfXXR8F4oAfocx57-&28oauN;>>M4-egINNuI zvx*+T5f95yYk+G@KBo*$n(91*MxIUJoN@Q4afME@7k%D0iusp~@VRqpPam#IIvWpk`3`Vo z*CtCSG5LbhboeU5H#YfWV#%NA@kKqpq|E95g+`v&__DJ8RFALd@s=K6>Z{u!<<9nF=rEC}-qLmR<;p3T}z=GmP5K-D;b zhPckVlH_g0=l8ffXYjSx2|9x?I@ZN2A?9ONS;5(UN(NIuRI{mzb1`>u4pJBAV&BC% znDf_$?2*!f`8|ngqrn8JIYJ1I>_sm{fDkn)a87MGDCg{3QM`RC7(>DW<7zd7aY z8+R~v3rAbDMBvka`!CRYRH9%6UNVw9wt0C6hi@@DfBnVY89(wsMz)myQLmlhJRu(+ z>|>wgpOidp1cR^f)r)pfPB7U|;8t-}qqmly-hKBos`d8iF416kN0SZ={+zVrpY{0f z)#x!~P2_Xj8XyJAhooy%8qVH4+-YI^v%G`c3Ber}+Ns89rW@sdP@5B+iNmR0O0=kc zVzW29w7C4rmDSfi^6JInm6(sealGR)2&vV>laL?5Y&RIW=Fhp3x}pQDy780mv*^^Q zeJYAzs${zXkb#5dZ4(Ws;TX>0D2VxYI5vvw$Iu4+`198E@1Q;F@J z!3f>;yzVe;5w~J&1!FiGa=ntAwgWya&*0F%;QSMe>cgWg^@a97^ym{$6sOwM9S z28^44V6czRNVb9r`(%edTt#CyWA5q-?M!TPSKZwY9zdCLPh%VM*DNQ$B~jx4v|K1K zE5wKM(1kGnat80(2T!4Xz-KiWK6ywx^`vIygdV@8SMFz{m#UmHHoRMBNjNQQ7e4}xoW|g6NS#A~ z=w9q@89FCUw#JRfY+5!>Zv_9cVi#HH1agL*&S#M$X}dPMbQ8`OI|;MFu~FTSLk5Ub zA~sj9q3C84DNg_SivoS=2}u&U7MV#mnjoRGaD3iLw=OqPgduhSMr<`e4-FU5EeW@r zc|cj&j%k5=4baPQk`v8F7;y<;+8&pN@X4NfP5K?V4+Bm19#l{$kJ|K!V*|q_eQdBw zPrR2FN{>`lYQ0$PKVQ_ECiQQBGjS z;31GGA}p?BZr~YCwxd+*!nq{vKu1ME4h$SX;AjZAnYRnSa40uj(e&#B>X+&3#6H>t zatsIv=S{3UvQqMO!w!{h%+3WV@wGnrt^~7^zcntp3iQxl-KNi(>$nokR6A^EG6+pcb!-9iZT;`4b!g4=3bItUmv#DMSU0EU% zi;^++^ES+z_}C0@<(@;RVlV>EVolm$ML)eFp{%NIvqv3`oT8J7Y*{c>DAEjyqU*Zd zCE(a(qruTX$T6~fGcvh|GxS@*L!jj3KZ^kC8U{(PFkiz>XfY(7H0~*b39D@c^PG#+ zgj%U=o;j2VKs6*P*ezZX3(Bb3borOx0mHf-mM<)V*dP(DP%J*2?RJfu7k~l|6rw2x 
z{pnlM>7vd+?YL^gW{713i0ASPhoF<7?Fox9C?J?=se*1KWuVl4C4Bk*vGOS|`dKT_bYu zjeofNwYNID#^mk~eth?fpL_RPzx(dn|73Z0zWw7nzw^%>U3v1(@BY3e-TmQ@-g)Ei zE$Plb`lNery>;)c-?H3yzVO3)U;a)<7meKe)Zg6u#%J#R>{~wLkN)n?cR%l2`tvVZ zfjd9=+&f?R>7DO>+fwg-d9JD+^}?%O}hYTo`kkMB-ZJid{j+!e>>EDg`(L^H{jaDK*6UAy#R}a0%TMa{8t*%w`8I!+ z`|clp-6wtdXaD+-Z(8u)*WbMNH9wZSKm78Y&!wqf`|W#Q{|77Z?vH*ao1ME~{eITy z-A{b}-JgHi3f%q4Pw&0`rxv{Pmw$Ed?W|j0c;_qM%WM9{_wIh}i&o~HH$JcV)QmA9 zcfR=}O^EvWumACn-upx5`l{BvLS3qSqWfArPve(gJV|Ljda zpKpHq?x(+`#n61-dHZcEd+$GdBJ0MzFTHv1bDy%*yWjqXsu?JK`^&uT?zg{`z4Oi| ze{k>jePzJxoj>@lZ~ZU+`0lrUPnFdA-9P_pOMT~0zjx;kzL5dI-LL*!1#IoU@Pl_h z^;sYMO5XH4Kl`nBe&|>7-rIlk&S(C{ck#zx&$@W$lV8ZXarY;GkwNmiKl*&O9su3F zpMKxB`-$(o^S$r-;Ag+{&YM55AS=nbTF}-vg@qsfwOY`4@BH9T?tJOTtYG%ed!PF5 zyZ`jJelFhp_MN}v@2~ED({HPLZ+-C{08>N0_Z2^N_ul%_y|1LO`qiIjjlA=PpJ)~g zI6nX8-7kLg-kU%1UHigYcmMJaeSg03nQU-(zWS{@pL`=N@RNJr|7srmtKYRC%XaU} zpY=2U#-Ff(aya?Kce3ez_eWpXp!CjnfBO^fe(KG4zvTh@U;p7V@BTcU=5PGP-LJo8 zO}_iXzt3jz?$>{mt>imzqzL4jKgwY0-k<$3yYfT#7&TYOcutJ#5m0;b-CZfQxhIsX z0r5|<>u=n+T|bjXX8d<@%!k2ePe4QbH>D6Pl7sX%xg&1zFd^7q>3re!Mv;5CIR#A0 z#Z>k(XuRW8CgLR*%7`7@f0Ns}V>g8iSHo*`1E*k$yQxJyFLa2QtFg?ctv4Li&T)B;TQoZ#f3CwG-)#V~u{hKRK5ZZ_|SjGk~G6=%o5bu(6l ziNw2{nihea%d^Q(WW>ii;ZV47?GVaPR2kSbIUNBNOVg9bG}eMTJ%%jw_H}xu`)*Xxf z>M9N(WeWYN{=|c#f*5U)2E90KszIw zcuDsR^AX+{a8905Qy)@O^J^jEI~lM(@W6b35-qcPEbB0Jm_6?Oq$QNkth0aVAxZYK zveUbPbd#}=Wc>%407aAO+5x~!${E9N75e1$YQuiDS)@w^@v^msv!7xnlR9=&~H zFLhacc|jb!)d!OJ$Q7F0$ty+y1SX z-JATH+WvJ_L>a6Nqgx)MRT)v0LMisSycHYJ(hX65mn;CiV8X#@Fnnp zKxYm#j4aatxT%Z!00ays*Lx#_IlFGdg2_d-!WI2xzQKxHsb$aPyCdcI9aoIkxScYM zCjUvu^^!VqS=Du=t57vCy9F@#@hg&CQ5or5_*fKY({*o_K8hs=Prg^j+JNUd(8=X9 z6B|0aF6aPXI*E$9;FU19zQH%sIj2u$GC6!sPABpa9NlEzQG6EK$t!x%bb@rvoz!+u zuyi*=<&Hj>>bP0SsxwxoMq?|vM)BUHmo!ijAZ;bdikg_Pso+!zDkZ*X81gkH)GOSt zi%JM+vYX9R2+=`bs^u!E9xu_?=aN^cpS-33F1!Wx2&u7()7j=BbEqLKWjTAW+eXKZvoLt$0cJMCpRvA9`b zG=;0W#KBE0$sw`rW876?geDkU;+7wM+L;mubZ>(+&v@a*`UW0phnH;0pf{XlkoRGh zabz9zs-E58b|}W0=OpXu(Cn(NmuH>wbTp9Qt+?i^eFm_kbm zhHpRx_!uB4Z38lpjp4wn+yceNlu5W$=f+5S7>vCp;IMFSXpQfkbLanowe*gO!bPrL zw7leox^--A@H&aY5@fKj&f21xbdTl`v$s%|e_$ypi$jnjah5iU@4@Oi@Q$FgfI@}!v?I!AOF(wfzpxdv^K5xAeS#-)H= zc6dK!ky1V z7pw|D-+(`XMeJwZ0B;wrLO5+^03vWt_L8w{Rdg4H#bzt!_(nZ!FX%=;T3JOyTEj)0 z&pj-6xTYd&y`MgV_oawhKjpx+5E{$HW&>$D=hx!4F&3t<#0LcWr8#Gzi(3k75Ma9t zhPPhlqvCB;GDEM|9Gs7vy5ui`!QD~-4dmN4Nn?{&05oPl%UnWFz=U;ml29^O>F3V1o1R zqvdL9gtKetJ#9@1SW;3nOq%Y$Wz-*hnB~Y)-1&doIuq!=@~V!HG->uUP1;O5hof=9 z)2f-y3?hqc|EB3obehH{Gip3Kw39SRn{95=$@H-d95}-&Ad8^L4#KD)AOdm#aoCYX zHibX3iogJ}sOW*C;OBGSd&$3GdwTocZ+q|me%rnGcklgz1r3x2OC?=Qq#iD4ARcDv zq3v!U2pVp(oDSn+L_`%!9 zCw&cgC*Z`Yh(8@{5%a6(mf|ip@!4*UnpUaFFewHmTVes_m>bORGBQ=g9Wt%iF%2Vm zksyH!zFpTCx?VP}wJo|Ay&oLvTrr>$TQwitSK@^i`=$g7#;IeZ9GVzNFDw=NkP{MK z{*u&gjHsD7Yl4ytv~*UPvS)j(sfyg|#Qbf^B&*j?`~co^Zxacak zZuTSC>gwyE2}3?0tT2i4ba@4oobZ3u9c-0G;3(GA*b^#`ZPfRwIdjh!^0TxrUc?fh z=&%`L+!ltu%Zu(t5WV2$yzb1GFuRNMcW{GXJ7S6N80X5aE(0T(Tx>^=qLVN98lI>T z`Idl5aW{%zVu}%=3`EE^xZAnr?sMsy6JH}MI+N3646@!X!jXca2xEnb3%|Vej3 zeGLX1L$gXVRC0VbJDtH}{P`u%dE=t#9`nMPGSF$6tP95j&6%bTXC-urV-u`(OU`AeK;PI4?$WzhI3b2|#Exp0;v9pqVoNtrbNs+De}`9(&#DLxltJsN)H?{Y1%=e8A=9o3QhYmcs(S ztu=zrf?SAxNQlYpnoqX;{wxLXGNwv`6MzthNMM}KEw_*IA?mnr(yML`;Iw%bB~6BV zPrB*MlkvkAhce*c;!U_fXd`9XGxw4 zBbgkgrQ{zRsf#g9b6fpKIkh5=x(b19figaJ4-64T?jrv1O@N|Eg)s0kDM$PVR^OB8tg$=$Eg3$>^lFLR3r9I|`m@NgM z6YT%YHV(z(PRm}+&*$MRfhy-tucM1C6|R)pDmoxpaX(BrT-gem5p8!xQSdNw7s)&y z&h302|2KO|m5e3~^YIrvfL9i_U^8Cj(_RfJ1`M$~n!sp@iNs82l}>IgEwnl|Ps>f5 znI$0f1?n2%y^q)1%mLkpQ0p-5SQv99;82vsGyd8}-ynITOk^2BrUWSSY^X1@tor=z66>k9X`Yzh7Tt`d-BeNxMs^|U5K~(x^IKbiY2hH5r1pK 
z+fAsVU#~d3KsL;1gAU1?HI2QC67GmN+Bkt~Ab_Hb$erpX7+oDa_@%Bt&i;2$sfh~* zF(FatZOJIXu5ZRWZHy4uTOcJc6F1{sCJdJ9)iRj2$}ZY@HyQDsEKcNGC0nDGzt`0F zN!~B{d&v~zH9p|g2O)w;V)cP^=Abl0YiS6yn|nB!_>cuZY(X72oHM1(WtR|u86~`l z%fKy*QU1ykRW~Jx#2(MtMYB(a+bQNF;;e|#L5RdqX&-$q3 zWBEW{raTYf_r%9d|AgcpCI4gz+%GZ)-CB)k7N0b^=PZ4x=)#ySFp$W{)Sdq+OW9_uzsWmE)Oe9Z< z z4>9flM9=W}@im)slCbciNGo_?3xW*1`ZI)&$yEKaXu z2)6yLibMo~>%pRrYG5Pg*3p+%BvCKR|7FYnZBwX^GomAH)@}|(PloH}*of(m#DMjW zLIxWxm!l|P*b3(3d%l;>1AHSkj9jvY!A2o6%L>*pOJ{S0Ce)+$A6teXj_;S1()~-- z3u6`uiX~Y4@7rn)Xhh^Im~KvG;UwdKeChwqm)4F5ld<@LNk5caCcoOY zVqB=7;IbGnzBwqr?wpPVfPKMqLJcE+6l|1U-aLn>=3Z<0I${{sxCfPEm!!=WG0p** z&@mD1ChnGWNID?}81}i-IHrnT{GaCd@_I^dMwf-UB|VZ}i{rwfC0vG70>VhiupjAe z?n3&C*-^2_!UK@mc3|_?PrHp&>K5%>fg!=#+GW1l1~f67kG<9!lng4VK=2#s=uvdB z-hpN9>iN^;Ho0dZ5!)Tma4uPda*}Su&)CAxLI4`}BjBRu8>NSbMpH&F6<|wXMp#6^ ziD6g7U&p_6K7P)|IWN6fCtMRmy@*?!0;}gl$dsLjSsqaSsIlB z(-mjO_+=94H%+`&%js|*i(n_f7pdUAds|Sv7vfi}(Vl?a<$s$65MI0uJtFE)@}T$4 zLW!qc?kp`h+i^diuy4ovgt1*My~RG|D3=I&PGbZi&z9o>w$|GUrCs!23ZY%Pt$azA zA7p|330y6b+eR5;CKpAPwi5j&9!#Nlh$VZF{5Z5u z@alM|$qmVueb!M6^lw8_YYsj&ZYl#!k&Bwu)F2$4$Q03BeGY^^cs*40M?_v-5jD-D=lC_7`gO_mk`t0i$w>(8EwUW;skd@q zTA@Y9lo_bJs9xfB|AqVgfe#;_*H9N~*;?Pju`)zY)CBFl~PBM-C*#vo|QsyT41;@k8J`KSxkg<3+ZuUA2tJ82g<}5I8qF}Epan}3K zdEcI($ds+&kikW8k6JLZ8z`yV_~tiaZdW#00~yB>1!i|)I3MS@0xuik)wKdJV)~2j z<7dQqvvd`%9Ev$EcR7e<(ql!^?qFK=YR$4>QPqIJ(P7rQsePQYxQbLTzuMX|wJS{D zxWyuSRR)z4sP53P)eXs}C3;)odgFrWw@S9m>_RH8cup>B&QBODOg_+7$mt=8Rd~Bm zSn=VIreL@!H^(DPd!*!1X5gIT@IZ;W5x0|2Xq(G%^_h6IWga6*+xE^Hz_#eS6s)%Y zR-9A?_e0)d&5Xy|%E82_s=?%{^42K&O4HSmvz%tZ3*6xo9xr(U1hj@ks*NX_ z@+8TVGr7c~jUGNW+q!;2$~v_!hS9(xUf_sx>9>7~4P-Eor+W1?$Z0Lq>V@fOwG*Ef*eh7a=UD^y zo(h;v8u)+NNM?o~Swt6&zPzo~Fk0WxOYR_}U<}EXPZ%{nG8)NOr{@c71zE#Ve{xY@Xrc zEFCYi@XI01&KjsR{?xQrNM4y4wFVjM1kszt7>zs4{xiv+LkbRWIPa<;MN8n%(vjj- z7J0SgFCj1)aF6Nw5U1u6EoplI%Z zYsNb)@J)kVF@4>{$;ltj z**A54V{Cl-nyc>9+U|jqw<#@5Oiav7VK(3Ts>f!IwccNT{MsAG8WX3+PBo72Jd;OW zK0VWzJbv=jv9ZRf>DC&1r*D|N_x)Rce{=s>TUuPn{1$YQkda!F26q8&^TvcD1#)J?OIATmQD*Lh9Z&|BSZv@OP+fvF&u*zP1CM GJ^u&M7sI&# literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/__pycache__/cifar.cpython-310.pyc b/mmpretrain/datasets/__pycache__/cifar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c121d6c27ed429e989523880c336bf3b7f2f2aee GIT binary patch literal 6766 zcmeHM-ESLLcAqa&LyDAW$(Ak0$xf6wHXWPdr$`z3a{RGP;V3~YbT@1ls?B?b0j(SM*1#r_fV+9&@B%{Fy^=ME`KX|P?OPX#){ z9NxM2+^=)b`JHo*f_y%!;Q7bze((M3D~j@O^l|b@;p08L;lF?|g{i*MreDoh`&wJ; z>up_4>WzM?otl=@?FI1YzS+;TGyQBk+h1%i_H*rAKi|$v9>ZViFSnOvIpr7nXWC~_ zPO}AnrN7!S_j_R!n;#9MUNCU{Vk*wv4_wE8-*F%F z0gKc8b1;Ol-sMqT0I|KE&+X2%IrAv!4eX&4?G@G7{P4s3LAT2VmG9izz4hSk?t|wl zWBA3$0#%=<_}w zL=NeKg_?fAdR~t+%M(FgGR|h{_M*K}r{o6x&EY=ga`!f$^g?vo?D#=vv+slv7n`zS zTF;WB52x!G?k6+f65a5o9GccaOtuJu$hsOvqIeSvRxnfQWSr>=5|-;$I2sNE5rx(Uw5T`Ou%J>X^Dx>d9WRK6yu&?bUO(C65RM?u<2I7kZ0|kK8bG>mxb%W4`}15X?f|i1dY)Yu$8xCk$^sJYnV!lL^MFQuS_B$~QVr1Y-_w4EM*) znJWEpg{5B5u)SbVilDyT@p$ccv{Uu5}+>Dk8d~Kj?>}pDgIh)HR`W0<~gl$W2@QWoYk9^a=CJP{f*eFx2kTvQfKvw*QmSb+NoA~qf^Innr_YQ#92~i zKs96z?sBbBVT~5X+Q#ZD*kHZpcwVdGRI05eALpirQzD&?7kzBWw+Bw2huDheZM%(n z)#1FhRjGS54B$aZ&K9q?@O-1kCiw+ok9FurtP@->NS(j{N}Q5LwzGRL&Kw(goRZcW zn=>nl&6(21i;`<7xYzqU&d7}bjkvpT0^rGGn|K%ThQ9=1D$}~!fjQA8<`X4c{d4%< ziTP`V850dO^>yl*`gbZ?6D-REg7w4(EquJb z3kytusXq!MtHUi=F#*|l@w(L+MHc+3b>&<2%J@CW_Vx^tsO@;~Z%eC`kSK-O8+0vT ziS@MS`!wb#H$c8oiWyO)uM9|hmSO^TBm{uK?hV4o8Mr)N0ALKTz&Q8GXb|=K z{H_pz5N}eC6p)vT*tEgki)=eK2s?|}*a*4rC9}!#<>b_5K6#u@peRmDL4|SJml4M| zYMilcscyC{s_66WjZNCkX1@<5lJYuPobG<^jYJcDw(*9vTQieY3##!Vt>tu6&8jA7 
z8n32ls;1&sHNMprvj09VO#N?Zwi%bgo!PU5LK^RFyx}j=GEp8uN!OJFHBu)~R;GWc z9cYnus2?ff-HCE&94X4Sx6Rj%OdB>hZ3Fr#yi)}r$h^O?%>Yze!{OpL<|lMF)u zL+F$nU|_75*|{SXCFRj8 zY;B~7f5I+)M7zPdvXM10CaGr{R#(9477uch#pu;Z4y!r;m3EMyWU-b!t>d%bO|mf1 z3**=CBaB4M<8&T)Yl4&fSK^p{v)! z3dXtPap70TZ(XQ4~yf8~`-4 z5QKz#jzB3yIECHQvPT2w37Tn(C2I;GvpG(o#qmY}8o{W$XT@qMfx5JhNe2}eqR9s? z0!J9XLGpo#k}C=0MeL5-$;a~-c%JLn`*RDVXb7^K=ZI((@c|LSbh@ADy?!?~yHV1w zxRF2sc>#hATwRuXq1{IeW)DT+@-U1Gc5+`xj-eOr$5}+CQ(zJX4c;Ikfu%qL=_BFG z!-yB6?wBqb;vK37LXkrQLM?P0fw)Xa+=E}GfYJ78`ouL+QJBqAN=%2nyn~GaODYRuTPbgJGP00@!pP#|whvY=#`83TF@wgAho1`~Iy54{$rm z+VbKfy|X^<4I!FAR9q4ST2fDGmC2=Td3JW3C95Sx#8&{VGIof} zN0|uG1b_v1M!*~aQudG{0xIb&v#tQjGyt9fn8}W>-6xp&;mBTIt1LQDI*kNu=Af`M z{^^~mk0ut9Fz^B3A%Q|-6>QmsR~-$3H6`oQDM}y?u>KfA^dj*-1S7~RiOWP@11aVb z(;_VvKLv>mnpP^UAgPLFBGPm|rBa)CGBz=hUc^~M6D+wZ6;mg)(vT4b!JN2DP09A)Z@j9en}nA5(TBX`ky**I&4~=^sycaIELwgqeMPZJFmbm zT*hH*bnKw|x}xE@4~z(595@UdvVM@l@#B!g*OSAW7%T-PC_@QS69Y<6PU=5fWN94j z!g%90&mZ{uaOHmKnMdt=;LeT3S~0J5_~95US4Onb&?76qclGAu?*>Fa=K|{Bz6^5 zDHb?Ig0tA@1Z@9%ni6N_6gExqTQG$L1c_BV1&Q<1fYs%`KMh)wEx%1H1g_!?2)!XK+*E*)1p`P3 zT%uJGJGC_s}8l}%Ckg;VbN+C9<3RrgDgcxSI ztMdOcdG-G(B(%f#=I+X^JJ`UsCZj;r^UZ-BI zZ`E2JL&8P8ijLBA?-cVA)5PagvrlA9CNlRUvLu1$=VJ{;ljs46=* z5Q*S|#1N-OQO^&P0H;6|%gI1oOg)8!Nx9JST zo%!uh@!XtE;Q7PTKX>k4B;+snGW}HGA!nDCmgkMtwjn&#yvktn_9lm6{*|t1L5v??R^pPd_I5~wdhZdXX8Z*6XGZr$GA z`id5U>hF4qxaFnLr!YT%^B3>BH#fI$-~Hgj_ik^sal7fKTD~N|0YZ=#<)o!?+S0iu z44<|tHN9r=Z}5sRxe2W*svYxV(xJ+nxt7IkUVltlHQwNJ@NM&XQ5WVS>1g~MUwCY^ z8XpsbwC2Fr^SKV%^dFgoZ0`yN^bfCIxdLr%jg=ZZw$0$#>i2rFOj!zS4`%qT@1|*Q zeRcKV;J|t0Cw?3`Z7)bgd)E=XzluXW!F_LadX=-AhQY=I_jDKB+w7!5G9Dj9LF{qV%!@eV zq79nD60~+j>B)=pRhw{Migp^y0ZUTZSHhW}FfU=9I0)i{r1U8`gq7>qWX(ETUoTv) zJOZ%-CjxD4P07Z=FQ0t&<&)pSAM-=6E0U>+k7lOZftMui;hFhauEWDMXRp`&k6c&( z4cC)WA;)pf$QdhJXfrr7Kda4P@ZV@N82qo={A*3eIu;If(!+F0&6!EJFeC7GM3B6I zF_oSlxv2n@NP2;vKF2ntLKz90vYV_=cEp%)WgMsMEeN3II>X#l?~^EjNRqSZS_aao z`_;}$!U~IfDMYFll(NP_r&v~4Qsj2RmXzX4D#n*k{iRcJ7y+COva^@69u_`t{FJQ8CNE|C+y58a^QuZGk>nM1hIjHB zdO)3cUQwr;*Hj{fF`%w7by3!V#=8=~lRrVnDc8E%sFG<}NU2csIdM|Nstl-GIV z6I@%#$X?~x%xG3S!T0Lq-B#~bT1)MW9BF%XKA%;A&&qT@_nDCyf26-h=>%@xbN6Kf zbj}SlZn%gUULMtN*#HsRD#G;YDdtU+ zWojp@?f#C_8_4g#iVbMVPoc9+n=Vk}nW;5&i`vxC4T_(6&4k{BaqJtKMvIT}cdcUo zZP)-DbH+BcH4rp@Fg4K4H=!jzgDxc*J)$EmrAK6>XWA|D>xB^nB?kYmAZ;)KGChTq zIij4tMy`{GwM>Ul)rP;s;zuD_f$6X*gE(MV6&K}?cqE|Q3H%~xu}H)@ta^NYsO}SQ z0z>Er=BFZbBr4f7B&Kz4aHzx7N#wd8rOm4RE)H8LNS%A7=lL=z0RowG14n?HAC{vi zRh>l9T}A;HT-06xYf-a<<_};Z!ThsylUlSv&x5;%uPSGre+42V z$I$lZ2_4ar)_?|RP~$q#dPa|p6966T4Wbi(o~2C3w|y=7Q3Rj>U(Q(;`3{NV*Gr6<$H=;k9zygtsj+Q@>qsAm>}A*?kZ3hd#V_b>_yz)(lpm2n@c zVW@GTST75r;Ty&M>H0ij$!^>aI3f=WLNr7wDiSu-R@ktjWaM;BE`S$vdPMmJ_QfD6GI$ZEUq@+xaj?9Ao@E^U9*&w`uLnQinl%+# zt$L{{#!|~xd$C9z0J90|-q7&# zrUv;9xvttX!)VY;RMW88nC;INv<8Hvp$*StK+cNFO{k{&GMb4=%zJDqCeH(*Vu?WP zXkCm-sK5Yv?WA2!}Dj#ms*o5tZZsUUK%?wA3Sfj zt;D-1{&%280>3MXt%Ifd5P$YMr_&$?^Y2b`9x*wB@NnJSc3rsU`*5wedBb%d_PwB- zsk$zY+pa4y{ov>J?d{F??%(+!*P%epL6taAa9RY&HQL@?EqpXIcWF+>6QbdtrVuPXt?+OOSW_o(3Yi4`5 zr+Zl4!`+?q0EU);m4gff;UR!!U>692@SJ;&IpvsB1Gy+C13Bdq#iqo3uc~`~mb7db z@WH+8n(nTuSM^@K_q|uI7%wi?6kPxC#lLm`c}-FNi(Y20I$qw!o%{z1U-8vY>8mZZ zueG$k-qQO<%jlaevv0Mmex+4Wsf`xe{c5Y)ueECZdaEwJ>ES|uv9&0kjc}>I+*-!7 z>09AS|4i$Qs#GifIp1by{VFq9^^)S(n8kE<_L9Ohc9xy%YL^soTYlYNcx1NDPkJm) zTlh;%Eh%(uf7xF_%a_>l@r4uRFBSicfA*2$pJiu`^dq(Pdwyk8X`KHVI8z#x+`i?7 zp|>5f+_)Wd(%iTkBx!EHKS+Z(^1|HSWDkZc>ag708V=CR{4hdIL(k8?<0Wh}jk(uj z?|7X(7Ww%~kEL!m2${QGd|3)X&+YQKFTOX_+RWo&0 z6YsjO`^G26k@~mF$I7vOqNvJ`l&(q(^2kn&)cjaEQjgTzD975zN~U!+h^pfK3nq6b zh;>U 
za)(abEh$Yp6x=#%&&$`zoO^M^KxS))IV5MNYz;ywzR79OxaIZjuIRJwc*8Vyf~bq> zmYrx>Cf%HYm+my3+pOyi!!&Wwng%PF`mPZD3|~SUzE9fneK2=_-68d%$8`*n-oL*g zg+bGJt>jV8JM#LB7*B@VWkn&z=Q$|5x@qhN{(&UW_Xdubx?BWD5X5xF@;GSL6vEtk zqUI|sBIR}yNIzt09Nl@M=eE#EEJ9wD+6h7CRV>MnC&5G3Azdsl(lrD3Io!zw6ajSW zer2peKh#UO|6Y3lEqS8wOQ?LJ3fF*Fy$N<}2vH&rq*LNx6`=m^uGC)xJ!%3$ueS;7f85#mZSt zHB_zmGoBjH?8;@;Ryoa^|1ou33=;RD7Y3L)MBO2Ime(R4CieT^c%EY@8Imz7n~tCrO=HM0nA$MwuOu4MHSRaG+c*alTRJ-GJZ5^$v$H#09P zE6h|27U0#_`*0$icl7Gncf3}2u2YuMf+EbM#U9&-vEAZ*$9!aeA#17voB zKH&AY0W)(e?W~cu0awTZC*~#X={f6~Al2NeI%}mT0hyalcNlfZg@B@$Iw?3z1i+Bt zdjh&9WPlKkN9el4I(tqaflPo~kf5zl94GPm@IIoWv~KEE;@Iy5RFWyKJMe4s&?p&8 zHoz&hAo--^Xr28X#+k#s&W^)E)@M=bKptyCA=V|<5hOMya(vBU{Xu$IqC?>%3P0t? z(Q4}Kcn=w5eOL&Ugq##$$qC`^-Qh3c=a(ZO8GMfr(3S`YE7CIc?*s-)xF8s8X#8vV z$qgFdiOLaUNqm$zQoBK%3T(TN8i8#WmC?%-JQU)XfwoE8Mm5(VK8bG@;TZGz?~8Hv z*kO`acjF*($&2w<=&R)o1{9R&p6@pnCCU*T$>|8Zs}#fKXS|1AAmSjmaPqBsVIidd zx#5z#FonQ)LySoSoa`)3BD$8p)f93>WXG-=hcN7^t7I? zST?>as5Sg)Kh%DK_qy@FCAwNqFI$FcYy6ua(ZM};5th=GWGI-@QhlYR`D#nAYT{3? z8dbAu`3M1-fw0p=&}mV4Nx>sRPkhC5o!JaH=r6EpcR}FTqQCTLu~id+rP8X)fU>pV zpF^+nXl3{>`4=7;t;HX~RkoH8GhNIVZwXgcgmS|V;Vy1EJr*$@bS7BWjk(iDun~~H zBj($KGD_DC7%BlV84d)N66A?V5hhZ6;TyHubCZyN+7saV3a7O?Lf`Vkg zIzjhPfN8PXh=l+*!m`33Vu)9OeqIWM@=9ZpEvGRI#apL|uhI0|?foF#aiKaad~=Hr zS-TA*9}XCQ7$nRuxB?_@OocPQ5a{#cvRt|lt_E6MbpRK+A`WxcoH)cOZ ziQko<-_eDvyhiXwtNd#Wf}W z0@D1dqd$M+U-6|5nOiTcAEnDQu)~dVonPFW8N>! zBjzPBqCm#C4hMulq}~3T)dB-oH=MQA?ST8M>&_}HgnoR+xcH5G^n*COx7rxD+l89F z-gG_)y$`!GtFkNyaHy5u-{ex8l zJHR8EcrWGw?3@}W&l}~h)*&Gn_n;hz7PwseQq6u#DnuGa*Hj@64n_n6^K&V+MNaf^ zwo>*kr}+X^oNw?#b)0Wq0T&QOPDHdWs!Lb9AHZn0ne@}J z7N2$_=r5$u4LHKneeZDHiSdo^!@r?Bk{CkhtP{hjqPhTr`7DUAtVt>aE3$=IiT7zP zM7q-%8^m1UsArd8j*!5b7yFX)C=+^#s204;UJ(Ge8TuICsWEy(7>E;o{BD> zPngV3Vj34XYuj-gg24B77)eeHeL9*5qE0yUflWOgBdG+z4%3}E-bIX&K>Az&Khh@? 
zgqwl-DauXdLE1Z;*P%FNS_|jEq6B+2>@Lb|pUCMD%h<5U{GZskg@A2n#k9v%cym+5 zeAZT*y941O@XO=_(29IUb1>+O-|hZDzW3yN8gy41(}|V7Ozuv>c#)LEyxxPGJMGWP zh>_wiY?^@j0tDwKHsb=PQ=g`~NKkZz-ssQ`q5zq@(@PgSVeI*{!Jt2++#Y6=q+XN; z!oNHDiTIvlF7D1TLH1UfEQ~o~%x69T zawh=W^Z^;ih;>+!cx0W=&3KL&#mPqOiLTg6U*07@THSqy-b&MWz850z6&nzwX`1s{$SfeDb<@lu=`Dm-e35rV5_h zr4}MtSZYPb7W*t&RPjUabZC(HC^9{%Wg2cZseK38t0VOzHC0mNqka7mGD?(RroT*a zHDr(!S5J*2WfxiKOwTN2p)_QY%umhC{zvu4DsoVjT%l}&LsD)&aW-bYM@ zP-lwm>rQ*Zo50TET{76lUJD2ge4oI7v-6@{=O%G-9);NHTVrI-6a_P~^2K)aItxv*dO;NM7v@!%&Qf{DG0MuscIm40XFCBnPFTa!V>D(zsPu zW&~VUvPoNdeh0r)E51!zI8*1>T)Fi)JW)wLnOblr*Su} z`8L|uL8a=~ju-H}c%uG9`6*6VK2kG-@25-0%O{k{HIG;501IslYJv9jJZUd7tvd1s zi=$U4$2{3Mh%A3wDs`d6?Ix$x%DvHL3L&Cle;W=}2K;5bzc#wwaQv7hl=8xU2dGGp zIn!3;1SmojxB{oUHgX!Ya}nDrnTRY=wumB|d8KH~$!12*9pR?072zj93J$rPLEPPN zMpv57rp#T;xlAA%+i-rau8&@kB%*jG9Y<(KBTGuoNLzOrXSh&N%BSbGpx-ODxgrT9 zlUX9mUb%Jm=H}+RoBWT^kdqogc_-+#@ZBMtQ=h%ddCY}TTwaZ0{5#%lu z6C!45OE4EYJuuYT7nW5M`7m3vzOTv{{u=b1XuO@7uuhB4BaSU}z2uy;SC&lINkE07XFg6+8$iua!`K zjMSj7o@f%%8(OY9ddtllm#N z(xk**A`i8szo%*BmQ=vprl^OK0GtxgVl^dgWGm zP9o1yDB_zxC28VJ?;Ui`ZKS^wa*o2tv6jMs#nAJ73KaQzxtc55X?vPG>1kB>2B=Sq zE&k_tzjvpxOn#Td>biN&b^EbD#M!HxFSzalM7l*y#dZC-gU>otjg!mE4ceY@QdmyL znHv;!3opyX>Qh)H<_I8>?zqF?)eDy_c=--B5zhHrcEQs#$7xwOtqV7(pef49(#fUd zV7ZS3Tn|duB-ILHPGOxytY7ps%9Erl2j~6 X<$_-OLi+>b>70~|+EeSB7u5d;^|a={ literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/__pycache__/dataset_wrappers.cpython-310.pyc b/mmpretrain/datasets/__pycache__/dataset_wrappers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8620b8a51a52471b9da262c65ab9542f5adcb643 GIT binary patch literal 5755 zcmcgw&2QYs73XldU)1Wu*4mOBCuLGJ^#az~X`41sjlhm&xkVDSs2reJw4kV+*)^4lb@QxRYZ7)HasO3&1+rsS( zg244dKWQ*k*}n7O&fdKTdu>`V{S+~{jW_-a8ey~;H*g--DpbvCp=wo&+~Fl&e#%;w zDDsLZbxdC6_EQU|DDwqgdup^QqRJPAjjwjc;!Av4e_a^QmN2`*FX-7CW-s#9rv}z9 zqF?8ibbkr`%Y04um(gG68@j)OU9a$$bpHbSSNY4jf04h!zk$71c@dm=Rn^|zi2|On z!273Z7F*fQYpb^7WBy(mr`@zKBwoY)_Vxar&p zx%07b2C;BH+2uGn;Qtsn5PM0iC`+<)SQ;0BokV=twui4X_!SLn;Ln7kD?L}NT*AAj6 z01+Q#9LrfbrU;ra;aWaB!9*lZA9KeGVlnv~3lK(!gk<}qJ|1mG9MfMwSNw3$=>(~` zaxNYSC?G(H#sqSm;G^w++;MiJP+WJyC^>KWEY$>>u9pn9`eogV+Qm^3?GmT?eL_{81gbTB#+Mxc?Ey}e(Q#eqU1)e*OxdZ zNSp3vB*Ztej+?o%xY<8a6<2f2bzcUa_!TsEsm7K}n^jodTro>5`&k#?hGd7pMVgSMo3Del49y>8feG{noAxz+6d8jWYGxoI1BaG@I@Qk5>~L2W!eE|#yFWDatnyc8oo@5c?22xNvf+_ zZaG(XpOdH`G*;vq{!^7*M^6<35h^S2<3tsu7j}hKU1f%SC=5bS)Kd#Qb>FTGyQ-Y| zxjBVbH8+wz6cs`iv9e)sZag?Ze$psud9}*)oi;1yKn37y+1lt}-7;xabYFygUbuA9 z+jwIKO$mB#L;GvYW=nWi%sMP!iLIL@6Jv9Dc}D-o_Nod#f7~YZ$RFy{OWOYh<#S*! 
z1h)Z?1>OJK-6(t{Wa4y#=l~HMt0{g35duBf%*Ep=g`bVTr0oQrMYa1G0$P(A&h!bt z-f$3h(pUo)B97ydpk9%i#xaoaLwd4(52yzM4I}GQ+-^^)sG`Y%^EHm=qB~ zS$my5xH}~L@-@t*M(a?IBFe<~sj^Fk<+^=Le?fR)1ofG%Gk`O~$~Tow!uVY@&>9`j z&LK}NxTFDoB6_Zy(1JJ74A^I5@;SD!h+IK(Vz6l4%- z_63tD46ml4Qr8(^fat;+QD%f*59NX?w*xPZMNB$C!A}y7^UkNrci+3Sw|8&vR{+pV zS}M%xKn8H;e7RI&%3kPXPsi%Fjj~Oj@{{+yzSBc_g(TL|XVNDmf0&Nu&G-cgK#ry( zquz^fkrSn7=D>7VV(1=`NlBIusr<713-g$wvN>XZFh4RO-LdqLMVQT-iutMl_D<#{ zWev){MJ}No_4;rBmN1e%TsDTAGb*hqbdw0m`M7yY`~Udu=42%b@~S`{DJ0}4w}9eY z3Du_Ye46i8uM9W7a<>`MDP5tA@-8j?7aAlAaIc39=>h9fih#ULE^;$OVt`Zx89%p1 zg=fZRX+lC{N;#~2Mi=^&n$VXDW$}kaUR{ICk@wpemGHmtSy87m#$j1*YseZhdt6cE zSi+*7UCf}WdrEPKG0U=^^@8N zr+_A28u~w#2#4BLA9%6fMgW;fN~e7IRCqpD9pgl9IXgO|r5XgonG$VS+0JeeP(`55 zRF%A@9G0oT*mA1F@*e60bY=uaN;K9{QU?+gH?QEmjGLYhk+tcb^`XuM>*)xk)~5yJ zfPj65B!n0Of{VMzisDc{OgMi|kwLzS<~gC)x(wZ8p(@3wJ^jb?+KrTpQ@ZdB&@nA( znl(0jeMa3}SBk!L-HBpni-zSL6iA>=?>Mw5aTyjjscIV5cgHn)b_GJrSpSjtNQB=` zaC0C+$R$ri`R;ISvS8xmXw5e%81HAUX@62?I6izJmBk`y4%dE?=RH}P;!st$^#ZPw zE9;VHC>`U5YXmj=z4i-8>n!pq8Smr|0Y}+yDCT)*-=dsYdxk#|`$(vDW7zl_=1o>9 zW;Yg5aJs44yHv-KR|mtXLR2NXtU{`*E5%jJq}8KN&%Z}Q?MPmu9tC7&9Y%hbCDY1! zC_JvLgNPrgA~D02_X(&-q{Q>IVCPiGzXMEs4Na|Nu_d;`F0zVg6|bUdESZ-nt2Wos zTF9fVFY5)H&6QC(o`$9GASAh@o!v%VzDJweE?vb80+5Juz@=-ilwe9wc+|$~s{~i+ z%Lw;$$&p;ZP*-k}GJHwS9a(yq$eT2zY)WcYP+=!21<+wQ<;-mwQW%(`R(pM24$|!i znM+JdD~4s7t0tqL{U38r8BRE>Y^$MN5hs>ajPQyI||TmQGaFr zK26^`q^Dn<6!=Xkx=#6d?HTa=$WzQ&Kmrv@KyL(A_0VFn|yk&rGS*f(RdYr@-#t;Fuydd!Y% ziPLqUUt>;OPu#An`}MeycwLVe+&OPPHr_Ru%Nh?2*5K}$bw;`^X6_js?+NT;bcnLw zi*mUOo#i2y!61rx&JQu9dZ}~Q zWgc&E=bAAvS(CLM+FkDx!#282uwY49u>1HQu*d!Zr`tU5i>Md&Vovw@u^bB+D;1^J zfwALz9))LbJQ`&}QVDDBy=>9#11U%O#`^lv(UC8*UM6!tJNCoAKR#Ul(}Uq~oq_mu zCfWMtySLj?y>_S(FZGm@>+EKTAe}=XsQ+Ygz*o0j{Jh61*=naYAz` z#(k}Ql+!S$gDj4-qrB1{4581g*`OO6rEzE=D5Q13{#TDa|LW04@JFL09P)f&;^Pa` zJAE=V@px|gGdh>^!Tl^sWe}!m=F8)`oM+f2&*dBco8`?Husq+!m)XCt zqJqJe80H%@OA!!($TCT<1A00e6yrgE&tWyl#B)QW0@M5`@=+X>g4Sp@l7Q4Ou0&3O zr&-o$UPe+*SELA|bY*R(5FY8ugD_rMtLDl+#=r`CZ^Z{)2H`k{Fw7QMD(iNR!(?qo7t`ccX3xVdv~1lAoQ~V%_}1){l%?c-k2@C$kY!dA7&x~V{rnWo3}2o zet^dc(keSN*HrZxqc#`(`kkx={--Tn5HXYeT?~EWt+)PvhN^l$4)dJn%AKB+YM?9h zfvTE5#lEHMO-WpM)uA<^<;OsVP*wj*CU#*K_5&lo{9CB3h5Z@S)xrd7e^EOl9}}20 z0;sB!T46Hh(407u6of%d`V?oN-8auI)?nUab5fVKtQE$YdEa5JqF#`qRyeHrxm{R) zBY!4Xsi9W2cRRMKX|nXZ0A zR+Up`aq$YQ-p7*7Q>=GE4izTmGE2KQromHpnharA7ZRE->jK@Nn%ZL!M1$y9HBf;d z$r%6A=+s3E7z8>{)vGIjYG6_fMxi_a2aiWw2z07)gCI&H83Y|y**T8~TDTUVoWr9K z(vxzuO}Tg#rG6jDWguS?)d&K8l0hJ@z{od0TgP0rJ`e51E~*O!GRIK$MKGJ+e&fZynP%U9w~@k(Oza@@N0sta<-BZ9#zgQ*HZriaD|*C*k+)exkF@oO2P1b`Q;*nyxyAwDZ)gC{V81i6ZT#`a6SuGn z_nJ{S4-9b(Xodcy0jr(!I#9Q004}}4!f{r=YD}84QFsO1pFTAwfYEcWXgvmGez;fE z5u2w!dJirs)eS*kRE8oOL#~FKh&-TWb@avez(vYV!-T7vK0jr_!JJ+~bOJ7*z5voF zULXa!R-kVMTy%{mCiW1sPY_%kQvp7QC8*q?4D<$_rgETq-yEH&I^LO}ukoU+n5QMM zY9dUBT>Jo~T?L{%EvAIB&7!_kPBUT_zCW}s>MYqlu^WNNhl63p|(2l2DtVQFqZp3mMxc*zcu0k zIz9Np<^on*qy@cYpe-0PKeEl!ONi5pqLc%rZYD(@W2km_~=_c~G!O{muITHYs%Ui(kQn(3rZ7EbS_t^m!atDW%l)b>yQ+FlS+M%aOPSB#WP4 zHQiTVdtJ|T+RBk9Bgmnp)a$^c+$7=YFiLru5HHWOlP~xX4qBWD8yQAw~7Jw@bHdVy4QMfdEnvFB&tBb%p#FqT~Iu literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/__pycache__/fgvcaircraft.cpython-310.pyc b/mmpretrain/datasets/__pycache__/fgvcaircraft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7241c7331d3a02b5733d1e3e65c6ada3ebc402f0 GIT binary patch literal 3586 zcmcH++in}jb!KOXvg9V6)^5>6Q1w z%nWrQRzVTD2v8t>@D){g8gdKBiCkg|tC>&g_z+6gxZZM(2` zcqOXFUfVNkH>$<;cAXO8p3Iz*yM$M{_n2@`c*piJZ8vyrn>6cRzzw8HbLW1jbkoY` 
z`$GG@FcN+z=spq&&*vh5_&u4%W*>09jrE=N?aiI-FKBUi=C^?o8-a#d1N`!xyT8A^ ze(%BU2kUos{M+k0n|I%NaBp+Fi+4>wC?`H@3noF@l#{l_Y1`(Oa6;Oy@LBE%msf<_ zb3P(H%3WT4Jlh6IcVwbFxD&}|JL zt$eW8@2_ys!%DBe*L^c6Z&_J;&p*2fFIw+uAsJ8ilPC>%nDkkYaK=S9jR#UF6{g9u z;R1<)QW+1W=;~A+GNt9PtA|psP_aOzELR9499FuQezk2OP z$EX2epFer>`IA4wA44LO?E>^^2@2z&FVqF#hvxJc(9j)bI@wFE>IX# zFBtIm0vQI0_O%Scq@@q^cSILTe-CUVz86H_{lvE;mrSc?(3=vxwClR;tJR5AXcAwh zsb*Ke9h$2Q9ou}L`AIL87ks_+hBIBNK@=7wmRUN`;B-M$5*vGtL0M+12n@v*jfxkS zC#;}DEP|km%O$K>qdPBZ(C=c4ZHQhlj3BDgDU?#K&4VBwL}E&|wY4>d$%e%k3WY^W zZWIe+6Q9$8J>dio8gzfm{4Jw^ajaj#ZKJhYOSn8AqlNn~fvBjITT6x#$g zEqzIscA$>WizMhkC!nA}MM9@Z?x0rMFI-5wBL{O>?iSl}9_|AcydX(T#~>TXKn)r2 zOVnD~Q0KLq^`2SUmk!ad&)lTiD9)5{$t zqMjjc2;}ag{Q%Nr?ivA=d>N@;#_1K9KBIZf_l==_U%m=EU;bhRbLPrfqM2SIj}X(! zq=|7QZw(IRPvGdUVW^+O5B1YOC^9OFi0x5}YgI`Og!U{-Z4bF(V1erz3q^PSW)?-ZQq{aR*$ z8+oJG??Z0_DHm(J=}-DH9YS7*21Gw(QP2^QYDttKr%WR6Ti^*$?j?4&&`Ww@+%HV% z^|fyfG-sp(z`{0Y_S`{3%LUwg4ZrK=knxS&dwX+d{odA{ck)V@aB%?s7k0tXg24bf z(`HTn6i-wJU>;RQEH!SZ&ZU?lbXP2T7`<7PRb82idr}1 zeI)-bu%Xa`Am-POJt{i*vVCox;Q?99Cd~#0BC6B(^SX~wJ%l{r=MCR~e;7n1r0V-T z?LxF+x|CO7%5CUZOa#g6xQBX>zeK=n8Jk@ZMP+;z{4zvA)0hNc5-#Qwk2sdseAV(^ zdhJIBvelf;U41x!TxH1SUL1?0A11 z?DQPy*Mt+*W4Gs0BAwI58TpWix^SNp;Y#&b6V; z{6QE=e=q1C$V33o-FdkC@bUY*kH4X%bn{n%mODWPH0z^=ARcv z#{Xo5>^_nFH%Eh12@Wh?j`GT;j^}ZB9*;+(RAoE^*-tJB>zOov?@5-8^j5cfczD=R z>0X-YPI}Y{`knDX_xDeS!>#~dyMxk#F09wx{=}b6@0hh74l=2@NDq@J4Mdm>d5{P$ zWIv5ZN@^XZ$%ZimRYBOe2$k$-sXFF5Q{#R%R+5LB2bvGk2v*XS6AQojrKwwdYpZk> z-Cal;i;(s8i!Z)@@dx#Hx4fWQ1GKR!gX`1mn5XSZvM_iciMSKAym!YC((Oj6S zM^RYn+2H9YgB=T^O5-f@Mr4EQ5?GqAWhw}hwT-zw=zfoVJ(w7TkG&0 zIS9rPpfX(&sW$ulqaYqda>>E%?QK3#Y0P89ky)-&S)+Lv8+?Dy#^W?2?9WVpu!N60 z^RPWw$8L9_VI91~I6PQ4)4}}p_T0NmTIVA>6{;7C=0g~gN8@-8jz>D+x^j^7dz<$+ z&k6k&w-H1ti??v%1?%SW#uA9<|KT3YzUbPL!ud-7gJO*b;Wnb1#=ranV z#E0rIh0Hs3M15M%E#V$mQ#YeI6%FD25hbAQTc>tT^ZFTnv#gdmIXSWR>!Ov{fbZn4 zXnw(R`>*s*6l)LW=tpf<)Qkg#4Y*M**i|q@{bUu7WqH2hvCjCOC0Vo1%jYwWeBQJXLPJqVd5q<{WcJndh{A)FCB|nj56xdD;D&r^Ri`IwEVH} zteW@l$@Rra?aUnMV|kxF2rVq-zlNs&0VE@*6oGL@1zjamT7BDRbc%ps!aA_jEdZ1a zqgHa;@EGvcfLAlTRl{?Dw*kDm;a#m}08ot-fa$5+dxQ)#x1f+P5{QY$h47pxtkk6PrbZ$+RQ!F_~|e58bGx^xNI7+z3{vnRZ&QiW4?6vhIforFTQOds$KeS{+LHI%K%m9mPywfQrF z#Zh7G+E(F)@vwxHJIs8euiYvfD1i4z$3-143(zkCVz7a@EF5{1!QD&UfDd&OIiAr| zBH0d;tY9MSXN41tMtDhUsyB?3S->|#UWwZXE_S^}3FlDgUL8#Lp;KiT94;DVRt9!y zZxX~(y@_kTg<4sdKn+ERmgFbMyn?2i^y{OJ4}que0J&zjsAqYA#Pa7_h)O^syj?)& zYqSMFW=&R?5O`i!;XtJutK=LWEzph4;1Q3KKs1ui$(PoUP9c54zFL#FG0{NqRKS%S zDQ+x>07EgyY=bw!>L7I{uU<;ajW#uhxNVi;WP7PNzUa7T)cdHu4`yfwh-b5x%%0p> za%mw2N--6-Jxm!GTHh}`A2ZPy(wASfeE-vN5LF{}-xp~g*fv~f%<)$5z-)!St9OAE zD^=R)%P6W+M``1aQ3N-~Bq&oG5KS&&)~(y_>u>zX@Y~J8$&N>mHA|g8MV)RO%VZcP zvP_BB7dfb-TjlKe; OMO_Q<0L`&)(*FXPBc=lY literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/__pycache__/food101.cpython-310.pyc b/mmpretrain/datasets/__pycache__/food101.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5af1c6f560ee53ddde7b29738351ce0c8ccda118 GIT binary patch literal 3430 zcmcH*+in}jb!RxcT&^fZuv;fifnuOu6b3388!!T>u~0RVgA}O@Sb8E-Y*w5hwent= znWZffE1-xBS)b8mBZdvEt!QXXFTWuWC&kO9m9f8)W!hg)~v zx#PdTxwrkn!$%)(?+);qiyx|ppWXo=tv(S}p9<1%2r5}f`nFgUOgh4r&X9d>4T*3> z^Vwn_ln||FR^Jt#Y)R*~HKbxew4brQ_qoNa{sJhqR5Tx?sQ?Kke};SZ4kWMEyb`kO zc^tmo$#|TqjAuac$rTd*-a(d)^?I)-K(X$VP={&Km9i_dgIRZQ&>K973<`ACzC6lQ z&>O<7KZ9#~8=v^+SK&>Y!%QkJ(x*w31|m#GJV*o=a*)PjB{lM1Gh*Onz>$kk$w8K? 
zDc6~r46=!mJk&hUe3(X2`czjc)KXtNx6aqsOD*yKS~8lWumDU8AU(W>RNY2x=5qrgHnH8tL^p3v|A((lq05L9}$% zIflGhpNG*fRr4&Ug5})IdK`u2;WeI)Gl<3@s@T+lWk}YzE>lMH6{E?DiQ^TNdF7lP z2H2Jy29pSKE3HW@(YKF+cpS+K;%{thaI7yp#$+fhR9&kfJWYyLaXwcW&lpO-7Vv(p z-dnh1%yEAer}MO21@TeQ`>W>UYUf{bn`5@Be7t%;s<<`Q2$IR$9HuusTm1hAHGWh6I!=L=lX9QxB^syexKNKYHshtBA+oiyib57$6NAJbz; zEF3p8mf5*=LJu9WnA^B(sx+l z21#O?xoYFhEhO>dRLEk%7!$H^7#@9Xb(#vECze8k7R~xW&^k=R#2*LQ0T_HTmP)k{ z@A`h2gqiPm+=6Kt4GnX{pl}{P4WMEbj?ql3o5=Nh2)+;CYf`j)zbx&(x&=Egdp#^) zz4L-_af&ix*ENX^DbyWL)thkY=P>k70l3s7%Y?mh=n{2d29Pq+rj(RF_BU#Kf1NE? zN!_g_AK-hY{!l3g#``dI1R%4()8vF4laX~ybFyXq=I!G~-Z+K0I3piJOo8`ph#~p{ zoHBrR&T{*#aqPg{5sll{X)|YM1fs(=@xZ>I$8JtfTe*7%G4W|Drw|qP?8hI$ zOQGt{rpk^~I)O(6JO{D^9tHa{(%qM&H)GgS6}~9p{pN=8sPv6H%6voB@e~7OX>{Yy z*Kzp+QYG4|g6sEbK55@~B`U9At$PjK}Z}?6lMm@Ps`MLZz#$!>3`v zaHsG}|0O{zOCfNkR`8}D;W-v2(1OspCE&QZgs&k}!hB@)2=+9}_$rABM8(hX- zEz>s47@6IwU3U3k;XvP*sN|9ZDKNre9C!mJf`ALGFRd4J1WwEen3>Mr#)LD^H?HJZ zaU*6NU=PHs!>3^tZ{69AYxTL+AtuZ_w6qS}PsQ}AmY!knBl`fv&=^&q;Vagd-Kh0B zCjpLF4?1nkC^Vw)7oLylIe|>~i?;87Itii*)AW6j4t!r>p;JErP&D8TVDdrra1TwO z?jmBbO0t2BA~~t0jC!3C;+8DNl8^`muqs2;%zL8Ipm?o3B2}fZx%YyoMre*l Y-S-&#abhCHY1|_J04a&e8UO$Q literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/__pycache__/imagenet.cpython-310.pyc b/mmpretrain/datasets/__pycache__/imagenet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5f96364e356db37c6110736501fad5e87af5a48 GIT binary patch literal 7370 zcmeHMTW{RP6(+e{l6#Y{P3t0Qr%oGr9c5)XN$sePQQ4La71lb56f{_%lE7=7uJfd)td$VL0$-_W-{^hfZiNT2c-Xbw*lWf^3 zcgj3pvK2?|s5~#*np5diAYV2WyXw?BHJ(@Py3^=1M1fXzXC4bT1XD9B4+OJ98~021 z#m9AM}FWr*NnjM zY0$&TicdZH@NLLYp(C0?M>54u$&@;>S)yf2>?jpE`IXJ1rb1Oyg;%2*RcWO!okCSr zvr4P9a!Tk|9}0cZteN!(jZV#M&^lG2R5EAGBM;_TCJ@6XOAYPC-MB_tbYIe<9{^&_^Zqx z>vphYslo0Ub|%l1yoa0X;35B6We@88JWLBuXk0aJ z29J@i!_&x=BTZSBOs#WTT@(ie#3y&o=PVW3dVRt#(*e^}9v@u&{P`yAJw4)niyzct z(&lN4gQrbP9U2&!OHNvV@9tKxHSGpzSFjMTjn{IC4gAGyB#b&b^SpqZ@dMUe0jflm$Jk zn9F0|X|B`hIdU3=KaI1I(`eh%`B*q>GSBoh1yQ+L5Y9}i7l3x}aQQKY#xc=y2R!f| zE^?|?YM&+7^#TJAH-g%dp%L6Zyf?0z<@P;t1_hWSUUDpgD63FKunxmH9}a{{?17f( z-6n<~2geCZ7rIQ&+xA?Vo9FD=yPF0sHMtesri|RB<4up51jK3rCFEtWqUJgY!|YLA=^uoaxQ zexem23+FE`Ed21oA?d*q~%Y`_r(vzP%@Rf5_>HWccsU|juvXW zB~yJY0=U*f#jIQs!jf5q7O#hrD~6?=QYiE9cLRC190Jzlqt!o^9+!41P^N^H$0Fmk zg*s_R4%GX?9nG8xVU$n~%Vy&fIaGcx{s!j~0FxeRM^(BNP}hgJ;sYoz^ZgN(z(pb% zB6bXuU9v$1@+rNhha| zlsLXd3b&#kDK_UcQAJ0O!@-}3@S(?eQD?y8guQd~c(%^xk+GOXw*_PRS$t~^$Ltm8 z_H%gs^N`G_vZRSs@u(<2(@L5sixSTrl{D!jlphy0NrE!`$zMq2>eKCGX{c?bsznM! 
zmx$hgu+f(*V_;q}qR@izEa_*5E*J7Oq;TS?nz(QCl7m_ZmFvWephTnD`)QKGed zd|%wFG(~RCro?7phDhG<%rUo1oV2hI9hbp=hT(sOvfzqVOY$?hw0$B|C2!L#b9b3a z+RY=ll&;}WU5~1|?s(?N#(Z7Ze=#!bq(;?s)9dLvBQOgC_9g&1U0z>WXiVi z8Ovlmyjcm`;BqiD6kEWWMzX#=YTGaju5p~qVGb2y7+8>=`ez~01X+?-HBl;M%k&g- z&m>9uTeT#8Daq0oN_pn#w_oP{_VL^XjDe5;0VLBrw^B-ea-|GBRiR~IxMg6tkfUnC zaLd4Of$;*{tpV$;bCz2-8^Cgb@dBrV+GDa|4dzJCa}g8y1gc++@eTF)>4Ye6q4mZU4y^nZ`6(rvo;#Zd74Ba#v?Te- zLu}Z=E%J^{VfXNX{2}dG{V@To$$<#=>voef1Ho&8fnv0TKAno5gI-wtQ4h8WMHK*u z^*!76HvNQm7tOK1??GG%w9Ss-oV@V3TqvI)i*e-mKQTwPmvR|A6*7Av+{&xCE&of1 z4*G;1Bar`D2I2q%wBiGwoZ0X^8%R$ZAOcVT8{&ZrpkoY}VY5Zv_bK76W7t4|2mx@& zxeTszky3KZkXQc)s18c-?=hX?{(m0Xfp&-T9jN;{C{IxoR4j+{1=jP%!dn;4UA#E4 z*QU7dekKDW&*KiA&-WrukUk0f2Bcu&eJH0)xKlE~gZ=Z6$ne0qht?9{cSQKF0xE*;RoZR72Tc4Xe9w#m*T*6*xx)IEMg@dn4jbRnfCSqo@U*GNxI3M10G`t(39(BYNor7 zeFIFxzJkQAA_pC_poa|yIGy9SLVgd>HowKmF325Y zj3C2@YYt+2aKRH;4sDUEPz20`VdH`l92+2F=XY0FWAp&cQ3k}G16t$*7AUyQ6>PDS zDB^ce#E($Kt2dX{)|S^Y^THiH*cR?q_`n4Px-r&yz91<^$sh``V%xhEMm0S|+!S~X zP%$tzPLa;=8tk1E#&8JQY(ys@+^(e2mYU0qd5rxC<>!P3xe$99lka1KNSl2Rl1PP{ zBQ$CZi+_OShZ0nJ2c#6^u^gKh+rCfka2*#*`&wi4zbmQbEgX=_2h?%&9ITi}Z0*oD}O)J$^-{ LbX-)z7sS5~G^S^>(!?nR)4Tv(o*TxMt8ZF|`H zFg;^nS7Qm?66GZ%FDOcKcM?c>;VpkfUh|ZfdAvOFFCZ?Ys%QMM%ZK10nPuA3GgZ}9 zzpAdPCaTwM0@t6v{zrf1TZH@@CyQ4JCO?5Y-he?6L46X?E)7_hQJgd12+Xbt&xSDl zQebs0N~HO){D}ONh?21G5n)N|z&N1YiYRZ9cI633B5js62QqQ`o-dspx3?=pku`iT zhF2;I^!ql=DjVz9*S9vWZ#|*NM6;!fZY($41jeL?f)=l8F^R9igCt!lNS6uPHL9#? zR84VQl%xsjn4%1KMVkG}??|7Ds<7|XyCtzCYWGOj5_Qpl-?BI+n(zxWCh1l|^%I$O zIoujXqw$|AgyfIBP=@y-#V-f$K=NxcQJ%cz`W@TmaDC!SP?wA7cIZd0;K@*O4=H1g zw(9vX3gif0X@=G?Nk;M7>S_@9?oP*(o!)r3+mT}28Sbu*RCGi3l6ZAP%F)MeJoKV) zHCJZ7@-PYfmKL^rIs7OFo%tm{f|cXBdicL(3$5ADIWRpMoRX* zJMF_%M-~CPBU(%~ytcW%@$*fD=&*pPdL6uSSk~1bUYk+PZ0E~yqS|ZxIs^)0!G)*5 z9ClN3B<^)=`92UI}PeG?%0y=)8vC?|!#k_F9k$TSif^fijVeP3?ruwgkVU)NS zWgL?OAGyhJMUPxbA3ofISc_)1qx9|V8Fbs*ox_&AE`%4fICX0Gwu|22zkonid{^$> zj+Ef=ZXC!EVDUn}*7MysUfW*CLED8VH}AN?$d?P=yl~+{?t~x!;50U!!gC-j7)2_9 zErHo&W(%+^KJyRx&e-!IVW1ZaVht{M{}7=U;tqE-ozv?N_<8M~`EEMj!lhSpI*11y zG3N_6&jTHGUX%rLaut_Pomza4YrL9krrJgxs7K92mt7qPI{i#- zZo2Y|fG@1{I?fqxZwvaFAD#Z+hj&)eSs*hY%Rc?!b=fbg;?~f`HY@3bkS*yqd7jTI z)6Cq%CUAOL@PPLc-0>9{Qs}U5k+^mRn!%KP4vpZK(CmFi6OvM4iqd_S5{ITZvcxzb zH=z4UX=th7m51gd@@r#Ko!FBlkXm|Z!B|eKkJzM^E(vp&scWg7mTpnCk(y~K zwa*cVfzpr3#Do~KPLhX}grzn7a*~Mhef9|A;^v3ITLIn#c+0^1n*mA7niodoNiC#H zjRgsosOH?MrR7^71>+8N_&u!61g@}jjwIE57(zpr-rwSHAtb@f4 z#(r0!pPyi~>J6lD%c^g~fb_bjg*Gay3=#)U6w+5s{YiZXelj~R!Hy^Hs1M*px0VNL zsB3h$UVP4{3WKO^D+7M?Hpxuv1T#}>p`-s@6k5Fzc_Ew_U{zbMulvNzaz`UP>KVQa z+sD*5k+kG{p^UpW5MoP2g_QDJKJJ^Epr& z>~4*jbpQS7-hPQ5hxR%8217ybbM!_^9+F37LLpM=Niu;v4Y9=b|D}(km}@wCuV`gC zTpz<>7W&|#^Mp*a0?l_aR$+c{)#r$TipFpNOw>3TavfEj8J`AmRw!T2?}-@Gc5xDh z2Dfn9TuSi*`@H--imJV!hlkC%n4dTDu%~JT42PtyLo6?KxM%d&qpocr5RNW>2*e+O zGsYrUDT52>{x=HX4^th29O0fpegY?u^ef(YhU^2Fbh0J^g<8#K?f0)6bq&1#Zj#th#mo)v=b`_Ez z9LEN*t95DCX()SVUPL=e4lo_=DKtE*?LZmTj)fRf=+f+1P9arco|e3TA{J+@Vpchq U18@OzYm5LjX$`ilWxPlK3o+VSg8%>k literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/__pycache__/mnist.cpython-310.pyc b/mmpretrain/datasets/__pycache__/mnist.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58abc1a4ece6f99f441585259f25925d961da9f6 GIT binary patch literal 7956 zcmeHM+jAS&dEXlr3lO}Bq%PFWYdMYuM+5h_dV#n=FXZq49 zIOpuSU%vC*ejmf}@w|fHXMg;?_lK7iS1%C3W{#31BJZK+h6KJ2{7iIfPY?9k-iofg`>;#*BX4Eg`X4rW)%U@w9 z`S;M1-+I+E*(rAVS*HGd*`Br;>R zy0IU2oS>*C*`5ftLcdcqlEOQo>jZB)?gQRoiOHW}Y?Ms3c--*(fH#`Mo*a#Dbe(t; z^R0K^eJ5hdw1=v`|FL{ zx7P3e=)Di#TEBZgIq|KgqMn>*;p0vaI;`PzSc5-_h2zEzN4T5*Bc7ZMyS&qAJKIg( 
zaKm<2@F?O;o;1mcb|dO}p8v$9n;-p+4=d4cQQ%5lWlCLRYF%eq-C#O1S{lnRv!%7P zdWPN{^&HE~y2-|P)>rFRu8e78?tC$-;Cq#wS|Rkr_(6~-0gKrpuhru!kP>14qk2H#zw;)(!G?=+xV?-`jPFo zy8&f-P*6}-+Z154vJc=)s4h}?1-r=6p z3t~GCM?(+$+dRfxFSIW+-;IxL`7XL_>J#mhpeV@;g57C%chh#FV;i|n%v+)GIo{%M zUTJiIh)%E;cKG4;BN?zSH^VRhy*}LJAdaAs2Yp1X(cOK|?tr?f^d7ErT{cF|EI8k(MPpb31=61|Wt$t#AE460TU1V;PJC3vJH5sce)tXJlo2yH;tJTr$z-jV8&aNDp z&6caYQeNhKd5Jqq<*Q4pPI=XHn=Y&H)urm<^SQWEe()VzW3!dTRd=aUTU}nRIE$>j zxac|6YS~*|zFPIF!^wYoXYMYpRw|2(Ejyl5Ua3@EUMsUo^Qu=~q*arwq#2JACCNwv ztgYWmatA_?WF$8y)`-gzYxJnem>dPY1~r!Ch-({=R_>A&9R0{%T1aC^(I?25%AmP+ ztbsPL9x2hx+pvZM>vPz;frdQuMdqpcGZno`Lm5EtJ;jtGn8svXdL+c6Da% zsj4ziqxnH5nvV^Zm2(Wt$;9SUWh)DtW)1X#8e4-5%YCX2OqTymW{~}z`b*4Y3iMWC zOpqqx%yiFrX2RhNMxz+;O=io zu9Yf764URrY{;7Z*bf3)vj+=M>Q~d=hvR9COr4)1GC936+C9`B9-JTE-l!o7lVyENsBnyG+ z;DAZtgI*{0+x)H&p%CzEzzWbzTqITl!~M9?NDR^~#e8B!Jn+)p=jp*Jwy~Yc#}V%y~Y)K(|?FlW~;vI$9jw{!m+sdCXZvifFQx%d1nW z@xs&!x~1k-3%QA;X_~63_^QTpEt~(h{)AN1rO{PfGX0|;v8Rc28!5Voo`JFj7M)Xe z)L0#WRhj;&wxh+`uD+*;+CbSg_LQg=X9gM=O@pG!f^k*JxNx4pxXP9#E@ND7Pf?Z5 z6#7lH<+sK#COcXkWh-TLtX!ax!e8-bdTyj^5Gno!Zgk6Y(^d zduE_BGUrH7)u-B1b;pQj4|)p6dNM6#$Ba)7j6vq9c1QX6%tzWzcE^fOZJi!aeuj#bo*m>!*dE3M6GAiD|K2RWZI?f|>)3r+WP>@G^9Fy9E!$Ww( zUTf1%)RG|P6Qm45oD7S%yBuz(?zdZs)ruREe8qFAERh-|xr0AE=|^xR$)^>e3pX+u zZ@8P>eE^?Ry1;q(Jwve;8QS3ol#y6Mx zy>@pyF`X`aV^++I1zLi*g^rW@^*QOw%N-36CQ5Q6%0vJ~>9gGBBQM zI~iyf7{TZz7%J_)@^FmlkUXRR(rtQj$ztiVlXar|K@9b4WHhO?)WlLUvN7lSCrU6Mu&66QpPY8PTU8 z4U+Z3__{wiq|WFVi3!Ws6P;re0+3R4BuSxDC@|t0aKoJPx$=e9Qo%_CoAv(dq)p{@ zZNaYsRDiuj0YuGu*$ctut6OHq^eON?g7w!d+sBrmsU&=$YOaUvqk zv<7LIO0RR+4Kk9WB3-Msby>bwoE10GmgF1cANF7l8cCticnH-#Qbi4DDHmRWB=;fb z5&;3=@2=l^Ywg~9i4H&}h%ll=9XM(jNT)!2K#jzglK6rMDE^Yhj1N5ymj}Vn>qswG zr6KaUrcs(fifD67F*IY!)J@g++B7M*3=Mi<^aq~;Yz`meYg5<$s2SRSXc_I_HFM&* zb;-JH70%`at>+>g`_ZXyB1PAc{ZCOnE8x+`lfpeG+62y(w0Bl<*^Ob#ylhJ0_ctJN z-w7ZZA%YN`0u&2gy2iYO<3QsQ1>Q*k@(_{#KQ6Wpe*af-@ehZ%_!v8sqU!u(-i&;J zU*4N{0ULx9%vVZI+vy`L_c)SK7eP{!*fqA~HLvm&ccsRbS1WAEtul{$tK6;CnqHZS zlXBSNvRkXH)>fHUsbN65(ya2DQ+6D-%sj8j#c4UL$-O1+I?d{0*(V-w)m1T<4r^A+W!Ga4tGbmHe$}ZgI!m=O=e4TKs+F=hhhb|#K{xRgA5r!(Wq(B( z3A#AcG8AzW4ayD;-Jv!oQT}_RXcn1N4--e#L%|R{sDQ@leZ=lW%cPX{D7W3@3B7Y4vD zzbfo#18sMFPx*!NAux>rC+evV3}sc6=Jgsa*3-@+aO^XTvfCQd0k z27S*eJMaOc?^5VYAPycoIIC zdWY7{;-*udC&I_)UbR7Lewpy7t3T5N*CzJk-}R76#a~O~HoLF(eav#Xbx`%eB%*;2U!F z?6A4T-8hYuC0PO`ZKvDUZ}^Le<}XrV356vTJQO@xsG(3Rn)?=bW}ld6pHR(yfie=P zeX`wa67TGjyReUkOOek$v6*;8*dR38xtgWRE1W~8{}uM;>R>fHDz43!dd zMd?>0)QLJ({1<_q1`4Cu@Skc}ATdMFL&Qh?6qS1bd4?Ny zw~MEe?4}braV(Mx|K-vM!cinU$@Cz@HSuFw67VLjNd#3{NjD)hRb5Q79$?5}VP#1? 
z#CSnU192kon7n?H5l*MYlRQPxoA{>#AYTT!w#%UsM99*P#t4~3^!|zQ$mfrtK0{it zOAD%Ak^$=XUBEh)Xz_NUwi6ZVsOtzswNje?jD{0o#V5%A5h;2dS>Bq^CV-*RKkIoO zv77TRrVy>MfUsr}y*Z_x$KOR&&O62u#4W@Uw`nm_-!WK%l+&*!jt6O=G>l_Pr)}en*Bt{T+T3_iYT|c8<^~pC>xVVSre@2Q( zmi&VlwCf^BhNhQipz+#!v_>3j(SVJ10{IgD9Ly+mhBxW)6dw zCBa*?#kQcZVp3=#gw+_rFF|ja#+hQYfTQ7<{N~P-G{)nWohF#1K0nHWXyc?npZ;O}t;38QGSb zoRWL5gYDB_bTMufY5!A(!Xk2%~M$ibBUN^eUpb3}DlP~s1k&%yhPefD0{1+r` zM{h(l%%lux!}@6;ZuFwGf4Z~7k~9%{j1=e3 z)&pJ9u^tPN-az&_dI ztxxfI>;K$UK_b}Z`M+GvtFx;Hx~Kvu;aSdXXrnfF*zN+A2$;ggtc+|UkbTo;X-1^u z5nkL!g+aWWHkOI0nh~`PruK(KAzR;hNQ7wVl@7ZnLXHKYnMv6J_*8hd^;KzV*fh|i zpVcF=KFSLrbH+&d>@n#D(-xLma*?w{k2*P-f|n(AA7+lVWj!Q8n&*C z0;Od#uvz}MEud?J^7`T61E}3ilTZ$r5a1v3zKkSfMX*_>TeLjL7=(s19rGv>>OFf< z$0{0>06QN_P#_jO(JYS$Z_mm_B+w|x+?Ezf@OVg;5+)qPchC&fhhG`VQ za7+}imgiQ8UvoCmrs!oLhPk91cqH1Z#l?d3= z-F+6SG-ff^C@jlVih5|Eh+7UXA)cHPN*n^2jSDO)))4rS8kHEH$xK8NNNtdeRg!&@ zyC9phS%p=C;P|9~&PrGeWoGZA=KJ%fx5+yeNL~?8fS#~yh|R6AMF5u9|qUHQGTpIgl(B&!AK#M3K|3+PmHviScmN;OsBCx8L$}?>TxFZh*)7~0ih%sGq3_@=<`9p-Oxn;AAp$CVPDvVotR#Y}$iX3#R%|UsPiol#aj6-Y8U>6*7=0=; zOQce=BxMzInxA1oNhH@BgPaDI<}lv2_$~_50_ZWn@E}5HL55ZYDooi6tC4jeVi{;_ zml47oz+n|CyEw7qbNq!;YQOv{z);vo_aSsd!g8gwng6M#1)&e_28LGl6V?F>b zWwzYh-rg>_G~L=~dOFuF>^lp(m5II#$1LG7T$1y=DVX*h#7R1CFeFRui6J(%8CZx; zWdtqgp^?dDjCQX!su5TUh%lH3Xq)6T0lPhF*8tWs%j)1)O+2uFy zeGha>GY{~^u;O{dPS2aHdLCqc6w&pX=RJosPM<7$UXb=NTt`!&$h4x`*mz5)t2EZc zX$3!>Uo>i8-?3u{n=6y18E05q*23h=BEyan1Hh}sifTVu>p==FO3*V!A3~GQ^BL%S k@(``bl50Xvk^jlc_xX$;=p#$h2j7~zR$FtM^{?E20atbgIsgCw literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/__pycache__/multi_task.cpython-310.pyc b/mmpretrain/datasets/__pycache__/multi_task.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a598ff9344314768e4b790724426bfb49bc4c21d GIT binary patch literal 10726 zcmds7&5zthb|=~FZuZA~NE%DlT3*+Vos2q>x_7+}0!%Ey1AQ2F)?>jEB49QKo72Ua z;fURA71@%ewc$Rf5=ic z$R(U1yIA$A`0CZ~y?XDF!O~J)!|`{&{inUlf2?W$PB+EF#LWjd!@tIbYh3rW9rdgC zjh&KCWy3f7<(;xDm;6eN02JC?lH{n}0qG^StgH+CAbuIxAaOFK&_S9sN5?yu~u zpltEl6K!Xe*LefKr`(m3(@(VzHQwY)Pc**t*m$h(oZMu2 zgGa}uFcQK*x40C9!?^A~8ae|$3SEIGnnUpYwI){3{_Ne32!g27?+=9=3C9~0i`%m@T=*$bWB@M}FTjf5xJQ6hF_u zgO=0$4SoT?XZV}^yZBw>Z*hj-*SGU9;Y(?2SMZzgc*U9 zc?0S$N85G%;2}M*??jvG0qr*H1>C*7>j%Aio15$~3=ERFAyDHD%`A-*lgbmBd{_3gd($&VXwb$ zlY)S9-Dw{V_t%Rf5y^gJ`wpbCIontESNx*)_hN|33FKI}ON-U% zA0<%lc7Z(~5GZ0&xG<|;6o?}UBcWAvLZzx?LQ zpWiRt3kG}MK8pgjJMw&KW{rqinaFa1CZdK=a`l zfnc!~7YJUUez~Q^IwiXWyp%-Z|9|x_U%v&4^gmNNsUZaw=rS*4@a&*i_eY*^IYW%l zgU1Kxl9LlMM=VmdR1Y()%h>2ntK9FaO%kbFVjl)G#cFDh*hK^{txah_a?_;TQsTMI zK8mCc?z+tNFk`_GH3a@B%4b4j5-o$9oXtg6q-I2V8Pb|{3sXwHRd$iA>m?Q>QC-50 z`v_~rhzKgLJYsmvlA-V*7{S2pPV?YME6pB)O=-On3qh`Q>Zo+PxsOb~PQgxQqM_-` zh@#D|yL%2I1-Lr_+YSb9zA)KhNm0(tf=qZKoju7F(SuRp_Q<26>1>2k&n8$Yo5}*! zMR{qqss!5Lt(4QamhsT5BFaw>DrXs! 
zL);)^QhJO`gN^eLiGYzVPDh3;>p!FV(xAkVKe0;_?ZgDN{#buX^<}!Bn6g}%R8Uim ztO>$=lxk?JV~p~|c#5&?23L6uK^cx#Q>H=0Ig|zEM&f2M;}h5I!@wIzqa!GlAzr7; zcj!{4%XwVlWj}DZ^gv|5nh1oz(W=H~==yuA?^)_0pCm`2zG^5+>*RTp*s^VJ;6=9m zH>eMZ&AL@Dt?CtH)o9{c*Vpv%`B{gto%ZTcx+OhIC}c8j<7T3BeWZyhBmy}^I>}Hh zO=Mo`&^Uo)4?*+PI5sAf+%TMfL1OD;2mxUpO+~uCi4`tb$)VfxUrB%cihfKzJ=0&s&5A^uh9P>iiTm)XtsMne|Esd zM42R}W>H{{B(sRMoUQGasQ^f+Iq?QAaZLi8gxe~Kx2T%(ggXtYcDc-}%;xndx%u}v zLrMW!D;3?+O?}lc_3<0?pv?3qZa|STqft>yK8IDp>opqt4{=fX$3x?(4m2vN*9?_)2V+vru`M%x_OXiE1Y{{V|kd=!u}?beWd3%n}N0(SG0881SP zSs3LriQ*Z!(Q`N1Ek}e<4tngAACj#pRwg!k!SE=yxa$s8>606Gwm#av`Dt7k4PjZ` zRw*v^{7|ezHbPpH8OyP3Q_h;60t>Pi`~wM+^7YLIEm7StjqzLaOElG|M=OvX)VG0C zuE=jtkShYoAD3X4OHt{u5w1lh*P}97?w=#K{}q0}(0OT6dZ3F7QH3Ls`h`9zom6?* z(_mN3Nr{)vYsY1bV9F8R`xu#hi~!1?YhNL64|;=FVg3IcG=`+P2^tGDXw7KM6ALz- zP@tXE66;^z*N!VugKYnG?elm3(m1XjTjTnqa?%9V(!_cyadTI@f7jHa<)feI+HpyVOnqM$>YbccNCY>0)(r439u zJ4Ih7XE1j$#y9Uk!}6Ap(T)hb%d|;euOfoX?`F!^zJw4yV*BuLpXuKd-v!FX?@f=V z`t@n2IB*`QG`Wffh{wbOZ%d6(taM2QNCT$5^8$-6V`Y2%F7Z$p$PxQUgAOA0V*baM zsgH^>Snt5?-D7)>=ZB2r?Z{vsiwP?rJykDq`)vG9>YLAsNJby75P{fatyTD1coSyq8_!}{MOt7CPS|k;)*kb z1JC0X$(?dzNObWs62Gn>qp8Mkel{9JUSEyfs;kX{oT8mmskrUn0l3Bsinx(aDU=Hn zFA3*i?j*@|j&(1#(&|`uXE&NRG5EjX46ou+uj8$iUeW8%EyL2AIG0PIS@{Wn3+s=5zD8^;6@yX)dsP(_Y)$PDK8lw0j?;K)zevU|zw(3W65AB2p=FnJyi=kc*T0Lw|-cv<>2%#pj=(umkLx6*{ZFXgeJ-qiVAm&>H zKQ};6xlgIZ?snm2v{Ub++s)Ilw9n-0rHm*g5>f+zBnjvPjhh^Ej)v&@u|W(xFBj4WsB z;JXwpoh+n&8I$$;&OZ4=ip>|ODs%QQmCQ~qSn90p)zOxSnFRc5ra;=wmvTMLD#337 zyHrg_OBaL*7+4ujZY@}ghE0JAnRna%qij7I;|mMcBO{zbALWiHUn9}?pD555Du=+| zIqeubFILESs?iR;FSY#;F$dy_3y2&|j_+^|OVp+!NYo&IbXd-6w8M&|{-LBsHtDcZ zXshxnq8C^X^s|s8Qx6O-Zz)pGv^Xtl%IVg0er8~04nmo!Zk zzfe7T){hb4py$#gGGBRuX!pAIm}Xevt0#>~MI!Lj*t-SqQfV9Vr(SOs zaHNK>sB*~3ZnB&5!;uJDhFkcpxsr{o>uR52@Cxg}rsQD>^9DD4VQgr>gN^U0or;_`&;8aWeari5t^iRz*k^K`}=UJ;abm{ zi~9Q%TOc_zym~7|=$baZya1t;*|qKV`!s7myoy5FGuG>^Cj4%@=R09&+mzZH;FY6o ziyOp*Oa{J3r98PP~>UtCRYsfP$*l*KOTIKESBr7ax z7_4y0XwyC9Nrm7q19+A8>O;B6i8sqN&H$`)XmXl0t=)f<_pHKg98awMcE&-Iuv^ zVGGhp25cXD3;F=0WBy2g1g|~$FBE9gA>}qU5CStC&XSyOz8P-0wiY57Z~p#cJfsNy zX%@GK3ybF<^$(yp;y6Q*nMsCo5)o`{Cv$Qd(cFz(yQZ0!`;iZ}%e^ef8&Lxz;V;4^ z`WA7Y2j_?fqA_>oIBIgdk9v)3xE<~Fu%^3dt$IY?ON3(MG!yJF9{nf^uG<-WVPje5 z)(?Aw;fsS8`(GdIUt>58j^ftNsO4 zf@XM%j)1o=y21)y;yJ!TihvKYpOGmUqF2vmj-m^9>MZfhnF5|JT6hM_l5oIvMN%}Q z;P<*{O1VEkoC34KZv=deF0$TmfG*Glz93_VH+kzkP(ELq;~|=3ID-dhiWk9@E-^;O zA59JC?aBIX8FQP^hT*oagEUWA6|2`eOt~mjI!=YucvE_iiZqGppa#DvMnY31PEDog zfjkk~{U*+8n6mg#WI9L`v(_DU2wczrxero*2`WKT^r}6>muP`5@g?Da@>k&McjqBm zlAp;j0i0w)Hv!j#_NyX+A~%l8w6G{4IO#vpIDPBr)}3AnZ6Z>P(^KtLc7BWg+T4<5 zskZVQJW9rCaofD9YrR!uKgp;4>P&|N!^)`4tL@JW;sBI~2V!DJ zASA>OFvTt0!ENl3@Sm1T$>hO@vfb;3mt?n{wCA?2AgPgA!J;rC$4fxm?Gjca|iL&%#HY{U=2)4^bs!#xy84)6heNk2$G1Rs;9e1)_{IEbJjB9yt_<<8E|{N``k z>9__wfBpAAMsCM2zQo1yQ-{T`;Fa&gzzjwrBPM-9aBfCsTQN(h`i`F4Q8RY?E-|=$*?MBUW3UEu9vaNy&PDBl^xLd?V07KD;0B{h3hUias;*hw z7<1*1!if8WV0gk4R;)#^+eA8>}(%yAM73;d_~H`tzQR{?*s})1^c(}pN&$% z_V)G;e-GT=-ag!Y=l*+py9YzOZ23V&@say5aHCI{(Ki|C*O8MG`k)GHJ~a9cb9s~7FB&70wOIS1)ptKOETi87)z*s6iX4-_!)=F0oR(i- zLGdWaCut?Q=eiW0gGrX9LQw^zA3TeezICKjCO3P%)6-K=q=QsRFFo^up*K0{{qezg z++%=8F9ZI)tq=Ug9eBm|NO3_~dYVLOz`|rqgM?AWhiRM%E@hY|clG_q12Cm56nv;s zaZaTYlcAajPD4opNk?fErKhrzeMwwjtM)Ahn{;!t)Ds1HnqC8d8(%#7?2AW#fMV4Lp(8({M^E=!!RuKa&tdE=K?EKv0Y zcHc29MPnERQgT^1b8{C>L{Ym)QP-|8tm*Pm4i5Zlz}tdX{sjg`m^n6QR&M6j14G{Y zt-)%!^(j=!+=Q|IxPC!CBCu=th9bExE}!7a%%KRKn4+5-$L3`Xi_H^rR!*V#Ed_BZlpf|VLe(!N093Jc?K`XXk6GN|K; zRCK^;n!r1v)7D)&m?#Rhm~MPcHV&bv-rz|vfU*q9b{uh)CPiy0Cr;aQkT7*koq3ik z>@t6Mt-=9vUBN{gWoT(EA&5ueSjm!bqY)JXr!5I28Dfc8bB&6Y^|ddH<9b}ID8+5&q>kreQjgk7q{Tc 
z*FW!JUhFMW%kmV(g;FScsctU9FjN^=UUn{Cg0pYJD}M@uW4h!9vA(g*HPeCFnrXrR zwrP^`XZ_2pyZ@ZtTFD09T#F$#H1h|0G;C1bhF2cJpbW4-xgaw#HfCl{c8ovVnbq>z zrTN5o-?}JQGw48tc{q zV-MTD%GbQ@5g^FXo1B z8)FKQ_5H&2F(xMv^?uRz{SPNWRP8i;pQS?x987wJ^Sj-{?Y;fq+%Ia-W9Wbo9&89i zL%fD0ohsIbJc_D#E2aJsl29BlpmpxV{NNZ6H+OEE&I>QC>7D&Wpik|l~nHW9l>Xf89u*tAslc`4B940>45F-!_g_xqyqByjv6*|;r zZiYjz>6p)@ZR)XxX2}|?IkxU13L0&)7Hi%`WB)lCW7?uy&pP4ObL60~4N-Jd^E6LV zCV*Pp&~g2;;8yBDl2H!zGpGAA@kCAd8`z{Zv2Nmgd%#BjUDT7zhJZ{V(c zuY%~6WhE0`f1)XoI+4?Bq&H6&OoGR8Meb`d4@tG`9H4a!zli?FizOW7LPr68@sNmF zIwgr9X`TpigWtxHn|=pX$*%M9;K7sgMAw6y$SLFG3A;G2I3=(*D_98$%u2Fn;P>Ji zP5IvntjsbY@X|j_20go;%d~ z(V4ry;P0`10ige#foid*l9d8VD7p^UK)kI*M~iI`FHqN3+egQTQUB{ve=s~c84Q&x zW;Nr=k0NLx8AV-BIfCWm{{v9oBKa?{YDSTXI*Rx$fd2EtT^;joQ9!?&cHC5#YneEFvlMYKwR?A!XdPPX!o`=j2`;PVq@!#(D<-g9(2b9gu=b3?KwNIjQyayr9632!$mH%jw Vp2zw%7rHra;kMPrZS2`Q_+N>Dyxsr+ literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/__pycache__/stanfordcars.cpython-310.pyc b/mmpretrain/datasets/__pycache__/stanfordcars.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cdd7826a5d687dcbc1dee7d17fa4a6a25d7a5a20 GIT binary patch literal 4612 zcmcgwUvC@75#PN#9*;*#ab+jAS~$2mKolAR88=3g3btFtQk@v70!Tt12n6R7cS)ag zf0*5)WD&=runhR2Nc-Z40*L_$=u5sxKg2!sA$`gh=tJ5-o!PsiNXvb25On2R?#<55 z&dknlcFytKoK2wp_4~i~uD?#mKk;R{Rp8|}@W=J&=1Y}qm5aOZ)0wij-Ns>HpX-}Sw~Ip90ePq;G>@xITvbdrPEIq(i)5IeExdyzll zN~=<)QWs?T9O9bY=0bb4_R#0Ro;NnvB|u44PM)ho`*O71)5zk=CC-zpW6`pvVizIpaX_~)Qk z{BU~a@yz_Y&~CJL2mPtF=Q)1yQjTZE9<5euYIRA)Lvg(*ie)SGl3e3hysGljO@!x1 zQ}WMi_v7k6jn5sabVHta%C9(AwwYruJU4X&B^&;}$O?A8 zD(8WpJPf=Y9^?o7a_M~E3-Tjug%d~S3Y5mG19k^o^Z*_%V28<8@~DWIRneC#lZ6Nd zSHyARyaO0&t|&~a_cL$-I$y{*GYH1LTdl0iQDB&Ye3>PsE`UVxmxTTgA50@k5mk z%SvSoZ;QX!v%4Lg^KOGEt4fm zb&TDD-}!+a0TC}cWJ>B}yC3F{cW>U9NI;vQHa1c%HTH?Tcpu7~)cBGxpg>=FUOA$VDa;a=q#D!q zv~eY&sj3jZKq@@D+OZB4>qHxy>di>a6DrZG!K4!Bz@Ksp@=(P!L zPSNJaRiPN4Acqi!s)Bnb{D z6%Si>AX!0CiE(+0tBku zxa$r@@HNe9uB+hWy5cfSeRs2s_e}fj9Hxh**W1$$Zw(H`Raklt9(f-+OS9<(YMh#y zrOnfMtw9ZHQbU8Mpla|7?>Ga$8pS{J)HrPz`55N^qgCvGk6tfR`&~HMR_TB)ejJny z-D<*f7asY0m`TVnMNFMgMi0UfRt4IoqE(L(`L$F>UdRPjYUHb~5J0{S*sd|_3vFzt+HpO#Pbej@4nCzo z^IECDrj6@k1GKA8YYlonJ*VAOrCqse{S|io6YkoyH?legRN`SFA;lc{yK_Ns0Hwdr zq4>_@xT2zIw18;^A{UAjSmfYmp1^N1p}y<1%<{v2zQF1yt}3sxN-y$4-fUzh6w)^b zhglWBIAE+IH#7G5q0IDvM_EO9QJKwGC)Y5~Z}OFU&(rbojtqrP|>WiP3vK78-x{HUZfgo@;$NhMNpdQyX2!G|3;q73v5+5RfLJq6BBd;o|iGnYT=$ zt5*@<5)J$_OGn2ys+>g1`CNO1I81ks^ zM?6pO7tfMsOYlB~gE$lhGB$SL$|-9&f$qRtu&fnxD_+Z&a8Veoww>}UW@)8qX5Gr& VatARbFF=Qp2C<~#$<*JV{{^O`afc(1+Md_sXx}ws6mxUH(X}jKEi2YDYUeXU?48 z@0^Y2=W7OBfBVlr`rj@X#y@c~dzE4GOSt79VPFO$ff14pA^2mPjd@WJ|K4^NrBP(!>VgMk|z!i4MHf(L*{nVXuN9JpN#RX;YeiXzW^P>UvB1ReS#bF}2M5fF7 zWR}~KGG748SR7KR#IUD^f>U2oPttxI1o6HcORWt~U)i=oS5|Vpv~HS8tBKQRPriEg zGh9(6Lcuxlu1wCufObV3D|#Igpt(XZZ}ohd8U0w$P31-KpV51skmEF%MvY7YKWDK_ z;{?_Af`ZvNWi-h$m3jJ0x~NdOMV+9F3RGB}vO`PV=Y4M&08YeX(n5jt120ShKBL6l zyLTzVCk-*nHQ!cotfYRZ;U-6#iQc$H6S`yhR*NLPJ~7Ga5qfHtT+I_r>KlWfcVfGaDaJ0`v}bD zQGiK`MVWem+Kz{VZCxcINKXnSTGD-AZPP`>up&$`Pl^JLR?l5bCF~6t?Q*)9_*g$6 zz9mfI2?JShk0E0@kpOC1!bNbneB(x*o~;(`#f<;78^pccl@*#Cf;`c*#3X*g13%() zmHuI3af}D%_tbTDnDEu5525xwKn>rAq<9~+g5dQ-tb%OPb*wJs0xmU|&tvltYG)ii zbg_6D%mt^9rL7wea=(!s2o)$^BT_C+>s5G=B{~EQI;yF#<96$v-z}~ zlC+ZAtp1gimi|uuLa;l8zNfJvP`J!O5F1^?z{ue+97=_u5QDcML`HMVv^!K3`ain# zf-G%8Be%dKuM4f91pE%L(dpEtg=2JK>ooq48e4G06zWA4&TU$m<3*8f9Yq}lsiGim z2+q#tv~2+t`~JZrv&y*rtnS2qDBC?lDz6_w&5a=nbhB^bUTS=Dv5mt9wU58k|JwXvqOr}#Ykh%!Q7E8Eb( z0IIc;Lva;0{RVFN6Brz`Miz+m(l#5W15XXpg8y~XB>B(!w^^?JYjh#c!PZ#r9`?@T 
z3oV6VbPsO%2nJ<<+sQFGA_L>dOv!!YllD<54+R*(#Sm!5pUrCq1O?1a%FKL9AUsMZ zc3Q?y0lh{bSd5(tgO>QjNo}C0sA_7Z6;}DeJaSU=q?S5QAyoFNsR_a2YzP}pe?i(A zai5#;`1~bY0zX&a+b3l?)5zlBd%V908kQad*HiG^;=oZqf* ztv%Ry|D&wzM~ok27UDu?dr1N~)~t$axVfBozK})c;nzoI;T!QIz33|*%B+?<5Ft?f z7~fkyb^z$keE1>0Y+#?QONonoy6{dSNsU;PQd?Kyl*uv@iwTH&ABcB0-D@)v&NcI!UvGx>q5jLt3Mn@$`A8d z)6~urw_%(30S?+1=mzl)3^ClY0fR{zU~kL~>!tZ}bZL?olY>sB23i?!of??Gj6g^m zUm9PV14xgQK-ik28;EQC03aKxSR$wnhS0-S55WzJ+PL#*jV`^?i&UFL!$+Y*W_4qB zcs7=5n!Stc4?qlwiBcg z39auKfKGR=n$Fu-=D%OnSh~@i&ul2ofZUpK=7b@S22jN{6zlxXE>FR+CHMf6NgN9N ziqGm@fX8vWCJ@CSIVb&~Rw=uXZ{eb_fIfG|z>`qPoIAOV?jk(uc&U>Gvu&$vLv7>zKLiT%~>eYMi_g*P1mx~&H|N6)OX}^C_(|$)EM}Ikd{4JiS$TW>>ob|Lu z`eqHLzV(K#-bVH|8>afsHFD~mZ{*e6YS`*s$lk?95#M@mu3u`D`sGHMX&sFlKVyvw zzs^mbdz&@pd5v3MmD}E_w#f^;_;#+bz~^}BZLM({qsqL3QH%VOyvk3Zc7}h7FYwc- zE%8Nu2BTi$OZ+9g&+@bU9Ny>nd42)!^L&|K#QOq&nP0+tncv`_;FmG;Vu7hYhu`E^ z_$z36nP2s)ZJoc$uf1(HE^+oitJU8lDXrD?r1W~&a(iELTU%bhlWND4&9>k3nj2|L z%}ffS-;9Rsw!f3ix4K?ytLX>Q>j+=&Cd)kB4tk->n{L3H-i{P*OEz86>iUnpNus;x96nR|HZ|TH9_Mrw~vjO$PIu_2AvkmjB){#xeHYV7GxFa8sg`FFH;7>GCoDmeC&3G@F%Jk(KnbcZ1BPpV?1Bbu#aA}(%ThFq}H8yYxMw3M{2P|rRSzm>(w zD`}m9lLzYg(2@UpbmX?St<@@S9a?!Wg9<0y*z{V`ait@>p3?zR1kShbzwXG;aUJCi z98$n~vFPCUjgb3o-{VeOgniZ0m2wc>URfFJ$`D9X4@GB%hplJ@o&o-Br90fHcV)kK zCGDoq8B7gDuPH+XK`Hq!!qRRlva}ewo$HYlwcCoLu&r3k7zTe+I?W1|ab0YV#$8XFg(rRA4Q+FOdT#^03iuC$}P`F}O@%HfHY#@2TCU6V* zt5$-uzTt!e>4$;a1D{`OJ3%O&wJ`8*C~hBo^^2dm_4ygA9-on-HOZu%s2dJ@+=0A= zbhdrj1s6xwdHAsI-1FM*uqPur6{20AQ5lG^LrCsi-v~qE0Nceu3MUkb6B3gj4SMb_ zj&nA6!xcm-lH3#GkQlLabg6YQ^wdggNfCa$3wZHFG^*SU5px9c=IdV2kzE1bp{)Yy z^V|b+!RK>d0FDY+rMc~Mz_UF)DvZj>P`bUudJOo0f~@hA_WOTz19=koC zJ0t`L)uHEH_jU$QQz+i$?~O0ta5@k&)oQh?fEv*wWU_?!Xb6l+tozdndNNE3+M&WQ zS4ttsWhWLzS8lJD7(JktfiOytgBI7QvxxN&L=7WAnT72HZr@96QVTk=WMK{8e0{1V ztJGE&KGc6I;>4cSDpOm#8}^3%ASryw6dE#YNZ}mcO>QKFOe;7zm{jv}tV7MKMzAK3w1yqUI4-pEQ zYCCyv$B!h!GvQHdZ6PrrFPt7kICn$=#orL!0&QFcScw5aCu|5}o^+E+E7gW3#0YI@ zSeW00$ixy&Gf66m)lyrJ1c40@4Vsadw3j5mfnb*6g)}}m0cXkLRB@VMZR#s-fk@Ow zVa}I;|29Petf()dEagh9!ff5vsongep0n|;uqxBlC@N|E0@MFbHkY-ePYcZsqxL z?)$kQm%g{z_Ker)d$Phy1XaXkt?d2_?0>4_a5BoL5j>!dLvvt84*XBcbA0LefkVN6 zh9)2B?vFV5Lz;)aipWfz{XJj6IfP=DoLaipRBs5X$jSP_;Z$lM3sBRYSb0=A6GZoXcdKlH}pHgRB2C7YO=p&ul6-`(=yXCf8bCb@wi z7f!Y+KCL(gw7~<~IDpg_T3`ESRuP_367f}xtL4QvX}C!WmDtmRRmq78vlHf>^h%|v z?8z(bV0QF56s0*69tb`|FY53w@JAK41W!UyIQ+|j&g_GdZtA5!*>CC}6btZA2cnOO zEdtQ#kLm^<@-6R^lbNR6ur+;cWYrNS)OSYD=x_rbQM~Ysi7gB~$z`GGM*s}5GkTY3 zQ7VE&K;ZSl1x0{fTE?VhM2Ii(hzgbX9A-aOMtYz6A}S<`zex6Z%6(__V#-sdJEMP} z(SH1KXcNmHML1=-GkP{7yZupQBSNpK=)HDmN1Ru%SbEA&6hD7RxGO|fVosmEI z4-8WY;t~_PAGPKY>0{c9DXXnu#DCKIO8O(zR$HVt)1QUPEW7(PlqyR!iy2fAG%-pz z_n7#UG$(ZNfsEs6WsJ*^%{)fyPXISZFE~`5bMwF2(j5NxZcb!3X$oe`BgBNdb<`u4`C z^5yg*5Ge`)%&4MVZZsSq!-1!*y>!IUegRseOgXV&|Ktk=UAa&=tS%D~E(@|oLAIk@ ziPC7%qCqy&41}Z@xj~;M>3(8=1tmFpu}V`YbQPqniG_ArAh8(F0df#~Zf z=q8CGz6x7D(BG`iYKLvMo`?TlV&VrYTFmW46H$gEX!aWl)A1N|aHilV^^D@s_oQGoQ zu|A6mtvHV>gdE*NDzz}yMsw412iA%v7O&YyPMRQ9os&W!b%4U7HrhnZN-?oK!#M?x zrKt?*D}|i%H02!>NZ-!{F;oy6T(1f!sf=ZQ|&&|-<}+~^r&7E_F^ak!1F7uQzes_M-_zWQM=$(AF6hVkS_ zKfmwEp$METY@OaMba5Rr2qQ{RcOCA{uFE6m8tLFQ2ew4lCGFmB7Ruh64N`X2Y7Iq1 zxj0CR&ajVc*?CA)A5sA4BJ)PH(or!Op?I41UjwLIQ~5SRp=$v7YiT4WD8x_(y(UPC zBLjViTP~oU7$7AEG~i7tl)0L{QQyTaCZ)1z^N*=^9K;1_=kM{TlPTuunlQaqO-t23 zy%w!t9O8m?JorDw+G@>QTSMuj)G1?Cx|LWV?(DHmL8c|fRJ?{FDIrioj$T(WSsI^J zvZgF+7jMzH`94I|@o~dJzQGUG#d3PxY&9ucZ#LECzj#Wd|B8w$R9vNkOf3xnDXL1{ z6U8M*To&Q4@;^$?Q{^K$9Ffgolw-fv5Bad?-JzH)x`Tp}#gl(kB%8(jqGgwhX3;L* WwXfRd^si{@mvlG}q@5ouoc=R>@x#mj literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/__pycache__/voc.cpython-310.pyc 
b/mmpretrain/datasets/__pycache__/voc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbbe1de022873b771de8014fe993abf47cf2370b GIT binary patch literal 6438 zcmc&&UvC@75x+Ygk4K84Wm%RjCv6Wx8k&hpIfjk4LTprx71yaP8|^?reuKD4+~4f+uk5DHQ%<_&-tO$~ z%+7CSb~c=zE@*iE{N+Cy|9V-|{z)I>PZl3P!5jS%1=E-wXf55)bggC7GCH*wf!WH| zvMsA-wQ{wbYRd$6D__gE3blgzHiM~Fu~t;|Y%tv_)k>&aEEk+=&D3U8-40H-W^1#m zo)6Bn=4x}g#!JU%pJ+EVR$x;PH8#cPjxtAjZJy~HTBZ0Ic+%D@8EM}3qgdK^JFy?O zy+CH)ZKF~#*m08+Oh!j@{U=+eG(=XTg^azXVQtDCDEYnvON z>6jV~1-{?$+N>L??%7}7ee3e2OFwkqTHRc`dG{B$);2(M`c5~9{oCF)4{ms|7xB1G zjEz5~{zvpDRJc~tnN~BHUdu3po4#JlvJ5l1#j><1j|^tBd?U*WJO`0;tcZ7-+dRju zhOwZr63??!yuhsmtx^0yYv^o-oqjl7o8m<_%g#L1utn2sj-7p|)k=S?PDvS|A~nsCDW4(+@xX^uyod-+AZW z+Ra-nugRma&i5xs_V@RzyPf9O^dG#i|HJ+g)oOKY>|o*5b~}tc!s6Gjyx$7OMxW&D zp!%Zz7q2{MRpM^&c(iW1C4>Gup5GoWNnP}eb}Sa?ug3fF*U;Seg8xCGX8AwJIAHbS zwFd0c_9|A^_Ax0?(`fj07@HH2u@PeYwt}iR+VLgl0e2qwLEyk>)!?mW?5Y9oaDpu- z4jn)+6fDK%jlBV(n;Z9444Hh?uBj6U>RzTHg96R z`!JR^%&XyHFQS3Xi6*Qj)u^M4#uZ^0I~QSLl@(_b)~CKlrq>9C+PuDjjgTC50zVzN zBaoi=eXhUOQm&W%(6QbjtP zNJd=vWV$ZAsoU}59VN$e?5G}WwW4)EyQBmqkMBI#@#{Mi{E+L1YH41_XHlvn4k5M& zPG-p=29@H})KH+{P#Cs+VkWS>wMBcO_IPV+LYQ)SB!r@Y4{Z4R&PC?eW9;L-^pm!q zy48{+053Uv{NMqAIk8~bZ#C6Elofdy^!?Ae*LJoc1doSIM6{*blN^IxtbTDY&9(B# zJC;f6bh>Nh)sZEYD-dkR1rQ78&cWot_#MEr4jX8O z!sldZ!*P=WtONL?76qx)^^rcnG=YS0P3AG&0HRk^ZlB7zM*SD*OTpk;nW_{L zDIj52m62R4t3kkax$@uV8ur{sIha_Pc5S`e+J@M`dg_v}5zf_1Z@e)aI7u|xV9=@O z@}&vVJ_s>iUmDPjvp-2p1~_hJCy1Aaac7w#%<@3u=iK{fRb_=F#Kfx0BUDZ@keUow z7#WHhd~*C4e1{V##lhu#W4&R_Nu7;NGo1gMsO-v*uU;MwB)yFE`Yo#rl70K?k~w9; zJd*YRLsFB45h~==n1(+wq&;wG(gre=OMH7><_XElCS_I`p)4u94Wo&iQWUUzIA)or zfaJD9#-%-QbM@TV`JHkk8}Ag}=rU4+SW9%KGvkqwXs(_Z;!0CvnZM|Vc48#KRp@ew=-x>so9lW}H7VL@mz5g~UA4 z+3X|z&{XXiwAb00!~~B`@Hm%pC0bbF?0r27lWcUz=BZyCBqp0jx}kOT=n%B$6s-;k zrc!PbJ;^*FiHh;`k+xf67ufm4LaUi%*}})!B==YSkKl-Dlw_=LB3GK>ZEqXtScLOa zz+)t7rpzi;%M4tkIE5XSCh4QMKu4DJDAMXiF$Fn91ad>YsU>H-8#_quobpq>EKXyT zn5E(jjn5C=ku=F4^-Myx_zn@!0=-=7nHPH(7S%*U{a@P@U0zjZh~6y4)bu2YrE!>K z)rv0WXc8)V)vYlMZ8-xT>6lulad4T4?BZ}VEr~!j zG4+ET-gdA#KGs7{;o#gr3@i3H)e3g6l5gn zg6C;?7H2~)r0rs?AG>a)AkByejg*Ae6+}1UPH`-)G|Wp&sVI_GpmOt1beVTuPqBHt+~Cjrw_z2Y_KJx8)q%iu zx&j~TF&Oz$iKKXpyRpc_>cX4Zi~QYU_@_)!~? z7FX~o3kp!msr>=pk%giF)=i^VBGyLTbz)}lkXs{IDCYi#3bAxd3>t|J=cU6L8HX@* zw8AAl(GN4Rd7OpulUuQlGd2(f-%#P}DDS;D3X&vbe+Nx~tjhnESL3)Rj z35R!;=|!VdFFoTSl}0KS@G5)(>e33gcX0zD&9>L#h!=zW4+ipmY=9_JaTSHkzzn3Z zwkZpASOm}s{25?N<{EyRA$gM~Rmf}Yqw%fVs~a0@8?u;!H4a^$?aPecj%CJUAfqg# zGNtq#`ts02|JX8?QfG8kH+eOG5;>u*kZ&K`NySNAI>f+TzJGKa2X?vWbfRb3u ze3SvE^kX~8DRkr$x(3YlUb!85EY0gu3^Ya9r94h_;e=DMN%6SpsTk|)xG_{7T-pQV z$k{<{gquoUkVL7I8e#%&kNmkZF@+CthlpN9AuX@d!4+f0PIa3eLP*=ST&m_stP!ya zAwQ$qEgEL34CsW$N&!St2UeG6Afr^eGK~Tp&pcjeOgH*2$ojHq!p7-(vN}0XT1aiW zqCKH4vi+gGU&2S~j6TvH8;q~EBo%WMal}1HqI7~# zOWskM+aWtRX$!c~1=B7u{dW*WMHJ*C>CgPa%%z@kL{vFLF^nwCb!EY&V`3Kv7*`fu z_dVR*^m}ry%fdQh6FF1dDX(p=-dcbAu0n=*9c{v-4zLskB17L4lBC@cA;M&QFb1Yg z>LneCUlGZ4Kkn6e5cFeTD$`{mA-^xCQP350M88)L5HX!E(}y z4?6IPDaT4OC8^!Q(5Bz!>cpXJNzM$~fYK)Vn(0a7%qWIe1#bfH;y{o~k;QF9@j>3E zP8hN<%9GN)o@&wcd+OZ;X;Yp`C*h*6EG5|(4wS8_fN8&(s`EO<=ZFk*77>324zWlt J{;bTr{vX=9Es6jD literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/base_dataset.py b/mmpretrain/datasets/base_dataset.py new file mode 100644 index 0000000..dffdf04 --- /dev/null +++ b/mmpretrain/datasets/base_dataset.py @@ -0,0 +1,219 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp
+from os import PathLike
+from typing import List, Optional, Sequence, Union
+
+import mmengine
+import numpy as np
+from mmengine.dataset import BaseDataset as _BaseDataset
+
+from mmpretrain.registry import DATASETS, TRANSFORMS
+
+
+def expanduser(path):
+    """Expand ~ and ~user constructions.
+
+    If user or $HOME is unknown, do nothing.
+    """
+    if isinstance(path, (str, PathLike)):
+        return osp.expanduser(path)
+    else:
+        return path
+
+
+@DATASETS.register_module()
+class BaseDataset(_BaseDataset):
+    """Base dataset for image classification tasks.
+
+    This dataset supports annotation files in the `OpenMMLab 2.0 style
+    annotation format`_.
+
+    .. _OpenMMLab 2.0 style annotation format:
+        https://github.com/open-mmlab/mmengine/blob/main/docs/zh_cn/tutorials/basedataset.md
+
+    Compared with :class:`mmengine.BaseDataset`, this class implements
+    several useful extra methods.
+
+    Args:
+        ann_file (str): Annotation file path.
+        metainfo (dict, optional): Meta information for the dataset, such as
+            class information. Defaults to None.
+        data_root (str): The root directory for ``data_prefix`` and
+            ``ann_file``. Defaults to ''.
+        data_prefix (str | dict): Prefix for training data. Defaults to ''.
+        filter_cfg (dict, optional): Config for filtering data. Defaults to
+            None.
+        indices (int or Sequence[int], optional): Support using only the
+            first few samples of the annotation file to facilitate
+            training/testing on a smaller dataset. Defaults to None, which
+            means using all ``data_infos``.
+        serialize_data (bool): Whether to hold memory using serialized
+            objects. When enabled, data loader workers can use shared RAM
+            from the master process instead of making a copy. Defaults to
+            True.
+        pipeline (Sequence): Processing pipeline. Defaults to an empty tuple.
+        test_mode (bool, optional): ``test_mode=True`` means the dataset is
+            used in the test phase, where an error is raised if getting an
+            item fails; ``test_mode=False`` means the training phase, where
+            another item is returned randomly instead. Defaults to False.
+        lazy_init (bool): Whether to delay loading the annotation file until
+            it is actually needed. In some cases, such as visualization, only
+            the meta information of the dataset is needed, so loading the
+            annotation file is unnecessary. ``BaseDataset`` can skip loading
+            annotations to save time by setting ``lazy_init=True``. Defaults
+            to False.
+        max_refetch (int): The maximum number of extra cycles to fetch
+            another sample when ``BaseDataset.prepare_data`` gets a ``None``
+            image. Defaults to 1000.
+        classes (str | Sequence[str], optional): Specify names of classes.
+
+            - If it is a string, it should be a file path, and every line of
+              the file is the name of a class.
+            - If it is a sequence of strings, every item is the name of a
+              class.
+            - If it is None, use the category information from the
+              ``metainfo`` argument, the annotation file or the class
+              attribute ``METAINFO``.
+
+            Defaults to None.
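+
+    Example:
+        A minimal usage sketch. The annotation file path, data root and class
+        names below are placeholders for illustration only, not files shipped
+        with this patch; ``lazy_init=True`` keeps the placeholder annotation
+        file from being read at construction time:
+
+        >>> from mmpretrain.datasets import BaseDataset
+        >>> dataset = BaseDataset(
+        ...     ann_file='meta/train.txt',
+        ...     data_root='data/custom',
+        ...     data_prefix='images',
+        ...     classes=['cat', 'dog'],
+        ...     lazy_init=True)
+        >>> dataset.CLASSES
+        ('cat', 'dog')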
+    """  # noqa: E501
+
+    def __init__(self,
+                 ann_file: str,
+                 metainfo: Optional[dict] = None,
+                 data_root: str = '',
+                 data_prefix: Union[str, dict] = '',
+                 filter_cfg: Optional[dict] = None,
+                 indices: Optional[Union[int, Sequence[int]]] = None,
+                 serialize_data: bool = True,
+                 pipeline: Sequence = (),
+                 test_mode: bool = False,
+                 lazy_init: bool = False,
+                 max_refetch: int = 1000,
+                 classes: Union[str, Sequence[str], None] = None):
+        if isinstance(data_prefix, str):
+            data_prefix = dict(img_path=expanduser(data_prefix))
+
+        ann_file = expanduser(ann_file)
+        metainfo = self._compat_classes(metainfo, classes)
+
+        transforms = []
+        for transform in pipeline:
+            if isinstance(transform, dict):
+                transforms.append(TRANSFORMS.build(transform))
+            else:
+                transforms.append(transform)
+
+        super().__init__(
+            ann_file=ann_file,
+            metainfo=metainfo,
+            data_root=data_root,
+            data_prefix=data_prefix,
+            filter_cfg=filter_cfg,
+            indices=indices,
+            serialize_data=serialize_data,
+            pipeline=transforms,
+            test_mode=test_mode,
+            lazy_init=lazy_init,
+            max_refetch=max_refetch)
+
+    @property
+    def img_prefix(self):
+        """The prefix of images."""
+        return self.data_prefix['img_path']
+
+    @property
+    def CLASSES(self):
+        """Return the names of all categories."""
+        return self._metainfo.get('classes', None)
+
+    @property
+    def class_to_idx(self):
+        """Map from class name to class index.
+
+        Returns:
+            dict: The mapping from class name to class index.
+        """
+
+        return {cat: i for i, cat in enumerate(self.CLASSES)}
+
+    def get_gt_labels(self):
+        """Get all ground-truth labels (categories).
+
+        Returns:
+            np.ndarray: The categories for all images.
+        """
+
+        gt_labels = np.array(
+            [self.get_data_info(i)['gt_label'] for i in range(len(self))])
+        return gt_labels
+
+    def get_cat_ids(self, idx: int) -> List[int]:
+        """Get category id by index.
+
+        Args:
+            idx (int): Index of data.
+
+        Returns:
+            cat_ids (List[int]): Image category of the specified index.
+        """
+
+        return [int(self.get_data_info(idx)['gt_label'])]
+
+    def _compat_classes(self, metainfo, classes):
+        """Merge the old-style ``classes`` argument into ``metainfo``."""
+        if isinstance(classes, str):
+            # take it as a file path
+            class_names = mmengine.list_from_file(expanduser(classes))
+        elif isinstance(classes, (tuple, list)):
+            class_names = classes
+        elif classes is not None:
+            raise ValueError(f'Unsupported type {type(classes)} of classes.')
+
+        if metainfo is None:
+            metainfo = {}
+
+        if classes is not None:
+            metainfo = {'classes': tuple(class_names), **metainfo}
+
+        return metainfo
+
+    def full_init(self):
+        """Load the annotation file and set
+        ``BaseDataset._fully_initialized`` to True."""
+        super().full_init()
+
+        # To support the standard OpenMMLab 2.0 annotation format, generate
+        # the internal-format metainfo from the standard metainfo format.
+        if 'categories' in self._metainfo and 'classes' not in self._metainfo:
+            categories = sorted(
+                self._metainfo['categories'], key=lambda x: x['id'])
+            self._metainfo['classes'] = tuple(
+                [cat['category_name'] for cat in categories])
+
+    def __repr__(self):
+        """Print the basic information of the dataset.
+
+        Returns:
+            str: Formatted string.
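+
+        A rough sketch of the resulting format (the numbers and paths below
+        are placeholders, not real output)::
+
+            Dataset BaseDataset
+                Number of samples:  1000
+                Number of categories:  2
+                Annotation file:    meta/train.txt
+                Prefix of images:   data/custom/images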
+        """
+        head = 'Dataset ' + self.__class__.__name__
+        body = []
+        if self._fully_initialized:
+            body.append(f'Number of samples: \t{self.__len__()}')
+        else:
+            body.append("Haven't been initialized")
+
+        if self.CLASSES is not None:
+            body.append(f'Number of categories: \t{len(self.CLASSES)}')
+
+        body.extend(self.extra_repr())
+
+        if len(self.pipeline.transforms) > 0:
+            body.append('With transforms:')
+            for t in self.pipeline.transforms:
+                body.append(f'    {t}')
+
+        lines = [head] + [' ' * 4 + line for line in body]
+        return '\n'.join(lines)
+
+    def extra_repr(self) -> List[str]:
+        """The extra repr information of the dataset."""
+        body = []
+        body.append(f'Annotation file: \t{self.ann_file}')
+        body.append(f'Prefix of images: \t{self.img_prefix}')
+        return body
diff --git a/mmpretrain/datasets/builder.py b/mmpretrain/datasets/builder.py
new file mode 100644
index 0000000..dfa3872
--- /dev/null
+++ b/mmpretrain/datasets/builder.py
@@ -0,0 +1,25 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from mmpretrain.registry import DATASETS
+
+
+def build_dataset(cfg):
+    """Build dataset.
+
+    Examples:
+        >>> from mmpretrain.datasets import build_dataset
+        >>> mnist_train = build_dataset(
+        ...     dict(type='MNIST', data_prefix='data/mnist/', test_mode=False))
+        >>> print(mnist_train)
+        Dataset MNIST
+            Number of samples:  60000
+            Number of categories:       10
+            Prefix of data:     data/mnist/
+        >>> mnist_test = build_dataset(
+        ...     dict(type='MNIST', data_prefix='data/mnist/', test_mode=True))
+        >>> print(mnist_test)
+        Dataset MNIST
+            Number of samples:  10000
+            Number of categories:       10
+            Prefix of data:     data/mnist/
+    """
+    return DATASETS.build(cfg)
diff --git a/mmpretrain/datasets/caltech101.py b/mmpretrain/datasets/caltech101.py
new file mode 100644
index 0000000..71e5de8
--- /dev/null
+++ b/mmpretrain/datasets/caltech101.py
@@ -0,0 +1,113 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import List
+
+from mmengine import get_file_backend, list_from_file
+
+from mmpretrain.registry import DATASETS
+from .base_dataset import BaseDataset
+from .categories import CALTECH101_CATEGORIES
+
+
+@DATASETS.register_module()
+class Caltech101(BaseDataset):
+    """The Caltech101 Dataset.
+
+    Support the `Caltech101 `_ Dataset.
+    After downloading and decompression, the dataset directory structure is as follows.
+
+    Caltech101 dataset directory: ::
+
+        caltech-101
+        ├── 101_ObjectCategories
+        │   ├── class_x
+        │   │   ├── xx1.jpg
+        │   │   ├── xx2.jpg
+        │   │   └── ...
+        │   ├── class_y
+        │   │   ├── yy1.jpg
+        │   │   ├── yy2.jpg
+        │   │   └── ...
+        │   └── ...
+        ├── Annotations
+        │   ├── class_x
+        │   │   ├── xx1.mat
+        │   │   └── ...
+        │   └── ...
+        ├── meta
+        │   ├── train.txt
+        │   └── test.txt
+        └── ....
+
+    Please note that since there is no official splitting for the training
+    and test sets, you can use the train.txt and test.txt provided by us or
+    create your own annotation files in the format sketched below. Here is
+    the download `link `_ for the annotations.
+
+    Args:
+        data_root (str): The root directory for the Caltech101 dataset.
+        split (str, optional): The dataset split, supports "train" and "test".
+            Defaults to "train".
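+
+    Each line of ``meta/train.txt`` and ``meta/test.txt`` pairs an image path
+    (relative to ``101_ObjectCategories``) with an integer class label,
+    separated by a space. A sketch of the expected format (the file names
+    below are illustrative only)::
+
+        accordion/image_0001.jpg 0
+        accordion/image_0002.jpg 0
+        airplanes/image_0004.jpg 1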
+
+    Examples:
+        >>> from mmpretrain.datasets import Caltech101
+        >>> train_dataset = Caltech101(data_root='data/caltech-101', split='train')
+        >>> train_dataset
+        Dataset Caltech101
+            Number of samples:  3060
+            Number of categories:       102
+            Root of dataset:    data/caltech-101
+        >>> test_dataset = Caltech101(data_root='data/caltech-101', split='test')
+        >>> test_dataset
+        Dataset Caltech101
+            Number of samples:  6728
+            Number of categories:       102
+            Root of dataset:    data/caltech-101
+    """  # noqa: E501
+
+    METAINFO = {'classes': CALTECH101_CATEGORIES}
+
+    def __init__(self, data_root: str, split: str = 'train', **kwargs):
+
+        splits = ['train', 'test']
+        assert split in splits, \
+            f"The split must be one of {splits}, but got '{split}'"
+        self.split = split
+
+        self.backend = get_file_backend(data_root, enable_singleton=True)
+
+        if split == 'train':
+            ann_file = self.backend.join_path('meta', 'train.txt')
+        else:
+            ann_file = self.backend.join_path('meta', 'test.txt')
+
+        data_prefix = '101_ObjectCategories'
+        test_mode = split == 'test'
+
+        super(Caltech101, self).__init__(
+            ann_file=ann_file,
+            data_root=data_root,
+            data_prefix=data_prefix,
+            test_mode=test_mode,
+            **kwargs)
+
+    def load_data_list(self):
+        """Load images and ground truth labels."""
+
+        pairs = list_from_file(self.ann_file)
+        data_list = []
+
+        for pair in pairs:
+            path, gt_label = pair.split()
+            img_path = self.backend.join_path(self.img_prefix, path)
+            info = dict(img_path=img_path, gt_label=int(gt_label))
+            data_list.append(info)
+
+        return data_list
+
+    def extra_repr(self) -> List[str]:
+        """The extra repr information of the dataset."""
+        body = [
+            f'Root of dataset: \t{self.data_root}',
+        ]
+        return body
diff --git a/mmpretrain/datasets/categories.py b/mmpretrain/datasets/categories.py
new file mode 100644
index 0000000..9e75f79
--- /dev/null
+++ b/mmpretrain/datasets/categories.py
@@ -0,0 +1,1661 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# Pre-defined category names of various datasets.
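+# Each tuple below is consumed through a dataset class's ``METAINFO``
+# attribute (for example, ``Caltech101`` sets
+# ``METAINFO = {'classes': CALTECH101_CATEGORIES}``), so the position of a
+# name in its tuple defines the integer class index of that category.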
+ +VOC2007_CATEGORIES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', + 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', + 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', + 'sofa', 'train', 'tvmonitor') + +CUB_CATEGORIES = ( + 'Black_footed_Albatross', 'Laysan_Albatross', 'Sooty_Albatross', + 'Groove_billed_Ani', 'Crested_Auklet', 'Least_Auklet', 'Parakeet_Auklet', + 'Rhinoceros_Auklet', 'Brewer_Blackbird', 'Red_winged_Blackbird', + 'Rusty_Blackbird', 'Yellow_headed_Blackbird', 'Bobolink', 'Indigo_Bunting', + 'Lazuli_Bunting', 'Painted_Bunting', 'Cardinal', 'Spotted_Catbird', + 'Gray_Catbird', 'Yellow_breasted_Chat', 'Eastern_Towhee', + 'Chuck_will_Widow', 'Brandt_Cormorant', 'Red_faced_Cormorant', + 'Pelagic_Cormorant', 'Bronzed_Cowbird', 'Shiny_Cowbird', 'Brown_Creeper', + 'American_Crow', 'Fish_Crow', 'Black_billed_Cuckoo', 'Mangrove_Cuckoo', + 'Yellow_billed_Cuckoo', 'Gray_crowned_Rosy_Finch', 'Purple_Finch', + 'Northern_Flicker', 'Acadian_Flycatcher', 'Great_Crested_Flycatcher', + 'Least_Flycatcher', 'Olive_sided_Flycatcher', 'Scissor_tailed_Flycatcher', + 'Vermilion_Flycatcher', 'Yellow_bellied_Flycatcher', 'Frigatebird', + 'Northern_Fulmar', 'Gadwall', 'American_Goldfinch', 'European_Goldfinch', + 'Boat_tailed_Grackle', 'Eared_Grebe', 'Horned_Grebe', 'Pied_billed_Grebe', + 'Western_Grebe', 'Blue_Grosbeak', 'Evening_Grosbeak', 'Pine_Grosbeak', + 'Rose_breasted_Grosbeak', 'Pigeon_Guillemot', 'California_Gull', + 'Glaucous_winged_Gull', 'Heermann_Gull', 'Herring_Gull', 'Ivory_Gull', + 'Ring_billed_Gull', 'Slaty_backed_Gull', 'Western_Gull', + 'Anna_Hummingbird', 'Ruby_throated_Hummingbird', 'Rufous_Hummingbird', + 'Green_Violetear', 'Long_tailed_Jaeger', 'Pomarine_Jaeger', 'Blue_Jay', + 'Florida_Jay', 'Green_Jay', 'Dark_eyed_Junco', 'Tropical_Kingbird', + 'Gray_Kingbird', 'Belted_Kingfisher', 'Green_Kingfisher', + 'Pied_Kingfisher', 'Ringed_Kingfisher', 'White_breasted_Kingfisher', + 'Red_legged_Kittiwake', 'Horned_Lark', 'Pacific_Loon', 'Mallard', + 'Western_Meadowlark', 'Hooded_Merganser', 'Red_breasted_Merganser', + 'Mockingbird', 'Nighthawk', 'Clark_Nutcracker', 'White_breasted_Nuthatch', + 'Baltimore_Oriole', 'Hooded_Oriole', 'Orchard_Oriole', 'Scott_Oriole', + 'Ovenbird', 'Brown_Pelican', 'White_Pelican', 'Western_Wood_Pewee', + 'Sayornis', 'American_Pipit', 'Whip_poor_Will', 'Horned_Puffin', + 'Common_Raven', 'White_necked_Raven', 'American_Redstart', 'Geococcyx', + 'Loggerhead_Shrike', 'Great_Grey_Shrike', 'Baird_Sparrow', + 'Black_throated_Sparrow', 'Brewer_Sparrow', 'Chipping_Sparrow', + 'Clay_colored_Sparrow', 'House_Sparrow', 'Field_Sparrow', 'Fox_Sparrow', + 'Grasshopper_Sparrow', 'Harris_Sparrow', 'Henslow_Sparrow', + 'Le_Conte_Sparrow', 'Lincoln_Sparrow', 'Nelson_Sharp_tailed_Sparrow', + 'Savannah_Sparrow', 'Seaside_Sparrow', 'Song_Sparrow', 'Tree_Sparrow', + 'Vesper_Sparrow', 'White_crowned_Sparrow', 'White_throated_Sparrow', + 'Cape_Glossy_Starling', 'Bank_Swallow', 'Barn_Swallow', 'Cliff_Swallow', + 'Tree_Swallow', 'Scarlet_Tanager', 'Summer_Tanager', 'Artic_Tern', + 'Black_Tern', 'Caspian_Tern', 'Common_Tern', 'Elegant_Tern', + 'Forsters_Tern', 'Least_Tern', 'Green_tailed_Towhee', 'Brown_Thrasher', + 'Sage_Thrasher', 'Black_capped_Vireo', 'Blue_headed_Vireo', + 'Philadelphia_Vireo', 'Red_eyed_Vireo', 'Warbling_Vireo', + 'White_eyed_Vireo', 'Yellow_throated_Vireo', 'Bay_breasted_Warbler', + 'Black_and_white_Warbler', 'Black_throated_Blue_Warbler', + 'Blue_winged_Warbler', 'Canada_Warbler', 'Cape_May_Warbler', + 'Cerulean_Warbler', 
'Chestnut_sided_Warbler', 'Golden_winged_Warbler', + 'Hooded_Warbler', 'Kentucky_Warbler', 'Magnolia_Warbler', + 'Mourning_Warbler', 'Myrtle_Warbler', 'Nashville_Warbler', + 'Orange_crowned_Warbler', 'Palm_Warbler', 'Pine_Warbler', + 'Prairie_Warbler', 'Prothonotary_Warbler', 'Swainson_Warbler', + 'Tennessee_Warbler', 'Wilson_Warbler', 'Worm_eating_Warbler', + 'Yellow_Warbler', 'Northern_Waterthrush', 'Louisiana_Waterthrush', + 'Bohemian_Waxwing', 'Cedar_Waxwing', 'American_Three_toed_Woodpecker', + 'Pileated_Woodpecker', 'Red_bellied_Woodpecker', 'Red_cockaded_Woodpecker', + 'Red_headed_Woodpecker', 'Downy_Woodpecker', 'Bewick_Wren', 'Cactus_Wren', + 'Carolina_Wren', 'House_Wren', 'Marsh_Wren', 'Rock_Wren', 'Winter_Wren', + 'Common_Yellowthroat') + +IMAGENET_CATEGORIES = ( + 'tench, Tinca tinca', + 'goldfish, Carassius auratus', + 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias', # noqa: E501 + 'tiger shark, Galeocerdo cuvieri', + 'hammerhead, hammerhead shark', + 'electric ray, crampfish, numbfish, torpedo', + 'stingray', + 'cock', + 'hen', + 'ostrich, Struthio camelus', + 'brambling, Fringilla montifringilla', + 'goldfinch, Carduelis carduelis', + 'house finch, linnet, Carpodacus mexicanus', + 'junco, snowbird', + 'indigo bunting, indigo finch, indigo bird, Passerina cyanea', + 'robin, American robin, Turdus migratorius', + 'bulbul', + 'jay', + 'magpie', + 'chickadee', + 'water ouzel, dipper', + 'kite', + 'bald eagle, American eagle, Haliaeetus leucocephalus', + 'vulture', + 'great grey owl, great gray owl, Strix nebulosa', + 'European fire salamander, Salamandra salamandra', + 'common newt, Triturus vulgaris', + 'eft', + 'spotted salamander, Ambystoma maculatum', + 'axolotl, mud puppy, Ambystoma mexicanum', + 'bullfrog, Rana catesbeiana', + 'tree frog, tree-frog', + 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui', + 'loggerhead, loggerhead turtle, Caretta caretta', + 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea', # noqa: E501 + 'mud turtle', + 'terrapin', + 'box turtle, box tortoise', + 'banded gecko', + 'common iguana, iguana, Iguana iguana', + 'American chameleon, anole, Anolis carolinensis', + 'whiptail, whiptail lizard', + 'agama', + 'frilled lizard, Chlamydosaurus kingi', + 'alligator lizard', + 'Gila monster, Heloderma suspectum', + 'green lizard, Lacerta viridis', + 'African chameleon, Chamaeleo chamaeleon', + 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis', # noqa: E501 + 'African crocodile, Nile crocodile, Crocodylus niloticus', + 'American alligator, Alligator mississipiensis', + 'triceratops', + 'thunder snake, worm snake, Carphophis amoenus', + 'ringneck snake, ring-necked snake, ring snake', + 'hognose snake, puff adder, sand viper', + 'green snake, grass snake', + 'king snake, kingsnake', + 'garter snake, grass snake', + 'water snake', + 'vine snake', + 'night snake, Hypsiglena torquata', + 'boa constrictor, Constrictor constrictor', + 'rock python, rock snake, Python sebae', + 'Indian cobra, Naja naja', + 'green mamba', + 'sea snake', + 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus', + 'diamondback, diamondback rattlesnake, Crotalus adamanteus', + 'sidewinder, horned rattlesnake, Crotalus cerastes', + 'trilobite', + 'harvestman, daddy longlegs, Phalangium opilio', + 'scorpion', + 'black and gold garden spider, Argiope aurantia', + 'barn spider, Araneus cavaticus', + 'garden spider, Aranea diademata', + 'black widow, Latrodectus mactans', 
+ 'tarantula', + 'wolf spider, hunting spider', + 'tick', + 'centipede', + 'black grouse', + 'ptarmigan', + 'ruffed grouse, partridge, Bonasa umbellus', + 'prairie chicken, prairie grouse, prairie fowl', + 'peacock', + 'quail', + 'partridge', + 'African grey, African gray, Psittacus erithacus', + 'macaw', + 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita', + 'lorikeet', + 'coucal', + 'bee eater', + 'hornbill', + 'hummingbird', + 'jacamar', + 'toucan', + 'drake', + 'red-breasted merganser, Mergus serrator', + 'goose', + 'black swan, Cygnus atratus', + 'tusker', + 'echidna, spiny anteater, anteater', + 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus', # noqa: E501 + 'wallaby, brush kangaroo', + 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus', # noqa: E501 + 'wombat', + 'jellyfish', + 'sea anemone, anemone', + 'brain coral', + 'flatworm, platyhelminth', + 'nematode, nematode worm, roundworm', + 'conch', + 'snail', + 'slug', + 'sea slug, nudibranch', + 'chiton, coat-of-mail shell, sea cradle, polyplacophore', + 'chambered nautilus, pearly nautilus, nautilus', + 'Dungeness crab, Cancer magister', + 'rock crab, Cancer irroratus', + 'fiddler crab', + 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica', # noqa: E501 + 'American lobster, Northern lobster, Maine lobster, Homarus americanus', # noqa: E501 + 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish', # noqa: E501 + 'crayfish, crawfish, crawdad, crawdaddy', + 'hermit crab', + 'isopod', + 'white stork, Ciconia ciconia', + 'black stork, Ciconia nigra', + 'spoonbill', + 'flamingo', + 'little blue heron, Egretta caerulea', + 'American egret, great white heron, Egretta albus', + 'bittern', + 'crane', + 'limpkin, Aramus pictus', + 'European gallinule, Porphyrio porphyrio', + 'American coot, marsh hen, mud hen, water hen, Fulica americana', + 'bustard', + 'ruddy turnstone, Arenaria interpres', + 'red-backed sandpiper, dunlin, Erolia alpina', + 'redshank, Tringa totanus', + 'dowitcher', + 'oystercatcher, oyster catcher', + 'pelican', + 'king penguin, Aptenodytes patagonica', + 'albatross, mollymawk', + 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus', # noqa: E501 + 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca', + 'dugong, Dugong dugon', + 'sea lion', + 'Chihuahua', + 'Japanese spaniel', + 'Maltese dog, Maltese terrier, Maltese', + 'Pekinese, Pekingese, Peke', + 'Shih-Tzu', + 'Blenheim spaniel', + 'papillon', + 'toy terrier', + 'Rhodesian ridgeback', + 'Afghan hound, Afghan', + 'basset, basset hound', + 'beagle', + 'bloodhound, sleuthhound', + 'bluetick', + 'black-and-tan coonhound', + 'Walker hound, Walker foxhound', + 'English foxhound', + 'redbone', + 'borzoi, Russian wolfhound', + 'Irish wolfhound', + 'Italian greyhound', + 'whippet', + 'Ibizan hound, Ibizan Podenco', + 'Norwegian elkhound, elkhound', + 'otterhound, otter hound', + 'Saluki, gazelle hound', + 'Scottish deerhound, deerhound', + 'Weimaraner', + 'Staffordshire bullterrier, Staffordshire bull terrier', + 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier', # noqa: E501 + 'Bedlington terrier', + 'Border terrier', + 'Kerry blue terrier', + 'Irish terrier', + 'Norfolk terrier', + 'Norwich terrier', + 'Yorkshire terrier', + 'wire-haired fox terrier', + 'Lakeland terrier', + 'Sealyham terrier, Sealyham', + 'Airedale, Airedale terrier', + 'cairn, cairn 
terrier', + 'Australian terrier', + 'Dandie Dinmont, Dandie Dinmont terrier', + 'Boston bull, Boston terrier', + 'miniature schnauzer', + 'giant schnauzer', + 'standard schnauzer', + 'Scotch terrier, Scottish terrier, Scottie', + 'Tibetan terrier, chrysanthemum dog', + 'silky terrier, Sydney silky', + 'soft-coated wheaten terrier', + 'West Highland white terrier', + 'Lhasa, Lhasa apso', + 'flat-coated retriever', + 'curly-coated retriever', + 'golden retriever', + 'Labrador retriever', + 'Chesapeake Bay retriever', + 'German short-haired pointer', + 'vizsla, Hungarian pointer', + 'English setter', + 'Irish setter, red setter', + 'Gordon setter', + 'Brittany spaniel', + 'clumber, clumber spaniel', + 'English springer, English springer spaniel', + 'Welsh springer spaniel', + 'cocker spaniel, English cocker spaniel, cocker', + 'Sussex spaniel', + 'Irish water spaniel', + 'kuvasz', + 'schipperke', + 'groenendael', + 'malinois', + 'briard', + 'kelpie', + 'komondor', + 'Old English sheepdog, bobtail', + 'Shetland sheepdog, Shetland sheep dog, Shetland', + 'collie', + 'Border collie', + 'Bouvier des Flandres, Bouviers des Flandres', + 'Rottweiler', + 'German shepherd, German shepherd dog, German police dog, alsatian', + 'Doberman, Doberman pinscher', + 'miniature pinscher', + 'Greater Swiss Mountain dog', + 'Bernese mountain dog', + 'Appenzeller', + 'EntleBucher', + 'boxer', + 'bull mastiff', + 'Tibetan mastiff', + 'French bulldog', + 'Great Dane', + 'Saint Bernard, St Bernard', + 'Eskimo dog, husky', + 'malamute, malemute, Alaskan malamute', + 'Siberian husky', + 'dalmatian, coach dog, carriage dog', + 'affenpinscher, monkey pinscher, monkey dog', + 'basenji', + 'pug, pug-dog', + 'Leonberg', + 'Newfoundland, Newfoundland dog', + 'Great Pyrenees', + 'Samoyed, Samoyede', + 'Pomeranian', + 'chow, chow chow', + 'keeshond', + 'Brabancon griffon', + 'Pembroke, Pembroke Welsh corgi', + 'Cardigan, Cardigan Welsh corgi', + 'toy poodle', + 'miniature poodle', + 'standard poodle', + 'Mexican hairless', + 'timber wolf, grey wolf, gray wolf, Canis lupus', + 'white wolf, Arctic wolf, Canis lupus tundrarum', + 'red wolf, maned wolf, Canis rufus, Canis niger', + 'coyote, prairie wolf, brush wolf, Canis latrans', + 'dingo, warrigal, warragal, Canis dingo', + 'dhole, Cuon alpinus', + 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus', + 'hyena, hyaena', + 'red fox, Vulpes vulpes', + 'kit fox, Vulpes macrotis', + 'Arctic fox, white fox, Alopex lagopus', + 'grey fox, gray fox, Urocyon cinereoargenteus', + 'tabby, tabby cat', + 'tiger cat', + 'Persian cat', + 'Siamese cat, Siamese', + 'Egyptian cat', + 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor', # noqa: E501 + 'lynx, catamount', + 'leopard, Panthera pardus', + 'snow leopard, ounce, Panthera uncia', + 'jaguar, panther, Panthera onca, Felis onca', + 'lion, king of beasts, Panthera leo', + 'tiger, Panthera tigris', + 'cheetah, chetah, Acinonyx jubatus', + 'brown bear, bruin, Ursus arctos', + 'American black bear, black bear, Ursus americanus, Euarctos americanus', # noqa: E501 + 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus', + 'sloth bear, Melursus ursinus, Ursus ursinus', + 'mongoose', + 'meerkat, mierkat', + 'tiger beetle', + 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle', + 'ground beetle, carabid beetle', + 'long-horned beetle, longicorn, longicorn beetle', + 'leaf beetle, chrysomelid', + 'dung beetle', + 'rhinoceros beetle', + 'weevil', + 'fly', + 'bee', + 'ant, emmet, pismire', + 
'grasshopper, hopper', + 'cricket', + 'walking stick, walkingstick, stick insect', + 'cockroach, roach', + 'mantis, mantid', + 'cicada, cicala', + 'leafhopper', + 'lacewing, lacewing fly', + "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", # noqa: E501 + 'damselfly', + 'admiral', + 'ringlet, ringlet butterfly', + 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus', + 'cabbage butterfly', + 'sulphur butterfly, sulfur butterfly', + 'lycaenid, lycaenid butterfly', + 'starfish, sea star', + 'sea urchin', + 'sea cucumber, holothurian', + 'wood rabbit, cottontail, cottontail rabbit', + 'hare', + 'Angora, Angora rabbit', + 'hamster', + 'porcupine, hedgehog', + 'fox squirrel, eastern fox squirrel, Sciurus niger', + 'marmot', + 'beaver', + 'guinea pig, Cavia cobaya', + 'sorrel', + 'zebra', + 'hog, pig, grunter, squealer, Sus scrofa', + 'wild boar, boar, Sus scrofa', + 'warthog', + 'hippopotamus, hippo, river horse, Hippopotamus amphibius', + 'ox', + 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis', + 'bison', + 'ram, tup', + 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis', # noqa: E501 + 'ibex, Capra ibex', + 'hartebeest', + 'impala, Aepyceros melampus', + 'gazelle', + 'Arabian camel, dromedary, Camelus dromedarius', + 'llama', + 'weasel', + 'mink', + 'polecat, fitch, foulmart, foumart, Mustela putorius', + 'black-footed ferret, ferret, Mustela nigripes', + 'otter', + 'skunk, polecat, wood pussy', + 'badger', + 'armadillo', + 'three-toed sloth, ai, Bradypus tridactylus', + 'orangutan, orang, orangutang, Pongo pygmaeus', + 'gorilla, Gorilla gorilla', + 'chimpanzee, chimp, Pan troglodytes', + 'gibbon, Hylobates lar', + 'siamang, Hylobates syndactylus, Symphalangus syndactylus', + 'guenon, guenon monkey', + 'patas, hussar monkey, Erythrocebus patas', + 'baboon', + 'macaque', + 'langur', + 'colobus, colobus monkey', + 'proboscis monkey, Nasalis larvatus', + 'marmoset', + 'capuchin, ringtail, Cebus capucinus', + 'howler monkey, howler', + 'titi, titi monkey', + 'spider monkey, Ateles geoffroyi', + 'squirrel monkey, Saimiri sciureus', + 'Madagascar cat, ring-tailed lemur, Lemur catta', + 'indri, indris, Indri indri, Indri brevicaudatus', + 'Indian elephant, Elephas maximus', + 'African elephant, Loxodonta africana', + 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens', + 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca', + 'barracouta, snoek', + 'eel', + 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch', # noqa: E501 + 'rock beauty, Holocanthus tricolor', + 'anemone fish', + 'sturgeon', + 'gar, garfish, garpike, billfish, Lepisosteus osseus', + 'lionfish', + 'puffer, pufferfish, blowfish, globefish', + 'abacus', + 'abaya', + "academic gown, academic robe, judge's robe", + 'accordion, piano accordion, squeeze box', + 'acoustic guitar', + 'aircraft carrier, carrier, flattop, attack aircraft carrier', + 'airliner', + 'airship, dirigible', + 'altar', + 'ambulance', + 'amphibian, amphibious vehicle', + 'analog clock', + 'apiary, bee house', + 'apron', + 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin', # noqa: E501 + 'assault rifle, assault gun', + 'backpack, back pack, knapsack, packsack, rucksack, haversack', + 'bakery, bakeshop, bakehouse', + 'balance beam, beam', + 'balloon', + 'ballpoint, ballpoint pen, ballpen, Biro', + 'Band Aid', + 
'banjo', + 'bannister, banister, balustrade, balusters, handrail', + 'barbell', + 'barber chair', + 'barbershop', + 'barn', + 'barometer', + 'barrel, cask', + 'barrow, garden cart, lawn cart, wheelbarrow', + 'baseball', + 'basketball', + 'bassinet', + 'bassoon', + 'bathing cap, swimming cap', + 'bath towel', + 'bathtub, bathing tub, bath, tub', + 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon', # noqa: E501 + 'beacon, lighthouse, beacon light, pharos', + 'beaker', + 'bearskin, busby, shako', + 'beer bottle', + 'beer glass', + 'bell cote, bell cot', + 'bib', + 'bicycle-built-for-two, tandem bicycle, tandem', + 'bikini, two-piece', + 'binder, ring-binder', + 'binoculars, field glasses, opera glasses', + 'birdhouse', + 'boathouse', + 'bobsled, bobsleigh, bob', + 'bolo tie, bolo, bola tie, bola', + 'bonnet, poke bonnet', + 'bookcase', + 'bookshop, bookstore, bookstall', + 'bottlecap', + 'bow', + 'bow tie, bow-tie, bowtie', + 'brass, memorial tablet, plaque', + 'brassiere, bra, bandeau', + 'breakwater, groin, groyne, mole, bulwark, seawall, jetty', + 'breastplate, aegis, egis', + 'broom', + 'bucket, pail', + 'buckle', + 'bulletproof vest', + 'bullet train, bullet', + 'butcher shop, meat market', + 'cab, hack, taxi, taxicab', + 'caldron, cauldron', + 'candle, taper, wax light', + 'cannon', + 'canoe', + 'can opener, tin opener', + 'cardigan', + 'car mirror', + 'carousel, carrousel, merry-go-round, roundabout, whirligig', + "carpenter's kit, tool kit", + 'carton', + 'car wheel', + 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM', # noqa: E501 + 'cassette', + 'cassette player', + 'castle', + 'catamaran', + 'CD player', + 'cello, violoncello', + 'cellular telephone, cellular phone, cellphone, cell, mobile phone', + 'chain', + 'chainlink fence', + 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour', # noqa: E501 + 'chain saw, chainsaw', + 'chest', + 'chiffonier, commode', + 'chime, bell, gong', + 'china cabinet, china closet', + 'Christmas stocking', + 'church, church building', + 'cinema, movie theater, movie theatre, movie house, picture palace', + 'cleaver, meat cleaver, chopper', + 'cliff dwelling', + 'cloak', + 'clog, geta, patten, sabot', + 'cocktail shaker', + 'coffee mug', + 'coffeepot', + 'coil, spiral, volute, whorl, helix', + 'combination lock', + 'computer keyboard, keypad', + 'confectionery, confectionary, candy store', + 'container ship, containership, container vessel', + 'convertible', + 'corkscrew, bottle screw', + 'cornet, horn, trumpet, trump', + 'cowboy boot', + 'cowboy hat, ten-gallon hat', + 'cradle', + 'crane', + 'crash helmet', + 'crate', + 'crib, cot', + 'Crock Pot', + 'croquet ball', + 'crutch', + 'cuirass', + 'dam, dike, dyke', + 'desk', + 'desktop computer', + 'dial telephone, dial phone', + 'diaper, nappy, napkin', + 'digital clock', + 'digital watch', + 'dining table, board', + 'dishrag, dishcloth', + 'dishwasher, dish washer, dishwashing machine', + 'disk brake, disc brake', + 'dock, dockage, docking facility', + 'dogsled, dog sled, dog sleigh', + 'dome', + 'doormat, welcome mat', + 'drilling platform, offshore rig', + 'drum, membranophone, tympan', + 'drumstick', + 'dumbbell', + 'Dutch oven', + 'electric fan, blower', + 'electric guitar', + 'electric locomotive', + 'entertainment center', + 'envelope', + 'espresso maker', + 'face powder', + 'feather boa, boa', + 'file, file cabinet, filing cabinet', + 'fireboat', + 'fire engine, 
fire truck', + 'fire screen, fireguard', + 'flagpole, flagstaff', + 'flute, transverse flute', + 'folding chair', + 'football helmet', + 'forklift', + 'fountain', + 'fountain pen', + 'four-poster', + 'freight car', + 'French horn, horn', + 'frying pan, frypan, skillet', + 'fur coat', + 'garbage truck, dustcart', + 'gasmask, respirator, gas helmet', + 'gas pump, gasoline pump, petrol pump, island dispenser', + 'goblet', + 'go-kart', + 'golf ball', + 'golfcart, golf cart', + 'gondola', + 'gong, tam-tam', + 'gown', + 'grand piano, grand', + 'greenhouse, nursery, glasshouse', + 'grille, radiator grille', + 'grocery store, grocery, food market, market', + 'guillotine', + 'hair slide', + 'hair spray', + 'half track', + 'hammer', + 'hamper', + 'hand blower, blow dryer, blow drier, hair dryer, hair drier', + 'hand-held computer, hand-held microcomputer', + 'handkerchief, hankie, hanky, hankey', + 'hard disc, hard disk, fixed disk', + 'harmonica, mouth organ, harp, mouth harp', + 'harp', + 'harvester, reaper', + 'hatchet', + 'holster', + 'home theater, home theatre', + 'honeycomb', + 'hook, claw', + 'hoopskirt, crinoline', + 'horizontal bar, high bar', + 'horse cart, horse-cart', + 'hourglass', + 'iPod', + 'iron, smoothing iron', + "jack-o'-lantern", + 'jean, blue jean, denim', + 'jeep, landrover', + 'jersey, T-shirt, tee shirt', + 'jigsaw puzzle', + 'jinrikisha, ricksha, rickshaw', + 'joystick', + 'kimono', + 'knee pad', + 'knot', + 'lab coat, laboratory coat', + 'ladle', + 'lampshade, lamp shade', + 'laptop, laptop computer', + 'lawn mower, mower', + 'lens cap, lens cover', + 'letter opener, paper knife, paperknife', + 'library', + 'lifeboat', + 'lighter, light, igniter, ignitor', + 'limousine, limo', + 'liner, ocean liner', + 'lipstick, lip rouge', + 'Loafer', + 'lotion', + 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system', # noqa: E501 + "loupe, jeweler's loupe", + 'lumbermill, sawmill', + 'magnetic compass', + 'mailbag, postbag', + 'mailbox, letter box', + 'maillot', + 'maillot, tank suit', + 'manhole cover', + 'maraca', + 'marimba, xylophone', + 'mask', + 'matchstick', + 'maypole', + 'maze, labyrinth', + 'measuring cup', + 'medicine chest, medicine cabinet', + 'megalith, megalithic structure', + 'microphone, mike', + 'microwave, microwave oven', + 'military uniform', + 'milk can', + 'minibus', + 'miniskirt, mini', + 'minivan', + 'missile', + 'mitten', + 'mixing bowl', + 'mobile home, manufactured home', + 'Model T', + 'modem', + 'monastery', + 'monitor', + 'moped', + 'mortar', + 'mortarboard', + 'mosque', + 'mosquito net', + 'motor scooter, scooter', + 'mountain bike, all-terrain bike, off-roader', + 'mountain tent', + 'mouse, computer mouse', + 'mousetrap', + 'moving van', + 'muzzle', + 'nail', + 'neck brace', + 'necklace', + 'nipple', + 'notebook, notebook computer', + 'obelisk', + 'oboe, hautboy, hautbois', + 'ocarina, sweet potato', + 'odometer, hodometer, mileometer, milometer', + 'oil filter', + 'organ, pipe organ', + 'oscilloscope, scope, cathode-ray oscilloscope, CRO', + 'overskirt', + 'oxcart', + 'oxygen mask', + 'packet', + 'paddle, boat paddle', + 'paddlewheel, paddle wheel', + 'padlock', + 'paintbrush', + "pajama, pyjama, pj's, jammies", + 'palace', + 'panpipe, pandean pipe, syrinx', + 'paper towel', + 'parachute, chute', + 'parallel bars, bars', + 'park bench', + 'parking meter', + 'passenger car, coach, carriage', + 'patio, terrace', + 'pay-phone, pay-station', + 'pedestal, plinth, footstall', + 'pencil box, pencil case', + 'pencil sharpener', + 'perfume, 
essence', + 'Petri dish', + 'photocopier', + 'pick, plectrum, plectron', + 'pickelhaube', + 'picket fence, paling', + 'pickup, pickup truck', + 'pier', + 'piggy bank, penny bank', + 'pill bottle', + 'pillow', + 'ping-pong ball', + 'pinwheel', + 'pirate, pirate ship', + 'pitcher, ewer', + "plane, carpenter's plane, woodworking plane", + 'planetarium', + 'plastic bag', + 'plate rack', + 'plow, plough', + "plunger, plumber's helper", + 'Polaroid camera, Polaroid Land camera', + 'pole', + 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria', # noqa: E501 + 'poncho', + 'pool table, billiard table, snooker table', + 'pop bottle, soda bottle', + 'pot, flowerpot', + "potter's wheel", + 'power drill', + 'prayer rug, prayer mat', + 'printer', + 'prison, prison house', + 'projectile, missile', + 'projector', + 'puck, hockey puck', + 'punching bag, punch bag, punching ball, punchball', + 'purse', + 'quill, quill pen', + 'quilt, comforter, comfort, puff', + 'racer, race car, racing car', + 'racket, racquet', + 'radiator', + 'radio, wireless', + 'radio telescope, radio reflector', + 'rain barrel', + 'recreational vehicle, RV, R.V.', + 'reel', + 'reflex camera', + 'refrigerator, icebox', + 'remote control, remote', + 'restaurant, eating house, eating place, eatery', + 'revolver, six-gun, six-shooter', + 'rifle', + 'rocking chair, rocker', + 'rotisserie', + 'rubber eraser, rubber, pencil eraser', + 'rugby ball', + 'rule, ruler', + 'running shoe', + 'safe', + 'safety pin', + 'saltshaker, salt shaker', + 'sandal', + 'sarong', + 'sax, saxophone', + 'scabbard', + 'scale, weighing machine', + 'school bus', + 'schooner', + 'scoreboard', + 'screen, CRT screen', + 'screw', + 'screwdriver', + 'seat belt, seatbelt', + 'sewing machine', + 'shield, buckler', + 'shoe shop, shoe-shop, shoe store', + 'shoji', + 'shopping basket', + 'shopping cart', + 'shovel', + 'shower cap', + 'shower curtain', + 'ski', + 'ski mask', + 'sleeping bag', + 'slide rule, slipstick', + 'sliding door', + 'slot, one-armed bandit', + 'snorkel', + 'snowmobile', + 'snowplow, snowplough', + 'soap dispenser', + 'soccer ball', + 'sock', + 'solar dish, solar collector, solar furnace', + 'sombrero', + 'soup bowl', + 'space bar', + 'space heater', + 'space shuttle', + 'spatula', + 'speedboat', + "spider web, spider's web", + 'spindle', + 'sports car, sport car', + 'spotlight, spot', + 'stage', + 'steam locomotive', + 'steel arch bridge', + 'steel drum', + 'stethoscope', + 'stole', + 'stone wall', + 'stopwatch, stop watch', + 'stove', + 'strainer', + 'streetcar, tram, tramcar, trolley, trolley car', + 'stretcher', + 'studio couch, day bed', + 'stupa, tope', + 'submarine, pigboat, sub, U-boat', + 'suit, suit of clothes', + 'sundial', + 'sunglass', + 'sunglasses, dark glasses, shades', + 'sunscreen, sunblock, sun blocker', + 'suspension bridge', + 'swab, swob, mop', + 'sweatshirt', + 'swimming trunks, bathing trunks', + 'swing', + 'switch, electric switch, electrical switch', + 'syringe', + 'table lamp', + 'tank, army tank, armored combat vehicle, armoured combat vehicle', + 'tape player', + 'teapot', + 'teddy, teddy bear', + 'television, television system', + 'tennis ball', + 'thatch, thatched roof', + 'theater curtain, theatre curtain', + 'thimble', + 'thresher, thrasher, threshing machine', + 'throne', + 'tile roof', + 'toaster', + 'tobacco shop, tobacconist shop, tobacconist', + 'toilet seat', + 'torch', + 'totem pole', + 'tow truck, tow car, wrecker', + 'toyshop', + 'tractor', + 'trailer truck, tractor trailer, trucking rig, rig, 
articulated lorry, semi', # noqa: E501 + 'tray', + 'trench coat', + 'tricycle, trike, velocipede', + 'trimaran', + 'tripod', + 'triumphal arch', + 'trolleybus, trolley coach, trackless trolley', + 'trombone', + 'tub, vat', + 'turnstile', + 'typewriter keyboard', + 'umbrella', + 'unicycle, monocycle', + 'upright, upright piano', + 'vacuum, vacuum cleaner', + 'vase', + 'vault', + 'velvet', + 'vending machine', + 'vestment', + 'viaduct', + 'violin, fiddle', + 'volleyball', + 'waffle iron', + 'wall clock', + 'wallet, billfold, notecase, pocketbook', + 'wardrobe, closet, press', + 'warplane, military plane', + 'washbasin, handbasin, washbowl, lavabo, wash-hand basin', + 'washer, automatic washer, washing machine', + 'water bottle', + 'water jug', + 'water tower', + 'whiskey jug', + 'whistle', + 'wig', + 'window screen', + 'window shade', + 'Windsor tie', + 'wine bottle', + 'wing', + 'wok', + 'wooden spoon', + 'wool, woolen, woollen', + 'worm fence, snake fence, snake-rail fence, Virginia fence', + 'wreck', + 'yawl', + 'yurt', + 'web site, website, internet site, site', + 'comic book', + 'crossword puzzle, crossword', + 'street sign', + 'traffic light, traffic signal, stoplight', + 'book jacket, dust cover, dust jacket, dust wrapper', + 'menu', + 'plate', + 'guacamole', + 'consomme', + 'hot pot, hotpot', + 'trifle', + 'ice cream, icecream', + 'ice lolly, lolly, lollipop, popsicle', + 'French loaf', + 'bagel, beigel', + 'pretzel', + 'cheeseburger', + 'hotdog, hot dog, red hot', + 'mashed potato', + 'head cabbage', + 'broccoli', + 'cauliflower', + 'zucchini, courgette', + 'spaghetti squash', + 'acorn squash', + 'butternut squash', + 'cucumber, cuke', + 'artichoke, globe artichoke', + 'bell pepper', + 'cardoon', + 'mushroom', + 'Granny Smith', + 'strawberry', + 'orange', + 'lemon', + 'fig', + 'pineapple, ananas', + 'banana', + 'jackfruit, jak, jack', + 'custard apple', + 'pomegranate', + 'hay', + 'carbonara', + 'chocolate sauce, chocolate syrup', + 'dough', + 'meat loaf, meatloaf', + 'pizza, pizza pie', + 'potpie', + 'burrito', + 'red wine', + 'espresso', + 'cup', + 'eggnog', + 'alp', + 'bubble', + 'cliff, drop, drop-off', + 'coral reef', + 'geyser', + 'lakeside, lakeshore', + 'promontory, headland, head, foreland', + 'sandbar, sand bar', + 'seashore, coast, seacoast, sea-coast', + 'valley, vale', + 'volcano', + 'ballplayer, baseball player', + 'groom, bridegroom', + 'scuba diver', + 'rapeseed', + 'daisy', + "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum", # noqa: E501 + 'corn', + 'acorn', + 'hip, rose hip, rosehip', + 'buckeye, horse chestnut, conker', + 'coral fungus', + 'agaric', + 'gyromitra', + 'stinkhorn, carrion fungus', + 'earthstar', + 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa', # noqa: E501 + 'bolete', + 'ear, spike, capitulum', + 'toilet tissue, toilet paper, bathroom tissue') + +CIFAR10_CATEGORIES = ('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', + 'frog', 'horse', 'ship', 'truck') + +CIFAR100_CATEGORIES = ( + 'apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', + 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', + 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', + 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', + 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', + 'house', 'kangaroo', 'keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', + 'lizard', 'lobster', 'man', 'maple_tree', 
'motorcycle', 'mountain', + 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', + 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', + 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', + 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', + 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', + 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', + 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', + 'woman', 'worm') + +MNIST_CATEGORITES = ('0 - zero', '1 - one', '2 - two', '3 - three', '4 - four', + '5 - five', '6 - six', '7 - seven', '8 - eight', + '9 - nine') + +FASHIONMNIST_CATEGORITES = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', + 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', + 'Ankle boot') + +PLACES205_CATEGORIES = ( + 'abbey', 'airport_terminal', 'alley', 'amphitheater', 'amusement_park', + 'aquarium', 'aqueduct', 'arch', 'art_gallery', 'art_studio', + 'assembly_line', 'attic', 'auditorium', 'apartment_building/outdoor', + 'badlands', 'ballroom', 'bamboo_forest', 'banquet_hall', 'bar', + 'baseball_field', 'basement', 'basilica', 'bayou', 'beauty_salon', + 'bedroom', 'boardwalk', 'boat_deck', 'bookstore', 'botanical_garden', + 'bowling_alley', 'boxing_ring', 'bridge', 'building_facade', + 'bus_interior', 'butchers_shop', 'butte', 'bakery/shop', 'cafeteria', + 'campsite', 'candy_store', 'canyon', 'castle', 'cemetery', 'chalet', + 'classroom', 'closet', 'clothing_store', 'coast', 'cockpit', 'coffee_shop', + 'conference_center', 'conference_room', 'construction_site', 'corn_field', + 'corridor', 'cottage_garden', 'courthouse', 'courtyard', 'creek', + 'crevasse', 'crosswalk', 'cathedral/outdoor', 'church/outdoor', 'dam', + 'dining_room', 'dock', 'dorm_room', 'driveway', 'desert/sand', + 'desert/vegetation', 'dinette/home', 'doorway/outdoor', 'engine_room', + 'excavation', 'fairway', 'fire_escape', 'fire_station', 'food_court', + 'forest_path', 'forest_road', 'formal_garden', 'fountain', + 'field/cultivated', 'field/wild', 'galley', 'game_room', 'garbage_dump', + 'gas_station', 'gift_shop', 'golf_course', 'harbor', 'herb_garden', + 'highway', 'home_office', 'hospital', 'hospital_room', 'hot_spring', + 'hotel_room', 'hotel/outdoor', 'ice_cream_parlor', 'iceberg', 'igloo', + 'islet', 'ice_skating_rink/outdoor', 'inn/outdoor', 'jail_cell', 'kasbah', + 'kindergarden_classroom', 'kitchen', 'kitchenette', 'laundromat', + 'lighthouse', 'living_room', 'lobby', 'locker_room', 'mansion', 'marsh', + 'martial_arts_gym', 'mausoleum', 'medina', 'motel', 'mountain', + 'mountain_snowy', 'music_studio', 'market/outdoor', 'monastery/outdoor', + 'museum/indoor', 'nursery', 'ocean', 'office', 'office_building', + 'orchard', 'pagoda', 'palace', 'pantry', 'parking_lot', 'parlor', + 'pasture', 'patio', 'pavilion', 'phone_booth', 'picnic_area', 'playground', + 'plaza', 'pond', 'pulpit', 'racecourse', 'raft', 'railroad_track', + 'rainforest', 'reception', 'residential_neighborhood', 'restaurant', + 'restaurant_kitchen', 'restaurant_patio', 'rice_paddy', 'river', + 'rock_arch', 'rope_bridge', 'ruin', 'runway', 'sandbar', 'schoolhouse', + 'sea_cliff', 'shed', 'shoe_shop', 'shopfront', 'shower', 'ski_resort', + 'ski_slope', 'sky', 'skyscraper', 'slum', 'snowfield', 'staircase', + 'supermarket', 'swamp', 'stadium/baseball', 'stadium/football', + 'stage/indoor', 'subway_station/platform', 'swimming_pool/outdoor', + 'television_studio', 'topiary_garden', 'tower', 'train_railway', 
+ 'tree_farm', 'trench', 'temple/east_asia', 'temple/south_asia', + 'track/outdoor', 'train_station/platform', 'underwater/coral_reef', + 'valley', 'vegetable_garden', 'veranda', 'viaduct', 'volcano', + 'waiting_room', 'water_tower', 'watering_hole', 'wheat_field', 'wind_farm', + 'windmill', 'yard') + +OxfordIIITPet_CATEGORIES = ( + 'Abyssinian', 'american_bulldog', 'american_pit_bull_terrier', + 'basset_hound', 'beagle', 'Bengal', 'Birman', 'Bombay', 'boxer', + 'British_Shorthair', 'chihuahua', 'Egyptian_Mau', 'english_cocker_spaniel', + 'english_setter', 'german_shorthaired', 'great_pyrenees', 'havanese', + 'japanese_chin', 'keeshond', 'leonberger', 'Maine_Coon', + 'miniature_pinscher', 'newfoundland', 'Persian', 'pomeranian', 'pug', + 'Ragdoll', 'Russian_Blue', 'saint_bernard', 'samoyed', 'scottish_terrier', + 'shiba_inu', 'Siamese', 'Sphynx', 'staffordshire_bull_terrier', + 'wheaten_terrier', 'yorkshire_terrier') + +DTD_CATEGORIES = ('banded', 'blotchy', 'braided', 'bubbly', 'bumpy', + 'chequered', 'cobwebbed', 'cracked', 'crosshatched', + 'crystalline', 'dotted', 'fibrous', 'flecked', 'freckled', + 'frilly', 'gauzy', 'grid', 'grooved', 'honeycombed', + 'interlaced', 'knitted', 'lacelike', 'lined', 'marbled', + 'matted', 'meshed', 'paisley', 'perforated', 'pitted', + 'pleated', 'polka-dotted', 'porous', 'potholed', 'scaly', + 'smeared', 'spiralled', 'sprinkled', 'stained', 'stratified', + 'striped', 'studded', 'swirly', 'veined', 'waffled', 'woven', + 'wrinkled', 'zigzagged') + +FGVCAIRCRAFT_CATEGORIES = ( + '707-320', '727-200', '737-200', '737-300', '737-400', '737-500', + '737-600', '737-700', '737-800', '737-900', '747-100', '747-200', + '747-300', '747-400', '757-200', '757-300', '767-200', '767-300', + '767-400', '777-200', '777-300', 'A300B4', 'A310', 'A318', 'A319', 'A320', + 'A321', 'A330-200', 'A330-300', 'A340-200', 'A340-300', 'A340-500', + 'A340-600', 'A380', 'ATR-42', 'ATR-72', 'An-12', 'BAE 146-200', + 'BAE 146-300', 'BAE-125', 'Beechcraft 1900', 'Boeing 717', 'C-130', 'C-47', + 'CRJ-200', 'CRJ-700', 'CRJ-900', 'Cessna 172', 'Cessna 208', 'Cessna 525', + 'Cessna 560', 'Challenger 600', 'DC-10', 'DC-3', 'DC-6', 'DC-8', 'DC-9-30', + 'DH-82', 'DHC-1', 'DHC-6', 'DHC-8-100', 'DHC-8-300', 'DR-400', + 'Dornier 328', 'E-170', 'E-190', 'E-195', 'EMB-120', 'ERJ 135', 'ERJ 145', + 'Embraer Legacy 600', 'Eurofighter Typhoon', 'F-16A/B', 'F/A-18', + 'Falcon 2000', 'Falcon 900', 'Fokker 100', 'Fokker 50', 'Fokker 70', + 'Global Express', 'Gulfstream IV', 'Gulfstream V', 'Hawk T1', 'Il-76', + 'L-1011', 'MD-11', 'MD-80', 'MD-87', 'MD-90', 'Metroliner', 'Model B200', + 'PA-28', 'SR-20', 'Saab 2000', 'Saab 340', 'Spitfire', 'Tornado', 'Tu-134', + 'Tu-154', 'Yak-42') + +STANFORDCARS_CATEGORIES = ( + 'AM General Hummer SUV 2000', 'Acura RL Sedan 2012', 'Acura TL Sedan 2012', + 'Acura TL Type-S 2008', 'Acura TSX Sedan 2012', + 'Acura Integra Type R 2001', 'Acura ZDX Hatchback 2012', + 'Aston Martin V8 Vantage Convertible 2012', + 'Aston Martin V8 Vantage Coupe 2012', + 'Aston Martin Virage Convertible 2012', 'Aston Martin Virage Coupe 2012', + 'Audi RS 4 Convertible 2008', 'Audi A5 Coupe 2012', 'Audi TTS Coupe 2012', + 'Audi R8 Coupe 2012', 'Audi V8 Sedan 1994', 'Audi 100 Sedan 1994', + 'Audi 100 Wagon 1994', 'Audi TT Hatchback 2011', 'Audi S6 Sedan 2011', + 'Audi S5 Convertible 2012', 'Audi S5 Coupe 2012', 'Audi S4 Sedan 2012', + 'Audi S4 Sedan 2007', 'Audi TT RS Coupe 2012', + 'BMW ActiveHybrid 5 Sedan 2012', 'BMW 1 Series Convertible 2012', + 'BMW 1 Series Coupe 2012', 'BMW 3 Series Sedan 
2012', + 'BMW 3 Series Wagon 2012', 'BMW 6 Series Convertible 2007', + 'BMW X5 SUV 2007', 'BMW X6 SUV 2012', 'BMW M3 Coupe 2012', + 'BMW M5 Sedan 2010', 'BMW M6 Convertible 2010', 'BMW X3 SUV 2012', + 'BMW Z4 Convertible 2012', + 'Bentley Continental Supersports Conv. Convertible 2012', + 'Bentley Arnage Sedan 2009', 'Bentley Mulsanne Sedan 2011', + 'Bentley Continental GT Coupe 2012', 'Bentley Continental GT Coupe 2007', + 'Bentley Continental Flying Spur Sedan 2007', + 'Bugatti Veyron 16.4 Convertible 2009', 'Bugatti Veyron 16.4 Coupe 2009', + 'Buick Regal GS 2012', 'Buick Rainier SUV 2007', 'Buick Verano Sedan 2012', + 'Buick Enclave SUV 2012', 'Cadillac CTS-V Sedan 2012', + 'Cadillac SRX SUV 2012', 'Cadillac Escalade EXT Crew Cab 2007', + 'Chevrolet Silverado 1500 Hybrid Crew Cab 2012', + 'Chevrolet Corvette Convertible 2012', 'Chevrolet Corvette ZR1 2012', + 'Chevrolet Corvette Ron Fellows Edition Z06 2007', + 'Chevrolet Traverse SUV 2012', 'Chevrolet Camaro Convertible 2012', + 'Chevrolet HHR SS 2010', 'Chevrolet Impala Sedan 2007', + 'Chevrolet Tahoe Hybrid SUV 2012', 'Chevrolet Sonic Sedan 2012', + 'Chevrolet Express Cargo Van 2007', 'Chevrolet Avalanche Crew Cab 2012', + 'Chevrolet Cobalt SS 2010', 'Chevrolet Malibu Hybrid Sedan 2010', + 'Chevrolet TrailBlazer SS 2009', + 'Chevrolet Silverado 2500HD Regular Cab 2012', + 'Chevrolet Silverado 1500 Classic Extended Cab 2007', + 'Chevrolet Express Van 2007', 'Chevrolet Monte Carlo Coupe 2007', + 'Chevrolet Malibu Sedan 2007', + 'Chevrolet Silverado 1500 Extended Cab 2012', + 'Chevrolet Silverado 1500 Regular Cab 2012', 'Chrysler Aspen SUV 2009', + 'Chrysler Sebring Convertible 2010', + 'Chrysler Town and Country Minivan 2012', 'Chrysler 300 SRT-8 2010', + 'Chrysler Crossfire Convertible 2008', + 'Chrysler PT Cruiser Convertible 2008', 'Daewoo Nubira Wagon 2002', + 'Dodge Caliber Wagon 2012', 'Dodge Caliber Wagon 2007', + 'Dodge Caravan Minivan 1997', 'Dodge Ram Pickup 3500 Crew Cab 2010', + 'Dodge Ram Pickup 3500 Quad Cab 2009', 'Dodge Sprinter Cargo Van 2009', + 'Dodge Journey SUV 2012', 'Dodge Dakota Crew Cab 2010', + 'Dodge Dakota Club Cab 2007', 'Dodge Magnum Wagon 2008', + 'Dodge Challenger SRT8 2011', 'Dodge Durango SUV 2012', + 'Dodge Durango SUV 2007', 'Dodge Charger Sedan 2012', + 'Dodge Charger SRT-8 2009', 'Eagle Talon Hatchback 1998', + 'FIAT 500 Abarth 2012', 'FIAT 500 Convertible 2012', + 'Ferrari FF Coupe 2012', 'Ferrari California Convertible 2012', + 'Ferrari 458 Italia Convertible 2012', 'Ferrari 458 Italia Coupe 2012', + 'Fisker Karma Sedan 2012', 'Ford F-450 Super Duty Crew Cab 2012', + 'Ford Mustang Convertible 2007', 'Ford Freestar Minivan 2007', + 'Ford Expedition EL SUV 2009', 'Ford Edge SUV 2012', + 'Ford Ranger SuperCab 2011', 'Ford GT Coupe 2006', + 'Ford F-150 Regular Cab 2012', 'Ford F-150 Regular Cab 2007', + 'Ford Focus Sedan 2007', 'Ford E-Series Wagon Van 2012', + 'Ford Fiesta Sedan 2012', 'GMC Terrain SUV 2012', 'GMC Savana Van 2012', + 'GMC Yukon Hybrid SUV 2012', 'GMC Acadia SUV 2012', + 'GMC Canyon Extended Cab 2012', 'Geo Metro Convertible 1993', + 'HUMMER H3T Crew Cab 2010', 'HUMMER H2 SUT Crew Cab 2009', + 'Honda Odyssey Minivan 2012', 'Honda Odyssey Minivan 2007', + 'Honda Accord Coupe 2012', 'Honda Accord Sedan 2012', + 'Hyundai Veloster Hatchback 2012', 'Hyundai Santa Fe SUV 2012', + 'Hyundai Tucson SUV 2012', 'Hyundai Veracruz SUV 2012', + 'Hyundai Sonata Hybrid Sedan 2012', 'Hyundai Elantra Sedan 2007', + 'Hyundai Accent Sedan 2012', 'Hyundai Genesis Sedan 2012', + 'Hyundai Sonata Sedan 
2012', 'Hyundai Elantra Touring Hatchback 2012', + 'Hyundai Azera Sedan 2012', 'Infiniti G Coupe IPL 2012', + 'Infiniti QX56 SUV 2011', 'Isuzu Ascender SUV 2008', 'Jaguar XK XKR 2012', + 'Jeep Patriot SUV 2012', 'Jeep Wrangler SUV 2012', 'Jeep Liberty SUV 2012', + 'Jeep Grand Cherokee SUV 2012', 'Jeep Compass SUV 2012', + 'Lamborghini Reventon Coupe 2008', 'Lamborghini Aventador Coupe 2012', + 'Lamborghini Gallardo LP 570-4 Superleggera 2012', + 'Lamborghini Diablo Coupe 2001', 'Land Rover Range Rover SUV 2012', + 'Land Rover LR2 SUV 2012', 'Lincoln Town Car Sedan 2011', + 'MINI Cooper Roadster Convertible 2012', + 'Maybach Landaulet Convertible 2012', 'Mazda Tribute SUV 2011', + 'McLaren MP4-12C Coupe 2012', 'Mercedes-Benz 300-Class Convertible 1993', + 'Mercedes-Benz C-Class Sedan 2012', 'Mercedes-Benz SL-Class Coupe 2009', + 'Mercedes-Benz E-Class Sedan 2012', 'Mercedes-Benz S-Class Sedan 2012', + 'Mercedes-Benz Sprinter Van 2012', 'Mitsubishi Lancer Sedan 2012', + 'Nissan Leaf Hatchback 2012', 'Nissan NV Passenger Van 2012', + 'Nissan Juke Hatchback 2012', 'Nissan 240SX Coupe 1998', + 'Plymouth Neon Coupe 1999', 'Porsche Panamera Sedan 2012', + 'Ram C/V Cargo Van Minivan 2012', + 'Rolls-Royce Phantom Drophead Coupe Convertible 2012', + 'Rolls-Royce Ghost Sedan 2012', 'Rolls-Royce Phantom Sedan 2012', + 'Scion xD Hatchback 2012', 'Spyker C8 Convertible 2009', + 'Spyker C8 Coupe 2009', 'Suzuki Aerio Sedan 2007', + 'Suzuki Kizashi Sedan 2012', 'Suzuki SX4 Hatchback 2012', + 'Suzuki SX4 Sedan 2012', 'Tesla Model S Sedan 2012', + 'Toyota Sequoia SUV 2012', 'Toyota Camry Sedan 2012', + 'Toyota Corolla Sedan 2012', 'Toyota 4Runner SUV 2012', + 'Volkswagen Golf Hatchback 2012', 'Volkswagen Golf Hatchback 1991', + 'Volkswagen Beetle Hatchback 2012', 'Volvo C30 Hatchback 2012', + 'Volvo 240 Sedan 1993', 'Volvo XC90 SUV 2007', + 'smart fortwo Convertible 2012') + +SUN397_CATEGORIES = ( + 'abbey', 'airplane_cabin', 'airport_terminal', 'alley', 'amphitheater', + 'amusement_arcade', 'amusement_park', 'anechoic_chamber', + 'apartment_building_outdoor', 'apse_indoor', 'aquarium', 'aqueduct', + 'arch', 'archive', 'arrival_gate_outdoor', 'art_gallery', 'art_school', + 'art_studio', 'assembly_line', 'athletic_field_outdoor', 'atrium_public', + 'attic', 'auditorium', 'auto_factory', 'badlands', + 'badminton_court_indoor', 'baggage_claim', 'bakery_shop', + 'balcony_exterior', 'balcony_interior', 'ball_pit', 'ballroom', + 'bamboo_forest', 'banquet_hall', 'bar', 'barn', 'barndoor', + 'baseball_field', 'basement', 'basilica', 'basketball_court_outdoor', + 'bathroom', 'batters_box', 'bayou', 'bazaar_indoor', 'bazaar_outdoor', + 'beach', 'beauty_salon', 'bedroom', 'berth', 'biology_laboratory', + 'bistro_indoor', 'boardwalk', 'boat_deck', 'boathouse', 'bookstore', + 'booth_indoor', 'botanical_garden', 'bow_window_indoor', + 'bow_window_outdoor', 'bowling_alley', 'boxing_ring', 'brewery_indoor', + 'bridge', 'building_facade', 'bullring', 'burial_chamber', 'bus_interior', + 'butchers_shop', 'butte', 'cabin_outdoor', 'cafeteria', 'campsite', + 'campus', 'canal_natural', 'canal_urban', 'candy_store', 'canyon', + 'car_interior_backseat', 'car_interior_frontseat', 'carrousel', + 'casino_indoor', 'castle', 'catacomb', 'cathedral_indoor', + 'cathedral_outdoor', 'cavern_indoor', 'cemetery', 'chalet', + 'cheese_factory', 'chemistry_lab', 'chicken_coop_indoor', + 'chicken_coop_outdoor', 'childs_room', 'church_indoor', 'church_outdoor', + 'classroom', 'clean_room', 'cliff', 'cloister_indoor', 'closet', + 'clothing_store', 
'coast', 'cockpit', 'coffee_shop', 'computer_room', + 'conference_center', 'conference_room', 'construction_site', + 'control_room', 'control_tower_outdoor', 'corn_field', 'corral', + 'corridor', 'cottage_garden', 'courthouse', 'courtroom', 'courtyard', + 'covered_bridge_exterior', 'creek', 'crevasse', 'crosswalk', + 'cubicle_office', 'dam', 'delicatessen', 'dentists_office', 'desert_sand', + 'desert_vegetation', 'diner_indoor', 'diner_outdoor', 'dinette_home', + 'dinette_vehicle', 'dining_car', 'dining_room', 'discotheque', 'dock', + 'doorway_outdoor', 'dorm_room', 'driveway', 'driving_range_outdoor', + 'drugstore', 'electrical_substation', 'elevator_door', 'elevator_interior', + 'elevator_shaft', 'engine_room', 'escalator_indoor', 'excavation', + 'factory_indoor', 'fairway', 'fastfood_restaurant', 'field_cultivated', + 'field_wild', 'fire_escape', 'fire_station', 'firing_range_indoor', + 'fishpond', 'florist_shop_indoor', 'food_court', 'forest_broadleaf', + 'forest_needleleaf', 'forest_path', 'forest_road', 'formal_garden', + 'fountain', 'galley', 'game_room', 'garage_indoor', 'garbage_dump', + 'gas_station', 'gazebo_exterior', 'general_store_indoor', + 'general_store_outdoor', 'gift_shop', 'golf_course', 'greenhouse_indoor', + 'greenhouse_outdoor', 'gymnasium_indoor', 'hangar_indoor', + 'hangar_outdoor', 'harbor', 'hayfield', 'heliport', 'herb_garden', + 'highway', 'hill', 'home_office', 'hospital', 'hospital_room', + 'hot_spring', 'hot_tub_outdoor', 'hotel_outdoor', 'hotel_room', 'house', + 'hunting_lodge_outdoor', 'ice_cream_parlor', 'ice_floe', 'ice_shelf', + 'ice_skating_rink_indoor', 'ice_skating_rink_outdoor', 'iceberg', 'igloo', + 'industrial_area', 'inn_outdoor', 'islet', 'jacuzzi_indoor', 'jail_indoor', + 'jail_cell', 'jewelry_shop', 'kasbah', 'kennel_indoor', 'kennel_outdoor', + 'kindergarden_classroom', 'kitchen', 'kitchenette', 'labyrinth_outdoor', + 'lake_natural', 'landfill', 'landing_deck', 'laundromat', 'lecture_room', + 'library_indoor', 'library_outdoor', 'lido_deck_outdoor', 'lift_bridge', + 'lighthouse', 'limousine_interior', 'living_room', 'lobby', 'lock_chamber', + 'locker_room', 'mansion', 'manufactured_home', 'market_indoor', + 'market_outdoor', 'marsh', 'martial_arts_gym', 'mausoleum', 'medina', + 'moat_water', 'monastery_outdoor', 'mosque_indoor', 'mosque_outdoor', + 'motel', 'mountain', 'mountain_snowy', 'movie_theater_indoor', + 'museum_indoor', 'music_store', 'music_studio', + 'nuclear_power_plant_outdoor', 'nursery', 'oast_house', + 'observatory_outdoor', 'ocean', 'office', 'office_building', + 'oil_refinery_outdoor', 'oilrig', 'operating_room', 'orchard', + 'outhouse_outdoor', 'pagoda', 'palace', 'pantry', 'park', + 'parking_garage_indoor', 'parking_garage_outdoor', 'parking_lot', 'parlor', + 'pasture', 'patio', 'pavilion', 'pharmacy', 'phone_booth', + 'physics_laboratory', 'picnic_area', 'pilothouse_indoor', + 'planetarium_outdoor', 'playground', 'playroom', 'plaza', 'podium_indoor', + 'podium_outdoor', 'pond', 'poolroom_establishment', 'poolroom_home', + 'power_plant_outdoor', 'promenade_deck', 'pub_indoor', 'pulpit', + 'putting_green', 'racecourse', 'raceway', 'raft', 'railroad_track', + 'rainforest', 'reception', 'recreation_room', 'residential_neighborhood', + 'restaurant', 'restaurant_kitchen', 'restaurant_patio', 'rice_paddy', + 'riding_arena', 'river', 'rock_arch', 'rope_bridge', 'ruin', 'runway', + 'sandbar', 'sandbox', 'sauna', 'schoolhouse', 'sea_cliff', 'server_room', + 'shed', 'shoe_shop', 'shopfront', 'shopping_mall_indoor', 'shower', + 
'skatepark', 'ski_lodge', 'ski_resort', 'ski_slope', 'sky', 'skyscraper', + 'slum', 'snowfield', 'squash_court', 'stable', 'stadium_baseball', + 'stadium_football', 'stage_indoor', 'staircase', 'street', + 'subway_interior', 'subway_station_platform', 'supermarket', 'sushi_bar', + 'swamp', 'swimming_pool_indoor', 'swimming_pool_outdoor', + 'synagogue_indoor', 'synagogue_outdoor', 'television_studio', + 'temple_east_asia', 'temple_south_asia', 'tennis_court_indoor', + 'tennis_court_outdoor', 'tent_outdoor', 'theater_indoor_procenium', + 'theater_indoor_seats', 'thriftshop', 'throne_room', 'ticket_booth', + 'toll_plaza', 'topiary_garden', 'tower', 'toyshop', 'track_outdoor', + 'train_railway', 'train_station_platform', 'tree_farm', 'tree_house', + 'trench', 'underwater_coral_reef', 'utility_room', 'valley', + 'van_interior', 'vegetable_garden', 'veranda', 'veterinarians_office', + 'viaduct', 'videostore', 'village', 'vineyard', 'volcano', + 'volleyball_court_indoor', 'volleyball_court_outdoor', 'waiting_room', + 'warehouse_indoor', 'water_tower', 'waterfall_block', 'waterfall_fan', + 'waterfall_plunge', 'watering_hole', 'wave', 'wet_bar', 'wheat_field', + 'wind_farm', 'windmill', 'wine_cellar_barrel_storage', + 'wine_cellar_bottle_storage', 'wrestling_ring_indoor', 'yard', + 'youth_hostel') + +CALTECH101_CATEGORIES = ( + 'BACKGROUND_Google', 'Faces', 'Faces_easy', 'Leopards', 'Motorbikes', + 'accordion', 'airplanes', 'anchor', 'ant', 'barrel', 'bass', 'beaver', + 'binocular', 'bonsai', 'brain', 'brontosaurus', 'buddha', 'butterfly', + 'camera', 'cannon', 'car_side', 'ceiling_fan', 'cellphone', 'chair', + 'chandelier', 'cougar_body', 'cougar_face', 'crab', 'crayfish', + 'crocodile', 'crocodile_head', 'cup', 'dalmatian', 'dollar_bill', + 'dolphin', 'dragonfly', 'electric_guitar', 'elephant', 'emu', 'euphonium', + 'ewer', 'ferry', 'flamingo', 'flamingo_head', 'garfield', 'gerenuk', + 'gramophone', 'grand_piano', 'hawksbill', 'headphone', 'hedgehog', + 'helicopter', 'ibis', 'inline_skate', 'joshua_tree', 'kangaroo', 'ketch', + 'lamp', 'laptop', 'llama', 'lobster', 'lotus', 'mandolin', 'mayfly', + 'menorah', 'metronome', 'minaret', 'nautilus', 'octopus', 'okapi', + 'pagoda', 'panda', 'pigeon', 'pizza', 'platypus', 'pyramid', 'revolver', + 'rhino', 'rooster', 'saxophone', 'schooner', 'scissors', 'scorpion', + 'sea_horse', 'snoopy', 'soccer_ball', 'stapler', 'starfish', 'stegosaurus', + 'stop_sign', 'strawberry', 'sunflower', 'tick', 'trilobite', 'umbrella', + 'watch', 'water_lilly', 'wheelchair', 'wild_cat', 'windsor_chair', + 'wrench', 'yin_yang') + +FOOD101_CATEGORIES = ( + 'apple_pie', 'baby_back_ribs', 'baklava', 'beef_carpaccio', 'beef_tartare', + 'beet_salad', 'beignets', 'bibimbap', 'bread_pudding', 'breakfast_burrito', + 'bruschetta', 'caesar_salad', 'cannoli', 'caprese_salad', 'carrot_cake', + 'ceviche', 'cheesecake', 'cheese_plate', 'chicken_curry', + 'chicken_quesadilla', 'chicken_wings', 'chocolate_cake', + 'chocolate_mousse', 'churros', 'clam_chowder', 'club_sandwich', + 'crab_cakes', 'creme_brulee', 'croque_madame', 'cup_cakes', 'deviled_eggs', + 'donuts', 'dumplings', 'edamame', 'eggs_benedict', 'escargots', 'falafel', + 'filet_mignon', 'fish_and_chips', 'foie_gras', 'french_fries', + 'french_onion_soup', 'french_toast', 'fried_calamari', 'fried_rice', + 'frozen_yogurt', 'garlic_bread', 'gnocchi', 'greek_salad', + 'grilled_cheese_sandwich', 'grilled_salmon', 'guacamole', 'gyoza', + 'hamburger', 'hot_and_sour_soup', 'hot_dog', 'huevos_rancheros', 'hummus', + 'ice_cream', 'lasagna', 
'lobster_bisque', 'lobster_roll_sandwich', + 'macaroni_and_cheese', 'macarons', 'miso_soup', 'mussels', 'nachos', + 'omelette', 'onion_rings', 'oysters', 'pad_thai', 'paella', 'pancakes', + 'panna_cotta', 'peking_duck', 'pho', 'pizza', 'pork_chop', 'poutine', + 'prime_rib', 'pulled_pork_sandwich', 'ramen', 'ravioli', 'red_velvet_cake', + 'risotto', 'samosa', 'sashimi', 'scallops', 'seaweed_salad', + 'shrimp_and_grits', 'spaghetti_bolognese', 'spaghetti_carbonara', + 'spring_rolls', 'steak', 'strawberry_shortcake', 'sushi', 'tacos', + 'takoyaki', 'tiramisu', 'tuna_tartare', 'waffles') + +CIFAR100_CATEGORIES_CN = ( + '苹果', '水族馆鱼', '婴儿', '熊', '河狸', '床', '蜜蜂', '甲虫', '自行车', '瓶子', '碗', '小男孩', + '桥', '公共汽车', '蝴蝶', '骆驼', '易拉罐', '城堡', '毛毛虫', '牛', '椅子', '猩猩', '钟', '白云', + '蟑螂', '沙发', '螃蟹', '鳄鱼', '杯子', '恐龙', '海豚', '大象', '比目鱼', '森林', '狐狸', '小女孩', + '仓鼠', '屋子', '袋鼠', '键盘', '台灯', '割草机', '猎豹', '狮子', '蜥蜴', '龙虾', '男人', '枫树', + '摩托车', '山', '老鼠', '蘑菇', '橡树', '橙子橘子', '兰花', '水獭', '棕榈树', '梨', '皮卡车', '松树', + '田野', '盘子', '罂粟', '豪猪', '负鼠', '兔子', '浣熊', '鳐鱼', '公路', '火箭', '玫瑰', '大海', + '海豹', '鲨鱼', '尖嘴小鼠', '臭鼬', '摩天大楼', '蜗牛', '蛇', '蜘蛛', '松鼠', '电车', '向日葵', '甜椒', + '桌子', '坦克', '电话', '电视', '老虎', '拖拉机', '火车', '鳟鱼', '郁金香', '乌龟', '衣柜', '鲸鱼', + '柳树', '狼', '女人', '蠕虫') + +IMAGENET_SIMPLE_CATEGORIES = ( + 'tench', 'goldfish', 'great white shark', 'tiger shark', + 'hammerhead shark', 'electric ray', 'stingray', 'rooster', 'hen', + 'ostrich', 'brambling', 'goldfinch', 'house finch', 'junco', + 'indigo bunting', 'American robin', 'bulbul', 'jay', 'magpie', 'chickadee', + 'American dipper', 'kite (bird of prey)', 'bald eagle', 'vulture', + 'great grey owl', 'fire salamander', 'smooth newt', 'newt', + 'spotted salamander', 'axolotl', 'American bullfrog', 'tree frog', + 'tailed frog', 'loggerhead sea turtle', 'leatherback sea turtle', + 'mud turtle', 'terrapin', 'box turtle', 'banded gecko', 'green iguana', + 'Carolina anole', 'desert grassland whiptail lizard', 'agama', + 'frilled-necked lizard', 'alligator lizard', 'Gila monster', + 'European green lizard', 'chameleon', 'Komodo dragon', 'Nile crocodile', + 'American alligator', 'triceratops', 'worm snake', 'ring-necked snake', + 'eastern hog-nosed snake', 'smooth green snake', 'kingsnake', + 'garter snake', 'water snake', 'vine snake', 'night snake', + 'boa constrictor', 'African rock python', 'Indian cobra', 'green mamba', + 'sea snake', 'Saharan horned viper', 'eastern diamondback rattlesnake', + 'sidewinder rattlesnake', 'trilobite', 'harvestman', 'scorpion', + 'yellow garden spider', 'barn spider', 'European garden spider', + 'southern black widow', 'tarantula', 'wolf spider', 'tick', 'centipede', + 'black grouse', 'ptarmigan', 'ruffed grouse', 'prairie grouse', 'peafowl', + 'quail', 'partridge', 'african grey parrot', 'macaw', + 'sulphur-crested cockatoo', 'lorikeet', 'coucal', 'bee eater', 'hornbill', + 'hummingbird', 'jacamar', 'toucan', 'duck', 'red-breasted merganser', + 'goose', 'black swan', 'tusker', 'echidna', 'platypus', 'wallaby', 'koala', + 'wombat', 'jellyfish', 'sea anemone', 'brain coral', 'flatworm', + 'nematode', 'conch', 'snail', 'slug', 'sea slug', 'chiton', + 'chambered nautilus', 'Dungeness crab', 'rock crab', 'fiddler crab', + 'red king crab', 'American lobster', 'spiny lobster', 'crayfish', + 'hermit crab', 'isopod', 'white stork', 'black stork', 'spoonbill', + 'flamingo', 'little blue heron', 'great egret', 'bittern bird', + 'crane bird', 'limpkin', 'common gallinule', 'American coot', 'bustard', + 'ruddy turnstone', 'dunlin', 'common redshank', 
'dowitcher', + 'oystercatcher', 'pelican', 'king penguin', 'albatross', 'grey whale', + 'killer whale', 'dugong', 'sea lion', 'Chihuahua', 'Japanese Chin', + 'Maltese', 'Pekingese', 'Shih Tzu', 'King Charles Spaniel', 'Papillon', + 'toy terrier', 'Rhodesian Ridgeback', 'Afghan Hound', 'Basset Hound', + 'Beagle', 'Bloodhound', 'Bluetick Coonhound', 'Black and Tan Coonhound', + 'Treeing Walker Coonhound', 'English foxhound', 'Redbone Coonhound', + 'borzoi', 'Irish Wolfhound', 'Italian Greyhound', 'Whippet', + 'Ibizan Hound', 'Norwegian Elkhound', 'Otterhound', 'Saluki', + 'Scottish Deerhound', 'Weimaraner', 'Staffordshire Bull Terrier', + 'American Staffordshire Terrier', 'Bedlington Terrier', 'Border Terrier', + 'Kerry Blue Terrier', 'Irish Terrier', 'Norfolk Terrier', + 'Norwich Terrier', 'Yorkshire Terrier', 'Wire Fox Terrier', + 'Lakeland Terrier', 'Sealyham Terrier', 'Airedale Terrier', + 'Cairn Terrier', 'Australian Terrier', 'Dandie Dinmont Terrier', + 'Boston Terrier', 'Miniature Schnauzer', 'Giant Schnauzer', + 'Standard Schnauzer', 'Scottish Terrier', 'Tibetan Terrier', + 'Australian Silky Terrier', 'Soft-coated Wheaten Terrier', + 'West Highland White Terrier', 'Lhasa Apso', 'Flat-Coated Retriever', + 'Curly-coated Retriever', 'Golden Retriever', 'Labrador Retriever', + 'Chesapeake Bay Retriever', 'German Shorthaired Pointer', 'Vizsla', + 'English Setter', 'Irish Setter', 'Gordon Setter', 'Brittany dog', + 'Clumber Spaniel', 'English Springer Spaniel', 'Welsh Springer Spaniel', + 'Cocker Spaniel', 'Sussex Spaniel', 'Irish Water Spaniel', 'Kuvasz', + 'Schipperke', 'Groenendael dog', 'Malinois', 'Briard', 'Australian Kelpie', + 'Komondor', 'Old English Sheepdog', 'Shetland Sheepdog', 'collie', + 'Border Collie', 'Bouvier des Flandres dog', 'Rottweiler', + 'German Shepherd Dog', 'Dobermann', 'Miniature Pinscher', + 'Greater Swiss Mountain Dog', 'Bernese Mountain Dog', + 'Appenzeller Sennenhund', 'Entlebucher Sennenhund', 'Boxer', 'Bullmastiff', + 'Tibetan Mastiff', 'French Bulldog', 'Great Dane', 'St. 
Bernard', 'husky', + 'Alaskan Malamute', 'Siberian Husky', 'Dalmatian', 'Affenpinscher', + 'Basenji', 'pug', 'Leonberger', 'Newfoundland dog', 'Great Pyrenees dog', + 'Samoyed', 'Pomeranian', 'Chow Chow', 'Keeshond', 'brussels griffon', + 'Pembroke Welsh Corgi', 'Cardigan Welsh Corgi', 'Toy Poodle', + 'Miniature Poodle', 'Standard Poodle', + 'Mexican hairless dog (xoloitzcuintli)', 'grey wolf', + 'Alaskan tundra wolf', 'red wolf or maned wolf', 'coyote', 'dingo', + 'dhole', 'African wild dog', 'hyena', 'red fox', 'kit fox', 'Arctic fox', + 'grey fox', 'tabby cat', 'tiger cat', 'Persian cat', 'Siamese cat', + 'Egyptian Mau', 'cougar', 'lynx', 'leopard', 'snow leopard', 'jaguar', + 'lion', 'tiger', 'cheetah', 'brown bear', 'American black bear', + 'polar bear', 'sloth bear', 'mongoose', 'meerkat', 'tiger beetle', + 'ladybug', 'ground beetle', 'longhorn beetle', 'leaf beetle', + 'dung beetle', 'rhinoceros beetle', 'weevil', 'fly', 'bee', 'ant', + 'grasshopper', 'cricket insect', 'stick insect', 'cockroach', + 'praying mantis', 'cicada', 'leafhopper', 'lacewing', 'dragonfly', + 'damselfly', 'red admiral butterfly', 'ringlet butterfly', + 'monarch butterfly', 'small white butterfly', 'sulphur butterfly', + 'gossamer-winged butterfly', 'starfish', 'sea urchin', 'sea cucumber', + 'cottontail rabbit', 'hare', 'Angora rabbit', 'hamster', 'porcupine', + 'fox squirrel', 'marmot', 'beaver', 'guinea pig', 'common sorrel horse', + 'zebra', 'pig', 'wild boar', 'warthog', 'hippopotamus', 'ox', + 'water buffalo', 'bison', 'ram (adult male sheep)', 'bighorn sheep', + 'Alpine ibex', 'hartebeest', 'impala (antelope)', 'gazelle', + 'arabian camel', 'llama', 'weasel', 'mink', 'European polecat', + 'black-footed ferret', 'otter', 'skunk', 'badger', 'armadillo', + 'three-toed sloth', 'orangutan', 'gorilla', 'chimpanzee', 'gibbon', + 'siamang', 'guenon', 'patas monkey', 'baboon', 'macaque', 'langur', + 'black-and-white colobus', 'proboscis monkey', 'marmoset', + 'white-headed capuchin', 'howler monkey', 'titi monkey', + "Geoffroy's spider monkey", 'common squirrel monkey', 'ring-tailed lemur', + 'indri', 'Asian elephant', 'African bush elephant', 'red panda', + 'giant panda', 'snoek fish', 'eel', 'silver salmon', 'rock beauty fish', + 'clownfish', 'sturgeon', 'gar fish', 'lionfish', 'pufferfish', 'abacus', + 'abaya', 'academic gown', 'accordion', 'acoustic guitar', + 'aircraft carrier', 'airliner', 'airship', 'altar', 'ambulance', + 'amphibious vehicle', 'analog clock', 'apiary', 'apron', 'trash can', + 'assault rifle', 'backpack', 'bakery', 'balance beam', 'balloon', + 'ballpoint pen', 'Band-Aid', 'banjo', 'baluster / handrail', 'barbell', + 'barber chair', 'barbershop', 'barn', 'barometer', 'barrel', 'wheelbarrow', + 'baseball', 'basketball', 'bassinet', 'bassoon', 'swimming cap', + 'bath towel', 'bathtub', 'station wagon', 'lighthouse', 'beaker', + 'military hat (bearskin or shako)', 'beer bottle', 'beer glass', + 'bell tower', 'baby bib', 'tandem bicycle', 'bikini', 'ring binder', + 'binoculars', 'birdhouse', 'boathouse', 'bobsleigh', 'bolo tie', + 'poke bonnet', 'bookcase', 'bookstore', 'bottle cap', 'hunting bow', + 'bow tie', 'brass memorial plaque', 'bra', 'breakwater', 'breastplate', + 'broom', 'bucket', 'buckle', 'bulletproof vest', 'high-speed train', + 'butcher shop', 'taxicab', 'cauldron', 'candle', 'cannon', 'canoe', + 'can opener', 'cardigan', 'car mirror', 'carousel', 'tool kit', + 'cardboard box / carton', 'car wheel', 'automated teller machine', + 'cassette', 'cassette player', 'castle', 
'catamaran', 'CD player', 'cello', + 'mobile phone', 'chain', 'chain-link fence', 'chain mail', 'chainsaw', + 'storage chest', 'chiffonier', 'bell or wind chime', 'china cabinet', + 'Christmas stocking', 'church', 'movie theater', 'cleaver', + 'cliff dwelling', 'cloak', 'clogs', 'cocktail shaker', 'coffee mug', + 'coffeemaker', 'spiral or coil', 'combination lock', 'computer keyboard', + 'candy store', 'container ship', 'convertible', 'corkscrew', 'cornet', + 'cowboy boot', 'cowboy hat', 'cradle', 'construction crane', + 'crash helmet', 'crate', 'infant bed', 'Crock Pot', 'croquet ball', + 'crutch', 'cuirass', 'dam', 'desk', 'desktop computer', + 'rotary dial telephone', 'diaper', 'digital clock', 'digital watch', + 'dining table', 'dishcloth', 'dishwasher', 'disc brake', 'dock', + 'dog sled', 'dome', 'doormat', 'drilling rig', 'drum', 'drumstick', + 'dumbbell', 'Dutch oven', 'electric fan', 'electric guitar', + 'electric locomotive', 'entertainment center', 'envelope', + 'espresso machine', 'face powder', 'feather boa', 'filing cabinet', + 'fireboat', 'fire truck', 'fire screen', 'flagpole', 'flute', + 'folding chair', 'football helmet', 'forklift', 'fountain', 'fountain pen', + 'four-poster bed', 'freight car', 'French horn', 'frying pan', 'fur coat', + 'garbage truck', 'gas mask or respirator', 'gas pump', 'goblet', 'go-kart', + 'golf ball', 'golf cart', 'gondola', 'gong', 'gown', 'grand piano', + 'greenhouse', 'radiator grille', 'grocery store', 'guillotine', + 'hair clip', 'hair spray', 'half-track', 'hammer', 'hamper', 'hair dryer', + 'hand-held computer', 'handkerchief', 'hard disk drive', 'harmonica', + 'harp', 'combine harvester', 'hatchet', 'holster', 'home theater', + 'honeycomb', 'hook', 'hoop skirt', 'gymnastic horizontal bar', + 'horse-drawn vehicle', 'hourglass', 'iPod', 'clothes iron', + 'carved pumpkin', 'jeans', 'jeep', 'T-shirt', 'jigsaw puzzle', 'rickshaw', + 'joystick', 'kimono', 'knee pad', 'knot', 'lab coat', 'ladle', 'lampshade', + 'laptop computer', 'lawn mower', 'lens cap', 'letter opener', 'library', + 'lifeboat', 'lighter', 'limousine', 'ocean liner', 'lipstick', + 'slip-on shoe', 'lotion', 'music speaker', 'loupe magnifying glass', + 'sawmill', 'magnetic compass', 'messenger bag', 'mailbox', 'tights', + 'one-piece bathing suit', 'manhole cover', 'maraca', 'marimba', 'mask', + 'matchstick', 'maypole', 'maze', 'measuring cup', 'medicine cabinet', + 'megalith', 'microphone', 'microwave oven', 'military uniform', 'milk can', + 'minibus', 'miniskirt', 'minivan', 'missile', 'mitten', 'mixing bowl', + 'mobile home', 'ford model t', 'modem', 'monastery', 'monitor', 'moped', + 'mortar and pestle', 'graduation cap', 'mosque', 'mosquito net', 'vespa', + 'mountain bike', 'tent', 'computer mouse', 'mousetrap', 'moving van', + 'muzzle', 'metal nail', 'neck brace', 'necklace', 'baby pacifier', + 'notebook computer', 'obelisk', 'oboe', 'ocarina', 'odometer', + 'oil filter', 'pipe organ', 'oscilloscope', 'overskirt', 'bullock cart', + 'oxygen mask', 'product packet / packaging', 'paddle', 'paddle wheel', + 'padlock', 'paintbrush', 'pajamas', 'palace', 'pan flute', 'paper towel', + 'parachute', 'parallel bars', 'park bench', 'parking meter', + 'railroad car', 'patio', 'payphone', 'pedestal', 'pencil case', + 'pencil sharpener', 'perfume', 'Petri dish', 'photocopier', 'plectrum', + 'Pickelhaube', 'picket fence', 'pickup truck', 'pier', 'piggy bank', + 'pill bottle', 'pillow', 'ping-pong ball', 'pinwheel', 'pirate ship', + 'drink pitcher', 'block plane', 'planetarium', 'plastic 
bag', 'plate rack', + 'farm plow', 'plunger', 'Polaroid camera', 'pole', 'police van', 'poncho', + 'pool table', 'soda bottle', 'plant pot', "potter's wheel", 'power drill', + 'prayer rug', 'printer', 'prison', 'missile', 'projector', 'hockey puck', + 'punching bag', 'purse', 'quill', 'quilt', 'race car', 'racket', + 'radiator', 'radio', 'radio telescope', 'rain barrel', + 'recreational vehicle', 'fishing casting reel', 'reflex camera', + 'refrigerator', 'remote control', 'restaurant', 'revolver', 'rifle', + 'rocking chair', 'rotisserie', 'eraser', 'rugby ball', + 'ruler measuring stick', 'sneaker', 'safe', 'safety pin', 'salt shaker', + 'sandal', 'sarong', 'saxophone', 'scabbard', 'weighing scale', + 'school bus', 'schooner', 'scoreboard', 'CRT monitor', 'screw', + 'screwdriver', 'seat belt', 'sewing machine', 'shield', 'shoe store', + 'shoji screen / room divider', 'shopping basket', 'shopping cart', + 'shovel', 'shower cap', 'shower curtain', 'ski', 'balaclava ski mask', + 'sleeping bag', 'slide rule', 'sliding door', 'slot machine', 'snorkel', + 'snowmobile', 'snowplow', 'soap dispenser', 'soccer ball', 'sock', + 'solar thermal collector', 'sombrero', 'soup bowl', 'keyboard space bar', + 'space heater', 'space shuttle', 'spatula', 'motorboat', 'spider web', + 'spindle', 'sports car', 'spotlight', 'stage', 'steam locomotive', + 'through arch bridge', 'steel drum', 'stethoscope', 'scarf', 'stone wall', + 'stopwatch', 'stove', 'strainer', 'tram', 'stretcher', 'couch', 'stupa', + 'submarine', 'suit', 'sundial', 'sunglasses', 'sunglasses', 'sunscreen', + 'suspension bridge', 'mop', 'sweatshirt', 'swim trunks / shorts', 'swing', + 'electrical switch', 'syringe', 'table lamp', 'tank', 'tape player', + 'teapot', 'teddy bear', 'television', 'tennis ball', 'thatched roof', + 'front curtain', 'thimble', 'threshing machine', 'throne', 'tile roof', + 'toaster', 'tobacco shop', 'toilet seat', 'torch', 'totem pole', + 'tow truck', 'toy store', 'tractor', 'semi-trailer truck', 'tray', + 'trench coat', 'tricycle', 'trimaran', 'tripod', 'triumphal arch', + 'trolleybus', 'trombone', 'hot tub', 'turnstile', 'typewriter keyboard', + 'umbrella', 'unicycle', 'upright piano', 'vacuum cleaner', 'vase', + 'vaulted or arched ceiling', 'velvet fabric', 'vending machine', + 'vestment', 'viaduct', 'violin', 'volleyball', 'waffle iron', 'wall clock', + 'wallet', 'wardrobe', 'military aircraft', 'sink', 'washing machine', + 'water bottle', 'water jug', 'water tower', 'whiskey jug', 'whistle', + 'hair wig', 'window screen', 'window shade', 'Windsor tie', 'wine bottle', + 'airplane wing', 'wok', 'wooden spoon', 'wool', 'split-rail fence', + 'shipwreck', 'sailboat', 'yurt', 'website', 'comic book', 'crossword', + 'traffic or street sign', 'traffic light', 'dust jacket', 'menu', 'plate', + 'guacamole', 'consomme', 'hot pot', 'trifle', 'ice cream', 'popsicle', + 'baguette', 'bagel', 'pretzel', 'cheeseburger', 'hot dog', + 'mashed potatoes', 'cabbage', 'broccoli', 'cauliflower', 'zucchini', + 'spaghetti squash', 'acorn squash', 'butternut squash', 'cucumber', + 'artichoke', 'bell pepper', 'cardoon', 'mushroom', 'Granny Smith apple', + 'strawberry', 'orange', 'lemon', 'fig', 'pineapple', 'banana', 'jackfruit', + 'cherimoya (custard apple)', 'pomegranate', 'hay', 'carbonara', + 'chocolate syrup', 'dough', 'meatloaf', 'pizza', 'pot pie', 'burrito', + 'red wine', 'espresso', 'tea cup', 'eggnog', 'mountain', 'bubble', 'cliff', + 'coral reef', 'geyser', 'lakeshore', 'promontory', 'sandbar', 'beach', + 'valley', 'volcano', 
'baseball player', 'bridegroom', 'scuba diver', + 'rapeseed', 'daisy', "yellow lady's slipper", 'corn', 'acorn', 'rose hip', + 'horse chestnut seed', 'coral fungus', 'agaric', 'gyromitra', + 'stinkhorn mushroom', 'earth star fungus', 'hen of the woods mushroom', + 'bolete', 'corn cob', 'toilet paper') diff --git a/mmpretrain/datasets/cifar.py b/mmpretrain/datasets/cifar.py new file mode 100644 index 0000000..2a011da --- /dev/null +++ b/mmpretrain/datasets/cifar.py @@ -0,0 +1,210 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pickle +from typing import List, Optional + +import mmengine.dist as dist +import numpy as np +from mmengine.fileio import (LocalBackend, exists, get, get_file_backend, + join_path) +from mmengine.logging import MMLogger + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset +from .categories import CIFAR10_CATEGORIES, CIFAR100_CATEGORIES +from .utils import check_md5, download_and_extract_archive + + +@DATASETS.register_module() +class CIFAR10(BaseDataset): + """`CIFAR10 `_ Dataset. + + This implementation is modified from + https://github.com/pytorch/vision/blob/master/torchvision/datasets/cifar.py + + Args: + data_root (str): The root directory of the CIFAR Dataset. + split (str, optional): The dataset split, supports "train" and "test". + Default to "train". + metainfo (dict, optional): Meta information for dataset, such as + categories information. Defaults to None. + download (bool): Whether to download the dataset if not exists. + Defaults to True. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ # noqa: E501 + + base_folder = 'cifar-10-batches-py' + url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz' + filename = 'cifar-10-python.tar.gz' + tgz_md5 = 'c58f30108f718f92721af3b95e74349a' + train_list = [ + ['data_batch_1', 'c99cafc152244af753f735de768cd75f'], + ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'], + ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'], + ['data_batch_4', '634d18415352ddfa80567beed471001a'], + ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'], + ] + + test_list = [ + ['test_batch', '40351d587109b95175f43aff81a1287e'], + ] + meta = { + 'filename': 'batches.meta', + 'key': 'label_names', + 'md5': '5ff9c542aee3614f3951f8cda6e48888', + } + METAINFO = {'classes': CIFAR10_CATEGORIES} + + def __init__(self, + data_root: str = '', + split: str = 'train', + metainfo: Optional[dict] = None, + download: bool = True, + data_prefix: str = '', + test_mode: bool = False, + **kwargs): + + splits = ['train', 'test'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'" + self.split = split + + # To handle the BC-breaking + if split == 'train' and test_mode: + logger = MMLogger.get_current_instance() + logger.warning('split="train" but test_mode=True. 
' + 'The training set will be used.') + + if not data_root and not data_prefix: + raise RuntimeError('Please set ``data_root`` to' + 'specify the dataset path') + + self.download = download + super().__init__( + # The CIFAR dataset doesn't need specify annotation file + ann_file='', + metainfo=metainfo, + data_root=data_root, + data_prefix=dict(root=data_prefix), + test_mode=test_mode, + **kwargs) + + def load_data_list(self): + """Load images and ground truth labels.""" + root = self.data_prefix['root'] + backend = get_file_backend(root, enable_singleton=True) + + if dist.is_main_process() and not self._check_integrity(): + if not isinstance(backend, LocalBackend): + raise RuntimeError(f'The dataset on {root} is not integrated, ' + f'please manually handle it.') + + if self.download: + download_and_extract_archive( + self.url, root, filename=self.filename, md5=self.tgz_md5) + else: + raise RuntimeError( + f'Cannot find {self.__class__.__name__} dataset in ' + f"{self.data_prefix['root']}, you can specify " + '`download=True` to download automatically.') + + dist.barrier() + assert self._check_integrity(), \ + 'Download failed or shared storage is unavailable. Please ' \ + f'download the dataset manually through {self.url}.' + + if self.split == 'train': + downloaded_list = self.train_list + else: + downloaded_list = self.test_list + + imgs = [] + gt_labels = [] + + # load the picked numpy arrays + for file_name, _ in downloaded_list: + file_path = join_path(root, self.base_folder, file_name) + entry = pickle.loads(get(file_path), encoding='latin1') + imgs.append(entry['data']) + if 'labels' in entry: + gt_labels.extend(entry['labels']) + else: + gt_labels.extend(entry['fine_labels']) + + imgs = np.vstack(imgs).reshape(-1, 3, 32, 32) + imgs = imgs.transpose((0, 2, 3, 1)) # convert to HWC + + if self.CLASSES is None: + # The metainfo in the file has the lowest priority, therefore + # we only need to load it if classes is not specified. + self._load_meta() + + data_list = [] + for img, gt_label in zip(imgs, gt_labels): + info = {'img': img, 'gt_label': int(gt_label)} + data_list.append(info) + return data_list + + def _load_meta(self): + """Load categories information from metafile.""" + root = self.data_prefix['root'] + + path = join_path(root, self.base_folder, self.meta['filename']) + md5 = self.meta.get('md5', None) + if not exists(path) or (md5 is not None and not check_md5(path, md5)): + raise RuntimeError( + 'Dataset metadata file not found or corrupted.' + + ' You can use `download=True` to download it') + data = pickle.loads(get(path), encoding='latin1') + self._metainfo.setdefault('classes', data[self.meta['key']]) + + def _check_integrity(self): + """Check the integrity of data files.""" + root = self.data_prefix['root'] + + for fentry in (self.train_list + self.test_list): + filename, md5 = fentry[0], fentry[1] + fpath = join_path(root, self.base_folder, filename) + if not exists(fpath): + return False + if md5 is not None and not check_md5(fpath, md5): + return False + return True + + def extra_repr(self) -> List[str]: + """The extra repr information of the dataset.""" + body = [f"Prefix of data: \t{self.data_prefix['root']}"] + return body + + +@DATASETS.register_module() +class CIFAR100(CIFAR10): + """`CIFAR100 `_ Dataset. + + Args: + data_root (str): The root directory of the CIFAR Dataset. + split (str, optional): The dataset split, supports "train" and "test". + Default to "train". + metainfo (dict, optional): Meta information for dataset, such as + categories information. 
Defaults to None. + download (bool): Whether to download the dataset if not exists. + Defaults to True. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + base_folder = 'cifar-100-python' + url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz' + filename = 'cifar-100-python.tar.gz' + tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' + train_list = [ + ['train', '16019d7e3df5f24257cddd939b257f8d'], + ] + + test_list = [ + ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'], + ] + meta = { + 'filename': 'meta', + 'key': 'fine_label_names', + 'md5': '7973b15100ade9c7d40fb424638fde48', + } + METAINFO = {'classes': CIFAR100_CATEGORIES} diff --git a/mmpretrain/datasets/coco_caption.py b/mmpretrain/datasets/coco_caption.py new file mode 100644 index 0000000..541cda8 --- /dev/null +++ b/mmpretrain/datasets/coco_caption.py @@ -0,0 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from pathlib import Path +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset +from mmengine.fileio import get_file_backend + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class COCOCaption(BaseDataset): + """COCO Caption dataset. + + Args: + data_root (str): The root directory for ``data_prefix`` and + ``ann_file``.. + ann_file (str): Annotation file path. + data_prefix (dict): Prefix for data field. Defaults to + ``dict(img_path='')``. + pipeline (Sequence): Processing pipeline. Defaults to an empty tuple. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def load_data_list(self) -> List[dict]: + """Load data list.""" + img_prefix = self.data_prefix['img_path'] + annotations = mmengine.load(self.ann_file) + file_backend = get_file_backend(img_prefix) + + data_list = [] + for ann in annotations: + data_info = { + 'image_id': Path(ann['image']).stem.split('_')[-1], + 'img_path': file_backend.join_path(img_prefix, ann['image']), + 'gt_caption': ann['caption'], + } + + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/coco_retrieval.py b/mmpretrain/datasets/coco_retrieval.py new file mode 100644 index 0000000..be8a0bc --- /dev/null +++ b/mmpretrain/datasets/coco_retrieval.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json +import os.path as osp +from collections import OrderedDict +from os import PathLike +from typing import List, Sequence, Union + +from mmengine import get_file_backend + +from mmpretrain.registry import DATASETS, TRANSFORMS +from .base_dataset import BaseDataset + + +def expanduser(data_prefix): + if isinstance(data_prefix, (str, PathLike)): + return osp.expanduser(data_prefix) + else: + return data_prefix + + +@DATASETS.register_module() +class COCORetrieval(BaseDataset): + """COCO Retrieval dataset. + + COCO (Common Objects in Context): The COCO dataset contains more than + 330K images,each of which has approximately 5 descriptive annotations. + This dataset was releasedin collaboration between Microsoft and Carnegie + Mellon University + + COCO_2014 dataset directory: :: + + COCO_2014 + ├── val2014 + ├── train2014 + ├── annotations + ├── instances_train2014.json + ├── instances_val2014.json + ├── person_keypoints_train2014.json + ├── person_keypoints_val2014.json + ├── captions_train2014.json + ├── captions_val2014.json + + Args: + ann_file (str): Annotation file path. + test_mode (bool): Whether dataset is used for evaluation. This will + decide the annotation format in data list annotations. + Defaults to False. 
+ data_root (str): The root directory for ``data_prefix`` and + ``ann_file``. Defaults to ''. + data_prefix (str | dict): Prefix for training data. Defaults to ''. + pipeline (Sequence): Processing pipeline. Defaults to an empty tuple. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + + Examples: + >>> from mmpretrain.datasets import COCORetrieval + >>> train_dataset=COCORetrieval(data_root='coco2014/') + >>> train_dataset + Dataset COCORetrieval + Number of samples: 414113 + Annotation file: /coco2014/annotations/captions_train2014.json + Prefix of images: /coco2014/ + >>> from mmpretrain.datasets import COCORetrieval + >>> val_dataset = COCORetrieval(data_root='coco2014/') + >>> val_dataset + Dataset COCORetrieval + Number of samples: 202654 + Annotation file: /coco2014/annotations/captions_val2014.json + Prefix of images: /coco2014/ + """ + + def __init__(self, + ann_file: str, + test_mode: bool = False, + data_prefix: Union[str, dict] = '', + data_root: str = '', + pipeline: Sequence = (), + **kwargs): + + if isinstance(data_prefix, str): + data_prefix = dict(img_path=expanduser(data_prefix)) + + ann_file = expanduser(ann_file) + transforms = [] + for transform in pipeline: + if isinstance(transform, dict): + transforms.append(TRANSFORMS.build(transform)) + else: + transforms.append(transform) + + super().__init__( + data_root=data_root, + data_prefix=data_prefix, + test_mode=test_mode, + pipeline=transforms, + ann_file=ann_file, + **kwargs, + ) + + def load_data_list(self) -> List[dict]: + """Load data list.""" + # get file backend + img_prefix = self.data_prefix['img_path'] + file_backend = get_file_backend(img_prefix) + + anno_info = json.load(open(self.ann_file, 'r')) + # mapping img_id to img filename + img_dict = OrderedDict() + for idx, img in enumerate(anno_info['images']): + if img['id'] not in img_dict: + img_rel_path = img['coco_url'].rsplit('/', 2)[-2:] + img_path = file_backend.join_path(img_prefix, *img_rel_path) + + # create new idx for image + img_dict[img['id']] = dict( + ori_id=img['id'], + image_id=idx, # will be used for evaluation + img_path=img_path, + text=[], + gt_text_id=[], + gt_image_id=[], + ) + + train_list = [] + for idx, anno in enumerate(anno_info['annotations']): + anno['text'] = anno.pop('caption') + anno['ori_id'] = anno.pop('id') + anno['text_id'] = idx # will be used for evaluation + # 1. prepare train data list item + train_data = anno.copy() + train_image = img_dict[train_data['image_id']] + train_data['img_path'] = train_image['img_path'] + train_data['image_ori_id'] = train_image['ori_id'] + train_data['image_id'] = train_image['image_id'] + train_data['is_matched'] = True + train_list.append(train_data) + # 2. prepare eval data list item based on img dict + img_dict[anno['image_id']]['gt_text_id'].append(anno['text_id']) + img_dict[anno['image_id']]['text'].append(anno['text']) + img_dict[anno['image_id']]['gt_image_id'].append( + train_image['image_id']) + + self.img_size = len(img_dict) + self.text_size = len(anno_info['annotations']) + + # return needed format data list + if self.test_mode: + return list(img_dict.values()) + return train_list diff --git a/mmpretrain/datasets/coco_vqa.py b/mmpretrain/datasets/coco_vqa.py new file mode 100644 index 0000000..85f4bdc --- /dev/null +++ b/mmpretrain/datasets/coco_vqa.py @@ -0,0 +1,114 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
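# Illustrative sketch of the items produced by the ``COCORetrieval`` class
# defined above (ids and paths below are made up; real values come from the
# COCO annotation json). In training mode, ``load_data_list`` yields one
# entry per caption:
train_item = {
    'text': 'A man riding a bike down a street.',
    'text_id': 0,            # running caption index, used for evaluation
    'image_id': 0,           # re-indexed image id, used for evaluation
    'image_ori_id': 391895,  # original COCO image id
    'img_path': 'coco2014/train2014/COCO_train2014_000000391895.jpg',
    'is_matched': True,
}
# In test mode it yields one entry per image instead, carrying all of that
# image's captions in ``text`` plus ``gt_text_id`` / ``gt_image_id`` lists.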
+import os.path as osp +import re +from collections import Counter +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class COCOVQA(BaseDataset): + """VQAv2 dataset. + + Args: + data_root (str): The root directory for ``data_prefix``, ``ann_file`` + and ``question_file``. + data_prefix (str): The directory of images. + question_file (str): Question file path. + ann_file (str, optional): Annotation file path for training and + validation. Defaults to an empty string. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def __init__(self, + data_root: str, + data_prefix: str, + question_file: str, + ann_file: str = '', + **kwarg): + self.question_file = question_file + super().__init__( + data_root=data_root, + data_prefix=dict(img_path=data_prefix), + ann_file=ann_file, + **kwarg, + ) + + def _join_prefix(self): + if not mmengine.is_abs(self.question_file) and self.question_file: + self.question_file = osp.join(self.data_root, self.question_file) + + return super()._join_prefix() + + def _create_image_index(self): + img_prefix = self.data_prefix['img_path'] + + files = mmengine.list_dir_or_file(img_prefix, list_dir=False) + image_index = {} + for file in files: + image_id = re.findall(r'\d{12}', file) + if len(image_id) > 0: + image_id = int(image_id[-1]) + image_index[image_id] = mmengine.join_path(img_prefix, file) + + return image_index + + def load_data_list(self) -> List[dict]: + """Load data list.""" + questions = mmengine.load(self.question_file)['questions'] + if self.ann_file: + annotations = mmengine.load(self.ann_file)['annotations'] + assert len(questions) == len(annotations) + else: + annotations = [None] * len(questions) + + # The original VQAv2 annotation file and question file includes + # only image id but no image file paths. + self.image_index = self._create_image_index() + + data_list = [] + for question, ann in zip(questions, annotations): + # question example + # { + # 'image_id': 262144, + # 'question': "Is the ball flying towards the batter?", + # 'question_id': 262144000 + # } + # + # ann example + # { + # 'question_type': "what are the", + # 'answer_type': "other", + # 'answers': [ + # {'answer': 'watching', + # 'answer_id': 1, + # 'answer_confidence': 'yes'}, + # ... + # ], + # 'image_id': 262148, + # 'question_id': 262148000, + # 'multiple_choice_answer': 'watching', + # 'answer_type': 'other', + # } + + data_info = question + data_info['img_path'] = self.image_index[question['image_id']] + + if ann is not None: + assert ann['question_id'] == question['question_id'] + + # add answer_weight & answer_count, delete duplicate answer + answers = [item['answer'] for item in ann.pop('answers')] + count = Counter(answers) + answer_weight = [i / len(answers) for i in count.values()] + data_info['gt_answer'] = list(count.keys()) + data_info['gt_answer_weight'] = answer_weight + data_info.update(ann) + + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/cub.py b/mmpretrain/datasets/cub.py new file mode 100644 index 0000000..8db1262 --- /dev/null +++ b/mmpretrain/datasets/cub.py @@ -0,0 +1,142 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
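# Illustrative sketch of the answer aggregation performed by the ``COCOVQA``
# class defined above (the answer strings are made up):
from collections import Counter

answers = ['watching', 'watching', 'watching', 'looking', 'looking']
count = Counter(answers)        # Counter({'watching': 3, 'looking': 2})
gt_answer = list(count.keys())  # ['watching', 'looking']
gt_answer_weight = [n / len(answers) for n in count.values()]  # [0.6, 0.4]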
+from typing import List + +from mmengine import get_file_backend, list_from_file +from mmengine.logging import MMLogger + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset +from .categories import CUB_CATEGORIES + + +@DATASETS.register_module() +class CUB(BaseDataset): + """The CUB-200-2011 Dataset. + + Support the `CUB-200-2011 `_ Dataset. + Comparing with the `CUB-200 `_ Dataset, + there are much more pictures in `CUB-200-2011`. After downloading and decompression, the dataset + directory structure is as follows. + + CUB dataset directory: :: + + CUB_200_2011 + ├── images + │ ├── class_x + │ │ ├── xx1.jpg + │ │ ├── xx2.jpg + │ │ └── ... + │ ├── class_y + │ │ ├── yy1.jpg + │ │ ├── yy2.jpg + │ │ └── ... + │ └── ... + ├── images.txt + ├── image_class_labels.txt + ├── train_test_split.txt + └── .... + + Args: + data_root (str): The root directory for CUB-200-2011 dataset. + split (str, optional): The dataset split, supports "train" and "test". + Default to "train". + + Examples: + >>> from mmpretrain.datasets import CUB + >>> train_dataset = CUB(data_root='data/CUB_200_2011', split='train') + >>> train_dataset + Dataset CUB + Number of samples: 5994 + Number of categories: 200 + Root of dataset: data/CUB_200_2011 + >>> test_dataset = CUB(data_root='data/CUB_200_2011', split='test') + >>> test_dataset + Dataset CUB + Number of samples: 5794 + Number of categories: 200 + Root of dataset: data/CUB_200_2011 + """ # noqa: E501 + + METAINFO = {'classes': CUB_CATEGORIES} + + def __init__(self, + data_root: str, + split: str = 'train', + test_mode: bool = False, + **kwargs): + + splits = ['train', 'test'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'" + self.split = split + + # To handle the BC-breaking + if split == 'train' and test_mode: + logger = MMLogger.get_current_instance() + logger.warning('split="train" but test_mode=True. ' + 'The training set will be used.') + + ann_file = 'images.txt' + data_prefix = 'images' + image_class_labels_file = 'image_class_labels.txt' + train_test_split_file = 'train_test_split.txt' + + self.backend = get_file_backend(data_root, enable_singleton=True) + self.image_class_labels_file = self.backend.join_path( + data_root, image_class_labels_file) + self.train_test_split_file = self.backend.join_path( + data_root, train_test_split_file) + super(CUB, self).__init__( + ann_file=ann_file, + data_root=data_root, + data_prefix=data_prefix, + test_mode=test_mode, + **kwargs) + + def _load_data_from_txt(self, filepath): + """load data from CUB txt file, the every line of the file is idx and a + data item.""" + pairs = list_from_file(filepath) + data_dict = dict() + for pair in pairs: + idx, data_item = pair.split() + # all the index starts from 1 in CUB files, + # here we need to '- 1' to let them start from 0. 
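+ # e.g. a line like '1 001.Black_footed_Albatross/xx1.jpg' is stored as
+ # data_dict[0] = '001.Black_footed_Albatross/xx1.jpg'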
+ data_dict[int(idx) - 1] = data_item + return data_dict + + def load_data_list(self): + """Load images and ground truth labels.""" + sample_dict = self._load_data_from_txt(self.ann_file) + + label_dict = self._load_data_from_txt(self.image_class_labels_file) + + split_dict = self._load_data_from_txt(self.train_test_split_file) + + assert sample_dict.keys() == label_dict.keys() == split_dict.keys(),\ + f'sample_ids should be same in files {self.ann_file}, ' \ + f'{self.image_class_labels_file} and {self.train_test_split_file}' + + data_list = [] + for sample_id in sample_dict.keys(): + if split_dict[sample_id] == '1' and self.split == 'test': + # skip train samples when split='test' + continue + elif split_dict[sample_id] == '0' and self.split == 'train': + # skip test samples when split='train' + continue + + img_path = self.backend.join_path(self.img_prefix, + sample_dict[sample_id]) + gt_label = int(label_dict[sample_id]) - 1 + info = dict(img_path=img_path, gt_label=gt_label) + data_list.append(info) + + return data_list + + def extra_repr(self) -> List[str]: + """The extra repr information of the dataset.""" + body = [ + f'Root of dataset: \t{self.data_root}', + ] + return body diff --git a/mmpretrain/datasets/custom.py b/mmpretrain/datasets/custom.py new file mode 100644 index 0000000..bb491ff --- /dev/null +++ b/mmpretrain/datasets/custom.py @@ -0,0 +1,287 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union + +from mmengine.fileio import (BaseStorageBackend, get_file_backend, + list_from_file) +from mmengine.logging import MMLogger + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset + + +def find_folders( + root: str, + backend: Optional[BaseStorageBackend] = None +) -> Tuple[List[str], Dict[str, int]]: + """Find classes by folders under a root. + + Args: + root (string): root directory of folders + backend (BaseStorageBackend | None): The file backend of the root. + If None, auto infer backend from the root path. Defaults to None. + + Returns: + Tuple[List[str], Dict[str, int]]: + + - folders: The name of sub folders under the root. + - folder_to_idx: The map from folder name to class idx. + """ + # Pre-build file backend to prevent verbose file backend inference. + backend = backend or get_file_backend(root, enable_singleton=True) + folders = list( + backend.list_dir_or_file( + root, + list_dir=True, + list_file=False, + recursive=False, + )) + folders.sort() + folder_to_idx = {folders[i]: i for i in range(len(folders))} + return folders, folder_to_idx + + +def get_samples( + root: str, + folder_to_idx: Dict[str, int], + is_valid_file: Callable, + backend: Optional[BaseStorageBackend] = None, +): + """Make dataset by walking all images under a root. + + Args: + root (string): root directory of folders + folder_to_idx (dict): the map from class name to class idx + is_valid_file (Callable): A function that takes path of a file + and check if the file is a valid sample file. + backend (BaseStorageBackend | None): The file backend of the root. + If None, auto infer backend from the root path. Defaults to None. + + Returns: + Tuple[list, set]: + + - samples: a list of tuple where each element is (image, class_idx) + - empty_folders: The folders don't have any valid files. + """ + samples = [] + available_classes = set() + # Pre-build file backend to prevent verbose file backend inference. 
+ backend = backend or get_file_backend(root, enable_singleton=True) + + if folder_to_idx is not None: + for folder_name in sorted(list(folder_to_idx.keys())): + _dir = backend.join_path(root, folder_name) + files = backend.list_dir_or_file( + _dir, + list_dir=False, + list_file=True, + recursive=True, + ) + for file in sorted(list(files)): + if is_valid_file(file): + path = backend.join_path(folder_name, file) + item = (path, folder_to_idx[folder_name]) + samples.append(item) + available_classes.add(folder_name) + empty_folders = set(folder_to_idx.keys()) - available_classes + else: + files = backend.list_dir_or_file( + root, + list_dir=False, + list_file=True, + recursive=True, + ) + samples = [file for file in sorted(list(files)) if is_valid_file(file)] + empty_folders = None + + return samples, empty_folders + + +@DATASETS.register_module() +class CustomDataset(BaseDataset): + """A generic dataset for multiple tasks. + + The dataset supports two kinds of style. + + 1. Use an annotation file to specify all samples, and each line indicates a + sample: + + The annotation file (for ``with_label=True``, supervised tasks.): :: + + folder_1/xxx.png 0 + folder_1/xxy.png 1 + 123.png 4 + nsdf3.png 3 + ... + + The annotation file (for ``with_label=False``, unsupervised tasks.): :: + + folder_1/xxx.png + folder_1/xxy.png + 123.png + nsdf3.png + ... + + Sample files: :: + + data_prefix/ + ├── folder_1 + │ ├── xxx.png + │ ├── xxy.png + │ └── ... + ├── 123.png + ├── nsdf3.png + └── ... + + Please use the argument ``metainfo`` to specify extra information for + the task, like ``{'classes': ('bird', 'cat', 'deer', 'dog', 'frog')}``. + + 2. Place all samples in one folder as below: + + Sample files (for ``with_label=True``, supervised tasks, we use the name + of sub-folders as the categories names): :: + + data_prefix/ + ├── class_x + │ ├── xxx.png + │ ├── xxy.png + │ └── ... + │ └── xxz.png + └── class_y + ├── 123.png + ├── nsdf3.png + ├── ... + └── asd932_.png + + Sample files (for ``with_label=False``, unsupervised tasks, we use all + sample files under the specified folder): :: + + data_prefix/ + ├── folder_1 + │ ├── xxx.png + │ ├── xxy.png + │ └── ... + ├── 123.png + ├── nsdf3.png + └── ... + + If the ``ann_file`` is specified, the dataset will be generated by the + first way, otherwise, try the second way. + + Args: + data_root (str): The root directory for ``data_prefix`` and + ``ann_file``. Defaults to ''. + data_prefix (str | dict): Prefix for the data. Defaults to ''. + ann_file (str): Annotation file path. Defaults to ''. + with_label (bool): Whether the annotation file includes ground truth + labels, or use sub-folders to specify categories. + Defaults to True. + extensions (Sequence[str]): A sequence of allowed extensions. Defaults + to ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif'). + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + lazy_init (bool): Whether to load annotation during instantiation. + In some cases, such as visualization, only the meta information of + the dataset is needed, which is not necessary to load annotation + file. ``Basedataset`` can skip load annotations to save time by set + ``lazy_init=False``. Defaults to False. + **kwargs: Other keyword arguments in :class:`BaseDataset`. 
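+
+ Examples:
+ A minimal usage sketch, assuming the hypothetical layouts shown
+ above (all paths are placeholders):
+
+ >>> from mmpretrain.datasets import CustomDataset
+ >>> # 1. Sub-folder style: categories come from the folder names.
+ >>> dataset = CustomDataset(data_prefix='data/custom/')
+ >>> # 2. Annotation-file style: each line is "<relative path> <label>".
+ >>> dataset = CustomDataset(
+ ...     data_root='data/custom/',
+ ...     ann_file='meta/train.txt',
+ ...     data_prefix='images/',
+ ...     metainfo={'classes': ('bird', 'cat', 'deer', 'dog', 'frog')})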
+ """ + + def __init__(self, + data_root: str = '', + data_prefix: Union[str, dict] = '', + ann_file: str = '', + with_label=True, + extensions: Sequence[str] = ('.jpg', '.jpeg', '.png', '.ppm', + '.bmp', '.pgm', '.tif'), + metainfo: Optional[dict] = None, + lazy_init: bool = False, + **kwargs): + assert (ann_file or data_prefix or data_root), \ + 'One of `ann_file`, `data_root` and `data_prefix` must '\ + 'be specified.' + + self.extensions = tuple(set([i.lower() for i in extensions])) + self.with_label = with_label + + super().__init__( + # The base class requires string ann_file but this class doesn't + ann_file=ann_file, + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + # Force to lazy_init for some modification before loading data. + lazy_init=True, + **kwargs) + + # Full initialize the dataset. + if not lazy_init: + self.full_init() + + def _find_samples(self): + """find samples from ``data_prefix``.""" + if self.with_label: + classes, folder_to_idx = find_folders(self.img_prefix) + samples, empty_classes = get_samples( + self.img_prefix, + folder_to_idx, + is_valid_file=self.is_valid_file, + ) + + self.folder_to_idx = folder_to_idx + + if self.CLASSES is not None: + assert len(self.CLASSES) == len(classes), \ + f"The number of subfolders ({len(classes)}) doesn't " \ + f'match the number of specified classes ' \ + f'({len(self.CLASSES)}). Please check the data folder.' + else: + self._metainfo['classes'] = tuple(classes) + else: + samples, empty_classes = get_samples( + self.img_prefix, + None, + is_valid_file=self.is_valid_file, + ) + + if len(samples) == 0: + raise RuntimeError( + f'Found 0 files in subfolders of: {self.data_prefix}. ' + f'Supported extensions are: {",".join(self.extensions)}') + + if empty_classes: + logger = MMLogger.get_current_instance() + logger.warning( + 'Found no valid file in the folder ' + f'{", ".join(empty_classes)}. ' + f"Supported extensions are: {', '.join(self.extensions)}") + + return samples + + def load_data_list(self): + """Load image paths and gt_labels.""" + if not self.ann_file: + samples = self._find_samples() + elif self.with_label: + lines = list_from_file(self.ann_file) + samples = [x.strip().rsplit(' ', 1) for x in lines] + else: + samples = list_from_file(self.ann_file) + + # Pre-build file backend to prevent verbose file backend inference. + backend = get_file_backend(self.img_prefix, enable_singleton=True) + data_list = [] + for sample in samples: + if self.with_label: + filename, gt_label = sample + img_path = backend.join_path(self.img_prefix, filename) + info = {'img_path': img_path, 'gt_label': int(gt_label)} + else: + img_path = backend.join_path(self.img_prefix, sample) + info = {'img_path': img_path} + data_list.append(info) + return data_list + + def is_valid_file(self, filename: str) -> bool: + """Check if a file is a valid sample.""" + return filename.lower().endswith(self.extensions) diff --git a/mmpretrain/datasets/dataset_wrappers.py b/mmpretrain/datasets/dataset_wrappers.py new file mode 100644 index 0000000..1adff10 --- /dev/null +++ b/mmpretrain/datasets/dataset_wrappers.py @@ -0,0 +1,176 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import numpy as np +from mmengine.dataset import BaseDataset, force_full_init + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class KFoldDataset: + """A wrapper of dataset for K-Fold cross-validation. + + K-Fold cross-validation divides all the samples in groups of samples, + called folds, of almost equal sizes. 
And we use k-1 of folds to do training + and use the fold left to do validation. + + Args: + dataset (:obj:`mmengine.dataset.BaseDataset` | dict): The dataset to be + divided + fold (int): The fold used to do validation. Defaults to 0. + num_splits (int): The number of all folds. Defaults to 5. + test_mode (bool): Use the training dataset or validation dataset. + Defaults to False. + seed (int, optional): The seed to shuffle the dataset before splitting. + If None, not shuffle the dataset. Defaults to None. + """ + + def __init__(self, + dataset, + fold=0, + num_splits=5, + test_mode=False, + seed=None): + if isinstance(dataset, dict): + self.dataset = DATASETS.build(dataset) + # Init the dataset wrapper lazily according to the dataset setting. + lazy_init = dataset.get('lazy_init', False) + elif isinstance(dataset, BaseDataset): + self.dataset = dataset + else: + raise TypeError(f'Unsupported dataset type {type(dataset)}.') + + self._metainfo = getattr(self.dataset, 'metainfo', {}) + self.fold = fold + self.num_splits = num_splits + self.test_mode = test_mode + self.seed = seed + + self._fully_initialized = False + if not lazy_init: + self.full_init() + + @property + def metainfo(self) -> dict: + """Get the meta information of ``self.dataset``. + + Returns: + dict: Meta information of the dataset. + """ + # Prevent `self._metainfo` from being modified by outside. + return copy.deepcopy(self._metainfo) + + def full_init(self): + """fully initialize the dataset.""" + if self._fully_initialized: + return + + self.dataset.full_init() + ori_len = len(self.dataset) + indices = list(range(ori_len)) + if self.seed is not None: + rng = np.random.default_rng(self.seed) + rng.shuffle(indices) + + test_start = ori_len * self.fold // self.num_splits + test_end = ori_len * (self.fold + 1) // self.num_splits + if self.test_mode: + indices = indices[test_start:test_end] + else: + indices = indices[:test_start] + indices[test_end:] + + self._ori_indices = indices + self.dataset = self.dataset.get_subset(indices) + + self._fully_initialized = True + + @force_full_init + def _get_ori_dataset_idx(self, idx: int) -> int: + """Convert global idx to local index. + + Args: + idx (int): Global index of ``KFoldDataset``. + + Returns: + int: The original index in the whole dataset. + """ + return self._ori_indices[idx] + + @force_full_init + def get_data_info(self, idx: int) -> dict: + """Get annotation by index. + + Args: + idx (int): Global index of ``KFoldDataset``. + + Returns: + dict: The idx-th annotation of the datasets. + """ + return self.dataset.get_data_info(idx) + + @force_full_init + def __len__(self): + return len(self.dataset) + + @force_full_init + def __getitem__(self, idx): + return self.dataset[idx] + + @force_full_init + def get_cat_ids(self, idx): + return self.dataset.get_cat_ids(idx) + + @force_full_init + def get_gt_labels(self): + return self.dataset.get_gt_labels() + + @property + def CLASSES(self): + """Return all categories names.""" + return self._metainfo.get('classes', None) + + @property + def class_to_idx(self): + """Map mapping class name to class index. + + Returns: + dict: mapping from class name to class index. + """ + + return {cat: i for i, cat in enumerate(self.CLASSES)} + + def __repr__(self): + """Print the basic information of the dataset. + + Returns: + str: Formatted string. 
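+
+ Example (values are illustrative)::
+
+ Dataset KFoldDataset
+     Type:   training
+     Seed:   None
+     Fold:   1st of 5-fold
+     Number of samples:  8000
+     Number of categories:   10
+     Original dataset type:  CustomDataset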
+ """ + head = 'Dataset ' + self.__class__.__name__ + body = [] + type_ = 'test' if self.test_mode else 'training' + body.append(f'Type: \t{type_}') + body.append(f'Seed: \t{self.seed}') + + def ordinal(n): + # Copy from https://codegolf.stackexchange.com/a/74047 + suffix = 'tsnrhtdd'[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4] + return f'{n}{suffix}' + + body.append( + f'Fold: \t{ordinal(self.fold+1)} of {self.num_splits}-fold') + if self._fully_initialized: + body.append(f'Number of samples: \t{self.__len__()}') + else: + body.append("Haven't been initialized") + + if self.CLASSES is not None: + body.append(f'Number of categories: \t{len(self.CLASSES)}') + else: + body.append('The `CLASSES` meta info is not set.') + + body.append( + f'Original dataset type:\t{self.dataset.__class__.__name__}') + + lines = [head] + [' ' * 4 + line for line in body] + return '\n'.join(lines) diff --git a/mmpretrain/datasets/dtd.py b/mmpretrain/datasets/dtd.py new file mode 100644 index 0000000..034d0b1 --- /dev/null +++ b/mmpretrain/datasets/dtd.py @@ -0,0 +1,116 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import mat4py +from mmengine import get_file_backend + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset +from .categories import DTD_CATEGORIES + + +@DATASETS.register_module() +class DTD(BaseDataset): + """The Describable Texture Dataset (DTD). + + Support the `Describable Texture Dataset `_ Dataset. + After downloading and decompression, the dataset directory structure is as follows. + + DTD dataset directory: :: + + dtd + ├── images + │ ├── banded + | | ├──banded_0002.jpg + | | ├──banded_0004.jpg + | | └── ... + │ └── ... + ├── imdb + │ └── imdb.mat + ├── labels + | | ├──labels_joint_anno.txt + | | ├──test1.txt + | | ├──test2.txt + | | └── ... + │ └── ... + └── .... + + Args: + data_root (str): The root directory for Describable Texture dataset. + split (str, optional): The dataset split, supports "train", + "val", "trainval", and "test". Default to "trainval". 
+ + Examples: + >>> from mmpretrain.datasets import DTD + >>> train_dataset = DTD(data_root='data/dtd', split='trainval') + >>> train_dataset + Dataset DTD + Number of samples: 3760 + Number of categories: 47 + Root of dataset: data/dtd + >>> test_dataset = DTD(data_root='data/dtd', split='test') + >>> test_dataset + Dataset DTD + Number of samples: 1880 + Number of categories: 47 + Root of dataset: data/dtd + """ # noqa: E501 + + METAINFO = {'classes': DTD_CATEGORIES} + + def __init__(self, data_root: str, split: str = 'trainval', **kwargs): + + splits = ['train', 'val', 'trainval', 'test'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'" + self.split = split + + data_prefix = 'images' + test_mode = split == 'test' + + self.backend = get_file_backend(data_root, enable_singleton=True) + ann_file = self.backend.join_path('imdb', 'imdb.mat') + + super(DTD, self).__init__( + ann_file=ann_file, + data_root=data_root, + data_prefix=data_prefix, + test_mode=test_mode, + **kwargs) + + def load_data_list(self): + """Load images and ground truth labels.""" + + data = mat4py.loadmat(self.ann_file)['images'] + names = data['name'] + labels = data['class'] + parts = data['set'] + num = len(names) + assert num == len(labels) == len(parts), 'get error ann file' + + if self.split == 'train': + target_set = {1} + elif self.split == 'val': + target_set = {2} + elif self.split == 'test': + target_set = {3} + else: + target_set = {1, 2} + + data_list = [] + for i in range(num): + if parts[i] in target_set: + img_name = names[i] + img_path = self.backend.join_path(self.img_prefix, img_name) + gt_label = labels[i] - 1 + info = dict(img_path=img_path, gt_label=gt_label) + data_list.append(info) + + return data_list + + def extra_repr(self) -> List[str]: + """The extra repr information of the dataset.""" + body = [ + f'Root of dataset: \t{self.data_root}', + ] + return body diff --git a/mmpretrain/datasets/fgvcaircraft.py b/mmpretrain/datasets/fgvcaircraft.py new file mode 100644 index 0000000..696992c --- /dev/null +++ b/mmpretrain/datasets/fgvcaircraft.py @@ -0,0 +1,98 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +from mmengine import get_file_backend, list_from_file + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset +from .categories import FGVCAIRCRAFT_CATEGORIES + + +@DATASETS.register_module() +class FGVCAircraft(BaseDataset): + """The FGVC_Aircraft Dataset. + + Support the `FGVC_Aircraft Dataset `_ Dataset. + After downloading and decompression, the dataset directory structure is as follows. + + FGVC_Aircraft dataset directory: :: + + fgvc-aircraft-2013b + └── data + ├── images + │ ├── 1.jpg + │ ├── 2.jpg + │ └── ... + ├── images_variant_train.txt + ├── images_variant_test.txt + ├── images_variant_trainval.txt + ├── images_variant_val.txt + ├── variants.txt + └── .... + + Args: + data_root (str): The root directory for FGVC_Aircraft dataset. + split (str, optional): The dataset split, supports "train", + "val", "trainval", and "test". Default to "trainval". 
+ + Examples: + >>> from mmpretrain.datasets import FGVCAircraft + >>> train_dataset = FGVCAircraft(data_root='data/fgvc-aircraft-2013b', split='trainval') + >>> train_dataset + Dataset FGVCAircraft + Number of samples: 6667 + Number of categories: 100 + Root of dataset: data/fgvc-aircraft-2013b + >>> test_dataset = FGVCAircraft(data_root='data/fgvc-aircraft-2013b', split='test') + >>> test_dataset + Dataset FGVCAircraft + Number of samples: 3333 + Number of categories: 100 + Root of dataset: data/fgvc-aircraft-2013b + """ # noqa: E501 + + METAINFO = {'classes': FGVCAIRCRAFT_CATEGORIES} + + def __init__(self, data_root: str, split: str = 'trainval', **kwargs): + + splits = ['train', 'val', 'trainval', 'test'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'" + self.split = split + + self.backend = get_file_backend(data_root, enable_singleton=True) + ann_file = self.backend.join_path('data', + f'images_variant_{split}.txt') + data_prefix = self.backend.join_path('data', 'images') + test_mode = split == 'test' + + super(FGVCAircraft, self).__init__( + ann_file=ann_file, + data_root=data_root, + test_mode=test_mode, + data_prefix=data_prefix, + **kwargs) + + def load_data_list(self): + """Load images and ground truth labels.""" + + pairs = list_from_file(self.ann_file) + data_list = [] + for pair in pairs: + pair = pair.split() + img_name = pair[0] + class_name = ' '.join(pair[1:]) + img_name = f'{img_name}.jpg' + img_path = self.backend.join_path(self.img_prefix, img_name) + gt_label = self.METAINFO['classes'].index(class_name) + info = dict(img_path=img_path, gt_label=gt_label) + data_list.append(info) + + return data_list + + def extra_repr(self) -> List[str]: + """The extra repr information of the dataset.""" + body = [ + f'Root of dataset: \t{self.data_root}', + ] + return body diff --git a/mmpretrain/datasets/flamingo.py b/mmpretrain/datasets/flamingo.py new file mode 100644 index 0000000..3b5745a --- /dev/null +++ b/mmpretrain/datasets/flamingo.py @@ -0,0 +1,295 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random +from abc import abstractmethod +from collections import Counter +from typing import List + +import mmengine +import numpy as np +from mmengine.dataset import BaseDataset +from pycocotools.coco import COCO + +from mmpretrain.registry import DATASETS +from .coco_vqa import COCOVQA + + +class FlamingoFewShotMixin: + """Flamingo fewshot eval dataset minin. + + Args: + num_shots (int): Number of shots to perform evaluation. + Defaults to 0. + Note: 0 does not mean a strict zero-shot in Flamingo setting. + It will use 2 only-text prompt without in context images. + num_support_examples (int): Number of support examples to get the + few shots from. Defaults to 2048. + num_query_examples (int): Number of query examples to perform the + final evaluation. Defaults to 5000. + incontext_prompt_temp (str): In context prompt template for few shot + examples. Defaults to ''. + final_prompt_temp (str): Final query prompt template. Defaults to ''. + **kwargs: Other keyword arguments in :class:`BaseDataset`. 
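+
+ Note:
+ With the defaults, 2048 + 5000 indices are sampled without
+ replacement (see ``get_subset_idx`` below); the first 2048 form the
+ support pool that the ``num_shots`` in-context examples are drawn
+ from, and the remaining 5000 form the query set that is evaluated.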
+ """ + + def __init__(self, + num_shots: int = 0, + num_support_examples: int = 2048, + num_query_examples: int = 5000, + incontext_prompt_temp: str = '', + final_prompt_temp: str = '', + **kwarg): + self.num_shots = num_shots + self.num_support_examples = num_support_examples + self.num_query_examples = num_query_examples + self.incontext_prompt_temp = incontext_prompt_temp + self.final_prompt_temp = final_prompt_temp + super().__init__(**kwarg) + + def get_subset_idx(self, total_num): + random_idx = np.random.choice( + total_num, + self.num_support_examples + self.num_query_examples, + replace=False) + + support_idx = random_idx[:self.num_support_examples] + query_idx = random_idx[self.num_support_examples:] + return support_idx, query_idx + + @abstractmethod + def parse_basic_anno(self, anno: dict) -> dict: + """Parse basic annotation for support and query set.""" + pass + + @abstractmethod + def parse_fewshot_anno(self, anno: dict, support_list: List) -> dict: + """Parse fewshot related annotation for query set with support list.""" + pass + + +@DATASETS.register_module() +class FlamingoEvalCOCOVQA(FlamingoFewShotMixin, COCOVQA): + """Flamingo few shot VQAv2 dataset. + + Args: + data_root (str): The root directory for ``data_prefix`` and + ``ann_file``. + ann_file (str): Annotation file path. + question_file (str): Question file path. + num_shots (int): Number of shots to perform evaluation. + Defaults to 0. + Note: 0 does not mean a strict zero-shot in Flamingo setting. + It will use 2 only-text prompt without in context images. + num_support_examples (int): Number of support examples to get the + few shots from. Defaults to 2048. + num_query_examples (int): Number of query examples to perform the + final evaluation. Defaults to 5000. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def __init__(self, + data_root: str, + question_file: str, + ann_file: str = '', + num_shots: int = 0, + num_support_examples: int = 2048, + num_query_examples: int = 5000, + **kwarg): + super().__init__( + data_root=data_root, + question_file=question_file, + ann_file=ann_file, + num_shots=num_shots, + num_support_examples=num_support_examples, + num_query_examples=num_query_examples, + **kwarg) + + def parse_basic_anno(self, ann: dict) -> dict: + """Parse basic annotation for support and query set. + + Args: + anno (dict): Annotation for single example. + + Return: + dict: Parsed annotation for single example. + """ + if ann is None: + return {} + + answers = [a['answer'] for a in ann['answers']] + count = Counter(answers) + answer_weight = [i / len(answers) for i in count.values()] + answer_info = { + 'gt_answer': list(count.keys()), + 'gt_answer_weight': answer_weight + } + return answer_info + + def parse_fewshot_anno(self, query: dict, support_list: List) -> dict: + """Parse fewshot related annotation for query set with support list. + + Args: + anno (dict): Annotation for single example. + support_list (List): List of support subset to subsample few shots. + + Return: + dict: Parsed annotation for single example. 
+ """ + # prepare n shots examples + shots = random.sample(support_list, self.num_shots) + + # append image path for n shots + img_path = [shot['img_path'] for shot in shots] + img_path.append(query['img_path']) + query['img_path'] = img_path + + query['shots'] = [ + dict( + question=item['question'], + answer=item['gt_answer'][0], + ) for item in shots + ] + return query + + def load_data_list(self) -> List[dict]: + """Load data list.""" + questions = mmengine.load(self.question_file)['questions'] + if self.ann_file: + annotations = mmengine.load(self.ann_file)['annotations'] + assert len(questions) == len(annotations) + else: + annotations = [None] * len(questions) + if self.num_shots > 0: + raise ValueError('Unable to construct few-shot examples ' + 'since no annotation file.') + + # The original VQAv2 annotation file and question file includes + # only image id but no image file paths. + self.image_index = self._create_image_index() + + num_data = len(questions) + support_idx, query_idx = self.get_subset_idx(num_data) + + # prepare support subset + if self.num_shots > 0: + support_list = [] + for idx in support_idx: + question = questions[idx] + ann = annotations[idx] + support = {**question, **self.parse_basic_anno(ann)} + support['img_path'] = self.image_index[question['image_id']] + support_list.append(support) + + # prepare query subset + data_list = [] + for idx in query_idx: + question = questions[idx] + ann = annotations[idx] + data_info = {**question, **self.parse_basic_anno(ann)} + data_info['img_path'] = self.image_index[question['image_id']] + if self.num_shots > 0: + data_info = self.parse_fewshot_anno(data_info, support_list) + data_list.append(data_info) + + return data_list + + +@DATASETS.register_module() +class FlamingoEvalCOCOCaption(FlamingoFewShotMixin, BaseDataset): + """Flamingo few shot COCO Caption dataset. + + Args: + data_root (str): The root directory for ``data_prefix`` and + ``ann_file``. + ann_file (str): Annotation file path. + data_prefix (dict): Prefix for data field. Defaults to + ``dict(img_path='')``. + num_shots (int): Number of shots to perform evaluation. + Defaults to 0. + num_support_examples (int): Number of support examples to get the + few shots from. Defaults to 2048. + num_query_examples (int): Number of query examples to perform the + final evaluation. Defaults to 5000. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def __init__(self, + data_root: str, + ann_file: str, + num_shots: int = 0, + num_support_examples: int = 2048, + num_query_examples: int = 5000, + **kwarg): + super().__init__( + data_root=data_root, + ann_file=ann_file, + num_shots=num_shots, + num_support_examples=num_support_examples, + num_query_examples=num_query_examples, + **kwarg) + + def parse_basic_anno(self, ann: dict, coco: COCO) -> dict: + """Parse basic annotation for support and query set. + + Args: + anno (dict): Annotation for single example. + coco (COCO): The coco dataset. + + Return: + dict: Parsed annotation for single example. + """ + img_prefix = self.data_prefix['img_path'] + img = coco.imgs[ann['image_id']] + data_info = dict( + img_path=mmengine.join_path(img_prefix, img['file_name']), + gt_caption=ann['caption'], + image_id=ann['image_id'], + ) + return data_info + + def parse_fewshot_anno(self, query: dict, support_list: List) -> dict: + """Parse fewshot related annotation for query set with support list. + + Args: + query (dict): Annotation for single example. 
+ support_list (List): List of support subset to subsample few shots. + coco (COCO): The coco dataset. + + Return: + dict: Parsed annotation for single example. + """ + # prepare n shots examples + shots = random.sample(support_list, self.num_shots) + + # append image path for n shots + img_path = [shot['img_path'] for shot in shots] + img_path.append(query['img_path']) + query['img_path'] = img_path + + query['shots'] = [dict(caption=item['gt_caption']) for item in shots] + return query + + def load_data_list(self) -> List[dict]: + """Load data list.""" + with mmengine.get_local_path(self.ann_file) as ann_file: + coco = COCO(ann_file) + + num_data = len(coco.anns) + support_idx, query_idx = self.get_subset_idx(num_data) + ann_ids = list(coco.anns) + + # prepare support subset + if self.num_shots > 0: + support_list = [] + for idx in support_idx: + support = self.parse_basic_anno(coco.anns[ann_ids[idx]], coco) + support_list.append(support) + + # prepare query subset + query_list = [] + for idx in query_idx: + data_info = self.parse_basic_anno(coco.anns[ann_ids[idx]], coco) + if self.num_shots > 0: + data_info = self.parse_fewshot_anno(data_info, support_list) + query_list.append(data_info) + + return query_list diff --git a/mmpretrain/datasets/flickr30k_caption.py b/mmpretrain/datasets/flickr30k_caption.py new file mode 100644 index 0000000..f0f6841 --- /dev/null +++ b/mmpretrain/datasets/flickr30k_caption.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset +from mmengine.fileio import get_file_backend + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class Flickr30kCaption(BaseDataset): + """Flickr30k Caption dataset. To generate coco-style GT annotation for + evaluation, please refer to + tools/dataset_converters/convert_flickr30k_ann.py. + + Args: + data_root (str): The root directory for ``data_prefix``, ``ann_file`` + and ``question_file``. + data_prefix (str): The directory of images. + ann_file (str): Annotation file path for training and validation. + split (str): 'train', 'val' or 'test'. + **kwargs: Other keyword arguments in :class:`BaseDataset`. 
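+
+ Examples:
+ A minimal sketch; the paths below are placeholders for the
+ annotation json and the image folder:
+
+ >>> from mmpretrain.datasets import Flickr30kCaption
+ >>> train_dataset = Flickr30kCaption(
+ ...     data_root='data/flickr30k',
+ ...     data_prefix='images',
+ ...     ann_file='annotations/dataset_flickr30k.json',
+ ...     split='train')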
+ """ + + def __init__(self, data_root: str, data_prefix: str, ann_file: str, + split: str, **kwarg): + + assert split in ['train', 'val', 'test'], \ + '`split` must be train, val or test' + self.split = split + super().__init__( + data_root=data_root, + data_prefix=dict(img_path=data_prefix), + ann_file=ann_file, + **kwarg, + ) + + def load_data_list(self) -> List[dict]: + """Load data list.""" + img_prefix = self.data_prefix['img_path'] + annotations = mmengine.load(self.ann_file) + file_backend = get_file_backend(img_prefix) + + data_list = [] + + for img in annotations['images']: + + # img_example={ + # "sentids": [0, 1, 2], + # "imgid": 0, + # "sentences": [ + # {"raw": "Two men in green shirts standing in a yard.", + # "imgid": 0, "sentid": 0}, + # {"raw": "A man in a blue shirt standing in a garden.", + # "imgid": 0, "sentid": 1}, + # {"raw": "Two friends enjoy time spent together.", + # "imgid": 0, "sentid": 2} + # ], + # "split": "train", + # "filename": "1000092795.jpg" + # }, + + if img['split'] != self.split: + continue + + for sentence in img['sentences']: + data_info = { + 'image_id': img['imgid'], + 'img_path': file_backend.join_path(img_prefix, + img['filename']), + 'gt_caption': sentence['raw'] + } + + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/flickr30k_retrieval.py b/mmpretrain/datasets/flickr30k_retrieval.py new file mode 100644 index 0000000..9f43c15 --- /dev/null +++ b/mmpretrain/datasets/flickr30k_retrieval.py @@ -0,0 +1,110 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict +from typing import List + +import mmengine +from mmengine import get_file_backend + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module() +class Flickr30kRetrieval(BaseDataset): + """Flickr30k Retrieval dataset. + + Args: + data_root (str): The root directory for ``data_prefix``, ``ann_file`` + and ``question_file``. + data_prefix (str): The directory of images. + ann_file (str): Annotation file path for training and validation. + split (str): 'train', 'val' or 'test'. + **kwargs: Other keyword arguments in :class:`BaseDataset`. 
+ """ + + def __init__(self, data_root: str, data_prefix: str, ann_file: str, + split: str, **kwarg): + + assert split in ['train', 'val', 'test'], \ + '`split` must be train, val or test' + self.split = split + super().__init__( + data_root=data_root, + data_prefix=dict(img_path=data_prefix), + ann_file=ann_file, + **kwarg, + ) + + def load_data_list(self) -> List[dict]: + """Load data list.""" + # get file backend + img_prefix = self.data_prefix['img_path'] + file_backend = get_file_backend(img_prefix) + + annotations = mmengine.load(self.ann_file) + + # mapping img_id to img filename + img_dict = OrderedDict() + img_idx = 0 + sentence_idx = 0 + train_list = [] + for img in annotations['images']: + + # img_example={ + # "sentids": [0, 1, 2], + # "imgid": 0, + # "sentences": [ + # {"raw": "Two men in green shirts standing in a yard.", + # "imgid": 0, "sentid": 0}, + # {"raw": "A man in a blue shirt standing in a garden.", + # "imgid": 0, "sentid": 1}, + # {"raw": "Two friends enjoy time spent together.", + # "imgid": 0, "sentid": 2} + # ], + # "split": "train", + # "filename": "1000092795.jpg" + # }, + + if img['split'] != self.split: + continue + + # create new idx for image + train_image = dict( + ori_id=img['imgid'], + image_id=img_idx, # used for evaluation + img_path=file_backend.join_path(img_prefix, img['filename']), + text=[], + gt_text_id=[], + gt_image_id=[], + ) + + for sentence in img['sentences']: + ann = {} + ann['text'] = sentence['raw'] + ann['ori_id'] = sentence['sentid'] + ann['text_id'] = sentence_idx # used for evaluation + + ann['image_ori_id'] = train_image['ori_id'] + ann['image_id'] = train_image['image_id'] + ann['img_path'] = train_image['img_path'] + ann['is_matched'] = True + + # 1. prepare train data list item + train_list.append(ann) + # 2. prepare eval data list item based on img dict + train_image['text'].append(ann['text']) + train_image['gt_text_id'].append(ann['text_id']) + train_image['gt_image_id'].append(ann['image_id']) + + sentence_idx += 1 + + img_dict[img['imgid']] = train_image + img_idx += 1 + + self.img_size = len(img_dict) + self.text_size = len(train_list) + + # return needed format data list + if self.test_mode: + return list(img_dict.values()) + return train_list diff --git a/mmpretrain/datasets/flowers102.py b/mmpretrain/datasets/flowers102.py new file mode 100644 index 0000000..fe76dcc --- /dev/null +++ b/mmpretrain/datasets/flowers102.py @@ -0,0 +1,104 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import mat4py +from mmengine import get_file_backend + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module() +class Flowers102(BaseDataset): + """The Oxford 102 Flower Dataset. + + Support the `Oxford 102 Flowers Dataset `_ Dataset. + After downloading and decompression, the dataset directory structure is as follows. + + Flowers102 dataset directory: :: + + Flowers102 + ├── jpg + │ ├── image_00001.jpg + │ ├── image_00002.jpg + │ └── ... + ├── imagelabels.mat + ├── setid.mat + └── ... + + Args: + data_root (str): The root directory for Oxford 102 Flowers dataset. + split (str, optional): The dataset split, supports "train", + "val", "trainval", and "test". Default to "trainval". 
+ + Examples: + >>> from mmpretrain.datasets import Flowers102 + >>> train_dataset = Flowers102(data_root='data/Flowers102', split='trainval') + >>> train_dataset + Dataset Flowers102 + Number of samples: 2040 + Root of dataset: data/Flowers102 + >>> test_dataset = Flowers102(data_root='data/Flowers102', split='test') + >>> test_dataset + Dataset Flowers102 + Number of samples: 6149 + Root of dataset: data/Flowers102 + """ # noqa: E501 + + def __init__(self, data_root: str, split: str = 'trainval', **kwargs): + splits = ['train', 'val', 'trainval', 'test'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'" + self.split = split + + ann_file = 'imagelabels.mat' + data_prefix = 'jpg' + train_test_split_file = 'setid.mat' + test_mode = split == 'test' + + self.backend = get_file_backend(data_root, enable_singleton=True) + + self.train_test_split_file = self.backend.join_path( + data_root, train_test_split_file) + + super(Flowers102, self).__init__( + ann_file=ann_file, + data_root=data_root, + data_prefix=data_prefix, + test_mode=test_mode, + **kwargs) + + def load_data_list(self): + """Load images and ground truth labels.""" + + label_dict = mat4py.loadmat(self.ann_file)['labels'] + split_list = mat4py.loadmat(self.train_test_split_file) + + if self.split == 'train': + split_list = split_list['trnid'] + elif self.split == 'val': + split_list = split_list['valid'] + elif self.split == 'test': + split_list = split_list['tstid'] + else: + train_ids = split_list['trnid'] + val_ids = split_list['valid'] + train_ids.extend(val_ids) + split_list = train_ids + + data_list = [] + for sample_id in split_list: + img_name = 'image_%05d.jpg' % (sample_id) + img_path = self.backend.join_path(self.img_prefix, img_name) + gt_label = int(label_dict[sample_id - 1]) - 1 + info = dict(img_path=img_path, gt_label=gt_label) + data_list.append(info) + + return data_list + + def extra_repr(self) -> List[str]: + """The extra repr information of the dataset.""" + body = [ + f'Root of dataset: \t{self.data_root}', + ] + return body diff --git a/mmpretrain/datasets/food101.py b/mmpretrain/datasets/food101.py new file mode 100644 index 0000000..4ce7ffe --- /dev/null +++ b/mmpretrain/datasets/food101.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +from mmengine import get_file_backend, list_from_file + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset +from .categories import FOOD101_CATEGORIES + + +@DATASETS.register_module() +class Food101(BaseDataset): + """The Food101 Dataset. + + Support the `Food101 Dataset `_ Dataset. + After downloading and decompression, the dataset directory structure is as follows. + + Food101 dataset directory: :: + + food-101 + ├── images + │ ├── class_x + │ │ ├── xx1.jpg + │ │ ├── xx2.jpg + │ │ └── ... + │ ├── class_y + │ │ ├── yy1.jpg + │ │ ├── yy2.jpg + │ │ └── ... + │ └── ... + ├── meta + │ ├── train.txt + │ └── test.txt + └── .... + + Args: + data_root (str): The root directory for Food101 dataset. + split (str, optional): The dataset split, supports "train" and "test". + Default to "train". 
+ + Examples: + >>> from mmpretrain.datasets import Food101 + >>> train_dataset = Food101(data_root='data/food-101', split='train') + >>> train_dataset + Dataset Food101 + Number of samples: 75750 + Number of categories: 101 + Root of dataset: data/food-101 + >>> test_dataset = Food101(data_root='data/food-101', split='test') + >>> test_dataset + Dataset Food101 + Number of samples: 25250 + Number of categories: 101 + Root of dataset: data/food-101 + """ # noqa: E501 + + METAINFO = {'classes': FOOD101_CATEGORIES} + + def __init__(self, data_root: str, split: str = 'train', **kwargs): + + splits = ['train', 'test'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'" + self.split = split + + self.backend = get_file_backend(data_root, enable_singleton=True) + if split == 'train': + ann_file = self.backend.join_path('meta', 'train.txt') + else: + ann_file = self.backend.join_path('meta', 'test.txt') + + test_mode = split == 'test' + data_prefix = 'images' + + super(Food101, self).__init__( + ann_file=ann_file, + data_root=data_root, + test_mode=test_mode, + data_prefix=data_prefix, + **kwargs) + + def load_data_list(self): + """Load images and ground truth labels.""" + + pairs = list_from_file(self.ann_file) + data_list = [] + for pair in pairs: + class_name, img_name = pair.split('/') + img_name = f'{img_name}.jpg' + img_path = self.backend.join_path(self.img_prefix, class_name, + img_name) + gt_label = self.METAINFO['classes'].index(class_name) + info = dict(img_path=img_path, gt_label=gt_label) + data_list.append(info) + return data_list + + def extra_repr(self) -> List[str]: + """The extra repr information of the dataset.""" + body = [ + f'Root of dataset: \t{self.data_root}', + ] + return body diff --git a/mmpretrain/datasets/gqa_dataset.py b/mmpretrain/datasets/gqa_dataset.py new file mode 100644 index 0000000..741791b --- /dev/null +++ b/mmpretrain/datasets/gqa_dataset.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class GQA(BaseDataset): + """GQA dataset. + + We use the annotation file from LAVIS, and you can download all annotation files from following links: # noqa: E501 + + train: + https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/gqa/train_balanced_questions.json # noqa: E501 + val: + https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/gqa/testdev_balanced_questions.json # noqa: E501 + test: + https://storage.googleapis.com/sfr-vision-language-research/LAVIS/datasets/gqa/test_balanced_questions.json # noqa: E501 + + and images from the official website: + https://cs.stanford.edu/people/dorarad/gqa/index.html + + Args: + data_root (str): The root directory for ``data_prefix``, ``ann_file`` + and ``question_file``. + data_prefix (str): The directory of images. + ann_file (str, optional): Annotation file path for training and + validation. Defaults to an empty string. + **kwargs: Other keyword arguments in :class:`BaseDataset`. 
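+
+    Examples:
+        A minimal usage sketch; the local paths are placeholders and assume
+        the LAVIS annotation files listed above were saved under
+        ``annotations/`` and the official images under ``images/``:
+
+        >>> from mmpretrain.datasets import GQA
+        >>> train_set = GQA(
+        ...     data_root='data/gqa',
+        ...     data_prefix='images',
+        ...     ann_file='annotations/train_balanced_questions.json')
+        >>> # every sample provides `img_path`, `question` and `gt_answer`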
+ """ + + def __init__(self, + data_root: str, + data_prefix: str, + ann_file: str = '', + **kwarg): + super().__init__( + data_root=data_root, + data_prefix=dict(img_path=data_prefix), + ann_file=ann_file, + **kwarg, + ) + + def load_data_list(self) -> List[dict]: + """Load data list.""" + annotations = mmengine.load(self.ann_file) + + data_list = [] + for ann in annotations: + # ann example + # { + # 'question': "Is it overcast?", + # 'answer': 'no, + # 'image_id': n161313.jpg, + # 'question_id': 262148000, + # .... + # } + data_info = dict() + data_info['img_path'] = osp.join(self.data_prefix['img_path'], + ann['image']) + data_info['question'] = ann['question'] + data_info['gt_answer'] = ann['answer'] + + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/iconqa.py b/mmpretrain/datasets/iconqa.py new file mode 100644 index 0000000..20c4d87 --- /dev/null +++ b/mmpretrain/datasets/iconqa.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset +from mmengine.fileio import list_dir_or_file +from mmengine.utils import check_file_exist + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class IconQA(BaseDataset): + """IconQA: A benchmark for abstract diagram understanding + and visual language reasoning. + + Args: + data_root (str): The root directory for ``data_prefix``, ``ann_file`` + and ``question_file``. + data_prefix (str): The directory of the specific task and split. + eg. ``iconqa/val/choose_text/``. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def __init__(self, data_root: str, data_prefix: str, **kwarg): + super().__init__( + data_root=data_root, + data_prefix=dict(img_path=data_prefix), + **kwarg, + ) + + def load_data_list(self) -> List[dict]: + """Load data list.""" + sample_list = list( + list_dir_or_file(self.data_prefix['img_path'], list_file=False)) + + data_list = list() + for sample_id in sample_list: + # data json + # { + # "question": "How likely is it that you will pick a black one?", + # "choices": [ + # "certain", + # "unlikely", + # "impossible", + # "probable" + # ], + # "answer": 2, + # "ques_type": "choose_txt", + # "grade": "grade1", + # "label": "S2" + # } + data_info = mmengine.load( + mmengine.join_path(self.data_prefix['img_path'], sample_id, + 'data.json')) + data_info['gt_answer'] = data_info['choices'][int( + data_info['answer'])] + data_info['img_path'] = mmengine.join_path( + self.data_prefix['img_path'], sample_id, 'image.png') + check_file_exist(data_info['img_path']) + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/imagenet.py b/mmpretrain/datasets/imagenet.py new file mode 100644 index 0000000..771d6ee --- /dev/null +++ b/mmpretrain/datasets/imagenet.py @@ -0,0 +1,235 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +from mmengine import fileio +from mmengine.logging import MMLogger + +from mmpretrain.registry import DATASETS +from .categories import IMAGENET_CATEGORIES +from .custom import CustomDataset + + +@DATASETS.register_module() +class ImageNet(CustomDataset): + """`ImageNet `_ Dataset. + + The dataset supports two kinds of directory format, + + :: + + imagenet + ├── train + │ ├──class_x + | | ├── x1.jpg + | | ├── x2.jpg + | | └── ... + │ ├── class_y + | | ├── y1.jpg + | | ├── y2.jpg + | | └── ... + | └── ... + ├── val + │ ├──class_x + | | └── ... + │ ├── class_y + | | └── ... 
+ | └── ... + └── test + ├── test1.jpg + ├── test2.jpg + └── ... + + or :: + + imagenet + ├── train + │ ├── x1.jpg + │ ├── y1.jpg + │ └── ... + ├── val + │ ├── x3.jpg + │ ├── y3.jpg + │ └── ... + ├── test + │ ├── test1.jpg + │ ├── test2.jpg + │ └── ... + └── meta + ├── train.txt + └── val.txt + + + Args: + data_root (str): The root directory for ``data_prefix`` and + ``ann_file``. Defaults to ''. + split (str): The dataset split, supports "train", "val" and "test". + Default to ''. + data_prefix (str | dict): Prefix for training data. Defaults to ''. + ann_file (str): Annotation file path. Defaults to ''. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + **kwargs: Other keyword arguments in :class:`CustomDataset` and + :class:`BaseDataset`. + + + Examples: + >>> from mmpretrain.datasets import ImageNet + >>> train_dataset = ImageNet(data_root='data/imagenet', split='train') + >>> train_dataset + Dataset ImageNet + Number of samples: 1281167 + Number of categories: 1000 + Root of dataset: data/imagenet + >>> test_dataset = ImageNet(data_root='data/imagenet', split='val') + >>> test_dataset + Dataset ImageNet + Number of samples: 50000 + Number of categories: 1000 + Root of dataset: data/imagenet + """ # noqa: E501 + + IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif') + METAINFO = {'classes': IMAGENET_CATEGORIES} + + def __init__(self, + data_root: str = '', + split: str = '', + data_prefix: Union[str, dict] = '', + ann_file: str = '', + metainfo: Optional[dict] = None, + **kwargs): + kwargs = {'extensions': self.IMG_EXTENSIONS, **kwargs} + + if split: + splits = ['train', 'val', 'test'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'" + + if split == 'test': + logger = MMLogger.get_current_instance() + logger.info( + 'Since the ImageNet1k test set does not provide label' + 'annotations, `with_label` is set to False') + kwargs['with_label'] = False + + data_prefix = split if data_prefix == '' else data_prefix + + if ann_file == '': + _ann_path = fileio.join_path(data_root, 'meta', f'{split}.txt') + if fileio.exists(_ann_path): + ann_file = fileio.join_path('meta', f'{split}.txt') + + super().__init__( + data_root=data_root, + data_prefix=data_prefix, + ann_file=ann_file, + metainfo=metainfo, + **kwargs) + + def extra_repr(self) -> List[str]: + """The extra repr information of the dataset.""" + body = [ + f'Root of dataset: \t{self.data_root}', + ] + return body + + +@DATASETS.register_module() +class ImageNet21k(CustomDataset): + """ImageNet21k Dataset. + + Since the dataset ImageNet21k is extremely big, contains 21k+ classes + and 1.4B files. We won't provide the default categories list. Please + specify it from the ``classes`` argument. + The dataset directory structure is as follows, + + ImageNet21k dataset directory :: + + imagenet21k + ├── train + │ ├──class_x + | | ├── x1.jpg + | | ├── x2.jpg + | | └── ... + │ ├── class_y + | | ├── y1.jpg + | | ├── y2.jpg + | | └── ... + | └── ... + └── meta + └── train.txt + + + Args: + data_root (str): The root directory for ``data_prefix`` and + ``ann_file``. Defaults to ''. + data_prefix (str | dict): Prefix for training data. Defaults to ''. + ann_file (str): Annotation file path. Defaults to ''. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + multi_label (bool): Not implement by now. Use multi label or not. + Defaults to False. 
+ **kwargs: Other keyword arguments in :class:`CustomDataset` and + :class:`BaseDataset`. + + Examples: + >>> from mmpretrain.datasets import ImageNet21k + >>> train_dataset = ImageNet21k(data_root='data/imagenet21k', split='train') + >>> train_dataset + Dataset ImageNet21k + Number of samples: 14197088 + Annotation file: data/imagenet21k/meta/train.txt + Prefix of images: data/imagenet21k/train + """ # noqa: E501 + + IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif') + + def __init__(self, + data_root: str = '', + split: str = '', + data_prefix: Union[str, dict] = '', + ann_file: str = '', + metainfo: Optional[dict] = None, + multi_label: bool = False, + **kwargs): + if multi_label: + raise NotImplementedError( + 'The `multi_label` option is not supported by now.') + self.multi_label = multi_label + + if split: + splits = ['train'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'.\ + If you want to specify your own validation set or test set,\ + please set split to None." + + self.split = split + data_prefix = split if data_prefix == '' else data_prefix + + if not ann_file: + _ann_path = fileio.join_path(data_root, 'meta', f'{split}.txt') + if fileio.exists(_ann_path): + ann_file = fileio.join_path('meta', f'{split}.txt') + + logger = MMLogger.get_current_instance() + + if not ann_file: + logger.warning( + 'The ImageNet21k dataset is large, and scanning directory may ' + 'consume long time. Considering to specify the `ann_file` to ' + 'accelerate the initialization.') + + kwargs = {'extensions': self.IMG_EXTENSIONS, **kwargs} + super().__init__( + data_root=data_root, + data_prefix=data_prefix, + ann_file=ann_file, + metainfo=metainfo, + **kwargs) + + if self.CLASSES is None: + logger.warning( + 'The CLASSES is not stored in the `ImageNet21k` class. ' + 'Considering to specify the `classes` argument if you need ' + 'do inference on the ImageNet-21k dataset') diff --git a/mmpretrain/datasets/infographic_vqa.py b/mmpretrain/datasets/infographic_vqa.py new file mode 100644 index 0000000..46f5b0a --- /dev/null +++ b/mmpretrain/datasets/infographic_vqa.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class InfographicVQA(BaseDataset): + """Infographic VQA dataset. + + Args: + data_root (str): The root directory for ``data_prefix``, ``ann_file``. + data_prefix (str): The directory of images. + ann_file (str, optional): Annotation file path for training and + validation. Defaults to an empty string. + **kwargs: Other keyword arguments in :class:`BaseDataset`. 
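+
+    Examples:
+        A minimal usage sketch; the paths are placeholders and the annotation
+        file is assumed to follow the official InfographicVQA layout (a
+        top-level ``data`` list of question records, as illustrated in
+        ``load_data_list``):
+
+        >>> from mmpretrain.datasets import InfographicVQA
+        >>> val_set = InfographicVQA(
+        ...     data_root='data/infographicvqa',
+        ...     data_prefix='images',
+        ...     ann_file='annotations/infographicvqa_val.json')
+        >>> # `gt_answer` is only filled for splits whose records contain
+        >>> # an `answers` field; the test split does not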
+ """ + + def __init__(self, + data_root: str, + data_prefix: str, + ann_file: str = '', + **kwarg): + super().__init__( + data_root=data_root, + data_prefix=dict(img_path=data_prefix), + ann_file=ann_file, + **kwarg, + ) + + def load_data_list(self) -> List[dict]: + """Load data list.""" + annotations = mmengine.load(self.ann_file) + annotations = annotations['data'] + + data_list = [] + for ann in annotations: + # ann example + # { + # "questionId": 98313, + # "question": "Which social platform has heavy female audience?", + # "image_local_name": "37313.jpeg", + # "image_url": "https://xxx.png", + # "ocr_output_file": "37313.json", + # "answers": [ + # "pinterest" + # ], + # "data_split": "val" + # } + data_info = dict() + data_info['question'] = ann['question'] + data_info['img_path'] = mmengine.join_path( + self.data_prefix['img_path'], ann['image_local_name']) + if 'answers' in ann.keys(): # test splits do not include gt + data_info['gt_answer'] = ann['answers'] + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/inshop.py b/mmpretrain/datasets/inshop.py new file mode 100644 index 0000000..f64f177 --- /dev/null +++ b/mmpretrain/datasets/inshop.py @@ -0,0 +1,157 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine import get_file_backend, list_from_file + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module() +class InShop(BaseDataset): + """InShop Dataset for Image Retrieval. + + Please download the images from the homepage + 'https://mmlab.ie.cuhk.edu.hk/projects/DeepFashion/InShopRetrieval.html' + (In-shop Clothes Retrieval Benchmark -> Img -> img.zip, + Eval/list_eval_partition.txt), and organize them as follows way: :: + + In-shop Clothes Retrieval Benchmark (data_root)/ + ├── Eval / + │ └── list_eval_partition.txt (ann_file) + ├── Img (img_prefix) + │ └── img/ + ├── README.txt + └── ..... + + Args: + data_root (str): The root directory for dataset. + split (str): Choose from 'train', 'query' and 'gallery'. + Defaults to 'train'. + data_prefix (str | dict): Prefix for training data. + Defaults to 'Img'. + ann_file (str): Annotation file path, path relative to + ``data_root``. Defaults to 'Eval/list_eval_partition.txt'. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + + Examples: + >>> from mmpretrain.datasets import InShop + >>> + >>> # build train InShop dataset + >>> inshop_train_cfg = dict(data_root='data/inshop', split='train') + >>> inshop_train = InShop(**inshop_train_cfg) + >>> inshop_train + Dataset InShop + Number of samples: 25882 + The `CLASSES` meta info is not set. + Root of dataset: data/inshop + >>> + >>> # build query InShop dataset + >>> inshop_query_cfg = dict(data_root='data/inshop', split='query') + >>> inshop_query = InShop(**inshop_query_cfg) + >>> inshop_query + Dataset InShop + Number of samples: 14218 + The `CLASSES` meta info is not set. + Root of dataset: data/inshop + >>> + >>> # build gallery InShop dataset + >>> inshop_gallery_cfg = dict(data_root='data/inshop', split='gallery') + >>> inshop_gallery = InShop(**inshop_gallery_cfg) + >>> inshop_gallery + Dataset InShop + Number of samples: 12612 + The `CLASSES` meta info is not set. 
+ Root of dataset: data/inshop + """ + + def __init__(self, + data_root: str, + split: str = 'train', + data_prefix: str = 'Img', + ann_file: str = 'Eval/list_eval_partition.txt', + **kwargs): + + assert split in ('train', 'query', 'gallery'), "'split' of `InShop`" \ + f" must be one of ['train', 'query', 'gallery'], bu get '{split}'" + self.backend = get_file_backend(data_root, enable_singleton=True) + self.split = split + super().__init__( + data_root=data_root, + data_prefix=data_prefix, + ann_file=ann_file, + **kwargs) + + def _process_annotations(self): + lines = list_from_file(self.ann_file) + + anno_train = dict(metainfo=dict(), data_list=list()) + anno_gallery = dict(metainfo=dict(), data_list=list()) + + # item_id to label, each item corresponds to one class label + class_num = 0 + gt_label_train = {} + + # item_id to label, each label corresponds to several items + gallery_num = 0 + gt_label_gallery = {} + + # (lines[0], lines[1]) is the image number and the field name; + # Each line format as 'image_name, item_id, evaluation_status' + for line in lines[2:]: + img_name, item_id, status = line.split() + img_path = self.backend.join_path(self.img_prefix, img_name) + if status == 'train': + if item_id not in gt_label_train: + gt_label_train[item_id] = class_num + class_num += 1 + # item_id to class_id (for the training set) + anno_train['data_list'].append( + dict(img_path=img_path, gt_label=gt_label_train[item_id])) + elif status == 'gallery': + if item_id not in gt_label_gallery: + gt_label_gallery[item_id] = [] + # Since there are multiple images for each item, + # record the corresponding item for each image. + gt_label_gallery[item_id].append(gallery_num) + anno_gallery['data_list'].append( + dict(img_path=img_path, sample_idx=gallery_num)) + gallery_num += 1 + + if self.split == 'train': + anno_train['metainfo']['class_number'] = class_num + anno_train['metainfo']['sample_number'] = \ + len(anno_train['data_list']) + return anno_train + elif self.split == 'gallery': + anno_gallery['metainfo']['sample_number'] = gallery_num + return anno_gallery + + # Generate the label for the query(val) set + anno_query = dict(metainfo=dict(), data_list=list()) + query_num = 0 + for line in lines[2:]: + img_name, item_id, status = line.split() + img_path = self.backend.join_path(self.img_prefix, img_name) + if status == 'query': + anno_query['data_list'].append( + dict( + img_path=img_path, gt_label=gt_label_gallery[item_id])) + query_num += 1 + + anno_query['metainfo']['sample_number'] = query_num + return anno_query + + def load_data_list(self): + """load data list. + + For the train set, return image and ground truth label. For the query + set, return image and ids of images in gallery. For the gallery set, + return image and its id. + """ + data_info = self._process_annotations() + data_list = data_info['data_list'] + return data_list + + def extra_repr(self): + """The extra repr information of the dataset.""" + body = [f'Root of dataset: \t{self.data_root}'] + return body diff --git a/mmpretrain/datasets/minigpt4_dataset.py b/mmpretrain/datasets/minigpt4_dataset.py new file mode 100644 index 0000000..e14e5c3 --- /dev/null +++ b/mmpretrain/datasets/minigpt4_dataset.py @@ -0,0 +1,79 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List + +import mmengine +from mmengine.dataset import BaseDataset +from mmengine.fileio import get_file_backend + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class MiniGPT4Dataset(BaseDataset): + """Dataset for training MiniGPT4. + + MiniGPT4 dataset directory: + + minigpt4_dataset + ├── image + │ ├── id0.jpg + │ │── id1.jpg + │ │── id2.jpg + │ └── ... + └── conversation_data.json + + The structure of conversation_data.json: + + [ + // English data + { + "id": str(id0), + "conversation": "###Ask: [Ask content] + ###Answer: [Answer content]" + }, + + // Chinese data + { + "id": str(id1), + "conversation": "###问: [Ask content] + ###答:[Answer content]" + }, + + ... + ] + + Args: + data_root (str): The root directory for ``ann_file`` and ``image``. + ann_file (str): Conversation file path. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def load_data_list(self) -> List[dict]: + file_backend = get_file_backend(self.data_root) + conversation_path = file_backend.join_path(self.data_root, + self.ann_file) + conversation = mmengine.load(conversation_path) + img_ids = {} + n = 0 + for conv in conversation: + img_id = conv['id'] + if img_id not in img_ids.keys(): + img_ids[img_id] = n + n += 1 + + img_root = file_backend.join_path(self.data_root, 'image') + data_list = [] + for conv in conversation: + img_file = '{}.jpg'.format(conv['id']) + chat_content = conv['conversation'] + lang = 'en' if chat_content.startswith('###Ask: ') else 'zh' + data_info = { + 'image_id': img_ids[conv['id']], + 'img_path': file_backend.join_path(img_root, img_file), + 'chat_content': chat_content, + 'lang': lang, + } + + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/mnist.py b/mmpretrain/datasets/mnist.py new file mode 100644 index 0000000..425267f --- /dev/null +++ b/mmpretrain/datasets/mnist.py @@ -0,0 +1,234 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import codecs +from typing import List, Optional +from urllib.parse import urljoin + +import mmengine.dist as dist +import numpy as np +import torch +from mmengine.fileio import LocalBackend, exists, get_file_backend, join_path +from mmengine.logging import MMLogger + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset +from .categories import FASHIONMNIST_CATEGORITES, MNIST_CATEGORITES +from .utils import (download_and_extract_archive, open_maybe_compressed_file, + rm_suffix) + + +@DATASETS.register_module() +class MNIST(BaseDataset): + """`MNIST `_ Dataset. + + This implementation is modified from + https://github.com/pytorch/vision/blob/master/torchvision/datasets/mnist.py + + Args: + data_root (str): The root directory of the MNIST Dataset. + split (str, optional): The dataset split, supports "train" and "test". + Default to "train". + metainfo (dict, optional): Meta information for dataset, such as + categories information. Defaults to None. + download (bool): Whether to download the dataset if not exists. + Defaults to True. + **kwargs: Other keyword arguments in :class:`BaseDataset`. 
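+
+    Examples:
+        A minimal usage sketch; ``data/mnist`` is a placeholder directory.
+        With ``download=True`` (the default) the raw IDX files are fetched
+        into it on first use:
+
+        >>> from mmpretrain.datasets import MNIST
+        >>> train_set = MNIST(data_root='data/mnist', split='train')
+        >>> test_set = MNIST(data_root='data/mnist', split='test',
+        ...                  test_mode=True)
+        >>> # samples carry the decoded array under `img` rather than an
+        >>> # `img_path`, since the raw files are not per-image JPEGs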
+ """ # noqa: E501 + + url_prefix = 'http://yann.lecun.com/exdb/mnist/' + # train images and labels + train_list = [ + ['train-images-idx3-ubyte.gz', 'f68b3c2dcbeaaa9fbdd348bbdeb94873'], + ['train-labels-idx1-ubyte.gz', 'd53e105ee54ea40749a09fcbcd1e9432'], + ] + # test images and labels + test_list = [ + ['t10k-images-idx3-ubyte.gz', '9fb629c4189551a2d022fa330f9573f3'], + ['t10k-labels-idx1-ubyte.gz', 'ec29112dd5afa0611ce80d1b7f02629c'], + ] + METAINFO = {'classes': MNIST_CATEGORITES} + + def __init__(self, + data_root: str = '', + split: str = 'train', + metainfo: Optional[dict] = None, + download: bool = True, + data_prefix: str = '', + test_mode: bool = False, + **kwargs): + + splits = ['train', 'test'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'" + self.split = split + + # To handle the BC-breaking + if split == 'train' and test_mode: + logger = MMLogger.get_current_instance() + logger.warning('split="train" but test_mode=True. ' + 'The training set will be used.') + + if not data_root and not data_prefix: + raise RuntimeError('Please set ``data_root`` to' + 'specify the dataset path') + + self.download = download + super().__init__( + # The MNIST dataset doesn't need specify annotation file + ann_file='', + metainfo=metainfo, + data_root=data_root, + data_prefix=dict(root=data_prefix), + test_mode=test_mode, + **kwargs) + + def load_data_list(self): + """Load images and ground truth labels.""" + root = self.data_prefix['root'] + backend = get_file_backend(root, enable_singleton=True) + + if dist.is_main_process() and not self._check_exists(): + if not isinstance(backend, LocalBackend): + raise RuntimeError(f'The dataset on {root} is not integrated, ' + f'please manually handle it.') + + if self.download: + self._download() + else: + raise RuntimeError( + f'Cannot find {self.__class__.__name__} dataset in ' + f"{self.data_prefix['root']}, you can specify " + '`download=True` to download automatically.') + + dist.barrier() + assert self._check_exists(), \ + 'Download failed or shared storage is unavailable. Please ' \ + f'download the dataset manually through {self.url_prefix}.' + + if not self.test_mode: + file_list = self.train_list + else: + file_list = self.test_list + + # load data from SN3 files + imgs = read_image_file(join_path(root, rm_suffix(file_list[0][0]))) + gt_labels = read_label_file( + join_path(root, rm_suffix(file_list[1][0]))) + + data_infos = [] + for img, gt_label in zip(imgs, gt_labels): + gt_label = np.array(gt_label, dtype=np.int64) + info = {'img': img.numpy(), 'gt_label': gt_label} + data_infos.append(info) + return data_infos + + def _check_exists(self): + """Check the exists of data files.""" + root = self.data_prefix['root'] + + for filename, _ in (self.train_list + self.test_list): + # get extracted filename of data + extract_filename = rm_suffix(filename) + fpath = join_path(root, extract_filename) + if not exists(fpath): + return False + return True + + def _download(self): + """Download and extract data files.""" + root = self.data_prefix['root'] + + for filename, md5 in (self.train_list + self.test_list): + url = urljoin(self.url_prefix, filename) + download_and_extract_archive( + url, download_root=root, filename=filename, md5=md5) + + def extra_repr(self) -> List[str]: + """The extra repr information of the dataset.""" + body = [f"Prefix of data: \t{self.data_prefix['root']}"] + return body + + +@DATASETS.register_module() +class FashionMNIST(MNIST): + """`Fashion-MNIST `_ + Dataset. 
+ + Args: + data_root (str): The root directory of the MNIST Dataset. + split (str, optional): The dataset split, supports "train" and "test". + Default to "train". + metainfo (dict, optional): Meta information for dataset, such as + categories information. Defaults to None. + download (bool): Whether to download the dataset if not exists. + Defaults to True. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + url_prefix = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/' + # train images and labels + train_list = [ + ['train-images-idx3-ubyte.gz', '8d4fb7e6c68d591d4c3dfef9ec88bf0d'], + ['train-labels-idx1-ubyte.gz', '25c81989df183df01b3e8a0aad5dffbe'], + ] + # test images and labels + test_list = [ + ['t10k-images-idx3-ubyte.gz', 'bef4ecab320f06d8554ea6380940ec79'], + ['t10k-labels-idx1-ubyte.gz', 'bb300cfdad3c16e7a12a480ee83cd310'], + ] + METAINFO = {'classes': FASHIONMNIST_CATEGORITES} + + +def get_int(b: bytes) -> int: + """Convert bytes to int.""" + return int(codecs.encode(b, 'hex'), 16) + + +def read_sn3_pascalvincent_tensor(path: str, + strict: bool = True) -> torch.Tensor: + """Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx- + io.lsh'). + + Argument may be a filename, compressed filename, or file object. + """ + # typemap + if not hasattr(read_sn3_pascalvincent_tensor, 'typemap'): + read_sn3_pascalvincent_tensor.typemap = { + 8: (torch.uint8, np.uint8, np.uint8), + 9: (torch.int8, np.int8, np.int8), + 11: (torch.int16, np.dtype('>i2'), 'i2'), + 12: (torch.int32, np.dtype('>i4'), 'i4'), + 13: (torch.float32, np.dtype('>f4'), 'f4'), + 14: (torch.float64, np.dtype('>f8'), 'f8') + } + # read + with open_maybe_compressed_file(path) as f: + data = f.read() + # parse + magic = get_int(data[0:4]) + nd = magic % 256 + ty = magic // 256 + assert nd >= 1 and nd <= 3 + assert ty >= 8 and ty <= 14 + m = read_sn3_pascalvincent_tensor.typemap[ty] + s = [get_int(data[4 * (i + 1):4 * (i + 2)]) for i in range(nd)] + parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1))) + assert parsed.shape[0] == np.prod(s) or not strict + return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s) + + +def read_label_file(path: str) -> torch.Tensor: + """Read labels from SN3 label file.""" + with open(path, 'rb') as f: + x = read_sn3_pascalvincent_tensor(f, strict=False) + assert (x.dtype == torch.uint8) + assert (x.ndimension() == 1) + return x.long() + + +def read_image_file(path: str) -> torch.Tensor: + """Read images from SN3 image file.""" + with open(path, 'rb') as f: + x = read_sn3_pascalvincent_tensor(f, strict=False) + assert (x.dtype == torch.uint8) + assert (x.ndimension() == 3) + return x diff --git a/mmpretrain/datasets/multi_label.py b/mmpretrain/datasets/multi_label.py new file mode 100644 index 0000000..58a9c7c --- /dev/null +++ b/mmpretrain/datasets/multi_label.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module() +class MultiLabelDataset(BaseDataset): + """Multi-label Dataset. + + This dataset support annotation file in `OpenMMLab 2.0 style annotation + format`. + + The annotation format is shown as follows. + + .. code-block:: none + + { + "metainfo": + { + "classes":['A', 'B', 'C'....] + }, + "data_list": + [ + { + "img_path": "test_img1.jpg", + 'gt_label': [0, 1], + }, + { + "img_path": "test_img2.jpg", + 'gt_label': [2], + }, + ] + .... 
+ } + + + Args: + ann_file (str): Annotation file path. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + data_root (str): The root directory for ``data_prefix`` and + ``ann_file``. Defaults to ''. + data_prefix (str | dict): Prefix for training data. Defaults to ''. + filter_cfg (dict, optional): Config for filter data. Defaults to None. + indices (int or Sequence[int], optional): Support using first few + data in annotation file to facilitate training/testing on a smaller + dataset. Defaults to None which means using all ``data_infos``. + serialize_data (bool, optional): Whether to hold memory using + serialized objects, when enabled, data loader workers can use + shared RAM from master process instead of making a copy. Defaults + to True. + pipeline (list, optional): Processing pipeline. Defaults to []. + test_mode (bool, optional): ``test_mode=True`` means in test phase. + Defaults to False. + lazy_init (bool, optional): Whether to load annotation during + instantiation. In some cases, such as visualization, only the meta + information of the dataset is needed, which is not necessary to + load annotation file. ``Basedataset`` can skip load annotations to + save time by set ``lazy_init=False``. Defaults to False. + max_refetch (int, optional): If ``Basedataset.prepare_data`` get a + None img. The maximum extra number of cycles to get a valid + image. Defaults to 1000. + classes (str | Sequence[str], optional): Specify names of classes. + + - If is string, it should be a file path, and the every line of + the file is a name of a class. + - If is a sequence of string, every item is a name of class. + - If is None, use categories information in ``metainfo`` argument, + annotation file or the class attribute ``METAINFO``. + + Defaults to None. + """ + + def get_cat_ids(self, idx: int) -> List[int]: + """Get category ids by index. + + Args: + idx (int): Index of data. + + Returns: + cat_ids (List[int]): Image categories of specified index. + """ + return self.get_data_info(idx)['gt_label'] diff --git a/mmpretrain/datasets/multi_task.py b/mmpretrain/datasets/multi_task.py new file mode 100644 index 0000000..443df0e --- /dev/null +++ b/mmpretrain/datasets/multi_task.py @@ -0,0 +1,337 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +from os import PathLike +from typing import Optional, Sequence + +import mmengine +from mmcv.transforms import Compose +from mmengine.fileio import get_file_backend + +from .builder import DATASETS + + +def expanduser(path): + if isinstance(path, (str, PathLike)): + return osp.expanduser(path) + else: + return path + + +def isabs(uri): + return osp.isabs(uri) or ('://' in uri) + + +@DATASETS.register_module() +class MultiTaskDataset: + """Custom dataset for multi-task dataset. + + To use the dataset, please generate and provide an annotation file in the + below format: + + .. code-block:: json + + { + "metainfo": { + "tasks": + [ + 'gender' + 'wear' + ] + }, + "data_list": [ + { + "img_path": "a.jpg", + gt_label:{ + "gender": 0, + "wear": [1, 0, 1, 0] + } + }, + { + "img_path": "b.jpg", + gt_label:{ + "gender": 1, + "wear": [1, 0, 1, 0] + } + } + ] + } + + Assume we put our dataset in the ``data/mydataset`` folder in the + repository and organize it as the below format: :: + + mmpretrain/ + └── data + └── mydataset + ├── annotation + │   ├── train.json + │   ├── test.json + │   └── val.json + ├── train + │   ├── a.jpg + │   └── ... 
+ ├── test + │   ├── b.jpg + │   └── ... + └── val + ├── c.jpg + └── ... + + We can use the below config to build datasets: + + .. code:: python + + >>> from mmpretrain.datasets import build_dataset + >>> train_cfg = dict( + ... type="MultiTaskDataset", + ... ann_file="annotation/train.json", + ... data_root="data/mydataset", + ... # The `img_path` field in the train annotation file is relative + ... # to the `train` folder. + ... data_prefix='train', + ... ) + >>> train_dataset = build_dataset(train_cfg) + + Or we can put all files in the same folder: :: + + mmpretrain/ + └── data + └── mydataset + ├── train.json + ├── test.json + ├── val.json + ├── a.jpg + ├── b.jpg + ├── c.jpg + └── ... + + And we can use the below config to build datasets: + + .. code:: python + + >>> from mmpretrain.datasets import build_dataset + >>> train_cfg = dict( + ... type="MultiTaskDataset", + ... ann_file="train.json", + ... data_root="data/mydataset", + ... # the `data_prefix` is not required since all paths are + ... # relative to the `data_root`. + ... ) + >>> train_dataset = build_dataset(train_cfg) + + + Args: + ann_file (str): The annotation file path. It can be either absolute + path or relative path to the ``data_root``. + metainfo (dict, optional): The extra meta information. It should be + a dict with the same format as the ``"metainfo"`` field in the + annotation file. Defaults to None. + data_root (str, optional): The root path of the data directory. It's + the prefix of the ``data_prefix`` and the ``ann_file``. And it can + be a remote path like "s3://openmmlab/xxx/". Defaults to None. + data_prefix (str, optional): The base folder relative to the + ``data_root`` for the ``"img_path"`` field in the annotation file. + Defaults to None. + pipeline (Sequence[dict]): A list of dict, where each element + represents a operation defined in + :mod:`mmpretrain.datasets.pipelines`. Defaults to an empty tuple. + test_mode (bool): in train mode or test mode. Defaults to False. + """ + METAINFO = dict() + + def __init__(self, + ann_file: str, + metainfo: Optional[dict] = None, + data_root: Optional[str] = None, + data_prefix: Optional[str] = None, + pipeline: Sequence = (), + test_mode: bool = False): + + self.data_root = expanduser(data_root) + + # Inference the file client + if self.data_root is not None: + self.file_backend = get_file_backend(uri=self.data_root) + else: + self.file_backend = None + + self.ann_file = self._join_root(expanduser(ann_file)) + self.data_prefix = self._join_root(data_prefix) + + self.test_mode = test_mode + self.pipeline = Compose(pipeline) + self.data_list = self.load_data_list(self.ann_file, metainfo) + + def _join_root(self, path): + """Join ``self.data_root`` with the specified path. + + If the path is an absolute path, just return the path. And if the + path is None, return ``self.data_root``. + + Examples: + >>> self.data_root = 'a/b/c' + >>> self._join_root('d/e/') + 'a/b/c/d/e' + >>> self._join_root('https://openmmlab.com') + 'https://openmmlab.com' + >>> self._join_root(None) + 'a/b/c' + """ + if path is None: + return self.data_root + if isabs(path): + return path + + joined_path = self.file_backend.join_path(self.data_root, path) + return joined_path + + @classmethod + def _get_meta_info(cls, in_metainfo: dict = None) -> dict: + """Collect meta information from the dictionary of meta. + + Args: + in_metainfo (dict): Meta information dict. + + Returns: + dict: Parsed meta information. 
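+
+        Examples:
+            A small sketch of the merge behaviour. ``METAINFO`` is empty for
+            this class, so the passed dict is returned after a deep copy:
+
+            >>> MultiTaskDataset._get_meta_info({'tasks': ['gender', 'wear']})
+            {'tasks': ['gender', 'wear']}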
+ """ + # `cls.METAINFO` will be overwritten by in_meta + metainfo = copy.deepcopy(cls.METAINFO) + if in_metainfo is None: + return metainfo + + metainfo.update(in_metainfo) + + return metainfo + + def load_data_list(self, ann_file, metainfo_override=None): + """Load annotations from an annotation file. + + Args: + ann_file (str): Absolute annotation file path if ``self.root=None`` + or relative path if ``self.root=/path/to/data/``. + + Returns: + list[dict]: A list of annotation. + """ + annotations = mmengine.load(ann_file) + if not isinstance(annotations, dict): + raise TypeError(f'The annotations loaded from annotation file ' + f'should be a dict, but got {type(annotations)}!') + if 'data_list' not in annotations: + raise ValueError('The annotation file must have the `data_list` ' + 'field.') + metainfo = annotations.get('metainfo', {}) + raw_data_list = annotations['data_list'] + + # Set meta information. + assert isinstance(metainfo, dict), 'The `metainfo` field in the '\ + f'annotation file should be a dict, but got {type(metainfo)}' + if metainfo_override is not None: + assert isinstance(metainfo_override, dict), 'The `metainfo` ' \ + f'argument should be a dict, but got {type(metainfo_override)}' + metainfo.update(metainfo_override) + self._metainfo = self._get_meta_info(metainfo) + + data_list = [] + for i, raw_data in enumerate(raw_data_list): + try: + data_list.append(self.parse_data_info(raw_data)) + except AssertionError as e: + raise RuntimeError( + f'The format check fails during parse the item {i} of ' + f'the annotation file with error: {e}') + return data_list + + def parse_data_info(self, raw_data): + """Parse raw annotation to target format. + + This method will return a dict which contains the data information of a + sample. + + Args: + raw_data (dict): Raw data information load from ``ann_file`` + + Returns: + dict: Parsed annotation. + """ + assert isinstance(raw_data, dict), \ + f'The item should be a dict, but got {type(raw_data)}' + assert 'img_path' in raw_data, \ + "The item doesn't have `img_path` field." + data = dict( + img_path=self._join_root(raw_data['img_path']), + gt_label=raw_data['gt_label'], + ) + return data + + @property + def metainfo(self) -> dict: + """Get meta information of dataset. + + Returns: + dict: meta information collected from ``cls.METAINFO``, + annotation file and metainfo argument during instantiation. + """ + return copy.deepcopy(self._metainfo) + + def prepare_data(self, idx): + """Get data processed by ``self.pipeline``. + + Args: + idx (int): The index of ``data_info``. + + Returns: + Any: Depends on ``self.pipeline``. + """ + results = copy.deepcopy(self.data_list[idx]) + return self.pipeline(results) + + def __len__(self): + """Get the length of the whole dataset. + + Returns: + int: The length of filtered dataset. + """ + return len(self.data_list) + + def __getitem__(self, idx): + """Get the idx-th image and data information of dataset after + ``self.pipeline``. + + Args: + idx (int): The index of of the data. + + Returns: + dict: The idx-th image and data information after + ``self.pipeline``. + """ + return self.prepare_data(idx) + + def __repr__(self): + """Print the basic information of the dataset. + + Returns: + str: Formatted string. 
+ """ + head = 'Dataset ' + self.__class__.__name__ + body = [f'Number of samples: \t{self.__len__()}'] + if self.data_root is not None: + body.append(f'Root location: \t{self.data_root}') + body.append(f'Annotation file: \t{self.ann_file}') + if self.data_prefix is not None: + body.append(f'Prefix of images: \t{self.data_prefix}') + # -------------------- extra repr -------------------- + tasks = self.metainfo['tasks'] + body.append(f'For {len(tasks)} tasks') + for task in tasks: + body.append(f' {task} ') + # ---------------------------------------------------- + + if len(self.pipeline.transforms) > 0: + body.append('With transforms:') + for t in self.pipeline.transforms: + body.append(f' {t}') + + lines = [head] + [' ' * 4 + line for line in body] + return '\n'.join(lines) diff --git a/mmpretrain/datasets/nlvr2.py b/mmpretrain/datasets/nlvr2.py new file mode 100644 index 0000000..0063090 --- /dev/null +++ b/mmpretrain/datasets/nlvr2.py @@ -0,0 +1,36 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json +from typing import List + +from mmengine.fileio import get_file_backend, list_from_file + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module() +class NLVR2(BaseDataset): + """COCO Caption dataset.""" + + def load_data_list(self) -> List[dict]: + """Load data list.""" + + data_list = [] + img_prefix = self.data_prefix['img_path'] + file_backend = get_file_backend(img_prefix) + examples = list_from_file(self.ann_file) + + for example in examples: + example = json.loads(example) + prefix = example['identifier'].rsplit('-', 1)[0] + train_data = {} + train_data['text'] = example['sentence'] + train_data['gt_label'] = {'True': 1, 'False': 0}[example['label']] + train_data['img_path'] = [ + file_backend.join_path(img_prefix, prefix + f'-img{i}.png') + for i in range(2) + ] + + data_list.append(train_data) + + return data_list diff --git a/mmpretrain/datasets/nocaps.py b/mmpretrain/datasets/nocaps.py new file mode 100644 index 0000000..65116e9 --- /dev/null +++ b/mmpretrain/datasets/nocaps.py @@ -0,0 +1,46 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset +from mmengine.fileio import get_file_backend +from pycocotools.coco import COCO + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class NoCaps(BaseDataset): + """NoCaps dataset. + + Args: + data_root (str): The root directory for ``data_prefix`` and + ``ann_file``.. + ann_file (str): Annotation file path. + data_prefix (dict): Prefix for data field. Defaults to + ``dict(img_path='')``. + pipeline (Sequence): Processing pipeline. Defaults to an empty tuple. + **kwargs: Other keyword arguments in :class:`BaseDataset`. 
+ """ + + def load_data_list(self) -> List[dict]: + """Load data list.""" + img_prefix = self.data_prefix['img_path'] + with mmengine.get_local_path(self.ann_file) as ann_file: + coco = COCO(ann_file) + + file_backend = get_file_backend(img_prefix) + data_list = [] + for ann in coco.anns.values(): + image_id = ann['image_id'] + image_path = file_backend.join_path( + img_prefix, coco.imgs[image_id]['file_name']) + data_info = { + 'image_id': image_id, + 'img_path': image_path, + 'gt_caption': None + } + + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/ocr_vqa.py b/mmpretrain/datasets/ocr_vqa.py new file mode 100644 index 0000000..55aa691 --- /dev/null +++ b/mmpretrain/datasets/ocr_vqa.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class OCRVQA(BaseDataset): + """OCR-VQA dataset. + + Args: + data_root (str): The root directory for ``data_prefix``, ``ann_file`` + and ``question_file``. + data_prefix (str): The directory of images. + ann_file (str): Annotation file path for training and validation. + split (str): 'train', 'val' or 'test'. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def __init__(self, data_root: str, data_prefix: str, ann_file: str, + split: str, **kwarg): + + assert split in ['train', 'val', 'test'], \ + '`split` must be train, val or test' + self.split = split + super().__init__( + data_root=data_root, + data_prefix=dict(img_path=data_prefix), + ann_file=ann_file, + **kwarg, + ) + + def load_data_list(self) -> List[dict]: + """Load data list.""" + + split_dict = {1: 'train', 2: 'val', 3: 'test'} + + annotations = mmengine.load(self.ann_file) + + # ann example + # "761183272": { + # "imageURL": \ + # "http://ecx.images-amazon.com/images/I/61Y5cOdHJbL.jpg", + # "questions": [ + # "Who wrote this book?", + # "What is the title of this book?", + # "What is the genre of this book?", + # "Is this a games related book?", + # "What is the year printed on this calendar?"], + # "answers": [ + # "Sandra Boynton", + # "Mom's Family Wall Calendar 2016", + # "Calendars", + # "No", + # "2016"], + # "title": "Mom's Family Wall Calendar 2016", + # "authorName": "Sandra Boynton", + # "genre": "Calendars", + # "split": 1 + # }, + + data_list = [] + + for key, ann in annotations.items(): + if self.split != split_dict[ann['split']]: + continue + + extension = osp.splitext(ann['imageURL'])[1] + if extension not in ['.jpg', '.png']: + continue + img_path = mmengine.join_path(self.data_prefix['img_path'], + key + extension) + for question, answer in zip(ann['questions'], ann['answers']): + data_info = {} + data_info['img_path'] = img_path + data_info['question'] = question + data_info['gt_answer'] = answer + data_info['gt_answer_weight'] = [1.0] + + data_info['imageURL'] = ann['imageURL'] + data_info['title'] = ann['title'] + data_info['authorName'] = ann['authorName'] + data_info['genre'] = ann['genre'] + + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/oxfordiiitpet.py b/mmpretrain/datasets/oxfordiiitpet.py new file mode 100644 index 0000000..23c8b7d --- /dev/null +++ b/mmpretrain/datasets/oxfordiiitpet.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import List + +from mmengine import get_file_backend, list_from_file + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset +from .categories import OxfordIIITPet_CATEGORIES + + +@DATASETS.register_module() +class OxfordIIITPet(BaseDataset): + """The Oxford-IIIT Pets Dataset. + + Support the `Oxford-IIIT Pets Dataset `_ Dataset. + After downloading and decompression, the dataset directory structure is as follows. + + Oxford-IIIT_Pets dataset directory: :: + + Oxford-IIIT_Pets + ├── images + │ ├── Abyssinian_1.jpg + │ ├── Abyssinian_2.jpg + │ └── ... + ├── annotations + │ ├── trainval.txt + │ ├── test.txt + │ ├── list.txt + │ └── ... + └── .... + + Args: + data_root (str): The root directory for Oxford-IIIT Pets dataset. + split (str, optional): The dataset split, supports "trainval" and "test". + Default to "trainval". + + Examples: + >>> from mmpretrain.datasets import OxfordIIITPet + >>> train_dataset = OxfordIIITPet(data_root='data/Oxford-IIIT_Pets', split='trainval') + >>> train_dataset + Dataset OxfordIIITPet + Number of samples: 3680 + Number of categories: 37 + Root of dataset: data/Oxford-IIIT_Pets + >>> test_dataset = OxfordIIITPet(data_root='data/Oxford-IIIT_Pets', split='test') + >>> test_dataset + Dataset OxfordIIITPet + Number of samples: 3669 + Number of categories: 37 + Root of dataset: data/Oxford-IIIT_Pets + """ # noqa: E501 + + METAINFO = {'classes': OxfordIIITPet_CATEGORIES} + + def __init__(self, data_root: str, split: str = 'trainval', **kwargs): + + splits = ['trainval', 'test'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'" + self.split = split + + self.backend = get_file_backend(data_root, enable_singleton=True) + if split == 'trainval': + ann_file = self.backend.join_path('annotations', 'trainval.txt') + else: + ann_file = self.backend.join_path('annotations', 'test.txt') + + data_prefix = 'images' + test_mode = split == 'test' + + super(OxfordIIITPet, self).__init__( + ann_file=ann_file, + data_root=data_root, + data_prefix=data_prefix, + test_mode=test_mode, + **kwargs) + + def load_data_list(self): + """Load images and ground truth labels.""" + + pairs = list_from_file(self.ann_file) + data_list = [] + for pair in pairs: + img_name, class_id, _, _ = pair.split() + img_name = f'{img_name}.jpg' + img_path = self.backend.join_path(self.img_prefix, img_name) + gt_label = int(class_id) - 1 + info = dict(img_path=img_path, gt_label=gt_label) + data_list.append(info) + return data_list + + def extra_repr(self) -> List[str]: + """The extra repr information of the dataset.""" + body = [ + f'Root of dataset: \t{self.data_root}', + ] + return body diff --git a/mmpretrain/datasets/places205.py b/mmpretrain/datasets/places205.py new file mode 100644 index 0000000..f3ba1ff --- /dev/null +++ b/mmpretrain/datasets/places205.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Union + +from mmpretrain.registry import DATASETS +from .categories import PLACES205_CATEGORIES +from .custom import CustomDataset + + +@DATASETS.register_module() +class Places205(CustomDataset): + """`Places205 `_ Dataset. + + Args: + data_root (str): The root directory for ``data_prefix`` and + ``ann_file``. Defaults to ''. + data_prefix (str | dict): Prefix for training data. Defaults + to ''. + ann_file (str): Annotation file path. Defaults to ''. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. 
+ **kwargs: Other keyword arguments in :class:`CustomDataset` and + :class:`BaseDataset`. + """ + + IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif') + METAINFO = {'classes': PLACES205_CATEGORIES} + + def __init__(self, + data_root: str = '', + data_prefix: Union[str, dict] = '', + ann_file: str = '', + metainfo: Optional[dict] = None, + **kwargs): + kwargs = {'extensions': self.IMG_EXTENSIONS, **kwargs} + super().__init__( + data_root=data_root, + data_prefix=data_prefix, + ann_file=ann_file, + metainfo=metainfo, + **kwargs) diff --git a/mmpretrain/datasets/refcoco.py b/mmpretrain/datasets/refcoco.py new file mode 100644 index 0000000..39c3d3e --- /dev/null +++ b/mmpretrain/datasets/refcoco.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import List + +import mmengine +import numpy as np +from mmengine.dataset import BaseDataset +from pycocotools.coco import COCO + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class RefCOCO(BaseDataset): + """RefCOCO dataset. + + RefCOCO is a popular dataset used for the task of visual grounding. + Here are the steps for accessing and utilizing the + RefCOCO dataset. + + You can access the RefCOCO dataset from the official source: + https://github.com/lichengunc/refer + + The RefCOCO dataset is organized in a structured format: :: + + FeaturesDict({ + 'coco_annotations': Sequence({ + 'area': int64, + 'bbox': BBoxFeature(shape=(4,), dtype=float32), + 'id': int64, + 'label': int64, + }), + 'image': Image(shape=(None, None, 3), dtype=uint8), + 'image/id': int64, + 'objects': Sequence({ + 'area': int64, + 'bbox': BBoxFeature(shape=(4,), dtype=float32), + 'gt_box_index': int64, + 'id': int64, + 'label': int64, + 'refexp': Sequence({ + 'raw': Text(shape=(), dtype=string), + 'refexp_id': int64, + }), + }), + }) + + Args: + ann_file (str): Annotation file path. + data_root (str): The root directory for ``data_prefix`` and + ``ann_file``. Defaults to ''. + data_prefix (str): Prefix for training data. + pipeline (Sequence): Processing pipeline. Defaults to an empty tuple. + **kwargs: Other keyword arguments in :class:`BaseDataset`. 
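+
+    Examples:
+        A minimal usage sketch; the paths are placeholders and follow the
+        refer-style layout (a COCO ``instances.json`` plus a pickled refs
+        split file):
+
+        >>> from mmpretrain.datasets import RefCOCO
+        >>> train_set = RefCOCO(
+        ...     data_root='data/refcoco',
+        ...     data_prefix='train2014',
+        ...     ann_file='refcoco/instances.json',
+        ...     split_file='refcoco/refs(unc).p',
+        ...     split='train')
+        >>> # each sample holds one expression in `text` and an XYXY box in
+        >>> # `gt_bboxes` with shape (1, 4)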
+ """ + + def __init__(self, + data_root, + ann_file, + data_prefix, + split_file, + split='train', + **kwargs): + self.split_file = split_file + self.split = split + + super().__init__( + data_root=data_root, + data_prefix=dict(img_path=data_prefix), + ann_file=ann_file, + **kwargs, + ) + + def _join_prefix(self): + if not mmengine.is_abs(self.split_file) and self.split_file: + self.split_file = osp.join(self.data_root, self.split_file) + + return super()._join_prefix() + + def load_data_list(self) -> List[dict]: + """Load data list.""" + with mmengine.get_local_path(self.ann_file) as ann_file: + coco = COCO(ann_file) + splits = mmengine.load(self.split_file, file_format='pkl') + img_prefix = self.data_prefix['img_path'] + + data_list = [] + join_path = mmengine.fileio.get_file_backend(img_prefix).join_path + for refer in splits: + if refer['split'] != self.split: + continue + + ann = coco.anns[refer['ann_id']] + img = coco.imgs[ann['image_id']] + sentences = refer['sentences'] + bbox = np.array(ann['bbox'], dtype=np.float32) + bbox[2:4] = bbox[0:2] + bbox[2:4] # XYWH -> XYXY + + for sent in sentences: + data_info = { + 'img_path': join_path(img_prefix, img['file_name']), + 'image_id': ann['image_id'], + 'ann_id': ann['id'], + 'text': sent['sent'], + 'gt_bboxes': bbox[None, :], + } + data_list.append(data_info) + + if len(data_list) == 0: + raise ValueError(f'No sample in split "{self.split}".') + + return data_list diff --git a/mmpretrain/datasets/samplers/__init__.py b/mmpretrain/datasets/samplers/__init__.py new file mode 100644 index 0000000..2bccf9c --- /dev/null +++ b/mmpretrain/datasets/samplers/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .repeat_aug import RepeatAugSampler +from .sequential import SequentialSampler + +__all__ = ['RepeatAugSampler', 'SequentialSampler'] diff --git a/mmpretrain/datasets/samplers/__pycache__/__init__.cpython-310.pyc b/mmpretrain/datasets/samplers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c88a243479aadb7e6f204e363bbb5f9988308a5 GIT binary patch literal 282 zcmYjLK?=e!5KK~Ar6}khy!2wvf{1>=gT3{#1QJUjX={=m{E1)i2fn~Z^yV^PZ zDS!(xk#~A{E>lz2F4ej*(ivV>|5R|tt#mfEpEYa>P*$Y_STv_@w9>U`m0bI+t^aLI G@vt`yMoHQL literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/samplers/__pycache__/repeat_aug.cpython-310.pyc b/mmpretrain/datasets/samplers/__pycache__/repeat_aug.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..461fe786a95f1552b6a0e0c5ba4f931a2ce6d4e1 GIT binary patch literal 3660 zcmZuz-ESMm5#ReDd89;1mQ}}g8f5zcLo95{ZIiT6T{lLYxHe)XuoMJf8(dDeOY-dd zpm&#+B~UMwgZ8Blee0wA7@&Vg{)l_+Q~re_Y5JSJlPI}9Vu!o4v$ONHGgEZC9ft3p z-+wb~EHn0RTAckFSiFy+Qvkt)ld=K*yQ!1812^*qUgi&cr`l@_8rbvFX4V?CvS1Ko z?Lph_`DrIx7%bR%BVEk8gRaA5_ju_gyT?RRv|cdLk_$)Pkuz8p&Nk}>-{MBL)$_{W zzL6?2g(`zjCMGHJC@mY?$$=Eu?2e=fMWRiZNghsA5lgMh6}_Lwp^9=* zWTBQgRNJdzU`nLE=_Eou~djMZ~)#*yJlYNL|aXf7fX@wA9Ul8?A4c*ON| zS4+c-AxNd&r{%Cv9gdHzrIOvLk>WbvOUxL&=r$>MG#zC!HxXI6&+i*9b3MgP)s-R{ z4yD3Amr*>nyy^ou`b;W~Q*tmA*qDjl z+}IdFLxz0bTd0*%E^SBoAjvDDrd(!#=>X2X7t61LT zJ9B4M)*vc9o(_kw-8uwKdz<|8v4m6ADgLa73&r!o^!Z&mjHW3<1JSUfrgF|LpJrj@ zZOzw{+<+n7TA{c}GWZ%Ell|_8}X53YmAU!o@Td6i$hNg_Af+c@(QcYaXTPIc9ZW zL6-y(L{^;#4IimuIytY@R*}~>+W8jmjBS-IiZwW=Y}FE#3ujDM`lMb=$?@cCW8!0c zfT8~dVAv7+vwPS$Y|a`p7CN&gpgVH_y_pN>&)8#MUo(v(rmhJGDHUn+u;s8>>o|DH z4ue^6+!o$T2N++lDN{F0=ZGEpGn@<;1aVrNd4OHBbi8a?z<4ze!PI-vzUGr8egeDmVe%X< zoLDRN91gwQZCVaUNsUjyhgnht($(^5VmkDSZwkRE_5{F(QadxiZ5JgOa{dE)AO57=Lv8J*}724m?f@5))KWl#->3PBG( 
diff --git a/mmpretrain/datasets/samplers/repeat_aug.py b/mmpretrain/datasets/samplers/repeat_aug.py
new file mode 100644
index 0000000..d833a19
--- /dev/null
+++ b/mmpretrain/datasets/samplers/repeat_aug.py
@@ -0,0 +1,101 @@
+import math
+from typing import Iterator, Optional, Sized
+
+import torch
+from mmengine.dist import get_dist_info, is_main_process, sync_random_seed
+from torch.utils.data import Sampler
+
+from mmpretrain.registry import DATA_SAMPLERS
+
+
+@DATA_SAMPLERS.register_module()
+class RepeatAugSampler(Sampler):
+    """Sampler that restricts data loading to a subset of the dataset for
+    distributed training, with repeated augmentation. It ensures that each
+    augmented version of a sample is visible to a different process (GPU).
+    Heavily based on torch.utils.data.DistributedSampler.
+
+    This sampler was taken from
+    https://github.com/facebookresearch/deit/blob/0c4b8f60/samplers.py,
+    which is
+    Copyright (c) 2015-present, Facebook, Inc.
+
+    Args:
+        dataset (Sized): The dataset.
+        shuffle (bool): Whether to shuffle the dataset or not. Defaults to
+            True.
+        num_repeats (int): The repeat times of every sample. Defaults to 3.
+        seed (int, optional): Random seed used to shuffle the sampler if
+            :attr:`shuffle=True`. This number should be identical across all
+            processes in the distributed group. Defaults to None.
+ """ + + def __init__(self, + dataset: Sized, + shuffle: bool = True, + num_repeats: int = 3, + seed: Optional[int] = None): + rank, world_size = get_dist_info() + self.rank = rank + self.world_size = world_size + + self.dataset = dataset + self.shuffle = shuffle + if not self.shuffle and is_main_process(): + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.warning('The RepeatAugSampler always picks a ' + 'fixed part of data if `shuffle=False`.') + + if seed is None: + seed = sync_random_seed() + self.seed = seed + self.epoch = 0 + self.num_repeats = num_repeats + + # The number of repeated samples in the rank + self.num_samples = math.ceil( + len(self.dataset) * num_repeats / world_size) + # The total number of repeated samples in all ranks. + self.total_size = self.num_samples * world_size + # The number of selected samples in the rank + self.num_selected_samples = math.ceil(len(self.dataset) / world_size) + + def __iter__(self) -> Iterator[int]: + """Iterate the indices.""" + # deterministically shuffle based on epoch and seed + if self.shuffle: + g = torch.Generator() + g.manual_seed(self.seed + self.epoch) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = list(range(len(self.dataset))) + + # produce repeats e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2....] + indices = [x for x in indices for _ in range(self.num_repeats)] + # add extra samples to make it evenly divisible + padding_size = self.total_size - len(indices) + indices += indices[:padding_size] + assert len(indices) == self.total_size + + # subsample per rank + indices = indices[self.rank:self.total_size:self.world_size] + assert len(indices) == self.num_samples + + # return up to num selected samples + return iter(indices[:self.num_selected_samples]) + + def __len__(self) -> int: + """The number of samples in this rank.""" + return self.num_selected_samples + + def set_epoch(self, epoch: int) -> None: + """Sets the epoch for this sampler. + + When :attr:`shuffle=True`, this ensures all replicas use a different + random ordering for each epoch. Otherwise, the next iteration of this + sampler will yield the same ordering. + + Args: + epoch (int): Epoch number. + """ + self.epoch = epoch diff --git a/mmpretrain/datasets/samplers/sequential.py b/mmpretrain/datasets/samplers/sequential.py new file mode 100644 index 0000000..e3b940c --- /dev/null +++ b/mmpretrain/datasets/samplers/sequential.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Iterator + +import torch +from mmengine.dataset import DefaultSampler + +from mmpretrain.registry import DATA_SAMPLERS + + +@DATA_SAMPLERS.register_module() +class SequentialSampler(DefaultSampler): + """Sequential sampler which supports different subsample policy. + + Args: + dataset (Sized): The dataset. + round_up (bool): Whether to add extra samples to make the number of + samples evenly divisible by the world size. Defaults to True. + subsample_type (str): The method to subsample data on different rank. + Supported type: + + - ``'default'``: Original torch behavior. Sample the examples one + by one for each GPU in terms. For instance, 8 examples on 2 GPUs, + GPU0: [0,2,4,8], GPU1: [1,3,5,7] + - ``'sequential'``: Subsample all examples to n chunk sequntially. 
+ For instance, 8 examples on 2 GPUs, + GPU0: [0,1,2,3], GPU1: [4,5,6,7] + """ + + def __init__(self, subsample_type: str = 'default', **kwargs) -> None: + super().__init__(shuffle=False, **kwargs) + + if subsample_type not in ['default', 'sequential']: + raise ValueError(f'Unsupported subsample typer "{subsample_type}",' + ' please choose from ["default", "sequential"]') + self.subsample_type = subsample_type + + def __iter__(self) -> Iterator[int]: + """Iterate the indices.""" + indices = torch.arange(len(self.dataset)).tolist() + + # add extra samples to make it evenly divisible + if self.round_up: + indices = ( + indices * + int(self.total_size / len(indices) + 1))[:self.total_size] + + # subsample + if self.subsample_type == 'default': + indices = indices[self.rank:self.total_size:self.world_size] + elif self.subsample_type == 'sequential': + num_samples_per_rank = self.total_size // self.world_size + indices = indices[self.rank * + num_samples_per_rank:(self.rank + 1) * + num_samples_per_rank] + + return iter(indices) diff --git a/mmpretrain/datasets/scienceqa.py b/mmpretrain/datasets/scienceqa.py new file mode 100644 index 0000000..8e44249 --- /dev/null +++ b/mmpretrain/datasets/scienceqa.py @@ -0,0 +1,109 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +from typing import Callable, List, Sequence + +import mmengine +from mmengine.dataset import BaseDataset +from mmengine.fileio import get_file_backend + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class ScienceQA(BaseDataset): + """ScienceQA dataset. + + This dataset is used to load the multimodal data of ScienceQA dataset. + + Args: + data_root (str): The root directory for ``data_prefix`` and + ``ann_file``. + split (str): The split of dataset. Options: ``train``, ``val``, + ``test``, ``trainval``, ``minival``, and ``minitest``. + split_file (str): The split file of dataset, which contains the + ids of data samples in the split. + ann_file (str): Annotation file path. + image_only (bool): Whether only to load data with image. Defaults to + False. + data_prefix (dict): Prefix for data field. Defaults to + ``dict(img_path='')``. + pipeline (Sequence): Processing pipeline. Defaults to an empty tuple. + **kwargs: Other keyword arguments in :class:`BaseDataset`. 
+ """ + + def __init__(self, + data_root: str, + split: str, + split_file: str, + ann_file: str, + image_only: bool = False, + data_prefix: dict = dict(img_path=''), + pipeline: Sequence[Callable] = (), + **kwargs): + assert split in [ + 'train', 'val', 'test', 'trainval', 'minival', 'minitest' + ], f'Invalid split {split}' + self.split = split + self.split_file = os.path.join(data_root, split_file) + self.image_only = image_only + + super().__init__( + data_root=data_root, + ann_file=ann_file, + data_prefix=data_prefix, + pipeline=pipeline, + **kwargs) + + def load_data_list(self) -> List[dict]: + """Load data list.""" + img_prefix = self.data_prefix['img_path'] + annotations = mmengine.load(self.ann_file) + current_data_split = mmengine.load(self.split_file)[self.split] # noqa + + file_backend = get_file_backend(img_prefix) + + data_list = [] + for data_id in current_data_split: + ann = annotations[data_id] + if self.image_only and ann['image'] is None: + continue + data_info = { + 'image_id': + data_id, + 'question': + ann['question'], + 'choices': + ann['choices'], + 'gt_answer': + ann['answer'], + 'hint': + ann['hint'], + 'image_name': + ann['image'], + 'task': + ann['task'], + 'grade': + ann['grade'], + 'subject': + ann['subject'], + 'topic': + ann['topic'], + 'category': + ann['category'], + 'skill': + ann['skill'], + 'lecture': + ann['lecture'], + 'solution': + ann['solution'], + 'split': + ann['split'], + 'img_path': + file_backend.join_path(img_prefix, data_id, ann['image']) + if ann['image'] is not None else None, + 'has_image': + True if ann['image'] is not None else False, + } + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/stanfordcars.py b/mmpretrain/datasets/stanfordcars.py new file mode 100644 index 0000000..3556979 --- /dev/null +++ b/mmpretrain/datasets/stanfordcars.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import mat4py +from mmengine import get_file_backend + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset +from .categories import STANFORDCARS_CATEGORIES + + +@DATASETS.register_module() +class StanfordCars(BaseDataset): + """The Stanford Cars Dataset. + + Support the `Stanford Cars Dataset `_ Dataset. + The official website provides two ways to organize the dataset. + Therefore, after downloading and decompression, the dataset directory structure is as follows. + + Stanford Cars dataset directory: :: + + Stanford_Cars + ├── car_ims + │ ├── 00001.jpg + │ ├── 00002.jpg + │ └── ... + └── cars_annos.mat + + or :: + + Stanford_Cars + ├── cars_train + │ ├── 00001.jpg + │ ├── 00002.jpg + │ └── ... + ├── cars_test + │ ├── 00001.jpg + │ ├── 00002.jpg + │ └── ... + └── devkit + ├── cars_meta.mat + ├── cars_train_annos.mat + ├── cars_test_annos.mat + ├── cars_test_annoswithlabels.mat + ├── eval_train.m + └── train_perfect_preds.txt + + Args: + data_root (str): The root directory for Stanford Cars dataset. + split (str, optional): The dataset split, supports "train" + and "test". Default to "train". 
+ + Examples: + >>> from mmpretrain.datasets import StanfordCars + >>> train_dataset = StanfordCars(data_root='data/Stanford_Cars', split='train') + >>> train_dataset + Dataset StanfordCars + Number of samples: 8144 + Number of categories: 196 + Root of dataset: data/Stanford_Cars + >>> test_dataset = StanfordCars(data_root='data/Stanford_Cars', split='test') + >>> test_dataset + Dataset StanfordCars + Number of samples: 8041 + Number of categories: 196 + Root of dataset: data/Stanford_Cars + """ # noqa: E501 + + METAINFO = {'classes': STANFORDCARS_CATEGORIES} + + def __init__(self, data_root: str, split: str = 'train', **kwargs): + + splits = ['train', 'test'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'" + self.split = split + + test_mode = split == 'test' + self.backend = get_file_backend(data_root, enable_singleton=True) + + anno_file_path = self.backend.join_path(data_root, 'cars_annos.mat') + if self.backend.exists(anno_file_path): + ann_file = 'cars_annos.mat' + data_prefix = '' + else: + if test_mode: + ann_file = self.backend.join_path( + 'devkit', 'cars_test_annos_withlabels.mat') + data_prefix = 'cars_test' + else: + ann_file = self.backend.join_path('devkit', + 'cars_train_annos.mat') + data_prefix = 'cars_train' + + if not self.backend.exists( + self.backend.join_path(data_root, ann_file)): + doc_url = 'https://mmpretrain.readthedocs.io/en/latest/api/datasets.html#stanfordcars' # noqa: E501 + raise RuntimeError( + f'The dataset is incorrectly organized, please \ + refer to {doc_url} and reorganize your folders.') + + super(StanfordCars, self).__init__( + ann_file=ann_file, + data_root=data_root, + data_prefix=data_prefix, + test_mode=test_mode, + **kwargs) + + def load_data_list(self): + data = mat4py.loadmat(self.ann_file)['annotations'] + + data_list = [] + if 'test' in data.keys(): + # first way + img_paths, labels, test = data['relative_im_path'], data[ + 'class'], data['test'] + num = len(img_paths) + assert num == len(labels) == len(test), 'get error ann file' + for i in range(num): + if not self.test_mode and test[i] == 1: + continue + if self.test_mode and test[i] == 0: + continue + img_path = self.backend.join_path(self.img_prefix, + img_paths[i]) + gt_label = labels[i] - 1 + info = dict(img_path=img_path, gt_label=gt_label) + data_list.append(info) + else: + # second way + img_names, labels = data['fname'], data['class'] + num = len(img_names) + assert num == len(labels), 'get error ann file' + for i in range(num): + img_path = self.backend.join_path(self.img_prefix, + img_names[i]) + gt_label = labels[i] - 1 + info = dict(img_path=img_path, gt_label=gt_label) + data_list.append(info) + + return data_list + + def extra_repr(self) -> List[str]: + """The extra repr information of the dataset.""" + body = [ + f'Root of dataset: \t{self.data_root}', + ] + return body diff --git a/mmpretrain/datasets/sun397.py b/mmpretrain/datasets/sun397.py new file mode 100644 index 0000000..1039a06 --- /dev/null +++ b/mmpretrain/datasets/sun397.py @@ -0,0 +1,125 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +from mmengine import get_file_backend, list_from_file + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset +from .categories import SUN397_CATEGORIES + + +@DATASETS.register_module() +class SUN397(BaseDataset): + """The SUN397 Dataset. + + Support the `SUN397 Dataset `_ Dataset. + After downloading and decompression, the dataset directory structure is as follows. 
+ + SUN397 dataset directory: :: + + SUN397 + ├── SUN397 + │ ├── a + │ │ ├── abbey + │ | | ├── sun_aaalbzqrimafwbiv.jpg + │ | | └── ... + │ │ ├── airplane_cabin + │ | | ├── sun_aadqdkqaslqqoblu.jpg + │ | | └── ... + │ | └── ... + │ ├── b + │ │ └── ... + │ ├── c + │ │ └── ... + │ └── ... + └── Partitions + ├── ClassName.txt + ├── Training_01.txt + ├── Testing_01.txt + └── ... + + Args: + data_root (str): The root directory for Stanford Cars dataset. + split (str, optional): The dataset split, supports "train" and "test". + Default to "train". + + Examples: + >>> from mmpretrain.datasets import SUN397 + >>> train_dataset = SUN397(data_root='data/SUN397', split='train') + >>> train_dataset + Dataset SUN397 + Number of samples: 19850 + Number of categories: 397 + Root of dataset: data/SUN397 + >>> test_dataset = SUN397(data_root='data/SUN397', split='test') + >>> test_dataset + Dataset SUN397 + Number of samples: 19850 + Number of categories: 397 + Root of dataset: data/SUN397 + + **Note that some images are not a jpg file although the name ends with ".jpg". + The backend of SUN397 should be "pillow" as below to read these images properly,** + + .. code-block:: python + + pipeline = [ + dict(type='LoadImageFromFile', imdecode_backend='pillow'), + dict(type='RandomResizedCrop', scale=224), + dict(type='PackInputs') + ] + """ # noqa: E501 + + METAINFO = {'classes': SUN397_CATEGORIES} + + def __init__(self, data_root: str, split: str = 'train', **kwargs): + + splits = ['train', 'test'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'" + self.split = split + + self.backend = get_file_backend(data_root, enable_singleton=True) + if split == 'train': + ann_file = self.backend.join_path('Partitions', 'Training_01.txt') + else: + ann_file = self.backend.join_path('Partitions', 'Testing_01.txt') + + data_prefix = 'SUN397' + test_mode = split == 'test' + + super(SUN397, self).__init__( + ann_file=ann_file, + data_root=data_root, + test_mode=test_mode, + data_prefix=data_prefix, + **kwargs) + + def load_data_list(self): + pairs = list_from_file(self.ann_file) + data_list = [] + for pair in pairs: + img_path = self.backend.join_path(self.img_prefix, pair[1:]) + items = pair.split('/') + class_name = '_'.join(items[2:-1]) + gt_label = self.METAINFO['classes'].index(class_name) + info = dict(img_path=img_path, gt_label=gt_label) + data_list.append(info) + + return data_list + + def __getitem__(self, idx: int) -> dict: + try: + return super().__getitem__(idx) + except AttributeError: + raise RuntimeError( + 'Some images in the SUN397 dataset are not a jpg file ' + 'although the name ends with ".jpg". The backend of SUN397 ' + 'should be "pillow" to read these images properly.') + + def extra_repr(self) -> List[str]: + """The extra repr information of the dataset.""" + body = [ + f'Root of dataset: \t{self.data_root}', + ] + return body diff --git a/mmpretrain/datasets/textvqa.py b/mmpretrain/datasets/textvqa.py new file mode 100644 index 0000000..48a82b4 --- /dev/null +++ b/mmpretrain/datasets/textvqa.py @@ -0,0 +1,105 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import Counter +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class TextVQA(BaseDataset): + """TextVQA dataset. 
+ + val image: + https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip + test image: + https://dl.fbaipublicfiles.com/textvqa/images/test_images.zip + val json: + https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_val.json + test json: + https://dl.fbaipublicfiles.com/textvqa/data/TextVQA_0.5.1_test.json + + folder structure: + data/textvqa + ├── annotations + │ ├── TextVQA_0.5.1_test.json + │ └── TextVQA_0.5.1_val.json + └── images + ├── test_images + └── train_images + + Args: + data_root (str): The root directory for ``data_prefix``, ``ann_file`` + and ``question_file``. + data_prefix (str): The directory of images. + question_file (str): Question file path. + ann_file (str, optional): Annotation file path for training and + validation. Defaults to an empty string. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def __init__(self, + data_root: str, + data_prefix: str, + ann_file: str = '', + **kwarg): + super().__init__( + data_root=data_root, + data_prefix=dict(img_path=data_prefix), + ann_file=ann_file, + **kwarg, + ) + + def load_data_list(self) -> List[dict]: + """Load data list.""" + annotations = mmengine.load(self.ann_file)['data'] + + data_list = [] + + for ann in annotations: + + # ann example + # { + # 'question': 'what is the brand of...is camera?', + # 'image_id': '003a8ae2ef43b901', + # 'image_classes': [ + # 'Cassette deck', 'Printer', ... + # ], + # 'flickr_original_url': 'https://farm2.static...04a6_o.jpg', + # 'flickr_300k_url': 'https://farm2.static...04a6_o.jpg', + # 'image_width': 1024, + # 'image_height': 664, + # 'answers': [ + # 'nous les gosses', + # 'dakota', + # 'clos culombu', + # 'dakota digital' ... + # ], + # 'question_tokens': + # ['what', 'is', 'the', 'brand', 'of', 'this', 'camera'], + # 'question_id': 34602, + # 'set_name': 'val' + # } + + data_info = dict(question=ann['question']) + data_info['question_id'] = ann['question_id'] + data_info['image_id'] = ann['image_id'] + + img_path = mmengine.join_path(self.data_prefix['img_path'], + ann['image_id'] + '.jpg') + data_info['img_path'] = img_path + + data_info['question_id'] = ann['question_id'] + + if 'answers' in ann: + answers = [item for item in ann.pop('answers')] + count = Counter(answers) + answer_weight = [i / len(answers) for i in count.values()] + data_info['gt_answer'] = list(count.keys()) + data_info['gt_answer_weight'] = answer_weight + + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/transforms/__init__.py b/mmpretrain/datasets/transforms/__init__.py new file mode 100644 index 0000000..617503f --- /dev/null +++ b/mmpretrain/datasets/transforms/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmcv.transforms import (CenterCrop, LoadImageFromFile, Normalize,
+                             RandomFlip, RandomGrayscale, RandomResize, Resize)
+
+from mmpretrain.registry import TRANSFORMS
+from .auto_augment import (AutoAugment, AutoContrast, BaseAugTransform,
+                           Brightness, ColorTransform, Contrast, Cutout,
+                           Equalize, GaussianBlur, Invert, Posterize,
+                           RandAugment, Rotate, Sharpness, Shear, Solarize,
+                           SolarizeAdd, Translate)
+from .formatting import (Collect, NumpyToPIL, PackInputs, PackMultiTaskInputs,
+                         PILToNumpy, Transpose)
+from .processing import (Albumentations, BEiTMaskGenerator, CleanCaption,
+                         ColorJitter, EfficientNetCenterCrop,
+                         EfficientNetRandomCrop, Lighting,
+                         MAERandomResizedCrop, RandomCrop, RandomErasing,
+                         RandomResizedCrop,
+                         RandomResizedCropAndInterpolationWithTwoPic,
+                         RandomTranslatePad, ResizeEdge, SimMIMMaskGenerator)
+from .utils import get_transform_idx, remove_transform
+from .wrappers import ApplyToList, MultiView
+
+for t in (CenterCrop, LoadImageFromFile, Normalize, RandomFlip,
+          RandomGrayscale, RandomResize, Resize):
+    TRANSFORMS.register_module(module=t)
+
+__all__ = [
+    'NumpyToPIL', 'PILToNumpy', 'Transpose', 'Collect', 'RandomCrop',
+    'RandomResizedCrop', 'Shear', 'Translate', 'Rotate', 'Invert',
+    'ColorTransform', 'Solarize', 'Posterize', 'AutoContrast', 'Equalize',
+    'Contrast', 'Brightness', 'Sharpness', 'AutoAugment', 'SolarizeAdd',
+    'Cutout', 'RandAugment', 'Lighting', 'ColorJitter', 'RandomErasing',
+    'PackInputs', 'Albumentations', 'EfficientNetRandomCrop',
+    'EfficientNetCenterCrop', 'ResizeEdge', 'BaseAugTransform',
+    'PackMultiTaskInputs', 'GaussianBlur', 'BEiTMaskGenerator',
+    'SimMIMMaskGenerator', 'CenterCrop', 'LoadImageFromFile', 'Normalize',
+    'RandomFlip', 'RandomGrayscale', 'RandomResize', 'Resize', 'MultiView',
+    'ApplyToList', 'CleanCaption', 'RandomTranslatePad',
+    'RandomResizedCropAndInterpolationWithTwoPic', 'get_transform_idx',
+    'remove_transform', 'MAERandomResizedCrop'
+]
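Because the module above registers both the mmcv transforms and the mmpretrain-specific ones in the shared TRANSFORMS registry, a data pipeline can be assembled directly from config dicts. A minimal sketch, assuming a local image file 'demo.jpg' exists (the file name is an illustrative assumption, not part of this patch):

from mmcv.transforms import Compose

from mmpretrain.registry import TRANSFORMS

# Each step is built from the registry populated by the module above:
# LoadImageFromFile comes from mmcv, RandomResizedCrop and PackInputs from
# mmpretrain.datasets.transforms.
pipeline_cfg = [
    dict(type='LoadImageFromFile'),
    dict(type='RandomResizedCrop', scale=224),
    dict(type='PackInputs'),
]
pipeline = Compose([TRANSFORMS.build(cfg) for cfg in pipeline_cfg])

results = pipeline(dict(img_path='demo.jpg'))  # 'demo.jpg' is a placeholder
print(results['inputs'].shape)   # packed image tensor, e.g. (3, 224, 224)
print(results['data_samples'])   # DataSample carrying the meta information

In configs, the same list of dicts is normally passed as the dataset's `pipeline` argument, and the dataset builds the transforms through this registry at runtime.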

diff --git a/mmpretrain/datasets/transforms/__pycache__/__init__.cpython-310.pyc b/mmpretrain/datasets/transforms/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b16f6525360f618772288046a5718152d17d2b41
Binary files /dev/null and b/mmpretrain/datasets/transforms/__pycache__/__init__.cpython-310.pyc differ
diff --git a/mmpretrain/datasets/transforms/__pycache__/auto_augment.cpython-310.pyc b/mmpretrain/datasets/transforms/__pycache__/auto_augment.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..106001603c7d57ccaccdca236343d5d7eb25b3e7
Binary files /dev/null and b/mmpretrain/datasets/transforms/__pycache__/auto_augment.cpython-310.pyc differ
diff --git a/mmpretrain/datasets/transforms/__pycache__/processing.cpython-310.pyc b/mmpretrain/datasets/transforms/__pycache__/processing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..420b4d7d759b889b32abd76fe25805c3f4476d73
Binary files /dev/null and b/mmpretrain/datasets/transforms/__pycache__/processing.cpython-310.pyc differ
diff --git a/mmpretrain/datasets/transforms/__pycache__/utils.cpython-310.pyc b/mmpretrain/datasets/transforms/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..54544b1d10fe6222bdc37ec64a54a512f15f92df
Binary files /dev/null and b/mmpretrain/datasets/transforms/__pycache__/utils.cpython-310.pyc differ
z9;qj>{l8=$F3@Mvr^gCI#P6yd+LT)es=#d*q^d15Hl2 z5N#OIwrMZnw6hA#QD}XDX{kQ&yT@Ai>Tme?9K2=>D&3243S^{S{})Ai0?rKI$%gu> z-#LKZNOh|HzmxV{xcBRN{&cz~&1g@I9)g;Uj_w{E-n)DMFhBm~=+II4P9Yp+Lki*K ht-w6*Iz*Rz10`5g;#;4v@h2sCjE7`QKB6B_{s-5Sm@WVS literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/transforms/__pycache__/wrappers.cpython-310.pyc b/mmpretrain/datasets/transforms/__pycache__/wrappers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e62ceb563edce11f2f465f7a4732e61445b096bb GIT binary patch literal 5398 zcmcgwTaO$^6|U;;>FMdc*sP6%14IRp@Q$>M;{{^j*p`ihA&4xZ((xPMzyl zldi4#49}nc`S;QBS;qcBAI+zQj~jUPM`$FIJYs$NwIUu{eJi&6cI@;WPGfe|iru~& zxBG4L?L=PO>329&j`EeI{4rPl$U4vH?XRfL@#+iqBPLtYea@t-+#~yl_t)z2w)8OW zRpaZD@3L;^GswYqx_05+3Zf`@5UIlXX{hs}^@}7-lde^){UFflZWbhZlxA_!zLmz4 zRI4s8{JVEWm!Okv>Lze65TuxguBbwJKXUz*^CHe;VcC2cdcr+;bVb zDe}3j;z<@vCMpvUT*O2*iIjK@N-ffn2oe#-!C3WtU*Nf!jrA4NqpzBY7Ml_DaZiTB zT+GC<($e0RNGCZo6hz%CVplzys${5$ZN0MQMf}nSA1W|_Aj32ng=1m(<*9g}DluiR zUJygN4kGs#46uUWp@kelS1#O(@4@8be@sK z`Ll`YnT;O@(NyUrMkk1oPV-U>sJWDb&SiuqRsNeKGRT9AI0;OtmnGN_9xD-;?db@m zG+Tl!M>;1xF6(9|O%%=ouNFy~3n)B#Mh7TWI#;K7SB(Pfc0~~Vf&-i&eKJTe_7pn| zjJ%r<(v*DRy^**bL|SbbJvR;*CT67Ky`-6V3ONlg2vFIFOiKrbqh;i)>giA!j}uSA zC?cI_L8w*3ZOB-)z($73wSV|Dh~fGT_r8Apx_DDm(q)KAz)Jhx}o z5w-k?KeY6D8k<=&HgLuaZEMEgWqYfKcJ9tl)eoH+`>=J~o;fc#XOF&NXuP~raycT} z(}yk4c*j0b&QNa0#1Fq`nOSpLGL%og(_Jb2P=|@m0~D~r4wF2ia};)@l8n$y(V0iP z&geviOI0SFUaur4`)zy9;SC0166S-!Ww6q(qw#H*uk*9q<*wy%4}I4< z&EMe8%Y%(N5%uQFigr~=h8EV&A37j`G-I|Oqm#2SVA}6sFes7~dlp8pj$x=5WCnZY zz#OfaB>}>Yomq>1je@-yKgKxqX9TtEgTLLRDzg9x0pSNK$foG%Dp`}?E@KfIsbsAo z0a6+2Qi%&jSsDwd@Nt+<^&$l|_}wuM+Q|RJlA_{@xI0HnO3pV~)H#4WJB-KKSD^?p zEiEa{?hI41fmCWjz3en73U@kz)l}Eb&fqg6oAl2Z;8iC10jeR&n$7@?L0ulZXwOH< zCyZlcU!nO{-j_4nH`vuxd>^~CI zpYzQF-~ai%bbdy19JpmxKk&-5e#QTsccIA6U@(jVtp@{g<0Oa`dL4z7qB1BjJB77n z^w@whUB~FN@G>>Y1{lcBVZPv#QdvfAMq}V*NCx*XuUFBm@(nH!2p*|xDF{l!?pk(% zc=>}toXTlLa;y#pkETIXN!1<Y>k@p)O?MaZ&K5xW{aAIL2DT^ z4->RXZ=ms*W36wlbL*wW{g)ef%={2jOAqZ$kM5yq-X=a~BiKyZ&%M5b+d&Jr1G@e= zefMMLuzs5?Q>>V{yPMLzCioMg{iHhr!q9*~Q<2L&BNw4+aC1=M$&mwx??nN6E#F3u^`DwHwyrt9R5X! zRcw-LApw+tn^sh_26meC&?7WUh9os|mp33%PKpMXOj41qc?J^qL*!H=ClH0M#f81hC{&8eVzbh4+rkDY zC{~-6EbKtaDoK{W<1#J%KAKN?_ATl!@((4e9Iqy5geEGSL3e9o%5J-XD%R#6VWO`8rRq~j z6hQO$K;cl1?yr|8n2+{X@#jepRXC(P&^6kN{MS^k89G{^S>?Xvpl%Z2*x;UZ+NK_r zDeKpc?OO+5KJmO=&lJ?`+r*HnRd$h@v#T6U&$tcWBbv7lrI2Fgk$GzGBI-@8rf%};S((YwyOgevNBD<{K wZ-KBlRZgHrlz_k#WV3HW%sQM3?`lbQoqVpW=|JG8tySxs?b;jM{oow`7nwL__W%F@ literal 0 HcmV?d00001 diff --git a/mmpretrain/datasets/transforms/auto_augment.py b/mmpretrain/datasets/transforms/auto_augment.py new file mode 100644 index 0000000..4705d5e --- /dev/null +++ b/mmpretrain/datasets/transforms/auto_augment.py @@ -0,0 +1,1244 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import inspect +from copy import deepcopy +from math import ceil +from numbers import Number +from typing import List, Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +from mmcv.transforms import BaseTransform, Compose, RandomChoice +from mmcv.transforms.utils import cache_randomness +from mmengine.utils import is_list_of, is_seq_of +from PIL import Image, ImageFilter + +from mmpretrain.registry import TRANSFORMS + + +def merge_hparams(policy: dict, hparams: dict) -> dict: + """Merge hyperparameters into policy config. + + Only merge partial hyperparameters required of the policy. + + Args: + policy (dict): Original policy config dict. + hparams (dict): Hyperparameters need to be merged. 
+ + Returns: + dict: Policy config dict after adding ``hparams``. + """ + policy = deepcopy(policy) + op = TRANSFORMS.get(policy['type']) + assert op is not None, f'Invalid policy type "{policy["type"]}".' + + op_args = inspect.getfullargspec(op.__init__).args + for key, value in hparams.items(): + if key in op_args and key not in policy: + policy[key] = value + return policy + + +@TRANSFORMS.register_module() +class AutoAugment(RandomChoice): + """Auto augmentation. + + This data augmentation is proposed in `AutoAugment: Learning Augmentation + Policies from Data `_. + + Args: + policies (str | list[list[dict]]): The policies of auto augmentation. + If string, use preset policies collection like "imagenet". If list, + Each item is a sub policies, composed by several augmentation + policy dicts. When AutoAugment is called, a random sub policies in + ``policies`` will be selected to augment images. + hparams (dict): Configs of hyperparameters. Hyperparameters will be + used in policies that require these arguments if these arguments + are not set in policy dicts. Defaults to ``dict(pad_val=128)``. + + .. admonition:: Available preset policies + + - ``"imagenet"``: Policy for ImageNet, come from + `DeepVoltaire/AutoAugment`_ + + .. _DeepVoltaire/AutoAugment: https://github.com/DeepVoltaire/AutoAugment + """ + + def __init__(self, + policies: Union[str, List[List[dict]]], + hparams: dict = dict(pad_val=128)): + if isinstance(policies, str): + assert policies in AUTOAUG_POLICIES, 'Invalid policies, ' \ + f'please choose from {list(AUTOAUG_POLICIES.keys())}.' + policies = AUTOAUG_POLICIES[policies] + self.hparams = hparams + self.policies = [[merge_hparams(t, hparams) for t in sub] + for sub in policies] + transforms = [[TRANSFORMS.build(t) for t in sub] for sub in policies] + + super().__init__(transforms=transforms) + + def __repr__(self) -> str: + policies_str = '' + for sub in self.policies: + policies_str += '\n ' + ', \t'.join([t['type'] for t in sub]) + + repr_str = self.__class__.__name__ + repr_str += f'(policies:{policies_str}\n)' + return repr_str + + +@TRANSFORMS.register_module() +class RandAugment(BaseTransform): + r"""Random augmentation. + + This data augmentation is proposed in `RandAugment: Practical automated + data augmentation with a reduced search space + `_. + + Args: + policies (str | list[dict]): The policies of random augmentation. + If string, use preset policies collection like "timm_increasing". + If list, each item is one specific augmentation policy dict. + The policy dict shall should have these keys: + + - ``type`` (str), The type of augmentation. + - ``magnitude_range`` (Sequence[number], optional): For those + augmentation have magnitude, you need to specify the magnitude + level mapping range. For example, assume ``total_level`` is 10, + ``magnitude_level=3`` specify magnitude is 3 if + ``magnitude_range=(0, 10)`` while specify magnitude is 7 if + ``magnitude_range=(10, 0)``. + - other keyword arguments of the augmentation. + + num_policies (int): Number of policies to select from policies each + time. + magnitude_level (int | float): Magnitude level for all the augmentation + selected. + magnitude_std (Number | str): Deviation of magnitude noise applied. + + - If positive number, the magnitude obeys normal distribution + :math:`\mathcal{N}(magnitude_level, magnitude_std)`. + - If 0 or negative number, magnitude remains unchanged. + - If str "inf", the magnitude obeys uniform distribution + :math:`Uniform(min, magnitude)`. 
+ total_level (int | float): Total level for the magnitude. Defaults to + 10. + hparams (dict): Configs of hyperparameters. Hyperparameters will be + used in policies that require these arguments if these arguments + are not set in policy dicts. Defaults to ``dict(pad_val=128)``. + + .. admonition:: Available preset policies + + - ``"timm_increasing"``: The ``_RAND_INCREASING_TRANSFORMS`` policy + from `timm`_ + + .. _timm: https://github.com/rwightman/pytorch-image-models + + Examples: + + To use "timm-increasing" policies collection, select two policies every + time, and magnitude_level of every policy is 6 (total is 10 by default) + + >>> import numpy as np + >>> from mmpretrain.datasets import RandAugment + >>> transform = RandAugment( + ... policies='timm_increasing', + ... num_policies=2, + ... magnitude_level=6, + ... ) + >>> data = {'img': np.random.randint(0, 256, (224, 224, 3))} + >>> results = transform(data) + >>> print(results['img'].shape) + (224, 224, 3) + + If you want the ``magnitude_level`` randomly changes every time, you + can use ``magnitude_std`` to specify the random distribution. For + example, a normal distribution :math:`\mathcal{N}(6, 0.5)`. + + >>> transform = RandAugment( + ... policies='timm_increasing', + ... num_policies=2, + ... magnitude_level=6, + ... magnitude_std=0.5, + ... ) + + You can also use your own policies: + + >>> policies = [ + ... dict(type='AutoContrast'), + ... dict(type='Rotate', magnitude_range=(0, 30)), + ... dict(type='ColorTransform', magnitude_range=(0, 0.9)), + ... ] + >>> transform = RandAugment( + ... policies=policies, + ... num_policies=2, + ... magnitude_level=6 + ... ) + + Note: + ``magnitude_std`` will introduce some randomness to policy, modified by + https://github.com/rwightman/pytorch-image-models. + + When magnitude_std=0, we calculate the magnitude as follows: + + .. math:: + \text{magnitude} = \frac{\text{magnitude_level}} + {\text{totallevel}} \times (\text{val2} - \text{val1}) + + \text{val1} + """ + + def __init__(self, + policies: Union[str, List[dict]], + num_policies: int, + magnitude_level: int, + magnitude_std: Union[Number, str] = 0., + total_level: int = 10, + hparams: dict = dict(pad_val=128)): + if isinstance(policies, str): + assert policies in RANDAUG_POLICIES, 'Invalid policies, ' \ + f'please choose from {list(RANDAUG_POLICIES.keys())}.' + policies = RANDAUG_POLICIES[policies] + + assert is_list_of(policies, dict), 'policies must be a list of dict.' + + assert isinstance(magnitude_std, (Number, str)), \ + '`magnitude_std` must be of number or str type, ' \ + f'got {type(magnitude_std)} instead.' + if isinstance(magnitude_std, str): + assert magnitude_std == 'inf', \ + '`magnitude_std` must be of number or "inf", ' \ + f'got "{magnitude_std}" instead.' + + assert num_policies > 0, 'num_policies must be greater than 0.' + assert magnitude_level >= 0, 'magnitude_level must be no less than 0.' + assert total_level > 0, 'total_level must be greater than 0.' 
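+
+        # Worked example of the magnitude mapping described in the class
+        # docstring (illustrative numbers, not defaults): with
+        # magnitude_std=0, total_level=10 and magnitude_level=6, a policy
+        # with magnitude_range=(0, 0.9) gets 6 / 10 * (0.9 - 0) + 0 = 0.54,
+        # while Posterize with magnitude_range=(4, 0) gets
+        # 6 / 10 * (0 - 4) + 4 = 1.6 (rounded up to 2 bits).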
+ + self.num_policies = num_policies + self.magnitude_level = magnitude_level + self.magnitude_std = magnitude_std + self.total_level = total_level + self.hparams = hparams + self.policies = [] + self.transforms = [] + + randaug_cfg = dict( + magnitude_level=magnitude_level, + total_level=total_level, + magnitude_std=magnitude_std) + + for policy in policies: + self._check_policy(policy) + policy = merge_hparams(policy, hparams) + policy.pop('magnitude_key', None) # For backward compatibility + if 'magnitude_range' in policy: + policy.update(randaug_cfg) + self.policies.append(policy) + self.transforms.append(TRANSFORMS.build(policy)) + + def __iter__(self): + """Iterate all transforms.""" + return iter(self.transforms) + + def _check_policy(self, policy): + """Check whether the sub-policy dict is available.""" + assert isinstance(policy, dict) and 'type' in policy, \ + 'Each policy must be a dict with key "type".' + type_name = policy['type'] + + if 'magnitude_range' in policy: + magnitude_range = policy['magnitude_range'] + assert is_seq_of(magnitude_range, Number), \ + f'`magnitude_range` of RandAugment policy {type_name} ' \ + 'should be a sequence with two numbers.' + + @cache_randomness + def random_policy_indices(self) -> np.ndarray: + """Return the random chosen transform indices.""" + indices = np.arange(len(self.policies)) + return np.random.choice(indices, size=self.num_policies).tolist() + + def transform(self, results: dict) -> Optional[dict]: + """Randomly choose a sub-policy to apply.""" + + chosen_policies = [ + self.transforms[i] for i in self.random_policy_indices() + ] + + sub_pipeline = Compose(chosen_policies) + return sub_pipeline(results) + + def __repr__(self) -> str: + policies_str = '' + for policy in self.policies: + policies_str += '\n ' + f'{policy["type"]}' + if 'magnitude_range' in policy: + val1, val2 = policy['magnitude_range'] + policies_str += f' ({val1}, {val2})' + + repr_str = self.__class__.__name__ + repr_str += f'(num_policies={self.num_policies}, ' + repr_str += f'magnitude_level={self.magnitude_level}, ' + repr_str += f'total_level={self.total_level}, ' + repr_str += f'policies:{policies_str}\n)' + return repr_str + + +class BaseAugTransform(BaseTransform): + r"""The base class of augmentation transform for RandAugment. + + This class provides several common attributions and methods to support the + magnitude level mapping and magnitude level randomness in + :class:`RandAugment`. + + Args: + magnitude_level (int | float): Magnitude level. + magnitude_range (Sequence[number], optional): For augmentation have + magnitude argument, maybe "magnitude", "angle" or other, you can + specify the magnitude level mapping range to generate the magnitude + argument. For example, assume ``total_level`` is 10, + ``magnitude_level=3`` specify magnitude is 3 if + ``magnitude_range=(0, 10)`` while specify magnitude is 7 if + ``magnitude_range=(10, 0)``. Defaults to None. + magnitude_std (Number | str): Deviation of magnitude noise applied. + + - If positive number, the magnitude obeys normal distribution + :math:`\mathcal{N}(magnitude, magnitude_std)`. + - If 0 or negative number, magnitude remains unchanged. + - If str "inf", the magnitude obeys uniform distribution + :math:`Uniform(min, magnitude)`. + + Defaults to 0. + total_level (int | float): Total level for the magnitude. Defaults to + 10. + prob (float): The probability for performing transformation therefore + should be in range [0, 1]. Defaults to 0.5. 
+ random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0. + """ + + def __init__(self, + magnitude_level: int = 10, + magnitude_range: Tuple[float, float] = None, + magnitude_std: Union[str, float] = 0., + total_level: int = 10, + prob: float = 0.5, + random_negative_prob: float = 0.5): + self.magnitude_level = magnitude_level + self.magnitude_range = magnitude_range + self.magnitude_std = magnitude_std + self.total_level = total_level + self.prob = prob + self.random_negative_prob = random_negative_prob + + @cache_randomness + def random_disable(self): + """Randomly disable the transform.""" + return np.random.rand() > self.prob + + @cache_randomness + def random_magnitude(self): + """Randomly generate magnitude.""" + magnitude = self.magnitude_level + # if magnitude_std is positive number or 'inf', move + # magnitude_value randomly. + if self.magnitude_std == 'inf': + magnitude = np.random.uniform(0, magnitude) + elif self.magnitude_std > 0: + magnitude = np.random.normal(magnitude, self.magnitude_std) + magnitude = np.clip(magnitude, 0, self.total_level) + + val1, val2 = self.magnitude_range + magnitude = (magnitude / self.total_level) * (val2 - val1) + val1 + return magnitude + + @cache_randomness + def random_negative(self, value): + """Randomly negative the value.""" + if np.random.rand() < self.random_negative_prob: + return -value + else: + return value + + def extra_repr(self): + """Extra repr string when auto-generating magnitude is enabled.""" + if self.magnitude_range is not None: + repr_str = f', magnitude_level={self.magnitude_level}, ' + repr_str += f'magnitude_range={self.magnitude_range}, ' + repr_str += f'magnitude_std={self.magnitude_std}, ' + repr_str += f'total_level={self.total_level}, ' + return repr_str + else: + return '' + + +@TRANSFORMS.register_module() +class Shear(BaseAugTransform): + """Shear images. + + Args: + magnitude (int | float | None): The magnitude used for shear. If None, + generate from ``magnitude_range``, see :class:`BaseAugTransform`. + Defaults to None. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If a sequence of length 3, it is used to pad_val R, G, B channels + respectively. Defaults to 128. + prob (float): The probability for performing shear therefore should be + in range [0, 1]. Defaults to 0.5. + direction (str): The shearing direction. Options are 'horizontal' and + 'vertical'. Defaults to 'horizontal'. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + interpolation (str): Interpolation method. Options are 'nearest', + 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'bicubic'. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. + """ + + def __init__(self, + magnitude: Union[int, float, None] = None, + pad_val: Union[int, Sequence[int]] = 128, + prob: float = 0.5, + direction: str = 'horizontal', + random_negative_prob: float = 0.5, + interpolation: str = 'bicubic', + **kwargs): + super().__init__( + prob=prob, random_negative_prob=random_negative_prob, **kwargs) + assert (magnitude is None) ^ (self.magnitude_range is None), \ + 'Please specify only one of `magnitude` and `magnitude_range`.' 
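+        # `magnitude` gives a fixed shear value (as in the AutoAugment
+        # policies at the end of this file), while `magnitude_range` lets
+        # RandAugment derive the value from `magnitude_level`; exactly one
+        # of the two may be set.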
+ + self.magnitude = magnitude + if isinstance(pad_val, Sequence): + self.pad_val = tuple(pad_val) + else: + self.pad_val = pad_val + + assert direction in ('horizontal', 'vertical'), 'direction must be ' \ + f'either "horizontal" or "vertical", got "{direction}" instead.' + self.direction = direction + + self.interpolation = interpolation + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + if self.magnitude is not None: + magnitude = self.random_negative(self.magnitude) + else: + magnitude = self.random_negative(self.random_magnitude()) + + img = results['img'] + img_sheared = mmcv.imshear( + img, + magnitude, + direction=self.direction, + border_value=self.pad_val, + interpolation=self.interpolation) + results['img'] = img_sheared.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'direction={self.direction}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation}{self.extra_repr()})' + return repr_str + + +@TRANSFORMS.register_module() +class Translate(BaseAugTransform): + """Translate images. + + Args: + magnitude (int | float | None): The magnitude used for translate. Note + that the offset is calculated by magnitude * size in the + corresponding direction. With a magnitude of 1, the whole image + will be moved out of the range. If None, generate from + ``magnitude_range``, see :class:`BaseAugTransform`. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If a sequence of length 3, it is used to pad_val R, G, B channels + respectively. Defaults to 128. + prob (float): The probability for performing translate therefore should + be in range [0, 1]. Defaults to 0.5. + direction (str): The translating direction. Options are 'horizontal' + and 'vertical'. Defaults to 'horizontal'. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + interpolation (str): Interpolation method. Options are 'nearest', + 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'nearest'. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. + """ + + def __init__(self, + magnitude: Union[int, float, None] = None, + pad_val: Union[int, Sequence[int]] = 128, + prob: float = 0.5, + direction: str = 'horizontal', + random_negative_prob: float = 0.5, + interpolation: str = 'nearest', + **kwargs): + super().__init__( + prob=prob, random_negative_prob=random_negative_prob, **kwargs) + assert (magnitude is None) ^ (self.magnitude_range is None), \ + 'Please specify only one of `magnitude` and `magnitude_range`.' + + self.magnitude = magnitude + if isinstance(pad_val, Sequence): + self.pad_val = tuple(pad_val) + else: + self.pad_val = pad_val + + assert direction in ('horizontal', 'vertical'), 'direction must be ' \ + f'either "horizontal" or "vertical", got "{direction}" instead.' 
+ self.direction = direction + + self.interpolation = interpolation + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + if self.magnitude is not None: + magnitude = self.random_negative(self.magnitude) + else: + magnitude = self.random_negative(self.random_magnitude()) + + img = results['img'] + height, width = img.shape[:2] + if self.direction == 'horizontal': + offset = magnitude * width + else: + offset = magnitude * height + img_translated = mmcv.imtranslate( + img, + offset, + direction=self.direction, + border_value=self.pad_val, + interpolation=self.interpolation) + results['img'] = img_translated.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'direction={self.direction}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation}{self.extra_repr()})' + return repr_str + + +@TRANSFORMS.register_module() +class Rotate(BaseAugTransform): + """Rotate images. + + Args: + angle (float, optional): The angle used for rotate. Positive values + stand for clockwise rotation. If None, generate from + ``magnitude_range``, see :class:`BaseAugTransform`. + Defaults to None. + center (tuple[float], optional): Center point (w, h) of the rotation in + the source image. If None, the center of the image will be used. + Defaults to None. + scale (float): Isotropic scale factor. Defaults to 1.0. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If a sequence of length 3, it is used to pad_val R, G, B channels + respectively. Defaults to 128. + prob (float): The probability for performing rotate therefore should be + in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the angle + negative, which should be in range [0,1]. Defaults to 0.5. + interpolation (str): Interpolation method. Options are 'nearest', + 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to 'nearest'. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. + """ + + def __init__(self, + angle: Optional[float] = None, + center: Optional[Tuple[float]] = None, + scale: float = 1.0, + pad_val: Union[int, Sequence[int]] = 128, + prob: float = 0.5, + random_negative_prob: float = 0.5, + interpolation: str = 'nearest', + **kwargs): + super().__init__( + prob=prob, random_negative_prob=random_negative_prob, **kwargs) + assert (angle is None) ^ (self.magnitude_range is None), \ + 'Please specify only one of `angle` and `magnitude_range`.' 
+ + self.angle = angle + self.center = center + self.scale = scale + if isinstance(pad_val, Sequence): + self.pad_val = tuple(pad_val) + else: + self.pad_val = pad_val + + self.interpolation = interpolation + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + if self.angle is not None: + angle = self.random_negative(self.angle) + else: + angle = self.random_negative(self.random_magnitude()) + + img = results['img'] + img_rotated = mmcv.imrotate( + img, + angle, + center=self.center, + scale=self.scale, + border_value=self.pad_val, + interpolation=self.interpolation) + results['img'] = img_rotated.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(angle={self.angle}, ' + repr_str += f'center={self.center}, ' + repr_str += f'scale={self.scale}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}, ' + repr_str += f'interpolation={self.interpolation}{self.extra_repr()})' + return repr_str + + +@TRANSFORMS.register_module() +class AutoContrast(BaseAugTransform): + """Auto adjust image contrast. + + Args: + prob (float): The probability for performing auto contrast + therefore should be in range [0, 1]. Defaults to 0.5. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. + """ + + def __init__(self, prob: float = 0.5, **kwargs): + super().__init__(prob=prob, **kwargs) + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + img = results['img'] + img_contrasted = mmcv.auto_contrast(img) + results['img'] = img_contrasted.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@TRANSFORMS.register_module() +class Invert(BaseAugTransform): + """Invert images. + + Args: + prob (float): The probability for performing invert therefore should + be in range [0, 1]. Defaults to 0.5. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. + """ + + def __init__(self, prob: float = 0.5, **kwargs): + super().__init__(prob=prob, **kwargs) + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + img = results['img'] + img_inverted = mmcv.iminvert(img) + results['img'] = img_inverted.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@TRANSFORMS.register_module() +class Equalize(BaseAugTransform): + """Equalize the image histogram. + + Args: + prob (float): The probability for performing equalize therefore should + be in range [0, 1]. Defaults to 0.5. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. + """ + + def __init__(self, prob: float = 0.5, **kwargs): + super().__init__(prob=prob, **kwargs) + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + img = results['img'] + img_equalized = mmcv.imequalize(img) + results['img'] = img_equalized.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(prob={self.prob})' + return repr_str + + +@TRANSFORMS.register_module() +class Solarize(BaseAugTransform): + """Solarize images (invert all pixel values above a threshold). 
+ + Args: + thr (int | float | None): The threshold above which the pixels value + will be inverted. If None, generate from ``magnitude_range``, + see :class:`BaseAugTransform`. Defaults to None. + prob (float): The probability for solarizing therefore should be in + range [0, 1]. Defaults to 0.5. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. + """ + + def __init__(self, + thr: Union[int, float, None] = None, + prob: float = 0.5, + **kwargs): + super().__init__(prob=prob, random_negative_prob=0., **kwargs) + assert (thr is None) ^ (self.magnitude_range is None), \ + 'Please specify only one of `thr` and `magnitude_range`.' + + self.thr = thr + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + if self.thr is not None: + thr = self.thr + else: + thr = self.random_magnitude() + + img = results['img'] + img_solarized = mmcv.solarize(img, thr=thr) + results['img'] = img_solarized.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(thr={self.thr}, ' + repr_str += f'prob={self.prob}{self.extra_repr()}))' + return repr_str + + +@TRANSFORMS.register_module() +class SolarizeAdd(BaseAugTransform): + """SolarizeAdd images (add a certain value to pixels below a threshold). + + Args: + magnitude (int | float | None): The value to be added to pixels below + the thr. If None, generate from ``magnitude_range``, see + :class:`BaseAugTransform`. Defaults to None. + thr (int | float): The threshold below which the pixels value will be + adjusted. + prob (float): The probability for solarizing therefore should be in + range [0, 1]. Defaults to 0.5. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. + """ + + def __init__(self, + magnitude: Union[int, float, None] = None, + thr: Union[int, float] = 128, + prob: float = 0.5, + **kwargs): + super().__init__(prob=prob, random_negative_prob=0., **kwargs) + assert (magnitude is None) ^ (self.magnitude_range is None), \ + 'Please specify only one of `magnitude` and `magnitude_range`.' + + self.magnitude = magnitude + + assert isinstance(thr, (int, float)), 'The thr type must '\ + f'be int or float, but got {type(thr)} instead.' + self.thr = thr + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + if self.magnitude is not None: + magnitude = self.magnitude + else: + magnitude = self.random_magnitude() + + img = results['img'] + img_solarized = np.where(img < self.thr, + np.minimum(img + magnitude, 255), img) + results['img'] = img_solarized.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'thr={self.thr}, ' + repr_str += f'prob={self.prob}{self.extra_repr()})' + return repr_str + + +@TRANSFORMS.register_module() +class Posterize(BaseAugTransform): + """Posterize images (reduce the number of bits for each color channel). + + Args: + bits (int, optional): Number of bits for each pixel in the output img, + which should be less or equal to 8. If None, generate from + ``magnitude_range``, see :class:`BaseAugTransform`. + Defaults to None. + prob (float): The probability for posterizing therefore should be in + range [0, 1]. Defaults to 0.5. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. 
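+
+    Examples:
+
+        A minimal sketch, assuming the transform is importable from
+        ``mmpretrain.datasets`` as in the :class:`RandAugment` example
+        above:
+
+        >>> import numpy as np
+        >>> from mmpretrain.datasets import Posterize
+        >>> transform = Posterize(bits=4, prob=1.)
+        >>> results = {'img': np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)}
+        >>> results = transform(results)
+        >>> results['img'].shape
+        (224, 224, 3)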
+ """ + + def __init__(self, + bits: Optional[int] = None, + prob: float = 0.5, + **kwargs): + super().__init__(prob=prob, random_negative_prob=0., **kwargs) + assert (bits is None) ^ (self.magnitude_range is None), \ + 'Please specify only one of `bits` and `magnitude_range`.' + + if bits is not None: + assert bits <= 8, \ + f'The bits must be less than 8, got {bits} instead.' + self.bits = bits + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + if self.bits is not None: + bits = self.bits + else: + bits = self.random_magnitude() + + # To align timm version, we need to round up to integer here. + bits = ceil(bits) + + img = results['img'] + img_posterized = mmcv.posterize(img, bits=bits) + results['img'] = img_posterized.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(bits={self.bits}, ' + repr_str += f'prob={self.prob}{self.extra_repr()})' + return repr_str + + +@TRANSFORMS.register_module() +class Contrast(BaseAugTransform): + """Adjust images contrast. + + Args: + magnitude (int | float | None): The magnitude used for adjusting + contrast. A positive magnitude would enhance the contrast and + a negative magnitude would make the image grayer. A magnitude=0 + gives the origin img. If None, generate from ``magnitude_range``, + see :class:`BaseAugTransform`. Defaults to None. + prob (float): The probability for performing contrast adjusting + therefore should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + """ + + def __init__(self, + magnitude: Union[int, float, None] = None, + prob: float = 0.5, + random_negative_prob: float = 0.5, + **kwargs): + super().__init__( + prob=prob, random_negative_prob=random_negative_prob, **kwargs) + assert (magnitude is None) ^ (self.magnitude_range is None), \ + 'Please specify only one of `magnitude` and `magnitude_range`.' + + self.magnitude = magnitude + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + if self.magnitude is not None: + magnitude = self.random_negative(self.magnitude) + else: + magnitude = self.random_negative(self.random_magnitude()) + + img = results['img'] + img_contrasted = mmcv.adjust_contrast(img, factor=1 + magnitude) + results['img'] = img_contrasted.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}' + repr_str += f'{self.extra_repr()})' + return repr_str + + +@TRANSFORMS.register_module() +class ColorTransform(BaseAugTransform): + """Adjust images color balance. + + Args: + magnitude (int | float | None): The magnitude used for color transform. + A positive magnitude would enhance the color and a negative + magnitude would make the image grayer. A magnitude=0 gives the + origin img. If None, generate from ``magnitude_range``, see + :class:`BaseAugTransform`. Defaults to None. + prob (float): The probability for performing ColorTransform therefore + should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. 
+ """ + + def __init__(self, + magnitude: Union[int, float, None] = None, + prob: float = 0.5, + random_negative_prob: float = 0.5, + **kwargs): + super().__init__( + prob=prob, random_negative_prob=random_negative_prob, **kwargs) + assert (magnitude is None) ^ (self.magnitude_range is None), \ + 'Please specify only one of `magnitude` and `magnitude_range`.' + + self.magnitude = magnitude + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + if self.magnitude is not None: + magnitude = self.random_negative(self.magnitude) + else: + magnitude = self.random_negative(self.random_magnitude()) + + img = results['img'] + img_color_adjusted = mmcv.adjust_color(img, alpha=1 + magnitude) + results['img'] = img_color_adjusted.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}' + repr_str += f'{self.extra_repr()})' + return repr_str + + +@TRANSFORMS.register_module() +class Brightness(BaseAugTransform): + """Adjust images brightness. + + Args: + magnitude (int | float | None): The magnitude used for adjusting + brightness. A positive magnitude would enhance the brightness and a + negative magnitude would make the image darker. A magnitude=0 gives + the origin img. If None, generate from ``magnitude_range``, see + :class:`BaseAugTransform`. Defaults to None. + prob (float): The probability for performing brightness adjusting + therefore should be in range [0, 1]. Defaults to 0.5. + random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. + """ + + def __init__(self, + magnitude: Union[int, float, None] = None, + prob: float = 0.5, + random_negative_prob: float = 0.5, + **kwargs): + super().__init__( + prob=prob, random_negative_prob=random_negative_prob, **kwargs) + assert (magnitude is None) ^ (self.magnitude_range is None), \ + 'Please specify only one of `magnitude` and `magnitude_range`.' + + self.magnitude = magnitude + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + if self.magnitude is not None: + magnitude = self.random_negative(self.magnitude) + else: + magnitude = self.random_negative(self.random_magnitude()) + + img = results['img'] + img_brightened = mmcv.adjust_brightness(img, factor=1 + magnitude) + results['img'] = img_brightened.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}' + repr_str += f'{self.extra_repr()})' + return repr_str + + +@TRANSFORMS.register_module() +class Sharpness(BaseAugTransform): + """Adjust images sharpness. + + Args: + magnitude (int | float | None): The magnitude used for adjusting + sharpness. A positive magnitude would enhance the sharpness and a + negative magnitude would make the image bulr. A magnitude=0 gives + the origin img. If None, generate from ``magnitude_range``, see + :class:`BaseAugTransform`. Defaults to None. + prob (float): The probability for performing sharpness adjusting + therefore should be in range [0, 1]. Defaults to 0.5. 
+ random_negative_prob (float): The probability that turns the magnitude + negative, which should be in range [0,1]. Defaults to 0.5. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. + """ + + def __init__(self, + magnitude: Union[int, float, None] = None, + prob: float = 0.5, + random_negative_prob: float = 0.5, + **kwargs): + super().__init__( + prob=prob, random_negative_prob=random_negative_prob, **kwargs) + assert (magnitude is None) ^ (self.magnitude_range is None), \ + 'Please specify only one of `magnitude` and `magnitude_range`.' + + self.magnitude = magnitude + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + if self.magnitude is not None: + magnitude = self.random_negative(self.magnitude) + else: + magnitude = self.random_negative(self.random_magnitude()) + + img = results['img'] + img_sharpened = mmcv.adjust_sharpness(img, factor=1 + magnitude) + results['img'] = img_sharpened.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(magnitude={self.magnitude}, ' + repr_str += f'prob={self.prob}, ' + repr_str += f'random_negative_prob={self.random_negative_prob}' + repr_str += f'{self.extra_repr()})' + return repr_str + + +@TRANSFORMS.register_module() +class Cutout(BaseAugTransform): + """Cutout images. + + Args: + shape (int | tuple(int) | None): Expected cutout shape (h, w). + If given as a single value, the value will be used for both h and + w. If None, generate from ``magnitude_range``, see + :class:`BaseAugTransform`. Defaults to None. + pad_val (int, Sequence[int]): Pixel pad_val value for constant fill. + If it is a sequence, it must have the same length with the image + channels. Defaults to 128. + prob (float): The probability for performing cutout therefore should + be in range [0, 1]. Defaults to 0.5. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. + """ + + def __init__(self, + shape: Union[int, Tuple[int], None] = None, + pad_val: Union[int, Sequence[int]] = 128, + prob: float = 0.5, + **kwargs): + super().__init__(prob=prob, random_negative_prob=0., **kwargs) + assert (shape is None) ^ (self.magnitude_range is None), \ + 'Please specify only one of `shape` and `magnitude_range`.' + + self.shape = shape + if isinstance(pad_val, Sequence): + self.pad_val = tuple(pad_val) + else: + self.pad_val = pad_val + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + if self.shape is not None: + shape = self.shape + else: + shape = int(self.random_magnitude()) + + img = results['img'] + img_cutout = mmcv.cutout(img, shape, pad_val=self.pad_val) + results['img'] = img_cutout.astype(img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(shape={self.shape}, ' + repr_str += f'pad_val={self.pad_val}, ' + repr_str += f'prob={self.prob}{self.extra_repr()})' + return repr_str + + +@TRANSFORMS.register_module() +class GaussianBlur(BaseAugTransform): + """Gaussian blur images. + + Args: + radius (int, float, optional): The blur radius. If None, generate from + ``magnitude_range``, see :class:`BaseAugTransform`. + Defaults to None. + prob (float): The probability for posterizing therefore should be in + range [0, 1]. Defaults to 0.5. + **kwargs: Other keyword arguments of :class:`BaseAugTransform`. 
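+
+    Examples:
+
+        A minimal sketch, assuming the transform is importable from
+        ``mmpretrain.datasets`` as in the :class:`RandAugment` example
+        above:
+
+        >>> import numpy as np
+        >>> from mmpretrain.datasets import GaussianBlur
+        >>> transform = GaussianBlur(radius=2, prob=1.)
+        >>> results = {'img': np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)}
+        >>> results = transform(results)
+        >>> results['img'].dtype
+        dtype('uint8')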
+ """ + + def __init__(self, + radius: Union[int, float, None] = None, + prob: float = 0.5, + **kwargs): + super().__init__(prob=prob, random_negative_prob=0., **kwargs) + assert (radius is None) ^ (self.magnitude_range is None), \ + 'Please specify only one of `radius` and `magnitude_range`.' + + self.radius = radius + + def transform(self, results): + """Apply transform to results.""" + if self.random_disable(): + return results + + if self.radius is not None: + radius = self.radius + else: + radius = self.random_magnitude() + + img = results['img'] + pil_img = Image.fromarray(img) + pil_img = pil_img.filter(ImageFilter.GaussianBlur(radius=radius)) + results['img'] = np.array(pil_img, dtype=img.dtype) + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(radius={self.radius}, ' + repr_str += f'prob={self.prob}{self.extra_repr()})' + return repr_str + + +# yapf: disable +# flake8: noqa +AUTOAUG_POLICIES = { + # Policy for ImageNet, refers to + # https://github.com/DeepVoltaire/AutoAugment/blame/master/autoaugment.py + 'imagenet': [ + [dict(type='Posterize', bits=4, prob=0.4), dict(type='Rotate', angle=30., prob=0.6)], + [dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), dict(type='AutoContrast', prob=0.6)], + [dict(type='Equalize', prob=0.8), dict(type='Equalize', prob=0.6)], + [dict(type='Posterize', bits=5, prob=0.6), dict(type='Posterize', bits=5, prob=0.6)], + [dict(type='Equalize', prob=0.4), dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)], + [dict(type='Equalize', prob=0.4), dict(type='Rotate', angle=30 / 9 * 8, prob=0.8)], + [dict(type='Solarize', thr=256 / 9 * 6, prob=0.6), dict(type='Equalize', prob=0.6)], + [dict(type='Posterize', bits=6, prob=0.8), dict(type='Equalize', prob=1.)], + [dict(type='Rotate', angle=10., prob=0.2), dict(type='Solarize', thr=256 / 9, prob=0.6)], + [dict(type='Equalize', prob=0.6), dict(type='Posterize', bits=5, prob=0.4)], + [dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), dict(type='ColorTransform', magnitude=0., prob=0.4)], + [dict(type='Rotate', angle=30., prob=0.4), dict(type='Equalize', prob=0.6)], + [dict(type='Equalize', prob=0.0), dict(type='Equalize', prob=0.8)], + [dict(type='Invert', prob=0.6), dict(type='Equalize', prob=1.)], + [dict(type='ColorTransform', magnitude=0.4, prob=0.6), dict(type='Contrast', magnitude=0.8, prob=1.)], + [dict(type='Rotate', angle=30 / 9 * 8, prob=0.8), dict(type='ColorTransform', magnitude=0.2, prob=1.)], + [dict(type='ColorTransform', magnitude=0.8, prob=0.8), dict(type='Solarize', thr=256 / 9 * 2, prob=0.8)], + [dict(type='Sharpness', magnitude=0.7, prob=0.4), dict(type='Invert', prob=0.6)], + [dict(type='Shear', magnitude=0.3 / 9 * 5, prob=0.6, direction='horizontal'), dict(type='Equalize', prob=1.)], + [dict(type='ColorTransform', magnitude=0., prob=0.4), dict(type='Equalize', prob=0.6)], + [dict(type='Equalize', prob=0.4), dict(type='Solarize', thr=256 / 9 * 5, prob=0.2)], + [dict(type='Solarize', thr=256 / 9 * 4, prob=0.6), dict(type='AutoContrast', prob=0.6)], + [dict(type='Invert', prob=0.6), dict(type='Equalize', prob=1.)], + [dict(type='ColorTransform', magnitude=0.4, prob=0.6), dict(type='Contrast', magnitude=0.8, prob=1.)], + [dict(type='Equalize', prob=0.8), dict(type='Equalize', prob=0.6)], + ], +} + +RANDAUG_POLICIES = { + # Refers to `_RAND_INCREASING_TRANSFORMS` in pytorch-image-models + 'timm_increasing': [ + dict(type='AutoContrast'), + dict(type='Equalize'), + dict(type='Invert'), + dict(type='Rotate', magnitude_range=(0, 30)), + 
dict(type='Posterize', magnitude_range=(4, 0)), + dict(type='Solarize', magnitude_range=(256, 0)), + dict(type='SolarizeAdd', magnitude_range=(0, 110)), + dict(type='ColorTransform', magnitude_range=(0, 0.9)), + dict(type='Contrast', magnitude_range=(0, 0.9)), + dict(type='Brightness', magnitude_range=(0, 0.9)), + dict(type='Sharpness', magnitude_range=(0, 0.9)), + dict(type='Shear', magnitude_range=(0, 0.3), direction='horizontal'), + dict(type='Shear', magnitude_range=(0, 0.3), direction='vertical'), + dict(type='Translate', magnitude_range=(0, 0.45), direction='horizontal'), + dict(type='Translate', magnitude_range=(0, 0.45), direction='vertical'), + ], + 'simple_increasing': [ + dict(type='AutoContrast'), + dict(type='Equalize'), + dict(type='Rotate', magnitude_range=(0, 30)), + dict(type='Shear', magnitude_range=(0, 0.3), direction='horizontal'), + dict(type='Shear', magnitude_range=(0, 0.3), direction='vertical'), + ], +} diff --git a/mmpretrain/datasets/transforms/formatting.py b/mmpretrain/datasets/transforms/formatting.py new file mode 100644 index 0000000..e4d3316 --- /dev/null +++ b/mmpretrain/datasets/transforms/formatting.py @@ -0,0 +1,353 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import defaultdict +from collections.abc import Sequence + +import cv2 +import numpy as np +import torch +import torchvision.transforms.functional as F +from mmcv.transforms import BaseTransform +from mmengine.utils import is_str +from PIL import Image + +from mmpretrain.registry import TRANSFORMS +from mmpretrain.structures import DataSample, MultiTaskDataSample + + +def to_tensor(data): + """Convert objects of various python types to :obj:`torch.Tensor`. + + Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, + :class:`Sequence`, :class:`int` and :class:`float`. + """ + if isinstance(data, torch.Tensor): + return data + elif isinstance(data, np.ndarray): + return torch.from_numpy(data) + elif isinstance(data, Sequence) and not is_str(data): + return torch.tensor(data) + elif isinstance(data, int): + return torch.LongTensor([data]) + elif isinstance(data, float): + return torch.FloatTensor([data]) + else: + raise TypeError( + f'Type {type(data)} cannot be converted to tensor.' + 'Supported types are: `numpy.ndarray`, `torch.Tensor`, ' + '`Sequence`, `int` and `float`') + + +@TRANSFORMS.register_module() +class PackInputs(BaseTransform): + """Pack the inputs data. + + **Required Keys:** + + - ``input_key`` + - ``*algorithm_keys`` + - ``*meta_keys`` + + **Deleted Keys:** + + All other keys in the dict. + + **Added Keys:** + + - inputs (:obj:`torch.Tensor`): The forward data of models. + - data_samples (:obj:`~mmpretrain.structures.DataSample`): The + annotation info of the sample. + + Args: + input_key (str): The key of element to feed into the model forwarding. + Defaults to 'img'. + algorithm_keys (Sequence[str]): The keys of custom elements to be used + in the algorithm. Defaults to an empty tuple. + meta_keys (Sequence[str]): The keys of meta information to be saved in + the data sample. Defaults to :attr:`PackInputs.DEFAULT_META_KEYS`. + + .. admonition:: Default algorithm keys + + Besides the specified ``algorithm_keys``, we will set some default keys + into the output data sample and do some formatting. Therefore, you + don't need to set these keys in the ``algorithm_keys``. + + - ``gt_label``: The ground-truth label. The value will be converted + into a 1-D tensor. + - ``gt_score``: The ground-truth score. 
The value will be converted + into a 1-D tensor. + - ``mask``: The mask for some self-supervise tasks. The value will + be converted into a tensor. + + .. admonition:: Default meta keys + + - ``sample_idx``: The id of the image sample. + - ``img_path``: The path to the image file. + - ``ori_shape``: The original shape of the image as a tuple (H, W). + - ``img_shape``: The shape of the image after the pipeline as a + tuple (H, W). + - ``scale_factor``: The scale factor between the resized image and + the original image. + - ``flip``: A boolean indicating if image flip transform was used. + - ``flip_direction``: The flipping direction. + """ + + DEFAULT_META_KEYS = ('sample_idx', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor', 'flip', 'flip_direction') + + def __init__(self, + input_key='img', + algorithm_keys=(), + meta_keys=DEFAULT_META_KEYS): + self.input_key = input_key + self.algorithm_keys = algorithm_keys + self.meta_keys = meta_keys + + @staticmethod + def format_input(input_): + if isinstance(input_, list): + return [PackInputs.format_input(item) for item in input_] + elif isinstance(input_, np.ndarray): + if input_.ndim == 2: # For grayscale image. + input_ = np.expand_dims(input_, -1) + if input_.ndim == 3 and not input_.flags.c_contiguous: + input_ = np.ascontiguousarray(input_.transpose(2, 0, 1)) + input_ = to_tensor(input_) + elif input_.ndim == 3: + # convert to tensor first to accelerate, see + # https://github.com/open-mmlab/mmdetection/pull/9533 + input_ = to_tensor(input_).permute(2, 0, 1).contiguous() + else: + # convert input with other shape to tensor without permute, + # like video input (num_crops, C, T, H, W). + input_ = to_tensor(input_) + elif isinstance(input_, Image.Image): + input_ = F.pil_to_tensor(input_) + elif not isinstance(input_, torch.Tensor): + raise TypeError(f'Unsupported input type {type(input_)}.') + + return input_ + + def transform(self, results: dict) -> dict: + """Method to pack the input data.""" + + packed_results = dict() + if self.input_key in results: + input_ = results[self.input_key] + packed_results['inputs'] = self.format_input(input_) + + data_sample = DataSample() + + # Set default keys + if 'gt_label' in results: + data_sample.set_gt_label(results['gt_label']) + if 'gt_score' in results: + data_sample.set_gt_score(results['gt_score']) + if 'mask' in results: + data_sample.set_mask(results['mask']) + + # Set custom algorithm keys + for key in self.algorithm_keys: + if key in results: + data_sample.set_field(results[key], key) + + # Set meta keys + for key in self.meta_keys: + if key in results: + data_sample.set_field(results[key], key, field_type='metainfo') + + packed_results['data_samples'] = data_sample + return packed_results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f"(input_key='{self.input_key}', " + repr_str += f'algorithm_keys={self.algorithm_keys}, ' + repr_str += f'meta_keys={self.meta_keys})' + return repr_str + + +@TRANSFORMS.register_module() +class PackMultiTaskInputs(BaseTransform): + """Convert all image labels of multi-task dataset to a dict of tensor. 
+ + Args: + multi_task_fields (Sequence[str]): + input_key (str): + task_handlers (dict): + """ + + def __init__(self, + multi_task_fields, + input_key='img', + task_handlers=dict()): + self.multi_task_fields = multi_task_fields + self.input_key = input_key + self.task_handlers = defaultdict(PackInputs) + for task_name, task_handler in task_handlers.items(): + self.task_handlers[task_name] = TRANSFORMS.build(task_handler) + + def transform(self, results: dict) -> dict: + """Method to pack the input data. + + result = {'img_path': 'a.png', 'gt_label': {'task1': 1, 'task3': 3}, + 'img': array([[[ 0, 0, 0]) + """ + packed_results = dict() + results = results.copy() + + if self.input_key in results: + input_ = results[self.input_key] + packed_results['inputs'] = PackInputs.format_input(input_) + + task_results = defaultdict(dict) + for field in self.multi_task_fields: + if field in results: + value = results.pop(field) + for k, v in value.items(): + task_results[k].update({field: v}) + + data_sample = MultiTaskDataSample() + for task_name, task_result in task_results.items(): + task_handler = self.task_handlers[task_name] + task_pack_result = task_handler({**results, **task_result}) + data_sample.set_field(task_pack_result['data_samples'], task_name) + + packed_results['data_samples'] = data_sample + return packed_results + + def __repr__(self): + repr = self.__class__.__name__ + task_handlers = ', '.join( + f"'{name}': {handler.__class__.__name__}" + for name, handler in self.task_handlers.items()) + repr += f'(multi_task_fields={self.multi_task_fields}, ' + repr += f"input_key='{self.input_key}', " + repr += f'task_handlers={{{task_handlers}}})' + return repr + + +@TRANSFORMS.register_module() +class Transpose(BaseTransform): + """Transpose numpy array. + + **Required Keys:** + + - ``*keys`` + + **Modified Keys:** + + - ``*keys`` + + Args: + keys (List[str]): The fields to convert to tensor. + order (List[int]): The output dimensions order. + """ + + def __init__(self, keys, order): + self.keys = keys + self.order = order + + def transform(self, results): + """Method to transpose array.""" + for key in self.keys: + results[key] = results[key].transpose(self.order) + return results + + def __repr__(self): + return self.__class__.__name__ + \ + f'(keys={self.keys}, order={self.order})' + + +@TRANSFORMS.register_module(('NumpyToPIL', 'ToPIL')) +class NumpyToPIL(BaseTransform): + """Convert the image from OpenCV format to :obj:`PIL.Image.Image`. + + **Required Keys:** + + - ``img`` + + **Modified Keys:** + + - ``img`` + + Args: + to_rgb (bool): Whether to convert img to rgb. Defaults to True. + """ + + def __init__(self, to_rgb: bool = False) -> None: + self.to_rgb = to_rgb + + def transform(self, results: dict) -> dict: + """Method to convert images to :obj:`PIL.Image.Image`.""" + img = results['img'] + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) if self.to_rgb else img + + results['img'] = Image.fromarray(img) + return results + + def __repr__(self) -> str: + return self.__class__.__name__ + f'(to_rgb={self.to_rgb})' + + +@TRANSFORMS.register_module(('PILToNumpy', 'ToNumpy')) +class PILToNumpy(BaseTransform): + """Convert img to :obj:`numpy.ndarray`. + + **Required Keys:** + + - ``img`` + + **Modified Keys:** + + - ``img`` + + Args: + to_bgr (bool): Whether to convert img to rgb. Defaults to True. + dtype (str, optional): The dtype of the converted numpy array. + Defaults to None. 
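+
+    Examples:
+
+        A minimal sketch of round-tripping between the two formats,
+        assuming both transforms are importable from
+        ``mmpretrain.datasets``:
+
+        >>> import numpy as np
+        >>> from mmpretrain.datasets import NumpyToPIL, PILToNumpy
+        >>> results = {'img': np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)}
+        >>> results = NumpyToPIL(to_rgb=True)(results)
+        >>> results = PILToNumpy(to_bgr=True, dtype='uint8')(results)
+        >>> results['img'].shape
+        (224, 224, 3)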
+ """ + + def __init__(self, to_bgr: bool = False, dtype=None) -> None: + self.to_bgr = to_bgr + self.dtype = dtype + + def transform(self, results: dict) -> dict: + """Method to convert img to :obj:`numpy.ndarray`.""" + img = np.array(results['img'], dtype=self.dtype) + img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) if self.to_bgr else img + + results['img'] = img + return results + + def __repr__(self) -> str: + return self.__class__.__name__ + \ + f'(to_bgr={self.to_bgr}, dtype={self.dtype})' + + +@TRANSFORMS.register_module() +class Collect(BaseTransform): + """Collect and only reserve the specified fields. + + **Required Keys:** + + - ``*keys`` + + **Deleted Keys:** + + All keys except those in the argument ``*keys``. + + Args: + keys (Sequence[str]): The keys of the fields to be collected. + """ + + def __init__(self, keys): + self.keys = keys + + def transform(self, results): + data = {} + for key in self.keys: + data[key] = results[key] + return data + + def __repr__(self): + return self.__class__.__name__ + f'(keys={self.keys})' diff --git a/mmpretrain/datasets/transforms/processing.py b/mmpretrain/datasets/transforms/processing.py new file mode 100644 index 0000000..4c640f6 --- /dev/null +++ b/mmpretrain/datasets/transforms/processing.py @@ -0,0 +1,1795 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import inspect +import math +import numbers +import re +import string +from enum import EnumMeta +from numbers import Number +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import mmcv +import mmengine +import numpy as np +import torch +import torchvision +import torchvision.transforms.functional as F +from mmcv.transforms import BaseTransform +from mmcv.transforms.utils import cache_randomness +from PIL import Image +from torchvision import transforms +from torchvision.transforms.transforms import InterpolationMode + +from mmpretrain.registry import TRANSFORMS + +try: + import albumentations +except ImportError: + albumentations = None + + +def _str_to_torch_dtype(t: str): + """mapping str format dtype to torch.dtype.""" + import torch # noqa: F401,F403 + return eval(f'torch.{t}') + + +def _interpolation_modes_from_str(t: str): + """mapping str format to Interpolation.""" + t = t.lower() + inverse_modes_mapping = { + 'nearest': InterpolationMode.NEAREST, + 'bilinear': InterpolationMode.BILINEAR, + 'bicubic': InterpolationMode.BICUBIC, + 'box': InterpolationMode.BOX, + 'hammimg': InterpolationMode.HAMMING, + 'lanczos': InterpolationMode.LANCZOS, + } + return inverse_modes_mapping[t] + + +class TorchVisonTransformWrapper: + + def __init__(self, transform, *args, **kwargs): + if 'interpolation' in kwargs and isinstance(kwargs['interpolation'], + str): + kwargs['interpolation'] = _interpolation_modes_from_str( + kwargs['interpolation']) + if 'dtype' in kwargs and isinstance(kwargs['dtype'], str): + kwargs['dtype'] = _str_to_torch_dtype(kwargs['dtype']) + self.t = transform(*args, **kwargs) + + def __call__(self, results): + results['img'] = self.t(results['img']) + return results + + def __repr__(self) -> str: + return f'TorchVision{repr(self.t)}' + + +def register_vision_transforms() -> List[str]: + """Register transforms in ``torchvision.transforms`` to the ``TRANSFORMS`` + registry. + + Returns: + List[str]: A list of registered transforms' name. 
+ """ + vision_transforms = [] + for module_name in dir(torchvision.transforms): + if not re.match('[A-Z]', module_name): + # must startswith a capital letter + continue + _transform = getattr(torchvision.transforms, module_name) + if inspect.isclass(_transform) and callable( + _transform) and not isinstance(_transform, (EnumMeta)): + from functools import partial + TRANSFORMS.register_module( + module=partial( + TorchVisonTransformWrapper, transform=_transform), + name=f'torchvision/{module_name}') + vision_transforms.append(f'torchvision/{module_name}') + return vision_transforms + + +# register all the transforms in torchvision by using a transform wrapper +VISION_TRANSFORMS = register_vision_transforms() + + +@TRANSFORMS.register_module() +class RandomCrop(BaseTransform): + """Crop the given Image at a random location. + + **Required Keys:** + + - img + + **Modified Keys:** + + - img + - img_shape + + Args: + crop_size (int | Sequence): Desired output size of the crop. If + crop_size is an int instead of sequence like (h, w), a square crop + (crop_size, crop_size) is made. + padding (int | Sequence, optional): Optional padding on each border + of the image. If a sequence of length 4 is provided, it is used to + pad left, top, right, bottom borders respectively. If a sequence + of length 2 is provided, it is used to pad left/right, top/bottom + borders, respectively. Default: None, which means no padding. + pad_if_needed (bool): It will pad the image if smaller than the + desired size to avoid raising an exception. Since cropping is done + after padding, the padding seems to be done at a random offset. + Default: False. + pad_val (Number | Sequence[Number]): Pixel pad_val value for constant + fill. If a tuple of length 3, it is used to pad_val R, G, B + channels respectively. Default: 0. + padding_mode (str): Type of padding. Defaults to "constant". Should + be one of the following: + + - ``constant``: Pads with a constant value, this value is specified + with pad_val. + - ``edge``: pads with the last value at the edge of the image. + - ``reflect``: Pads with reflection of image without repeating the + last value on the edge. For example, padding [1, 2, 3, 4] + with 2 elements on both sides in reflect mode will result + in [3, 2, 1, 2, 3, 4, 3, 2]. + - ``symmetric``: Pads with reflection of image repeating the last + value on the edge. For example, padding [1, 2, 3, 4] with + 2 elements on both sides in symmetric mode will result in + [2, 1, 1, 2, 3, 4, 4, 3]. + """ + + def __init__(self, + crop_size: Union[Sequence, int], + padding: Optional[Union[Sequence, int]] = None, + pad_if_needed: bool = False, + pad_val: Union[Number, Sequence[Number]] = 0, + padding_mode: str = 'constant'): + if isinstance(crop_size, Sequence): + assert len(crop_size) == 2 + assert crop_size[0] > 0 and crop_size[1] > 0 + self.crop_size = crop_size + else: + assert crop_size > 0 + self.crop_size = (crop_size, crop_size) + # check padding mode + assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric'] + self.padding = padding + self.pad_if_needed = pad_if_needed + self.pad_val = pad_val + self.padding_mode = padding_mode + + @cache_randomness + def rand_crop_params(self, img: np.ndarray): + """Get parameters for ``crop`` for a random crop. + + Args: + img (ndarray): Image to be cropped. + + Returns: + tuple: Params (offset_h, offset_w, target_h, target_w) to be + passed to ``crop`` for random crop. 
+ """ + h, w = img.shape[:2] + target_h, target_w = self.crop_size + if w == target_w and h == target_h: + return 0, 0, h, w + elif w < target_w or h < target_h: + target_w = min(w, target_w) + target_h = min(h, target_h) + + offset_h = np.random.randint(0, h - target_h + 1) + offset_w = np.random.randint(0, w - target_w + 1) + + return offset_h, offset_w, target_h, target_w + + def transform(self, results: dict) -> dict: + """Transform function to randomly crop images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Randomly cropped results, 'img_shape' + key in result dict is updated according to crop size. + """ + img = results['img'] + if self.padding is not None: + img = mmcv.impad(img, padding=self.padding, pad_val=self.pad_val) + + # pad img if needed + if self.pad_if_needed: + h_pad = math.ceil(max(0, self.crop_size[0] - img.shape[0]) / 2) + w_pad = math.ceil(max(0, self.crop_size[1] - img.shape[1]) / 2) + + img = mmcv.impad( + img, + padding=(w_pad, h_pad, w_pad, h_pad), + pad_val=self.pad_val, + padding_mode=self.padding_mode) + + offset_h, offset_w, target_h, target_w = self.rand_crop_params(img) + img = mmcv.imcrop( + img, + np.array([ + offset_w, + offset_h, + offset_w + target_w - 1, + offset_h + target_h - 1, + ])) + results['img'] = img + results['img_shape'] = img.shape + + return results + + def __repr__(self): + """Print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + f'(crop_size={self.crop_size}' + repr_str += f', padding={self.padding}' + repr_str += f', pad_if_needed={self.pad_if_needed}' + repr_str += f', pad_val={self.pad_val}' + repr_str += f', padding_mode={self.padding_mode})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomResizedCrop(BaseTransform): + """Crop the given image to random scale and aspect ratio. + + A crop of random size (default: of 0.08 to 1.0) of the original size and a + random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio + is made. This crop is finally resized to given size. + + **Required Keys:** + + - img + + **Modified Keys:** + + - img + - img_shape + + Args: + scale (sequence | int): Desired output scale of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. + crop_ratio_range (tuple): Range of the random size of the cropped + image compared to the original image. Defaults to (0.08, 1.0). + aspect_ratio_range (tuple): Range of the random aspect ratio of the + cropped image compared to the original image. + Defaults to (3. / 4., 4. / 3.). + max_attempts (int): Maximum number of attempts before falling back to + Central Crop. Defaults to 10. + interpolation (str): Interpolation method, accepted values are + 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to + 'bilinear'. + backend (str): The image resize backend type, accepted values are + 'cv2' and 'pillow'. Defaults to 'cv2'. + """ + + def __init__(self, + scale: Union[Sequence, int], + crop_ratio_range: Tuple[float, float] = (0.08, 1.0), + aspect_ratio_range: Tuple[float, float] = (3. / 4., 4. 
/ 3.), + max_attempts: int = 10, + interpolation: str = 'bilinear', + backend: str = 'cv2') -> None: + if isinstance(scale, Sequence): + assert len(scale) == 2 + assert scale[0] > 0 and scale[1] > 0 + self.scale = scale + else: + assert scale > 0 + self.scale = (scale, scale) + if (crop_ratio_range[0] > crop_ratio_range[1]) or ( + aspect_ratio_range[0] > aspect_ratio_range[1]): + raise ValueError( + 'range should be of kind (min, max). ' + f'But received crop_ratio_range {crop_ratio_range} ' + f'and aspect_ratio_range {aspect_ratio_range}.') + assert isinstance(max_attempts, int) and max_attempts >= 0, \ + 'max_attempts mush be int and no less than 0.' + assert interpolation in ('nearest', 'bilinear', 'bicubic', 'area', + 'lanczos') + + self.crop_ratio_range = crop_ratio_range + self.aspect_ratio_range = aspect_ratio_range + self.max_attempts = max_attempts + self.interpolation = interpolation + self.backend = backend + + @cache_randomness + def rand_crop_params(self, img: np.ndarray) -> Tuple[int, int, int, int]: + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (ndarray): Image to be cropped. + + Returns: + tuple: Params (offset_h, offset_w, target_h, target_w) to be + passed to `crop` for a random sized crop. + """ + h, w = img.shape[:2] + area = h * w + + for _ in range(self.max_attempts): + target_area = np.random.uniform(*self.crop_ratio_range) * area + log_ratio = (math.log(self.aspect_ratio_range[0]), + math.log(self.aspect_ratio_range[1])) + aspect_ratio = math.exp(np.random.uniform(*log_ratio)) + target_w = int(round(math.sqrt(target_area * aspect_ratio))) + target_h = int(round(math.sqrt(target_area / aspect_ratio))) + + if 0 < target_w <= w and 0 < target_h <= h: + offset_h = np.random.randint(0, h - target_h + 1) + offset_w = np.random.randint(0, w - target_w + 1) + + return offset_h, offset_w, target_h, target_w + + # Fallback to central crop + in_ratio = float(w) / float(h) + if in_ratio < min(self.aspect_ratio_range): + target_w = w + target_h = int(round(target_w / min(self.aspect_ratio_range))) + elif in_ratio > max(self.aspect_ratio_range): + target_h = h + target_w = int(round(target_h * max(self.aspect_ratio_range))) + else: # whole image + target_w = w + target_h = h + offset_h = (h - target_h) // 2 + offset_w = (w - target_w) // 2 + return offset_h, offset_w, target_h, target_w + + def transform(self, results: dict) -> dict: + """Transform function to randomly resized crop images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Randomly resized cropped results, 'img_shape' + key in result dict is updated according to crop size. + """ + img = results['img'] + offset_h, offset_w, target_h, target_w = self.rand_crop_params(img) + img = mmcv.imcrop( + img, + bboxes=np.array([ + offset_w, offset_h, offset_w + target_w - 1, + offset_h + target_h - 1 + ])) + img = mmcv.imresize( + img, + tuple(self.scale[::-1]), + interpolation=self.interpolation, + backend=self.backend) + results['img'] = img + results['img_shape'] = img.shape + + return results + + def __repr__(self): + """Print the basic information of the transform. + + Returns: + str: Formatted string. 
+ """ + repr_str = self.__class__.__name__ + f'(scale={self.scale}' + repr_str += ', crop_ratio_range=' + repr_str += f'{tuple(round(s, 4) for s in self.crop_ratio_range)}' + repr_str += ', aspect_ratio_range=' + repr_str += f'{tuple(round(r, 4) for r in self.aspect_ratio_range)}' + repr_str += f', max_attempts={self.max_attempts}' + repr_str += f', interpolation={self.interpolation}' + repr_str += f', backend={self.backend})' + return repr_str + + +@TRANSFORMS.register_module() +class EfficientNetRandomCrop(RandomResizedCrop): + """EfficientNet style RandomResizedCrop. + + **Required Keys:** + + - img + + **Modified Keys:** + + - img + - img_shape + + Args: + scale (int): Desired output scale of the crop. Only int size is + accepted, a square crop (size, size) is made. + min_covered (Number): Minimum ratio of the cropped area to the original + area. Defaults to 0.1. + crop_padding (int): The crop padding parameter in efficientnet style + center crop. Defaults to 32. + crop_ratio_range (tuple): Range of the random size of the cropped + image compared to the original image. Defaults to (0.08, 1.0). + aspect_ratio_range (tuple): Range of the random aspect ratio of the + cropped image compared to the original image. + Defaults to (3. / 4., 4. / 3.). + max_attempts (int): Maximum number of attempts before falling back to + Central Crop. Defaults to 10. + interpolation (str): Interpolation method, accepted values are + 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Defaults to + 'bicubic'. + backend (str): The image resize backend type, accepted values are + 'cv2' and 'pillow'. Defaults to 'cv2'. + """ + + def __init__(self, + scale: int, + min_covered: float = 0.1, + crop_padding: int = 32, + interpolation: str = 'bicubic', + **kwarg): + assert isinstance(scale, int) + super().__init__(scale, interpolation=interpolation, **kwarg) + assert min_covered >= 0, 'min_covered should be no less than 0.' + assert crop_padding >= 0, 'crop_padding should be no less than 0.' + + self.min_covered = min_covered + self.crop_padding = crop_padding + + # https://github.com/kakaobrain/fast-autoaugment/blob/master/FastAutoAugment/data.py # noqa + @cache_randomness + def rand_crop_params(self, img: np.ndarray) -> Tuple[int, int, int, int]: + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (ndarray): Image to be cropped. + + Returns: + tuple: Params (offset_h, offset_w, target_h, target_w) to be + passed to `crop` for a random sized crop. + """ + h, w = img.shape[:2] + area = h * w + min_target_area = self.crop_ratio_range[0] * area + max_target_area = self.crop_ratio_range[1] * area + + for _ in range(self.max_attempts): + aspect_ratio = np.random.uniform(*self.aspect_ratio_range) + min_target_h = int( + round(math.sqrt(min_target_area / aspect_ratio))) + max_target_h = int( + round(math.sqrt(max_target_area / aspect_ratio))) + + if max_target_h * aspect_ratio > w: + max_target_h = int((w + 0.5 - 1e-7) / aspect_ratio) + if max_target_h * aspect_ratio > w: + max_target_h -= 1 + + max_target_h = min(max_target_h, h) + min_target_h = min(max_target_h, min_target_h) + + # slightly differs from tf implementation + target_h = int( + round(np.random.uniform(min_target_h, max_target_h))) + target_w = int(round(target_h * aspect_ratio)) + target_area = target_h * target_w + + # slight differs from tf. 
In tf, if target_area > max_target_area, + # area will be recalculated + if (target_area < min_target_area or target_area > max_target_area + or target_w > w or target_h > h + or target_area < self.min_covered * area): + continue + + offset_h = np.random.randint(0, h - target_h + 1) + offset_w = np.random.randint(0, w - target_w + 1) + + return offset_h, offset_w, target_h, target_w + + # Fallback to central crop + img_short = min(h, w) + crop_size = self.scale[0] / (self.scale[0] + + self.crop_padding) * img_short + + offset_h = max(0, int(round((h - crop_size) / 2.))) + offset_w = max(0, int(round((w - crop_size) / 2.))) + return offset_h, offset_w, crop_size, crop_size + + def __repr__(self): + """Print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = super().__repr__()[:-1] + repr_str += f', min_covered={self.min_covered}' + repr_str += f', crop_padding={self.crop_padding})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomErasing(BaseTransform): + """Randomly selects a rectangle region in an image and erase pixels. + + **Required Keys:** + + - img + + **Modified Keys:** + + - img + + Args: + erase_prob (float): Probability that image will be randomly erased. + Default: 0.5 + min_area_ratio (float): Minimum erased area / input image area + Default: 0.02 + max_area_ratio (float): Maximum erased area / input image area + Default: 0.4 + aspect_range (sequence | float): Aspect ratio range of erased area. + if float, it will be converted to (aspect_ratio, 1/aspect_ratio) + Default: (3/10, 10/3) + mode (str): Fill method in erased area, can be: + + - const (default): All pixels are assign with the same value. + - rand: each pixel is assigned with a random value in [0, 255] + + fill_color (sequence | Number): Base color filled in erased area. + Defaults to (128, 128, 128). + fill_std (sequence | Number, optional): If set and ``mode`` is 'rand', + fill erased area with random color from normal distribution + (mean=fill_color, std=fill_std); If not set, fill erased area with + random color from uniform distribution (0~255). Defaults to None. + + Note: + See `Random Erasing Data Augmentation + `_ + + This paper provided 4 modes: RE-R, RE-M, RE-0, RE-255, and use RE-M as + default. The config of these 4 modes are: + + - RE-R: RandomErasing(mode='rand') + - RE-M: RandomErasing(mode='const', fill_color=(123.67, 116.3, 103.5)) + - RE-0: RandomErasing(mode='const', fill_color=0) + - RE-255: RandomErasing(mode='const', fill_color=255) + """ + + def __init__(self, + erase_prob=0.5, + min_area_ratio=0.02, + max_area_ratio=0.4, + aspect_range=(3 / 10, 10 / 3), + mode='const', + fill_color=(128, 128, 128), + fill_std=None): + assert isinstance(erase_prob, float) and 0. <= erase_prob <= 1. + assert isinstance(min_area_ratio, float) and 0. <= min_area_ratio <= 1. + assert isinstance(max_area_ratio, float) and 0. <= max_area_ratio <= 1. + assert min_area_ratio <= max_area_ratio, \ + 'min_area_ratio should be smaller than max_area_ratio' + if isinstance(aspect_range, float): + aspect_range = min(aspect_range, 1 / aspect_range) + aspect_range = (aspect_range, 1 / aspect_range) + assert isinstance(aspect_range, Sequence) and len(aspect_range) == 2 \ + and all(isinstance(x, float) for x in aspect_range), \ + 'aspect_range should be a float or Sequence with two float.' + assert all(x > 0 for x in aspect_range), \ + 'aspect_range should be positive.' 
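+        # A float ``aspect_range`` is mirrored around 1 above, e.g. a value
+        # of 3.0 (or 1 / 3) is normalized to the range (1 / 3, 3.0).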
+ assert aspect_range[0] <= aspect_range[1], \ + 'In aspect_range (min, max), min should be smaller than max.' + assert mode in ['const', 'rand'], \ + 'Please select `mode` from ["const", "rand"].' + if isinstance(fill_color, Number): + fill_color = [fill_color] * 3 + assert isinstance(fill_color, Sequence) and len(fill_color) == 3 \ + and all(isinstance(x, Number) for x in fill_color), \ + 'fill_color should be a float or Sequence with three int.' + if fill_std is not None: + if isinstance(fill_std, Number): + fill_std = [fill_std] * 3 + assert isinstance(fill_std, Sequence) and len(fill_std) == 3 \ + and all(isinstance(x, Number) for x in fill_std), \ + 'fill_std should be a float or Sequence with three int.' + + self.erase_prob = erase_prob + self.min_area_ratio = min_area_ratio + self.max_area_ratio = max_area_ratio + self.aspect_range = aspect_range + self.mode = mode + self.fill_color = fill_color + self.fill_std = fill_std + + def _fill_pixels(self, img, top, left, h, w): + """Fill pixels to the patch of image.""" + if self.mode == 'const': + patch = np.empty((h, w, 3), dtype=np.uint8) + patch[:, :] = np.array(self.fill_color, dtype=np.uint8) + elif self.fill_std is None: + # Uniform distribution + patch = np.random.uniform(0, 256, (h, w, 3)).astype(np.uint8) + else: + # Normal distribution + patch = np.random.normal(self.fill_color, self.fill_std, (h, w, 3)) + patch = np.clip(patch.astype(np.int32), 0, 255).astype(np.uint8) + + img[top:top + h, left:left + w] = patch + return img + + @cache_randomness + def random_disable(self): + """Randomly disable the transform.""" + return np.random.rand() > self.erase_prob + + @cache_randomness + def random_patch(self, img_h, img_w): + """Randomly generate patch the erase.""" + # convert the aspect ratio to log space to equally handle width and + # height. + log_aspect_range = np.log( + np.array(self.aspect_range, dtype=np.float32)) + aspect_ratio = np.exp(np.random.uniform(*log_aspect_range)) + area = img_h * img_w + area *= np.random.uniform(self.min_area_ratio, self.max_area_ratio) + + h = min(int(round(np.sqrt(area * aspect_ratio))), img_h) + w = min(int(round(np.sqrt(area / aspect_ratio))), img_w) + top = np.random.randint(0, img_h - h) if img_h > h else 0 + left = np.random.randint(0, img_w - w) if img_w > w else 0 + return top, left, h, w + + def transform(self, results): + """ + Args: + results (dict): Results dict from pipeline + + Returns: + dict: Results after the transformation. + """ + if self.random_disable(): + return results + + img = results['img'] + img_h, img_w = img.shape[:2] + + img = self._fill_pixels(img, *self.random_patch(img_h, img_w)) + + results['img'] = img + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(erase_prob={self.erase_prob}, ' + repr_str += f'min_area_ratio={self.min_area_ratio}, ' + repr_str += f'max_area_ratio={self.max_area_ratio}, ' + repr_str += f'aspect_range={self.aspect_range}, ' + repr_str += f'mode={self.mode}, ' + repr_str += f'fill_color={self.fill_color}, ' + repr_str += f'fill_std={self.fill_std})' + return repr_str + + +@TRANSFORMS.register_module() +class EfficientNetCenterCrop(BaseTransform): + r"""EfficientNet style center crop. + + **Required Keys:** + + - img + + **Modified Keys:** + + - img + - img_shape + + Args: + crop_size (int): Expected size after cropping with the format + of (h, w). + crop_padding (int): The crop padding parameter in efficientnet style + center crop. Defaults to 32. 
+ interpolation (str): Interpolation method, accepted values are + 'nearest', 'bilinear', 'bicubic', 'area', 'lanczos'. Only valid if + ``efficientnet_style`` is True. Defaults to 'bicubic'. + backend (str): The image resize backend type, accepted values are + `cv2` and `pillow`. Only valid if efficientnet style is True. + Defaults to `cv2`. + Notes: + - If the image is smaller than the crop size, return the original + image. + - The pipeline will be to first + to perform the center crop with the ``crop_size_`` as: + + .. math:: + + \text{crop_size_} = \frac{\text{crop_size}}{\text{crop_size} + + \text{crop_padding}} \times \text{short_edge} + + And then the pipeline resizes the img to the input crop size. + """ + + def __init__(self, + crop_size: int, + crop_padding: int = 32, + interpolation: str = 'bicubic', + backend: str = 'cv2'): + assert isinstance(crop_size, int) + assert crop_size > 0 + assert crop_padding >= 0 + assert interpolation in ('nearest', 'bilinear', 'bicubic', 'area', + 'lanczos') + + self.crop_size = crop_size + self.crop_padding = crop_padding + self.interpolation = interpolation + self.backend = backend + + def transform(self, results: dict) -> dict: + """Transform function to randomly resized crop images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: EfficientNet style center cropped results, 'img_shape' + key in result dict is updated according to crop size. + """ + img = results['img'] + h, w = img.shape[:2] + + # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/preprocessing.py#L118 # noqa + img_short = min(h, w) + crop_size = self.crop_size / (self.crop_size + + self.crop_padding) * img_short + + offset_h = max(0, int(round((h - crop_size) / 2.))) + offset_w = max(0, int(round((w - crop_size) / 2.))) + + # crop the image + img = mmcv.imcrop( + img, + bboxes=np.array([ + offset_w, offset_h, offset_w + crop_size - 1, + offset_h + crop_size - 1 + ])) + # resize image + img = mmcv.imresize( + img, (self.crop_size, self.crop_size), + interpolation=self.interpolation, + backend=self.backend) + results['img'] = img + results['img_shape'] = img.shape + + return results + + def __repr__(self): + """Print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + f'(crop_size={self.crop_size}' + repr_str += f', crop_padding={self.crop_padding}' + repr_str += f', interpolation={self.interpolation}' + repr_str += f', backend={self.backend})' + return repr_str + + +@TRANSFORMS.register_module() +class ResizeEdge(BaseTransform): + """Resize images along the specified edge. + + **Required Keys:** + + - img + + **Modified Keys:** + + - img + - img_shape + + **Added Keys:** + + - scale + - scale_factor + + Args: + scale (int): The edge scale to resizing. + edge (str): The edge to resize. Defaults to 'short'. + backend (str): Image resize backend, choices are 'cv2' and 'pillow'. + These two backends generates slightly different results. + Defaults to 'cv2'. + interpolation (str): Interpolation method, accepted values are + "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' + backend, "nearest", "bilinear" for 'pillow' backend. + Defaults to 'bilinear'. + """ + + def __init__(self, + scale: int, + edge: str = 'short', + backend: str = 'cv2', + interpolation: str = 'bilinear') -> None: + allow_edges = ['short', 'long', 'width', 'height'] + assert edge in allow_edges, \ + f'Invalid edge "{edge}", please specify from {allow_edges}.' 
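+        # For example, ``ResizeEdge(scale=256, edge='short')`` maps a
+        # 480x640 (H x W) image to 256x341, keeping the aspect ratio.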
+ self.edge = edge + self.scale = scale + self.backend = backend + self.interpolation = interpolation + + def _resize_img(self, results: dict) -> None: + """Resize images with ``results['scale']``.""" + + img, w_scale, h_scale = mmcv.imresize( + results['img'], + results['scale'], + interpolation=self.interpolation, + return_scale=True, + backend=self.backend) + results['img'] = img + results['img_shape'] = img.shape[:2] + results['scale'] = img.shape[:2][::-1] + results['scale_factor'] = (w_scale, h_scale) + + def transform(self, results: Dict) -> Dict: + """Transform function to resize images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Resized results, 'img', 'scale', 'scale_factor', + 'img_shape' keys are updated in result dict. + """ + assert 'img' in results, 'No `img` field in the input.' + + h, w = results['img'].shape[:2] + if any([ + # conditions to resize the width + self.edge == 'short' and w < h, + self.edge == 'long' and w > h, + self.edge == 'width', + ]): + width = self.scale + height = int(self.scale * h / w) + else: + height = self.scale + width = int(self.scale * w / h) + results['scale'] = (width, height) + + self._resize_img(results) + return results + + def __repr__(self): + """Print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(scale={self.scale}, ' + repr_str += f'edge={self.edge}, ' + repr_str += f'backend={self.backend}, ' + repr_str += f'interpolation={self.interpolation})' + return repr_str + + +@TRANSFORMS.register_module() +class ColorJitter(BaseTransform): + """Randomly change the brightness, contrast and saturation of an image. + + Modified from + https://github.com/pytorch/vision/blob/main/torchvision/transforms/transforms.py + Licensed under the BSD 3-Clause License. + + **Required Keys:** + + - img + + **Modified Keys:** + + - img + + Args: + brightness (float | Sequence[float] (min, max)): How much to jitter + brightness. brightness_factor is chosen uniformly from + ``[max(0, 1 - brightness), 1 + brightness]`` or the given + ``[min, max]``. Should be non negative numbers. Defaults to 0. + contrast (float | Sequence[float] (min, max)): How much to jitter + contrast. contrast_factor is chosen uniformly from + ``[max(0, 1 - contrast), 1 + contrast]`` or the given + ``[min, max]``. Should be non negative numbers. Defaults to 0. + saturation (float | Sequence[float] (min, max)): How much to jitter + saturation. saturation_factor is chosen uniformly from + ``[max(0, 1 - saturation), 1 + saturation]`` or the given + ``[min, max]``. Should be non negative numbers. Defaults to 0. + hue (float | Sequence[float] (min, max)): How much to jitter hue. + hue_factor is chosen uniformly from ``[-hue, hue]`` (0 <= hue + <= 0.5) or the given ``[min, max]`` (-0.5 <= min <= max <= 0.5). + Defaults to 0. + backend (str): The backend to operate the image. 
Defaults to 'pillow' + """ + + def __init__(self, + brightness: Union[float, Sequence[float]] = 0., + contrast: Union[float, Sequence[float]] = 0., + saturation: Union[float, Sequence[float]] = 0., + hue: Union[float, Sequence[float]] = 0., + backend='pillow'): + self.brightness = self._set_range(brightness, 'brightness') + self.contrast = self._set_range(contrast, 'contrast') + self.saturation = self._set_range(saturation, 'saturation') + self.hue = self._set_range(hue, 'hue', center=0, bound=(-0.5, 0.5)) + self.backend = backend + + def _set_range(self, value, name, center=1, bound=(0, float('inf'))): + """Set the range of magnitudes.""" + if isinstance(value, numbers.Number): + if value < 0: + raise ValueError( + f'If {name} is a single number, it must be non negative.') + value = (center - float(value), center + float(value)) + + if isinstance(value, (tuple, list)) and len(value) == 2: + if not bound[0] <= value[0] <= value[1] <= bound[1]: + value = np.clip(value, bound[0], bound[1]) + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.warning(f'ColorJitter {name} values exceed the bound ' + f'{bound}, clipped to the bound.') + else: + raise TypeError(f'{name} should be a single number ' + 'or a list/tuple with length 2.') + + # if value is 0 or (1., 1.) for brightness/contrast/saturation + # or (0., 0.) for hue, do nothing + if value[0] == value[1] == center: + value = None + else: + value = tuple(value) + + return value + + @cache_randomness + def _rand_params(self): + """Get random parameters including magnitudes and indices of + transforms.""" + trans_inds = np.random.permutation(4) + b, c, s, h = (None, ) * 4 + + if self.brightness is not None: + b = np.random.uniform(self.brightness[0], self.brightness[1]) + if self.contrast is not None: + c = np.random.uniform(self.contrast[0], self.contrast[1]) + if self.saturation is not None: + s = np.random.uniform(self.saturation[0], self.saturation[1]) + if self.hue is not None: + h = np.random.uniform(self.hue[0], self.hue[1]) + + return trans_inds, b, c, s, h + + def transform(self, results: Dict) -> Dict: + """Transform function to resize images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: ColorJitter results, 'img' key is updated in result dict. + """ + img = results['img'] + trans_inds, brightness, contrast, saturation, hue = self._rand_params() + + for index in trans_inds: + if index == 0 and brightness is not None: + img = mmcv.adjust_brightness( + img, brightness, backend=self.backend) + elif index == 1 and contrast is not None: + img = mmcv.adjust_contrast(img, contrast, backend=self.backend) + elif index == 2 and saturation is not None: + img = mmcv.adjust_color( + img, alpha=saturation, backend=self.backend) + elif index == 3 and hue is not None: + img = mmcv.adjust_hue(img, hue, backend=self.backend) + + results['img'] = img + return results + + def __repr__(self): + """Print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(brightness={self.brightness}, ' + repr_str += f'contrast={self.contrast}, ' + repr_str += f'saturation={self.saturation}, ' + repr_str += f'hue={self.hue})' + return repr_str + + +@TRANSFORMS.register_module() +class Lighting(BaseTransform): + """Adjust images lighting using AlexNet-style PCA jitter. 
+ + **Required Keys:** + + - img + + **Modified Keys:** + + - img + + Args: + eigval (Sequence[float]): the eigenvalue of the convariance matrix + of pixel values, respectively. + eigvec (list[list]): the eigenvector of the convariance matrix of + pixel values, respectively. + alphastd (float): The standard deviation for distribution of alpha. + Defaults to 0.1. + to_rgb (bool): Whether to convert img to rgb. Defaults to False. + """ + + def __init__(self, + eigval: Sequence[float], + eigvec: Sequence[float], + alphastd: float = 0.1, + to_rgb: bool = False): + assert isinstance(eigval, Sequence), \ + f'eigval must be Sequence, got {type(eigval)} instead.' + assert isinstance(eigvec, Sequence), \ + f'eigvec must be Sequence, got {type(eigvec)} instead.' + for vec in eigvec: + assert isinstance(vec, Sequence) and len(vec) == len(eigvec[0]), \ + 'eigvec must contains lists with equal length.' + assert isinstance(alphastd, float), 'alphastd should be of type ' \ + f'float or int, got {type(alphastd)} instead.' + + self.eigval = np.array(eigval) + self.eigvec = np.array(eigvec) + self.alphastd = alphastd + self.to_rgb = to_rgb + + def transform(self, results: Dict) -> Dict: + """Transform function to resize images. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Lightinged results, 'img' key is updated in result dict. + """ + assert 'img' in results, 'No `img` field in the input.' + + img = results['img'] + img_lighting = mmcv.adjust_lighting( + img, + self.eigval, + self.eigvec, + alphastd=self.alphastd, + to_rgb=self.to_rgb) + results['img'] = img_lighting.astype(img.dtype) + return results + + def __repr__(self): + """Print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(eigval={self.eigval.tolist()}, ' + repr_str += f'eigvec={self.eigvec.tolist()}, ' + repr_str += f'alphastd={self.alphastd}, ' + repr_str += f'to_rgb={self.to_rgb})' + return repr_str + + +# 'Albu' is used in previous versions of mmpretrain, here is for compatibility +# users can use both 'Albumentations' and 'Albu'. +@TRANSFORMS.register_module(['Albumentations', 'Albu']) +class Albumentations(BaseTransform): + """Wrapper to use augmentation from albumentations library. + + **Required Keys:** + + - img + + **Modified Keys:** + + - img + - img_shape + + Adds custom transformations from albumentations library. + More details can be found in + `Albumentations `_. + An example of ``transforms`` is as followed: + + .. code-block:: + + [ + dict( + type='ShiftScaleRotate', + shift_limit=0.0625, + scale_limit=0.0, + rotate_limit=0, + interpolation=1, + p=0.5), + dict( + type='RandomBrightnessContrast', + brightness_limit=[0.1, 0.3], + contrast_limit=[0.1, 0.3], + p=0.2), + dict(type='ChannelShuffle', p=0.1), + dict( + type='OneOf', + transforms=[ + dict(type='Blur', blur_limit=3, p=1.0), + dict(type='MedianBlur', blur_limit=3, p=1.0) + ], + p=0.1), + ] + + Args: + transforms (List[Dict]): List of albumentations transform configs. + keymap (Optional[Dict]): Mapping of mmpretrain to albumentations + fields, in format {'input key':'albumentation-style key'}. + Defaults to None. + + Example: + >>> import mmcv + >>> from mmpretrain.datasets import Albumentations + >>> transforms = [ + ... dict( + ... type='ShiftScaleRotate', + ... shift_limit=0.0625, + ... scale_limit=0.0, + ... rotate_limit=0, + ... interpolation=1, + ... p=0.5), + ... dict( + ... type='RandomBrightnessContrast', + ... 
brightness_limit=[0.1, 0.3], + ... contrast_limit=[0.1, 0.3], + ... p=0.2), + ... dict(type='ChannelShuffle', p=0.1), + ... dict( + ... type='OneOf', + ... transforms=[ + ... dict(type='Blur', blur_limit=3, p=1.0), + ... dict(type='MedianBlur', blur_limit=3, p=1.0) + ... ], + ... p=0.1), + ... ] + >>> albu = Albumentations(transforms) + >>> data = {'img': mmcv.imread('./demo/demo.JPEG')} + >>> data = albu(data) + >>> print(data['img'].shape) + (375, 500, 3) + """ + + def __init__(self, transforms: List[Dict], keymap: Optional[Dict] = None): + if albumentations is None: + raise RuntimeError('albumentations is not installed') + else: + from albumentations import Compose as albu_Compose + + assert isinstance(transforms, list), 'transforms must be a list.' + if keymap is not None: + assert isinstance(keymap, dict), 'keymap must be None or a dict. ' + + self.transforms = transforms + + self.aug = albu_Compose( + [self.albu_builder(t) for t in self.transforms]) + + if not keymap: + self.keymap_to_albu = dict(img='image') + else: + self.keymap_to_albu = keymap + self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()} + + def albu_builder(self, cfg: Dict): + """Import a module from albumentations. + + It inherits some of :func:`build_from_cfg` logic. + Args: + cfg (dict): Config dict. It should at least contain the key "type". + Returns: + obj: The constructed object. + """ + + assert isinstance(cfg, dict) and 'type' in cfg, 'each item in ' \ + "transforms must be a dict with keyword 'type'." + args = cfg.copy() + + obj_type = args.pop('type') + if mmengine.is_str(obj_type): + obj_cls = getattr(albumentations, obj_type) + elif inspect.isclass(obj_type): + obj_cls = obj_type + else: + raise TypeError( + f'type must be a str or valid type, but got {type(obj_type)}') + + if 'transforms' in args: + args['transforms'] = [ + self.albu_builder(transform) + for transform in args['transforms'] + ] + + return obj_cls(**args) + + @staticmethod + def mapper(d, keymap): + """Dictionary mapper. + + Renames keys according to keymap provided. + Args: + d (dict): old dict + keymap (dict): {'old_key':'new_key'} + Returns: + dict: new dict. + """ + + updated_dict = {} + for k, v in zip(d.keys(), d.values()): + new_k = keymap.get(k, k) + updated_dict[new_k] = d[k] + return updated_dict + + def transform(self, results: Dict) -> Dict: + """Transform function to perform albumentations transforms. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Transformed results, 'img' and 'img_shape' keys are + updated in result dict. + """ + assert 'img' in results, 'No `img` field in the input.' + + # dict to albumentations format + results = self.mapper(results, self.keymap_to_albu) + results = self.aug(**results) + + # back to the original format + results = self.mapper(results, self.keymap_back) + results['img_shape'] = results['img'].shape[:2] + + return results + + def __repr__(self): + """Print the basic information of the transform. + + Returns: + str: Formatted string. + """ + repr_str = self.__class__.__name__ + repr_str += f'(transforms={repr(self.transforms)})' + return repr_str + + +@TRANSFORMS.register_module() +class SimMIMMaskGenerator(BaseTransform): + """Generate random block mask for each Image. + + **Added Keys**: + + - mask + + This module is used in SimMIM to generate masks. + + Args: + input_size (int): Size of input image. Defaults to 192. + mask_patch_size (int): Size of each block mask. Defaults to 32. + model_patch_size (int): Patch size of each token. 
Defaults to 4. + mask_ratio (float): The mask ratio of image. Defaults to 0.6. + """ + + def __init__(self, + input_size: int = 192, + mask_patch_size: int = 32, + model_patch_size: int = 4, + mask_ratio: float = 0.6): + self.input_size = input_size + self.mask_patch_size = mask_patch_size + self.model_patch_size = model_patch_size + self.mask_ratio = mask_ratio + + assert self.input_size % self.mask_patch_size == 0 + assert self.mask_patch_size % self.model_patch_size == 0 + + self.rand_size = self.input_size // self.mask_patch_size + self.scale = self.mask_patch_size // self.model_patch_size + + self.token_count = self.rand_size**2 + self.mask_count = int(np.ceil(self.token_count * self.mask_ratio)) + + def transform(self, results: dict) -> dict: + """Method to generate random block mask for each Image in SimMIM. + + Args: + results (dict): Result dict from previous pipeline. + + Returns: + dict: Result dict with added key ``mask``. + """ + mask_idx = np.random.permutation(self.token_count)[:self.mask_count] + mask = np.zeros(self.token_count, dtype=int) + mask[mask_idx] = 1 + + mask = mask.reshape((self.rand_size, self.rand_size)) + mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1) + + results.update({'mask': mask}) + + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(input_size={self.input_size}, ' + repr_str += f'mask_patch_size={self.mask_patch_size}, ' + repr_str += f'model_patch_size={self.model_patch_size}, ' + repr_str += f'mask_ratio={self.mask_ratio})' + return repr_str + + +@TRANSFORMS.register_module() +class BEiTMaskGenerator(BaseTransform): + """Generate mask for image. + + **Added Keys**: + + - mask + + This module is borrowed from + https://github.com/microsoft/unilm/tree/master/beit + + Args: + input_size (int): The size of input image. + num_masking_patches (int): The number of patches to be masked. + min_num_patches (int): The minimum number of patches to be masked + in the process of generating mask. Defaults to 4. + max_num_patches (int, optional): The maximum number of patches to be + masked in the process of generating mask. Defaults to None. + min_aspect (float): The minimum aspect ratio of mask blocks. Defaults + to 0.3. + min_aspect (float, optional): The minimum aspect ratio of mask blocks. + Defaults to None. + """ + + def __init__(self, + input_size: int, + num_masking_patches: int, + min_num_patches: int = 4, + max_num_patches: Optional[int] = None, + min_aspect: float = 0.3, + max_aspect: Optional[float] = None) -> None: + if not isinstance(input_size, tuple): + input_size = (input_size, ) * 2 + self.height, self.width = input_size + + self.num_patches = self.height * self.width + + self.num_masking_patches = num_masking_patches + self.min_num_patches = min_num_patches + self.max_num_patches = num_masking_patches if max_num_patches is None \ + else max_num_patches + + max_aspect = max_aspect or 1 / min_aspect + self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) + + def _mask(self, mask: np.ndarray, max_mask_patches: int) -> int: + """Generate mask recursively. + + Args: + mask (np.ndarray): The mask to be generated. + max_mask_patches (int): The maximum number of patches to be masked. + + Returns: + int: The number of patches masked. 
+ """ + delta = 0 + for _ in range(10): + target_area = np.random.uniform(self.min_num_patches, + max_mask_patches) + aspect_ratio = math.exp(np.random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + if w < self.width and h < self.height: + top = np.random.randint(0, self.height - h) + left = np.random.randint(0, self.width - w) + + num_masked = mask[top:top + h, left:left + w].sum() + # Overlap + if 0 < h * w - num_masked <= max_mask_patches: + for i in range(top, top + h): + for j in range(left, left + w): + if mask[i, j] == 0: + mask[i, j] = 1 + delta += 1 + if delta > 0: + break + return delta + + def transform(self, results: dict) -> dict: + """Method to generate random block mask for each Image in BEiT. + + Args: + results (dict): Result dict from previous pipeline. + + Returns: + dict: Result dict with added key ``mask``. + """ + mask = np.zeros(shape=(self.height, self.width), dtype=int) + + mask_count = 0 + while mask_count != self.num_masking_patches: + max_mask_patches = self.num_masking_patches - mask_count + max_mask_patches = min(max_mask_patches, self.max_num_patches) + + delta = self._mask(mask, max_mask_patches) + mask_count += delta + results.update({'mask': mask}) + + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(height={self.height}, ' + repr_str += f'width={self.width}, ' + repr_str += f'num_patches={self.num_patches}, ' + repr_str += f'num_masking_patches={self.num_masking_patches}, ' + repr_str += f'min_num_patches={self.min_num_patches}, ' + repr_str += f'max_num_patches={self.max_num_patches}, ' + repr_str += f'log_aspect_ratio={self.log_aspect_ratio})' + return repr_str + + +@TRANSFORMS.register_module() +class RandomResizedCropAndInterpolationWithTwoPic(BaseTransform): + """Crop the given PIL Image to random size and aspect ratio with random + interpolation. + + **Required Keys**: + + - img + + **Modified Keys**: + + - img + + **Added Keys**: + + - target_img + + This module is borrowed from + https://github.com/microsoft/unilm/tree/master/beit. + + A crop of random size (default: of 0.08 to 1.0) of the original size and a + random aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio + is made. This crop is finally resized to given size. This is popularly used + to train the Inception networks. This module first crops the image and + resizes the crop to two different sizes. + + Args: + size (Union[tuple, int]): Expected output size of each edge of the + first image. + second_size (Union[tuple, int], optional): Expected output size of each + edge of the second image. + scale (tuple[float, float]): Range of size of the origin size cropped. + Defaults to (0.08, 1.0). + ratio (tuple[float, float]): Range of aspect ratio of the origin aspect + ratio cropped. Defaults to (3./4., 4./3.). + interpolation (str): The interpolation for the first image. Defaults + to ``bilinear``. + second_interpolation (str): The interpolation for the second image. + Defaults to ``lanczos``. + """ + + def __init__(self, + size: Union[tuple, int], + second_size=None, + scale=(0.08, 1.0), + ratio=(3. / 4., 4. 
/ 3.), + interpolation='bilinear', + second_interpolation='lanczos') -> None: + if isinstance(size, tuple): + self.size = size + else: + self.size = (size, size) + if second_size is not None: + if isinstance(second_size, tuple): + self.second_size = second_size + else: + self.second_size = (second_size, second_size) + else: + self.second_size = None + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + ('range should be of kind (min, max)') + + if interpolation == 'random': + self.interpolation = ('bilinear', 'bicubic') + else: + self.interpolation = interpolation + self.second_interpolation = second_interpolation + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params(img: np.ndarray, scale: tuple, + ratio: tuple) -> Sequence[int]: + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (np.ndarray): Image to be cropped. + scale (tuple): range of size of the origin size cropped + ratio (tuple): range of aspect ratio of the origin aspect + ratio cropped + + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + img_h, img_w = img.shape[:2] + area = img_h * img_w + + for _ in range(10): + target_area = np.random.uniform(*scale) * area + log_ratio = (math.log(ratio[0]), math.log(ratio[1])) + aspect_ratio = math.exp(np.random.uniform(*log_ratio)) + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if w < img_w and h < img_h: + i = np.random.randint(0, img_h - h) + j = np.random.randint(0, img_w - w) + return i, j, h, w + + # Fallback to central crop + in_ratio = img_w / img_h + if in_ratio < min(ratio): + w = img_w + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = img_h + w = int(round(h * max(ratio))) + else: # whole image + w = img_w + h = img_h + i = (img_h - h) // 2 + j = (img_w - w) // 2 + return i, j, h, w + + def transform(self, results: dict) -> dict: + """Crop the given image and resize it to two different sizes. + + This module crops the given image randomly and resize the crop to two + different sizes. This is popularly used in BEiT-style masked image + modeling, where an off-the-shelf model is used to provide the target. + + Args: + results (dict): Results from previous pipeline. + + Returns: + dict: Results after applying this transformation. + """ + img = results['img'] + i, j, h, w = self.get_params(img, self.scale, self.ratio) + if isinstance(self.interpolation, (tuple, list)): + interpolation = np.random.choice(self.interpolation) + else: + interpolation = self.interpolation + if self.second_size is None: + img = img[i:i + h, j:j + w] + img = mmcv.imresize(img, self.size, interpolation=interpolation) + results.update({'img': img}) + else: + img = img[i:i + h, j:j + w] + img_sample = mmcv.imresize( + img, self.size, interpolation=interpolation) + img_target = mmcv.imresize( + img, self.second_size, interpolation=self.second_interpolation) + results.update({'img': [img_sample, img_target]}) + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(size={self.size}, ' + repr_str += f'second_size={self.second_size}, ' + repr_str += f'interpolation={self.interpolation}, ' + repr_str += f'second_interpolation={self.second_interpolation}, ' + repr_str += f'scale={self.scale}, ' + repr_str += f'ratio={self.ratio})' + return repr_str + + +@TRANSFORMS.register_module() +class CleanCaption(BaseTransform): + """Clean caption text. 
+
+    Remove punctuation (and optionally redundant whitespace and uppercase
+    characters) for the caption task.
+
+    **Required Keys:**
+
+    - ``*keys``
+
+    **Modified Keys:**
+
+    - ``*keys``
+
+    Args:
+        keys (Sequence[str], optional): The keys of text to be cleaned.
+            Defaults to 'gt_caption'.
+        remove_chars (str): The characters to be removed. Defaults to
+            :py:attr:`string.punctuation`.
+        lowercase (bool): Whether to convert the text to lowercase.
+            Defaults to True.
+        remove_dup_space (bool): Whether to remove duplicated whitespaces.
+            Defaults to True.
+        strip (bool): Whether to remove leading and trailing whitespaces.
+            Defaults to True.
+    """
+
+    def __init__(
+        self,
+        keys='gt_caption',
+        remove_chars=string.punctuation,
+        lowercase=True,
+        remove_dup_space=True,
+        strip=True,
+    ):
+        if isinstance(keys, str):
+            keys = [keys]
+        self.keys = keys
+        self.transtab = str.maketrans({ch: None for ch in remove_chars})
+        self.lowercase = lowercase
+        self.remove_dup_space = remove_dup_space
+        self.strip = strip
+
+    def _clean(self, text):
+        """Perform text cleaning before tokenizer."""
+        if self.strip:
+            text = text.strip()
+
+        text = text.translate(self.transtab)
+
+        if self.remove_dup_space:
+            text = re.sub(r'\s{2,}', ' ', text)
+
+        if self.lowercase:
+            text = text.lower()
+
+        return text
+
+    def clean(self, text):
+        """Perform text cleaning before tokenizer."""
+        if isinstance(text, (list, tuple)):
+            return [self._clean(item) for item in text]
+        elif isinstance(text, str):
+            return self._clean(text)
+        else:
+            raise TypeError('text must be a string or a list of strings')
+
+    def transform(self, results: dict) -> dict:
+        """Method to clean the input text data."""
+        for key in self.keys:
+            results[key] = self.clean(results[key])
+        return results
+
+
+@TRANSFORMS.register_module()
+class OFAAddObjects(BaseTransform):
+    """Add the object labels generated by VinVL into the question prompt for
+    OFA fine-tuned models.
+
+    **Required Keys:**
+
+    - ``objects``
+
+    **Modified Keys:**
+
+    - ``question``
+
+    **Added Keys:**
+
+    - ``decoder_prompt``
+    """
+
+    def transform(self, results: dict) -> dict:
+        if 'objects' not in results:
+            raise ValueError(
+                'Some OFA fine-tuned models require an `objects` field in '
+                'the dataset, which is generated by VinVL. Or please use '
+                'zero-shot configs. See '
+                'https://github.com/OFA-Sys/OFA/issues/189')
+
+        if 'question' in results:
+            prompt = '{} object: {}'.format(
+                results['question'],
+                ' '.join(results['objects']),
+            )
+            results['decoder_prompt'] = prompt
+            results['question'] = prompt
+        return results
+
+
+@TRANSFORMS.register_module()
+class RandomTranslatePad(BaseTransform):
+    """Pad the image into a ``size`` x ``size`` square and optionally place
+    it at a random offset, translating ``gt_bboxes`` accordingly.
+
+    Args:
+        size (int): The output image size. Defaults to 640.
+        aug_translate (bool): Whether to place the image at a random offset
+            instead of padding it symmetrically. Defaults to False.
+    """
+
+    def __init__(self, size=640, aug_translate=False):
+        self.size = size
+        self.aug_translate = aug_translate
+
+    @cache_randomness
+    def rand_translate_params(self, dh, dw):
+        top = np.random.randint(0, dh)
+        left = np.random.randint(0, dw)
+        return top, left
+
+    def transform(self, results: dict) -> dict:
+        img = results['img']
+        h, w = img.shape[:-1]
+        dw = self.size - w
+        dh = self.size - h
+        if self.aug_translate:
+            top, left = self.rand_translate_params(dh, dw)
+        else:
+            top = round(dh / 2.0 - 0.1)
+            left = round(dw / 2.0 - 0.1)
+
+        out_img = np.zeros((self.size, self.size, 3), dtype=np.float32)
+        out_img[top:top + h, left:left + w, :] = img
+        results['img'] = out_img
+        results['img_shape'] = (self.size, self.size)
+
+        # translate the boxes by the same offset as the image
+        if 'gt_bboxes' in results.keys():
+            for i in range(len(results['gt_bboxes'])):
+                box = results['gt_bboxes'][i]
+                box[0], box[2] = box[0] + left, box[2] + left
+                box[1], box[3] = box[1] + top, box[3] + top
+                results['gt_bboxes'][i] = box
+
+        return results
+
+
+@TRANSFORMS.register_module()
+class MAERandomResizedCrop(transforms.RandomResizedCrop):
+    """RandomResizedCrop for matching TF/TPU implementation: no for-loop is
+    used.
+ + This may lead to results different with torchvision's version. + Following BYOL's TF code: + https://github.com/deepmind/deepmind-research/blob/master/byol/utils/dataset.py#L206 # noqa: E501 + """ + + @staticmethod + def get_params(img: Image.Image, scale: tuple, ratio: tuple) -> Tuple: + width, height = img.size + area = height * width + + target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item() + log_ratio = torch.log(torch.tensor(ratio)) + aspect_ratio = torch.exp( + torch.empty(1).uniform_(log_ratio[0], log_ratio[1])).item() + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + w = min(w, width) + h = min(h, height) + + i = torch.randint(0, height - h + 1, size=(1, )).item() + j = torch.randint(0, width - w + 1, size=(1, )).item() + + return i, j, h, w + + def forward(self, results: dict) -> dict: + """The forward function of MAERandomResizedCrop. + + Args: + results (dict): The results dict contains the image and all these + information related to the image. + + Returns: + dict: The results dict contains the cropped image and all these + information related to the image. + """ + img = results['img'] + i, j, h, w = self.get_params(img, self.scale, self.ratio) + img = F.resized_crop(img, i, j, h, w, self.size, self.interpolation) + results['img'] = img + return results diff --git a/mmpretrain/datasets/transforms/utils.py b/mmpretrain/datasets/transforms/utils.py new file mode 100644 index 0000000..d794048 --- /dev/null +++ b/mmpretrain/datasets/transforms/utils.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import List, Union + +from mmcv.transforms import BaseTransform + +PIPELINE_TYPE = List[Union[dict, BaseTransform]] + + +def get_transform_idx(pipeline: PIPELINE_TYPE, target: str) -> int: + """Returns the index of the transform in a pipeline. + + Args: + pipeline (List[dict] | List[BaseTransform]): The transforms list. + target (str): The target transform class name. + + Returns: + int: The transform index. Returns -1 if not found. + """ + for i, transform in enumerate(pipeline): + if isinstance(transform, dict): + if isinstance(transform['type'], type): + if transform['type'].__name__ == target: + return i + else: + if transform['type'] == target: + return i + else: + if transform.__class__.__name__ == target: + return i + + return -1 + + +def remove_transform(pipeline: PIPELINE_TYPE, target: str, inplace=False): + """Remove the target transform type from the pipeline. + + Args: + pipeline (List[dict] | List[BaseTransform]): The transforms list. + target (str): The target transform class name. + inplace (bool): Whether to modify the pipeline inplace. + + Returns: + The modified transform. + """ + idx = get_transform_idx(pipeline, target) + if not inplace: + pipeline = copy.deepcopy(pipeline) + while idx >= 0: + pipeline.pop(idx) + idx = get_transform_idx(pipeline, target) + + return pipeline diff --git a/mmpretrain/datasets/transforms/wrappers.py b/mmpretrain/datasets/transforms/wrappers.py new file mode 100644 index 0000000..c0dfd73 --- /dev/null +++ b/mmpretrain/datasets/transforms/wrappers.py @@ -0,0 +1,144 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
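+# The wrappers below apply sub-pipelines to multiple views of an image
+# (``MultiView``) or to list-valued fields of a sample (``ApplyToList``).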
+import copy +from typing import Callable, List, Union + +from mmcv.transforms import BaseTransform, Compose + +from mmpretrain.registry import TRANSFORMS + +# Define type of transform or transform config +Transform = Union[dict, Callable[[dict], dict]] + + +@TRANSFORMS.register_module() +class MultiView(BaseTransform): + """A transform wrapper for multiple views of an image. + + Args: + transforms (list[dict | callable], optional): Sequence of transform + object or config dict to be wrapped. + mapping (dict): A dict that defines the input key mapping. + The keys corresponds to the inner key (i.e., kwargs of the + ``transform`` method), and should be string type. The values + corresponds to the outer keys (i.e., the keys of the + data/results), and should have a type of string, list or dict. + None means not applying input mapping. Default: None. + allow_nonexist_keys (bool): If False, the outer keys in the mapping + must exist in the input data, or an exception will be raised. + Default: False. + + Examples: + >>> # Example 1: MultiViews 1 pipeline with 2 views + >>> pipeline = [ + >>> dict(type='MultiView', + >>> num_views=2, + >>> transforms=[ + >>> [ + >>> dict(type='Resize', scale=224))], + >>> ]) + >>> ] + >>> # Example 2: MultiViews 2 pipelines, the first with 2 views, + >>> # the second with 6 views + >>> pipeline = [ + >>> dict(type='MultiView', + >>> num_views=[2, 6], + >>> transforms=[ + >>> [ + >>> dict(type='Resize', scale=224)], + >>> [ + >>> dict(type='Resize', scale=224), + >>> dict(type='RandomSolarize')], + >>> ]) + >>> ] + """ + + def __init__(self, transforms: List[List[Transform]], + num_views: Union[int, List[int]]) -> None: + + if isinstance(num_views, int): + num_views = [num_views] + assert isinstance(num_views, List) + assert len(num_views) == len(transforms) + self.num_views = num_views + + self.pipelines = [] + for trans in transforms: + pipeline = Compose(trans) + self.pipelines.append(pipeline) + + self.transforms = [] + for i in range(len(num_views)): + self.transforms.extend([self.pipelines[i]] * num_views[i]) + + def transform(self, results: dict) -> dict: + """Apply transformation to inputs. + + Args: + results (dict): Result dict from previous pipelines. + + Returns: + dict: Transformed results. + """ + multi_views_outputs = dict(img=[]) + for trans in self.transforms: + inputs = copy.deepcopy(results) + outputs = trans(inputs) + + multi_views_outputs['img'].append(outputs['img']) + results.update(multi_views_outputs) + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + '(' + for i, p in enumerate(self.pipelines): + repr_str += f'\nPipeline {i + 1} with {self.num_views[i]} views:\n' + repr_str += str(p) + repr_str += ')' + return repr_str + + +@TRANSFORMS.register_module() +class ApplyToList(BaseTransform): + """A transform wrapper to apply the wrapped transforms to a list of items. + For example, to load and resize a list of images. + + Args: + transforms (list[dict | callable]): Sequence of transform config dict + to be wrapped. + scatter_key (str): The key to scatter data dict. If the field is a + list, scatter the list to multiple data dicts to do transformation. + collate_keys (List[str]): The keys to collate from multiple data dicts. + The fields in ``collate_keys`` will be composed into a list after + transformation, and the other fields will be adopted from the + first data dict. 
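+
+    Example:
+        >>> # A config-style sketch; the keys below are illustrative and
+        >>> # depend on the dataset annotations.
+        >>> pipeline = dict(
+        ...     type='ApplyToList',
+        ...     scatter_key='img_path',
+        ...     collate_keys=['img', 'img_shape'],
+        ...     transforms=[
+        ...         dict(type='LoadImageFromFile'),
+        ...         dict(type='Resize', scale=(224, 224)),
+        ...     ])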
+ """ + + def __init__(self, transforms, scatter_key, collate_keys): + super().__init__() + + self.transforms = Compose([TRANSFORMS.build(t) for t in transforms]) + self.scatter_key = scatter_key + self.collate_keys = set(collate_keys) + self.collate_keys.add(self.scatter_key) + + def transform(self, results: dict): + scatter_field = results.get(self.scatter_key) + + if isinstance(scatter_field, list): + scattered_results = [] + for item in scatter_field: + single_results = copy.deepcopy(results) + single_results[self.scatter_key] = item + scattered_results.append(self.transforms(single_results)) + + final_output = scattered_results[0] + + # merge output list to single output + for key in scattered_results[0].keys(): + if key in self.collate_keys: + final_output[key] = [ + single[key] for single in scattered_results + ] + return final_output + else: + return self.transforms(results) diff --git a/mmpretrain/datasets/utils.py b/mmpretrain/datasets/utils.py new file mode 100644 index 0000000..fcb60e4 --- /dev/null +++ b/mmpretrain/datasets/utils.py @@ -0,0 +1,243 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import gzip +import hashlib +import os +import os.path +import shutil +import tarfile +import tempfile +import urllib.error +import urllib.request +import zipfile + +from mmengine.fileio import LocalBackend, get_file_backend + +__all__ = [ + 'rm_suffix', 'check_integrity', 'download_and_extract_archive', + 'open_maybe_compressed_file' +] + + +def rm_suffix(s, suffix=None): + if suffix is None: + return s[:s.rfind('.')] + else: + return s[:s.rfind(suffix)] + + +def calculate_md5(fpath: str, chunk_size: int = 1024 * 1024): + md5 = hashlib.md5() + backend = get_file_backend(fpath, enable_singleton=True) + if isinstance(backend, LocalBackend): + # Enable chunk update for local file. + with open(fpath, 'rb') as f: + for chunk in iter(lambda: f.read(chunk_size), b''): + md5.update(chunk) + else: + md5.update(backend.get(fpath)) + return md5.hexdigest() + + +def check_md5(fpath, md5, **kwargs): + return md5 == calculate_md5(fpath, **kwargs) + + +def check_integrity(fpath, md5=None): + if not os.path.isfile(fpath): + return False + if md5 is None: + return True + return check_md5(fpath, md5) + + +def download_url_to_file(url, dst, hash_prefix=None, progress=True): + """Download object at the given URL to a local path. + + Modified from + https://pytorch.org/docs/stable/hub.html#torch.hub.download_url_to_file + + Args: + url (str): URL of the object to download + dst (str): Full path where object will be saved, + e.g. ``/tmp/temporary_file`` + hash_prefix (string, optional): If not None, the SHA256 downloaded + file should start with ``hash_prefix``. Defaults to None. + progress (bool): whether or not to display a progress bar to stderr. + Defaults to True + """ + file_size = None + req = urllib.request.Request(url) + u = urllib.request.urlopen(req) + meta = u.info() + if hasattr(meta, 'getheaders'): + content_length = meta.getheaders('Content-Length') + else: + content_length = meta.get_all('Content-Length') + if content_length is not None and len(content_length) > 0: + file_size = int(content_length[0]) + + # We deliberately save it in a temp file and move it after download is + # complete. This prevents a local file being overridden by a broken + # download. 
+ dst = os.path.expanduser(dst) + dst_dir = os.path.dirname(dst) + f = tempfile.NamedTemporaryFile(delete=False, dir=dst_dir) + + import rich.progress + columns = [ + rich.progress.DownloadColumn(), + rich.progress.BarColumn(bar_width=None), + rich.progress.TimeRemainingColumn(), + ] + try: + if hash_prefix is not None: + sha256 = hashlib.sha256() + with rich.progress.Progress(*columns) as pbar: + task = pbar.add_task('download', total=file_size, visible=progress) + while True: + buffer = u.read(8192) + if len(buffer) == 0: + break + f.write(buffer) + if hash_prefix is not None: + sha256.update(buffer) + pbar.update(task, advance=len(buffer)) + + f.close() + if hash_prefix is not None: + digest = sha256.hexdigest() + if digest[:len(hash_prefix)] != hash_prefix: + raise RuntimeError( + 'invalid hash value (expected "{}", got "{}")'.format( + hash_prefix, digest)) + shutil.move(f.name, dst) + finally: + f.close() + if os.path.exists(f.name): + os.remove(f.name) + + +def download_url(url, root, filename=None, md5=None): + """Download a file from a url and place it in root. + + Args: + url (str): URL to download file from. + root (str): Directory to place downloaded file in. + filename (str | None): Name to save the file under. + If filename is None, use the basename of the URL. + md5 (str | None): MD5 checksum of the download. + If md5 is None, download without md5 check. + """ + root = os.path.expanduser(root) + if not filename: + filename = os.path.basename(url) + fpath = os.path.join(root, filename) + + os.makedirs(root, exist_ok=True) + + if check_integrity(fpath, md5): + print(f'Using downloaded and verified file: {fpath}') + else: + try: + print(f'Downloading {url} to {fpath}') + download_url_to_file(url, fpath) + except (urllib.error.URLError, IOError) as e: + if url[:5] == 'https': + url = url.replace('https:', 'http:') + print('Failed download. Trying https -> http instead.' 
+ f' Downloading {url} to {fpath}') + download_url_to_file(url, fpath) + else: + raise e + # check integrity of downloaded file + if not check_integrity(fpath, md5): + raise RuntimeError('File not found or corrupted.') + + +def _is_tarxz(filename): + return filename.endswith('.tar.xz') + + +def _is_tar(filename): + return filename.endswith('.tar') + + +def _is_targz(filename): + return filename.endswith('.tar.gz') + + +def _is_tgz(filename): + return filename.endswith('.tgz') + + +def _is_gzip(filename): + return filename.endswith('.gz') and not filename.endswith('.tar.gz') + + +def _is_zip(filename): + return filename.endswith('.zip') + + +def extract_archive(from_path, to_path=None, remove_finished=False): + if to_path is None: + to_path = os.path.dirname(from_path) + + if _is_tar(from_path): + with tarfile.open(from_path, 'r') as tar: + tar.extractall(path=to_path) + elif _is_targz(from_path) or _is_tgz(from_path): + with tarfile.open(from_path, 'r:gz') as tar: + tar.extractall(path=to_path) + elif _is_tarxz(from_path): + with tarfile.open(from_path, 'r:xz') as tar: + tar.extractall(path=to_path) + elif _is_gzip(from_path): + to_path = os.path.join( + to_path, + os.path.splitext(os.path.basename(from_path))[0]) + with open(to_path, 'wb') as out_f, gzip.GzipFile(from_path) as zip_f: + out_f.write(zip_f.read()) + elif _is_zip(from_path): + with zipfile.ZipFile(from_path, 'r') as z: + z.extractall(to_path) + else: + raise ValueError(f'Extraction of {from_path} not supported') + + if remove_finished: + os.remove(from_path) + + +def download_and_extract_archive(url, + download_root, + extract_root=None, + filename=None, + md5=None, + remove_finished=False): + download_root = os.path.expanduser(download_root) + if extract_root is None: + extract_root = download_root + if not filename: + filename = os.path.basename(url) + + download_url(url, download_root, filename, md5) + + archive = os.path.join(download_root, filename) + print(f'Extracting {archive} to {extract_root}') + extract_archive(archive, extract_root, remove_finished) + + +def open_maybe_compressed_file(path: str): + """Return a file object that possibly decompresses 'path' on the fly. + + Decompression occurs when argument `path` is a string and ends with '.gz' + or '.xz'. + """ + if not isinstance(path, str): + return path + if path.endswith('.gz'): + import gzip + return gzip.open(path, 'rb') + if path.endswith('.xz'): + import lzma + return lzma.open(path, 'rb') + return open(path, 'rb') diff --git a/mmpretrain/datasets/vg_vqa.py b/mmpretrain/datasets/vg_vqa.py new file mode 100644 index 0000000..2d83884 --- /dev/null +++ b/mmpretrain/datasets/vg_vqa.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +from mmengine.fileio import load + +from mmpretrain.registry import DATASETS +from .base_dataset import BaseDataset + + +@DATASETS.register_module() +class VGVQA(BaseDataset): + """Visual Genome VQA dataset.""" + + def load_data_list(self) -> List[dict]: + """Load data list. + + Compare to BaseDataset, the only difference is that coco_vqa annotation + file is already a list of data. There is no 'metainfo'. + """ + + raw_data_list = load(self.ann_file) + if not isinstance(raw_data_list, list): + raise TypeError( + f'The VQA annotations loaded from annotation file ' + f'should be a dict, but got {type(raw_data_list)}!') + + # load and parse data_infos. 
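+ # Each raw record is parsed below: its 'image' key is renamed to
+ # 'img_path', and for 'vqa' records duplicate answers are merged into
+ # paired answer / answer_weight lists ('vg' records keep one answer).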
+ data_list = [] + for raw_data_info in raw_data_list: + # parse raw data information to target format + data_info = self.parse_data_info(raw_data_info) + if isinstance(data_info, dict): + # For VQA tasks, each `data_info` looks like: + # { + # "question_id": 986769, + # "question": "How many people are there?", + # "answer": "two", + # "image": "image/1.jpg", + # "dataset": "vg" + # } + + # change 'image' key to 'img_path' + # TODO: This process will be removed, after the annotation file + # is preprocess. + data_info['img_path'] = data_info['image'] + del data_info['image'] + + if 'answer' in data_info: + # add answer_weight & answer_count, delete duplicate answer + if data_info['dataset'] == 'vqa': + answer_weight = {} + for answer in data_info['answer']: + if answer in answer_weight.keys(): + answer_weight[answer] += 1 / len( + data_info['answer']) + else: + answer_weight[answer] = 1 / len( + data_info['answer']) + + data_info['answer'] = list(answer_weight.keys()) + data_info['answer_weight'] = list( + answer_weight.values()) + data_info['answer_count'] = len(answer_weight) + + elif data_info['dataset'] == 'vg': + data_info['answers'] = [data_info['answer']] + data_info['answer_weight'] = [0.2] + data_info['answer_count'] = 1 + + data_list.append(data_info) + + else: + raise TypeError( + f'Each VQA data element loaded from annotation file ' + f'should be a dict, but got {type(data_info)}!') + + return data_list diff --git a/mmpretrain/datasets/visual_genome.py b/mmpretrain/datasets/visual_genome.py new file mode 100644 index 0000000..8c33b86 --- /dev/null +++ b/mmpretrain/datasets/visual_genome.py @@ -0,0 +1,95 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import re +from itertools import chain +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class VisualGenomeQA(BaseDataset): + """Visual Genome Question Answering dataset. + + dataset structure: :: + + data_root + ├── image + │   ├── 1.jpg + │   ├── 2.jpg + │   └── ... + └── question_answers.json + + Args: + data_root (str): The root directory for ``data_prefix``, ``ann_file`` + and ``question_file``. + data_prefix (str): The directory of images. Defaults to ``"image"``. + ann_file (str, optional): Annotation file path for training and + validation. Defaults to ``"question_answers.json"``. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def __init__(self, + data_root: str, + data_prefix: str = 'image', + ann_file: str = 'question_answers.json', + **kwarg): + super().__init__( + data_root=data_root, + data_prefix=dict(img_path=data_prefix), + ann_file=ann_file, + **kwarg, + ) + + def _create_image_index(self): + img_prefix = self.data_prefix['img_path'] + + files = mmengine.list_dir_or_file(img_prefix, list_dir=False) + image_index = {} + for file in files: + image_id = re.findall(r'\d+', file) + if len(image_id) > 0: + image_id = int(image_id[-1]) + image_index[image_id] = mmengine.join_path(img_prefix, file) + + return image_index + + def load_data_list(self) -> List[dict]: + """Load data list.""" + annotations = mmengine.load(self.ann_file) + + # The original Visual Genome annotation file and question file includes + # only image id but no image file paths. 
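+ # Scan the image directory once to build an {image_id: img_path}
+ # lookup, so each QA record below can be resolved to an image file.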
+ self.image_index = self._create_image_index() + + data_list = [] + for qas in chain.from_iterable(ann['qas'] for ann in annotations): + # ann example + # { + # 'id': 1, + # 'qas': [ + # { + # 'a_objects': [], + # 'question': 'What color is the clock?', + # 'image_id': 1, + # 'qa_id': 986768, + # 'answer': 'Two.', + # 'q_objects': [], + # } + # ... + # ] + # } + + data_info = { + 'img_path': self.image_index[qas['image_id']], + 'quesiton': qas['quesiton'], + 'question_id': qas['question_id'], + 'image_id': qas['image_id'], + 'gt_answer': [qas['answer']], + } + + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/vizwiz.py b/mmpretrain/datasets/vizwiz.py new file mode 100644 index 0000000..7b5dd39 --- /dev/null +++ b/mmpretrain/datasets/vizwiz.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import Counter +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class VizWiz(BaseDataset): + """VizWiz dataset. + + Args: + data_root (str): The root directory for ``data_prefix``, ``ann_file`` + and ``question_file``. + data_prefix (str): The directory of images. + ann_file (str, optional): Annotation file path for training and + validation. Defaults to an empty string. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def __init__(self, + data_root: str, + data_prefix: str, + ann_file: str = '', + **kwarg): + super().__init__( + data_root=data_root, + data_prefix=dict(img_path=data_prefix), + ann_file=ann_file, + **kwarg, + ) + + def load_data_list(self) -> List[dict]: + """Load data list.""" + annotations = mmengine.load(self.ann_file) + + data_list = [] + for ann in annotations: + # { + # "image": "VizWiz_val_00000001.jpg", + # "question": "Can you tell me what this medicine is please?", + # "answers": [ + # { + # "answer": "no", + # "answer_confidence": "yes" + # }, + # { + # "answer": "unanswerable", + # "answer_confidence": "yes" + # }, + # { + # "answer": "night time", + # "answer_confidence": "maybe" + # }, + # { + # "answer": "unanswerable", + # "answer_confidence": "yes" + # }, + # { + # "answer": "night time", + # "answer_confidence": "maybe" + # }, + # { + # "answer": "night time cold medicine", + # "answer_confidence": "maybe" + # }, + # { + # "answer": "night time", + # "answer_confidence": "maybe" + # }, + # { + # "answer": "night time", + # "answer_confidence": "maybe" + # }, + # { + # "answer": "night time", + # "answer_confidence": "maybe" + # }, + # { + # "answer": "night time medicine", + # "answer_confidence": "yes" + # } + # ], + # "answer_type": "other", + # "answerable": 1 + # }, + data_info = dict() + data_info['question'] = ann['question'] + data_info['img_path'] = mmengine.join_path( + self.data_prefix['img_path'], ann['image']) + + if 'answerable' not in ann: + data_list.append(data_info) + else: + if ann['answerable'] == 1: + # add answer_weight & answer_count, delete duplicate answer + answers = [] + for item in ann.pop('answers'): + if item['answer_confidence'] == 'yes' and item[ + 'answer'] != 'unanswerable': + answers.append(item['answer']) + count = Counter(answers) + answer_weight = [i / len(answers) for i in count.values()] + data_info['gt_answer'] = list(count.keys()) + data_info['gt_answer_weight'] = answer_weight + # data_info.update(ann) + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/datasets/voc.py b/mmpretrain/datasets/voc.py new 
file mode 100644 index 0000000..39544de --- /dev/null +++ b/mmpretrain/datasets/voc.py @@ -0,0 +1,195 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import xml.etree.ElementTree as ET +from typing import List, Optional, Union + +from mmengine import get_file_backend, list_from_file +from mmengine.logging import MMLogger + +from mmpretrain.registry import DATASETS +from .base_dataset import expanduser +from .categories import VOC2007_CATEGORIES +from .multi_label import MultiLabelDataset + + +@DATASETS.register_module() +class VOC(MultiLabelDataset): + """`Pascal VOC `_ Dataset. + + After decompression, the dataset directory structure is as follows: + + VOC dataset directory: :: + + VOC2007 + ├── JPEGImages + │ ├── xxx.jpg + │ ├── xxy.jpg + │ └── ... + ├── Annotations + │ ├── xxx.xml + │ ├── xxy.xml + │ └── ... + └── ImageSets + └── Main + ├── train.txt + ├── val.txt + ├── trainval.txt + ├── test.txt + └── ... + + Extra difficult label is in VOC annotations, we will use + `gt_label_difficult` to record the difficult labels in each sample + and corresponding evaluation should take care of this field + to calculate metrics. Usually, difficult labels are reckoned as + negative in defaults. + + Args: + data_root (str): The root directory for VOC dataset. + split (str, optional): The dataset split, supports "train", + "val", "trainval", and "test". Default to "trainval". + image_set_path (str, optional): The path of image set, The file which + lists image ids of the sub dataset, and this path is relative + to ``data_root``. Default to ''. + data_prefix (dict): Prefix for data and annotation, keyword + 'img_path' and 'ann_path' can be set. Defaults to be + ``dict(img_path='JPEGImages', ann_path='Annotations')``. + metainfo (dict, optional): Meta information for dataset, such as + categories information. Defaults to None. + **kwargs: Other keyword arguments in :class:`BaseDataset`. 
+ + Examples: + >>> from mmpretrain.datasets import VOC + >>> train_dataset = VOC(data_root='data/VOC2007', split='trainval') + >>> train_dataset + Dataset VOC + Number of samples: 5011 + Number of categories: 20 + Prefix of dataset: data/VOC2007 + Path of image set: data/VOC2007/ImageSets/Main/trainval.txt + Prefix of images: data/VOC2007/JPEGImages + Prefix of annotations: data/VOC2007/Annotations + >>> test_dataset = VOC(data_root='data/VOC2007', split='test') + >>> test_dataset + Dataset VOC + Number of samples: 4952 + Number of categories: 20 + Prefix of dataset: data/VOC2007 + Path of image set: data/VOC2007/ImageSets/Main/test.txt + Prefix of images: data/VOC2007/JPEGImages + Prefix of annotations: data/VOC2007/Annotations + """ # noqa: E501 + + METAINFO = {'classes': VOC2007_CATEGORIES} + + def __init__(self, + data_root: str, + split: str = 'trainval', + image_set_path: str = '', + data_prefix: Union[str, dict] = dict( + img_path='JPEGImages', ann_path='Annotations'), + test_mode: bool = False, + metainfo: Optional[dict] = None, + **kwargs): + + self.backend = get_file_backend(data_root, enable_singleton=True) + + if split: + splits = ['train', 'val', 'trainval', 'test'] + assert split in splits, \ + f"The split must be one of {splits}, but get '{split}'" + self.split = split + + if not data_prefix: + data_prefix = dict( + img_path='JPEGImages', ann_path='Annotations') + if not image_set_path: + image_set_path = self.backend.join_path( + 'ImageSets', 'Main', f'{split}.txt') + + # To handle the BC-breaking + if (split == 'train' or split == 'trainval') and test_mode: + logger = MMLogger.get_current_instance() + logger.warning(f'split="{split}" but test_mode=True. ' + f'The {split} set will be used.') + + if isinstance(data_prefix, str): + data_prefix = dict(img_path=expanduser(data_prefix)) + assert isinstance(data_prefix, dict) and 'img_path' in data_prefix, \ + '`data_prefix` must be a dict with key img_path' + + if (split and split not in ['val', 'test']) or not test_mode: + assert 'ann_path' in data_prefix and data_prefix[ + 'ann_path'] is not None, \ + '"ann_path" must be set in `data_prefix`' \ + 'when validation or test set is used.' + + self.data_root = data_root + self.image_set_path = self.backend.join_path(data_root, image_set_path) + + super().__init__( + ann_file='', + metainfo=metainfo, + data_root=data_root, + data_prefix=data_prefix, + test_mode=test_mode, + **kwargs) + + @property + def ann_prefix(self): + """The prefix of images.""" + if 'ann_path' in self.data_prefix: + return self.data_prefix['ann_path'] + else: + return None + + def _get_labels_from_xml(self, img_id): + """Get gt_labels and labels_difficult from xml file.""" + xml_path = self.backend.join_path(self.ann_prefix, f'{img_id}.xml') + content = self.backend.get(xml_path) + root = ET.fromstring(content) + + labels, labels_difficult = set(), set() + for obj in root.findall('object'): + label_name = obj.find('name').text + # in case customized dataset has wrong labels + # or CLASSES has been override. 
+ if label_name not in self.CLASSES: + continue + label = self.class_to_idx[label_name] + difficult = int(obj.find('difficult').text) + if difficult: + labels_difficult.add(label) + else: + labels.add(label) + + return list(labels), list(labels_difficult) + + def load_data_list(self): + """Load images and ground truth labels.""" + data_list = [] + img_ids = list_from_file(self.image_set_path) + + for img_id in img_ids: + img_path = self.backend.join_path(self.img_prefix, f'{img_id}.jpg') + + labels, labels_difficult = None, None + if self.ann_prefix is not None: + labels, labels_difficult = self._get_labels_from_xml(img_id) + + info = dict( + img_path=img_path, + gt_label=labels, + gt_label_difficult=labels_difficult) + data_list.append(info) + + return data_list + + def extra_repr(self) -> List[str]: + """The extra repr information of the dataset.""" + body = [ + f'Prefix of dataset: \t{self.data_root}', + f'Path of image set: \t{self.image_set_path}', + f'Prefix of images: \t{self.img_prefix}', + f'Prefix of annotations: \t{self.ann_prefix}' + ] + + return body diff --git a/mmpretrain/datasets/vsr.py b/mmpretrain/datasets/vsr.py new file mode 100644 index 0000000..7b10959 --- /dev/null +++ b/mmpretrain/datasets/vsr.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import mmengine +from mmengine.dataset import BaseDataset + +from mmpretrain.registry import DATASETS + + +@DATASETS.register_module() +class VSR(BaseDataset): + """VSR: Visual Spatial Reasoning dataset. + + Args: + data_root (str): The root directory for ``data_prefix``, ``ann_file`` + and ``question_file``. + data_prefix (str): The directory of images. + ann_file (str, optional): Annotation file path for training and + validation. Defaults to an empty string. + **kwargs: Other keyword arguments in :class:`BaseDataset`. + """ + + def __init__(self, + data_root: str, + data_prefix: str, + ann_file: str = '', + **kwarg): + super().__init__( + data_root=data_root, + data_prefix=dict(img_path=data_prefix), + ann_file=ann_file, + **kwarg, + ) + + def load_data_list(self) -> List[dict]: + """Load data list.""" + annotations = mmengine.load(self.ann_file) + + data_list = [] + for ann in annotations: + # ann example + # { + # "image": "train2017/000000372029.jpg", + # "question": "The dog is on the surfboard.", + # "answer": true + # } + data_info = dict() + data_info['img_path'] = mmengine.join_path( + self.data_prefix['img_path'], ann['image']) + data_info['question'] = ann['question'] + data_info['gt_answer'] = 'yes' if ann['answer'] else 'no' + + data_list.append(data_info) + + return data_list diff --git a/mmpretrain/engine/__init__.py b/mmpretrain/engine/__init__.py new file mode 100644 index 0000000..332fea0 --- /dev/null +++ b/mmpretrain/engine/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
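To make the VOC label parsing above concrete, here is a small self-contained sketch of the per-image XML layout that `_get_labels_from_xml` walks. The object names and difficult flags are invented for illustration, and class names stand in for the `class_to_idx` indices used in the real code.

import xml.etree.ElementTree as ET

xml_str = """
<annotation>
  <object><name>dog</name><difficult>0</difficult></object>
  <object><name>person</name><difficult>1</difficult></object>
  <object><name>dog</name><difficult>0</difficult></object>
</annotation>
"""

labels, labels_difficult = set(), set()
for obj in ET.fromstring(xml_str).findall('object'):
    name = obj.find('name').text
    if int(obj.find('difficult').text):
        labels_difficult.add(name)  # treated as negative by default
    else:
        labels.add(name)

# 'dog' lands in gt_label and 'person' in gt_label_difficult, mirroring
# how VOC.load_data_list fills the two fields of each sample.
assert labels == {'dog'} and labels_difficult == {'person'}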
+from .hooks import * # noqa: F401, F403 +from .optimizers import * # noqa: F401, F403 +from .runners import * # noqa: F401, F403 +from .schedulers import * # noqa: F401, F403 diff --git a/mmpretrain/engine/__pycache__/__init__.cpython-310.pyc b/mmpretrain/engine/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8f4cbc36b2477297771f2833148897bb0fa3ccc GIT binary patch literal 224 zcmd1j<>g`kf*F60r9A-Bk3k${zy#zt0CBMjkVs)jVa#C&fzXUlnh8oX18JsU22JLd zj6kKDjJFuI{4`l^v1a7wXBXe%$}cF%%+0JyEh@gnUR0Ww2V!s)CugLll;!~0D;bK| zf!e^tFFpOD{QMIA+}whq)RLma%sgafYF>I~UaEe4d}dx|NqoFsLFFwDo7{YmF?JxE Oi#dP<2O|jbFaiLxWI6}{ literal 0 HcmV?d00001 diff --git a/mmpretrain/engine/hooks/__init__.py b/mmpretrain/engine/hooks/__init__.py new file mode 100644 index 0000000..bc9e22b --- /dev/null +++ b/mmpretrain/engine/hooks/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .class_num_check_hook import ClassNumCheckHook +from .densecl_hook import DenseCLHook +from .ema_hook import EMAHook +from .margin_head_hooks import SetAdaptiveMarginsHook +from .precise_bn_hook import PreciseBNHook +from .retriever_hooks import PrepareProtoBeforeValLoopHook +from .simsiam_hook import SimSiamHook +from .swav_hook import SwAVHook +from .switch_recipe_hook import SwitchRecipeHook +from .visualization_hook import VisualizationHook +from .warmup_param_hook import WarmupParamHook + +__all__ = [ + 'ClassNumCheckHook', 'PreciseBNHook', 'VisualizationHook', + 'SwitchRecipeHook', 'PrepareProtoBeforeValLoopHook', + 'SetAdaptiveMarginsHook', 'EMAHook', 'SimSiamHook', 'DenseCLHook', + 'SwAVHook', 'WarmupParamHook' +] diff --git a/mmpretrain/engine/hooks/__pycache__/__init__.cpython-310.pyc b/mmpretrain/engine/hooks/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..914b49a1cf16450c5f41615f7aa673c17042537a GIT binary patch literal 823 zcmZuv&5qMB5Kj7+v`IFb?hjmefL`c@BSLfw5{KQYB2tm&VwuFdwbqU!J8cC|!I3jJ zUWA9}l@qUkgoMWlQV%%Fr|%n2CgX3cl4Ro9_4Ut>`?&9UKT({no)xd{Yra@0z0wz+ z^uhO$_e4+jq3?KK4CD}ojt|601`s$t6rmi$*zu8=$Ot0G0};yv630VvCR0cqAB%ID zLFV{GOyvb!I39^hc?DO#_u!TBHy^IcB==@#zkJ))%s*yJL5*4MZdP63nGMubv$a-@x?%T9vkeuu zO4VILlyjMLDmxkGhsCDj=lOv*#rD0eShFscZMfM{!9UZ6tEvm8AE=hQdQEj#C(>xV z9w9^sI3*n*3=swhA%Y#6r^g7fh1Ki{To&jBsdkbS=myzZJ6t`8OLU|VZ3sU-hD_2{ zVok`Tvx96IEnV2SA3|EUzL1Yqn~kkOs#&8s+cVweVuuhLE{%1hO~fu?Yz}naYU~xJ z#yLZ;4Ae)Ma{NNEo$F4+D>Vz?NX2WaQ0|GEAaMd6-?IDh4my7Vq=u94_R*u4Hs85ivX|7 zIMIB9#+yAGY)l^yHg#C*u*8o*#^$E6XOFE-lbR!gUxu@F2yOt!*>ry}sC{4`AvR{q zkRHlOf8P?XeFSLCWZVP5<7;zl9|lLpP2-E^=hmLH=gP_$0@%0s-Lbn_l{M7fH@7Tt z7qork!TkZeOG}lpJ=x7<_4kP<2UWDO^E!Ics1?7IA?^;D974cANGBVmVGogEqC?`0 z3|T};8p+;}2`!PZY#@gu?^ns>>8yWpqt$x9rR6&`l96Dt)2z+}Zt#Ivot;pNkFpFb zUdy|itDQUg&}pnr*}!Ez+5DTv13fPsR4LX7%^^cs zF3--^Q^Ri5m7C_2CCZ=VQ+Y`~7%;B<-H2y#Hc*wYtc@^KK^SIH%EFLiZzvmlRtw0X zA>1-bm{9&O5|NagW5*~P3a8;IN3f)?ys`k4KS>(*QN#?DS1Lb%HUiFXYb!j@d&hO5#^_BCeSk^g(@vuG}8O^!ddpMD%5AGdUI3ylk z0x~uZVNk$mfZ=dtmBXN672oQs$b1XxI05ZhLF>9px1mFw*%@GL9WX zPg)IGq>DUKGUwW*Ve>2CvO2F_2{A`W9#O{m60jdw`~tlAMI@M|k~{}HRqrxrD;8?S z25=rC5SP5-rPI*Oi#`!M{ygXdPT!rz>B}gI!8L5)Y3u)2r2Z{Xi{Q)SF!2|`^T+Y2 zPJ#ycH}Uf_kmBkSu>rBCBJ<5@WF!;vf5+q;dhtx4)TWgH3h&HNIiuO;11RzIUB(L41SN}Kv3t(BjXdl_2p;zqk`+@ UFnrfUr|<_^-Go2OvoD)}0b*CGssI20 literal 0 HcmV?d00001 diff --git a/mmpretrain/engine/hooks/__pycache__/densecl_hook.cpython-310.pyc b/mmpretrain/engine/hooks/__pycache__/densecl_hook.cpython-310.pyc new file mode 100644 index 
zcmaJ@TW=Fb6yBY^*^cd82#6G_E|f}GMb1kfDnzTIss*WSKnhhPtJ1RZOyY&TxU=Ji zSPrThenh3J5_#l*RQ(b2+9&=3h0^cLI*yZ8EbREq<(xg|JKs5zqSb0xcz%8J=T4(; zS%1-F{_!yR0&n>P25xaXwR$#wS;{h}=VWfr&Agsx)1H(1S-m4qgOB>lj zZ^5=i{dx1qx?^#l2hS`Xh~UsUw0kXXZ(5z&OMJoF=s4QBo$u?~y`f5qJWjRueGZ{x zYxizZJjSng_ujp4HX*cjg^CK9L|MT_+9&<_2k}(CD+t2)-DBMDIh+ZXJ9u5;agX~u zH6HNVGq>k+PXs%R*Lef88b8MucoR|`Qj51CHR?WJ<%`f;5(}a!TB0o$cbqkA%`%pJ zzRXvidA%j>Bc}6uaVwVDXm~%CaYpXPf7q7wV33p{{>R|}OW~*#JlrW{*dN6CZj$eY zY9PWPsR$)x85WpJEXH|Q0F^8m$AnGU2=QQ9g}HzURfK!yYhjX?2^Yo;pc$p2+c0O% zhs`(puqB=-k@IjbQG+lmqzJiCagxH))Qp5?nC!rMUzBAm_orrV%iZ#(F-Q&=N246h zhSy6aJ2%4z^X`XcXXa%|Zf=V(P7#{m-I zVO*g>4$<>)AY%UTM)liGi?YN-sQ$_Ksu&|NEc$~oyq@H0>iIPI=?3LRQNCvRs7&a_ zN!)s2&UxKBWm)@DsFBP!`tBSzNp|pliMPCrVPa3L$eK79ZN;8D6Lv%^F1I$V7xqTS zlf-7ZfI+ijs2j0T67&a%x;6cJHSM_CEk(LhDNuDAtrwPDf&A*Tby*Z@J(el*FlnhfL|H~rk|!#P)?lpkG5Ef3FR}5(t3DxDSKi+Vu$vTeZUlApIDFFL$-z3T}yfUS8eOSv8{2SyovK0n$Yh!`-{!l zf%DYav-hlyGhVqPlp*{{o?gA*4ezHSE(Iz}^dDE%RsuBCSlQj^_;Lv@$N+=(44HHQ z5D8j?o^IcUm~4-f_)5w`c3inkXMH)!a{%vGz-wC=Y^&iMov6c1WIVZsY1ziW*pl5g z|7t(8x8t?5c+E8WM2zEy`tJ;|Z!sCKoB_-MV4%$-cft+<(iVF26t<55n>&TgJq4Tl z)IEl6^FOf3^Z3juaO4H7=m5ygpph4`1XdCrlDMF~pW<{ROkfMlj|ibKSwS?y2)36Kf6L$zbB5;nee%m&;Jpmdyu#yd0~)H50*5g!C#l4?%} zbcf&o=CgRty5Fo+Bcu8WML>Rl_?OQna{i#MZ^x=Xh!XxpH+Zb#$jndm0-_%klBtPX z5Ks$997a2;LL?THJ?n|Q0%CF_u_S_eL2_t882BZjLMiAN>eI8JF*z^5Y@ zF?ywmAxzLax-W4ZA=!a;X{zg4{3If&)D04=CMZK3(oUA-rj^JKFzp0$SY5?}{E!aS z`Xgx!^cIboXeeZ5MF+XqBb3h~yF3q=(ft&b$|eTiZ`wWx;=cvZSKbA*(L?@7YeTj$ zc9yAG(d83Gx)IUe2)NE5H>2psQJhY9f+*reAIlEzF74nNkgL#^lwHFP6Ro^LQ(~Q@ z7N%XE^p*D1XqXD^rU`UErp*)18;N>>tYM(OT@vqWmdjX^G4>C0*c;}u*UW4D-Ck(a z8Z}mPU(@Omt-9^kMzhn_wE`f`_. + + Args: + start_iters (int): The number of warmup iterations to set + ``loss_lambda=0``. Defaults to 1000. + """ + + def __init__(self, start_iters: int = 1000) -> None: + self.start_iters = start_iters + + def before_train(self, runner) -> None: + """Obtain ``loss_lambda`` from algorithm.""" + assert hasattr(get_ori_model(runner.model), 'loss_lambda'), \ + "The runner must have attribute \"loss_lambda\" in DenseCL." + self.loss_lambda = get_ori_model(runner.model).loss_lambda + + def before_train_iter(self, + runner, + batch_idx: int, + data_batch: Optional[Sequence[dict]] = None) -> None: + """Adjust ``loss_lambda`` every train iter.""" + assert hasattr(get_ori_model(runner.model), 'loss_lambda'), \ + "The runner must have attribute \"loss_lambda\" in DenseCL." + cur_iter = runner.iter + if cur_iter >= self.start_iters: + get_ori_model(runner.model).loss_lambda = self.loss_lambda + else: + get_ori_model(runner.model).loss_lambda = 0. diff --git a/mmpretrain/engine/hooks/ema_hook.py b/mmpretrain/engine/hooks/ema_hook.py new file mode 100644 index 0000000..284d211 --- /dev/null +++ b/mmpretrain/engine/hooks/ema_hook.py @@ -0,0 +1,216 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import itertools +import warnings +from typing import Dict, Optional + +from mmengine.hooks import EMAHook as BaseEMAHook +from mmengine.logging import MMLogger +from mmengine.runner import Runner + +from mmpretrain.registry import HOOKS + + +@HOOKS.register_module() +class EMAHook(BaseEMAHook): + """A Hook to apply Exponential Moving Average (EMA) on the model during + training. + + Comparing with :class:`mmengine.hooks.EMAHook`, this hook accepts + ``evaluate_on_ema`` and ``evaluate_on_origin`` arguments. By default, the + ``evaluate_on_ema`` is enabled, and if you want to do validation and + testing on both original and EMA models, please set both arguments + ``True``. 
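+
+ For instance, an illustrative way to enable the hook from a config
+ (the momentum value here is only an example passed through to the
+ averaged model)::
+
+ custom_hooks = [
+ dict(type='EMAHook',
+ momentum=1e-4,
+ evaluate_on_ema=True,
+ evaluate_on_origin=True)
+ ]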
+ + Note: + - EMAHook takes priority over CheckpointHook. + - The original model parameters are actually saved in ema field after + train. + - ``begin_iter`` and ``begin_epoch`` cannot be set at the same time. + + Args: + ema_type (str): The type of EMA strategy to use. You can find the + supported strategies in :mod:`mmengine.model.averaged_model`. + Defaults to 'ExponentialMovingAverage'. + strict_load (bool): Whether to strictly enforce that the keys of + ``state_dict`` in checkpoint match the keys returned by + ``self.module.state_dict``. Defaults to False. + Changed in v0.3.0. + begin_iter (int): The number of iteration to enable ``EMAHook``. + Defaults to 0. + begin_epoch (int): The number of epoch to enable ``EMAHook``. + Defaults to 0. + evaluate_on_ema (bool): Whether to evaluate (validate and test) + on EMA model during val-loop and test-loop. Defaults to True. + evaluate_on_origin (bool): Whether to evaluate (validate and test) + on the original model during val-loop and test-loop. + Defaults to False. + **kwargs: Keyword arguments passed to subclasses of + :obj:`BaseAveragedModel` + """ + + priority = 'NORMAL' + + def __init__(self, + ema_type: str = 'ExponentialMovingAverage', + strict_load: bool = False, + begin_iter: int = 0, + begin_epoch: int = 0, + evaluate_on_ema: bool = True, + evaluate_on_origin: bool = False, + **kwargs): + super().__init__( + ema_type=ema_type, + strict_load=strict_load, + begin_iter=begin_iter, + begin_epoch=begin_epoch, + **kwargs) + + if not evaluate_on_ema and not evaluate_on_origin: + warnings.warn( + 'Automatically set `evaluate_on_origin=True` since the ' + '`evaluate_on_ema` is disabled. If you want to disable ' + 'all validation, please modify the `val_interval` of ' + 'the `train_cfg`.', UserWarning) + evaluate_on_origin = True + + self.evaluate_on_ema = evaluate_on_ema + self.evaluate_on_origin = evaluate_on_origin + self.load_ema_from_ckpt = False + + def before_train(self, runner) -> None: + super().before_train(runner) + if not runner._resume and self.load_ema_from_ckpt: + # If loaded EMA state dict but not want to resume training + # overwrite the EMA state dict with the source model. + MMLogger.get_current_instance().info( + 'Load from a checkpoint with EMA parameters but not ' + 'resume training. Initialize the model parameters with ' + 'EMA parameters') + for p_ema, p_src in zip(self._ema_params, self._src_params): + p_src.data.copy_(p_ema.data) + + def before_val_epoch(self, runner) -> None: + """We load parameter values from ema model to source model before + validation. + + Args: + runner (Runner): The runner of the training process. + """ + if self.evaluate_on_ema: + # Swap when evaluate on ema + self._swap_ema_parameters() + + def after_val_epoch(self, + runner, + metrics: Optional[Dict[str, float]] = None) -> None: + """We recover source model's parameter from ema model after validation. + + Args: + runner (Runner): The runner of the validation process. + metrics (Dict[str, float], optional): Evaluation results of all + metrics on validation dataset. The keys are the names of the + metrics, and the values are corresponding results. + """ + if self.evaluate_on_ema: + # Swap when evaluate on ema + self._swap_ema_parameters() + + if self.evaluate_on_ema and self.evaluate_on_origin: + # Re-evaluate if evaluate on both ema and origin. 
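+ # The second pass below reruns the val loop on the restored original
+ # weights and logs each metric as 'val/<metric_name>_origin'.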
+ val_loop = runner.val_loop + + runner.model.eval() + for idx, data_batch in enumerate(val_loop.dataloader): + val_loop.run_iter(idx, data_batch) + + # compute metrics + origin_metrics = val_loop.evaluator.evaluate( + len(val_loop.dataloader.dataset)) + + for k, v in origin_metrics.items(): + runner.message_hub.update_scalar(f'val/{k}_origin', v) + + def before_test_epoch(self, runner) -> None: + """We load parameter values from ema model to source model before test. + + Args: + runner (Runner): The runner of the training process. + """ + if self.evaluate_on_ema: + # Swap when evaluate on ema + self._swap_ema_parameters() + MMLogger.get_current_instance().info('Start testing on EMA model.') + else: + MMLogger.get_current_instance().info( + 'Start testing on the original model.') + + def after_test_epoch(self, + runner: Runner, + metrics: Optional[Dict[str, float]] = None) -> None: + """We recover source model's parameter from ema model after test. + + Args: + runner (Runner): The runner of the testing process. + metrics (Dict[str, float], optional): Evaluation results of all + metrics on test dataset. The keys are the names of the + metrics, and the values are corresponding results. + """ + if self.evaluate_on_ema: + # Swap when evaluate on ema + self._swap_ema_parameters() + + if self.evaluate_on_ema and self.evaluate_on_origin: + # Re-evaluate if evaluate on both ema and origin. + MMLogger.get_current_instance().info( + 'Start testing on the original model.') + test_loop = runner.test_loop + + runner.model.eval() + for idx, data_batch in enumerate(test_loop.dataloader): + test_loop.run_iter(idx, data_batch) + + # compute metrics + origin_metrics = test_loop.evaluator.evaluate( + len(test_loop.dataloader.dataset)) + + for k, v in origin_metrics.items(): + runner.message_hub.update_scalar(f'test/{k}_origin', v) + + def after_load_checkpoint(self, runner, checkpoint: dict) -> None: + """Resume ema parameters from checkpoint. + + Args: + runner (Runner): The runner of the testing process. + """ + from mmengine.runner.checkpoint import load_state_dict + if 'ema_state_dict' in checkpoint: + # The original model parameters are actually saved in ema + # field swap the weights back to resume ema state. + self._swap_ema_state_dict(checkpoint) + self.ema_model.load_state_dict( + checkpoint['ema_state_dict'], strict=self.strict_load) + self.load_ema_from_ckpt = True + + # Support load checkpoint without ema state dict. + else: + load_state_dict( + self.ema_model.module, + copy.deepcopy(checkpoint['state_dict']), + strict=self.strict_load) + + @property + def _src_params(self): + if self.ema_model.update_buffers: + return itertools.chain(self.src_model.parameters(), + self.src_model.buffers()) + else: + return self.src_model.parameters() + + @property + def _ema_params(self): + if self.ema_model.update_buffers: + return itertools.chain(self.ema_model.module.parameters(), + self.ema_model.module.buffers()) + else: + return self.ema_model.module.parameters() diff --git a/mmpretrain/engine/hooks/margin_head_hooks.py b/mmpretrain/engine/hooks/margin_head_hooks.py new file mode 100644 index 0000000..fbeae7a --- /dev/null +++ b/mmpretrain/engine/hooks/margin_head_hooks.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. 
All rights reserved +import numpy as np +from mmengine.hooks import Hook +from mmengine.model import is_model_wrapper + +from mmpretrain.models.heads import ArcFaceClsHead +from mmpretrain.registry import HOOKS + + +@HOOKS.register_module() +class SetAdaptiveMarginsHook(Hook): + r"""Set adaptive-margins in ArcFaceClsHead based on the power of + category-wise count. + + A PyTorch implementation of paper `Google Landmark Recognition 2020 + Competition Third Place Solution `_. + The margins will be + :math:`\text{f}(n) = (marginMax - marginMin) · norm(n^p) + marginMin`. + The `n` indicates the number of occurrences of a category. + + Args: + margin_min (float): Lower bound of margins. Defaults to 0.05. + margin_max (float): Upper bound of margins. Defaults to 0.5. + power (float): The power of category freqercy. Defaults to -0.25. + """ + + def __init__(self, margin_min=0.05, margin_max=0.5, power=-0.25) -> None: + self.margin_min = margin_min + self.margin_max = margin_max + self.margin_range = margin_max - margin_min + self.p = power + + def before_train(self, runner): + """change the margins in ArcFaceClsHead. + + Args: + runner (obj: `Runner`): Runner. + """ + model = runner.model + if is_model_wrapper(model): + model = model.module + + if (hasattr(model, 'head') + and not isinstance(model.head, ArcFaceClsHead)): + raise ValueError( + 'Hook ``SetFreqPowAdvMarginsHook`` could only be used ' + f'for ``ArcFaceClsHead``, but get {type(model.head)}') + + # generate margins base on the dataset. + gt_labels = runner.train_dataloader.dataset.get_gt_labels() + label_count = np.bincount(gt_labels) + label_count[label_count == 0] = 1 # At least one occurrence + pow_freq = np.power(label_count, self.p) + + min_f, max_f = pow_freq.min(), pow_freq.max() + normized_pow_freq = (pow_freq - min_f) / (max_f - min_f) + margins = normized_pow_freq * self.margin_range + self.margin_min + + assert len(margins) == runner.model.head.num_classes + + model.head.set_margins(margins) diff --git a/mmpretrain/engine/hooks/precise_bn_hook.py b/mmpretrain/engine/hooks/precise_bn_hook.py new file mode 100644 index 0000000..4fb0e4c --- /dev/null +++ b/mmpretrain/engine/hooks/precise_bn_hook.py @@ -0,0 +1,223 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Adapted from https://github.com/facebookresearch/pycls/blob/f8cd962737e33ce9e19b3083a33551da95c2d9c0/pycls/core/net.py # noqa: E501 +# Original licence: Copyright (c) 2019 Facebook, Inc under the Apache License 2.0 # noqa: E501 + +import itertools +import logging +from typing import List, Optional, Sequence, Union + +import mmengine +import torch +import torch.nn as nn +from mmengine.hooks import Hook +from mmengine.logging import print_log +from mmengine.model import is_model_wrapper +from mmengine.runner import EpochBasedTrainLoop, IterBasedTrainLoop, Runner +from mmengine.utils import ProgressBar +from torch.functional import Tensor +from torch.nn import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm +from torch.nn.modules.instancenorm import _InstanceNorm +from torch.utils.data import DataLoader + +from mmpretrain.registry import HOOKS + +DATA_BATCH = Optional[Sequence[dict]] + + +def scaled_all_reduce(tensors: List[Tensor], num_gpus: int) -> List[Tensor]: + """Performs the scaled all_reduce operation on the provided tensors. + + The input tensors are modified in-place. Currently supports only the sum + reduction operator. The reduced values are scaled by the inverse size of + the process group. 
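+
+ Example (illustrative; with a single process the input is returned
+ unchanged)::
+
+ >>> scaled_all_reduce([torch.ones(2)], num_gpus=1)
+ [tensor([1., 1.])]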
+ + Args: + tensors (List[torch.Tensor]): The tensors to process. + num_gpus (int): The number of gpus to use + Returns: + List[torch.Tensor]: The processed tensors. + """ + # There is no need for reduction in the single-proc case + if num_gpus == 1: + return tensors + # Queue the reductions + reductions = [] + for tensor in tensors: + reduction = torch.distributed.all_reduce(tensor, async_op=True) + reductions.append(reduction) + # Wait for reductions to finish + for reduction in reductions: + reduction.wait() + # Scale the results + for tensor in tensors: + tensor.mul_(1.0 / num_gpus) + return tensors + + +@torch.no_grad() +def update_bn_stats( + model: nn.Module, + loader: DataLoader, + num_samples: int = 8192, + logger: Optional[Union[logging.Logger, str]] = None) -> None: + """Computes precise BN stats on training data. + + Args: + model (nn.module): The model whose bn stats will be recomputed. + loader (DataLoader): PyTorch dataloader._dataloader + num_samples (int): The number of samples to update the bn stats. + Defaults to 8192. + logger (logging.Logger or str, optional): If the type of logger is + ``logging.Logger``, we directly use logger to log messages. + Some special loggers are: + - "silent": No message will be printed. + - "current": Use latest created logger to log message. + - other str: Instance name of logger. The corresponding logger + will log message if it has been created, otherwise will raise a + `ValueError`. + - None: The `print()` method will be used to print log messages. + """ + if is_model_wrapper(model): + model = model.module + + # get dist info + rank, world_size = mmengine.dist.get_dist_info() + # Compute the number of mini-batches to use, if the size of dataloader is + # less than num_iters, use all the samples in dataloader. + num_iter = num_samples // (loader.batch_size * world_size) + num_iter = min(num_iter, len(loader)) + # Retrieve the BN layers + bn_layers = [ + m for m in model.modules() + if m.training and isinstance(m, (_BatchNorm)) + ] + if len(bn_layers) == 0: + print_log('No BN found in model', logger=logger, level=logging.WARNING) + return + print_log( + f'{len(bn_layers)} BN found, run {num_iter} iters...', logger=logger) + + # Finds all the other norm layers with training=True. 
+ other_norm_layers = [ + m for m in model.modules() + if m.training and isinstance(m, (_InstanceNorm, GroupNorm)) + ] + if len(other_norm_layers) > 0: + print_log( + 'IN/GN stats will not be updated in PreciseHook.', + logger=logger, + level=logging.INFO) + + # Initialize BN stats storage for computing + # mean(mean(batch)) and mean(var(batch)) + running_means = [torch.zeros_like(bn.running_mean) for bn in bn_layers] + running_vars = [torch.zeros_like(bn.running_var) for bn in bn_layers] + # Remember momentum values + momentums = [bn.momentum for bn in bn_layers] + # Set momentum to 1.0 to compute BN stats that reflect the current batch + for bn in bn_layers: + bn.momentum = 1.0 + # Average the BN stats for each BN layer over the batches + if rank == 0: + prog_bar = ProgressBar(num_iter) + + for data in itertools.islice(loader, num_iter): + data = model.data_preprocessor(data, False) + model(**data) + + for i, bn in enumerate(bn_layers): + running_means[i] += bn.running_mean / num_iter + running_vars[i] += bn.running_var / num_iter + if rank == 0: + prog_bar.update() + + # Sync BN stats across GPUs (no reduction if 1 GPU used) + running_means = scaled_all_reduce(running_means, world_size) + running_vars = scaled_all_reduce(running_vars, world_size) + # Set BN stats and restore original momentum values + for i, bn in enumerate(bn_layers): + bn.running_mean = running_means[i] + bn.running_var = running_vars[i] + bn.momentum = momentums[i] + + +@HOOKS.register_module() +class PreciseBNHook(Hook): + """Precise BN hook. + + Recompute and update the batch norm stats to make them more precise. During + training both BN stats and the weight are changing after every iteration, + so the running average can not precisely reflect the actual stats of the + current model. + + With this hook, the BN stats are recomputed with fixed weights, to make the + running average more precise. Specifically, it computes the true average of + per-batch mean/variance instead of the running average. See Sec. 3 of the + paper `Rethinking Batch in BatchNorm ` + for details. + + This hook will update BN stats, so it should be executed before + ``CheckpointHook`` and ``EMAHook``, generally set its priority to + "ABOVE_NORMAL". + + Args: + num_samples (int): The number of samples to update the bn stats. + Defaults to 8192. + interval (int): Perform precise bn interval. If the train loop is + `EpochBasedTrainLoop` or `by_epoch=True`, its unit is 'epoch'; if the + train loop is `IterBasedTrainLoop` or `by_epoch=False`, its unit is + 'iter'. Defaults to 1. + """ + + def __init__(self, num_samples: int = 8192, interval: int = 1) -> None: + assert interval > 0 and num_samples > 0, "'interval' and " \ + "'num_samples' must be bigger than 0." + + self.interval = interval + self.num_samples = num_samples + + def _perform_precise_bn(self, runner: Runner) -> None: + """perform precise bn.""" + print_log( + f'Running Precise BN for {self.num_samples} samples...', + logger=runner.logger) + update_bn_stats( + runner.model, + runner.train_loop.dataloader, + self.num_samples, + logger=runner.logger) + print_log('Finish Precise BN, BN stats updated.', logger=runner.logger) + + def after_train_epoch(self, runner: Runner) -> None: + """Calculate prcise BN and broadcast BN stats across GPUs. + + Args: + runner (obj:`Runner`): The runner of the training process. + """ + # if use `EpochBasedTrainLoop``, do perform precise every + # `self.interval` epochs. 
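+        # A typical config entry for this hook could look as follows
+        # (illustrative values; the priority is raised so the hook runs
+        # before ``CheckpointHook`` and ``EMAHook``, as noted in the class
+        # docstring):
+        #
+        #     custom_hooks = [
+        #         dict(type='PreciseBNHook', num_samples=8192, interval=1,
+        #              priority='ABOVE_NORMAL')
+        #     ]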
+ if isinstance(runner.train_loop, + EpochBasedTrainLoop) and self.every_n_epochs( + runner, self.interval): + self._perform_precise_bn(runner) + + def after_train_iter(self, + runner, + batch_idx: int, + data_batch: DATA_BATCH = None, + outputs: Optional[dict] = None) -> None: + """Calculate prcise BN and broadcast BN stats across GPUs. + + Args: + runner (obj:`Runner`): The runner of the training process. + batch_idx (int): The index of the current batch in the train loop. + data_batch (Sequence[dict], optional): Data from dataloader. + Defaults to None. + """ + # if use `IterBasedTrainLoop``, do perform precise every + # `self.interval` iters. + if isinstance(runner.train_loop, + IterBasedTrainLoop) and self.every_n_train_iters( + runner, self.interval): + self._perform_precise_bn(runner) diff --git a/mmpretrain/engine/hooks/retriever_hooks.py b/mmpretrain/engine/hooks/retriever_hooks.py new file mode 100644 index 0000000..6bd7c7a --- /dev/null +++ b/mmpretrain/engine/hooks/retriever_hooks.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved +import warnings + +from mmengine.hooks import Hook +from mmengine.model import is_model_wrapper + +from mmpretrain.models import BaseRetriever +from mmpretrain.registry import HOOKS + + +@HOOKS.register_module() +class PrepareProtoBeforeValLoopHook(Hook): + """The hook to prepare the prototype in retrievers. + + Since the encoders of the retriever changes during training, the prototype + changes accordingly. So the `prototype_vecs` needs to be regenerated before + validation loop. + """ + + def before_val(self, runner) -> None: + model = runner.model + if is_model_wrapper(model): + model = model.module + + if isinstance(model, BaseRetriever): + if hasattr(model, 'prepare_prototype'): + model.prepare_prototype() + else: + warnings.warn( + 'Only the `mmpretrain.models.retrievers.BaseRetriever` ' + 'can execute `PrepareRetrieverPrototypeHook`, but got ' + f'`{type(model)}`') diff --git a/mmpretrain/engine/hooks/simsiam_hook.py b/mmpretrain/engine/hooks/simsiam_hook.py new file mode 100644 index 0000000..fabc4fa --- /dev/null +++ b/mmpretrain/engine/hooks/simsiam_hook.py @@ -0,0 +1,48 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence + +from mmengine.hooks import Hook + +from mmpretrain.registry import HOOKS + + +@HOOKS.register_module() +class SimSiamHook(Hook): + """Hook for SimSiam. + + This hook is for SimSiam to fix learning rate of predictor. + + Args: + fix_pred_lr (bool): whether to fix the lr of predictor or not. + lr (float): the value of fixed lr. + adjust_by_epoch (bool, optional): whether to set lr by epoch or iter. + Defaults to True. 
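+
+    Example:
+        A minimal ``custom_hooks`` entry (the values are illustrative, and
+        it is assumed that the optimizer constructor marks the predictor's
+        param groups with ``fix_lr=True``):
+
+        .. code:: python
+
+            custom_hooks = [
+                dict(type='SimSiamHook', fix_pred_lr=True, lr=0.05)
+            ]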
+ """ + + def __init__(self, + fix_pred_lr: bool, + lr: float, + adjust_by_epoch: Optional[bool] = True) -> None: + self.fix_pred_lr = fix_pred_lr + self.lr = lr + self.adjust_by_epoch = adjust_by_epoch + + def before_train_iter(self, + runner, + batch_idx: int, + data_batch: Optional[Sequence[dict]] = None) -> None: + """fix lr of predictor by iter.""" + if self.adjust_by_epoch: + return + else: + if self.fix_pred_lr: + for param_group in runner.optim_wrapper.optimizer.param_groups: + if 'fix_lr' in param_group and param_group['fix_lr']: + param_group['lr'] = self.lr + + def before_train_epoch(self, runner) -> None: + """fix lr of predictor by epoch.""" + if self.fix_pred_lr: + for param_group in runner.optim_wrapper.optimizer.param_groups: + if 'fix_lr' in param_group and param_group['fix_lr']: + param_group['lr'] = self.lr diff --git a/mmpretrain/engine/hooks/swav_hook.py b/mmpretrain/engine/hooks/swav_hook.py new file mode 100644 index 0000000..be5f3a3 --- /dev/null +++ b/mmpretrain/engine/hooks/swav_hook.py @@ -0,0 +1,119 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from typing import Dict, List, Optional, Sequence + +import torch +from mmengine.device import get_device +from mmengine.dist import get_rank, get_world_size, is_distributed +from mmengine.hooks import Hook +from mmengine.logging import MMLogger + +from mmpretrain.registry import HOOKS +from mmpretrain.utils import get_ori_model + + +@HOOKS.register_module() +class SwAVHook(Hook): + """Hook for SwAV. + + This hook builds the queue in SwAV according to ``epoch_queue_starts``. + The queue will be saved in ``runner.work_dir`` or loaded at start epoch + if the path folder has queues saved before. + + Args: + batch_size (int): the batch size per GPU for computing. + epoch_queue_starts (int, optional): from this epoch, starts to use the + queue. Defaults to 15. + crops_for_assign (list[int], optional): list of crops id used for + computing assignments. Defaults to [0, 1]. + feat_dim (int, optional): feature dimension of output vector. + Defaults to 128. + queue_length (int, optional): length of the queue (0 for no queue). + Defaults to 0. + interval (int, optional): the interval to save the queue. + Defaults to 1. + frozen_layers_cfg (dict, optional): Dict to config frozen layers. + The key-value pair is layer name and its frozen iters. If frozen, + the layers don't need gradient. Defaults to dict(). 
+ """ + + def __init__( + self, + batch_size: int, + epoch_queue_starts: Optional[int] = 15, + crops_for_assign: Optional[List[int]] = [0, 1], + feat_dim: Optional[int] = 128, + queue_length: Optional[int] = 0, + interval: Optional[int] = 1, + frozen_layers_cfg: Optional[Dict] = dict() + ) -> None: + self.batch_size = batch_size * get_world_size() + self.epoch_queue_starts = epoch_queue_starts + self.crops_for_assign = crops_for_assign + self.feat_dim = feat_dim + self.queue_length = queue_length + self.interval = interval + self.frozen_layers_cfg = frozen_layers_cfg + self.requires_grad = True + self.queue = None + + def before_run(self, runner) -> None: + """Check whether the queues exist locally or not.""" + if is_distributed(): + self.queue_path = osp.join(runner.work_dir, + 'queue' + str(get_rank()) + '.pth') + else: + self.queue_path = osp.join(runner.work_dir, 'queue.pth') + + # load the queues if queues exist locally + if osp.isfile(self.queue_path): + self.queue = torch.load(self.queue_path)['queue'] + get_ori_model(runner.model).head.loss_module.queue = self.queue + MMLogger.get_current_instance().info( + f'Load queue from file: {self.queue_path}') + + # the queue needs to be divisible by the batch size + self.queue_length -= self.queue_length % self.batch_size + + def before_train_iter(self, + runner, + batch_idx: int, + data_batch: Optional[Sequence[dict]] = None) -> None: + """Freeze layers before specific iters according to the config.""" + for layer, frozen_iters in self.frozen_layers_cfg.items(): + if runner.iter < frozen_iters and self.requires_grad: + self.requires_grad = False + for name, p in get_ori_model(runner.model).named_parameters(): + if layer in name: + p.requires_grad = False + elif runner.iter >= frozen_iters and not self.requires_grad: + self.requires_grad = True + for name, p in get_ori_model(runner.model).named_parameters(): + if layer in name: + p.requires_grad = True + + def before_train_epoch(self, runner) -> None: + """Check the queues' state.""" + # optionally starts a queue + if self.queue_length > 0 \ + and runner.epoch >= self.epoch_queue_starts \ + and self.queue is None: + + self.queue = torch.zeros( + len(self.crops_for_assign), + self.queue_length // runner.world_size, + self.feat_dim, + device=get_device(), + ) + + # set the boolean type of use_the_queue + get_ori_model(runner.model).head.loss_module.queue = self.queue + get_ori_model(runner.model).head.loss_module.use_queue = False + + def after_train_epoch(self, runner) -> None: + """Save the queues locally.""" + self.queue = get_ori_model(runner.model).head.loss_module.queue + + if self.queue is not None and self.every_n_epochs( + runner, self.interval): + torch.save({'queue': self.queue}, self.queue_path) diff --git a/mmpretrain/engine/hooks/switch_recipe_hook.py b/mmpretrain/engine/hooks/switch_recipe_hook.py new file mode 100644 index 0000000..914b957 --- /dev/null +++ b/mmpretrain/engine/hooks/switch_recipe_hook.py @@ -0,0 +1,169 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import OrderedDict +from copy import deepcopy + +from mmcv.transforms import Compose +from mmengine.hooks import Hook +from mmengine.model import is_model_wrapper + +from mmpretrain.models.utils import RandomBatchAugment +from mmpretrain.registry import HOOKS, MODEL_WRAPPERS, MODELS + + +@HOOKS.register_module() +class SwitchRecipeHook(Hook): + """switch recipe during the training loop, including train pipeline, batch + augments and loss currently. 
+ + Args: + schedule (list): Every item of the schedule list should be a dict, and + the dict should have ``action_epoch`` and some of + ``train_pipeline``, ``train_augments`` and ``loss`` keys: + + - ``action_epoch`` (int): switch training recipe at which epoch. + - ``train_pipeline`` (list, optional): The new data pipeline of the + train dataset. If not specified, keep the original settings. + - ``batch_augments`` (dict | None, optional): The new batch + augmentations of during training. See :mod:`Batch Augmentations + ` for more details. + If None, disable batch augmentations. If not specified, keep the + original settings. + - ``loss`` (dict, optional): The new loss module config. If not + specified, keep the original settings. + + Example: + To use this hook in config files. + + .. code:: python + + custom_hooks = [ + dict( + type='SwitchRecipeHook', + schedule=[ + dict( + action_epoch=30, + train_pipeline=pipeline_after_30e, + batch_augments=batch_augments_after_30e, + loss=loss_after_30e, + ), + dict( + action_epoch=60, + # Disable batch augmentations after 60e + # and keep other settings. + batch_augments=None, + ), + ] + ) + ] + """ + priority = 'NORMAL' + + def __init__(self, schedule): + recipes = {} + for recipe in schedule: + assert 'action_epoch' in recipe, \ + 'Please set `action_epoch` in every item ' \ + 'of the `schedule` in the SwitchRecipeHook.' + recipe = deepcopy(recipe) + if 'train_pipeline' in recipe: + recipe['train_pipeline'] = Compose(recipe['train_pipeline']) + if 'batch_augments' in recipe: + batch_augments = recipe['batch_augments'] + if isinstance(batch_augments, dict): + batch_augments = RandomBatchAugment(**batch_augments) + recipe['batch_augments'] = batch_augments + if 'loss' in recipe: + loss = recipe['loss'] + if isinstance(loss, dict): + loss = MODELS.build(loss) + recipe['loss'] = loss + + action_epoch = recipe.pop('action_epoch') + assert action_epoch not in recipes, \ + f'The `action_epoch` {action_epoch} is repeated ' \ + 'in the SwitchRecipeHook.' + recipes[action_epoch] = recipe + self.schedule = OrderedDict(sorted(recipes.items())) + + def before_train(self, runner) -> None: + """before run setting. If resume form a checkpoint, do all switch + before the current epoch. + + Args: + runner (Runner): The runner of the training, validation or testing + process. 
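+
+        For example (illustrative numbers), when resuming at epoch 45 with
+        switches scheduled at epochs 30 and 60, only the epoch-30 recipe is
+        re-applied here; the epoch-60 switch is left to
+        ``before_train_epoch``.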
+ """ + if runner._resume: + for action_epoch, recipe in self.schedule.items(): + if action_epoch >= runner.epoch + 1: + break + self._do_switch(runner, recipe, + f' (resume recipe of epoch {action_epoch})') + + def before_train_epoch(self, runner): + """do before train epoch.""" + recipe = self.schedule.get(runner.epoch + 1, None) + if recipe is not None: + self._do_switch(runner, recipe, f' at epoch {runner.epoch + 1}') + + def _do_switch(self, runner, recipe, extra_info=''): + """do the switch aug process.""" + if 'batch_augments' in recipe: + self._switch_batch_augments(runner, recipe['batch_augments']) + runner.logger.info(f'Switch batch augments{extra_info}.') + + if 'train_pipeline' in recipe: + self._switch_train_pipeline(runner, recipe['train_pipeline']) + runner.logger.info(f'Switch train pipeline{extra_info}.') + + if 'loss' in recipe: + self._switch_loss(runner, recipe['loss']) + runner.logger.info(f'Switch loss{extra_info}.') + + @staticmethod + def _switch_batch_augments(runner, batch_augments): + """switch the train augments.""" + model = runner.model + if is_model_wrapper(model): + model = model.module + + model.data_preprocessor.batch_augments = batch_augments + + @staticmethod + def _switch_train_pipeline(runner, train_pipeline): + """switch the train loader dataset pipeline.""" + + def switch_pipeline(dataset, pipeline): + if hasattr(dataset, 'pipeline'): + # for usual dataset + dataset.pipeline = pipeline + elif hasattr(dataset, 'datasets'): + # for concat dataset wrapper + for ds in dataset.datasets: + switch_pipeline(ds, pipeline) + elif hasattr(dataset, 'dataset'): + # for other dataset wrappers + switch_pipeline(dataset.dataset, pipeline) + else: + raise RuntimeError( + 'Cannot access the `pipeline` of the dataset.') + + train_loader = runner.train_loop.dataloader + switch_pipeline(train_loader.dataset, train_pipeline) + + # To restart the iterator of dataloader when `persistent_workers=True` + train_loader._iterator = None + + @staticmethod + def _switch_loss(runner, loss_module): + """switch the loss module.""" + model = runner.model + if is_model_wrapper(model, MODEL_WRAPPERS): + model = model.module + + if hasattr(model, 'loss_module'): + model.loss_module = loss_module + elif hasattr(model, 'head') and hasattr(model.head, 'loss_module'): + model.head.loss_module = loss_module + else: + raise RuntimeError('Cannot access the `loss_module` of the model.') diff --git a/mmpretrain/engine/hooks/visualization_hook.py b/mmpretrain/engine/hooks/visualization_hook.py new file mode 100644 index 0000000..64d2230 --- /dev/null +++ b/mmpretrain/engine/hooks/visualization_hook.py @@ -0,0 +1,126 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import os.path as osp +from typing import Optional, Sequence + +from mmengine.fileio import join_path +from mmengine.hooks import Hook +from mmengine.runner import EpochBasedTrainLoop, Runner +from mmengine.visualization import Visualizer + +from mmpretrain.registry import HOOKS +from mmpretrain.structures import DataSample + + +@HOOKS.register_module() +class VisualizationHook(Hook): + """Classification Visualization Hook. Used to visualize validation and + testing prediction results. + + - If ``out_dir`` is specified, all storage backends are ignored + and save the image to the ``out_dir``. + - If ``show`` is True, plot the result image in a window, please + confirm you are able to access the graphical interface. + + Args: + enable (bool): Whether to enable this hook. Defaults to False. 
+ interval (int): The interval of samples to visualize. Defaults to 5000. + show (bool): Whether to display the drawn image. Defaults to False. + out_dir (str, optional): directory where painted images will be saved + in the testing process. If None, handle with the backends of the + visualizer. Defaults to None. + **kwargs: other keyword arguments of + :meth:`mmpretrain.visualization.UniversalVisualizer.visualize_cls`. + """ + + def __init__(self, + enable=False, + interval: int = 5000, + show: bool = False, + out_dir: Optional[str] = None, + **kwargs): + self._visualizer: Visualizer = Visualizer.get_current_instance() + + self.enable = enable + self.interval = interval + self.show = show + self.out_dir = out_dir + + self.draw_args = {**kwargs, 'show': show} + + def _draw_samples(self, + batch_idx: int, + data_batch: dict, + data_samples: Sequence[DataSample], + step: int = 0) -> None: + """Visualize every ``self.interval`` samples from a data batch. + + Args: + batch_idx (int): The index of the current batch in the val loop. + data_batch (dict): Data from dataloader. + outputs (Sequence[:obj:`DataSample`]): Outputs from model. + step (int): Global step value to record. Defaults to 0. + """ + if self.enable is False: + return + + batch_size = len(data_samples) + images = data_batch['inputs'] + start_idx = batch_size * batch_idx + end_idx = start_idx + batch_size + + # The first index divisible by the interval, after the start index + first_sample_id = math.ceil(start_idx / self.interval) * self.interval + + for sample_id in range(first_sample_id, end_idx, self.interval): + image = images[sample_id - start_idx] + image = image.permute(1, 2, 0).cpu().numpy().astype('uint8') + + data_sample = data_samples[sample_id - start_idx] + if 'img_path' in data_sample: + # osp.basename works on different platforms even file clients. + sample_name = osp.basename(data_sample.get('img_path')) + else: + sample_name = str(sample_id) + + draw_args = self.draw_args + if self.out_dir is not None: + draw_args['out_file'] = join_path(self.out_dir, + f'{sample_name}_{step}.png') + + self._visualizer.visualize_cls( + image=image, + data_sample=data_sample, + step=step, + name=sample_name, + **self.draw_args, + ) + + def after_val_iter(self, runner: Runner, batch_idx: int, data_batch: dict, + outputs: Sequence[DataSample]) -> None: + """Visualize every ``self.interval`` samples during validation. + + Args: + runner (:obj:`Runner`): The runner of the validation process. + batch_idx (int): The index of the current batch in the val loop. + data_batch (dict): Data from dataloader. + outputs (Sequence[:obj:`DataSample`]): Outputs from model. + """ + if isinstance(runner.train_loop, EpochBasedTrainLoop): + step = runner.epoch + else: + step = runner.iter + + self._draw_samples(batch_idx, data_batch, outputs, step=step) + + def after_test_iter(self, runner: Runner, batch_idx: int, data_batch: dict, + outputs: Sequence[DataSample]) -> None: + """Visualize every ``self.interval`` samples during test. + + Args: + runner (:obj:`Runner`): The runner of the testing process. + batch_idx (int): The index of the current batch in the test loop. + data_batch (dict): Data from dataloader. + outputs (Sequence[:obj:`DetDataSample`]): Outputs from model. 
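+
+        Example:
+            The hook is usually switched on from a config, e.g. (a hedged
+            sketch with illustrative values):
+
+            .. code:: python
+
+                custom_hooks = [
+                    dict(
+                        type='VisualizationHook',
+                        enable=True,
+                        interval=1000,
+                        out_dir='./vis_results')
+                ]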
+ """ + self._draw_samples(batch_idx, data_batch, outputs, step=0) diff --git a/mmpretrain/engine/hooks/warmup_param_hook.py b/mmpretrain/engine/hooks/warmup_param_hook.py new file mode 100644 index 0000000..b45d891 --- /dev/null +++ b/mmpretrain/engine/hooks/warmup_param_hook.py @@ -0,0 +1,66 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import operator as op +from typing import Any, Optional, Union + +from mmengine.hooks import Hook + +from mmpretrain.registry import HOOKS +from mmpretrain.utils import get_ori_model + + +@HOOKS.register_module() +class WarmupParamHook(Hook): + """This is a hook used for changing the parameters other than optimizations + that need to warmup inside the module. + + This hook can extend with more detailed warmup rule if necessary. + + Args: + param_name (str): The parameter name that needs to be altered. + module_name (str): Module name that belongs to the model. Such as + `head`, `head.loss`, etc. + warmup_epochs (int): The warmup epochs for this parameter. + """ + + def __init__( + self, + param_name: str, + module_name: str, + warmup_epochs: int, + ) -> None: + self.param_name = param_name + self.warmup_epochs = warmup_epochs + # getter for module which saves the changed parameter + self.module_getter = op.attrgetter(module_name) + + def get_param(self, runner) -> Any: + """Get the parameter.""" + try: + module = self.module_getter(get_ori_model(runner.model)) + return getattr(module, self.param_name) + except AttributeError as e: + raise AttributeError(f'{e}. Please check hook settings.') + + def set_param(self, runner, value) -> None: + """Set the parameter.""" + try: + module = self.module_getter(get_ori_model(runner.model)) + setattr(module, self.param_name, value) + except AttributeError as e: + raise AttributeError(f'{e}. Please check hook settings.') + + def before_train(self, runner) -> None: + """Get the original value before train.""" + self.ori_val = self.get_param(runner) + + def before_train_iter( + self, + runner, + batch_idx: int, + data_batch: Optional[Union[dict, tuple, list]] = None) -> None: + """Set the warmup value before each train iter.""" + cur_iter = runner.iter + iters_per_epoch = runner.max_iters / runner.max_epochs + new_val = self.ori_val * min( + 1, cur_iter / (self.warmup_epochs * iters_per_epoch)) + self.set_param(runner, new_val) diff --git a/mmpretrain/engine/optimizers/__init__.py b/mmpretrain/engine/optimizers/__init__.py new file mode 100644 index 0000000..bd53a37 --- /dev/null +++ b/mmpretrain/engine/optimizers/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .adan_t import Adan
+from .lamb import Lamb
+from .lars import LARS
+from .layer_decay_optim_wrapper_constructor import \
+    LearningRateDecayOptimWrapperConstructor
+
+__all__ = ['Lamb', 'Adan', 'LARS', 'LearningRateDecayOptimWrapperConstructor']
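For orientation, a minimal PyTorch-style sketch of how the optimizers exported above can be used directly; the tiny model and data are placeholders and the hyper-parameters are illustrative, not recommendations:

    import torch
    import torch.nn as nn

    from mmpretrain.engine.optimizers import Adan

    model = nn.Linear(8, 2)
    optimizer = Adan(model.parameters(), lr=1e-3, betas=(0.98, 0.92, 0.99),
                     weight_decay=0.02)

    inputs = torch.randn(4, 8)
    targets = torch.randint(0, 2, (4,))

    # One training step: forward, backward, then the Adan update.
    loss = nn.functional.cross_entropy(model(inputs), targets)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

Inside mmpretrain configs the same optimizers are normally selected through the registry instead, e.g. ``optim_wrapper = dict(optimizer=dict(type='Adan', lr=1e-3))``.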
zs&C@d{})ZYMTCuRW(u!ccY+8i_mC({LuA(+6^J{Jy7dCycD>=2LF^9#+=R4)$R39` za1@6AG3dvSP+5+H2zQL@Cq>giJNlK4GA{zeY;JMesPKw0gFecMWjg!<_R*;_FP}K( z6~n@+g+2VQ)>{V!jTCeXG79PWRApm$SLIPxxtc+P|39163IToScL=+Ro(Es)!5F|X z&)b1m!uX&#Ti#z-kcPg(c zuARGXD56v?WOlOOMWhWFCf@27wZGMLf*I8Gw@*<5$j(o4QccNLJ5JQT!v8ClO%H}r g4OOG*pAunWT<6w6d8?%Nm=a^za5x#$HZK|f1qU?uy8r+H literal 0 HcmV?d00001 diff --git a/mmpretrain/engine/optimizers/__pycache__/layer_decay_optim_wrapper_constructor.cpython-310.pyc b/mmpretrain/engine/optimizers/__pycache__/layer_decay_optim_wrapper_constructor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3739b772a02a49af1facd33481210fa50191b99e GIT binary patch literal 5756 zcmb_g&2JmW72jPhKShbOD9M)O$djavL_nsUq(KwGa^l!-V8E6G+i6gy!Ft6RQfn=D z>Di@asmn_hpefJ*xwU9uKt2We2l6N6&|{B#?I}IA1$s%7y1zHOq$t?|f+9=oaK7G~ z_x;|iKRH>_@cik!uiF1St!aOw!T6KK;4QqtAJA}(>#o+-o4QVY!!^2@W~OU4&2F}t zRdX3P*UdNcs&Bf5Zn0TZ{j6K+PBbUFb?O^7y_KwDjy!4UAOQN`++1H!1eB!oNFMkb+w7MQoaM8B=ZpfWhSkJ_T%eL#< zo34n>>rN2Hg`2(5@jctc*20Y&*ZodMNc5MkBw)1MeP7zWo{*P)F9>D775cJn#3k!( zJ8W%j__7;MTGy~-do4jjfE%8NEyZ`F-|wj%#q0L2km>5e&3EryyJ5Y5>(aaLUcF^q zzPWMx&aL+@-?@3~c8ka!e`+`0EqnKS7_`?5ns&u$w}lj5$lMf+ zNx%inmVyN$WNlwEez=KP44B_$n|5n^)AvN9RAPAE-esv4R++F{TdZeGyDLH=S+GU8 z!Y$y#4D(6c&K@m@Fa({!DziP#c7#IIll}vT3rvB8PQOc}-My=@?Kpw3TJ6r=yDaos z&=W1Ey?c1jSy~?O)=!o5q!wc zj&M3#VakJ&rm`u}&j{tA%mTI|9=5=@S`W8;XcJoU;BL0-?AheRDJ%6uvS|57CM%&> znagnsp^#_Mb4X6xwZk8o95~ro4qNd86~)E`ri4nmY4wB9?^@epHz2*JK)A+kD-R)6 zj7%pu>Lg^31k_;H+Np+cT55J`g*0#+zw)S{zllgmcAFR}^OSS984w&d*j2K|#NSe% z9|DaC8yQgI)X|j?!yI+V`1=_O##nM;TsK{!NuAd>+Qm6yd;28OjeQB z+DS4STUQ*eq*9>ubO*)ab>jZ$8m))BJ+Z!giQC=xmsgKK(yA-hea4&Q9(I;&Y)AoOBGP?S3}bx?Vfg^bKTJ%8-rYEghrG* z$UN5e_0Z%7&wP>@h>WltPKHxqrETsTgGu14@<~4R ziGD@<@Q=SX22+CyuSAuO?&yPRl!>NJYQNW_%Dof8z5BlcwrNnR%fC=>a3ZXQGr!Z? zID+bCipw=e2`b@K}p#2XlN{(J>R9h-PNAsLE>>&>ozOYL9hY z^Ug<;^79CkrMXmtkko#L}t|0+U_iS@6rJ~3K1 zF-p!dKZg9l?w`ot!aak;AlQNsbJ9V6MRejb!%8XfB;>ImOfv=C zQgYq)XwJj#$xyW5`Tu&w5 z1ZY@qj}dc8c2FV{S;HqKdWEi9A5a$g5asbD#l+}zHary-@Q_xJ?jXDJBghCT5@*k~ zTew&ZkD&jQ(-qHa+)#lgEyB}zl&X}(iT|T;quT~u97dH9CGI5Q_EFhWT?s*rwjRZh z#51;nES{d3&ZKouLct+SJTVrl@8F7;bPfBsW z??@3?9clA8ziHzJzXQTN@epL9*u}QkZ23@PrswzKT))S0)y4+gc+&llZH_+9cs!m; zPH1g8E(a5F=}>i&)E=7@_2R4(if#~Rx!CM?V)LHwc=a=}83?znY+0Tlnnx;$bK5)k z))2^9LNSpDPhFxinNNVvtYW47WX3T6 zWtw}h{vZAd$0B#yz-O<5r6grp_-bb*F&X``W!=Z864DGtdCTH{%d(^ipxC5rCF$#D zoV$?}uNee+Nd>CRP(uPsgh=5l=`rd}p`kY*;TAO0FkUfq^IJ&j3HtvvGRAi~)A-iN zPW?yi-KftYE#Om=Xi>Ha5@XH5wQzw>&LWc3(Q&m4|D#ygAmgJA%6X?o(<&CqJOG-@ zqse~gxIu%vR?>k%NIwjQx4%PwjkCC>w6+qy3##AnyfH7X9fs$I6dm}J+?A6!EUqL& ld=yLMt5|!Md=QY69TzFi1v6;M`V5(lG6R@`k;}~K{{!H5u0j9+ literal 0 HcmV?d00001 diff --git a/mmpretrain/engine/optimizers/adan_t.py b/mmpretrain/engine/optimizers/adan_t.py new file mode 100644 index 0000000..571a71b --- /dev/null +++ b/mmpretrain/engine/optimizers/adan_t.py @@ -0,0 +1,312 @@ +# Copyright 2022 Garena Online Private Limited +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math +from typing import List + +import torch +from torch import Tensor +from torch.optim.optimizer import Optimizer + +from mmpretrain.registry import OPTIMIZERS + + +@OPTIMIZERS.register_module() +class Adan(Optimizer): + """Implements a pytorch variant of Adan. + + Adan was proposed in + Adan : Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models. # noqa + https://arxiv.org/abs/2208.06677 + Arguments: + params (iterable): iterable of parameters to optimize + or dicts defining parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float, flot], optional): coefficients used + for computing running averages of gradient. + (default: (0.98, 0.92, 0.99)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): decoupled weight decay + (L2 penalty) (default: 0) + max_grad_norm (float, optional): value used to clip + global grad norm (default: 0.0 no clip) + no_prox (bool): how to perform the decoupled weight decay + (default: False) + foreach (bool): if True would use torch._foreach implementation. + It's faster but uses slightly more memory. + """ + + def __init__(self, + params, + lr=1e-3, + betas=(0.98, 0.92, 0.99), + eps=1e-8, + weight_decay=0.0, + max_grad_norm=0.0, + no_prox=False, + foreach: bool = True): + if not 0.0 <= max_grad_norm: + raise ValueError('Invalid Max grad norm: {}'.format(max_grad_norm)) + if not 0.0 <= lr: + raise ValueError('Invalid learning rate: {}'.format(lr)) + if not 0.0 <= eps: + raise ValueError('Invalid epsilon value: {}'.format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError('Invalid beta parameter at index 0: {}'.format( + betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError('Invalid beta parameter at index 1: {}'.format( + betas[1])) + if not 0.0 <= betas[2] < 1.0: + raise ValueError('Invalid beta parameter at index 2: {}'.format( + betas[2])) + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + max_grad_norm=max_grad_norm, + no_prox=no_prox, + foreach=foreach) + super().__init__(params, defaults) + + def __setstate__(self, state): + super(Adan, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('no_prox', False) + + @torch.no_grad() + def restart_opt(self): + for group in self.param_groups: + group['step'] = 0 + for p in group['params']: + if p.requires_grad: + state = self.state[p] + # State initialization + + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + # Exponential moving average of gradient difference + state['exp_avg_diff'] = torch.zeros_like(p) + + @torch.no_grad() + def step(self): + """Performs a single optimization step.""" + if self.defaults['max_grad_norm'] > 0: + device = self.param_groups[0]['params'][0].device + global_grad_norm = torch.zeros(1, device=device) + + max_grad_norm = torch.tensor( + self.defaults['max_grad_norm'], device=device) + for group in self.param_groups: + + for p in group['params']: + if p.grad is not None: + grad = p.grad + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + group['eps'] + + clip_global_grad_norm = \ + torch.clamp(max_grad_norm / global_grad_norm, max=1.0) + else: + clip_global_grad_norm = 1.0 + + for group in self.param_groups: + params_with_grad = [] + grads = [] + exp_avgs = [] + 
exp_avg_sqs = [] + exp_avg_diffs = [] + pre_grads = [] + + beta1, beta2, beta3 = group['betas'] + # assume same step across group now to simplify things + # per parameter step can be easily support + # by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + bias_correction1 = 1.0 - beta1**group['step'] + bias_correction2 = 1.0 - beta2**group['step'] + bias_correction3 = 1.0 - beta3**group['step'] + + for p in group['params']: + if p.grad is None: + continue + params_with_grad.append(p) + grads.append(p.grad) + + state = self.state[p] + if len(state) == 0: + state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + state['exp_avg_diff'] = torch.zeros_like(p) + + if 'pre_grad' not in state or group['step'] == 1: + # at first step grad wouldn't be clipped + # by `clip_global_grad_norm` + # this is only to simplify implementation + state['pre_grad'] = p.grad + + exp_avgs.append(state['exp_avg']) + exp_avg_sqs.append(state['exp_avg_sq']) + exp_avg_diffs.append(state['exp_avg_diff']) + pre_grads.append(state['pre_grad']) + + kwargs = dict( + params=params_with_grad, + grads=grads, + exp_avgs=exp_avgs, + exp_avg_sqs=exp_avg_sqs, + exp_avg_diffs=exp_avg_diffs, + pre_grads=pre_grads, + beta1=beta1, + beta2=beta2, + beta3=beta3, + bias_correction1=bias_correction1, + bias_correction2=bias_correction2, + bias_correction3_sqrt=math.sqrt(bias_correction3), + lr=group['lr'], + weight_decay=group['weight_decay'], + eps=group['eps'], + no_prox=group['no_prox'], + clip_global_grad_norm=clip_global_grad_norm, + ) + if group['foreach']: + copy_grads = _multi_tensor_adan(**kwargs) + else: + copy_grads = _single_tensor_adan(**kwargs) + + for p, copy_grad in zip(params_with_grad, copy_grads): + self.state[p]['pre_grad'] = copy_grad + + +def _single_tensor_adan( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + exp_avg_diffs: List[Tensor], + pre_grads: List[Tensor], + *, + beta1: float, + beta2: float, + beta3: float, + bias_correction1: float, + bias_correction2: float, + bias_correction3_sqrt: float, + lr: float, + weight_decay: float, + eps: float, + no_prox: bool, + clip_global_grad_norm: Tensor, +): + copy_grads = [] + for i, param in enumerate(params): + grad = grads[i] + exp_avg = exp_avgs[i] + exp_avg_sq = exp_avg_sqs[i] + exp_avg_diff = exp_avg_diffs[i] + pre_grad = pre_grads[i] + + grad = grad.mul_(clip_global_grad_norm) + copy_grads.append(grad.clone()) + + diff = grad - pre_grad + update = grad + beta2 * diff + + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) # m_t + exp_avg_diff.mul_(beta2).add_(diff, alpha=1 - beta2) # diff_t + exp_avg_sq.mul_(beta3).addcmul_(update, update, value=1 - beta3) # n_t + + denom = (exp_avg_sq.sqrt() / bias_correction3_sqrt).add_(eps) + update = exp_avg / bias_correction1 + update.add_(beta2 * exp_avg_diff / bias_correction2).div_(denom) + + if no_prox: + param.mul_(1 - lr * weight_decay) + param.add_(update, alpha=-lr) + else: + param.add_(update, alpha=-lr) + param.div_(1 + lr * weight_decay) + return copy_grads + + +def _multi_tensor_adan( + params: List[Tensor], + grads: List[Tensor], + exp_avgs: List[Tensor], + exp_avg_sqs: List[Tensor], + exp_avg_diffs: List[Tensor], + pre_grads: List[Tensor], + *, + beta1: float, + beta2: float, + beta3: float, + bias_correction1: float, + bias_correction2: float, + bias_correction3_sqrt: float, + lr: float, + weight_decay: float, + eps: float, + no_prox: bool, + clip_global_grad_norm: 
Tensor, +): + if clip_global_grad_norm < 1.0: + torch._foreach_mul_(grads, clip_global_grad_norm.item()) + copy_grads = [g.clone() for g in grads] + + diff = torch._foreach_sub(grads, pre_grads) + # NOTE: line below while looking identical gives different result, + # due to float precision errors. + # using mul+add produces identical results to single-tensor, + # using add+alpha doesn't + # update = torch._foreach_add(grads, torch._foreach_mul(diff, beta2)) + update = torch._foreach_add(grads, diff, alpha=beta2) + + torch._foreach_mul_(exp_avgs, beta1) + torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1) # m_t + + torch._foreach_mul_(exp_avg_diffs, beta2) + torch._foreach_add_(exp_avg_diffs, diff, alpha=1 - beta2) # diff_t + + torch._foreach_mul_(exp_avg_sqs, beta3) + torch._foreach_addcmul_( + exp_avg_sqs, update, update, value=1 - beta3) # n_t + + denom = torch._foreach_sqrt(exp_avg_sqs) + torch._foreach_div_(denom, bias_correction3_sqrt) + torch._foreach_add_(denom, eps) + + update = torch._foreach_div(exp_avgs, bias_correction1) + # NOTE: same issue as above. + # beta2 * diff / bias_correction2 != diff * (beta2 / bias_correction2) # noqa + # using faster version by default. uncomment for tests to pass + # torch._foreach_add_(update, torch._foreach_div(torch._foreach_mul(exp_avg_diffs, beta2), bias_correction2)) # noqa + torch._foreach_add_( + update, torch._foreach_mul(exp_avg_diffs, beta2 / bias_correction2)) + torch._foreach_div_(update, denom) + + if no_prox: + torch._foreach_mul_(params, 1 - lr * weight_decay) + else: + torch._foreach_add_(params, update, alpha=-lr) + torch._foreach_div_(params, 1 + lr * weight_decay) + return copy_grads diff --git a/mmpretrain/engine/optimizers/lamb.py b/mmpretrain/engine/optimizers/lamb.py new file mode 100644 index 0000000..0b44a1c --- /dev/null +++ b/mmpretrain/engine/optimizers/lamb.py @@ -0,0 +1,228 @@ +"""PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb. + +This optimizer code was adapted from the following (starting with latest) +* https://github.com/HabanaAI/Model-References/blob/ +2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py +* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/ +LanguageModeling/Transformer-XL/pytorch/lamb.py +* https://github.com/cybertronai/pytorch-lamb + +Use FusedLamb if you can (GPU). The reason for including this variant of Lamb +is to have a version that is +similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or +cannot install/use APEX. + +In addition to some cleanup, this Lamb impl has been modified to support +PyTorch XLA and has been tested on TPU. + +Original copyrights for above sources are below. + +Modifications Copyright 2021 Ross Wightman +""" +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. + +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# MIT License +# +# Copyright (c) 2019 cybertronai +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +import math + +import torch +from torch.optim import Optimizer + +from mmpretrain.registry import OPTIMIZERS + + +@OPTIMIZERS.register_module() +class Lamb(Optimizer): + """A pure pytorch variant of FuseLAMB (NvLamb variant) optimizer. + + This class is copied from `timm`_. The LAMB was proposed in `Large Batch + Optimization for Deep Learning - Training BERT in 76 minutes`_. + + .. _timm: + https://github.com/rwightman/pytorch-image-models/blob/master/timm/optim/lamb.py + .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes: + https://arxiv.org/abs/1904.00962 + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its norm. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging (bool, optional): whether apply (1-beta2) to grad when + calculating running averages of gradient. (default: True) + max_grad_norm (float, optional): value used to clip global grad norm + (default: 1.0) + trust_clip (bool): enable LAMBC trust ratio clipping (default: False) + always_adapt (boolean, optional): Apply adaptive learning rate to 0.0 + weight decay parameter (default: False) + """ # noqa: E501 + + def __init__(self, + params, + lr=1e-3, + bias_correction=True, + betas=(0.9, 0.999), + eps=1e-6, + weight_decay=0.01, + grad_averaging=True, + max_grad_norm=1.0, + trust_clip=False, + always_adapt=False): + defaults = dict( + lr=lr, + bias_correction=bias_correction, + betas=betas, + eps=eps, + weight_decay=weight_decay, + grad_averaging=grad_averaging, + max_grad_norm=max_grad_norm, + trust_clip=trust_clip, + always_adapt=always_adapt) + super().__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
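+
+        Example:
+            A sketch of the standard closure pattern (``model``,
+            ``criterion``, ``inputs`` and ``targets`` are assumed to be
+            defined elsewhere):
+
+            .. code:: python
+
+                def closure():
+                    optimizer.zero_grad()
+                    loss = criterion(model(inputs), targets)
+                    loss.backward()
+                    return loss
+
+                loss = optimizer.step(closure)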
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor( + 1.0, device=device + ) # because torch.where doesn't handle scalars correctly + global_grad_norm = torch.zeros(1, device=device) + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError( + 'Lamb does not support sparse gradients, consider ' + 'SparseAdam instead.') + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + # FIXME it'd be nice to remove explicit tensor conversion of scalars + # when torch.where promotes + # scalar types properly https://github.com/pytorch/pytorch/issues/9190 + max_grad_norm = torch.tensor( + self.defaults['max_grad_norm'], device=device) + clip_global_grad_norm = torch.where(global_grad_norm > max_grad_norm, + global_grad_norm / max_grad_norm, + one_tensor) + + for group in self.param_groups: + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + grad_averaging = 1 if group['grad_averaging'] else 0 + beta3 = 1 - beta1 if grad_averaging else 1.0 + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or + # pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + if bias_correction: + bias_correction1 = 1 - beta1**group['step'] + bias_correction2 = 1 - beta2**group['step'] + else: + bias_correction1, bias_correction2 = 1.0, 1.0 + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.div_(clip_global_grad_norm) + state = self.state[p] + + # State initialization + if len(state) == 0: + # Exponential moving average of gradient valuesa + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t + exp_avg_sq.mul_(beta2).addcmul_( + grad, grad, value=1 - beta2) # v_t + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_( + group['eps']) + update = (exp_avg / bias_correction1).div_(denom) + + weight_decay = group['weight_decay'] + if weight_decay != 0: + update.add_(p, alpha=weight_decay) + + if weight_decay != 0 or group['always_adapt']: + # Layer-wise LR adaptation. By default, skip adaptation on + # parameters that are + # excluded from weight decay, unless always_adapt == True, + # then always enabled. + w_norm = p.norm(2.0) + g_norm = update.norm(2.0) + # FIXME nested where required since logical and/or not + # working in PT XLA + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, w_norm / g_norm, one_tensor), + one_tensor, + ) + if group['trust_clip']: + # LAMBC trust clipping, upper bound fixed at one + trust_ratio = torch.minimum(trust_ratio, one_tensor) + update.mul_(trust_ratio) + + p.add_(update, alpha=-group['lr']) + + return loss diff --git a/mmpretrain/engine/optimizers/lars.py b/mmpretrain/engine/optimizers/lars.py new file mode 100644 index 0000000..5e38887 --- /dev/null +++ b/mmpretrain/engine/optimizers/lars.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Iterable + +import torch +from torch.optim.optimizer import Optimizer + +from mmpretrain.registry import OPTIMIZERS + + +@OPTIMIZERS.register_module() +class LARS(Optimizer): + """Implements layer-wise adaptive rate scaling for SGD. + + Based on Algorithm 1 of the following paper by You, Gitman, and Ginsburg. + `Large Batch Training of Convolutional Networks: + `_. + + Args: + params (Iterable): Iterable of parameters to optimize or dicts defining + parameter groups. + lr (float): Base learning rate. + momentum (float): Momentum factor. Defaults to 0. + weight_decay (float): Weight decay (L2 penalty). Defaults to 0. + dampening (float): Dampening for momentum. Defaults to 0. + eta (float): LARS coefficient. Defaults to 0.001. + nesterov (bool): Enables Nesterov momentum. Defaults to False. + eps (float): A small number to avoid dviding zero. Defaults to 1e-8. + + Example: + >>> optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9, + >>> weight_decay=1e-4, eta=1e-3) + >>> optimizer.zero_grad() + >>> loss_fn(model(input), target).backward() + >>> optimizer.step() + """ + + def __init__(self, + params: Iterable, + lr: float, + momentum: float = 0, + weight_decay: float = 0, + dampening: float = 0, + eta: float = 0.001, + nesterov: bool = False, + eps: float = 1e-8) -> None: + if not isinstance(lr, float) and lr < 0.0: + raise ValueError(f'Invalid learning rate: {lr}') + if momentum < 0.0: + raise ValueError(f'Invalid momentum value: {momentum}') + if weight_decay < 0.0: + raise ValueError(f'Invalid weight_decay value: {weight_decay}') + if eta < 0.0: + raise ValueError(f'Invalid LARS coefficient value: {eta}') + + defaults = dict( + lr=lr, + momentum=momentum, + dampening=dampening, + weight_decay=weight_decay, + nesterov=nesterov, + eta=eta) + if nesterov and (momentum <= 0 or dampening != 0): + raise ValueError( + 'Nesterov momentum requires a momentum and zero dampening') + + self.eps = eps + super().__init__(params, defaults) + + def __setstate__(self, state) -> None: + super().__setstate__(state) + for group in self.param_groups: + group.setdefault('nesterov', False) + + @torch.no_grad() + def step(self, closure=None) -> torch.Tensor: + """Performs a single optimization step. + + Args: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + weight_decay = group['weight_decay'] + momentum = group['momentum'] + dampening = group['dampening'] + eta = group['eta'] + nesterov = group['nesterov'] + lr = group['lr'] + lars_exclude = group.get('lars_exclude', False) + + for p in group['params']: + if p.grad is None: + continue + + d_p = p.grad + + if lars_exclude: + local_lr = 1. + else: + weight_norm = torch.norm(p).item() + grad_norm = torch.norm(d_p).item() + if weight_norm != 0 and grad_norm != 0: + # Compute local learning rate for this layer + local_lr = eta * weight_norm / \ + (grad_norm + weight_decay * weight_norm + self.eps) + else: + local_lr = 1. 
+
+                actual_lr = local_lr * lr
+                d_p = d_p.add(p, alpha=weight_decay).mul(actual_lr)
+                if momentum != 0:
+                    param_state = self.state[p]
+                    if 'momentum_buffer' not in param_state:
+                        buf = param_state['momentum_buffer'] = \
+                            torch.clone(d_p).detach()
+                    else:
+                        buf = param_state['momentum_buffer']
+                    buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
+                    if nesterov:
+                        d_p = d_p.add(buf, alpha=momentum)
+                    else:
+                        d_p = buf
+                p.add_(-d_p)
+
+        return loss
diff --git a/mmpretrain/engine/optimizers/layer_decay_optim_wrapper_constructor.py b/mmpretrain/engine/optimizers/layer_decay_optim_wrapper_constructor.py
new file mode 100644
index 0000000..09c6abc
--- /dev/null
+++ b/mmpretrain/engine/optimizers/layer_decay_optim_wrapper_constructor.py
@@ -0,0 +1,166 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from collections import defaultdict
+from typing import Callable, List, Optional
+
+from mmengine.logging import MMLogger
+from mmengine.optim import DefaultOptimWrapperConstructor
+from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm, _InstanceNorm
+from torch import nn
+from torch.nn import GroupNorm, LayerNorm
+
+from mmpretrain.registry import OPTIM_WRAPPER_CONSTRUCTORS
+
+
+@OPTIM_WRAPPER_CONSTRUCTORS.register_module()
+class LearningRateDecayOptimWrapperConstructor(DefaultOptimWrapperConstructor):
+    """Different learning rates are set for different layers of the backbone.
+
+    By default, each parameter shares the same optimizer settings, and we
+    provide an argument ``paramwise_cfg`` to specify parameter-wise settings.
+    It is a dict and may contain the following fields:
+
+    - ``layer_decay_rate`` (float): The learning rate of a parameter will
+      be multiplied by this rate according to the layer depth of the
+      parameter. Usually, it's less than 1, so that the earlier layers will
+      have a lower learning rate. Defaults to 1.
+    - ``bias_decay_mult`` (float): It will be multiplied to the weight
+      decay for all bias parameters (except for those in normalization layers).
+    - ``norm_decay_mult`` (float): It will be multiplied to the weight
+      decay for all weight and bias parameters of normalization layers.
+    - ``flat_decay_mult`` (float): It will be multiplied to the weight
+      decay for all one-dimensional parameters.
+    - ``custom_keys`` (dict): Specify parameter-wise settings by keys. If
+      one of the keys in ``custom_keys`` is a substring of the name of one
+      parameter, then the setting of the parameter will be specified by
+      ``custom_keys[key]`` and other settings like ``bias_decay_mult`` will be
+      ignored. It should be a dict and may contain the field ``decay_mult``.
+      (The ``lr_mult`` is disabled in this constructor.)
+
+    Example:
+
+    In the config file, you can use this constructor as below:
+
+    .. code:: python
+
+        optim_wrapper = dict(
+            optimizer=dict(
+                type='AdamW',
+                lr=4e-3,
+                weight_decay=0.05,
+                eps=1e-8,
+                betas=(0.9, 0.999)),
+            constructor='LearningRateDecayOptimWrapperConstructor',
+            paramwise_cfg=dict(
+                layer_decay_rate=0.75,  # layer-wise lr decay factor
+                norm_decay_mult=0.,
+                flat_decay_mult=0.,
+                custom_keys={
+                    '.cls_token': dict(decay_mult=0.0),
+                    '.pos_embed': dict(decay_mult=0.0)
+                }))
+    """
+
+    def add_params(self,
+                   params: List[dict],
+                   module: nn.Module,
+                   prefix: str = '',
+                   get_layer_depth: Optional[Callable] = None,
+                   **kwargs) -> None:
+        """Add all parameters of module to the params list.
+
+        The parameters of the given module will be added to the list of param
+        groups, with specific rules defined by paramwise_cfg.
+
+        Args:
+            params (List[dict]): A list of param groups, it will be modified
+                in place.
+            module (nn.Module): The module to be added.
+            prefix (str): The prefix of the module.
+            get_layer_depth (Callable, optional): A function that maps a
+                parameter name to ``(layer_id, max_layer_id)``. Defaults to
+                the ``get_layer_depth`` method of the outermost module.
+        """
+        # get param-wise options
+        custom_keys = self.paramwise_cfg.get('custom_keys', {})
+        # sort in alphabetical order first, then by decreasing key length
+        sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True)
+        logger = MMLogger.get_current_instance()
+
+        # The model should have a `get_layer_depth` method
+        if get_layer_depth is None and not hasattr(module, 'get_layer_depth'):
+            raise NotImplementedError('The layer-wise learning rate decay '
+                                      f'needs the model {type(module)} to '
+                                      'have a `get_layer_depth` method.')
+        else:
+            get_layer_depth = get_layer_depth or module.get_layer_depth
+
+        bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', None)
+        norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', None)
+        flat_decay_mult = self.paramwise_cfg.get('flat_decay_mult', None)
+        decay_rate = self.paramwise_cfg.get('layer_decay_rate', 1.0)
+
+        # special rules for norm layers
+        is_norm = isinstance(module,
+                             (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))
+
+        for name, param in module.named_parameters(recurse=False):
+            param_group = {'params': [param]}
+            param_name = prefix + name
+            if not param.requires_grad:
+                continue
+
+            if self.base_wd is not None:
+                base_wd = self.base_wd
+                custom_key = next(
+                    filter(lambda k: k in param_name, sorted_keys), None)
+                # custom parameters decay
+                if custom_key is not None:
+                    custom_cfg = custom_keys[custom_key].copy()
+                    decay_mult = custom_cfg.pop('decay_mult', 1.)
+
+                    param_group['weight_decay'] = base_wd * decay_mult
+                    # add custom settings to param_group
+                    param_group.update(custom_cfg)
+                # norm decay
+                elif is_norm and norm_decay_mult is not None:
+                    param_group['weight_decay'] = base_wd * norm_decay_mult
+                # bias decay
+                elif name == 'bias' and bias_decay_mult is not None:
+                    param_group['weight_decay'] = base_wd * bias_decay_mult
+                # flatten parameters decay
+                elif param.ndim == 1 and flat_decay_mult is not None:
+                    param_group['weight_decay'] = base_wd * flat_decay_mult
+                else:
+                    param_group['weight_decay'] = base_wd
+
+            layer_id, max_id = get_layer_depth(param_name)
+            scale = decay_rate**(max_id - layer_id - 1)
+            param_group['lr'] = self.base_lr * scale
+            param_group['lr_scale'] = scale
+            param_group['layer_id'] = layer_id
+            param_group['param_name'] = param_name
+
+            params.append(param_group)
+
+        for child_name, child_mod in module.named_children():
+            child_prefix = f'{prefix}{child_name}.'
+            self.add_params(
+                params,
+                child_mod,
+                prefix=child_prefix,
+                get_layer_depth=get_layer_depth,
+            )
+
+        if prefix == '':
+            layer_params = defaultdict(list)
+            for param in params:
+                layer_params[param['layer_id']].append(param)
+            for layer_id, layer_params in layer_params.items():
+                lr_scale = layer_params[0]['lr_scale']
+                lr = layer_params[0]['lr']
+                msg = [
+                    f'layer {layer_id} params '
+                    f'(lr={lr:.3g}, lr_scale={lr_scale:.3g}):'
+                ]
+                for param in layer_params:
+                    msg.append(f'\t{param["param_name"]}: '
+                               f'weight_decay={param["weight_decay"]:.3g}')
+                logger.debug('\n'.join(msg))
diff --git a/mmpretrain/engine/runners/__init__.py b/mmpretrain/engine/runners/__init__.py
new file mode 100644
index 0000000..23206e1
--- /dev/null
+++ b/mmpretrain/engine/runners/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) OpenMMLab. All rights reserved.
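To make the scaling rule in add_params above concrete, here is a small illustrative computation of the per-layer factor decay_rate**(max_id - layer_id - 1); the numbers are hypothetical and not taken from any config in this patch:

    # a sketch, assuming get_layer_depth reports 14 depth groups in total
    decay_rate, max_id = 0.75, 14
    scales = [decay_rate**(max_id - layer_id - 1) for layer_id in range(max_id)]
    # scales[0]  ~= 0.024 -> the earliest group gets the smallest learning rate
    # scales[-1] == 1.0   -> the final group keeps the base learning rate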
+from .retrieval_loop import RetrievalTestLoop, RetrievalValLoop
+
+__all__ = ['RetrievalTestLoop', 'RetrievalValLoop']
diff --git a/mmpretrain/engine/runners/retrieval_loop.py b/mmpretrain/engine/runners/retrieval_loop.py
new file mode 100644
index 0000000..d15387e
--- /dev/null
+++ b/mmpretrain/engine/runners/retrieval_loop.py
@@ -0,0 +1,168 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+import torch
+from mmengine.model import is_model_wrapper
+from mmengine.runner import TestLoop, ValLoop, autocast
+
+from mmpretrain.registry import LOOPS
+
+
+@LOOPS.register_module()
+class RetrievalValLoop(ValLoop):
+    """Loop for multimodal retrieval val.
+
+    Args:
+        runner (Runner): A reference of runner.
+        dataloader (Dataloader or dict): A dataloader object or a dict to
+            build a dataloader.
+        evaluator (Evaluator or dict or list): Used for computing metrics.
+        fp16 (bool): Whether to enable fp16 validation. Defaults to
+            False.
+ """ + + def run(self) -> dict: + """Launch val.""" + self.runner.call_hook('before_val') + self.runner.call_hook('before_val_epoch') + self.runner.model.eval() + + feats_local = [] + data_samples_local = [] + + for idx, data_batch in enumerate(self.dataloader): + with torch.no_grad(): + self.runner.call_hook( + 'before_val_iter', batch_idx=idx, data_batch=data_batch) + # predictions should be sequence of BaseDataElement + with autocast(enabled=self.fp16): + if is_model_wrapper(self.runner.model): + data_preprocessor = self.runner.model.module.data_preprocessor # noqa: E501 + else: + data_preprocessor = self.runner.model.data_preprocessor + + # get features for retrieval instead of data samples + data_batch = data_preprocessor(data_batch, False) + feats = self.runner.model._run_forward( + data_batch, mode='tensor') + feats_local.append(feats) + data_samples_local.extend(data_batch['data_samples']) + self.runner.call_hook( + 'after_val_iter', + batch_idx=idx, + data_batch=data_batch, + outputs=feats) + + # concatenate different features + feats_local = { + k: torch.cat([dic[k] for dic in feats_local]) + for k in feats_local[0] + } + + # get predictions + if is_model_wrapper(self.runner.model): + predict_all_fn = self.runner.model.module.predict_all + else: + predict_all_fn = self.runner.model.predict_all + + img_size = self.dataloader.dataset.img_size + text_size = self.dataloader.dataset.text_size + with torch.no_grad(): + i2t_data_samples, t2i_data_samples = predict_all_fn( + feats_local, + data_samples_local, + num_images=img_size, + num_texts=text_size, + ) + + # process in evaluator and compute metrics + self.evaluator.process(i2t_data_samples, None) + i2t_metrics = self.evaluator.evaluate(img_size) + i2t_metrics = {f'i2t/{k}': v for k, v in i2t_metrics.items()} + self.evaluator.process(t2i_data_samples, None) + t2i_metrics = self.evaluator.evaluate(text_size) + t2i_metrics = {f't2i/{k}': v for k, v in t2i_metrics.items()} + metrics = {**i2t_metrics, **t2i_metrics} + + self.runner.call_hook('after_val_epoch', metrics=metrics) + self.runner.call_hook('after_val') + return metrics + + +@LOOPS.register_module() +class RetrievalTestLoop(TestLoop): + """Loop for multimodal retrieval test. + + Args: + runner (Runner): A reference of runner. + dataloader (Dataloader or dict): A dataloader object or a dict to + build a dataloader. + evaluator (Evaluator or dict or list): Used for computing metrics. + fp16 (bool): Whether to enable fp16 testing. Defaults to + False. 
+ """ + + def run(self) -> dict: + """Launch test.""" + self.runner.call_hook('before_test') + self.runner.call_hook('before_test_epoch') + self.runner.model.eval() + + feats_local = [] + data_samples_local = [] + + for idx, data_batch in enumerate(self.dataloader): + with torch.no_grad(): + self.runner.call_hook( + 'before_test_iter', batch_idx=idx, data_batch=data_batch) + # predictions should be sequence of BaseDataElement + with autocast(enabled=self.fp16): + if is_model_wrapper(self.runner.model): + data_preprocessor = self.runner.model.module.data_preprocessor # noqa: E501 + else: + data_preprocessor = self.runner.model.data_preprocessor + # get features for retrieval instead of data samples + data_batch = data_preprocessor(data_batch, False) + feats = self.runner.model._run_forward( + data_batch, mode='tensor') + feats_local.append(feats) + data_samples_local.extend(data_batch['data_samples']) + self.runner.call_hook( + 'after_test_iter', + batch_idx=idx, + data_batch=data_batch, + outputs=feats) + + # concatenate different features + feats_local = { + k: torch.cat([dic[k] for dic in feats_local]) + for k in feats_local[0] + } + + # get predictions + if is_model_wrapper(self.runner.model): + predict_all_fn = self.runner.model.module.predict_all + else: + predict_all_fn = self.runner.model.predict_all + + img_size = self.dataloader.dataset.img_size + text_size = self.dataloader.dataset.text_size + with torch.no_grad(): + i2t_data_samples, t2i_data_samples = predict_all_fn( + feats_local, + data_samples_local, + num_images=img_size, + num_texts=text_size, + ) + + # process in evaluator and compute metrics + self.evaluator.process(i2t_data_samples, None) + i2t_metrics = self.evaluator.evaluate(img_size) + i2t_metrics = {f'i2t/{k}': v for k, v in i2t_metrics.items()} + self.evaluator.process(t2i_data_samples, None) + t2i_metrics = self.evaluator.evaluate(text_size) + t2i_metrics = {f't2i/{k}': v for k, v in t2i_metrics.items()} + metrics = {**i2t_metrics, **t2i_metrics} + + self.runner.call_hook('after_test_epoch', metrics=metrics) + self.runner.call_hook('after_test') + return metrics diff --git a/mmpretrain/engine/schedulers/__init__.py b/mmpretrain/engine/schedulers/__init__.py new file mode 100644 index 0000000..68b6a54 --- /dev/null +++ b/mmpretrain/engine/schedulers/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .weight_decay_scheduler import CosineAnnealingWeightDecay
+
+__all__ = ['CosineAnnealingWeightDecay']
diff --git a/mmpretrain/engine/schedulers/weight_decay_scheduler.py b/mmpretrain/engine/schedulers/weight_decay_scheduler.py
new file mode 100644
index 0000000..7e725a4
--- /dev/null
+++ b/mmpretrain/engine/schedulers/weight_decay_scheduler.py
@@ -0,0 +1,64 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import math
+
+from mmengine.optim.scheduler import CosineAnnealingParamScheduler
+
+from mmpretrain.registry import PARAM_SCHEDULERS
+
+
+class WeightDecaySchedulerMixin:
+    """A mixin class for weight decay schedulers."""
+
+    def __init__(self, optimizer, *args, **kwargs):
+        super().__init__(optimizer, 'weight_decay', *args, **kwargs)
+
+
+@PARAM_SCHEDULERS.register_module()
+class CosineAnnealingWeightDecay(WeightDecaySchedulerMixin,
+                                 CosineAnnealingParamScheduler):
+    """Set the weight decay value of each parameter group using a cosine
+    annealing schedule.
+
+    If the weight decay was set to 0 initially, the weight decay value will
+    remain 0 throughout training.
+ """ + + def _get_value(self) -> list: + """Compute value using chainable form of the scheduler.""" + + def _get_eta_min(base_value): + if self.eta_min_ratio is None: + return self.eta_min + return base_value * self.eta_min_ratio + + if self.last_step == 0: + return [ + group[self.param_name] for group in self.optimizer.param_groups + ] + elif (self.last_step - 1 - self.T_max) % (2 * self.T_max) == 0: + weight_decay_value_list = [] + for base_value, group in zip(self.base_values, + self.optimizer.param_groups): + if base_value == 0: + group_value = 0 + else: + group_value = group[self.param_name] + ( + base_value - _get_eta_min(base_value)) * ( + 1 - math.cos(math.pi / self.T_max)) / 2 + weight_decay_value_list.append(group_value) + return weight_decay_value_list + + weight_decay_value_list = [] + for base_value, group in zip(self.base_values, + self.optimizer.param_groups): + if base_value == 0: + group_value = 0 + else: + group_value = ( + 1 + math.cos(math.pi * self.last_step / self.T_max)) / ( + 1 + math.cos(math.pi * + (self.last_step - 1) / self.T_max) + ) * (group[self.param_name] - + _get_eta_min(base_value)) + _get_eta_min(base_value) + weight_decay_value_list.append(group_value) + return weight_decay_value_list diff --git a/mmpretrain/evaluation/__init__.py b/mmpretrain/evaluation/__init__.py new file mode 100644 index 0000000..f70dc22 --- /dev/null +++ b/mmpretrain/evaluation/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .functional import * # noqa: F401,F403 +from .metrics import * # noqa: F401,F403 diff --git a/mmpretrain/evaluation/__pycache__/__init__.cpython-310.pyc b/mmpretrain/evaluation/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0de060074feb6ad054f711fe1f8734a8103f5618 GIT binary patch literal 189 zcmd1j<>g`kf*F60rL_a;#~=klg`kf*F60r5OO}#~=J~J<~BtBlRpz;=nO>TZlX-=vg M$k<{gAi=@_0Gqod@&Et; literal 0 HcmV?d00001 diff --git a/mmpretrain/evaluation/metrics/ANLS.py b/mmpretrain/evaluation/metrics/ANLS.py new file mode 100644 index 0000000..14917f1 --- /dev/null +++ b/mmpretrain/evaluation/metrics/ANLS.py @@ -0,0 +1,103 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional + +from mmengine.evaluator import BaseMetric + +from mmpretrain.registry import METRICS + + +@METRICS.register_module() +class ANLS(BaseMetric): + """ANLS metric. + + Compute the Average Normalized Levenshtein Similarity(ANLS). + + Args: + threshold (float): ANLS threshold used for determining if the answer + has been correctly selected but not properly recognized, + or on the contrary, the output is a wrong text selected from the + options and given as an answer. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Should be modified according to the + `retrieval_type` for unambiguous results. Defaults to TR. + """ + default_prefix = 'ANLS' + + def __init__(self, + threshold: float = 0.5, + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + self.threshold = threshold + + def process(self, data_batch, data_samples) -> None: + """Process one batch of data samples. 
+
+        The processed results should be stored in ``self.results``, which will
+        be used to compute the metrics when all batches have been processed.
+
+        Args:
+            data_batch: A batch of data from the dataloader.
+            data_samples (Sequence[dict]): A batch of outputs from the model.
+        """
+        for sample in data_samples:
+            gt_answer = sample.get('gt_answer')
+            result = {
+                'pred_answer': sample.get('pred_answer'),
+                'gt_answer': gt_answer
+            }
+
+            self.results.append(result)
+
+    def compute_metrics(self, results: List) -> dict:
+        """Compute the metrics from processed results.
+
+        Args:
+            results (List): The processed results of each batch.
+
+        Returns:
+            Dict: The computed metrics. The keys are the names of the metrics,
+            and the values are corresponding results.
+        """
+        total_score = 0.
+        for result in results:
+            sample_score_list = []
+            pred = ' '.join(result['pred_answer'].strip().lower().split())
+            for gt in result['gt_answer']:
+                gt = ' '.join(gt.strip().lower().split())
+                dist = levenshtein_distance(gt, pred)
+                length = max(
+                    len(gt.upper()), len(result['pred_answer'].upper()))
+                sample_score_list.append(0.0 if length == 0 else
+                                         float(dist) / float(length))
+
+            per_sample_score = 1. - min(sample_score_list)
+            if per_sample_score < self.threshold:
+                per_sample_score = 0.
+
+            total_score += per_sample_score
+
+        total_score = total_score / len(results)
+        return {'ANLS': total_score}
+
+
+def levenshtein_distance(s1, s2):
+    if len(s1) > len(s2):
+        s1, s2 = s2, s1
+
+    distances = range(len(s1) + 1)
+    for i2, c2 in enumerate(s2):
+        distances_ = [i2 + 1]
+        for i1, c1 in enumerate(s1):
+            if c1 == c2:
+                distances_.append(distances[i1])
+            else:
+                distances_.append(1 + min((distances[i1], distances[i1 + 1],
+                                           distances_[-1])))
+        distances = distances_
+    return distances[-1]
diff --git a/mmpretrain/evaluation/metrics/__init__.py b/mmpretrain/evaluation/metrics/__init__.py
new file mode 100644
index 0000000..e572efe
--- /dev/null
+++ b/mmpretrain/evaluation/metrics/__init__.py
@@ -0,0 +1,22 @@
+# Copyright (c) OpenMMLab. All rights reserved.
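To illustrate the scoring performed in compute_metrics above, here is a small self-contained sketch of the per-sample ANLS computation; anls_for_sample is a hypothetical helper written for this note (it reuses the levenshtein_distance function defined above and skips the whitespace normalization done in the real metric):

    def anls_for_sample(pred, gts, threshold=0.5):
        # normalized Levenshtein distance against every ground-truth answer
        scores = []
        for gt in gts:
            dist = levenshtein_distance(gt.lower(), pred.lower())
            length = max(len(gt), len(pred))
            scores.append(0.0 if length == 0 else dist / length)
        similarity = 1.0 - min(scores)
        # answers scoring below the threshold are treated as plain errors
        return similarity if similarity >= threshold else 0.0

    # e.g. anls_for_sample('graphs', ['graph']) == 1 - 1/6 ~= 0.833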
+from .ANLS import ANLS
+from .caption import COCOCaption
+from .gqa import GQAAcc
+from .multi_label import AveragePrecision, MultiLabelMetric
+from .multi_task import MultiTasksMetric
+from .nocaps import NocapsSave
+from .retrieval import RetrievalAveragePrecision, RetrievalRecall
+from .scienceqa import ScienceQAMetric
+from .shape_bias_label import ShapeBiasMetric
+from .single_label import Accuracy, ConfusionMatrix, SingleLabelMetric
+from .visual_grounding_eval import VisualGroundingMetric
+from .voc_multi_label import VOCAveragePrecision, VOCMultiLabelMetric
+from .vqa import ReportVQA, VQAAcc
+
+__all__ = [
+    'Accuracy', 'SingleLabelMetric', 'MultiLabelMetric', 'AveragePrecision',
+    'MultiTasksMetric', 'VOCAveragePrecision', 'VOCMultiLabelMetric',
+    'ConfusionMatrix', 'RetrievalRecall', 'VQAAcc', 'ReportVQA', 'COCOCaption',
+    'VisualGroundingMetric', 'ScienceQAMetric', 'GQAAcc', 'NocapsSave',
+    'RetrievalAveragePrecision', 'ShapeBiasMetric', 'ANLS'
+]
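For reference, a metric from the list above is normally attached through the evaluator field of a config. The exact arguments below are illustrative assumptions rather than part of this commit:

    # single-label classification
    val_evaluator = dict(type='Accuracy', topk=(1, 5))
    # multimodal retrieval
    test_evaluator = dict(type='RetrievalRecall', topk=(1, 5, 10))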
z&(R^5sa9&Awp%|;o90tlgvv!LO97=lB1G_MnuQ3J)Af;QT$~=6Y4x~ls<+jP3zD@D z!v)zGi{?D_o}6??2HQ8`@5*BPwhWN~b)J(0?-Jes$HqNTLOXA~El$|*Yj|PXqD|>0 zj1<31o$HeXxQEqib)7RASO_(#rN_?@#_v8;!O8{0*)_bPxPBBa$7Q3AP{CMt-K#0> zpKPHk*h)kf#K7se>4=x&FIcXQxTWn#O)+)6&}nz@J@_7>#0+0i7Ip05_#5@p2Zc+9v8Di2ULitQ$DT+oZk zUgZjzLd7%(x3k8@jjSA7dg~A@VYDrD1P>@~`#r+FS zqsS=X6$t%ZG$3gx^_=@IQN$mAf`B9=>PsH~q60G}F#v|BHcvn&=3fGM1b=}ANNa+_ z7gM@YJO_>W0zACuO}+oQbgKW*fg@FGlBxy2j&)CF%8Oa|u+k0K_KAJ(Kr<$`J$()B z%ctaGW!(!{`S=3bvF@|Vs_%h*TM}iqICaktAtLsp*yN^0wQeDkD73`f!?O<6VEfYA z9e6de@2W6=$#kct-LGqjx?1=Qm+<|sP`AT!+Xo;Uf(k8Fh^IfGN1gOetX@=QSH&!} z``Uy2!;$!hBLRa8($jV%{7Z35Vnq;TIzrdA+5fhuuo4~r+EbXpH`-YiEDm*v|3Yvx zv><#3p?n81>R*9AO=FnuL-SaY=-32|1)C!J$}vpt3xs_*XZTmr0?9>)(ug<(6K+_J zvRBYsd%o}Eot;J%v?f_@itesG)+XJQ29b{tG@2U%ZmEuid?8lgsx1vwh`(nmSCQuK*V*-l&v=ntbg2(P;qz%{o* zgiGP9k{?V~EG!d>hS3Ray!7(ns0Y?_Kt%;ts#g;4z%8^>dI#w1oQ|(#%3_>v!TJt0 zDG`hjs2=K#EmKx;q{|jk)j49z0EjfFr^AHKHJ|S{#GjjsyX^U!1}dq2vR*dgFBk@d z_;ODtzZ55C4~gX)bw{dh-aSLg#5SFd){NBn8bww^zl$gDC1ZVXi3pB82<&>2^8sukD7P@!mX7&_zFsD zB(*au>c6NQ$Q*jTPH)RhKbW(O-qjUdrW{~m;C$j&mHV$k*U?K@LwzzypY+<@;;wh<`zXJ0t|>k#LqFEm47QB zTGHm(a3^TulYmD9oeJU!^;Dc(+Wl`L!@e^{F{DYKEXl5w5-o}=96vE5)Dx}NF>fm! m(Tmb+V{2m_wu4A6U_eiEGGTgoT2C+4ZZr>B!ptj|v%diD8R1O; literal 0 HcmV?d00001 diff --git a/mmpretrain/evaluation/metrics/caption.py b/mmpretrain/evaluation/metrics/caption.py new file mode 100644 index 0000000..c4bffab --- /dev/null +++ b/mmpretrain/evaluation/metrics/caption.py @@ -0,0 +1,136 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import json +import os +import tempfile +from typing import List, Optional + +from mmengine.evaluator import BaseMetric +from mmengine.utils import track_iter_progress + +from mmpretrain.registry import METRICS +from mmpretrain.utils import require + +try: + from pycocoevalcap.eval import COCOEvalCap + from pycocotools.coco import COCO +except ImportError: + COCOEvalCap = None + COCO = None + + +@METRICS.register_module() +class COCOCaption(BaseMetric): + """Coco Caption evaluation wrapper. + + Save the generated captions and transform into coco format. + Calling COCO API for caption metrics. + + Args: + ann_file (str): the path for the COCO format caption ground truth + json file, load for evaluations. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Should be modified according to the + `retrieval_type` for unambiguous results. Defaults to TR. + """ + + @require('pycocoevalcap') + def __init__(self, + ann_file: str, + collect_device: str = 'cpu', + prefix: Optional[str] = None): + super().__init__(collect_device=collect_device, prefix=prefix) + self.ann_file = ann_file + + def process(self, data_batch, data_samples): + """Process one batch of data samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + + for data_sample in data_samples: + result = dict() + + result['caption'] = data_sample.get('pred_caption') + result['image_id'] = int(data_sample.get('image_id')) + + # Save the result to `self.results`. 
+ self.results.append(result) + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. + + Args: + results (dict): The processed results of each batch. + + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. + """ + # NOTICE: don't access `self.results` from the method. + + with tempfile.TemporaryDirectory() as temp_dir: + + eval_result_file = save_result( + result=results, + result_dir=temp_dir, + filename='m4-caption_pred', + remove_duplicate='image_id', + ) + + coco_val = coco_caption_eval(eval_result_file, self.ann_file) + + return coco_val + + +def save_result(result, result_dir, filename, remove_duplicate=''): + """Saving predictions as json file for evaluation.""" + + # combine results from all processes + result_new = [] + + if remove_duplicate: + result_new = [] + id_list = [] + for res in track_iter_progress(result): + if res[remove_duplicate] not in id_list: + id_list.append(res[remove_duplicate]) + result_new.append(res) + result = result_new + + final_result_file_url = os.path.join(result_dir, '%s.json' % filename) + print(f'result file saved to {final_result_file_url}') + json.dump(result, open(final_result_file_url, 'w')) + + return final_result_file_url + + +def coco_caption_eval(results_file, ann_file): + """Evaluation between gt json and prediction json files.""" + # create coco object and coco_result object + coco = COCO(ann_file) + coco_result = coco.loadRes(results_file) + + # create coco_eval object by taking coco and coco_result + coco_eval = COCOEvalCap(coco, coco_result) + + # make sure the image ids are the same + coco_eval.params['image_id'] = coco_result.getImgIds() + + # This will take some times at the first run + coco_eval.evaluate() + + # print output evaluation scores + for metric, score in coco_eval.eval.items(): + print(f'{metric}: {score:.3f}') + + return coco_eval.eval diff --git a/mmpretrain/evaluation/metrics/gqa.py b/mmpretrain/evaluation/metrics/gqa.py new file mode 100644 index 0000000..d5e8b07 --- /dev/null +++ b/mmpretrain/evaluation/metrics/gqa.py @@ -0,0 +1,78 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional + +from mmengine.evaluator import BaseMetric + +from mmpretrain.evaluation.metrics.vqa import (_process_digit_article, + _process_punctuation) +from mmpretrain.registry import METRICS + + +@METRICS.register_module() +class GQAAcc(BaseMetric): + """GQA Acc metric. + + Compute GQA accuracy. + + Args: + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Should be modified according to the + `retrieval_type` for unambiguous results. Defaults to TR. + """ + default_prefix = 'GQA' + + def __init__(self, + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + + def process(self, data_batch, data_samples) -> None: + """Process one batch of data samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. 
+ data_samples (Sequence[dict]): A batch of outputs from the model. + """ + for sample in data_samples: + gt_answer = sample.get('gt_answer') + result = { + 'pred_answer': sample.get('pred_answer'), + 'gt_answer': gt_answer + } + + self.results.append(result) + + def compute_metrics(self, results: List) -> dict: + """Compute the metrics from processed results. + + Args: + results (dict): The processed results of each batch. + + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. + """ + acc = [] + for result in results: + pred_answer = self._process_answer(result['pred_answer']) + gt_answer = self._process_answer(result['gt_answer']) + gqa_acc = 1 if pred_answer == gt_answer else 0 + acc.append(gqa_acc) + + accuracy = sum(acc) / len(acc) + + metrics = {'acc': accuracy} + return metrics + + def _process_answer(self, answer) -> str: + answer = _process_punctuation(answer) + answer = _process_digit_article(answer) + return answer diff --git a/mmpretrain/evaluation/metrics/multi_label.py b/mmpretrain/evaluation/metrics/multi_label.py new file mode 100644 index 0000000..bd91aac --- /dev/null +++ b/mmpretrain/evaluation/metrics/multi_label.py @@ -0,0 +1,599 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Sequence, Union + +import numpy as np +import torch +from mmengine.evaluator import BaseMetric +from mmengine.logging import MMLogger + +from mmpretrain.registry import METRICS +from mmpretrain.structures import label_to_onehot +from .single_label import _precision_recall_f1_support, to_tensor + + +@METRICS.register_module() +class MultiLabelMetric(BaseMetric): + r"""A collection of precision, recall, f1-score and support for + multi-label tasks. + + The collection of metrics is for single-label multi-class classification. + And all these metrics are based on the confusion matrix of every category: + + .. image:: ../../_static/image/confusion-matrix.png + :width: 60% + :align: center + + All metrics can be formulated use variables above: + + **Precision** is the fraction of correct predictions in all predictions: + + .. math:: + \text{Precision} = \frac{TP}{TP+FP} + + **Recall** is the fraction of correct predictions in all targets: + + .. math:: + \text{Recall} = \frac{TP}{TP+FN} + + **F1-score** is the harmonic mean of the precision and recall: + + .. math:: + \text{F1-score} = \frac{2\times\text{Recall}\times\text{Precision}}{\text{Recall}+\text{Precision}} + + **Support** is the number of samples: + + .. math:: + \text{Support} = TP + TN + FN + FP + + Args: + thr (float, optional): Predictions with scores under the threshold + are considered as negative. If None, the ``topk`` predictions will + be considered as positive. If the ``topk`` is also None, use + ``thr=0.5`` as default. Defaults to None. + topk (int, optional): Predictions with the k-th highest scores are + considered as positive. If None, use ``thr`` to determine positive + predictions. If both ``thr`` and ``topk`` are not None, use + ``thr``. Defaults to None. + items (Sequence[str]): The detailed metric items to evaluate, select + from "precision", "recall", "f1-score" and "support". + Defaults to ``('precision', 'recall', 'f1-score')``. + average (str | None): How to calculate the final metrics from the + confusion matrix of every category. It supports three modes: + + - `"macro"`: Calculate metrics for each category, and calculate + the mean value over all categories. 
+ - `"micro"`: Average the confusion matrix over all categories and + calculate metrics on the mean confusion matrix. + - `None`: Calculate metrics of every category and output directly. + + Defaults to "macro". + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Defaults to None. + + Examples: + >>> import torch + >>> from mmpretrain.evaluation import MultiLabelMetric + >>> # ------ The Basic Usage for category indices labels ------- + >>> y_pred = [[0], [1], [0, 1], [3]] + >>> y_true = [[0, 3], [0, 2], [1], [3]] + >>> # Output precision, recall, f1-score and support + >>> MultiLabelMetric.calculate( + ... y_pred, y_true, pred_indices=True, target_indices=True, num_classes=4) + (tensor(50.), tensor(50.), tensor(45.8333), tensor(6)) + >>> # ----------- The Basic Usage for one-hot labels ----------- + >>> y_pred = torch.tensor([[1, 1, 0, 0], + ... [1, 1, 0, 0], + ... [0, 0, 1, 0], + ... [0, 1, 0, 0], + ... [0, 1, 0, 0]]) + >>> y_true = torch.Tensor([[1, 1, 0, 0], + ... [0, 0, 1, 0], + ... [1, 1, 1, 0], + ... [1, 0, 0, 0], + ... [1, 0, 0, 0]]) + >>> MultiLabelMetric.calculate(y_pred, y_true) + (tensor(43.7500), tensor(31.2500), tensor(33.3333), tensor(8)) + >>> # --------- The Basic Usage for one-hot pred scores --------- + >>> y_pred = torch.rand(y_true.size()) + >>> y_pred + tensor([[0.4575, 0.7335, 0.3934, 0.2572], + [0.1318, 0.1004, 0.8248, 0.6448], + [0.8349, 0.6294, 0.7896, 0.2061], + [0.4037, 0.7308, 0.6713, 0.8374], + [0.3779, 0.4836, 0.0313, 0.0067]]) + >>> # Calculate with different threshold. + >>> MultiLabelMetric.calculate(y_pred, y_true, thr=0.1) + (tensor(42.5000), tensor(75.), tensor(53.1746), tensor(8)) + >>> # Calculate with topk. + >>> MultiLabelMetric.calculate(y_pred, y_true, topk=1) + (tensor(62.5000), tensor(31.2500), tensor(39.1667), tensor(8)) + >>> + >>> # ------------------- Use with Evalutor ------------------- + >>> from mmpretrain.structures import DataSample + >>> from mmengine.evaluator import Evaluator + >>> data_sampels = [ + ... DataSample().set_pred_score(pred).set_gt_score(gt) + ... 
for pred, gt in zip(torch.rand(1000, 5), torch.randint(0, 2, (1000, 5)))] + >>> evaluator = Evaluator(metrics=MultiLabelMetric(thr=0.5)) + >>> evaluator.process(data_sampels) + >>> evaluator.evaluate(1000) + { + 'multi-label/precision': 50.72898037055408, + 'multi-label/recall': 50.06836461357571, + 'multi-label/f1-score': 50.384466955258475 + } + >>> # Evaluate on each class by using topk strategy + >>> evaluator = Evaluator(metrics=MultiLabelMetric(topk=1, average=None)) + >>> evaluator.process(data_sampels) + >>> evaluator.evaluate(1000) + { + 'multi-label/precision_top1_classwise': [48.22, 50.54, 50.99, 44.18, 52.5], + 'multi-label/recall_top1_classwise': [18.92, 19.22, 19.92, 20.0, 20.27], + 'multi-label/f1-score_top1_classwise': [27.18, 27.85, 28.65, 27.54, 29.25] + } + """ # noqa: E501 + default_prefix: Optional[str] = 'multi-label' + + def __init__(self, + thr: Optional[float] = None, + topk: Optional[int] = None, + items: Sequence[str] = ('precision', 'recall', 'f1-score'), + average: Optional[str] = 'macro', + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + + logger = MMLogger.get_current_instance() + if thr is None and topk is None: + thr = 0.5 + logger.warning('Neither thr nor k is given, set thr as 0.5 by ' + 'default.') + elif thr is not None and topk is not None: + logger.warning('Both thr and topk are given, ' + 'use threshold in favor of top-k.') + + self.thr = thr + self.topk = topk + self.average = average + + for item in items: + assert item in ['precision', 'recall', 'f1-score', 'support'], \ + f'The metric {item} is not supported by `SingleLabelMetric`,' \ + ' please choose from "precision", "recall", "f1-score" and ' \ + '"support".' + self.items = tuple(items) + + super().__init__(collect_device=collect_device, prefix=prefix) + + def process(self, data_batch, data_samples: Sequence[dict]): + """Process one batch of data samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + for data_sample in data_samples: + result = dict() + + result['pred_score'] = data_sample['pred_score'].clone() + num_classes = result['pred_score'].size()[-1] + + if 'gt_score' in data_sample: + result['gt_score'] = data_sample['gt_score'].clone() + else: + result['gt_score'] = label_to_onehot(data_sample['gt_label'], + num_classes) + + # Save the result to `self.results`. + self.results.append(result) + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. + """ + # NOTICE: don't access `self.results` from the method. `self.results` + # are a list of results from multiple batch, while the input `results` + # are the collected results. 
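+        # `results` holds one dict per sample with `gt_score` and
+        # `pred_score` tensors of length num_classes; the block below stacks
+        # them into (N, num_classes) tensors, lets `self.calculate` binarize
+        # the predictions via `self.thr` or `self.topk`, and reduces the
+        # per-class results according to `self.average`.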
+ metrics = {} + + target = torch.stack([res['gt_score'] for res in results]) + pred = torch.stack([res['pred_score'] for res in results]) + + metric_res = self.calculate( + pred, + target, + pred_indices=False, + target_indices=False, + average=self.average, + thr=self.thr, + topk=self.topk) + + def pack_results(precision, recall, f1_score, support): + single_metrics = {} + if 'precision' in self.items: + single_metrics['precision'] = precision + if 'recall' in self.items: + single_metrics['recall'] = recall + if 'f1-score' in self.items: + single_metrics['f1-score'] = f1_score + if 'support' in self.items: + single_metrics['support'] = support + return single_metrics + + if self.thr: + suffix = '' if self.thr == 0.5 else f'_thr-{self.thr:.2f}' + for k, v in pack_results(*metric_res).items(): + metrics[k + suffix] = v + else: + for k, v in pack_results(*metric_res).items(): + metrics[k + f'_top{self.topk}'] = v + + result_metrics = dict() + for k, v in metrics.items(): + if self.average is None: + result_metrics[k + '_classwise'] = v.detach().cpu().tolist() + elif self.average == 'macro': + result_metrics[k] = v.item() + else: + result_metrics[k + f'_{self.average}'] = v.item() + return result_metrics + + @staticmethod + def calculate( + pred: Union[torch.Tensor, np.ndarray, Sequence], + target: Union[torch.Tensor, np.ndarray, Sequence], + pred_indices: bool = False, + target_indices: bool = False, + average: Optional[str] = 'macro', + thr: Optional[float] = None, + topk: Optional[int] = None, + num_classes: Optional[int] = None + ) -> Union[torch.Tensor, List[torch.Tensor]]: + """Calculate the precision, recall, f1-score. + + Args: + pred (torch.Tensor | np.ndarray | Sequence): The prediction + results. A :obj:`torch.Tensor` or :obj:`np.ndarray` with + shape ``(N, num_classes)`` or a sequence of index/onehot + format labels. + target (torch.Tensor | np.ndarray | Sequence): The prediction + results. A :obj:`torch.Tensor` or :obj:`np.ndarray` with + shape ``(N, num_classes)`` or a sequence of index/onehot + format labels. + pred_indices (bool): Whether the ``pred`` is a sequence of + category index labels. If True, ``num_classes`` must be set. + Defaults to False. + target_indices (bool): Whether the ``target`` is a sequence of + category index labels. If True, ``num_classes`` must be set. + Defaults to False. + average (str | None): How to calculate the final metrics from + the confusion matrix of every category. It supports three + modes: + + - `"macro"`: Calculate metrics for each category, and calculate + the mean value over all categories. + - `"micro"`: Average the confusion matrix over all categories + and calculate metrics on the mean confusion matrix. + - `None`: Calculate metrics of every category and output + directly. + + Defaults to "macro". + thr (float, optional): Predictions with scores under the thresholds + are considered as negative. Defaults to None. + topk (int, optional): Predictions with the k-th highest scores are + considered as positive. Defaults to None. + num_classes (Optional, int): The number of classes. If the ``pred`` + is indices instead of onehot, this argument is required. + Defaults to None. + + Returns: + Tuple: The tuple contains precision, recall and f1-score. + And the type of each item is: + + - torch.Tensor: A tensor for each metric. The shape is (1, ) if + ``average`` is not None, and (C, ) if ``average`` is None. + + Notes: + If both ``thr`` and ``topk`` are set, use ``thr` to determine + positive predictions. 
If neither is set, use ``thr=0.5`` as + default. + """ + average_options = ['micro', 'macro', None] + assert average in average_options, 'Invalid `average` argument, ' \ + f'please specicy from {average_options}.' + + def _format_label(label, is_indices): + """format various label to torch.Tensor.""" + if isinstance(label, np.ndarray): + assert label.ndim == 2, 'The shape `pred` and `target` ' \ + 'array must be (N, num_classes).' + label = torch.from_numpy(label) + elif isinstance(label, torch.Tensor): + assert label.ndim == 2, 'The shape `pred` and `target` ' \ + 'tensor must be (N, num_classes).' + elif isinstance(label, Sequence): + if is_indices: + assert num_classes is not None, 'For index-type labels, ' \ + 'please specify `num_classes`.' + label = torch.stack([ + label_to_onehot(indices, num_classes) + for indices in label + ]) + else: + label = torch.stack( + [to_tensor(onehot) for onehot in label]) + else: + raise TypeError( + 'The `pred` and `target` must be type of torch.tensor or ' + f'np.ndarray or sequence but get {type(label)}.') + return label + + pred = _format_label(pred, pred_indices) + target = _format_label(target, target_indices).long() + + assert pred.shape == target.shape, \ + f"The size of pred ({pred.shape}) doesn't match "\ + f'the target ({target.shape}).' + + if num_classes is not None: + assert pred.size(1) == num_classes, \ + f'The shape of `pred` ({pred.shape}) '\ + f"doesn't match the num_classes ({num_classes})." + num_classes = pred.size(1) + + thr = 0.5 if (thr is None and topk is None) else thr + + if thr is not None: + # a label is predicted positive if larger than thr + pos_inds = (pred >= thr).long() + else: + # top-k labels will be predicted positive for any example + _, topk_indices = pred.topk(topk) + pos_inds = torch.zeros_like(pred).scatter_(1, topk_indices, 1) + pos_inds = pos_inds.long() + + return _precision_recall_f1_support(pos_inds, target, average) + + +def _average_precision(pred: torch.Tensor, + target: torch.Tensor) -> torch.Tensor: + r"""Calculate the average precision for a single class. + + AP summarizes a precision-recall curve as the weighted mean of maximum + precisions obtained for any r'>r, where r is the recall: + + .. math:: + \text{AP} = \sum_n (R_n - R_{n-1}) P_n + + Note that no approximation is involved since the curve is piecewise + constant. + + Args: + pred (torch.Tensor): The model prediction with shape + ``(N, num_classes)``. + target (torch.Tensor): The target of predictions with shape + ``(N, num_classes)``. + + Returns: + torch.Tensor: average precision result. + """ + assert pred.shape == target.shape, \ + f"The size of pred ({pred.shape}) doesn't match "\ + f'the target ({target.shape}).' + + # a small value for division by zero errors + eps = torch.finfo(torch.float32).eps + + # get rid of -1 target such as difficult sample + # that is not wanted in evaluation results. 
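+    # After the masking below, the remaining scores are ranked in descending
+    # order and precision is accumulated at every positive rank, i.e.
+    # AP = (1 / num_positives) * sum_k Precision@k * 1[rank k is positive].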
+ valid_index = target > -1 + pred = pred[valid_index] + target = target[valid_index] + + # sort examples + sorted_pred_inds = torch.argsort(pred, dim=0, descending=True) + sorted_target = target[sorted_pred_inds] + + # get indexes when gt_true is positive + pos_inds = sorted_target == 1 + + # Calculate cumulative tp case numbers + tps = torch.cumsum(pos_inds, 0) + total_pos = tps[-1].item() # the last of tensor may change later + + # Calculate cumulative tp&fp(pred_poss) case numbers + pred_pos_nums = torch.arange(1, len(sorted_target) + 1).to(pred.device) + pred_pos_nums[pred_pos_nums < eps] = eps + + tps[torch.logical_not(pos_inds)] = 0 + precision = tps / pred_pos_nums.float() + ap = torch.sum(precision, 0) / max(total_pos, eps) + return ap + + +@METRICS.register_module() +class AveragePrecision(BaseMetric): + r"""Calculate the average precision with respect of classes. + + AveragePrecision (AP) summarizes a precision-recall curve as the weighted + mean of maximum precisions obtained for any r'>r, where r is the recall: + + .. math:: + \text{AP} = \sum_n (R_n - R_{n-1}) P_n + + Note that no approximation is involved since the curve is piecewise + constant. + + Args: + average (str | None): How to calculate the final metrics from + every category. It supports two modes: + + - `"macro"`: Calculate metrics for each category, and calculate + the mean value over all categories. The result of this mode + is also called **mAP**. + - `None`: Calculate metrics of every category and output directly. + + Defaults to "macro". + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Defaults to None. + + References + ---------- + 1. `Wikipedia entry for the Average precision + `_ + + Examples: + >>> import torch + >>> from mmpretrain.evaluation import AveragePrecision + >>> # --------- The Basic Usage for one-hot pred scores --------- + >>> y_pred = torch.Tensor([[0.9, 0.8, 0.3, 0.2], + ... [0.1, 0.2, 0.2, 0.1], + ... [0.7, 0.5, 0.9, 0.3], + ... [0.8, 0.1, 0.1, 0.2]]) + >>> y_true = torch.Tensor([[1, 1, 0, 0], + ... [0, 1, 0, 0], + ... [0, 0, 1, 0], + ... [1, 0, 0, 0]]) + >>> AveragePrecision.calculate(y_pred, y_true) + tensor(70.833) + >>> # ------------------- Use with Evalutor ------------------- + >>> from mmpretrain.structures import DataSample + >>> from mmengine.evaluator import Evaluator + >>> data_samples = [ + ... DataSample().set_pred_score(i).set_gt_score(j) + ... for i, j in zip(y_pred, y_true) + ... ] + >>> evaluator = Evaluator(metrics=AveragePrecision()) + >>> evaluator.process(data_samples) + >>> evaluator.evaluate(5) + {'multi-label/mAP': 70.83333587646484} + >>> # Evaluate on each class + >>> evaluator = Evaluator(metrics=AveragePrecision(average=None)) + >>> evaluator.process(data_samples) + >>> evaluator.evaluate(5) + {'multi-label/AP_classwise': [100., 83.33, 100., 0.]} + """ + default_prefix: Optional[str] = 'multi-label' + + def __init__(self, + average: Optional[str] = 'macro', + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + self.average = average + + def process(self, data_batch, data_samples: Sequence[dict]): + """Process one batch of data samples. 
+ + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + + for data_sample in data_samples: + result = dict() + + result['pred_score'] = data_sample['pred_score'].clone() + num_classes = result['pred_score'].size()[-1] + + if 'gt_score' in data_sample: + result['gt_score'] = data_sample['gt_score'].clone() + else: + result['gt_score'] = label_to_onehot(data_sample['gt_label'], + num_classes) + + # Save the result to `self.results`. + self.results.append(result) + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. + """ + # NOTICE: don't access `self.results` from the method. `self.results` + # are a list of results from multiple batch, while the input `results` + # are the collected results. + + # concat + target = torch.stack([res['gt_score'] for res in results]) + pred = torch.stack([res['pred_score'] for res in results]) + + ap = self.calculate(pred, target, self.average) + + result_metrics = dict() + + if self.average is None: + result_metrics['AP_classwise'] = ap.detach().cpu().tolist() + else: + result_metrics['mAP'] = ap.item() + + return result_metrics + + @staticmethod + def calculate(pred: Union[torch.Tensor, np.ndarray], + target: Union[torch.Tensor, np.ndarray], + average: Optional[str] = 'macro') -> torch.Tensor: + r"""Calculate the average precision for a single class. + + Args: + pred (torch.Tensor | np.ndarray): The model predictions with + shape ``(N, num_classes)``. + target (torch.Tensor | np.ndarray): The target of predictions + with shape ``(N, num_classes)``. + average (str | None): The average method. It supports two modes: + + - `"macro"`: Calculate metrics for each category, and calculate + the mean value over all categories. The result of this mode + is also called mAP. + - `None`: Calculate metrics of every category and output + directly. + + Defaults to "macro". + + Returns: + torch.Tensor: the average precision of all classes. + """ + average_options = ['macro', None] + assert average in average_options, 'Invalid `average` argument, ' \ + f'please specicy from {average_options}.' + + pred = to_tensor(pred) + target = to_tensor(target) + assert pred.ndim == 2 and pred.shape == target.shape, \ + 'Both `pred` and `target` should have shape `(N, num_classes)`.' + + num_classes = pred.shape[1] + ap = pred.new_zeros(num_classes) + for k in range(num_classes): + ap[k] = _average_precision(pred[:, k], target[:, k]) + if average == 'macro': + return ap.mean() * 100.0 + else: + return ap * 100 diff --git a/mmpretrain/evaluation/metrics/multi_task.py b/mmpretrain/evaluation/metrics/multi_task.py new file mode 100644 index 0000000..0e6af76 --- /dev/null +++ b/mmpretrain/evaluation/metrics/multi_task.py @@ -0,0 +1,120 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
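+# A minimal config-style sketch of how the metric defined below is typically
+# wired into an evaluator (the task names 'task0'/'task1' are assumed for
+# illustration; the class docstring shows the equivalent programmatic usage):
+#
+#     val_evaluator = dict(
+#         type='MultiTasksMetric',
+#         task_metrics={
+#             'task0': [dict(type='Accuracy', topk=(1, ))],
+#             'task1': [dict(type='Accuracy', topk=(1, 3))],
+#         })
+#     test_evaluator = val_evaluator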
+from typing import Dict, Sequence + +from mmengine.evaluator import BaseMetric + +from mmpretrain.registry import METRICS + + +@METRICS.register_module() +class MultiTasksMetric(BaseMetric): + """Metrics for MultiTask + Args: + task_metrics(dict): a dictionary in the keys are the names of the tasks + and the values is a list of the metric corresponds to this task + Examples: + >>> import torch + >>> from mmpretrain.evaluation import MultiTasksMetric + # -------------------- The Basic Usage -------------------- + >>>task_metrics = { + 'task0': [dict(type='Accuracy', topk=(1, ))], + 'task1': [dict(type='Accuracy', topk=(1, 3))] + } + >>>pred = [{ + 'pred_task': { + 'task0': torch.tensor([0.7, 0.0, 0.3]), + 'task1': torch.tensor([0.5, 0.2, 0.3]) + }, + 'gt_task': { + 'task0': torch.tensor(0), + 'task1': torch.tensor(2) + } + }, { + 'pred_task': { + 'task0': torch.tensor([0.0, 0.0, 1.0]), + 'task1': torch.tensor([0.0, 0.0, 1.0]) + }, + 'gt_task': { + 'task0': torch.tensor(2), + 'task1': torch.tensor(2) + } + }] + >>>metric = MultiTasksMetric(task_metrics) + >>>metric.process(None, pred) + >>>results = metric.evaluate(2) + results = { + 'task0_accuracy/top1': 100.0, + 'task1_accuracy/top1': 50.0, + 'task1_accuracy/top3': 100.0 + } + """ + + def __init__(self, + task_metrics: Dict, + collect_device: str = 'cpu') -> None: + self.task_metrics = task_metrics + super().__init__(collect_device=collect_device) + + self._metrics = {} + for task_name in self.task_metrics.keys(): + self._metrics[task_name] = [] + for metric in self.task_metrics[task_name]: + self._metrics[task_name].append(METRICS.build(metric)) + + def process(self, data_batch, data_samples: Sequence[dict]): + """Process one batch of data samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + Args: + data_batch: A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + for task_name in self.task_metrics.keys(): + filtered_data_samples = [] + for data_sample in data_samples: + eval_mask = data_sample[task_name]['eval_mask'] + if eval_mask: + filtered_data_samples.append(data_sample[task_name]) + for metric in self._metrics[task_name]: + metric.process(data_batch, filtered_data_samples) + + def compute_metrics(self, results: list) -> dict: + raise NotImplementedError( + 'compute metrics should not be used here directly') + + def evaluate(self, size): + """Evaluate the model performance of the whole dataset after processing + all batches. + + Args: + size (int): Length of the entire validation dataset. When batch + size > 1, the dataloader may pad some data samples to make + sure all ranks have the same length of dataset slice. The + ``collect_results`` function will drop the padded data based on + this size. + Returns: + dict: Evaluation metrics dict on the val dataset. The keys are + "{task_name}_{metric_name}" , and the values + are corresponding results. 
+ """ + metrics = {} + for task_name in self._metrics: + for metric in self._metrics[task_name]: + name = metric.__class__.__name__ + if name == 'MultiTasksMetric' or metric.results: + results = metric.evaluate(size) + else: + results = {metric.__class__.__name__: 0} + for key in results: + name = f'{task_name}_{key}' + if name in results: + """Inspired from https://github.com/open- + mmlab/mmengine/ bl ob/ed20a9cba52ceb371f7c825131636b9e2 + 747172e/mmengine/evalua tor/evaluator.py#L84-L87.""" + raise ValueError( + 'There are multiple metric results with the same' + f'metric name {name}. Please make sure all metrics' + 'have different prefixes.') + metrics[name] = results[key] + return metrics diff --git a/mmpretrain/evaluation/metrics/nocaps.py b/mmpretrain/evaluation/metrics/nocaps.py new file mode 100644 index 0000000..e8e1d06 --- /dev/null +++ b/mmpretrain/evaluation/metrics/nocaps.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional + +import mmengine + +from mmpretrain.registry import METRICS +from mmpretrain.utils import require +from .caption import COCOCaption, save_result + +try: + from pycocoevalcap.eval import COCOEvalCap + from pycocotools.coco import COCO +except ImportError: + COCOEvalCap = None + COCO = None + + +@METRICS.register_module() +class NocapsSave(COCOCaption): + """Nocaps evaluation wrapper. + + Save the generated captions and transform into coco format. + The dumped file can be submitted to the official evluation system. + + Args: + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Should be modified according to the + `retrieval_type` for unambiguous results. Defaults to TR. + """ + + @require('pycocoevalcap') + def __init__(self, + save_dir: str = './', + collect_device: str = 'cpu', + prefix: Optional[str] = None): + super(COCOCaption, self).__init__( + collect_device=collect_device, prefix=prefix) + self.save_dir = save_dir + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. + + Args: + results (dict): The processed results of each batch. + """ + mmengine.mkdir_or_exist(self.save_dir) + save_result( + result=results, + result_dir=self.save_dir, + filename='nocap_pred', + remove_duplicate='image_id', + ) + + return dict() diff --git a/mmpretrain/evaluation/metrics/retrieval.py b/mmpretrain/evaluation/metrics/retrieval.py new file mode 100644 index 0000000..9813486 --- /dev/null +++ b/mmpretrain/evaluation/metrics/retrieval.py @@ -0,0 +1,445 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Sequence, Union + +import mmengine +import numpy as np +import torch +from mmengine.evaluator import BaseMetric +from mmengine.utils import is_seq_of + +from mmpretrain.registry import METRICS +from mmpretrain.structures import label_to_onehot +from .single_label import to_tensor + + +@METRICS.register_module() +class RetrievalRecall(BaseMetric): + r"""Recall evaluation metric for image retrieval. + + Args: + topk (int | Sequence[int]): If the ground truth label matches one of + the best **k** predictions, the sample will be regard as a positive + prediction. 
If the parameter is a tuple, all of top-k recall will + be calculated and outputted together. Defaults to 1. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Defaults to None. + + Examples: + Use in the code: + + >>> import torch + >>> from mmpretrain.evaluation import RetrievalRecall + >>> # -------------------- The Basic Usage -------------------- + >>> y_pred = [[0], [1], [2], [3]] + >>> y_true = [[0, 1], [2], [1], [0, 3]] + >>> RetrievalRecall.calculate( + >>> y_pred, y_true, topk=1, pred_indices=True, target_indices=True) + [tensor([50.])] + >>> # Calculate the recall@1 and recall@5 for non-indices input. + >>> y_score = torch.rand((1000, 10)) + >>> import torch.nn.functional as F + >>> y_true = F.one_hot(torch.arange(0, 1000) % 10, num_classes=10) + >>> RetrievalRecall.calculate(y_score, y_true, topk=(1, 5)) + [tensor(9.3000), tensor(48.4000)] + >>> + >>> # ------------------- Use with Evalutor ------------------- + >>> from mmpretrain.structures import DataSample + >>> from mmengine.evaluator import Evaluator + >>> data_samples = [ + ... DataSample().set_gt_label([0, 1]).set_pred_score( + ... torch.rand(10)) + ... for i in range(1000) + ... ] + >>> evaluator = Evaluator(metrics=RetrievalRecall(topk=(1, 5))) + >>> evaluator.process(data_samples) + >>> evaluator.evaluate(1000) + {'retrieval/Recall@1': 20.700000762939453, + 'retrieval/Recall@5': 78.5999984741211} + + Use in OpenMMLab configs: + + .. code:: python + + val_evaluator = dict(type='RetrievalRecall', topk=(1, 5)) + test_evaluator = val_evaluator + """ + default_prefix: Optional[str] = 'retrieval' + + def __init__(self, + topk: Union[int, Sequence[int]], + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + topk = (topk, ) if isinstance(topk, int) else topk + + for k in topk: + if k <= 0: + raise ValueError('`topk` must be a ingter larger than 0 ' + 'or seq of ingter larger than 0.') + + self.topk = topk + super().__init__(collect_device=collect_device, prefix=prefix) + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]): + """Process one batch of data and predictions. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch (Sequence[dict]): A batch of data from the dataloader. + predictions (Sequence[dict]): A batch of outputs from the model. + """ + for data_sample in data_samples: + pred_score = data_sample['pred_score'].clone() + gt_label = data_sample['gt_label'] + + if 'gt_score' in data_sample: + target = data_sample.get('gt_score').clone() + else: + num_classes = pred_score.size()[-1] + target = label_to_onehot(gt_label, num_classes) + + # Because the retrieval output logit vector will be much larger + # compared to the normal classification, to save resources, the + # evaluation results are computed each batch here and then reduce + # all results at the end. + result = RetrievalRecall.calculate( + pred_score.unsqueeze(0), target.unsqueeze(0), topk=self.topk) + self.results.append(result) + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. 
+ + Args: + results (list): The processed results of each batch. + + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. + """ + result_metrics = dict() + for i, k in enumerate(self.topk): + recall_at_k = sum([r[i].item() for r in results]) / len(results) + result_metrics[f'Recall@{k}'] = recall_at_k + + return result_metrics + + @staticmethod + def calculate(pred: Union[np.ndarray, torch.Tensor], + target: Union[np.ndarray, torch.Tensor], + topk: Union[int, Sequence[int]], + pred_indices: (bool) = False, + target_indices: (bool) = False) -> float: + """Calculate the average recall. + + Args: + pred (torch.Tensor | np.ndarray | Sequence): The prediction + results. A :obj:`torch.Tensor` or :obj:`np.ndarray` with + shape ``(N, M)`` or a sequence of index/onehot + format labels. + target (torch.Tensor | np.ndarray | Sequence): The prediction + results. A :obj:`torch.Tensor` or :obj:`np.ndarray` with + shape ``(N, M)`` or a sequence of index/onehot + format labels. + topk (int, Sequence[int]): Predictions with the k-th highest + scores are considered as positive. + pred_indices (bool): Whether the ``pred`` is a sequence of + category index labels. Defaults to False. + target_indices (bool): Whether the ``target`` is a sequence of + category index labels. Defaults to False. + + Returns: + List[float]: the average recalls. + """ + topk = (topk, ) if isinstance(topk, int) else topk + for k in topk: + if k <= 0: + raise ValueError('`topk` must be a ingter larger than 0 ' + 'or seq of ingter larger than 0.') + + max_keep = max(topk) + pred = _format_pred(pred, max_keep, pred_indices) + target = _format_target(target, target_indices) + + assert len(pred) == len(target), ( + f'Length of `pred`({len(pred)}) and `target` ({len(target)}) ' + f'must be the same.') + + num_samples = len(pred) + results = [] + for k in topk: + recalls = torch.zeros(num_samples) + for i, (sample_pred, + sample_target) in enumerate(zip(pred, target)): + sample_pred = np.array(to_tensor(sample_pred).cpu()) + sample_target = np.array(to_tensor(sample_target).cpu()) + recalls[i] = int(np.in1d(sample_pred[:k], sample_target).max()) + results.append(recalls.mean() * 100) + return results + + +@METRICS.register_module() +class RetrievalAveragePrecision(BaseMetric): + r"""Calculate the average precision for image retrieval. + + Args: + topk (int, optional): Predictions with the k-th highest scores are + considered as positive. + mode (str, optional): The mode to calculate AP, choose from + 'IR'(information retrieval) and 'integrate'. Defaults to 'IR'. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Defaults to None. + + Note: + If the ``mode`` set to 'IR', use the stanford AP calculation of + information retrieval as in wikipedia page[1]; if set to 'integrate', + the method implemented integrates over the precision-recall curve + by averaging two adjacent precision points, then multiplying by the + recall step like mAP in Detection task. This is the convention for + the Revisited Oxford/Paris datasets[2]. 
+ + References: + [1] `Wikipedia entry for the Average precision `_ + + [2] `The Oxford Buildings Dataset + `_ + + Examples: + Use in code: + + >>> import torch + >>> import numpy as np + >>> from mmcls.evaluation import RetrievalAveragePrecision + >>> # using index format inputs + >>> pred = [ torch.Tensor([idx for idx in range(100)]) ] * 3 + >>> target = [[0, 3, 6, 8, 35], [1, 2, 54, 105], [2, 42, 205]] + >>> RetrievalAveragePrecision.calculate(pred, target, 10, True, True) + 29.246031746031747 + >>> # using tensor format inputs + >>> pred = np.array([np.linspace(0.95, 0.05, 10)] * 2) + >>> target = torch.Tensor([[1, 0, 1, 0, 0, 1, 0, 0, 1, 1]] * 2) + >>> RetrievalAveragePrecision.calculate(pred, target, 10) + 62.222222222222214 + + Use in OpenMMLab config files: + + .. code:: python + + val_evaluator = dict(type='RetrievalAveragePrecision', topk=100) + test_evaluator = val_evaluator + """ + + default_prefix: Optional[str] = 'retrieval' + + def __init__(self, + topk: Optional[int] = None, + mode: Optional[str] = 'IR', + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + if topk is None or (isinstance(topk, int) and topk <= 0): + raise ValueError('`topk` must be a ingter larger than 0.') + + mode_options = ['IR', 'integrate'] + assert mode in mode_options, \ + f'Invalid `mode` argument, please specify from {mode_options}.' + + self.topk = topk + self.mode = mode + super().__init__(collect_device=collect_device, prefix=prefix) + + def process(self, data_batch: Sequence[dict], + data_samples: Sequence[dict]): + """Process one batch of data and predictions. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + Args: + data_batch (Sequence[dict]): A batch of data from the dataloader. + predictions (Sequence[dict]): A batch of outputs from the model. + """ + for data_sample in data_samples: + pred_score = data_sample.get('pred_score').clone() + + if 'gt_score' in data_sample: + target = data_sample.get('gt_score').clone() + else: + gt_label = data_sample.get('gt_label') + num_classes = pred_score.size()[-1] + target = label_to_onehot(gt_label, num_classes) + + # Because the retrieval output logit vector will be much larger + # compared to the normal classification, to save resources, the + # evaluation results are computed each batch here and then reduce + # all results at the end. + result = RetrievalAveragePrecision.calculate( + pred_score.unsqueeze(0), + target.unsqueeze(0), + self.topk, + mode=self.mode) + self.results.append(result) + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. + """ + result_metrics = dict() + result_metrics[f'mAP@{self.topk}'] = np.mean(self.results).item() + + return result_metrics + + @staticmethod + def calculate(pred: Union[np.ndarray, torch.Tensor], + target: Union[np.ndarray, torch.Tensor], + topk: Optional[int] = None, + pred_indices: (bool) = False, + target_indices: (bool) = False, + mode: str = 'IR') -> float: + """Calculate the average precision. + Args: + pred (torch.Tensor | np.ndarray | Sequence): The prediction + results. A :obj:`torch.Tensor` or :obj:`np.ndarray` with + shape ``(N, M)`` or a sequence of index/onehot + format labels. + target (torch.Tensor | np.ndarray | Sequence): The prediction + results. 
A :obj:`torch.Tensor` or :obj:`np.ndarray` with + shape ``(N, M)`` or a sequence of index/onehot + format labels. + topk (int, optional): Predictions with the k-th highest scores + are considered as positive. + pred_indices (bool): Whether the ``pred`` is a sequence of + category index labels. Defaults to False. + target_indices (bool): Whether the ``target`` is a sequence of + category index labels. Defaults to False. + mode (Optional[str]): The mode to calculate AP, choose from + 'IR'(information retrieval) and 'integrate'. Defaults to 'IR'. + + Note: + If the ``mode`` set to 'IR', use the stanford AP calculation of + information retrieval as in wikipedia page; if set to 'integrate', + the method implemented integrates over the precision-recall curve + by averaging two adjacent precision points, then multiplying by the + recall step like mAP in Detection task. This is the convention for + the Revisited Oxford/Paris datasets. + + Returns: + float: the average precision of the query image. + + References: + [1] `Wikipedia entry for Average precision(information_retrieval) + `_ + [2] `The Oxford Buildings Dataset 0 else 1 + cur_precision = (i + 1) / (rank + 1) + prediction = (old_precision + cur_precision) / 2 + ap += prediction + ap = ap / len(target) + + return ap * 100 + + +def _format_pred(label, topk=None, is_indices=False): + """format various label to List[indices].""" + if is_indices: + assert isinstance(label, Sequence), \ + '`pred` must be Sequence of indices when' \ + f' `pred_indices` set to True, but get {type(label)}' + for i, sample_pred in enumerate(label): + assert is_seq_of(sample_pred, int) or isinstance( + sample_pred, (np.ndarray, torch.Tensor)), \ + '`pred` should be Sequence of indices when `pred_indices`' \ + f'set to True. but pred[{i}] is {sample_pred}' + if topk: + label[i] = sample_pred[:min(topk, len(sample_pred))] + return label + if isinstance(label, np.ndarray): + label = torch.from_numpy(label) + elif not isinstance(label, torch.Tensor): + raise TypeError(f'The pred must be type of torch.tensor, ' + f'np.ndarray or Sequence but get {type(label)}.') + topk = topk if topk else label.size()[-1] + _, indices = label.topk(topk) + return indices + + +def _format_target(label, is_indices=False): + """format various label to List[indices].""" + if is_indices: + assert isinstance(label, Sequence), \ + '`target` must be Sequence of indices when' \ + f' `target_indices` set to True, but get {type(label)}' + for i, sample_gt in enumerate(label): + assert is_seq_of(sample_gt, int) or isinstance( + sample_gt, (np.ndarray, torch.Tensor)), \ + '`target` should be Sequence of indices when ' \ + f'`target_indices` set to True. but target[{i}] is {sample_gt}' + return label + + if isinstance(label, np.ndarray): + label = torch.from_numpy(label) + elif isinstance(label, Sequence) and not mmengine.is_str(label): + label = torch.tensor(label) + elif not isinstance(label, torch.Tensor): + raise TypeError(f'The pred must be type of torch.tensor, ' + f'np.ndarray or Sequence but get {type(label)}.') + + indices = [sample_gt.nonzero().squeeze(-1) for sample_gt in label] + return indices diff --git a/mmpretrain/evaluation/metrics/scienceqa.py b/mmpretrain/evaluation/metrics/scienceqa.py new file mode 100644 index 0000000..ebf01c7 --- /dev/null +++ b/mmpretrain/evaluation/metrics/scienceqa.py @@ -0,0 +1,170 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
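+# A minimal config-style sketch (illustrative, using only default options)
+# of how the metric defined below is referenced in an evaluator:
+#
+#     val_evaluator = dict(type='ScienceQAMetric')
+#     test_evaluator = val_evaluator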
+import random +from typing import List, Optional + +from mmengine.evaluator import BaseMetric + +from mmpretrain.registry import METRICS + + +def get_pred_idx(prediction: str, choices: List[str], + options: List[str]) -> int: # noqa + """Get the index (e.g. 2) from the prediction (e.g. 'C') + + Args: + prediction (str): The prediction from the model, + from ['A', 'B', 'C', 'D', 'E'] + choices (List(str)): The choices for the question, + from ['A', 'B', 'C', 'D', 'E'] + options (List(str)): The options for the question, + from ['A', 'B', 'C', 'D', 'E'] + + Returns: + int: The index of the prediction, from [0, 1, 2, 3, 4] + """ + if prediction in options[:len(choices)]: + return options.index(prediction) + else: + return random.choice(range(len(choices))) + + +@METRICS.register_module() +class ScienceQAMetric(BaseMetric): + """Evaluation Metric for ScienceQA. + + Args: + options (List(str)): Options for each question. Defaults to + ["A", "B", "C", "D", "E"]. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Should be modified according to the + `retrieval_type` for unambiguous results. Defaults to TR. + """ + + def __init__(self, + options: List[str] = ['A', 'B', 'C', 'D', 'E'], + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + self.options = options + + def process(self, data_batch, data_samples) -> None: + """Process one batch of data samples. + + data_samples should contain the following keys: + 1. pred_answer (str): The prediction from the model, + from ['A', 'B', 'C', 'D', 'E'] + 2. choices (List(str)): The choices for the question, + from ['A', 'B', 'C', 'D', 'E'] + 3. grade (int): The grade for the question, from grade1 to grade12 + 4. subject (str): The subject for the question, from + ['natural science', 'social science', 'language science'] + 5. answer (str): The answer for the question, from + ['A', 'B', 'C', 'D', 'E'] + 6. hint (str): The hint for the question + 7. has_image (bool): Whether or not the question has image + + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + for data_sample in data_samples: + result = dict() + choices = data_sample.get('choices') + result['prediction'] = get_pred_idx( + data_sample.get('pred_answer'), choices, self.options) + result['grade'] = data_sample.get('grade') + result['subject'] = data_sample.get('subject') + result['answer'] = data_sample.get('gt_answer') + hint = data_sample.get('hint') + has_image = data_sample.get('has_image', False) + result['no_context'] = True if not has_image and len( + hint) == 0 else False # noqa + result['has_text'] = True if len(hint) > 0 else False + result['has_image'] = has_image + + # Save the result to `self.results`. + self.results.append(result) + + def compute_metrics(self, results: List) -> dict: + """Compute the metrics from processed results. + + Args: + results (dict): The processed results of each batch. + + Returns: + Dict: The computed metrics. 
The keys are the names of the metrics, + and the values are corresponding results. + """ + # NOTICE: don't access `self.results` from the method. + metrics = dict() + + all_acc = [] + acc_natural = [] + acc_social = [] + acc_language = [] + acc_has_text = [] + acc_has_image = [] + acc_no_context = [] + acc_grade_1_6 = [] + acc_grade_7_12 = [] + + for result in results: + correct = result['prediction'] == result['answer'] + all_acc.append(correct) + # different subjects + if result['subject'] == 'natural science': + acc_natural.append(correct) + elif result['subject'] == 'social science': + acc_social.append(correct) + elif result['subject'] == 'language science': + acc_language.append(correct) + + # different context + if result['has_text']: + acc_has_text.append(correct) + elif result['has_image']: + acc_has_image.append(correct) + elif result['no_context']: + acc_no_context.append(correct) + + # different grade + if result['grade'] in [ + 'grade1', 'grade2', 'grade3', 'grade4', 'grade5', 'grade6' + ]: + acc_grade_1_6.append(correct) + elif result['grade'] in [ + 'grade7', 'grade8', 'grade9', 'grade10', 'grade11', + 'grade12' + ]: + acc_grade_7_12.append(correct) + + metrics['all_acc'] = sum(all_acc) / len(all_acc) + if len(acc_natural) > 0: + metrics['acc_natural'] = sum(acc_natural) / len(acc_natural) + if len(acc_social) > 0: + metrics['acc_social'] = sum(acc_social) / len(acc_social) + if len(acc_language) > 0: + metrics['acc_language'] = sum(acc_language) / len(acc_language) + if len(acc_has_text) > 0: + metrics['acc_has_text'] = sum(acc_has_text) / len(acc_has_text) + if len(acc_has_image) > 0: + metrics['acc_has_image'] = sum(acc_has_image) / len(acc_has_image) + if len(acc_no_context) > 0: + metrics['acc_no_context'] = sum(acc_no_context) / len( + acc_no_context) + if len(acc_grade_1_6) > 0: + metrics['acc_grade_1_6'] = sum(acc_grade_1_6) / len(acc_grade_1_6) + if len(acc_grade_7_12) > 0: + metrics['acc_grade_7_12'] = sum(acc_grade_7_12) / len( + acc_grade_7_12) + + return metrics diff --git a/mmpretrain/evaluation/metrics/shape_bias_label.py b/mmpretrain/evaluation/metrics/shape_bias_label.py new file mode 100644 index 0000000..27c80a3 --- /dev/null +++ b/mmpretrain/evaluation/metrics/shape_bias_label.py @@ -0,0 +1,172 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import csv +import os +import os.path as osp +from typing import List, Sequence + +import numpy as np +import torch +from mmengine.dist.utils import get_rank +from mmengine.evaluator import BaseMetric + +from mmpretrain.registry import METRICS + + +@METRICS.register_module() +class ShapeBiasMetric(BaseMetric): + """Evaluate the model on ``cue_conflict`` dataset. + + This module will evaluate the model on an OOD dataset, cue_conflict, in + order to measure the shape bias of the model. In addition to compuate the + Top-1 accuracy, this module also generate a csv file to record the + detailed prediction results, such that this csv file can be used to + generate the shape bias curve. + + Args: + csv_dir (str): The directory to save the csv file. + model_name (str): The name of the csv file. Please note that the + model name should be an unique identifier. + dataset_name (str): The name of the dataset. Default: 'cue_conflict'. 
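+
+    A minimal evaluator config sketch (the ``csv_dir`` and ``model_name``
+    values below are placeholders)::
+
+        val_evaluator = dict(
+            type='ShapeBiasMetric',
+            csv_dir='work_dirs/shape_bias',
+            model_name='resnet50')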
+ """ + + # mapping several classes from ImageNet-1K to the same category + airplane_indices = [404] + bear_indices = [294, 295, 296, 297] + bicycle_indices = [444, 671] + bird_indices = [ + 8, 10, 11, 12, 13, 14, 15, 16, 18, 19, 20, 22, 23, 24, 80, 81, 82, 83, + 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 98, 99, 100, 127, 128, 129, + 130, 131, 132, 133, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, + 145 + ] + boat_indices = [472, 554, 625, 814, 914] + bottle_indices = [440, 720, 737, 898, 899, 901, 907] + car_indices = [436, 511, 817] + cat_indices = [281, 282, 283, 284, 285, 286] + chair_indices = [423, 559, 765, 857] + clock_indices = [409, 530, 892] + dog_indices = [ + 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 203, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, + 239, 240, 241, 243, 244, 245, 246, 247, 248, 249, 250, 252, 253, 254, + 255, 256, 257, 259, 261, 262, 263, 265, 266, 267, 268 + ] + elephant_indices = [385, 386] + keyboard_indices = [508, 878] + knife_indices = [499] + oven_indices = [766] + truck_indices = [555, 569, 656, 675, 717, 734, 864, 867] + + def __init__(self, + csv_dir: str, + model_name: str, + dataset_name: str = 'cue_conflict', + **kwargs) -> None: + super().__init__(**kwargs) + + self.categories = sorted([ + 'knife', 'keyboard', 'elephant', 'bicycle', 'airplane', 'clock', + 'oven', 'chair', 'bear', 'boat', 'cat', 'bottle', 'truck', 'car', + 'bird', 'dog' + ]) + self.csv_dir = csv_dir + self.model_name = model_name + self.dataset_name = dataset_name + if get_rank() == 0: + self.csv_path = self.create_csv() + + def process(self, data_batch, data_samples: Sequence[dict]) -> None: + """Process one batch of data samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + for data_sample in data_samples: + result = dict() + if 'pred_score' in data_sample: + result['pred_score'] = data_sample['pred_score'].cpu() + else: + result['pred_label'] = data_sample['pred_label'].cpu() + result['gt_label'] = data_sample['gt_label'].cpu() + result['gt_category'] = data_sample['img_path'].split('/')[-2] + result['img_name'] = data_sample['img_path'].split('/')[-1] + + aggregated_category_probabilities = [] + # get the prediction for each category of current instance + for category in self.categories: + category_indices = getattr(self, f'{category}_indices') + category_probabilities = torch.gather( + result['pred_score'], 0, + torch.tensor(category_indices)).mean() + aggregated_category_probabilities.append( + category_probabilities) + # sort the probabilities in descending order + pred_indices = torch.stack(aggregated_category_probabilities + ).argsort(descending=True).numpy() + result['pred_category'] = np.take(self.categories, pred_indices) + + # Save the result to `self.results`. 
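+            # (each result keeps the ranked category names in
+            # `pred_category`, plus the ground-truth category parsed from
+            # the image path)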
+ self.results.append(result) + + def create_csv(self) -> str: + """Create a csv file to store the results.""" + session_name = 'session-1' + csv_path = osp.join( + self.csv_dir, self.dataset_name + '_' + self.model_name + '_' + + session_name + '.csv') + if osp.exists(csv_path): + os.remove(csv_path) + directory = osp.dirname(csv_path) + if not osp.exists(directory): + os.makedirs(directory, exist_ok=True) + with open(csv_path, 'w') as f: + writer = csv.writer(f) + writer.writerow([ + 'subj', 'session', 'trial', 'rt', 'object_response', + 'category', 'condition', 'imagename' + ]) + return csv_path + + def dump_results_to_csv(self, results: List[dict]) -> None: + """Dump the results to a csv file. + + Args: + results (List[dict]): A list of results. + """ + for i, result in enumerate(results): + img_name = result['img_name'] + category = result['gt_category'] + condition = 'NaN' + with open(self.csv_path, 'a') as f: + writer = csv.writer(f) + writer.writerow([ + self.model_name, 1, i + 1, 'NaN', + result['pred_category'][0], category, condition, img_name + ]) + + def compute_metrics(self, results: List[dict]) -> dict: + """Compute the metrics from the results. + + Args: + results (List[dict]): A list of results. + + Returns: + dict: A dict of metrics. + """ + if get_rank() == 0: + self.dump_results_to_csv(results) + metrics = dict() + metrics['accuracy/top1'] = np.mean([ + result['pred_category'][0] == result['gt_category'] + for result in results + ]) + + return metrics diff --git a/mmpretrain/evaluation/metrics/single_label.py b/mmpretrain/evaluation/metrics/single_label.py new file mode 100644 index 0000000..f9329b9 --- /dev/null +++ b/mmpretrain/evaluation/metrics/single_label.py @@ -0,0 +1,776 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from itertools import product +from typing import List, Optional, Sequence, Union + +import mmengine +import numpy as np +import torch +import torch.nn.functional as F +from mmengine.evaluator import BaseMetric + +from mmpretrain.registry import METRICS + + +def to_tensor(value): + """Convert value to torch.Tensor.""" + if isinstance(value, np.ndarray): + value = torch.from_numpy(value) + elif isinstance(value, Sequence) and not mmengine.is_str(value): + value = torch.tensor(value) + elif not isinstance(value, torch.Tensor): + raise TypeError(f'{type(value)} is not an available argument.') + return value + + +def _precision_recall_f1_support(pred_positive, gt_positive, average): + """calculate base classification task metrics, such as precision, recall, + f1_score, support.""" + average_options = ['micro', 'macro', None] + assert average in average_options, 'Invalid `average` argument, ' \ + f'please specify from {average_options}.' + + # ignore -1 target such as difficult sample that is not wanted + # in evaluation results. 
+ # only for calculate multi-label without affecting single-label behavior + ignored_index = gt_positive == -1 + pred_positive[ignored_index] = 0 + gt_positive[ignored_index] = 0 + + class_correct = (pred_positive & gt_positive) + if average == 'micro': + tp_sum = class_correct.sum() + pred_sum = pred_positive.sum() + gt_sum = gt_positive.sum() + else: + tp_sum = class_correct.sum(0) + pred_sum = pred_positive.sum(0) + gt_sum = gt_positive.sum(0) + + precision = tp_sum / torch.clamp(pred_sum, min=1).float() * 100 + recall = tp_sum / torch.clamp(gt_sum, min=1).float() * 100 + f1_score = 2 * precision * recall / torch.clamp( + precision + recall, min=torch.finfo(torch.float32).eps) + if average in ['macro', 'micro']: + precision = precision.mean(0) + recall = recall.mean(0) + f1_score = f1_score.mean(0) + support = gt_sum.sum(0) + else: + support = gt_sum + return precision, recall, f1_score, support + + +@METRICS.register_module() +class Accuracy(BaseMetric): + r"""Accuracy evaluation metric. + + For either binary classification or multi-class classification, the + accuracy is the fraction of correct predictions in all predictions: + + .. math:: + + \text{Accuracy} = \frac{N_{\text{correct}}}{N_{\text{all}}} + + Args: + topk (int | Sequence[int]): If the ground truth label matches one of + the best **k** predictions, the sample will be regard as a positive + prediction. If the parameter is a tuple, all of top-k accuracy will + be calculated and outputted together. Defaults to 1. + thrs (Sequence[float | None] | float | None): If a float, predictions + with score lower than the threshold will be regard as the negative + prediction. If None, not apply threshold. If the parameter is a + tuple, accuracy based on all thresholds will be calculated and + outputted together. Defaults to 0. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Defaults to None. + + Examples: + >>> import torch + >>> from mmpretrain.evaluation import Accuracy + >>> # -------------------- The Basic Usage -------------------- + >>> y_pred = [0, 2, 1, 3] + >>> y_true = [0, 1, 2, 3] + >>> Accuracy.calculate(y_pred, y_true) + tensor([50.]) + >>> # Calculate the top1 and top5 accuracy. + >>> y_score = torch.rand((1000, 10)) + >>> y_true = torch.zeros((1000, )) + >>> Accuracy.calculate(y_score, y_true, topk=(1, 5)) + [[tensor([9.9000])], [tensor([51.5000])]] + >>> + >>> # ------------------- Use with Evalutor ------------------- + >>> from mmpretrain.structures import DataSample + >>> from mmengine.evaluator import Evaluator + >>> data_samples = [ + ... DataSample().set_gt_label(0).set_pred_score(torch.rand(10)) + ... for i in range(1000) + ... 
] + >>> evaluator = Evaluator(metrics=Accuracy(topk=(1, 5))) + >>> evaluator.process(data_samples) + >>> evaluator.evaluate(1000) + { + 'accuracy/top1': 9.300000190734863, + 'accuracy/top5': 51.20000076293945 + } + """ + default_prefix: Optional[str] = 'accuracy' + + def __init__(self, + topk: Union[int, Sequence[int]] = (1, ), + thrs: Union[float, Sequence[Union[float, None]], None] = 0., + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + + if isinstance(topk, int): + self.topk = (topk, ) + else: + self.topk = tuple(topk) + + if isinstance(thrs, float) or thrs is None: + self.thrs = (thrs, ) + else: + self.thrs = tuple(thrs) + + def process(self, data_batch, data_samples: Sequence[dict]): + """Process one batch of data samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + + for data_sample in data_samples: + result = dict() + if 'pred_score' in data_sample: + result['pred_score'] = data_sample['pred_score'].cpu() + else: + result['pred_label'] = data_sample['pred_label'].cpu() + result['gt_label'] = data_sample['gt_label'].cpu() + # Save the result to `self.results`. + self.results.append(result) + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. + + Args: + results (dict): The processed results of each batch. + + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. + """ + # NOTICE: don't access `self.results` from the method. + metrics = {} + + # concat + target = torch.cat([res['gt_label'] for res in results]) + if 'pred_score' in results[0]: + pred = torch.stack([res['pred_score'] for res in results]) + + try: + acc = self.calculate(pred, target, self.topk, self.thrs) + except ValueError as e: + # If the topk is invalid. + raise ValueError( + str(e) + ' Please check the `val_evaluator` and ' + '`test_evaluator` fields in your config file.') + + multi_thrs = len(self.thrs) > 1 + for i, k in enumerate(self.topk): + for j, thr in enumerate(self.thrs): + name = f'top{k}' + if multi_thrs: + name += '_no-thr' if thr is None else f'_thr-{thr:.2f}' + metrics[name] = acc[i][j].item() + else: + # If only label in the `pred_label`. + pred = torch.cat([res['pred_label'] for res in results]) + acc = self.calculate(pred, target, self.topk, self.thrs) + metrics['top1'] = acc.item() + + return metrics + + @staticmethod + def calculate( + pred: Union[torch.Tensor, np.ndarray, Sequence], + target: Union[torch.Tensor, np.ndarray, Sequence], + topk: Sequence[int] = (1, ), + thrs: Sequence[Union[float, None]] = (0., ), + ) -> Union[torch.Tensor, List[List[torch.Tensor]]]: + """Calculate the accuracy. + + Args: + pred (torch.Tensor | np.ndarray | Sequence): The prediction + results. It can be labels (N, ), or scores of every + class (N, C). + target (torch.Tensor | np.ndarray | Sequence): The target of + each prediction with shape (N, ). + thrs (Sequence[float | None]): Predictions with scores under + the thresholds are considered negative. It's only used + when ``pred`` is scores. None means no thresholds. + Defaults to (0., ). + thrs (Sequence[float]): Predictions with scores under + the thresholds are considered negative. It's only used + when ``pred`` is scores. 
Defaults to (0., ). + + Returns: + torch.Tensor | List[List[torch.Tensor]]: Accuracy. + + - torch.Tensor: If the ``pred`` is a sequence of label instead of + score (number of dimensions is 1). Only return a top-1 accuracy + tensor, and ignore the argument ``topk` and ``thrs``. + - List[List[torch.Tensor]]: If the ``pred`` is a sequence of score + (number of dimensions is 2). Return the accuracy on each ``topk`` + and ``thrs``. And the first dim is ``topk``, the second dim is + ``thrs``. + """ + + pred = to_tensor(pred) + target = to_tensor(target).to(torch.int64) + num = pred.size(0) + assert pred.size(0) == target.size(0), \ + f"The size of pred ({pred.size(0)}) doesn't match "\ + f'the target ({target.size(0)}).' + + if pred.ndim == 1: + # For pred label, ignore topk and acc + pred_label = pred.int() + correct = pred.eq(target).float().sum(0, keepdim=True) + acc = correct.mul_(100. / num) + return acc + else: + # For pred score, calculate on all topk and thresholds. + pred = pred.float() + maxk = max(topk) + + if maxk > pred.size(1): + raise ValueError( + f'Top-{maxk} accuracy is unavailable since the number of ' + f'categories is {pred.size(1)}.') + + pred_score, pred_label = pred.topk(maxk, dim=1) + pred_label = pred_label.t() + correct = pred_label.eq(target.view(1, -1).expand_as(pred_label)) + results = [] + for k in topk: + results.append([]) + for thr in thrs: + # Only prediction values larger than thr are counted + # as correct + _correct = correct + if thr is not None: + _correct = _correct & (pred_score.t() > thr) + correct_k = _correct[:k].reshape(-1).float().sum( + 0, keepdim=True) + acc = correct_k.mul_(100. / num) + results[-1].append(acc) + return results + + +@METRICS.register_module() +class SingleLabelMetric(BaseMetric): + r"""A collection of precision, recall, f1-score and support for + single-label tasks. + + The collection of metrics is for single-label multi-class classification. + And all these metrics are based on the confusion matrix of every category: + + .. image:: ../../_static/image/confusion-matrix.png + :width: 60% + :align: center + + All metrics can be formulated use variables above: + + **Precision** is the fraction of correct predictions in all predictions: + + .. math:: + \text{Precision} = \frac{TP}{TP+FP} + + **Recall** is the fraction of correct predictions in all targets: + + .. math:: + \text{Recall} = \frac{TP}{TP+FN} + + **F1-score** is the harmonic mean of the precision and recall: + + .. math:: + \text{F1-score} = \frac{2\times\text{Recall}\times\text{Precision}}{\text{Recall}+\text{Precision}} + + **Support** is the number of samples: + + .. math:: + \text{Support} = TP + TN + FN + FP + + Args: + thrs (Sequence[float | None] | float | None): If a float, predictions + with score lower than the threshold will be regard as the negative + prediction. If None, only the top-1 prediction will be regard as + the positive prediction. If the parameter is a tuple, accuracy + based on all thresholds will be calculated and outputted together. + Defaults to 0. + items (Sequence[str]): The detailed metric items to evaluate, select + from "precision", "recall", "f1-score" and "support". + Defaults to ``('precision', 'recall', 'f1-score')``. + average (str | None): How to calculate the final metrics from the + confusion matrix of every category. It supports three modes: + + - `"macro"`: Calculate metrics for each category, and calculate + the mean value over all categories. 
+ - `"micro"`: Average the confusion matrix over all categories and + calculate metrics on the mean confusion matrix. + - `None`: Calculate metrics of every category and output directly. + + Defaults to "macro". + num_classes (int, optional): The number of classes. Defaults to None. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Defaults to None. + + Examples: + >>> import torch + >>> from mmpretrain.evaluation import SingleLabelMetric + >>> # -------------------- The Basic Usage -------------------- + >>> y_pred = [0, 1, 1, 3] + >>> y_true = [0, 2, 1, 3] + >>> # Output precision, recall, f1-score and support. + >>> SingleLabelMetric.calculate(y_pred, y_true, num_classes=4) + (tensor(62.5000), tensor(75.), tensor(66.6667), tensor(4)) + >>> # Calculate with different thresholds. + >>> y_score = torch.rand((1000, 10)) + >>> y_true = torch.zeros((1000, )) + >>> SingleLabelMetric.calculate(y_score, y_true, thrs=(0., 0.9)) + [(tensor(10.), tensor(0.9500), tensor(1.7352), tensor(1000)), + (tensor(10.), tensor(0.5500), tensor(1.0427), tensor(1000))] + >>> + >>> # ------------------- Use with Evalutor ------------------- + >>> from mmpretrain.structures import DataSample + >>> from mmengine.evaluator import Evaluator + >>> data_samples = [ + ... DataSample().set_gt_label(i%5).set_pred_score(torch.rand(5)) + ... for i in range(1000) + ... ] + >>> evaluator = Evaluator(metrics=SingleLabelMetric()) + >>> evaluator.process(data_samples) + >>> evaluator.evaluate(1000) + {'single-label/precision': 19.650691986083984, + 'single-label/recall': 19.600000381469727, + 'single-label/f1-score': 19.619548797607422} + >>> # Evaluate on each class + >>> evaluator = Evaluator(metrics=SingleLabelMetric(average=None)) + >>> evaluator.process(data_samples) + >>> evaluator.evaluate(1000) + { + 'single-label/precision_classwise': [21.1, 18.7, 17.8, 19.4, 16.1], + 'single-label/recall_classwise': [18.5, 18.5, 17.0, 20.0, 18.0], + 'single-label/f1-score_classwise': [19.7, 18.6, 17.1, 19.7, 17.0] + } + """ # noqa: E501 + default_prefix: Optional[str] = 'single-label' + + def __init__(self, + thrs: Union[float, Sequence[Union[float, None]], None] = 0., + items: Sequence[str] = ('precision', 'recall', 'f1-score'), + average: Optional[str] = 'macro', + num_classes: Optional[int] = None, + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device=collect_device, prefix=prefix) + + if isinstance(thrs, float) or thrs is None: + self.thrs = (thrs, ) + else: + self.thrs = tuple(thrs) + + for item in items: + assert item in ['precision', 'recall', 'f1-score', 'support'], \ + f'The metric {item} is not supported by `SingleLabelMetric`,' \ + ' please specify from "precision", "recall", "f1-score" and ' \ + '"support".' + self.items = tuple(items) + self.average = average + self.num_classes = num_classes + + def process(self, data_batch, data_samples: Sequence[dict]): + """Process one batch of data samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. 
+ data_samples (Sequence[dict]): A batch of outputs from the model. + """ + + for data_sample in data_samples: + result = dict() + if 'pred_score' in data_sample: + result['pred_score'] = data_sample['pred_score'].cpu() + else: + num_classes = self.num_classes or data_sample.get( + 'num_classes') + assert num_classes is not None, \ + 'The `num_classes` must be specified if no `pred_score`.' + result['pred_label'] = data_sample['pred_label'].cpu() + result['num_classes'] = num_classes + result['gt_label'] = data_sample['gt_label'].cpu() + # Save the result to `self.results`. + self.results.append(result) + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. + + Args: + results (list): The processed results of each batch. + + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. + """ + # NOTICE: don't access `self.results` from the method. `self.results` + # are a list of results from multiple batch, while the input `results` + # are the collected results. + metrics = {} + + def pack_results(precision, recall, f1_score, support): + single_metrics = {} + if 'precision' in self.items: + single_metrics['precision'] = precision + if 'recall' in self.items: + single_metrics['recall'] = recall + if 'f1-score' in self.items: + single_metrics['f1-score'] = f1_score + if 'support' in self.items: + single_metrics['support'] = support + return single_metrics + + # concat + target = torch.cat([res['gt_label'] for res in results]) + if 'pred_score' in results[0]: + pred = torch.stack([res['pred_score'] for res in results]) + metrics_list = self.calculate( + pred, target, thrs=self.thrs, average=self.average) + + multi_thrs = len(self.thrs) > 1 + for i, thr in enumerate(self.thrs): + if multi_thrs: + suffix = '_no-thr' if thr is None else f'_thr-{thr:.2f}' + else: + suffix = '' + + for k, v in pack_results(*metrics_list[i]).items(): + metrics[k + suffix] = v + else: + # If only label in the `pred_label`. + pred = torch.cat([res['pred_label'] for res in results]) + res = self.calculate( + pred, + target, + average=self.average, + num_classes=results[0]['num_classes']) + metrics = pack_results(*res) + + result_metrics = dict() + for k, v in metrics.items(): + + if self.average is None: + result_metrics[k + '_classwise'] = v.cpu().detach().tolist() + elif self.average == 'micro': + result_metrics[k + f'_{self.average}'] = v.item() + else: + result_metrics[k] = v.item() + + return result_metrics + + @staticmethod + def calculate( + pred: Union[torch.Tensor, np.ndarray, Sequence], + target: Union[torch.Tensor, np.ndarray, Sequence], + thrs: Sequence[Union[float, None]] = (0., ), + average: Optional[str] = 'macro', + num_classes: Optional[int] = None, + ) -> Union[torch.Tensor, List[torch.Tensor]]: + """Calculate the precision, recall, f1-score and support. + + Args: + pred (torch.Tensor | np.ndarray | Sequence): The prediction + results. It can be labels (N, ), or scores of every + class (N, C). + target (torch.Tensor | np.ndarray | Sequence): The target of + each prediction with shape (N, ). + thrs (Sequence[float | None]): Predictions with scores under + the thresholds are considered negative. It's only used + when ``pred`` is scores. None means no thresholds. + Defaults to (0., ). + average (str | None): How to calculate the final metrics from + the confusion matrix of every category. 
It supports three + modes: + + - `"macro"`: Calculate metrics for each category, and calculate + the mean value over all categories. + - `"micro"`: Average the confusion matrix over all categories + and calculate metrics on the mean confusion matrix. + - `None`: Calculate metrics of every category and output + directly. + + Defaults to "macro". + num_classes (Optional, int): The number of classes. If the ``pred`` + is label instead of scores, this argument is required. + Defaults to None. + + Returns: + Tuple: The tuple contains precision, recall and f1-score. + And the type of each item is: + + - torch.Tensor: If the ``pred`` is a sequence of label instead of + score (number of dimensions is 1). Only returns a tensor for + each metric. The shape is (1, ) if ``classwise`` is False, and + (C, ) if ``classwise`` is True. + - List[torch.Tensor]: If the ``pred`` is a sequence of score + (number of dimensions is 2). Return the metrics on each ``thrs``. + The shape of tensor is (1, ) if ``classwise`` is False, and (C, ) + if ``classwise`` is True. + """ + average_options = ['micro', 'macro', None] + assert average in average_options, 'Invalid `average` argument, ' \ + f'please specify from {average_options}.' + + pred = to_tensor(pred) + target = to_tensor(target).to(torch.int64) + assert pred.size(0) == target.size(0), \ + f"The size of pred ({pred.size(0)}) doesn't match "\ + f'the target ({target.size(0)}).' + + if pred.ndim == 1: + assert num_classes is not None, \ + 'Please specify the `num_classes` if the `pred` is labels ' \ + 'intead of scores.' + gt_positive = F.one_hot(target.flatten(), num_classes) + pred_positive = F.one_hot(pred.to(torch.int64), num_classes) + return _precision_recall_f1_support(pred_positive, gt_positive, + average) + else: + # For pred score, calculate on all thresholds. + num_classes = pred.size(1) + pred_score, pred_label = torch.topk(pred, k=1) + pred_score = pred_score.flatten() + pred_label = pred_label.flatten() + + gt_positive = F.one_hot(target.flatten(), num_classes) + + results = [] + for thr in thrs: + pred_positive = F.one_hot(pred_label, num_classes) + if thr is not None: + pred_positive[pred_score <= thr] = 0 + results.append( + _precision_recall_f1_support(pred_positive, gt_positive, + average)) + + return results + + +@METRICS.register_module() +class ConfusionMatrix(BaseMetric): + r"""A metric to calculate confusion matrix for single-label tasks. + + Args: + num_classes (int, optional): The number of classes. Defaults to None. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Defaults to None. + + Examples: + + 1. The basic usage. + + >>> import torch + >>> from mmpretrain.evaluation import ConfusionMatrix + >>> y_pred = [0, 1, 1, 3] + >>> y_true = [0, 2, 1, 3] + >>> ConfusionMatrix.calculate(y_pred, y_true, num_classes=4) + tensor([[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 1, 0, 0], + [0, 0, 0, 1]]) + >>> # plot the confusion matrix + >>> import matplotlib.pyplot as plt + >>> y_score = torch.rand((1000, 10)) + >>> y_true = torch.randint(10, (1000, )) + >>> matrix = ConfusionMatrix.calculate(y_score, y_true) + >>> ConfusionMatrix().plot(matrix) + >>> plt.show() + + 2. In the config file + + .. 
code:: python + + val_evaluator = dict(type='ConfusionMatrix') + test_evaluator = dict(type='ConfusionMatrix') + """ # noqa: E501 + default_prefix = 'confusion_matrix' + + def __init__(self, + num_classes: Optional[int] = None, + collect_device: str = 'cpu', + prefix: Optional[str] = None) -> None: + super().__init__(collect_device, prefix) + + self.num_classes = num_classes + + def process(self, data_batch, data_samples: Sequence[dict]) -> None: + for data_sample in data_samples: + if 'pred_score' in data_sample: + pred_score = data_sample['pred_score'] + pred_label = pred_score.argmax(dim=0, keepdim=True) + self.num_classes = pred_score.size(0) + else: + pred_label = data_sample['pred_label'] + + self.results.append({ + 'pred_label': pred_label, + 'gt_label': data_sample['gt_label'], + }) + + def compute_metrics(self, results: list) -> dict: + pred_labels = [] + gt_labels = [] + for result in results: + pred_labels.append(result['pred_label']) + gt_labels.append(result['gt_label']) + confusion_matrix = ConfusionMatrix.calculate( + torch.cat(pred_labels), + torch.cat(gt_labels), + num_classes=self.num_classes) + return {'result': confusion_matrix} + + @staticmethod + def calculate(pred, target, num_classes=None) -> dict: + """Calculate the confusion matrix for single-label task. + + Args: + pred (torch.Tensor | np.ndarray | Sequence): The prediction + results. It can be labels (N, ), or scores of every + class (N, C). + target (torch.Tensor | np.ndarray | Sequence): The target of + each prediction with shape (N, ). + num_classes (Optional, int): The number of classes. If the ``pred`` + is label instead of scores, this argument is required. + Defaults to None. + + Returns: + torch.Tensor: The confusion matrix. + """ + pred = to_tensor(pred) + target_label = to_tensor(target).int() + + assert pred.size(0) == target_label.size(0), \ + f"The size of pred ({pred.size(0)}) doesn't match "\ + f'the target ({target_label.size(0)}).' + assert target_label.ndim == 1 + + if pred.ndim == 1: + assert num_classes is not None, \ + 'Please specify the `num_classes` if the `pred` is labels ' \ + 'intead of scores.' + pred_label = pred + else: + num_classes = num_classes or pred.size(1) + pred_label = torch.argmax(pred, dim=1).flatten() + + with torch.no_grad(): + indices = num_classes * target_label + pred_label + matrix = torch.bincount(indices, minlength=num_classes**2) + matrix = matrix.reshape(num_classes, num_classes) + + return matrix + + @staticmethod + def plot(confusion_matrix: torch.Tensor, + include_values: bool = False, + cmap: str = 'viridis', + classes: Optional[List[str]] = None, + colorbar: bool = True, + show: bool = True): + """Draw a confusion matrix by matplotlib. + + Modified from `Scikit-Learn + `_ + + Args: + confusion_matrix (torch.Tensor): The confusion matrix to draw. + include_values (bool): Whether to draw the values in the figure. + Defaults to False. + cmap (str): The color map to use. Defaults to use "viridis". + classes (list[str], optional): The names of categories. + Defaults to None, which means to use index number. + colorbar (bool): Whether to show the colorbar. Defaults to True. + show (bool): Whether to show the figure immediately. + Defaults to True. 
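+
+        Examples:
+            A rough usage sketch; with ``show=False`` the figure is only
+            built and returned:
+
+            >>> import torch
+            >>> matrix = torch.randint(0, 10, (5, 5))
+            >>> fig = ConfusionMatrix.plot(matrix, show=False)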
+ """ # noqa: E501 + import matplotlib.pyplot as plt + + fig, ax = plt.subplots(figsize=(10, 10)) + + num_classes = confusion_matrix.size(0) + + im_ = ax.imshow(confusion_matrix, interpolation='nearest', cmap=cmap) + text_ = None + cmap_min, cmap_max = im_.cmap(0), im_.cmap(1.0) + + if include_values: + text_ = np.empty_like(confusion_matrix, dtype=object) + + # print text with appropriate color depending on background + thresh = (confusion_matrix.max() + confusion_matrix.min()) / 2.0 + + for i, j in product(range(num_classes), range(num_classes)): + color = cmap_max if confusion_matrix[i, + j] < thresh else cmap_min + + text_cm = format(confusion_matrix[i, j], '.2g') + text_d = format(confusion_matrix[i, j], 'd') + if len(text_d) < len(text_cm): + text_cm = text_d + + text_[i, j] = ax.text( + j, i, text_cm, ha='center', va='center', color=color) + + display_labels = classes or np.arange(num_classes) + + if colorbar: + fig.colorbar(im_, ax=ax) + ax.set( + xticks=np.arange(num_classes), + yticks=np.arange(num_classes), + xticklabels=display_labels, + yticklabels=display_labels, + ylabel='True label', + xlabel='Predicted label', + ) + ax.invert_yaxis() + ax.xaxis.tick_top() + + ax.set_ylim((num_classes - 0.5, -0.5)) + # Automatically rotate the x labels. + fig.autofmt_xdate(ha='center') + + if show: + plt.show() + return fig diff --git a/mmpretrain/evaluation/metrics/visual_grounding_eval.py b/mmpretrain/evaluation/metrics/visual_grounding_eval.py new file mode 100644 index 0000000..ad16e5a --- /dev/null +++ b/mmpretrain/evaluation/metrics/visual_grounding_eval.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +import torch +import torchvision.ops.boxes as boxes +from mmengine.evaluator import BaseMetric + +from mmpretrain.registry import METRICS + + +def aligned_box_iou(boxes1: torch.Tensor, boxes2: torch.Tensor): + area1 = boxes.box_area(boxes1) + area2 = boxes.box_area(boxes2) + + lt = torch.max(boxes1[:, :2], boxes2[:, :2]) # (B, 2) + rb = torch.min(boxes1[:, 2:], boxes2[:, 2:]) # (B, 2) + + wh = boxes._upcast(rb - lt).clamp(min=0) # (B, 2) + inter = wh[:, 0] * wh[:, 1] # (B, ) + + union = area1 + area2 - inter + iou = inter / union + return iou + + +@METRICS.register_module() +class VisualGroundingMetric(BaseMetric): + """Visual Grounding evaluator. + + Calculate the box mIOU and box grounding accuracy for visual grounding + model. + + Args: + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Should be modified according to the + `retrieval_type` for unambiguous results. Defaults to TR. + """ + default_prefix = 'visual-grounding' + + def process(self, data_batch, data_samples): + """Process one batch of data samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. 
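+
+        A rough sketch of one expected sample (boxes are assumed to follow
+        the ``(x1, y1, x2, y2)`` convention used by ``aligned_box_iou``)::
+
+            data_sample = {
+                'pred_bboxes': torch.tensor([[10., 10., 50., 60.]]),
+                'gt_bboxes': [[12., 12., 48., 58.]],
+            }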
+ """ + for preds in data_samples: + + pred_box = preds['pred_bboxes'].squeeze() + box_gt = torch.Tensor(preds['gt_bboxes']).squeeze() + + result = { + 'box': pred_box.to('cpu').squeeze(), + 'box_target': box_gt.squeeze(), + } + + self.results.append(result) + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. + + Args: + results (dict): The processed results of each batch. + + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. + """ + pred_boxes = torch.stack([each['box'] for each in results]) + gt_boxes = torch.stack([each['box_target'] for each in results]) + iou = aligned_box_iou(pred_boxes, gt_boxes) + accu_num = torch.sum(iou >= 0.5) + + miou = torch.mean(iou) + acc = accu_num / len(gt_boxes) + coco_val = {'miou': miou, 'acc': acc} + return coco_val diff --git a/mmpretrain/evaluation/metrics/voc_multi_label.py b/mmpretrain/evaluation/metrics/voc_multi_label.py new file mode 100644 index 0000000..1034852 --- /dev/null +++ b/mmpretrain/evaluation/metrics/voc_multi_label.py @@ -0,0 +1,98 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence + +from mmpretrain.registry import METRICS +from mmpretrain.structures import label_to_onehot +from .multi_label import AveragePrecision, MultiLabelMetric + + +class VOCMetricMixin: + """A mixin class for VOC dataset metrics, VOC annotations have extra + `difficult` attribute for each object, therefore, extra option is needed + for calculating VOC metrics. + + Args: + difficult_as_postive (Optional[bool]): Whether to map the difficult + labels as positive in one-hot ground truth for evaluation. If it + set to True, map difficult gt labels to positive ones(1), If it + set to False, map difficult gt labels to negative ones(0). + Defaults to None, the difficult labels will be set to '-1'. + """ + + def __init__(self, + *arg, + difficult_as_positive: Optional[bool] = None, + **kwarg): + self.difficult_as_positive = difficult_as_positive + super().__init__(*arg, **kwarg) + + def process(self, data_batch, data_samples: Sequence[dict]): + """Process one batch of data samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + for data_sample in data_samples: + result = dict() + gt_label = data_sample['gt_label'] + gt_label_difficult = data_sample['gt_label_difficult'] + + result['pred_score'] = data_sample['pred_score'].clone() + num_classes = result['pred_score'].size()[-1] + + if 'gt_score' in data_sample: + result['gt_score'] = data_sample['gt_score'].clone() + else: + result['gt_score'] = label_to_onehot(gt_label, num_classes) + + # VOC annotation labels all the objects in a single image + # therefore, some categories are appeared both in + # difficult objects and non-difficult objects. + # Here we reckon those labels which are only exists in difficult + # objects as difficult labels. + difficult_label = set(gt_label_difficult) - ( + set(gt_label_difficult) & set(gt_label.tolist())) + + # set difficult label for better eval + if self.difficult_as_positive is None: + result['gt_score'][[*difficult_label]] = -1 + elif self.difficult_as_positive: + result['gt_score'][[*difficult_label]] = 1 + + # Save the result to `self.results`. 
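+            # (difficult-only labels are now -1 (ignored), 1, or left as 0
+            # in `gt_score`, depending on `difficult_as_positive`)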
+            self.results.append(result)
+
+
+@METRICS.register_module()
+class VOCMultiLabelMetric(VOCMetricMixin, MultiLabelMetric):
+    """A collection of metrics for multi-label multi-class classification
+    tasks on the VOC dataset, based on the per-category confusion matrix.
+
+    It includes precision, recall, f1-score and support.
+
+    Args:
+        difficult_as_positive (Optional[bool]): Whether to map the difficult
+            labels as positive in one-hot ground truth for evaluation. If
+            set to True, difficult gt labels are mapped to positive ones (1);
+            if set to False, they are mapped to negative ones (0).
+            Defaults to None, in which case difficult labels are set to -1
+            and ignored.
+        **kwarg: Refers to `MultiLabelMetric` for detailed docstrings.
+    """
+
+
+@METRICS.register_module()
+class VOCAveragePrecision(VOCMetricMixin, AveragePrecision):
+    """Calculate the average precision with respect to classes for the VOC
+    dataset.
+
+    Args:
+        difficult_as_positive (Optional[bool]): Whether to map the difficult
+            labels as positive in one-hot ground truth for evaluation. If
+            set to True, difficult gt labels are mapped to positive ones (1);
+            if set to False, they are mapped to negative ones (0).
+            Defaults to None, in which case difficult labels are set to -1
+            and ignored.
+        **kwarg: Refers to `AveragePrecision` for detailed docstrings.
+    """
diff --git a/mmpretrain/evaluation/metrics/vqa.py b/mmpretrain/evaluation/metrics/vqa.py
new file mode 100644
index 0000000..fd77ba9
--- /dev/null
+++ b/mmpretrain/evaluation/metrics/vqa.py
@@ -0,0 +1,315 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# Partly adopted from https://github.com/GT-Vision-Lab/VQA
+# Copyright (c) 2014, Aishwarya Agrawal
+from typing import List, Optional
+
+import mmengine
+from mmengine.evaluator import BaseMetric
+from mmengine.logging import MMLogger
+
+from mmpretrain.registry import METRICS
+
+
+def _process_punctuation(inText):
+    import re
+    outText = inText
+    punct = [
+        ';', r'/', '[', ']', '"', '{', '}', '(', ')', '=', '+', '\\', '_', '-',
+        '>', '<', '@', '`', ',', '?', '!'
+ ] + commaStrip = re.compile('(\d)(,)(\d)') # noqa: W605 + periodStrip = re.compile('(?!<=\d)(\.)(?!\d)') # noqa: W605 + for p in punct: + if (p + ' ' in inText or ' ' + p in inText) or (re.search( + commaStrip, inText) is not None): + outText = outText.replace(p, '') + else: + outText = outText.replace(p, ' ') + outText = periodStrip.sub('', outText, re.UNICODE) + return outText + + +def _process_digit_article(inText): + outText = [] + tempText = inText.lower().split() + articles = ['a', 'an', 'the'] + manualMap = { + 'none': '0', + 'zero': '0', + 'one': '1', + 'two': '2', + 'three': '3', + 'four': '4', + 'five': '5', + 'six': '6', + 'seven': '7', + 'eight': '8', + 'nine': '9', + 'ten': '10', + } + contractions = { + 'aint': "ain't", + 'arent': "aren't", + 'cant': "can't", + 'couldve': "could've", + 'couldnt': "couldn't", + "couldn'tve": "couldn't've", + "couldnt've": "couldn't've", + 'didnt': "didn't", + 'doesnt': "doesn't", + 'dont': "don't", + 'hadnt': "hadn't", + "hadnt've": "hadn't've", + "hadn'tve": "hadn't've", + 'hasnt': "hasn't", + 'havent': "haven't", + 'hed': "he'd", + "hed've": "he'd've", + "he'dve": "he'd've", + 'hes': "he's", + 'howd': "how'd", + 'howll': "how'll", + 'hows': "how's", + "Id've": "I'd've", + "I'dve": "I'd've", + 'Im': "I'm", + 'Ive': "I've", + 'isnt': "isn't", + 'itd': "it'd", + "itd've": "it'd've", + "it'dve": "it'd've", + 'itll': "it'll", + "let's": "let's", + 'maam': "ma'am", + 'mightnt': "mightn't", + "mightnt've": "mightn't've", + "mightn'tve": "mightn't've", + 'mightve': "might've", + 'mustnt': "mustn't", + 'mustve': "must've", + 'neednt': "needn't", + 'notve': "not've", + 'oclock': "o'clock", + 'oughtnt': "oughtn't", + "ow's'at": "'ow's'at", + "'ows'at": "'ow's'at", + "'ow'sat": "'ow's'at", + 'shant': "shan't", + "shed've": "she'd've", + "she'dve": "she'd've", + "she's": "she's", + 'shouldve': "should've", + 'shouldnt': "shouldn't", + "shouldnt've": "shouldn't've", + "shouldn'tve": "shouldn't've", + "somebody'd": 'somebodyd', + "somebodyd've": "somebody'd've", + "somebody'dve": "somebody'd've", + 'somebodyll': "somebody'll", + 'somebodys': "somebody's", + 'someoned': "someone'd", + "someoned've": "someone'd've", + "someone'dve": "someone'd've", + 'someonell': "someone'll", + 'someones': "someone's", + 'somethingd': "something'd", + "somethingd've": "something'd've", + "something'dve": "something'd've", + 'somethingll': "something'll", + 'thats': "that's", + 'thered': "there'd", + "thered've": "there'd've", + "there'dve": "there'd've", + 'therere': "there're", + 'theres': "there's", + 'theyd': "they'd", + "theyd've": "they'd've", + "they'dve": "they'd've", + 'theyll': "they'll", + 'theyre': "they're", + 'theyve': "they've", + 'twas': "'twas", + 'wasnt': "wasn't", + "wed've": "we'd've", + "we'dve": "we'd've", + 'weve': "we've", + 'werent': "weren't", + 'whatll': "what'll", + 'whatre': "what're", + 'whats': "what's", + 'whatve': "what've", + 'whens': "when's", + 'whered': "where'd", + 'wheres': "where's", + 'whereve': "where've", + 'whod': "who'd", + "whod've": "who'd've", + "who'dve": "who'd've", + 'wholl': "who'll", + 'whos': "who's", + 'whove': "who've", + 'whyll': "why'll", + 'whyre': "why're", + 'whys': "why's", + 'wont': "won't", + 'wouldve': "would've", + 'wouldnt': "wouldn't", + "wouldnt've": "wouldn't've", + "wouldn'tve": "wouldn't've", + 'yall': "y'all", + "yall'll": "y'all'll", + "y'allll": "y'all'll", + "yall'd've": "y'all'd've", + "y'alld've": "y'all'd've", + "y'all'dve": "y'all'd've", + 'youd': "you'd", + "youd've": "you'd've", + "you'dve": 
"you'd've", + 'youll': "you'll", + 'youre': "you're", + 'youve': "you've", + } + for word in tempText: + word = manualMap.setdefault(word, word) + if word not in articles: + outText.append(word) + for wordId, word in enumerate(outText): + if word in contractions: + outText[wordId] = contractions[word] + outText = ' '.join(outText) + return outText + + +@METRICS.register_module() +class VQAAcc(BaseMetric): + '''VQA Acc metric. + Args: + + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. + If prefix is not provided in the argument, self.default_prefix + will be used instead. Should be modified according to the + `retrieval_type` for unambiguous results. Defaults to TR. + ''' + default_prefix = 'VQA' + + def __init__(self, + full_score_weight: float = 0.3, + collect_device: str = 'cpu', + prefix: Optional[str] = None): + super().__init__(collect_device=collect_device, prefix=prefix) + self.full_score_weight = full_score_weight + + def process(self, data_batch, data_samples): + """Process one batch of data samples. + + The processed results should be stored in ``self.results``, which will + be used to computed the metrics when all batches have been processed. + + Args: + data_batch: A batch of data from the dataloader. + data_samples (Sequence[dict]): A batch of outputs from the model. + """ + for sample in data_samples: + gt_answer = sample.get('gt_answer') + gt_answer_weight = sample.get('gt_answer_weight') + if isinstance(gt_answer, str): + gt_answer = [gt_answer] + if gt_answer_weight is None: + gt_answer_weight = [1. / (len(gt_answer))] * len(gt_answer) + + result = { + 'pred_answer': sample.get('pred_answer'), + 'gt_answer': gt_answer, + 'gt_answer_weight': gt_answer_weight, + } + + self.results.append(result) + + def compute_metrics(self, results: List): + """Compute the metrics from processed results. + + Args: + results (dict): The processed results of each batch. + + Returns: + Dict: The computed metrics. The keys are the names of the metrics, + and the values are corresponding results. + """ + acc = [] + for result in results: + pred_answer = self._process_answer(result['pred_answer']) + gt_answer = [ + self._process_answer(answer) for answer in result['gt_answer'] + ] + answer_weight = result['gt_answer_weight'] + + weight_sum = 0 + for i, gt in enumerate(gt_answer): + if gt == pred_answer: + weight_sum += answer_weight[i] + vqa_acc = min(1.0, weight_sum / self.full_score_weight) + acc.append(vqa_acc) + + accuracy = sum(acc) / len(acc) * 100 + + metrics = {'acc': accuracy} + return metrics + + def _process_answer(self, answer): + answer = answer.replace('\n', ' ') + answer = answer.replace('\t', ' ') + answer = answer.strip() + answer = _process_punctuation(answer) + answer = _process_digit_article(answer) + return answer + + +@METRICS.register_module() +class ReportVQA(BaseMetric): + """Dump VQA result to the standard json format for VQA evaluation. + + Args: + file_path (str): The file path to save the result file. + collect_device (str): Device name used for collecting results from + different ranks during distributed training. Must be 'cpu' or + 'gpu'. Defaults to 'cpu'. + prefix (str, optional): The prefix that will be added in the metric + names to disambiguate homonymous metrics of different evaluators. 
+ If prefix is not provided in the argument, self.default_prefix + will be used instead. Should be modified according to the + `retrieval_type` for unambiguous results. Defaults to TR. + """ + default_prefix = 'VQA' + + def __init__(self, + file_path: str, + collect_device: str = 'cpu', + prefix: Optional[str] = None): + super().__init__(collect_device=collect_device, prefix=prefix) + if not file_path.endswith('.json'): + raise ValueError('The output file must be a json file.') + self.file_path = file_path + + def process(self, data_batch, data_samples) -> None: + """transfer tensors in predictions to CPU.""" + for sample in data_samples: + question_id = sample['question_id'] + pred_answer = sample['pred_answer'] + + result = { + 'question_id': int(question_id), + 'answer': pred_answer, + } + + self.results.append(result) + + def compute_metrics(self, results: List): + """Dump the result to json file.""" + mmengine.dump(results, self.file_path) + logger = MMLogger.get_current_instance() + logger.info(f'Results has been saved to {self.file_path}.') + return {} diff --git a/mmpretrain/models/__init__.py b/mmpretrain/models/__init__.py new file mode 100644 index 0000000..3f58311 --- /dev/null +++ b/mmpretrain/models/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .backbones import * # noqa: F401,F403 +from .builder import (BACKBONES, CLASSIFIERS, HEADS, LOSSES, NECKS, + build_backbone, build_classifier, build_head, build_loss, + build_neck) +from .classifiers import * # noqa: F401,F403 +from .heads import * # noqa: F401,F403 +from .losses import * # noqa: F401,F403 +from .multimodal import * # noqa: F401,F403 +from .necks import * # noqa: F401,F403 +from .peft import * # noqa: F401,F403 +from .retrievers import * # noqa: F401,F403 +from .selfsup import * # noqa: F401,F403 +from .tta import * # noqa: F401,F403 +from .utils import * # noqa: F401,F403 + +__all__ = [ + 'BACKBONES', 'HEADS', 'NECKS', 'LOSSES', 'CLASSIFIERS', 'build_backbone', + 'build_head', 'build_neck', 'build_loss', 'build_classifier' +] diff --git a/mmpretrain/models/__pycache__/__init__.cpython-310.pyc b/mmpretrain/models/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30cd299756c3540fe10986586041a5c8fd24afce GIT binary patch literal 649 zcmZ8e%Z}496t$B$&vqCX9uj{L7E?X|Av&dq(Tq9>&&nGsZCtR%ZYn!L;y+l-dUpH> zAK@)4egO#y-y{_eM>*$SN9Ww5n`S!Akw3S;zHQ%y2z`ghW>Lf627 z!a87q4q2!p7HPtWj#;b|mgtnFI%Ap6S*{CK;6XR(by!VDJ{tMn$j2k!ANj$^CnG-` z`O!7{fa>rLv&VHrQAvJaAFad=KPz*Woy`{)XIHBwb;aUxPU-oZ^W}T$$lK-oHFfdj z6{Y?gSuGbA)E%umscOEiHaF{LC*1MSZ&YQi+)80wKHPj1Reh*cW9^~diOo%!8^1YZ z2tWc514IBR0BqPy01AN2!>WA3vcp*Gl75OXgHf;S-Fltcu~tY3epTB)15Qj9=U%c8y5wk$WIHBx*M#73o(zR0gK%7@fasCyV z6Br8c^K@pKrk!cM2YDrT|CCKFl$~)dce3UDbpP3{AaZevZ>H#s5E( F;5WO2qYeN7 literal 0 HcmV?d00001 diff --git a/mmpretrain/models/__pycache__/builder.cpython-310.pyc b/mmpretrain/models/__pycache__/builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c99faf7438d0063f4cb81d56243f1bd69fcaf0ab GIT binary patch literal 1035 zcmb7DJ#Q015Z%46+^6jjpnxU~g$wLIfDpE`2rI^xdo$i7*JQ_j9<&p`^J>IsY9rzo2f>FFe z00ul`D?a0#Nj@_$ObiRd#&9rpzyR|Do4H_tjoAYSb};+kf`>T(9|FuFgb-nlpb@in z^E-Z>wR!10pB#u$oN#*oGpIH(1{gm`?S_5g@7ZULjr?7+f0|~nznCY-i+n9RZKL%1 zy}@CBG8S=ZFKG6hmu9j&I%mqo_MZ>CD$h6F)#^majhd(HzsekBR&=XAsX8ZTWuvNJ zUoeCugO0^F6sfsZt0K{;FwhFuGC3x7_E-!KV`bx{atKJ11`n&!*L1Xemu3Qj2;rtg zZ{!>*qGzIau$_pO->r$7|3Q@HML|T)XcEU_I}@$HUo+9>wr*)xpIws8iy~d7cp=Ca 
z3`V^;9zGq4=|4zY`W!D?_iO6Cn^3njHCZH;Z#AJ%o^3hQ%))*K}?Dg!XT(7s3<( literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__init__.py b/mmpretrain/models/backbones/__init__.py new file mode 100644 index 0000000..60e37fb --- /dev/null +++ b/mmpretrain/models/backbones/__init__.py @@ -0,0 +1,129 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .alexnet import AlexNet +from .beit import BEiTViT +from .conformer import Conformer +from .convmixer import ConvMixer +from .convnext import ConvNeXt +from .cspnet import CSPDarkNet, CSPNet, CSPResNet, CSPResNeXt +from .davit import DaViT +from .deit import DistilledVisionTransformer +from .deit3 import DeiT3 +from .densenet import DenseNet +from .edgenext import EdgeNeXt +from .efficientformer import EfficientFormer +from .efficientnet import EfficientNet +from .efficientnet_v2 import EfficientNetV2 +from .hivit import HiViT +from .hornet import HorNet +from .hrnet import HRNet +from .inception_v3 import InceptionV3 +from .lenet import LeNet5 +from .levit import LeViT +from .mixmim import MixMIMTransformer +from .mlp_mixer import MlpMixer +from .mobilenet_v2 import MobileNetV2 +from .mobilenet_v3 import MobileNetV3 +from .mobileone import MobileOne +from .mobilevit import MobileViT +from .mvit import MViT +from .poolformer import PoolFormer +from .regnet import RegNet +from .replknet import RepLKNet +from .repmlp import RepMLPNet +from .repvgg import RepVGG +from .res2net import Res2Net +from .resnest import ResNeSt +from .resnet import ResNet, ResNetV1c, ResNetV1d +from .resnet_cifar import ResNet_CIFAR +from .resnext import ResNeXt +from .revvit import RevVisionTransformer +from .riformer import RIFormer +from .seresnet import SEResNet +from .seresnext import SEResNeXt +from .shufflenet_v1 import ShuffleNetV1 +from .shufflenet_v2 import ShuffleNetV2 +from .sparse_convnext import SparseConvNeXt +from .sparse_resnet import SparseResNet +from .swin_transformer import SwinTransformer +from .swin_transformer_v2 import SwinTransformerV2 +from .t2t_vit import T2T_ViT +from .timm_backbone import TIMMBackbone +from .tinyvit import TinyViT +from .tnt import TNT +from .twins import PCPVT, SVT +from .van import VAN +from .vgg import VGG +from .vig import PyramidVig, Vig +from .vision_transformer import VisionTransformer +from .vit_eva02 import ViTEVA02 +from .vit_sam import ViTSAM +from .xcit import XCiT + +__all__ = [ + 'LeNet5', + 'AlexNet', + 'VGG', + 'RegNet', + 'ResNet', + 'ResNeXt', + 'ResNetV1d', + 'ResNeSt', + 'ResNet_CIFAR', + 'SEResNet', + 'SEResNeXt', + 'ShuffleNetV1', + 'ShuffleNetV2', + 'MobileNetV2', + 'MobileNetV3', + 'VisionTransformer', + 'SwinTransformer', + 'TNT', + 'TIMMBackbone', + 'T2T_ViT', + 'Res2Net', + 'RepVGG', + 'Conformer', + 'MlpMixer', + 'DistilledVisionTransformer', + 'PCPVT', + 'SVT', + 'EfficientNet', + 'EfficientNetV2', + 'ConvNeXt', + 'HRNet', + 'ResNetV1c', + 'ConvMixer', + 'EdgeNeXt', + 'CSPDarkNet', + 'CSPResNet', + 'CSPResNeXt', + 'CSPNet', + 'RepLKNet', + 'RepMLPNet', + 'PoolFormer', + 'RIFormer', + 'DenseNet', + 'VAN', + 'InceptionV3', + 'MobileOne', + 'EfficientFormer', + 'SwinTransformerV2', + 'MViT', + 'DeiT3', + 'HorNet', + 'MobileViT', + 'DaViT', + 'BEiTViT', + 'RevVisionTransformer', + 'MixMIMTransformer', + 'TinyViT', + 'LeViT', + 'Vig', + 'PyramidVig', + 'XCiT', + 'ViTSAM', + 'ViTEVA02', + 'HiViT', + 'SparseResNet', + 'SparseConvNeXt', +] diff --git a/mmpretrain/models/backbones/__pycache__/__init__.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/__init__.cpython-310.pyc new file mode 
100644
index 0000000000000000000000000000000000000000..7a5436ccae8aa19cf9f0cb465e95da983bc938c2
GIT binary patch
[base85 binary patch data omitted: compiled __pycache__ .pyc files for
backbones/__init__, base_backbone and beit]
z|!!+>mn`Vz~hnoe5@ zlTc%ta0;aPP{Rnirv-@mD$DAv{Y&`{k`Hq!A4P=ca{&%0C7!W}k4ubLYT&fYfK)OE zCFE`X#LSpRccQ=eNuH-p-01(2Ri%DNmi{jh>FHQ{oDy6n_##1ygRH7!nScPndBGSw zo3Mbq`?qa+=2{v$1s%6v7o_B%ZNOsUDZqO$Ext?|$RApe8X(U= zxaZJnaZhXG{_S>pMZ=v%NRMlf{;|N6nzJOTB!D~U6}%B4w=>tqJv}V<3ckt=cSl3G zY<+A?1L945iB@W><;F5_FT;eb> z_rN8r3v^mIdie$31Dps}mq)6d;@u%0%E32EAci}pNEM8Oh70ANz!8qJz!S5V!d+Jr z8Vuw2bUXbU`m5R(-~|V~gTs`a+cU#f@~HJ9Ry&RI%qd(4mceI~n|x?NZK7|1+{EKj zc?@Lfk21#f4=!VFG4`(!z>Q9JuPi0ie|{DORZg8*mf@}jroc0QCLLQOWA$n!6a&SC zq`u9<`$+b`M_Pr;GJyy=>H;|N2~N5`jgTx*d?&{dBQg7wga<6pTNoI;eGUHT!8U&6 zpqJh?z>o+NqR#RjE%wA&>tg#(0q0NQt`2>PYlMcfpgka>YWGJCf+ zijkLk6MVlD%+&;Qzy=m~$LM&6o72#r%8Y&sIg?LmSI!BhjfPa^A7f>@GjV&^{M}^a z?u!?MtOQz?J6)?87?8-l-C~@;-T(Ufd^a1&%=6vpdAbVUz1aun;K3Tzo zzwbPA;}^J}EZd|z7h^W>HpIYLMg0@huN0Od?Z7S)W)7%t;w9{y`sa*^dgos-CI@T- zF+W{z!h6|IZFDx&zhr{h#O(t02Q0-68t|TDj2&a}?--DLbLYw>yqT$Br?R&CLk54u zK)5QhlIl+wEHd~if=W4JPDQzb-_wAFMFes`1-==#k;#=Z#{1dAYRS;R$ijrl>ra*O zL`KLu<8s33N~-mcOsxF!$xK+A=&ZtsMT!JpQovR7UW(Beru7HQPsrM6^ ze~1yRlE~2Cw+IHJ1kw@+>LrNp#`_>VH!09l)TSWMfA~$~pQ3je|6n@sZ=f!(FoO+& zXdcGj&!vh`89)?&%ly;sT>p$1u*G*n1s`m`@Ii7bR~8~c)_9-{c+=m;M{uZskbsDR zLBUb(mSM&KH`&8+!o$QfXzZ|4pW`tC7REL~ex~g-jt?9lL783M#!msLxm!{}&eVAt zTtFIgdm8jq=h*2dK64T%4H|oi++4tj*%%mISl86lkNpI{=Gpq;sbMcK|N>orVQS zV#0rkoFJ@(2)DxA{ZpVSm;oVHlxvmeQhgsVeNQ11V)h2Emg15Kg#q>77zi0+mC8_T zT$Nn#(XjVzYk!0ic&y}?xWz5hFN_qRy(>dv1xuGN{1jof@ZK0Vw3&M!jND{@lkS56 z8S}rQK8WL;JoWt?)do})sJBrv5bA%8v1tT;4mV#QjC7H?4hTzGEKWm-0W%`DBXuU(B<3#Xt(l$!~bfx7%VXsEoH-^{x4OyFI_88ufK|o{pVzLkK@X zwJ$5?XCt?X0y9Qwp!=a9)KmpFHG}}aW7XFRNzQzr_LJM(qUQ4tM8Bv3&V$AWbxjRR zw_)^Xx@v^5$_z#s$Z1W*5q^#e7O*PSAp9LCb&zReTd>&R_n+e3oM5#{2G6oc^qV5+ v5q_phrZI*+f=9$2Klfx%;%V+$*)d0P^hJjq4n5#$>DXmtG!6FY>#}Cm`E$_%5iMRmTgI1*QvaAt#y(z4n|zg>@Kjl{F~djEyMR&2Gc zbkv%vQe10mHI08$9lfSUdFhU>DXP-R)UuqDY8#zgE!WA{@|@P&g-)?nXd6` zj%V6qol333@oam%Gf|u1xY6F%nXFABp0o4qsm^q5n&XA`OlN;>zpA{T*hRbamSUHj z%C%JO0Mcc94CygWA4Ixhk0U+K>3fi#u=gRokJE>o$=!SRsFz853b|9x^tIGAwRWGa zT~MmiH}Kv{RSk^`&ZP~r(xhB~f?16;yY`^KPSSJK{Xeq(QSzKDj92y^4sV)Kpu+-l?^e zt>Kfhbw{_;cE--`XowlK);pSASjm1-Sy6d?bG5Ww*IEYaM=Pq8@A(_ z{+e){p*}jiP~I`K?%Jle(d%`EAJ=o8%iPlnR*?3@dy#uRvO4F^kP2NF|JEnS`gSp$u!MQg%fmx2v&zcK(9ocWwjk>*kV^cOXFE=r0 z-8G#nzNljz&`3_&sbh|g*{Szr=ci`P3;p-&nm8|mX315J`t`f~DoxvQ{ic7F=IuJX z4AJ^NCsv~cq1A4>R%5M>Ds9g^+I0Qulh^>qbT>N7j-bVnuM^jiQ=*1gzV~SvE%2>P zJzrpY$>E|L40qQ>8;njfIOnX?H`=u8U2|?wv?{tAy`h(ha(*Gb+n`~q2O_pa-N!qu zw7d0q(Ksg>C8l+Gnr4S4dvY*YIt7++yr#VYGNn zD?-m*_J3#+3-z|=3?{;ysR;_SPaiq6c;uMr^t`9%oa2vGFJ2rp1J|_fJY>vMHyyK2$b+_b9r9J@{A(B#N4H&X zI8QH$4W~LZa?@=lhCXI?!Hc_~{pj>dnvC8a3rf|S^>%a1;q#d!eVPY7$=6>5OO`Vg zBvZd1v)_eKJG1!xo&WgG?vK9vt<%@FP^T3NQ&^l(r#UZO*Te*BhuLWAiwzdaQ3pl* z68lc!>p^ZRzT&Haa!qS#LC#ipRER@uJ0EC4{-Cm5@KY^4P=f-PI@mff^_@(hSZO~K zWP`#rF#7`v(pldKjBDt}%5jSVT4aM%prcjVHg>Wo$&a)t^xG7pHY7Tdqm;OX)*q$Z zZ!s2R#HDC%XcZ(|RYqEk2Q=e}AZ_Q?b@7J3FG%xDr)=ed;-_{e_vj5cRn0G6hm4JP z_68)6!pkp6VlG_Q!@{~Fz~?Lv`xs^<%L%hRDS$(Lxmova(9%}Z;vQ;z2tk;U%tPEu zIrmXeMo^s&(-1n25GAT)SQd|Kg_`SznX@3{c{@xKInN22NSIo7=LB^g>NwYPvTUBR z=Q(Fy=A5FOQ=D@uEDiC6u+UE~yWzyGj6;xy78)-`j6clvSyiYmeH2@45s9~P4w zoL^R$)I+)7j#->I$=$~MA&m-N zoDkivf1=ar2?z47=?+Dqt+c%p%aJzYogm6%O=h-tRXl)kPT=dUAehMOnxX2NrVc*F zyOnHyBu`U~cPhqcZd_AOv_?&|&uE6G-PF|l&5UZ?+?t>*WEEyM-r5EoqxhjX?dDm0 zy-5VV(o%P|Kxu*I_J|H~KCi2bI7$zqG}P&MgqbZzbUjNPqpZwjr@6Z3UsuH`B*oMH z;53zvQ&oAWu!g?1?5-o*(-2ImTlWm6cT2qnowxYOAVO)L=ieg|sM~s=?BaJ%-3HC? 
zs{NQ2#h}PW>Qk3I@0x1?~GfLD3TqTO7IhIC^OpuQ9vT~@3eTt*)lG>{W7PBtljponKM{c1@(j34m`1xF}2NXG1W_*-NlZuDmHE-#$>MZg zt_#~me;$pit9dQ2=GAE}qv|&~PHB8IoNpkM2X||Huy;`}Rc&#pnj zd29DeLZaDgYWYXTQxhq!)j)-zhu8}}-m?gX4F?zs%*EMcAk#s$jTM~@2AB_Tk-<=~ zi*G5l44VmAmTHBTKCk6gpH6+t{l*arB1+P)?2?{<+!Q&Lr5fOZJm^tzJ zp$Wc?uQ!cg!pNvN?>O7WOJ>mdL;7-2`dut)dQ zU9w2kJ&l>dD9ZusYGuS@eil1J$b<3)s1hsKk0>7fu8@kOuF`($JJb^xNj1HAgZOA- zf7CE#H#^mw_$1~3I)X5}(ysfyRf`~>mwx)FW;F}@yY4|#PzbA^m{605g07Oz*t53U>HS5Zrxr{E*>KNn#eLxfC(EeW~6gq$8FWU+*Sszph# z4o!^qBg}2|B65`^^dOZGjmq<+Qypzt(jT9nTj_6wYJWib`!?$G=7Ro6-Z1@DG*Did zSA9TtG;;jJoRI16OUMn>mhyUPTi;G^L&UdKabEx|h$#ubU(kq}h!SmWPlE!WLlxJ8 z)SkYbbs?f}UjkYt^+GBH**#D(#C(>iG>g_sD>tbSzop+5Q0xvNYx<%j#n&+@AXz%; z=hZwE?6o>HY+jU$C9JgW zr!pa^^cK>Kx5zt((j@8SG<;GyJ(td9@SDvU{7L0-Y!70>Jsv-rCG9Ou$*II znN$p>Jc+{XfxaaQ7hyzrk=w&=Q8@s(I*e{oj>z~YB9)jO^Fp)+$Y=rrC6rN?{35U= znAwc7$sWCdt)DbhlBm%Dq#A*m?${IS5Egj-5I)q`EKHO*Y|T&)$(QBVPfUqrZ-iPU zV6!L(Yep63?FdU8hJQXFRz*F;OECm5B--C<1>O-p;u#%ZcIA{Jm}m$&iZ$LCGUPP? z7C4y5@Oz9PNC~Yaxk-a1A!v^(N%h2x)RPDx8=6THOnG#04zSx`tmdfstYhUF(BPqrL*}&z<*o zeQ5MXz1`S=h@hGM%(ECqMX<;T;FrqCN?DW3fxi1!3IpO9nhM=o{A{bhFhInUZ-Iyt zS0?fjf@+(dUth(^d2z+`y2plwPmm$aBI-6_gAGnc45&$9_?HZoE{FpVgEt81j8N0c zvrCM)EjGx~iyty!sZt7G?;jB)w7*Ip2_5$tw}9fJ13^r&F{cM(;z8tVFdKl0LjNnr zxTp*Klkzz)yF&&cVWnCyevp(FV5UGrDVGpas!92jo8w&49#etg7Z5_dfk9Xd%0Vf} z`$ZeF+AczIF6|g-UqUOzDx;oajRj)}E39vpBYl(B0Lan6Iy;Gs0YWE8QCb3yF{nUL zr35=niamy)swc{CpW8S3hUyMfQBtc1nt0U+^8@)A=K9$V}Rf zvgnY6DQJgmK7|G5I#C(DJ^)OD6-Xrr3keddPK;W}n^e0%)8SDNwNI9KrSOV)A)&& z$t2<$taB=(e6Aanp_OXWgPXQ4qkg`7F#&U88|NEns2;#{*v5&6`2aJW@xvUO2sy&_TWO{r{@qLA-U22)Xd+Ko z(oSj{fJ=}g^bZF65@6C7Fzf=5lLh>8qM8)}#hVoT1_hs^;0+3RN462G7I-6Te31Aq zvlbl!P6#p(YS}A%^)@dEj}RRS2->TS(RR;Uef_Xjl;=kZx>{MLV)!s&67+3~Cx8@7 ztVAtqMZK(*Q-H65mH;`qX{5HMM^->G3!|`bv{Vo5Xa#m3N2EId2-=*Nske#$)nu??Erpc`U5KQD&U>kp^ z?`hvqxdiASmlVk1V5kI+94G;Aw#Nn~a5zZUvap6Kx$)dYZXZ6A_)g`fb2GNdR?&X@ z0IVXzK4{+qyfMdlhwOXbGHQAIKKp*;71&ZL;#?hOR|RR#)W3xDj?IuMGro(6?Hcoy zt4oCTm`#G#U?libjezF!qhpUeeskoaLH2xWz3aVa`K(ohpDelxsIUZ{G2 zkKj1fW_>bLmvOy|!`FOPgLX;m>IQ+MH=dqutNfOap z0Ak}S*8rG7^W!^rA2(-y>Y64#3|LIxqgyQ12L#pvmbj^S4}BcB@`3s_#ZJY!UsJxW zUsFHNAa2u0w?t`2!ZI_`h7uOZ>RrFX0$}@1bn^A&%PXZ-!U2keqvLQZ}!wMXxF5WK(gmV%Od>>}yRt-@aF z9BI~yb;zG?-D{7N10US=#_cJ4n!OwDLtppX6WrcJox}Ewy?=*VJb=`Ls5yyI4(yQb z{Sa(`gVQu)&HGQx_#R$a%{~P0i823SXy#ZI)O*BdUAVC_p;`H zl;<)*Gp9B{c}lB@9;j4jyFh6jvWwV@v~Q41vQ1b9hk5oNwkNm8={;@O z7PPY46aKMaJeatqVFwOxV|8|q+YbaKT3LTKsNjQsF_S5>BU(kWbnts9z)J2{Xun%i z_JccxJxx{KRxf>Q@Fw;ozsMBTNQ6&Zj(V}d&<@A5@P$d zr}|}Iwr5<`p4y%sncq~f4?X9D$zU4mG6{X}{paWp&oP?k%w6aCvBW(8h3ARc*)!YI zZieSNgA~n!vrgc|owQ4Q;^q*ar=B=< z3bCoTHF4N~6tRQ$!}cRP*ax!p*xoxL?Y*t^b0o624w%4Qfy|JUvZZeu%WKa(O(?PW zG6lrunE#32Mr`ZBvm1~S9j*r{=Kve4v(%UVh3n2$&m2^jt*M`_;}0a{#RFys-I&W) z(b$W;TL~tD=s0GsI=;D;L)`O$z0I;a4YTB}$rs&CctzRl3mA)@AwLPQTlPP=fM52L zSjEbICPN%@SJ_j3N4o_5p7mw|{-Kz3;8Uy3o@BLrM4t}xPqmw#-{^LFr@u*RHA#ew zVh}+8NMFP0+b^?E!OIjrZl@86n~D3;MEo9t8+0kmhV+6si0Be-kEbbXiGtrnu=VH? 
zkdcAD!$5dnKa}eQJhQpk^qP#?T{WQuvAh{Frw6wCh&i2+`@$P~g+^uLkgdj#A{p75 z(!_ujGcfi^`iZwF;J*9Dr}zTpaR=PhCy>Nt)z8*-cJiAq{M*)7o`E}0%Jc2& zN2HNX#zL6Ki{nhx;R#KJB22ftm*M)H@*F=jNYS*my4|qEG2}s>uX@r9FA{dpH=Fo< z=rj+Gr+AZ+bb6#eT+TrYhp>dOkT|yhA#fLAIXH!3398MOL-*zYy}ZFiT$p>MF5p)I z7(&$1VVGtwoG{nFQx6Sj=w1(=f;D;BYuP;^@9!R_28A=^f3S#==56VDGD#CVLS?y^ z>Q|y`_iU$rg)nVF=MoFB;dkLEYD*8X3^brkYc8zF{p4fCHm4v(Bh1Qd_=Q0`U3J1d zoYEp65H`8O99)y-tA*pS>-gZtKO14`VUB&iX{upfc1a5)=e zlKpIYXS4~esM047BfJPmgiLWAt_y}bp&nGVH<2du9N+zF1`Z8yZb;K5p7CB0DIN8V zo0BPn%=y&7NH%_=r!|zy9OK7YR!6GHH}8O3-qmwum0HtHJbmJt^0$bz{zK!vi7_1) zxn%$|G!8jcOps&6P#ffPI&L$aXcM<0ha_@1N{*Fmhw=Z%crP#ffZt(H);L$ zd1!+uFn%bW#`i3~-oGO7fdKhx3yv_5a6npHy2OLjWQmFVH2Z;>sGHe^cNo&CJ;*`# zbkqasx{r^R_S3ozya2V2M749M{UVh@ub?sIpA}N5F&))_Z>z9I+xOG(X{G+)w4>V8 zR|~SJw1+>c^yPAXQTzekPkfC693*8GUpzl}25upB*8&F$b>Xx(Agpv;nL1(7?Ydwh zPF=v|ULrH+&;Y(YU&oL10nD7|3QUaR3}xR(!Lta!jPNf7uu|U=e@mGUQRdisz1acN zjQ<28{+x2Nun2*q3ikT~9CnH_)y}A}EJ^1<>161gz~*_>X%S~@8Z@E3SwuL2^NG`p z^IX&_NasNrI0*y^#wUTvH0otYw9~qzqObQ9A~6KNtI=ZOKqxKvQ(=Lj6w=XvX`rnm zMavBJivU1)2*_oxrWPfm&%YA+hdMT{@DHO~@kIn-F*y&psKkV%I7!!gz4le{J<6cd zIm#9O33<=a3J{!DR_V;^cwYwJqW4U8KXmHJSfnqX`jdF+Rb?C2Tnm4II-mffB*zKT zp2FGJTmxx@G$SV^0y9JxVB9N~%&{OG} zan3Kb%H)CpOpO1UL;1V#q((m(zXHopX^pdua1QPkn2&7)S+>3ie}(rY{8|X7MQygD z_H9B9;<>*OXuFf>Yl`NGvC$(^)6{e93_^||s~T<9rI+tqyEVfzgR_;a>F`~?NyqJZ2j zghjzc3hERrQ}9<5{51t6UcxNYSSA}1U||L)#&K=&H&p(2l%o^W0hcbobq*ZCh^kl# z4Ktv2{5MB*kyFd}l?Jbtxnhyv?$qtgW$vAIzSu`8#-Yi|} zN}u|BMvGX#kvk%_y>bVT@GAKGEQy1*$kHeWhcjm-7iGT8LLl5XaEG52&MK@Faa9nr z38hLFgV3Ttc@t!5z*{*vI4O^;U`qE_9uDe3fc8haSa c2{5;h3J5Brb{<@yZv~P7HVU*b`V;!zAF;1}(reR~qzT;b%#tD{C+!kDJTp6U=G^97 zCZ3(G7jyM}N?`JN%l(q22TZMEiP_0hq{ zG+q%i7@v`~ZEKsg=7o97s8@GDi&1Bpb4#xGWz?28>Q;96S{!XGi7UeErT&Jz=?URS z-R#Ip-w%ZAwK3?WejK@hw<%RNmybuW3iA=*n-zZ7Pu&fv5=_KE@xzam9qh;SWY@<~GBZbWQ zl}vom_ky-Lg^FN$31e~yO=@f#kJw{_8ACQSTnPP^@q`iAg{0pDH;(K25lDA4vlG?L z%uZy`Q8g@m!m_gKwgWFoT=$9b?S-a_fe42J4VlUX;_0rWEV zshY=s-Yn2asS~Tao)RR7grrhqgSp*(M<44&5<%pJ5+bO$ZWxPxKycM{*CF2gi{?Nb zCakj~nY3jPxbD=R#Gs;$6&(yBIw_)YjH)?j>#;Vvd!Ev$m)rv|R=i+PxJfF2gSTt=#;bx9TJD{m0Lw|L>z?)mBjOU z^td>IQAa!{PGYpI4XXmfdLb**Y50&d=UoB)yS)bgpHHJwq3>7t&CT1fYOiuX>;*EE zQL4`lk30M#U%u(3?NyoZD@yV!LBM|<_xTMe#hZHZrPVa;B^R4bPkridG-B0l_C%+- zw6u7xv3TM23rh`jFE6|KRv*T~@Ay*ij*3H`u1dZfcRGIC_X4hDFW!q1$8)v6(rCwF zGl<*$B=A-yHriYX;+1AdHa%^(v=uSsMS?G{zsPTcSB>FR!aaQUQ`t`Y zN;dd=9gdmsUT3(U@?7LTW{H;vk6cVlhwe#{7=l1V4M$oaHiHk-`;znDEnnH0BC@8IcQ3 zh{PUQOFol4a$_F~A$uuAldrzf2SP=wpnIA6`C&bBzZyXwvTbiR7eWt^-FhQZMO%p6jyX^@B9wY0OW(e*WjpSC>vr zcJm{*z3N5SYga%M7X7H#PjSLvVthQSu?OL+6HGtM;h0jghMZg203D*dqt%nsJ;Ah} z>enZqbTn*ty8QH%je_@%-_=&b;chui+LeWt!Kj%Q#qu&?IGt|xyogQZpIF`8u ze$-D%E7}{~UG=eNl0rdH`;W1D_S_jBN)L8Qf=8gmk)Kmds{F z(sr1^PkZpDJx_Td@4$D9`UJ;*CKu1}oVMr*xi^;8QZP!x7+iDBRWUtTH+KSwWwatTHjuY-Z|+`AB$- z9SYz(6i|1{XnX@EML;?aRXlp$wOe79ahHl zt*;NkwR><_L_41@JUWyvtQ{VjSYN_hrdQM++W4=gM;?$wO53J5aNKxoVZ~9J{kf4I zOOLOe*khI}u7+qL9blM1M|@fl+aQt zTVEjkI%D%3yz+76ZFoL{dmwd$JHwHMAoqrwjAQ)i(|LMuu+HI+dPr@#r~OmoGNq=9 zQ}Y5fFH-Y7n!!)5^^+7C1_ebPAz2x(;dhZ!=;+4v%&CQ(7M^`+%3-|3L!jX+o7gZJ z6(2+ce8(Byl_?*T0V6~3L_^!>;Lv-~2F{h>Q&>2%UBaCeDa;G0$YuwHbBzm)#Wq>> z_|fSuI+Twl0h%sS=ueimSc-BWHK~07X|6#d`Xy~ysL0quvgkru)Ov%(3zPH)JsDcN zdOl^R4++{o&;FyiPIZ=zG6ESj%86oE#rFqD!C^mC!(|D zT$VH;v*61W6_Q!;8vOT?07kR1m@)T>)pE$CcluE~bH>p}R{~QNK;=^4Cn<8i-sR7M zCHX0uqG8tTuT5q%dh;Kk|Ja2ZGruvP&X;Wl=-W9e5 zQ<=?78ALX_Cq$?wEm)*xi5e|C8&(x6sztJbPENBKcU-EXg3*q$puDIn$*d@3Wi?la z1UZU4^_jfJvWb^$W-2#P=Lv0T6qnVTwCP*akf^hoOcGQ8(5y>IV8;BcfB-5#Rk~Pu zjV6+XYSH}?K(d0S=2XlowBxYiQ~PPvssK7>Im~`q0X+IyMMiUqdd?ia*4PPVeuh4? 
ztts7pXWKPa{I*!0hpy)>r&jq__26LsS50WeRngD3a2+7PqN7$#tx2a!n-Z z_pf;MCBPiCH#jgMOyk(&!AShOD!7PF3Od#xWC;B*1Puk~IBP=CnmQ8rV*_6cTb6(# z+`{^y^@5HP=8bpiMRgrWa^obaXSRfUrOX3KlTLTCSp~=JE19@m<%znTtAOOLZqkZ^ zp1WW8#Q5Lv>U)YBgzkVhhnY|9?+3>w9PT?&>^2V75kh^G8r-1a7`d*9+c=WMuDV1G z-JIxq2Swu3LRQBKn%s5hqogHF>L5f3t#=IDG+#DxM&RAPGfU4x#jnjHbjm=h{?)#5 z-mlHzu7dE?7qTMKCM1Qq6S|7A%+c;`XUS}fm1oeswRGL1jWh`_FoUMSIqzb literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/convnext.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/convnext.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ae29defcb2988fed0ae0951e0d10ae55afe46e2 GIT binary patch literal 11379 zcmds7+ix7#d7smabs#JQMO~tmSWkC6{j-eT5>L%xXa<5+2xQk zv#T?+q{uD-rF0;=sM55^L)s(-83aY#haxY1E%KBL*)no%57|_0&2|H1GB=&u zZ6~NX%T#z>Hd}Ao(OOB5bFbQ=^Hx)|Jtxj3&l_$S#kr(ImyY??mtX(FjhjarjiAN) z^>>!ziPg62iE7Z4e$}(L9eJcCB6&GG> z2Aj*y*P>UwW^Kcs#3uJ@EUqf&au$BqWFx%HIV!n96^j)Fju%?E|5EZ&}Q(DM^Ujka6Pm2)?im&CaR%V~vIE;{F*9~siFMI-Zk)QD)~ZrU_WYt@Oi z94D|^O)MxaLVwYSgW&~v)FLG8T>rw2ch8M5;d`yBqyen?y4SROJLTMI*+EFdCNEu# z-;CA&kY<~R$IviTl1^Uriab_>Y5X+K_;+WZR~5(Dp_Ny6V1Vp zBcqJW_pPRMARLkHLf&dNtrhmG9SF#o1@hRq$QiJ5e8+Cd_Rw2_^IUD=K-0Q7gJroH zH6gEIr{MBUi zWfE=_F5bCFM~+{RybRa9)b9`8Y|pcr?Fa`s8goT9u@8|!89Ea0CEEsYlb3zA6LQx4Qan9EMCqg1s>UpG2xN3FvSbW~UEXg^WE zqvB1fV!y4Ly>q+yARUbn9kLl zCm$6KiXG#SD2}6ktV?U6la)83NwlIjQ-LlNdUi6x*f3-j)J}9V(e%N|j(Ug@$tK;` z0@y3Gn(3rP8m;{3R42{-=Y(=oiS&cnLwe7pQf4_e8?xDMg_0Jbq{=BgB-zPZu)_sO zaxa~*+K{%8k5QA;aVl)L92sY-$sw%9Y1;K_trcrQ5St|DOCm1z4GUYJxHzh;WFEv~ zgS3CFL;1#LB7c%Bcs$-$$lUp&B%>Us*r3Nc6!`HLOOiG;)*CX2vv1qd_8o{)oPyld z)?x#8K8#Zh+xKlrI~`}<6ga_dv>j*rx~Wu%jnMJx@z{{5t&9zs&p6XFX>q!1Y2s{O z+sh}3a`s1zdwgI=1B3go`+tn}KsDDz~=fr!mMy{D-e2$&Hz+p&6?2P*XMh zXC9h*Ud?N!n$e0{UPB)5^Qx(34C6sw)l~C=rWp@1DQca2h;|QmW@w-LPE5Hc3^m%n z-j5EHfqy<&&mzN)kt($JGrQWpvaZPKj=HYO=}0@!I~v4T+rYeThq0)iTYkiRF<<{Xe zw^g|a3OLotY2vo&q(zq|JV2ol&EPCr64f-FyZ?r2rAu|two4WD86LL<`dKO+~@EdcxjyypwNI!i;{A za?EYIOZ=Ot2}woaG@QX{&@&Ig$jL`CQp%7n44`tV)i_tJ`W&nvpRZPLw{5TcCS9$H zW)0N_`M!K2OWS6@k6a&P8}l@uVy!eweEmVp2?r6HZD@i<4poAy-PA60-=o=EQ0780yZ5Pf;HAkBd(WtD%V} zP=EHmftt^VCmX7G3gxHo8x5^qWDRsq%)g&Sj9e0*MQD~5&xi#)GvZl(dtQ|BHY=VJ z7o429=;Z2#SQ3}sPgTY^h|A*uJ|Aa#vS9xej8m^*ZC-qObIIx{;a(tTy}f;lEYUIN zk>anFYwQ^=J4rZ1zSGi$r3)c@XD_csQ7gQ>xM<5e?q<0u8;ka8xVW@*;bQs1rRSb~ z?&?a_vID`5SNcqRWr-rZ^TG+?lOZmnChQ1PK4}I2l6|$e>~O;o*0pxjMDz_4B8Nu) z_`sKF;0rHYcmV?^&LKi_x9);OT{eAo{Z>YNztMR#;3>3M%eAJzShs5q0+tQ97YJf% zYl}Vh7gxRJ>Y~r)EwO2}fhD7H$%m&ItwgTxe2 zOa>v3bCKvQcI!vS>TckzB$DcJXl*Vb=Jg!F2=IU-1jSureucuGm6e3Ml}B6tD;LTX zBs1Tm5k9SgH6KRO>TrDAjqsVVa}*EM*4)UcMQw=~>CL)@me|k*a;2ev+n#_o&17kT z=m$4|L~OVtMQYYPMz5Y>9q^O{>E zRum;|INQVV5V3GR3G4t_ctCBw0Z;EeOl!FhT41kh8<<;UIW}e=M%bc3ppa1LK^mhp zZejQV_|b23jMB&ihQsJ{nDX|y!B9g!Uk_dxaG5|WEI$K)Wh6|!G@t~L@+sJ*l?6W1 zqelF489PE7!!3HjH1$a$^Z^~3{4mEufq%~p?c+H_9FqJ09a$NTA^vv(kKv^rMK8LC z`%%Lo6t%8HlOWK2#|c}_02*PL{4LMjNg$>KEg2ERMCM0fjyK@R3<>^d5b=KxFd^Bb zAFv8|U(gl~PIwQ(h5AbH$uBdKkywVUHMe)Ze+STa{^Ek=JAlbpyO0}uTqVHF>^MP{ z6-6Hiy-8|=@kv6dxP>)OSPGD4+qdfO9Y+j-C&c;ic3%Ld!R#eN1K9!*$JrshW(V5| z6$&VaIB(dk>Z${G(W%zm!1fqtew2tbSUdo4*qe?7n~C0ZY*OPRV&U5@C~%^fG$E=( z@F-M%0=$iwS5nfj4b8yY9w7#qWu%USM;Pc@B8?Hu{ixuLih4Ja@6Y31!iD?pv&DFA zRJPs?X=#RDY`IwM5HqyGrhjZ&jt}sM7T6=KC$Scyxs^f|eBf9Ia6Db4u<~OXKJj&Qk)c2nk}-k+CRF?T#hCLiw*!@--yx-J~rCZT*3&eB1xt6HNpTSg(31JB{P)FQ9|L&tv480lCMz(I8ov;$YfAU)Tlg5 z)#s2j{&4zlum97|_dbt#5UfXo0^p7HFcM(HwXtDv(4ZKACS@7?B@#4I{Qj-iO*T=A zb%43@HgYuzt$IIVjBq3@w-Wvo3qnBPy4ooU^}ebq5m1Rp-_zt-KzGWzv8jY-zJwSm z0)((`?5RIMB=sZQcl{B+?Wl7ACW1NcWun(h0wCh1A{PY!LiBwJeF-{L4>E^<3(-5> 
z?Tu(`jzGF>M-?f~MQ&qe?Y*n)Rzo$m@%c9zQFv8-ErizopJ2W6!oRt2U=%boY+WxSCtqEqjl+0 z?W{7V>}fncw@zyIHN>OS!~;e>(H->!k9zu^a!2-PLX=| zTh8xHm5w#H=64oLgTN)Z3k$oqz=Ml~5V(qkiHUmQ?q`zOl+;p25<)9qpyWA9o~PtVN?xP{pC>4ctl^e`lqR^kgGa2cf0*UAah4t1 zsw?iW0L8j!$)qn+VWNF11Bg&=gUl!lfnH)DgAfw!|Vek>Y2zO}p` zyyv{h`^3iwl>9Lz7A0*YvFgStIQq^V+>t=w;}q+Fc;fg};1>|LSWwvBWOkBq%np&F z6Tw^ekH~~SL{gm1;idt15UTmmc$n7-1g6cWQqIWXCIW)+a7LTPZAIS5;f{juBl!J< zo zm0W4S3E-;@+#Iaya0jkMXn}i0osFfoOU^_}UdF|UN%t3kGx4Y(Fb4B^?l z`#137T{`Tu$k0v{vG+N3LN$?}0pPFQ&1gFl{gv+m@*}DaUMgSM^1_o}J+$bbftscT zC+~~=1zgM`huftCgD-UTDy;-r`W1$%Wps}eis8I zXM)#=Rx36fav*ie<~YXW=#cLmN~vU7crCt9eQ81B6gNA*8h?&gf>wiE&*Um3gZ02^ zz#Usg(;gVV-8t1?i_z0QB8zZV*=;43!ZS!BCBj1NX$M#eb}WazpEo8E_m+dNLt+#6l!(1+#8mG` zhlaZI-ocTfBibIxBQsU>n#k^#IrGhiW(rPcH=zQy1nbr4GXjA6z357wKbMY3t*ZhH6asgDQ?A(+)*KRl0p@Q^*nhNaQ=F$e&ht416MhF^pt3IOM6pj7kOj zMJ&X1xGWKzCJt-P^CwxXWEsL8U=UsC*urlVKBgKLGG2TJlDh~p-9Ql)qTZL*QJ^u| zso_rD8l1o1)sF@phEnjMkmI+a?JhVy#Oyl`5RYK!gEs1Vixdf=!oV>A4JV(peQW^; z>?ht~jcFSEeh7+@KY%<*f#iq`r@Tc;-$`0dRNE$1VwJn2l=9A1Faz3NFY1vW}cO7*(+!>N9?u9xt zONqn~Nh7D0(@`60c;KN-9{i?3Z#6~$CcwXM|Y zS8J;st*)ukFI7)*Sh|y`XV@>*&USM3Tqj@8^Lx5o=oITke$KRqI;DE4Gh83$_iVe| z8L5x(bFN+KjMhi-oHq;YvCgjgE<6{_q4w_1p88(@c^{tlX8>t&={lUCTGY@&&_Lq;#hZ+w#*DWqwY}Xd_m!aoTX2i!(jZ!CUsFm(QMi z@#0+#W#hHr^cr2KzudO&VvNYAc)I5}ZL4cFm;UWP7k_y3uiyRGQyARjhsPaW_6u+< zrLLMvT{BgTq-Lhfw3#upR@%(bINZ|kRD|!)EzQi#=igN3RkLIczfq`X%(6N1hEmVI zsic*94t-MbM$Z~zscSh=EsTFmE6SxsOP@3BR#Tq~tEbPkd(9=iZ(F9`>gs;Nntsk` zE(UK=HbotuX0L0vY{%An^SX1br$^ms>qghq9Td~*F83Y11rO6i&Q6bg=UZK)t@rv4 z0&AQX7ZYVbDVn{`a*wKm>?veIKRq+S8NX;5ORKM1FaCN>r>4xegzZpmR$eq#Ey4LM z8aDjfjwN)vwPsNslh-H1YS|W|Bj>KAQ%tJqK

3KT#`kR?mqAdy1c^(XDQyxoC8| zR@>IcTV1C*#dVPF3`3|#7SRSdhh!5eHJZF-a@iI7`=+<9FB|CeUN>I0Fq~G8S~xo! z&;MwrWrl^#&g$0|En$W2pxVsN4n#qX!$CW`MV+XRk4LScJkDD4M!$_ofT2AR1!1@w zEz{~cEoW69pX>G7=%~*vTF72dRD|ncxIZgUV3fgN-j_t*ib63>8)mScx5^(8^eBQI ztazR!y>=fR){Sx-EbF|{wqu1+s~gSv1%2FXHEF=l^t$t{1)bjX`JRYps<&wh))L(^ zgK0LA==f`kEv%}Jg|WnnYjqbmi{oZgB@>%bV;Q?q1EXu{MY(mcUG3NcieR)`Yc!L(eol!R$A@`#mRdgUtS(#AkDs1-d_uRD?deJD$jR#L zY*eVx#9ZH@*AU)X`QQa!#1_8zcvzC4-Sg7<89_U)r=6Y=Wjx%`yfoF%OQC9BA>1(T zXkv`w<*;7cM$>8z;Vt+KSshT2oWHV_kP7=-?x%cdct19l%M(KmM_$kN1{o zmj)b7?9h~gKJI<^o+B7x#&f-oL^At<0xmAMTMixQ*NqAN`rOvp1SvQDx|@7L4?l=e zsXcyuZkxu+_(yjr_o%L8l>;h?Qx0J^>TL=`sm zshOYaBK1N^nE zSa83fdWt292v0{_-Tw-NPGlb%Jm%Gu>73R&s{{jR`zwx)~?Ckz-(-aq{kno83)-9AF&J6^^Sq1*ARY z4kN8%W^ZMXu81&guVUtwH2b9&aID`@zoe4iL1le(L0KPjhNw1YmCFyT?{Z4!kUNUM zoLg{r9Z&$|sRo5_=iK7=)UT=QyWO#kVOKSWT8g`ye5h?#Kj)O)v9A$iA3<%9-miI) zYk(S>nmf81xGdp6N?^ZQnh^)lVNsXg)$v-*i*fR;!tsWdECJ4;L1czFMBazVJBw|x zzidZnxSEL)iDxNZi2HdQ*PgnB?aALKgKcWg0%`*#L`&Y*JG6<-L9%SEwComWGJS4U zKVGZmVmW_;a{eSa1G0q{E01twbxb@+(Fi;Xni*227=6{UXROy^#FRG@ZO{!`)#5RV z{V{TMa%h@)L;h||5xfk}+XYKJLjhT1dD-fkBF8VXCElpv%v-|WRyb3y$kY=*SNBL1 zoafj$jidH?BLhd9HxxUByi)v3@JjLB*(+k}_e=B&GG}a}UQs?8;F1QqOiZB4^RKi2PhYwczk+D7JR+g! z6ap?&Mzrc!C^{Am6g~N((SKDwEhugi-^7P_2Kv}5a5jlXOh-c~$`T#TLXj^XnI-sw z{x?T%Y4ZgZk4P+ncw}YGF>}`&*r)3`bGNw%qEg;G$T9Ys`w*jG?l%v>x5&ix5QgKB zmqm+bEXVi(gihM9j5%-`I1L%cBH&t2Ea@!L2PCb2VU@>0Z;`;%0rM!SC89-TW>fle z`iyl=Kkdu*0BV5vO{1%y>w?j3N^TSfr05bOcg9#Y=2{rMRYEUkW@hwf79D5Vo;rHe z5Z7BPwVqfwih3QLe0p-SHaYq9@uyGCHl*}+iMU^`9#h}lsQ%J(&T1OOP!d$!N;>pg z$oOPj*1pO)=}Rr5t>^WAx3yvk8I2f@6Bkf0X!Pmt2q z+pQ(W_q-`P6V%E6{!D~9X9F*i^--~)!xOByffW$qARaJyv>b~o6rexZkR;}R-qG#F zUcYUEhhLE$GT&>rd)H_PmaJ7U?+KK4M4nPCJDoAwZBpYvbb3w?g(c!+iR+=Zg5c>3 z^uA1#$Vk_v+$DPpGOS+>quB(ntYc=hEdvMP>}*WVnw?GZxJ04H#VCZshffD0>E&yZ zxRQPWcAT9Zs1%9iv$H<#i?gW&QI7z!g{Oi(eu*?quY;_+>f)Ird0K!mfP#~CPV z3lZ$dfQDj+BDN3{@3)NTh}^LeJY{XPMW>8?IW5tKBYN1^3E@b0by;&3U4rfjZ8-nV|Hs++r071O=X8$7&~&^BMD#RcariI)lQZzGeFp#SiFWj*v2IvqT>D z2B~4FGY0f9MxsySl5(OOlH-u52T2f#OpdU)oeDkw zupO)OLANkU6h6B?A1kz!_|cNB4g;|wgUy(f-t~Cp`&ce(XAC6o7nxWYO)?jCViD$w+FqRu&gyY zgpBSg^lJi4Hk8Q`3Bko2nT&GSfr+4rgy2m&Lpg~uw zpb?}Q*Fth6Y8kYB^t$cU`fV0ktl= z($>4(L<}w@Qpyo6PBs>PjuJcGM}k_Yx~mDx2Ko(h7%*O;;X#cPIX(&;X)`{?zAqIo zm@jkv7U1b1c%{}d>YbMCU6@e~>KyvWgJ!-v9Tgl+v>;M!H=cg|iQ^OclP4~3junTP z)2}~yVnUxh{?r!nB3y;cCiD}R6P1c+MXGyn1WgCyNU8`qA291;W-8NEtr+_>{X{Y& z9w&X;@1Sw&lWB4$f+D8ODUluq5#}QUz2|r4W2w~?@9k|mc@h~|#8O5Ry z48??g@=5y20Os=U-owWyQ(>336?=M#>C^LG&R>=gdU-n!Y!ouQNSlE`PC_}^E`$~e z-}dR%D{OyoO9jEIY=9}bO;!Xc*d-`eKsp|P;7!yi=uEOcAR3j3O{I}{i*O5W#(_N< z;%07WE2=o*44ITN^GAxAapAt5X0nuM)Dl@haBjmOO-r1A_JFcJ?36b~%$%EbD(>(N z^>%7w)Ex!`Q+7}S@+D&R<8LV}^23Jh4bUUi9UgJTgbt^DOWOh#yi=W5ftv#9q zE&trneo}H@x4ehFSF-8{MNqJiBn5QmzDoi89t4Jo9BqE}`;xmMU$@K2j42rTfcpgNx z{p~L>R;WD#t{7HQr)p7rP|p`Avm_(T9k6!f5v;KQEk5E?%&QR0ZQoc^$4bRgoX{7r zW3LtPj9tSpYiqlo?*gv1Kzc+O=$=~RI#(Zx*yWHLrp?DovzvnHrDS9!P#mb6R|v6# zILEF3tkLdU=Y;49Z#XehFk%7z7N4b9`8H~|4C^5;W%oPcHS)`V?C97`sd-ta*M?oB zI8VJH329lpR|KJsh7s8$Np0$0X_1tsB(Q|R9&SNAwY;HNgTbOdPBB|lRLaZPC7n_h-N)gK{j)aCdr2!VHJX%j3%n>_xvkM|HqAW6yaQ ziXP;0Q-NCP0qg|G4?BS-eesmKslZLYo#VYnqqs24fyM4k*sY+<%*ND9H~z$zouN`esPhSVxcX$Y|NJ+%sBo<3?Oh_3|~O`MRpg6FCx z8R#^DTx>*36e-jYczM_=bL1;DtVBDPp%fUykjY`eP(0=jg_O=4w4$PQA0$tV4{b=q z?sI>Uf{Hh!Vz43I1bgqQH?+YmNf?NVD*hIJqAH4cyxLSd8ivEl+Q)aQPqYt-S7dJM z^rLZK2qB+o{nQKdeb^LHYvdlH$*$qTd9p7e);yIg6Wbo9E9FS<@lp9h)@@4Pq)KPm;5Nj=HEo1#tcjdA6(?4C?6RhV%s>CXz zSre}68N4`(1I%(mlW2x<4f+ees(2W^k`|9*O+%}Lei~WV9Q2Zt!+O+RZ6i*$0jiz%uoGNru08(MdeG5O8I^c@NQ(+9t 
zhZI{dGzEBp-5)|yD~fm!&4nLBq7tSLV&k0W#%2W#y|_jWZTx~y2`WW=0S-=TK2Dy% zvzNv>W6QzL@QE-m;B1fgRca$`5-oR^u)c)4tF{)O&Y#`#@BuX4y(6b3DjFs zu?5Z9kF=*S0;A)V-%{&YX$Kfrb3x%% zff{TS7J*}U#w`{rw~;sZ+*VB}wn(XUE9>9TIA|VXMb}~TfrO&#LEKOY6)Q$6i=E@* zg(JREC9Y^lBh>Ix#&5j*;>#;AvS-O8X;H=pd%%O8CChj^Phz=+tizOkGDhOX1kfP$ zebRwbdMuk5R?)|QE*5Y~ABVgbW9AYtEU4qf`)B$>F^+_CTF3y`ABgFjLi9)&1e+E! zI0Bgtp=}ef7no`$F9S@0AR-!=DncIypK%NMvw+@Y*(p}(#79X|r^hrB-l&*}%Zl=$ zyh7_Gg71+j(FA0O*j_J9eWH}VSo@-`wSaZLVm0h`4_5__)h1)Z8Y1tlO!|HqS-Wu9 zk`x^YE(G0Qr;bnR51vnwb^Zuayl4axV;9pBY|q2(M3MO_+OVoSQgTe2=uF)B>Be~% znLddH7+cVissa7NOw5S^&W3ZT5ylvgco8m_`55HCiOe}M3AO}IGJe}yfqfIoB^n3G zIOAgob@-wIQr_ zA2u_Dpu{XiR1L3Wk^tSB^f)g%pWjz$mg3ax=Kk!yJ^EzQuQCah(G4 z+8%k%2DA;fa~9i*6JUuQ<}SnnDo`!5s4iCVJSe6Q&?9Y%GC^sw-Y^Lzjeyjb5?z{% zAYv>>rO`2#u%(g)k&hRYMPSa$aMkX&v_T+C0{%Y`$Rl7?9);=GJ=hA}%cgM8BWCI) z!B+8|wOtWz+1kj9<{ZsH7KJ{62kK;L&;^l3!=QklBB%v_;9gLbnOj*Bbs!jL*N(|s zaRvlky2>;?$(m@=8ju`Jji+_**8wDdO|}~ikW*BOLD|@hewKnrB#jIdA@7ht$QFr_ z?;UwLhLhHZxt{}_v_#brE@epi$H?N}sprTdwwwy!D2(XvmwhjtTRRl*tHIIR0W1?v z6&rB8bO$%7KtWv<{v8=AbbFNiHH3bXszerEK*^f+v8Y&BF?q9*4aGdQ=)K8NY+Q1#d^>YlxXYAV`*};}|(+njSj~ ze|~U_%|3>LJ_5HNf!kjOm2`+0f6?HU@a~7hwsMJv6}?|*E0YDt7D~t1YK|k@|1YqX z)s??M@XGvCdMkvA<2DId#qZ+?!EwuXAK3X8JS0qIfJwmaPWt?%OP9`zZ&HwtJ~{5T z=u_%dBlIa6NSe6lJp_*sFj7AE4;TRqS-A?>$b_vA8$m%bZ1h2+TZ~|%_znud<%tda zFr*W=@q`=FmPI-jD3zF)BmgGpSg#IAa_+n2!H7Ik@%#n(Z3iH?f^=04hxB7eRmOc~ zAJS=jc{e=ogL5CCj_7UwBn%XN3C@x*T^V1-bbo_~B&NfgDo#7v1}UEz-5rGb0~n8b zioUoSeNYj~u2vuwoDbF*ZA!mjK!bSc|45Ige-!xA zZxB*0q~A#otsRUF`yf0mZp-C*6**NR^A2-KK(Pa>&JvoX|rH_`bfNb@|p4J zNZ=o`yYEyE2}-{SlZJP?&m#2wFe$Jm-Rj2wopY-|>@I5#>3=WE*K>?B^T3%S%+7Kv zUvrGF#)0N*xFO}UvmXX&zUF)cH9z7}2@BC@EABRK`S=l!nlO62UmlgZ_UC|_uQ?wM zYrf|EUx3Z64dOy}^OuA}m2_y#Hjc)GSWiiZ61h6atx7tSDA7S`_1Fo-{}ud`%&Pcv z?DN2sU&l{GgEFRsZ^(^ZR%@&$KJ<01o4SVz9!?HaOg0 zpm>bOc4kOlLA-=fKnfm&oQTe0O*!3(hS45OP+X0S+wkA#L0v61w_1b-L+vwNR`9Y* z_>Wj{d9;RHBw%3niB8);C9Ht2vR7qZ&Ofn2UJjMtRv~+uvfChs&+Tm$uYZLoREDkN zVY?$6mqy=7|7>?fQ0(f%;!p4cYpB&_7#$0$pl?4VH3Ho(U-7NyJDuiAt%+CaDgKX# z>LY<)ZBDdsDOvs>L*))GVJv_Hmd3#X`cmJ(>R^zi|G#P2Faw4~HYu3|hFsNufFR!6gvxD6(tei^y!~#oA?WL}rJ3#?V;6 zh=3V0hA7+(F>uE)k4DYQKO^Iw0tixU@@I<)+|sSUet^DoW0(HYS|wF^Uim^LrFB{%deY;;To>lF}h~mG#R%#RyR}6bnUt=_nA(%o<(}5lk4W|`6#W>En0?YbW8Oy zNwGWS?s$E?Td7y%KHHh-R_j%X=Q=yOll95&⁣oRDDX)@||7X-SyqwJ@q|uU+C=Z z?yK*Uc(Jp;d!T+m;-${P?xFf2iH~&-caPMMAYOLIJH@V3cTD4hhFftbUNzhaZ})np zeiZjrcL(lw$o(v4HrR^8?h&LO@eZwL*3J6EZswvQp=V!x-r`+ap(7xt9-*nw}Zy_u_-waxd&vfTJ7j>4NZu;JHeRrkfg(dy}Lfa2e zPci5>&IEinZc2@(S!xz9!d;n#o^H0T%=LSGntSde=bpK6@s5S>z-{H?VtYP#vE6g~ z*PpxiREwXN{@Kxu|1=_=Q8!(qZn*d^{TYA+%=jA_Z*ham84jd0F=b9gDUmCu#*~l1%zH{c#RM+XQbb|KDMX%{PzSo&Q z*$e`&7qt65=fsOA(Z-t7l|FFh`^u5ZW=bUoze|gr0O`6^QcG{8+h7-M@#3VvBtuf>>XmB^eu{HquHWxWJAKTxey`cVt3NdFTv99E zw1bJ~G@XvuRK4b0$8#iq(08s*J6EQiYe{ANlylyE6NBYD$iU*s)2mk+eyfS{C+0i- zW-#)uk6iPVYP%RKUEH~P#nGAj^TS?Z^_MU61K7RJ!w)+rADMao<;zZc9A;*9WK4ph?tA(?M4$dqUPcC*u7Fac+DOUlH`a=S?DF10(KUA(NX-Vh zr97UPt~F&0Y&Ub0c?&`AR&kKqU>zmICC3Kq8j~k^c~VB%ao2XUH<_>E8Xyq4xUGNq zJ9pTw+h%BU65hFsA8s<34z-Ce>#r<>s1_Ox>Apr-O4edHmRe(Bakx}ah;uTudc81r zq20qI4>MR4VLqA_VR1NjLYre87KbAp<}dc=gKqQXFz@R~trXfMQ0fSpsT>AJ5rl_8ACm39WrbCSzsR@@oamraVezr;N2cVuf2dH|ORztg5kA8dz>&#Z>OVS^`?x zXRM74N|;HR!PtIdFeY!b2lhsGVBIRXHri5fGaKd}mP9_6^`dRaVI#%Bd@OC4Yo#9B z9>ozuUZxI8gJRXd+tKn3OMlfMyOCMLm)#oUmrAT0#Z1i9;_9Cc$(3dc!&@%JmqFdV zp5Ip>ubLOkd(B`)fiYbNr*Ztn=CVf$bb=F9&shKAr>C9SY3FQhq^fSyzY^Cr?R+F5 z+wx%h+NPZs7axA{`t(R;Y?~z4K+l_<6%Q@xuXJ3n%OS@+akAZ;pLQO3DWNWE52k^d zWPqt)+>wCj;Li83Fx7s%LZ+=h94D?d^%i##ITmB3TS_^gN--Sfd=z0kha z_O6G;m7Wh2<*j<^y~vO 
zFjn}B>QR=J;C(Ea2U@t^RPIx_@tIOCOkr7AcPqRs%Pik5WvXVuDw&g}CF%CvNi3O? zRkHRYwPf1n>h4VoC#tj+!z9#ew3$xCimHt=nN5#_X0FT+AuUd;?u8BTf@YIjuQ7wm| z2)yOfq22L%r}fPlxk2cASMdPhna$la;r3U+_ZuwRK!!Qg<@1Xstx5BQxq5h02W-k- z%U+sQyVx`av$xIJT9HJ$*X()?5a~ukV>iUhjmFiLW+!@*Z#3L~tI-hhrJhCMu{~D6hNY^b2z6B{0e4F!tY`q)@X5JF4Yp3vSDJ&7wKi~JC>9v{Dq*sb864ERz zlAIq9b%y!nNVv)^sQ${bU(2dP$e}*LfXq|zrHZ7dW((C31|N>4I$51!7MXNI3L|9$ zr2BkfHJWprc{yC=@2u`jCdQEAVi7_P7-72vXk2hYK3qgb*sk>x)>;m%(jMdn`6=Uy zZkl z#~H9QKtUlFfu+@ox>ugylb_@h^Jzu0swg9d=Ch&sVrY}ur532H&1doaQ@H#pf-9_o!$flp~hsw4Eh z=#HixNJz_l6*$K*$-HBtQaIM?gHfnsPOpD``_GGWB{|$SRU*{DXy$S*zK{AaH76X= zcKO307v;d|q^d*#fzElF+6dGkT@ZbK)X`9fjA#@@Z??6Rj44Ign9{Qd$jmyFk9B*J z_irA{%a>X11ZVe;AHOhrymt9=lKKAv-y<~=SPfQyaS|$gX|^?nzk&$G z^d)mpa?P7ygCe3^gp4bOstzEWfic?aCSWE+GW_R#9{=+JxJHm&x75iX2M&^jz?E~Y zn>JWx-p#D#dKs4@eIEI9$e#-eL2;0AZHVV!u~GtjWl(^?4$jhm$b@&A>Z8Hfpg1ts zEjI_~YBb7ZzfzOLYWdlkDI630^d&T3)e%608cibgNv2Vrpsde7Z;#r_Pd@uF!&i$;<0hBjwJSfc3f zwK(lnUXnSlYGa!DEg?m*{;nq=5e=2qy7g z;jLUQhg6H#$Y0^jLTL(lSNCpWH$$AUJ;L}R-U&e&s0Ua9SVII|q6~;BKprLpehBbD z961X5f;bL-7zT!Q6Q~1%Nh}ZpXJna!xFZt(Dg4{FvKz!05Uqzf^GGLR!JOML=Pe=; zi0(+cRTPDRMNBd%%v0RS&8{9Ck)EAJkc{i959&(jq=g}oO%t$JQayZ?1~FF|qeP_0 z%)=5!D`+pQ^jCbf%v1sx)nGullg2ifG*saVs#ZBSlPt4`yQEQ_v_>;I|!PlKRRSUszi;7`Sh{N@vh zT%;c*YSW4LKN;m(qe`XrQXTMh&#ErV&Um%B+6)eB%!J z#`)P6(Qo`?oR{zk0^~@Y3)R>rbuOqm#3ib9jU_1Wwov3|Qi|L}c?){41uNAT7}9&>Li2h#H;^_hdv2rHNgl(S%+m(rBW4HnO*}BihU?SS_`g zStT0E)Alv~gN?`vAU3R6^=U7(0Lf>`FvA>2RTf(#e05&q)vUz>xB3t(y#< z3`{8{pz`FkQm9sU^Iut0lagNO*aSL^&)+&3>8^T=Um5v3S?oOxWH= z#!GAf_tL8E%!uC)c}tl^;uletypPRsgwFj)0*>eBdksi?Fr#TBUnJ~!FE2w*p|GGI z0^ZMio*NCO(}PjAudXx4at7=!`@cXp%J0cg2|MoVYKuG-@{0Ftt-oYhlhcF+_LD9*Dc5B%?tp%?vr}t?g=juEg#%E^~b)%}!d&zJ{QF9t* z@ZwlVOFtQsuM4&oLnWfH5{+zguZ~52DZfhmemZ)(Th|^}GdI zk}@R`&WQKz?^t>J+tuQfxw%Ll8%e)5j ztJ+1GOzjFR$Xl2_8zL)RG#H1(4gdkBl$mXDK;s_~5UbsZ%l|ln*Gvd{#%l%&sP1WPKX`%S6mqH)`TpdS{XWQT|q+`vq^iMOfG zGvK(XzsKOG8GHc&rr8sW|E$Er!s7LYHWvya6k&?YJ*>dQ52aNj;vhfHj-SGVGNfdh z4Qpj%K{egbYAr)f91=g| zk`_s0UuTPdo`E#*ONa@?no z51xA|E7u;%aZnq`+^c-6dPFhLJ@6-{XJ3C7Pw$041U?!92@HZP2dV=4A|;V>&q3WN$8wvON$s(z*kEnsGVp1jrXb^$n7=swu)!LcmR ziaSATzPHc?MG^(}3_aZhUW__LG_UpBa5tncBr%D8qb|J&P5zZsJG419YFcDk9cf$Y z3$|~fxcRi_+b!L!kqXkFZ4(rNob?VMrTBD4UXJvbOmHOUSGxMGux8BlMzH3H7uvh1 z^)Cf#y+c`6{VfES1h$@6uP{La*KtJS-vlBAM*>`b7m<`Dy)57r-c*eY%mWE{g^8cng26 zq#nc)cw0nGl}+!i1QSwoC91h9YN!a_s=tj7(h3kTL_>X=!8(FkAyF%ZC0W~APwOzp zPcfjfDI6c6PhlyNlDmFWyE}b`IoYY|?=T?WukFyd^_Q4%n1R5?PcSB!M@B|~k8pIt z6-E4&!a*)?1q^X$1Q7ua3(^?kR^{-~&oE$9)!%1u8$sl$D6sV(FwtiqjW~r^z0?>| z3iS{9NOUESGA3w+ZSE64P&-tuLf)5U$W~IJCK~+1u?F8x@ZbNco>a`aE&fwq z!P8nLED220sO|Y7YI_%>!Bg=6D^=A94IFBL-oxKHKptJ7;qYm5qs$2Atxx2g3SC1q6XtG2ex>k|TY#1(8IH|bjZI3E;i|6|%Wgp~Aa1E76^m^vx`xa>{^{Be? zns|2vzvK7ufBKy%3!~(WY>dNK(b&j0y^YcFoxCQ|4UOzx{mW}7d>6yss|x)-^^8A! 
zlWPhXX`}0Gs{-y5|7ez^twP|?A0QIhTF0rb3amjf$gEp_bx>RY=z+UUF|c04h7hDV zVyO_D85B^~x_J)l=c#M6h3+ZjeRM%W(Kgpt(#x4uEHf!YZGYH!;t{i%p+=mC_HCN2`$iB2ra_W z;~g9k0R8y#6Ei5|v)q)idMZH|G1(X*pTt_0luhf>wnUMiL%pJz(mJzOcuSobu+I|X z;)?4tM%XjrrQ2aD(N&n$``1MMMJycVH0c-Ag6(a~%U%y8lleV9E+v$`Hkn|`Fe`jr z8@V0kPBds=#cjQklq2m#J)pzl3gWAO#r(Mr<6!GH_rxI0St{Ufzm-6uC<@RlB9eDLB-O6x|13b464>Guh_!VBi@D@kD zEPZbWRuYdf?Qft?#Xf{^FwOWA2w?KILWmh1t1kyun;RW^( zS3aHjXh$z(?7Q%c-M_^*M>u%Ix((EeV@o1v&NahNR`kNdN8%%lC9WBP`jhV&RRXPMQiw3kzC{7HZQ>m@*)as-lHV5Riq<+ZCHR;IX1!u zH*_;-j@PSI8j+~^{skVqMMUo)B5J;)dcwee4r)FfuYA39#wuY$BXi$_>Z?fP&!0v; zX;j~ahnx0kT(Dew!Em#0mJB8=FhR=6zdcMcqi|70Be80T^K=QnfZ3{fLg=e%tJUHqebC1Dm?*aEc zH_>*;Di0#%Aza7jtHKUWxHTBA^X?;Z_ptk3+!fqu_aw~NMR&$Mh5wLgY@>uO4$QORmS9bJ)@9JQX`bIv>Nf1K1@maj!Eb zFEFJqmcGXqgJ9Wz?9{2IdbxdVrmq%GHRt?OXU;tGzL`fJ{lNR)f9>=WmmB(2lMi8= zbX#0kdd|G+ccp^3`~uvzR_10v-%oL)Rrkh^jyl|U)LQIglXmiEf>HjC!%%wT__T9e%(jg1^p@iyuO06+)q=;g#TEU3q225S z$A^^@nT{G@vA-$WhQ3H=vN;d_V%WcW1Bb1Q*hWv{D3HWY0LOwv!rLVPcbp7?4fp8q zWKWUoKD(y^VGlkF^ZcCj@hcGQqHT4W^9&(5_Kqy1T+-MBY|2m&eEZ78&75@&{bWbl zC;Gn`(d)42(y$QB;aV1=t5 zU?=G^^ekG1cTwMAG2$>2or4??N)|fyMczPx6gFqV6MR#*oJ=h>83?I5)@Y^H7tX9odufwi3|len@DPm7jZuy$@An+F((ADm z_)r|Qb%L9KYC|O!IE(J*kI0Eu0tzB0EIkgR-I=CtCr?kOHZbFiBk=oCmikxN z13g-Cr2ABy{P_;&3e5n!UIC`=Tn1Rj7FoJz>tIhDLn`BUD3RPLGuVp&vNgq+FsJd1 zK4!=Can=i+?b0>f5f>9y9GYFX8meIQ2`$&tp?HH@z-lE2}v4=X?7W zaWd5}P|?Hs3aIZ#2RmqqTQ{$m{*$Q19_&C#D4Tdl2USlfj}5R=z9@0MX&NU~Aw4hY zV?h~rrFG^XA5;)lVC^#+69c3-s)KQaJAz4_XKhk;CmF%cTT^Zc8xAHnGE{l3?FMw2g_AYm&|AHkUz-YcxqG0Rnu=6YyMsNV8r#bf3+CFcU|+DGZ@@LS zj`l;1@EjBbyK(JV+l%iV3l89We--Mo-8h?TU(XH>4#x1_eS_U7Gw$x#Yg{qZZ@ZH{ z)7`nYf3VA)!q@E??2Rm#`v*I*Nqo;|c;3+jwvz99y)@VdrP+btkhJW8sM`*ruD!Po z4-Sf-+Rniqo|1(3?Huf4>e3NW!`(vdI0sbfG8)dHg4P~c+YKuy)Eqx;s$y^ywo+rs zVOy~wV+U$DgccvW^#I2H5L6)Vad!`PbEF1`HY_Oiz9m-Ey|b&?rzkhA?xUY6`GWYD z%DIOkIQh;KoM2knB$})Lgxl2z&aQy(bR`QMNjx0X*c64lwsujnakP6HvrFj_wFaiE zXmY4h5Qn5#6^Q9-Y#1q3AYATkNN-(O9DkKnQ6rO5DTt!w*O2yyLzRXBmB!uGV_L9{ zkAB-q>!&wY7g(Vz!gOozxZD>{U`&6 z!Ot)_$>6IDev8597|_v8U1jhng4%=0UJ=z{Y8L?rRsT8;0?dHHh6PHpjn#fX9G4i< z=+5hFmEmx3G@;DkG>))D0ZyM-5ppkg%=5x2-fRZW;rIyZz|n$Wv<>ocynm&uKEwtU zIyh-yx!Lm67;ne4bS4s)(n?LuE5o9Pm&_xEGkeXbBXF4^_vW+iwetZWDLt> zON?x#4GSNNc3Olb$R5$jTD8NX)WSh%=nEWTKx3A6dz8U)SnIII!^)_fq5c~mOTTmQ z>LqEaKs)uveDo&_{)mB$&?&^i3e2|L@zLxwIIY96pdTPEObN#|E-M`SrJF$|sf@zE zVGYA+5Eh58ktqOW>o9=_mjU5npxD(iQd{{lewCB^^Qg7FqXdmz75nUM?8PZyb18P= z+|9x4PrJI_Hp~MNOPL(+?pk)DPP33Vhuu0ndZB8S?^=3$iRf%&Z7u(6|493r)|mZ` z3if%FtqL@k1^7tH{tJ}L-7RFW)%Dw}`mo!fYQ4o}9sfAgRMzsp|6v3Xf5H(A*shJE z6*jdO=N~5uwfb9JwsuusxvU(ClvR^EZ)mFB&dqUC~RD+FSO#^-AF_ z+gu&f#6{jbLf81gu>5OJid%@_O_#0P1#_$ zdqBd`5xdr~SqaRT>;Su=qOzHLN={z|fnUm_#2%Dj8RLqno&$R%d$xoyphC@7;aM&lUa&NFkoK&{Y_=5RS7;!O$9vkhieS4q$`i+L;wV^5 z^X$@?SR_7?z?4YT)uzoJ99I6Ybw4%TJPYei{*287%8j5;r+B_Pdy6 zxncY}rLkfB8+MgOE9SD1%lb2W_sISKun(s1?LV;|Op{N>dBRPSR{66^vrw#7728vL zFZOrhV0B@IVGid3frV@4NWu{@Svm1?rq|P)aJRn2frdC~JK4DxW+g>!u-q*K^+LDX zx;E27xg68DCh8M$+RU74x32hd@=ADMQ|b(8S**mDou+bxkmm7~E)>sjN%3Y#E<38s zx`o##PjpLY=&BzdX4egNI}IHHRZ#uFZ(LV2bt*-AT(2bu0PE`gnc=Cwi1}V4zEzaq z!kwx7%3FYfTisBUi}VJBDaJm*z+-TO!KWDf5`$l6K==_(=(5;%sab@KO=^-0#Uil^ x3yC@BB{ literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/deit.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/deit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1cbb9dadc88dc60cae76aafb6181c6be1863ed6c GIT binary patch literal 5007 zcma)AO>Y~=8QxhgKlKsCc3eA1fPsrvq9Id}8^nQ;xUKCr4d5sSBs2iqVzt~Ek}K^8 
zJv)>vl>!RLMNa9dhaP&6fL?OxslTB=Vv$SRlm9~6CVk#nid@RF(B{|5;?WZAN`NEJaS+G5u0ljw}v)u`64F-ulX>ZjI6mZ*s8GfPxu?YMNz+6}=rt!C{xC|es%mMv&CjD0swROE#& z9#lU0^x@AQZ9ZpMfZ^-E4ALNpx0DyBT`ZEyr>(`y84uE*qa&?06IR<6tX&dz+Y$4h zTU|ziJ*&6NxiIaDc~%qkXSH_K^c!Lh{o3c2W3}rLa3Op1VUX$|45fHM0`H%&-(;HS z^SmHptI^=Ne###oY$eL?^B@|8GLo_8NtfT{+gKaKLEPiQ)7~xF?FN28vq9wbB-c}A zQ~pCkO60d<;xu}mGmEas%JVv2Ktr`;sbhW2Km|9=^Nv)sx!^G_V?PmiobR}pT3>qNrNqo658{Gjgx zOyPa0kp~j7*&CX~f@Cy~l2p%-@{=e+hFrkAsMPJ-?@o<7@U-85sXAts8)f06eDspw zDRNkUZ^|HuC(bGcB;tvJDHrw3n=M|X@}nt{WT@RBMmorpuWZUELj<0L>GJ^pbg0SL zCKdPl0n$9xz?O3%R3v?CeU(QNFl+1>dSqnUGUX^#GH{ir;pJ|ac$7zvRnqY~K^W)* z65Wfr90MKv(UJU6sziz@336_c={wUYLmlSqpJS2g1hxPc`&`I@?sJl0)+z-mPj>g* zPT;{Log@h%;b(n`xKI?io)Fy3A!9fe!c^atV?|lF)UaR-IWinqc`)AI zzTtXF z<-b_vpIyYz+v~aKnf5}YP{?tNDP)_tK#1>PcM&7y?N78qe?oL@l_2)Rp#X3cwIX|; zV^#1*n)_&~=A+!GGdqgua?aON^8=uy8YCgo*u`-(iLFgQ2XcpuU06aV5dMQNB*;ne--l zvsgR>4SAX20g8*|g4qd$XSK>}i2sW<3Q0ZHWR)j_90!oCA7d);!ZQr=rwBEb{5%qE zB=H#Kp4+IWzpk4CjcPET#%C85-Bq;L(9-vD)0So<>)770NA`#bChR}kqtaj49~iTA z>BQ0HlZq&b(#YDWo|aCr)~yK(rM9!N)wDB*O3`eQQpue_gd#4@dR9g~AeE|Ml+|3E zdeGR->^RQqk3Ho@5&@N!;eL=P(~*;A4dXsj^<@o$j2v{DPUfUC?5aA^0@GfaRd!K6 z_0lg{R(HW0_~W`?T3^4rrjkUjMbSV>I4p=~x(LcJUF&%MZU+WS*T_n(!GW3w-G^xD z5^fDJ~2iq@H4GiD|+YiJLMScJZ2BA-z#f#;9^kuZ5KjMX!C&zr~gNo@FJJ=@cIX z1KARl;_@lnk-IZ(UPW+FDVfdXDQ(=I)h8j8RZxBc*c4uLEk)6qm3&WUOF2-u02Oz< zXWCuLX<#hzyDzWM=m8x!?XqgUo&N1Cx9D;uur z8dl}E>$JWKy7Y?h2?psZZVk4`mP$+P3ac;&KZGcLwSPA%j(v&M*fqPs8tB=-tJ#M) zUJS!R#&J;nJVmEJAGSMFs|mAM;}c#O>34n zGKZAO8jr^)BSr7GSN{6ZKMw!+V|@N7rCKy=xvB>K<_-Am%b?eX#~b)K5~E5-<)78g z4uZ^~V@DBMo9Jk=+&t&3@*(CEw3I#uEwXugkzHcWw};EGuuPFT$PzL&8wAOCO_PA+ zhD*tTS5@@qUH8e*3&%56*A|CH+O_)pTURRPaA?lu8D29-K`V~c0v5L4jPQoo3vjLxh=i@&dgd50R?03< zLB6GA52uAXP!zt|r9Jcc$t|4Niaw8JujT8I_qoD`e?jO;^kuaV@)h?ec5=ztJpJr> JRw-R&{{vokrl$Y^ literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/deit3.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/deit3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e479f1ae966dd502d85f12aaaa8ebcedba208d1f GIT binary patch literal 12958 zcmcIrU2GiJb)K1>-JSj6a!IZzN|xg>ZHZikqJA8wf#MjpWjR$L(@G>e=_0{!xObL2 zd>NRcLKqJHPz zncW?VbeyzZV(#3z|3BxR^PO`pds90|Pj#m6gn{O=%O3R69$ zqc&8P;+m&5GUBZ_bi6ZOwv%h*I{8Lkt?`SrIRIbDdhFhIo;cyc3=I#(d{w<78){vCuiyIMq4bIE}nA zoARFOoN1gHAfb564w~)joehW8Gg0w1+jrjVv4Q7AMftwz`ax8by#%i9q0T&W`rK*SfrCE! zu657mUAMc59Ie|$n0xcB*RS8a{YaxBW~1Uw>il-oMx(QD4!po^J2rbY2%IjBEvm{f zb$h%c#~D?*j? 
zF1y{JepU3-a_nHhslSfXr6pT6Z`&QmwEa=@)W!OfJF~uu0kv%Gn19v0^5REVM-^I* z!!RdoJhPf@yW4fVRG)2^F-&LFrP*uIg#2*})aAJG%a^WvWSrL>bgWiWHn?K;`a~rl zJUJa?ZaAD8Yz;i`!G&fIy*ELMrl4`(bi1QrQYWI@q$^Bg+zpP0)og8=%gk*CPxLcB zJSMtmxtn4WkDbI-Otu*;2M_wrCzd|#+Y( z1DccFH672vMg?Zj16hpPpsDOSyoY(ZU7FNrZ_G=pmnJ5~nB`*a18Q)2qu29NGy6>2 z3EEgbG;T9yBA)7DR5M;xK`rq%T{YM5@PV^F?knaZ^3a;gy^XD_>(|6uy^gh5k2fq` zYT!F8)#bf52nAyS1EJxVe!Itm<{%h%gDSEM%O@TpXNATm=7J(}`{N>qXdpR{qc!ts z0cbYkUfix5lY^K?r_f*#cQo#Bifz2p<3JfjMiOlYxzD`sZ<=t-IvzyNM z5*E1DOc)@gRPYq=l<@ffiXc$-m7y9I4pdbMGfdsi`m?_VSs7-&3JLjj$eQ0!Lp4+^ zEz}TZ!VE$^)DdQbT&V9;UVc;AD~0M_IWW+MzM$-@uPdLg?M;QHVIe36rLY`o`>4N_ z56Z)-FngfxXGD+MUit0?QL2RY)qPE*QmwcgeFw#`^d$xTRKgtpd@vp6_SLQ)RJY2* znFC6n4f6=+LWB2$8d{+-onXps1+B;u;;Wur6Kl46F5xX8vw_EO!pv^Z^c)`)0X_!y zU~cvTGg4Ri9M%ZTJnIko4v!4W5)@)Z#W9$vJhIE zPL#9zeW%MfZ41v(K${cgUzgH3%1MsK3ED*2pvRl-$hZ+7?r17?u=q);Pn#U&HtkNw zzEZD5y6<=`ewK31QLse8G6nM#T%v&LMbn4QSY#x}Ax8 zDp*$2vwh#P-cx@3;zi!;1s6LVz=MF>ZZ{R}F-VDv8+LPh1E<2j$Q(C#VYUB&KaZwY z@%XPGD9@BtO)WmoX*retimI;ZYE{*dT2>8J`!S!7|oyqQO5${EuM3sK7lzki?bm6L%tB z2#D4nKoUT113J_{q^*;Xuy)$T$U!@`< z_&BBIV?q8oeE38_#avOWJ62V#sJqqCdd3Co>RP=(2ZETAg#%$(9RV2;FI(1K@UA!~ zZ&|F@v@A}HDAGwnip}P)(DzhKI!(v(P-Z-=k0F=S*!T$qbPjzYT0;R1oYpk#_|G(5 z|B05@h*Iz*f7KsWmTRi0bvzingQh0IAfO=qfqO9>@QwhXig%vg%!mPCffe5^G_nE= z0wTU^G;#zI-&H1n-*j|FEY$=gz20qt>bbye_CJEeZgc=)Nxb_e&VUK@NBL5Akzf}M zd=QX$Oef|!L0re=j-lfJDWDwDGx0sb_9HBK%jUM{Ii6`JXx|k1qZ&u?&q?t51ds#` z3P?9p^L=m3c%p!V5%5Ljas1dW48iAMRXnkZRxNQww3#mMh%l#D+na8JJdfN zYZ?}vUsDW?Mm=7F_*ma_+|71y3??QPFwvEmW#10khq}5Q^nkzpz-^jP%7V7&?$`k* z!f!%5K+^le>14r=1ULdmO+f0q+dI~VYh(Wjd15P^uDt=sCeo-S6d*n(pjU#DjsRna z#L=j^Ul8&ga}3h!lZJsQr65MIzo050=Ci&&p$~lWI_U%J6VO>GyCdZT{lvhMDhW|R z1xDa9ro$A)Cdv~LN~*J)Yw4~MNq&T$b0Y;|6FrB+;}*S55<%+!Y}5GWzHOrVD)$k_}bMu9R_V8d*v z2YKMq9Lqe^LL)3N{UM<~BUKt`6K!OG2PaDxQ93J1mk1Tsb3oMmBNzdcbevEaKiYLo zj=CVv81%7vUSOuEFyd!X0jF1<^{CM2y)B{72>=xt3DNTx&~99J0&k6>mOx&jP?1fw;{tdso6a73V2(90Sk3B3%p11Ld6Aw&H@fu^yrrv}+j4RTxgeT_el zw=pb)>Q+%`5BS=GcCrN;W7A{;*|!s^uZ0~=-=fz}0Xk94XLP2ab7T!&kOt_xn*v7(%80}6pS%~qrn zBLu-oP}VuT$PIM$O*{gI0n7sMR@7Nl`-1+{-TM13nI=ihhuST2&c^#XqtRrqTZ(1`i#6wpDC$AlJ0fJgoY#g2lEbf&iP_-7CZ z$Y>^HszH5#0-;uZP+qF$#p!FJQ0g}mfBD}>WD+L2ilYy+1Qa(lKt;ervKoF(KhXXX za1l9xh{!1+2S(mh4$QpBsUjx}>)~V_n0ApNCt_&*E{~H0Idaf zfvw_Q6jsX;cKTwJ6+2|V2OU(z%tTX5Y>nov2X{yZHQfZnlEn&UAu4X(xN+mE`Dtgz zfo4nTgr(kb?+BUoQacFx{?&^YZGPX~S?%%7i+$F*c;(8akE~w$mCKhuwu;Y}*R2m= z9ED}{3B&JIS?>r;OVAD~J%lT))g}xnke1tIZxV1XwmOZh%*jMkMpc_;yzB?u426jg zvRW<=Ww-%Mk1(#Tnm1Y|8KF=PhOCv8q;9r*J!G|buVXIxkm}yjin+7_OFO+g4s!<` zih0npoh6t6nYq*+Aet7!aStS&S4=k`Lm62x+V&3cmE-m9ncL0-Sc6l2Uog*~AKNR= zpP$6mBkL@5k(Mb+krgi8D>Xo3P?{pr4UDm4Ft);{b(%xyHfc>L0mn5aH3%p}W5gVD z-zSH5l>U%z_pk=>gn`%;kU!=?=BTICGERamxpU*8xSdUlycJ?;K_tRU5hF+EeqY+A zVVi?4;C5^r4!>>pK{2nvgF+;8FKvQ_xv38esL38ci3Y5fHAS5jb8FyZ;~kg;aJoP) zgTzuy-<_Txq(k~7?xf-J>HU56LH?|wOt%v$m>n#5GhU$ z@WSO4v*W;#B-Rz?EG#lr8TTmg(=VfCj+-D}G}-9~(C++SNV`jL=uz4dnU&H?*v~y) zlVeTmNC{As8pZ#s^3rnQ`LU?O<7Qb75KTToj zSrU{b4@5zP?C?d?9}p*Vm;pOPEe$nG{?3 z?zzz7#c?;6Ut2M+)zcJo0m|MY^^4JunwgkUVk{KYPh3HsQqgVN;b)qMyIL`Sb;bP5 zQ61Vlo7Uk@H$8Z+VAD2T2mijdF* zzJYlMPzp}5UN<#QAt7i@c2F0Eq;o$##l;P`Il!|x9--Lcv4eKRW&~Q9kUa!hQjC>+ zsGUCCpJ_veKrqx^T4qq8blZ*%kukX?q9!Jso|QD+exR;}b~dv1q#Uw0{#PcLNcG{M zB1q|eft-V7C@qgj>vBoygZ2L`;!$x7;P}W~z&9u>C*h~7Ku>&Rh>8d!bpxs@7@A8e zxT~PqNcEzuK&bp_6nnHl0U0b@1!1JNBb_j-OM3ewQ>53Rq0LdT?@)k-lt+*86K113 z{06@HXAva!myFZ*qr&KXMfsQ{)|#^@mHbX2LS8az&i^(>JvI%5GeYUlz#O2%7_btm z!|VZRw=fSt!_DujJBoi6E>5Jt#i=E!Usb*iXC|DTC=;ru$gOB}PqpC${f4?X8&G=% zwD<9F7B%2v+*bY9(25?;qNX--lNzXRu&k(G6!DyhmjZHul6h+?D8PM6&d?&u!&H@F 
z#>3p+TsSwZgwtZ|(`?>V!nwatzeMG$C|TW_K@4uEtyxx}_V6yAQfkUxO_<8&c2xc_ zsIgL5!ko&;pBv2wrZnqBSmu8o%nwfn3&T^m-$ElR9>9Txywlz3u*9aeGyJa^&H6Me zAbbiA)*7pbc~$UzW?Vb0P1JrG7K0Dr`x!Cv`7IL$y6K11?z334Y_Nzm`=>CwsqSUP z_jCK2|Ib*rYVdrR-KQOxkxu5|EPcb6bMAB6-if{WaGn*m75)RXT?*$M}@cEzGL0_^;_3(N5yw+Z{S?#yocKi$^OV22L86n{wkWES z@Nn)!9!l)$N7V_@FVF!@;-iatixzh_ab*x@P~3H57W61!2okauE(KUHg=|N&6z$vi z9M$71)UPyC3*!uBMH%$O!Q++KLhoQKK$|ddmnQZNw+{P};bCw4xctZ z1=$*;e^eYp5b1-c;NA&c)q;vc2NRz3ap4lJyZChcNOhwu$lSRfWmuo1+^D6)CS>v^ zJcm>1{}m$D*`j9PrUWdok1Lv?Eo9_PiLzE!VYAPWVU+Gw&|mJyMT72C)YM`IcP>{o2?UTtB zy*$=XBkt>mmEviYZg|Y1)!mB3rXyDX?RfH|-K6Il9{<}2V9tjGKc(Q(MQpK#)S&Ab zYCw{eq=HUYmA10N=%1DL?I9#LE^!c_&ogD9@)s#KG;oO{!!nQzdQbqT*F&`>th;0Z z$JM4RE-)!uWg!oi&`)kJ4_N{XfR=K>l+2@RPum)Q8($R)htqLiggUA_3U7wlA*A?l zmK;9c_#aU~Ts6wU>;#=ip5+WIpZrfKPu$?icutoKy1hkdg0$&~L=}sC7It9aWWiR2 z`$lxz7L1U56~fFPsjrFdqT&do`H_y|KK}-_0CG?D0bRzyPG=$Y)uD2r z02K*W4^|wAY)jpvz0kI>Yd|||WX1O~9eoeizr(yhiAHD)bHH>t;1VDQDvMa|>$tFn zYiRkv2nxd@5Kf*X{60|twOHCJ?-f7?q8H!-lIIn)Zal;|!YoirBGn6Pa=%DmVo!}X z-@Mt|+;sRZZ05qCCUo_Ljm8E5p~Ua%Mh0;>eK>dQ$`MrahiL7a zL^YQXF~CZ6&;_v+x|BpLiI|RtYhbwi1>PhX3sF?%C;ETxF2oGw2{BVIAM0Sk{Y)6> z#ft8uPk9W9oAOsEpaC84bZR2^AMp6Ug8)t^O?z3#ASJg0ssA^)-(%f4l=5Ht^r4jh z(B}`Od|!V$ouYqNKPx$R(t7$1lssIOe>Zy7r^S`{`m<3Eb}J$WVysaXf(4iEXuk!A zXc*$IYqQ&xcMu6$Hfl-Q>IQe4+x{vUK;=cmrw(ULsLB$eR7P}249(+oQl(g?O90V0 z8yLm&#pN)GL*~Xe2UjJG;1772>SSe6x}GMT<5h|Y@)Tfcc1Qd-fHgT5fyU?tnH2xi zBVdA2;U65t;!JQWL4ibkk@1o&@iHA&-$Veq6M~c|u!aYJ+8I36O!c$5PtB|U14P2w A!~g&Q literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/densenet.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/densenet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23ff7aaecfa4d60f5501a326d45c4e746ec47841 GIT binary patch literal 9205 zcmbVS+i%=fdgpC8oU2B%EMMa=cCyy&IwQ-;I+r@$TpVSS+DaNb+hVpUMGZM4N;Bm6 zkRwY|s&!XEZJ=<_#R5fv-38?BlmCZ4xBoz&h&~kPL!q~QDVk(=Bv9 zw%XC^8rS7^rk>$`dM8`Y4%&=PR*__>ldIc)I2E*4Xym?JKjX+ZrRwY>~qUKru2-}vy_yVq|X$^eYl;@fuU zzHK+Q)&t*d&}@^JHh2mD0u@)P%Z^l69J#K#nxi=C0~t?^o=(Qmo7z38DHA@_#UA(-Qhe*hdy*#AON48ZzvlX(d!10|&vE_jcF@=|8L67sj(Kz`@rLb6ib#C zWj6I#j@4M12QT4Am}T%K8yRtCA0w?CkKrDpad{rcZLtC>q$4vAbo4Jo#aQQ&r8tZC zvLgXQeO1u1RvxKIVapZ1+qOcl?z*AR(yAY6VipfEha$;#ZO8HamNP(UedtFxul99x)EYJ5M%Mv6MNrtw3IZ=E+(ccOD?Mn_A2hU|eJDI59l(f&Kv>lIT zJFuf9=ZK(d2|IF=O2_R4V%Kt;O|Rj(epEFQEp*$>q~Q4$*wDwq!=%s>!JTL$9g|)6 zQ&bG*xkj^A#Zk(2KggNc@0? 
za&dtMmL_^M{be+T!~;cOUzWAUn$kZ$f!|c;DI|%(2x&(0Br36dP5RUTg36{ONEt%) z+*TX~EUT{GT7i}*2`C*8%d}d;cB*QMiM*5O5w;}|PcC)V_IItRF|T1BUaq0&v^BKc z(-X`6#(-}unU+m=cm@S)8c6DASau|B%CXYIKK(}iSo)kKO z4j}puf#)L`+ErY_2Rts~@knOmCg`4Rw1W_Pv_hkaaNwlN6H9N{?Kb2B8`ljyKeEP@ zuU&!uvfJU++5iM}w$kV(`El<7Eyc@#D~L$x#II6~cm3qk!M}q}A+c;xoy+uJ_&+Ua zimX%Y{#O7LffpjE#xVPV^fM^FO+}nxmItegGm*BLInWjnbyVtT&$!TYxaTQ4P%yNSW71L~0sh!=W(7}>CniPna_W4$9T6EWdPKml%gQBw3m zD{Kg_8(~zEdv~Ydq8j+(3iTsa5YJMWJ5Pt;g-KNo-qtPpO-5+?lmQQ7y%2PULn!#|K~=qn~;TQ z7Y1$uy9WIVGySR?nfxBsL;+KXA{AvSW>Bm&awvxv)#)aM5Ym=E=^Y%vkGUt`Q|_tv zw0oI*dK1h}nkLG?Yk;cU2gAdr>ul4ZX^0nx7pVtY0hwyHPA%w}PiOb!NV*TF3`lKkT2v7#ooM6_SZY(CJ?7pbu}PE-8lcWqx2gQwyqnsXy+YXUZ~FN49dT z&&H6dSwU)5d`JcHwIGv{QHxeJVbn;*Y7gP6h7(!4j)f@aA*D z1mu}$r`B+X6%;JU*?}N`Q8~){NR*wrMgmOQ@w)`Lsjy{6G(a%`3#|^;&L%U=PPf1b zp-0NLe`+MT<3^K}-ZzA}LEARGs7_L#;tn@aOp7mUj_SyfhK(c_UpXN?L_o-`Dg;=A z@DAU#NZ-uD@kcNGu;d4s(YGwV84mq&4-{%;Vb8B78acS^#RIQMa~e;>G5PBm=UIHs zG56-dz0W1akXh}2&PvkorIyc|xZW~9+Py_yzv*?lZMTDviGy)7XqvB^YuvlNYa$+l zb5A}DYTH4(M`B>NO&G5`f!GQe)|HJY>V~f`E!kqn+pYzowbXT*OUsupzFNC@`L)+x zs^RtOnw8EB9qTncKx~SjV@4aUxfZbB1hB&G2Gby0UbN9$uR;GWwY^?{qX(_L{L<18 z8-hyc*uG<~b$2jcBR9-3B${Z2uQPa_ zlc=`{f6a_da$Tygo40@s12`X=Ha>e1hzBpcX1?DvDPZ0<=6PJ)0}DD_}1h*ubqdb6_r}pqXaJ?uIq< z$F6X(w@p`s+}93yHR)6)1@HuzSOKvz@l|aK%!{T2XLlns7s(a)DHio9R<+veth<5+ zf@=_vf}?niA<@jGC68D7Bs?~C!>qs%P1?LPh&4x_06mP{&M=!ZO{{Bf)9$q+AQPD9 zmoF`!A5SxpbQppkuh;VtC`0b-mOGlMhSiRty)=e2NKy=;e4J7dGY-~hx;tI6Pl0cC zZ2@OHN{O<(vphvaQuzb`e=^nn&&Apg$1^>d`Y{*RVTZ74$lNW_ zb1#^P8$!@)80>=oYvzp}I0QK(2rLJ0Q^XpjkuI3)z%+!=xMCXB6@#X@SrTEIlS6C@t94N&xh+!7WT%6D**tVwEVgWZ1S0S(zV{ROhhMtq;Sz99r ziDkF0@O&%}JF+;; z3PXXWn&v~!A*Gh#&W48_LSWo-!>3R8;_?M-AQ*?$2hw!dEgCW&0W;Hgk?CVVLovFU zzCn!EU78L(%tfR`QW-Lv-j3^3$1t8m^5O+EU0ymhMf*+Rx_#2DND7T5abo4E&S|z% z!e1~0j^Ax&RAfUvyY2N;6Fjl!=@Jf2cpO#jP{-r2NCkP!hvdHYp>J=K#* z>dFt57(9O$UqzD&XIM%1hue7OcOX2AcaB>3I7;pRXFX4Ut8s`t3{6WBaLfVu4Q#?w?s5dqwNBbvvuQ+PC z92e8LcuNl70HjP@{H643#87}BJ343B;bc2mt|3|E7+lLm`PkqLIpxZw&9q(u++0+~ zOjevjWR~Oi86>29z|KXLILB+ur)!L6=^c^V=|5NYN_*wF>=d>n@j2j?;_@*m&c&rG zT%N#5%vL7+98ijJdAP#c0zD*aE=Ib_Iugwv9F68T7h;MBb7N2PfgT;BEdFK=0mU4x zezSmn$Kg+w*~e5LY8Zb4<4-=sSL@WMcA7@*%|y@OS>ly`v`0xVBQAlmmFP@diPKbA z7Bd()m(ZR02gcqk`kXp=HlC#j6g?_<&(2G)NP9CejW0QK$E7Xx?`4sR%y=ex?%*tX zW#*C6Bsk8|2io4;Wb59HUu66U6VP*r2lCo;fOi^m&2hB*uLzaS(J2WBJPUO6WDLcv z|GiIqd)xNf_Ilf8-|B)1hZhG(un0kb2m7Im{n@Gs{n|yM-?UlZ)~*siiY+QAW@8!a zpL?elMsQro6@$CtI>YYf9k{COQ<-T-gvZe6{)=ZP)ZSUM0|@5&E~W_w$6_=yG~flZ zg``kFhdR6t+o`c6_2=LBx1k*z@=iv|^AJA0BrGaO@K(f8s#0bF+F*1JBPgD#8sbms zBa8BPW-K82+-aS|%uC6IvxFfc}hX6!Iln$Bs7mcqtG`*O$GRJXikMpS0uc+iz9hn+BfucSY z)xdeoNm;o|2OXN42H$CB4FUL9bJ?=2|4+V%Y_C$rctsgx-2fr|>0cCcWx2xb1Q(Eh zU(q$ah#+43x~vtI{_HSn8%6F@c`o7;y5Au*eu4s0ja(NJ^>RzXi3z9nSs$H5&P&=v z-V1sS+08w+3KMViHM5PJBr;++C1o8^jIxv^LpJOcfQCr?H6AGw>`?746@NnoZxrza z&c%9A6%dMZ)c!IxY2eFrvnJA1yaVsy^ABY#eGV1MjF7ZWF%3ne2c5tQUdkwhq7Lc=hl!#rAXf;)a_Q`R6orx$-m zV=_GENuqp)Pj3=YNDmR?5G&G8myO4UqI|9Wu77OI7E=OuYl=H)VWcVO4o{;Pr_j~@kL z*99#zSx9>!9n=nMaEtgO^bxP4sLymdjqO^)_iO9IYixyS+K@OsIhw-2=~fNL z^Ff+_3I#d2$&v8^dri14Iu+X$WbtsI4AX55kJv>HZG6Ch|A=t%K2_;^)XXY^&<=N{{n?A-^2g_ literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/edgenext.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/edgenext.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6128a5afc745247e0f4c87c89b0d278255afcb2c GIT binary patch literal 11275 zcmdT~S#KQKb*`Bz>r`OV@v?$Z6MlDmyhsF&TE3L84V8vOnO-f_hD*c9 zrtQFNL z`vgAkvsn$?HOFR` zg21T{{}lEUPY$`qzDjis25ufh~Zqy62H 
z@yyVZWm#@D|Dm)hi=E8i+{E6M^baLXDy3=PqTKB(cP@#kJKTN^+;GXf_3)0z%WI}v zYgC;YmTA-SOmEfn8&x+jnbQc?PThBX#|*e#_ZuE^c4tG(lSHwYHSxjrd(7cWS@Cuq zOc-~xY)>a%&lXUBiW?&}O{!+IO0}ogYFoA_RIOfUKs%KNr+kuD1e&Ckl zm6_Dg0|{!@>rTU7aaL_`fNut#d8St-PS^6wcGa=mx*J%VcD30z!gZ8^t<1O>QxqLc z(V<7>-~eZxQ?K-DYt;tL+w~q_aA&DBVbr^IGgx!Xn>TLtdLN|yQm?A%I~GpRa%wB) z$raD5VoScd=70pJWun?dwczmhJi+3<-UkWa;my81t2b*F-4_4xNyo*>Tx*?c)nlWl z5Aj;B>fXktwc^?!|Ky2_x?g6D!DWEA>lSfEk7-Dtba;fAj&{6z1$q-axIKML)8O9v zXCO+=oHAX&&qVeAf)#~CE3a0}lgusC4Z4B{CTR1A#4%;B?gzYC4oqrjSKY1n%8RKz zeGzx>619`T!-n(5!j0vH;@!Kw#&$WdKFf$`=WhP@hkN_#jb9%eP~6QYux(tv)R6?V z-m~#Ef7X%NKim1IziI#bPk(fAPeak2Jp*Z^+*tltjx<_bq|tDE2*u0gLZ=s=BlwcC zR`K=MpezC@kV9!(Sy#d#Chy3y6sUm~q}qBYhxvJFSKgMzdz#+o#QV0=DujhlvWzg# zlnsTS2=jH7@-q@jGeLHnp1B|&O6!?z`HJ-3NNX7RTze=etPi7a0cu4-s_mq9G3LD& z!xaBgFcQivwIkP|e$an37z?wZysaQV9;!^=&^SZ?iBJtD+f$*uD^Cb@zoR5Qrh^&G zQV-Li5oTCsCxxD~eN^UxLVF%9v=BXaiSD7`aQjG@+r=JOM@6lu18u4`lTrWdnWH`z zDlCh+{dI6GRJMuI^Gv!ef!gt1T8DTm=IC$?A-ch3;%N5h6rlNY8xDt>v3z&SiP8<5 zk&2J>3ip}~AHA}Wn+~&>Tk|>T_dBG}IZO|iJHA!nHY=vMiHa!Gm4=bg5nNHGD_f#W zS0eCH8v8_)@|z8ZbK;B0uwvC>MQY7{5UJ35kye8?i?p)iR-+Wuna86c+`i3@ZuH|Z zsmSL#QOS$yny`NSC>;+yZ=QRbG- z?V1z7%t-}gEJPY?8H`u4p}noLZ*$`0NJTEvs9U7paO;lEqv3{!xs&x^(PYkYaX(VR zSc+6!;DasBMJYj*({+lB>x>Mq;9->MDs-4Jn(tIsqioMEhzi|mq3@%qCyT}?)77j| zu1^>8V;GGer{sA`PKd!fqAyA(5-HMSiNNQm(J5*qyI_lqWtFS8?_1W#($8Nz%{?!G zUDMzWEGD-;kcEX*^-r%P_Mm^-f!R}c9t2B`hkOA9U&PmMAQ>wt@(BL4N4h#C8%jae zWldJPf7++|zLL@Q6&X)i+s~_psy)i1UVo%0+9O5nwKYD?r^n>HJSGpeF*F5zv-`Si z>~Br<*@jCU{wtG7-1!h&WbnO=ufGNU59O8;Nb52#va}8>l_@)FOAS=KX+qd6w6suT zDg@JfsD|3C)JoO$Fcs&g@rITuDi2fJGT!30r^8fBzc&&{5PfL93d*}mUx}bUD<$O^ zeLekkt(9&WtxOAIs%^w#C(J^$sO^lfo8?`#MYGKE%}{D*(IOLOSJhp)l?!v!gQbWL zlV=C} zl)OR77btm?l1oU6d42_tNPZ9*{+h>wax*BYe&YeZNyUoah*X$Sk^EL9UykH=BKfPO zypTGHGsSNaDUtjt^+>OJ{JzcEI$r!Yk!WDyf^5iHIgblr;7W`sQ%XTSgxm<;4O!XG z;%g|TG9qWyEL!4E$)dKPXv)@jKa(faiw4QtIwYKBMOn*=1t#)&%evRJt4U4TvY1y! zu|}qgxUAf!Sq>$)zwA`2mescga^W~FLB+cx`nDa(BiFaSA|pj8m{ z!)QOkM%fq}N7;lZn?%_Z-ljj2ofMlv?d*=i=GZ(N!}}q27_RA&)iL3i9%beSnUc;v zC*F>+;~(TpY4$uj@qtt_*b8g{&kXy#s6EMwXqy$zYYq&*7#STG(e|KjIxmU4`3y() zym`0N_`G>_b=57qP%71j<|WvxP&RP%ZVz#eaM$3&TV z(_3+?j`WH3J3#8^^o^pE1!%he_Wih#+jdey3T zADzt&=OH}(f$>ju-Ffq56=2P~upr)p<3X!}Rh2mYT|d|kOvlC+_<>#N^G0bVr(z!; zmi^P|tPl)?ZF&}!t}XX0La~o~fX=|e`T>Q72M>I(Uv;79_Vmn|t}8OQb=39QyRI8h3&`mEzD zEQ1Xf7R?0`*pEH?#vU~MdOrLA51^+i%w(%^Uzo>FLpI`836KOQlzBjD61#eU!^p<= zs!ahP46wvCyXyB{j;WX3Z~To;|3-M zRWvZO`)eMwQr!vedwgSXLI=*Bor{6XXG+A*>Bn9#VS-F(Ke~xHh=HN|B=6v%oLe;U zkE401Z{;0_s*5EN+u`h2-3oC5RsXUG(m4hp;WYd=2uK}3lVEYenTBR`u zCESWVzgNh43N~a6=PR@%7h`_Q9xQ z9T+tPaJsGV&%;Mg?Iynby1ps-&wmllQz-u_oTnfS|6AWi{`VFb0Of2rftlvMk4WhI^1W37>c)MBF`Gg#g86NS zg^JuNi1*QL#s3ca$oJGXH)pYEHLh&WZXkEZbfPx#&~mLv}>&+;gN6-*cQf{k*M#6hEd8P z<{6v_=fml6Has$oF#w9^sTHsyN&&~ylTGjFMD<(py$b=)nH{BdRM2_> zw2opwMuB4WXzFmh)1CepX%48>Y=9oWhgo8bJK3jxdyzX*Uset4PB;?ixUE|CMeHEX&)R6zjQ z3a?8L`Sh$5I?>3L+q{4rb z`jfQT8hcxKHnGEE3h!cTv@-=D=?%b74%<3)tWW(PGiw;sT!Ef5{p9xorqBk#qFHeQ zb1Q?{`vGE@OR>-)NwYO^y^dI}%gjDfCO?X*GD)V+hmcYNtx070B#tXM+4ppT=1IHK zx0QR!?HJqwXs5v?uN9R@Svtdi18w_IBY}KnFObKI_;};u3mC*F*j$$sMc>!=w@!56 zj4n*ObOBy0O#F+;boxP#9zri*)^PX{mltB31a491S82#=l)OX94N9(4LgF;($PZIK z9p(D`(kRDbPle)h{EO7%OGu)^pdS;Hew(T%DIu{fP{ZO${sQIKC?U8TAihsqZsVsL zk&2l{DF6cqGz<+u45$K@zDm6Yc_LD)PCd$!gC2Vg(fEMnA6qtYgV-M9MWP~DQmC1X zgU+ye0w>D#9dI->V8um~Pc)`Pu_4LQB6*$vHbyJu>7Y8! 
z1P&KwZM?NyK(4~-5x6U{*gxXyM@YuTvxNNTzP3uR9sg3p@#=em?Xr9U#-T`&v zseM!X`>oN(BtoQf2j+WyKtz$aA~8kW?*l`Hv9v9<LVgEDxyjVHd91LL~AHUl-iW| zy8-Mk*m$b2UX&JDU}^qqpkPp3sG`s>s-Ri3q0-J!OsJh@(4w$SpegUAlTy65bGwN2 zP#NqlSa;BX#&WDBC?2x+h~h3J`za=z-f)}-#brq77Kiw^@!;R3ED2vM)xRs{YL>xD0LX;G9@wt%lKsdG;_{m)p7lyw_ z#j?dM8ssh|e?cWGq}TyfBVrgeI?5wx(Fw2=z`YX)oQ!}k$H1v$n(|FU*&68{GrTB@ z%nYy>nfmk7q9a(ZQo5~xFH$Xy zfe)^xp)6l38nKKNVo2;M?QWzw(A1oGGRg|ST6o0#ITRo;Pr)g%OZ+3seL~Gr zqS=8xi3*l*uARh(f1kF8lvsSFNq|u73D;3k9%-L$O?S6tu-^dxcP^f|e59E>o=O|L zBJ47$t}(afvlArH z`!7+2pHJRFz-|RuUZ#Xm;!yHCl&n%h@{`kD7hEC+=QIGXP$J~4NQBrG5|=dlAK~lM z`6JS$=&zA%(|)1rPkH`}G84q708hHJI*=2E#{Us~ZVkpK9H289}a`d9%N zyCmW!oY+{XZ=t56@m@54i8~dTSo~q~QwD&o7@LG?Yz&YxkA}#TwmMOMUZ66A6fTAO zg-(xu=P?`;b9kcNW5cMouU$-K_%FWhZjkdGUj*oZGrmWmhGYhg9;^4=GpW!Qn%m z){Y(3dYSJ#r>AEIBxRM$4}Ne1edhGJU;f+q|NnFgg`9@xJAe6yjfJmj+W+Fm=##<6 zWxW0ix~93BtGBcYe~p&jHY!HjteCn=w> zZng65LZ#3yR*LP(%A`z7wMy-&$`siUC#8dd5a;Nbf3g*WIV^cM@ZHx{+~DxzF6wD~>mH@a!SS;keJD*0bKR zeRE&0oObmaTKV(?uHb6fh%)c?f@Zf@GZlj=6 z8&s>Vqt0?J=iqrwZ1^wB7{9#sy60BiX4`ibo1LKivU79OQ>iW{=^)4J_~>~#snXeL zSJ`P&1b>kFGYm8$6>6@ws8+%}z6@)*BnnqT8$oOHMaNg?Jl)1C8c} z)94C^B>}O`z6ASW3+!OD@l^n+%6&kx^uR&zCwTFcEgNU1LT=%nU7rR%jwjAv9*0Y6oego3q-!j<3wfm&(tE+7@5i zMVaH;UOLDfOoV!vK4j~8`Na~t{4NB=VDg|8Oogd^U4@6nzP^_Uro#+=vrTO;x1sG# zgqdsF*O&J4^7qVMA(#pCu605S3k>Hm8kv%kDF~U;F!qXJG0g4i;u~l$hY{z(;up1W z;$W7s6WSqWed}wt-p8np1#<_-LqnB2G!8kt`A|DJ;o8Vq2qzBpgQvo5m_?oe>Z<`+ z)2kopQD(5O8)}*V_&z-A~J7n??zTnbhjd_MH-lG2A&8& z$NVUbWOt_?L@810YpP7GD&*eob~bK;f^?KZvJ<;lqMy;dX24JnU!itBQU!P8uv+z{u) zaVF@Sk=^TJa(<+*MpMIXyN!nLA=TpgMMiH0?}f-#$=HZ&wOVi0e7{=#Nc;HjpA+3~ zaIW3%2`><}W@jwgb-k8+{yDGFXx5vcy1>4+CzgA6#RA6S;PpR`pqRGxoNnVU zrx*2Uy@ar&&l?%t!e0h|i+J-$Gk$EPin{&KHcEQtVSnMZ0aYFl^X0*I->j1j4Iajc zmJ&6;hzL>hKnI!-F?BH&0DWA2-w;!wwgwcv0c6~=T*Ebf4mn$?h5^tA(lV!P_D|F| zxx-EX0^i1TNwfIdT8@m``fAx0C(sG@m}?WIH^F6sK!|Hd6e|qo5qxBb&$5)on&L8( zuQ0gE;B^H2@=1E~W)TapVDyhaI$_zx*kmbT*?8HG|3d_!zNLL&?iqXL9%zo5_FgJX z9T=cVpg&uts~=hioUZ9wZtBot?pBI)CVtZ!DC64q4YXtLrS~$S9i|U5V85U=B`#K$ z^lK|8UPCwm=_Ml1GAqHG7W2iTq|X=uZ8%|jy@%jBWC2kLN8;^rVm z2`Ag%1nAtZ3D;@tbRe;ihbFveoC`a6?l_BJ@<0?2|7NGR6F3d87VH2Q6f^fXYdyJV zi&vJM)g|X@Ice;{Lp+MQNIF9j^MGIjam55%eDD+Gua>7kL)eJ;46>p$T9#kLK9B)V zw08mzMS87fps?{-=47#19lzK0J#muX8M%3o4TeA=VI)fX-A2%^-4UlzOuT@goKwJ} z--+}q;$_y>tEzYXR-|u7I>-`$tfOEk0i-A$uiTeW#V6Y3?VNr}cl24qGUgrv!+w%C zPXhBSy?^3S@QQ1fQ(_u*#SDX42FDP9K;JCqNMk#oYSn6#t5yNb5Hu0bSF02o;+%A~ z>UQhZs>FP84wa*Hr(4|+HCMvA_!^T6F_Ecztrp74S;*iraus4sA;#CALy*xd-6$El z`YV2HpDyX8j9`8p@7Tjln-uV_GE}AK4Z*DR<6e+w5eTaFmxL8h}n#e4WL9hC!9V z8UxNk!GW!qnrugKVj~?~=BMnXju>l8g2;W(8}p`MD&Mh(B$JQ-9)i)G#&iu2%Ct9>7fi(jYN?zKFK z_kl+#7D6nJH4qnKD#dd^LUR<{P~7eA1f)%hR5>-_Nx+fx=&=WkW~Z9avG{~>ZE&U_ z2O1}JL&0{l>AK!uQCF2o=;W|62KmXo#yu6 zVQtd!QE7Z_jrA6TyFKrfGgns6lyT5Vs5VZqL)kx;`0GmIPkI}kbe(lCxb1lzM+!o> zo4%Jc#9mQxH$JC~A7#dd@TJ$^{>u0uTpX>3$0#1Ou`~`&SKfFt=a$I=a3+tM-Z8f`ih{q14$dAcRF78D8=nqVfFZ;!S3Kt%8{IdO|YkHHRufI)-7l_cOwNOFH45uckkmno5PObwTE zz&&!0;~v;6V25zn4%wUHCKMr_%s@7gyM@>1=|~C=GX^%2qJ#A@LL4fLpV(&cFH2K` zW$Spy9*&cb&xt&Vt$@9xBzv*JUKAryDyo!AhN4xFW@(i)M2d+h4NTu98|gfWjjX)5 zGD?pguXq~4OyF zVmgl#pVVeOfzG?eAvId0^w2pWMoJHEK)sjDNe3BdxQu;>V)96xmEUIaokRJF(3YGb zFX=zzQqq5^?I{4qb2+#UY9+0wl=}sV)>c5HnWP&jle^;x>YE=CRx$Gmu)u z?x5roS;B9jgwJD9F+-M6!XXOB5B?ebQjFT4f+f6)7CwO`444Ct?x}=1Xk@F&0a@?= zo4+wOF*wNl4RVTMlM)=sENrz+}xxF=$d!6$Nze~%J=4#8ZOt1XYMlY_O6dB#VX z$MFn7ApSIr;x5Yl8oQZAAQ{CJ8HHjM#s62D5iIctdmym!@vk8mXAgrj#|U$P*~!f` zaK3~MgNz|tNyR6PA%iHuq~Zm&i`V+^^VfMps?VJDR=2+G?D(`PTpU^zd?o+;OU~Nb zC>LK_8&P^lC>aU^@+cn}Zy%)uH&^RfIT;mjy@^_q7W^h6F)hd_S^(u4fHA>fGk^>u 
[... base85-encoded binary data omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/efficientnet_v2.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/efficientnet_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37075a5102088d2339f9ff25c69878fdb1d33d4f
GIT binary patch
literal 12500
[... base85-encoded binary data omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/hivit.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/hivit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10b9339262920bea4fa62a140c2c8fc396e3b2a9
GIT binary patch
literal 18218
[... base85-encoded binary data omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/hornet.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/hornet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bba3ad677337ba02dc0c748fb97006b17310101d
GIT binary patch
literal 14571
[... base85-encoded binary data omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/hrnet.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/hrnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36551ed3ae6b27636c76e58614dfa8269def6776
GIT binary patch
literal 14713
[... base85-encoded binary data omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/inception_v3.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/inception_v3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..64da4474d880fb6aa12aa05233f3d00c6d177118
GIT binary patch
literal 14122
[... base85-encoded binary data omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/lenet.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/lenet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..480c50be05841b28609335fe2214685500393316
GIT binary patch
literal 1517
[... base85-encoded binary data omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/levit.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/levit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f9020dc1d246a2c7a8d2cc7a81e88cf1d45f11ed
GIT binary patch
literal 14302
[... base85-encoded binary data omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/mixmim.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/mixmim.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1ac41ef08b62a9a70c6f79cd833fbe77f782260
GIT binary patch
literal 15199
[... base85-encoded binary data omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/mobilenet_v2.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/mobilenet_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ae5b1af6798d6258e839f6bda0ab09944fe8b93f
GIT binary patch
literal 7458
[... base85-encoded binary data omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/mobilenet_v3.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/mobilenet_v3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..feb04edffbb93712622245a2d2da9edc0fcea03f
GIT binary patch
literal 6037
[... base85-encoded binary data omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/mobileone.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/mobileone.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc9c50cf31b1ca1731a6915a3edd4b535980263a
GIT binary patch
literal 14017
[... base85-encoded binary data omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/mobilevit.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/mobilevit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..127b0fedf8f526c101aedd3a614ee328340646b8
GIT binary patch
literal 12458
z0wz(&*UR{)0NacHFoN#tfI`jNtN25L?HuBs4^O=X#**8kpK9U8K;0pt7d2vw7L7YH z^{}OY))Uqp_;pt3o}a|UGgND6pFg4a9&+~^C%#X4w<&m!g2cMC&ld3oD&<8x*=MP_ zi(GP-6F~3n8@T7Ed#)c^w4;^nb~_y881g_5Rz2wn!~l@q(NKpXqs(MVE>@W9k@s*A z{&uTwOsK-)1FD3ugf)!+G=OD$K?WW{P$B7riN8pI3J8A@0G(8rPu3HTTO9{HgdidU zM_1A($iK-Ddz4d>If+ZWbS3?1YI&N1uTm}8i=sc*!0pCPXyG~)ghaRqlC2{&`PPdU zX83Ok*ddfP=lJ7-PRE7Q7!lSm4`wvF*EjbVn+tH)8_S5xl#xDi_mg8Sy*F z6u(OW`G}UYk-S!xexH!a$|DEm<^6m8F1??C2$`-hGhPvJc}IgS6xrl~g!FXokP5Dh?aSq!2AB7TG&Ieek8U5Smo zh{yXk2*|UQoPdGMqg%I(+F{Itf$%ukEQ--((F|A!V#a8e+T9G9{#NO4X4rR?N%%{6 zlf7VJ{RgubwZqK77Pw5jnGe8 zbckH~XBPR_4W4vJW#Uyp1^A4Q<462U3V1(gq2+%3D~d5b_|Fvk=M?-t1^nJvlRR8YI2fwL@6ewg7yDtIbtro)LN{h^k9P|W;8V@jfizLA}kXm!tvt$i%nQGS8-QgY90g~ zN+xRMsdUMLJjNi>bzcx(2$NTFm8{`OM~WgWjy zo~7V9s$$C9(xLe#-+mYWl}eT=s8K-1Xu6TH3IgMbh?Mf8_rYQhJ%!eb&^6!^{RKj) z-pdGZ*Oq-7Mu5d7m0)VYG{e%hr}956O>1-c9|30X1GWFy((^wmJ*9l5q$^)B&2jaA E14R1$Dv4(mSsnNtYpjf$}7=AB(Je!8^h*Q582|p zRNX^KbQ2Ml-t{7g5}$%3-VI0x2qGYdT=o-p7J-{V)+E))z2&!7L#FYEmGCFxg`=zr2koWtM!Hv~*#azkp$ z6R_W>oDt*{*${GuYy)*hS&0qXV{8O1 zkKR=f8)M^lWj2A|WIfG}u&EE_%DA21nb@TskFsN^d(19wtJ`vAk||fE@?(3LrBs%K z%vJkF*KXD9rLr1iF7S5eish{Z6DwV(!Kzlxb2co`X}78k>$c5EkO_ZGENPvTM|7|D#0-_Y5MoRFRnkLdz93tKg?* zwQMS@v)o5gU1n*P`7m42Se6+dN|n?{k|tGjOeG%}7;(KD4uCr;IGfvAZ zpEbi7h#U;hTeHJ5rcHCCIn9|D?7G!$c&_QS&4n{_y~e^0 z2$yOkN#cj%^rz46IRu`xE$zs=k}P@3b=BAWlrQ^=uQK^=%9pB|uk$Hi_fy+4QaY2a zqTEhuSBg_*g*Iw#-R7X7;F&-Vmpn*Et5!}2DYx6fZp~DyPRsGC)j(;r0{vC6`63In z4sTx%G8Y-Pg5%u|G@=M9X|CO<2S&n;dlkQ>}-kth&!yKh~I_+m3Xg~)-;Br~inQ;tdugTAZLcA^bb zPA%~aO79V0yk1uO*m}6u?zTK^l3Jt7>?$@;ImL%jEF}FXVnO!&#wtxv>>RwoF@j67 z#I<5dTr5J5TSQ%$7?N8=Kzu^$pyQwaYO@s4>UVRls2y{zFCW27_%s?U%ls5x{7DKD zMldN!ymB6Ym!?{jH;3ZhV&;@XZY1LM)lHa)7<`c$Ac7GS74=evQ|^6?lR_pv?(+x+ z7)N1>t%)^|S*orHHi8iZHcEx&Pem8nJB_t50`+pEWBoUF3f>2G^i9WGGZ&s-=-1Hh zpo+d0s%TO2ekC2s=~zoYO4&iSh1jdxme=KWPm5jXS3?fj31(a*Q8H@|gLr*--LT9d zmD^qK;Wbg<@NQwRRcrNCbA~xJvLmj+a)5DDcUGZ2YZxeunZ%|uS8VU5ZMV!$8)nB% z$F&DEXunl7piY;Usj(UFcE^6{#2c?&diz9qc{!OogAr-Y)EjM!hCn7I#%prRJFw4B z%|*7tQlL>Q&>nUR&TKFas4-q~v0djwTpr zG;c_MAsljjj1vqU;D#XIzrsN}S~dOxT0TscTt$8;OZ1GKkxS4$CHyw?*fEbFN65C0 z<;Z{rly;P!Rs_K!{TSy#KZf!~r@nhkmODd04WcY)k%+XLIA#^Xp zPf!ap6qG3-)dTv0sS=gPe~-Q-@-2~3B%fZx-zCc=R&*JN*W#C@BB!tS$tp6z=y6|1 za6ne6kX0IkdV?#ak(U8iDob8QJyd`>mKTz%z=}|SiR2mzgmisf*e$O#+O>7-rx3d& zal<@QWJc0zP^|PBx5ADQZVED!P1I8^u@M|e@(1fnGJ2bhP8A!?X(#)Sq^f(5EuW?WL9s= zwB2DbKtc6n-Bqs*@5c3dJPEov+CBYh8>#e8&;zWA0w8W7h$@aVIIlt zOXPr?SE=}e>Aql}d^Y?l6n!L0OXmXRjZm_|#(frh0JeK?J@_-o;HN1dQSsTY*M9ZW zUwr=WFW0h2#SdARB!Y$b{ToCQ@#`sYW8`g!+bMEWR8NEGRqm2dO&$2Adua$}bxhip zx3V6Dwx5Lqq%&1mYgs?dv~`UydWN6gmRqWq+sO;pr{LwT7yXPkv{NEEH714o6+H|; z@XLr0=;=tH%X|TYE^GWny!cBLe3t?tb7|VwfL#D+>&B|a<$D%7YDK!J*#$ca{*Y)C#qwX;2r39?r#4llr?s$QGd)&dRg zr5hCbd17HOxd^2sbc+f>A!ZX~d;2}e#Cx}IT1(XHM1PaQ8x$-R6sc~cn5e-+Y}gM_ z-z6z+7$qeSjguFaZ0J>FzAB}4S=-l?qMQ+7et+{wBK<<)_dx%QcOff?Wx-v8Wnp0|OMClx${9}d;D3N1$aY&U zU@v>q=5JEwMG8pG^2-!_pMobTNc4|M$>RwA0)LklR0luMa+)Xm9*G>Ft5 zKZiPf4JHch6@0jkzi3q`v=IfWlT;f0il3LB;8Z5G-HB@a0tKW54{Tok2J&jOS>uOX zLi!70*AZEjwhT`K=78LUInHi;ilYj(6gB%H-#>@k>JI(wWw0h24uzQs(u zGwj=J8t*K7l6?nngUyK2Pq7my4UkxqDtT~T8Gvw)gRFmqCz~W=Ys|cI`x?pJ#NiB) zKOme*Vc@4iE3)q#*F0bIfZH@~oA0byp83Oe*Icqun)LM#9AR<2 zxaN5s_w4+<#cw$qb8WslZ>_lViwg@2a|;Wn7oLB4xf%i{fY6<~L$!IkDe&6mcD?S@ zz@#R(J8iKGdd;oEHSVs=)!NN@*K!&s-PJxAZ60j3GT#)qTL`vwKoN{8gxFq&w*NPD0vlsA)0I*u;#wiSQahqk6m1Xx~I-)4SO= z*X`Q?)(7gGjEq%trs25WyI9lr3GnR#O6N4Xa0i-LKHI|lg*X~Cuug`!J3-Pw0s31Z z{~Cr=)KZLWKfJA;&OXrAYlB8Q4ej!!D+g@sXfof>Y8;?Tzqx+m>cZlQh$o2Fu`%&b zAb|>fJxAcu(jtQ^V3QAEpFHz_&kcarp1|L+=S7Pwa)s|j>k^_qP7{jjzJr+u()hWA 
zsnAV`+4px5<$TlT``kD&3)bIue4g`78(&Pu&e}I0jYw3`wQ%M}do!e;I64r=R}Z98 z1pX2eik==ur*GE7{d0K`slHkJ3w^XA7f~2LJ*V!%L*TF;Q3j2R#3THA{w0A-Flw9{ zVVChSFGM>eAOzrB5r#;*=H{9M$=3oJ*yig%16?OP3Z+$upNnK3nWs*kfi7}8&;rhe z4ZT-ub4s^cj7}nZGoV`U3Q@W+D;6^5gxOHQE*_fu%%Qob!!y%(ApLUdBwBCUbi_!< zn>c-fCL(h(Spw+NO}J?yw+G=~r845=bj2oZixTDx)Hv7_Zkbt!PsgGozQcMwH5*YZ z?3&I5>m1GmF}l^d9kxQwU7zq4o>vcAnKj!TA~DR*vp8!8xs$`kKD<_t6F%!n7<3}o z)8klK+dGH&-W>$aIiTl`1n}vewny+JZwvlK~YYwJm=FWBKQ}F7YGrm0YDQ zxiuwf>8SMrgIBsC@p*>hThx64bp^Juli7vygxcw-w#+ot1!}=kcn>pR9;o+yq%%`e zON9xZ{Z!r>VTQle*uMt zy)nNas525$#~Y_KT+n0hDO=-Pll~+ttV?{4jrx<5l3(!0UlbwcURY1LduU_KpZrAn zIo#48$v6HLC8X*ER&3&9nGew$Z5`b@;!VOy9rlmVOxXBQ%m(OG3G|<06aEpZzjYL7 z!4dzcKeemCdHpZHh?b_1ca)Ci1#&dG1Vjku-Z($UzTJH4?UuCxL)%(u*utWnHF2E; zms@b5q20zySnV^L!(|i3e+LKQrX`HnIkE3}6O6d|0@)J6%ofKn%`ODinzdn@)1lir z4f(;$>12ye&zdVZJXwW^3FJB6LL+>g0&+Novmb65+5kAZoTA*`sXaeRN!khJ!@XvU zzOPUqnxieZd2+f>Z%mtFh~l=48(kEl=|P3enya>FZe|g8J=>g$c1K_AQXR> zil_~{CH!LkK9T}<3kf`nQup`1rrnCoJLa!(Ssg3Q7H`Am6dGI{;TMrtyUQl#!g~;2Dv`#2cB-i%j1KGiC5Yl(NO3Abn-Q-FMjth(s9;BNGhe3eH zz*z3zqGEB_00gK2l+yRL{k&?(BS;w{mC10 zU`QP!#O;ut@BavAV?|62yFR`h(mV5#`5)rTVHjXA65)B(o;B;Q)sW4kB8`c(MW+cRwJakh!3NRcNtFB2?~V?oucL`>b4pk-2907iYes@)h8HUqRia!+k3D1%jiu2Fq8s-^B6tW$=L zmaeCk}!B)4_7rCN&({6K(iry=?YhLy5REf zAzKK@WZC)0$b+&F@+u|D@;{>5dUzj6tnopbeuM&lOf)qSp<`X3{&Eqtr41RcF2$5P z+HVHdcTi4VJ9L3*kzHafk3eT=>+ocnC{9!eHFA6!J&baE6#+Pwu9k5cTYM`=Bed$E zB>Myf{)z@qdz6kb!fQCMhj^H1q%2p*<GiAE}NFbK2@w@oz%zKc+MkYNmB1ls{66m-~ z0goh9byv`JkP$~sbFEgW9fz9D+QwY1)tX!34)FN!yoyK^D!*bA_bpeQmOUpBKu)_U z7)`G9&2f7bnw8(?Y5GzCplw&<<8nysKoX9nAFf%=C2k^mO;i86M6{c>eh7zo~XglJw8i=zlV3tl)JS3MMhRE|q2R zt29#OluR(Co^E8ynTA?cMLSi`Hng&adYWbGxkkR67j?Bh)F_k-vSev{!~4=363a5} zzQi;uznj{X%Ogy_B^7fASV>wfD#6%V+paUC;xspny18TV7}9ijBZL&?Kz--k8#mv+ zbs%FDHQ%?~Rm(#yf8FuCy4AEQ8x@+e{~3!K?kNnj#FokmlglY9%@md*!kJ3seX0VI zV|g|NNG5?4*f1Lb=4b*LV~+tc&L$GbBs;;T*hzLOflRa0>~YL?CV>>#S@r~bvJXdR z=h%7n6ni?vVbA=DR?e_z*>m`tVMX>ldx6d3Z;q8f**sfdi=Qe2vcO8!u}`F`%r3B{ z`?<2pF0vQzOXY0t5_^@s#4g{L%bJyAFS9H6C9pfszQL}de~2x!Yxpg&SJ(=Ehd+_h zQh5ZTxK7-xft#k2lccb!ubGvNHK%En@_8N4I}W#W1{AyQ>J_u8uUUH4X*Zc}H}yL$ zvt@Dp>bmE(+$;0*Cf~Le`qqLzV>fAp-LyU1tm|7g^VSbznfgq% z?wEw<)}59`GaiZ9=po)~n@!K|n4ax4`)KhTJ?gP^4^$pC4YOJv_7V~nHn1RJ#~eLG zV2wNNma!EN5pxoa0)7a0&GC#zyWS%dfJpasPSnoocF8K~)^^Kmxf!V_=z82{ zmaEU)vhKC5X2tq82>uu>x)pUqG*@lzdiq-3scg_xmI*nzo>_xhkPzruGhNH5teefI z1=WZ)ejPwP0h(2kr0ZD?T^#VqdbA0=~(JracJuT*P^Q6Sp@ zeKz?HkpPZh>+On%*%-B|c1@=-hxLp2nJ5g+G2`vDtmWsfuRb@cbLhU!E!U`VlP%xo zZL4^Q0&5diWoFHB>X7b->lQJe)+c%PmJ?hNs11i%vw9PztG+Wkzyh&-OXuxolb8VB zdSETFN^D`%&wvnY*6!o=R*%$DZq)nWKx+`WE|k?f<3>E^cCXw|LW-E{5Rwa@@G z8ZB;#y_HDLFm`sM$pAs@?K-43w!9a&=)vNk)G-M{}B0Z&tF2ZG@r;l_|_twJl z<;#PLMPPci%;gK0N=u8!^fs0+lrFq@bZ2w}EnhrlYH^D#A2&L>la?1t3&mnWH8Fwq zTB36?GYA)U-31o}drBPoJhM2fFX7(>dIz!V)}~cAV5VV-X^@x|YOrN;hTDdqg9C($ z#DmB^8JM9b*ixw^IM?Dh1~Xn8QlCjsOYS;+n!XJYg)d*4)i2{;Z#{x8z6`1_;@=?n zB5j8L4T2XJ7BB@33*iT+;G@>QGx-;9{Je8{rS|^Mp7{R1d@H?jK(6b-6!nEZ?3b?; z)j-)=z-!`d1ldSzK}N_Y$VP%VxJ-kV^Z)Q?t$%!VC9EtfD+lip_~#ugBm*2y3h+2; z2fsmmKm6-|pZVzBD=P;J1e&Je4#Ah`x%Jvg?Vgx4wQ?{+eJ82-86nV8%Xe2-!Wn)j ztsFc?;5TWpzkPEh?9XRc4z_8?pHm-=$bb0$O6@;pKEk@n%0Y|zCQuZ$@-Vk*P0QOh zE-oxIs=elAYIc&|#nPo(UvsH+p}%>tw9wzYRJz#TyiC32LcGu-h90Dd1f57oA1L_Y zPYo2sG^a4>P*IfmIY5JKbhp2L@DR^xpj=Ct_e}tj`>vV_@jQNLFturM_a|7^ zlRVj%c9lKFe{5ftB`@_r^3uBs*B?l}bT_jpxu?I02PtoeV~71oP*d=y_ETbpDQX)d{uIhlZ_Gcj z3u=wW{1nP@KaFz2$C}2ZKf#7JGWjVncENlrUvU;Aeq(%A5AHSY={Ea=#Ez4banm4&~#Z?Q#B>-WfJ5_>l+ftUrYE z2_MuOPx`|sb$=A)Ie!G@d4CM$Q~n7i`4it)rXd64Y3%bEU-47>iY)o5?#UWKpY@)@ z>`xPqKK=-MKh>Q^`-FdLTI!xgZA#RLf>BXB1EW(!YlK?(Ezs{I_zMo7_0RfePD|Y< 
z0DsSH~O9^@M+hjeI)VC8&UOPcp2@hE76fK3BTH z+?(}v|4Bi=bobf4IbSDTyZ2A%DeX(x)g&7ed!1(zLd3^bJGt|8Ii0_vpYNPIPwpvs zbqEm2u_iy0dJQ;69&UcOvvhO2MP5ueC~_Mucp`A1OyRe}KfqmX>U7)9>NQNp3pfLO zn2Hf9$OKj>-j5$Tlxe(VCcc_PL45yM?cyvjX#bCtdkSck`6#-+kiJj?rL+*p#usvs zChGX}fdAK}dG0vgeD5|NsEUhyJ`UB~`OrX=T024hYTb6diqmLa^J#q|W>V@r6-K8e zBlKj9(p3aYaKNsi65}7c30}k`Aeii>ppL&lHDqX_CYfTsh@wlC3sjH|Iv_4|=9p!* z;HJa7S%+Uk@>sL1COIEB+RzaSgAnSmw}Iq=;BjeInNYQqCTVxWqv%MC_FuxlW9 z02drlaF7wP4}S)`D=uJ-HoCZqR-ncF4e~Y|Rrt>cT7wkm3Wf(XFG!O@1-aX>yEi#^ zIDd}DkJgYOLWpd^*QC28n5tP`6yg{Z-^C2cz#XYsNI@l$wIL$wVBBPP+pcGXG>7yS zM2r-I+@KKM$drX}L0h{aR5%zW5hZ-(D@2cwff(%u*^s+#kc-)i(a2~8Lw&(%kZPFQ z{2Mejm&iSsK*;b`Pu$^#uM*PmKorNX0mfgUVp&WfL_@w=F#f0;9z^?!bY#qEm>ZT* zv?xKBr>IHD3Pf8_C&Ahv1xG5#K$1-sjEFG_9?#OUd6O{%&U3qN1*+L zDZ&i2s%3fxvm3>8L7JRBK2Ky{qT&`6D^z?y#jjIAzKh@sFH?;+&p)D~x8;wiN&Du% zi6YQM%t3mo3SqkZHub$ng+ax)2&;etod_+EZT=#4y-dXw6y>2lX$KiGQ90t9=mBSv z-3NKWEQ2CLSfiX_R1n0th7lfhamsIEQreZfhhl6zpDM_CIsLGZLMf}V_As4N@k@W& z|BPg(lv8qA(EwM|Dlo&IN%8wM%CQKd$O>jrm3S=uj>rWi4ahjg(cc+a!(4=^pcek7 zv1<4iLeziDk6|3fYRZVx89ik7D&)V!4_PN<5~bJhy2MtG?l;+&eI#FzP8EsQPwB2! zJoo{3(}1U#`YAjHz%pIH_HgIl&4f3sm%XcrG%0xwIdLE5R)yCUdT3-3UoWPEp$&^8 z31zr8RGI_@rwj^*`zT1S*`|wRakk~S9sgB^GZdR;fo(pe}^d-oU^;+b#8*yB4 zP+`Oth2(gZFqCF9NWX8r{XvlCR=o|c@Gc<>{ahi0j2ID`bGmW35bqt-{tB;q1_eYC zu{mP;e=3?L7aoo%<8tR@%-Tbz?m)xH3R%PW$)iGT!Dn*CiN6~eR~255jC$HFhzmKQjgY5uOwL$nVLXv?u%NeaL1~^5v6KR|Q1Dn}XjZKQ8SPMhcP;^={Uub{hU$ zb{`|UT7;ePH9xbjV4jSC(kxkD+b3@(=WBbKm)n>3^81pnQmH6it*c^8u1a@y%6t|w zcq6jWu1@J>VSU0JYZ!oseCbafg}G^p;s*1{1c{Qup>ft{;sobg{kh`UcZu)&Bz&a_ zLPHyb>8Yhv->50?9Ot0Hb%j9~s-4uzcBWCL5v=d_9OVZrPaxn6e;t7RI zB`D~nq=={}y&Ui6hR8NY9$37KLnG`>+l8kUWoB=KY8HVoF23!PERD#TT#k|WIX{zw0tG}q5K=Z3sA=2^-=PHg1sc3ji*aC=DFm z3%yNoatVbSR2;%-Y~dymcQ&VQSGYKxT99cuTlkKo(nguZKco(|-L#SG1~yz5=vPGm zJ%{Ln37|t<9v)Dd@=%`&lw)Tl7Fl?=XCq!lxGsIh@&aYU<#kLLUM#{0^en-zq2urI zx~EYTCJQjCV=%>Oc?yPlOqqg-Jq0t|nTqVdAW*uPY+7?JO0s#Odf017?XB&IhQs^x;n9;8i7HaC zce!1&o5!ajI%@hihtdPii*$*p-70`)o?}r0DL`U&V!M72}AJmOQ5(UjLD(SiAZjP7!?vz{Lqa>?G#>@ zoE5qj@6$C2GrX&aAUlmKF@&Xjg#QAT5`j0qT_PNp5Im<6+)RXvt2Bbqb(<2#RW)CS z_+J(7D~>k|07WtQV6tY{kq4ziDHI)=fy{E~y!5g8QGrL;Gjw!fOC%2XFFW6d9qQgk ztm}!Z9e%{>34~N6q$f6Cky3%=2#)+-@2gbrJ~&c0q8uL`_|V9NE@Rn5gOV`m1cq^`(4a45p<%@opT(F!-U{TkL#idhJ$zM{T)Ns- z369*u&RnEZkH#RsGTs~@l1z>uGS^S!uF^*$Jfl>@ZYCtcnr;~o-ce)@5t*d+rw0Rd zT9c@!K>ZQMJdpMTYxW0M<-ZH$XjT3r)cGHv0K>>Khl>kGilSls=K|>~a)(q&Rq_wj zhyPcxka_IeGygrzHL&*YCyVwBH?2H|mHCee8*J*>tzssmPgwg^8uTiPa)0KPN3s==e9 zl03V}FevzM4~em1_zw`2)rt*_C+{inwMD;|^2nUPgNuy{Jh;rh)FtmPEli8n`MWnP zTq)uCnGeI^Fl?j~gg|@C;Tt-PZ|{))KTXTPLGPR6SD$o2oUm=7_XNUv)Fh7DBPv7& zTAb`tsE9+Og@=aD^+u{+eMm=Q^>I)y=f#=^J{WBtb#9)da}(>Y3jOUYCYSwDS&5?# zxp0GyCHkvJh*r4b59s`1qA7DytsX`0Q$%ha`%)zYgAWxb>#9i79J@UKDX@D}#k?6K z=pPMqg~x%8*0Z!q0Tmo-ijICk>l9V~6T<&46=LK&R3nd$e}ba_{KZ@s+jx&KL^MzP z3`r3u?`<@vXer{5nwu0Sj*M(RMP)jre2uer_Ls_I;QW!HOMEbf2BKFOxP7^cconf# zTm$TLA_uY z@Q&(HPu4&Rv4UjCTSUQ|RNSQE4i)4ri+q%4SMWcA)*U7;3Qh);lPvjXgfSeYR4P`z zj+uJf;DjOsPv>n2uS+Xv687-YxYGZpeyN-Y2sI6~@VjQS$-jfnvesx+HcJ&W>82E^gM0@(#b83l!+4K+0-|1VVj Bi7)^F literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/replknet.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/replknet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea73f08d818d7d729dc987b024c2737dd67a7fd7 GIT binary patch literal 18484 zcmeHPTWlQXb)MVK&R)1&QWABwJ(4fdx-w-+Nt`IY#F8w>w#+z@+$8I`qv6hwTyk&v zXDCYS5_KY{Y0TO^Np>PsK{HjfFALPZM%D4N=C9Q8ZrpV^sR zlCs<;tqpXj{pY{m|8u@`{xj;1jTIF<-+1@$=l*3@QGQH;?2|*_Q}_ixgX1Zl+EVKB 
zt+utgrb@rwHZ(<5+PQj;W3-mp&e!v8t8Q^vZx!0bdXfD`tJE&n%k0m!#@dy7h5cr0 zyj`tV;m>jlquXq#Q z6dTn^}Pu1@^&Mn2mI>B!A%9R`@I8*J>XB? z)NiWwL!Np`sU5nFu`4q*Et;BLZniwfZG_EhZrJR0oR)jT7g1Hlce1;=w6im} z)W`^L^!$cJy@;Oz-edTk!!NiDr*C-bvLdX$=4l6%H6v6u)JRTI{r-#a--dFl+ zU+e3Aqi^T=2Qy-PSU#UfL)I*z^Xa z;?Ug+^jyztcIK0I?xDoI=q~pLQxS*m)`Zt=@g7Us7=*VeWS{rv+~rnihh6(*IxOCh zjX8AFYc^;F=!Jp1;s)i!5+*B)bwp^cId4-Lo40jZ53OgW1^dbss^}PP%cqW>ojH2k z7T6d~tfMn8T<@vNV%e`HDzUvi##~y+f@rk=3yi^i0~v2VI~tlJ~Cf|r>`t3Yg%7jRM&L4+L{4Z zSHCy94Tnm1 z{|MmV!gAm<1M9iMZTq1w0`O4q zS}*Ve+K>S;BC~9&$zNIBt?pCBDMUB$8+wd56g&-&uhdmfscW8E*9%%f_sU;W=0NGa z+?$2E;hA0@fIH_|UIA~@D|#io^Iua8rEX!fjzv?S^LZ&Qx?IjK zDre)9xD{|`YQRy!e(>yLv)yhBH0ARPK8~Fr3KW2DKsaFFdB9gIV9NQzUOTwnK8NP&fZUK|b0MM!qB}6phF|{J-vEQ287EUmL{}}c03GPs`0~p>zG%q+#qvZyHvX zd=y!~hF|a^oN7UmU@K>A1Ix>AX~h(zlh1nFvdS4GVzR&dw(*PA2ezlI)5$CiVHDaf zMA*+!M~JY8Kq($Xd{OR0InY-(RUj7N3xt4ND8xB5cfU{#9owB6qf)k9T?EnH`d{y|AjS0^n z0vJwWb#vIa&vRUk;y~yW2m2ia$y|N*($0KjPITKg3*H1InRkE~kFb;J8A-|H(()15 z#S}q&8Y54sPG;$M)Cr^%kI&OlR1g`S3vkB(vE(>lbab>pQ)uhd{)0wT$CLDuMJ5^yRH$kq#XHX{!(JunV$v*0c*VwE|J&4kQo9fd(^H zguq{RoU6-jD^AIy_uU41PBc)QC5LC|CHSJTm{>M^pof@X@>rgtd^R~m_5=FS^jQjY z83#1A@|N|uRj8IYwSk18XMkRxM06It`UYkF@N%eAbI%8SFu!R)EGIk~HMKQ_~ zQoV3t#{D_Oy_Z|PZPDq;M^9!!&4&Y6t8+6W)BHYlQPBe@Rh8|CGQldEfKT~27D5M8!{}lYivTp|2+2Bu< zC%zi6A+N3Yn9N=mbHCCIB)F4R+1U*=Jn|vk>hsTCeDx@6Qj>BC0FkrtD}bPK;sud* zQGyd0)QOp)>jzR9(vNsRa1BldlI9tb;yPl<5ECJ;8$4}Y?dYLFnh(%qGtBp)7r|A7 zzMsOD1eE}#fTj55)(OZK5sS-;k=RmL?wdEk3!E{An&!+up>ci)i^;7fy0eTu1>uBS z=C-hIpq#B$#7B@#e3l%hSwv2xn#p9G$Bu>t6` zsqO{tzopIqzkCtpMV%a)1o0Xy3JXDM!&g)yVRb)OQ@woABEMO__FdLCoH zTb8&;#a||8hMdp90sDkxm0>xrP)gcF%3f%he~cf)PGT?CUefwi-mV_vEzm&P&_no= zo-L{oPXk*aL5kQ5uG7F?%sGSE3+^wOz2M#wsu6R*UP@6B%=Y3-XItIIk{juYk_MT0 zaB#*Cp;DFifA^{&zYK|1-lq+&++q=YmfA{bskgF){>!pp4sG9Sf)3l;R&jht*ltey z)5mvcF)LXQQMo}4+$X2CG;~wPw|0r%V2hS0C6Q95ryy+K9? 
zkge>anYnr2jOvV?VN=kc3&-Fy*NLB2Srt^-7-t^MAyVWJlb3u{WhKoH znw%-hMyx~1M3=~S8BT3Na#td&l04H@isTQ6$;bSGFn?5Lx)BR8%tjHlB`kvWt%yj9#31zS+Bm)+|fl6won8!;_u}{7^ zat3@@I224-qfJ2)(_W+zmf=abFDYYICo`jnS+H=aNLoY6TxpdLP&KLSMuI2U>wi=a z3QDEl&_nx+o^9AG!$C-H%wDlcz5E<*8E_LpHxDGe$(x6iy_Tb~gn?cP-Fe@g1@mU6 zGw*kN5;5!~-RA8ONWuc0GIDc9+`z+9;ml~h?aW&STryUF@63X^`jpLdmfKF;e5NR3 zXQA>21B}xWR!DtI3v*q+9M<{%<+2^w-T#>Lt~gkpW;c}(cLXH^5JB>gf4%2Qdm>qU z2=ffY$un7t7((qMZIPHiQd5c$h^l`ioqxJybsD|FRmH2glWi>cjU^WHNE+*O!)g0f zzLd-)tTb^05xCcInb>U)3xaLG-Gw>M%0jb+)K+(e)KFe`rNj3W1I1L(^L=m7FJ?9) zmlGdi0K(!?j6a>kBy2FrfCBQ`hi_yXqh?A&TOwY8p*Xo3WRrv&L#zW~9=e;6*Xh}| z2Z|4(3`2F$f}j(=+lI=bgH?wFNJ0l0vxRQ3lrdoYz(A^Bm8xJE=}3%l4n+Jyig{)l zWa*JSbQe8XsMC?AZqiT>C|v0vqu$Tva0s}o^q zqud|cWOYKnC|(UK{Zd9TG0u>vv{7Y9#G#ro0|v{Q-31IoG|6GyGXN8?R^jw7Q4*Vk zaS}1Q1Pv`x63O&YKBgy84mKR~epGCNaDc{l8h&KZ)gp7vNXH&!@ia2sUeE7%Qa@3A znT^pdB41=mwjoiHL0gbvQEgJ*C|C)*ii#=!iN=Q52}39z4Xop0iX0UN1XCk)0?!Ojdr#u=(?1uFQFNs}*gPj1n%s1ljr$Gt?b#%t+p+ zy&N2egPR5gTd@kHg*zoN4r4b$+Uh3ml&(Svg3Y3`54Q{wed$7=98XnLOzD53!oPyV zNXNWn@RLmi2!VZ*hAH(QCa_l#`uo*A!`*&Q2u54$64gV9oYm(KCX>TJ?BA-73d*wu zh>=2!p5Ui&hLz~^xJ&u23Rh>>_%0ObWCpUH^No=-Yb33ZDOW@qEV!whH}-8E0IovT z+hn&5TkR5H-fnLXYg4A^P4?ON;SVXW%Ek@c!FAHP975aodH2t2-UI9Uyf!z^pwRXX z0~DIx5${3hT=L#S-otoX-Xq>oybIoAoU7(Nid;qS*SzD1EwN^%48{L+WF-^regc## z32afs*st8UOlC@aFF>Cr+Ju{PgtWAA92D z6Hi}pwu7WDb?g|3hTUgVgIGm9u#2MpCL>EB4FRbaV2&Lg4Q!XoL9-Lu!9sVr#YVwo zcg0v_uG@;?$&!Bq=Db4;@-b;rkz7BL5(dW3(tNhzzXoFQdMz~$r$*_KQz6YTY*yT$ zE#JjBm^2OdlA3_E{nl#jnNjq7*IE)#nYV=2zF+i9^EZN9 zX@x`R0-4~Zlb11)PI{TnnA96K+mJH`s=r~Jc{;6Wzzx#Qz90BZ2HMRIaMLz>!r-@sjGs%H5;3u@r0y|}TuWGk^p)hoZHhPhF?pLL2{2+;oBhsQYa2Uu~_iA&O zF^m}6bYwR%L^Vd5+Zizqx`0){3FhyNI^Iy_@eW8AJ`uEsa{V{NyrJ>{S1fm<6Zb6U z`7pW}3?Ms)xWq}j%ZHV-(li~n$yTCuWB5viPo;0i_QJ&eLY#OywJi0*ei-L?rW^Wa z?C0j@e4GVHo@|SaSkWFD2^lUoQ5=PuZU-Ki(%-n->ip6q#hP)OG=sPABj+GFBnLt! zqL(z3B4Lny$omHTxAEa199*2*|f{Yd$;soc@vHQe2(i$XaCJ#Y>nr`ZK@Dt2yM%rc>7?<*D zXh5#E*{mve4IeW9cLTYYM*ol6MYzuIg@>ujyby^y(`Y{KFk}PJ^eAbQ_u`3 z&fb0n?!LGlWwAuP^h}IxLZ+af6w5t8&i&zmjf3GKv_3(fqT=&Y`T*5L%ml|EjCUGC z>E=itl#+So4c0H*#~-gM;r$yAcxJyG+G&fM`gzRR+9b8gnufyz-;wY^`Z8@#+j(q&TmzSCR4La!t^$@OvC@9Wy!B-_US~x;7(2+Ctk=4UT=z|TyJM`&CjQl9o z3H*@6uweD{bJy`%ZD`9vq}2kT@}c2{BHjfB76g*WrYDw)=24mWU7RXOdq8OvCI^xP z@Y(-$#0^&C;*+EkYINJZr=Lf~0g(k$fnn#J)rSXyU835UeiHf=n6Ez#Pf|+!4%PZGa+qiq z>l8Q8`-mG9^x%L1?IJ&E4dlJxQF+m~MiFdA(}s^eR1wkG(5#DD zN+?{$r=!ma(G@WzgXwCdx83WJLAj;XOerHSM0r{dpnKTj#Lzm3#)j~5WC15IIL%z> zQRkz|K=nkre_txG@PQe=)+VmZL2jJAEDoCxOWTJ7BZ)3zF>Al0&Kbwaw zsP42o=AEKJ_mEKc@!u3aVKT^uIc@{J`1f)dah#(%XqnomqA#)s=+isKTjg=>4dX|v z6I%|-kkve)4?0%F?Sl=X9QOvI1E z(wrD8SWR3Xh0SYGdXaW|{GS>aIDUb+MfuX(UK_!v z8tWkkTHiKNqeDolsFT<@ti04K^u}d*1wKr@y1IWb^`qs7mH>4zcHt!;v^cmJSMl8o zmLXBLYT~b`H#z>85ZLLH2yI6LTzZ?zb&^-bDHv8bP_$XA5OX>8z^iY}F)dS4G z;)T2b8w(}_w37G;00b&34s>`RdPASu%LS&j1^lE5h;p1)tg@eJ!RYc;94TzXdWEmj z%uFFW-TYTDJG5+c{&33f81Js`9n95mxywVVrlSIyGkOPPnU_JGax3G55}r4Ff{r;d z1h5U6gkmnn@qq^BDakfp ztcM}9_-puL-Fsw^>=mi)Q6l)a$n$lY0@`XY;=tgE3}WzaSnp!jRtCf828T8ZilrOn zQ3PnK@{mevyjl5sn{n`Z-ZEElk^wOFWVv5LiOX2Gzr$O8B| zr=FsqO%AdD?-4^;fCtwPns!Pu3L?bDe`@)iL*`G((m(x^mLo~H9Gkh~{^woo7nPqX zzlHSOmQg8HEp8Q(83C}`ed2p~MV9=SxYLn0Wm7VU|Ma63OsCf}QI2B-QOdg2Za1z? 
zH&BFDl`qoj<4N4~tY|it0ya8|Ds6mbh>ytpY1&dPK|G^=bPy?9@mh}b;5dqI7&t3} z?(T$?<#wh2-NUr-=Yh|}4Uwmwk1;<_3_t}gBNVwVhsj59M1Es*ntY!nr$G*Zjc8m} z<%{_144#?HLh+K>l+$`r9+s!ESV}-Umk#_SSf*sOWk3O~fV1$n_IU9f>!Ir2YOnf4 L^%LeRQ|f;Kn#ZGj literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/repmlp.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/repmlp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2991c8b30e1b11464bf8b5dbeb724d759fef5ac1 GIT binary patch literal 17771 zcmeHvTaX;dbzN0;b#?V)dKxo>!DC-VEV)2az<|I~yK88<Rg@Zj&S(#axSy`Dk?>+bC%^A$hlnnfS^>6=t_0@l782^)r z$zL9cFW?IQ7Xsh#&92eVce7_TEK|pGJ z-Eyzen31^Mt@dUcv%OlQ);rWVBzgJnTyMTH-&<%bNZRQh?kzSJ5ij`e2S($_8-`!> zOCK11DVVvJYgCXf`xT@s!93DOk)H9ZNLPbdq>mvz>(`L3N%}a_hx|FD=K^cv36yv; zSloST&p`gXzkvJ&mb+&*p7zaaM*Z*ue6_J$x8u^KLI2j<1Ao{J;>DF=r|UOc?Wl9B z6?F#vX18@WP;pJ?w^4FBr$10Vol`gC($!YfUVpu}68QC8T)Nl_gQVS(zF+BtQCzwf z+#Ck|sMG4!t+;UQ^(*p_04+IhfAO`~uUu}2vQ;_l$Oge&~_2u9`x zFY}18Ywa037UH>)^@OpL8|8Lw-}Ej1?b$oIC_loNkBp7nuH)zS%uP$3WondLU<(CY zE^8x&`bUjb>zXkxt!p1Qc5Jj;9NB2W+R01({DQIL9Mr;2VN}S#eOI&UJ%6o^`f^*k0+h!nl&w>;eu1*DhTw%}sq?B4jSL2e-;;TP0)YHiN>leusqZQmsyZDmw6kZg_U3)!tkg^n>s$8t-+tmbUK31vb@O>Cd8(@Hm2+RWfVlg5{bE zru}dsUolJOqFFIrQ+asY#x?cJC8_WfB7xB`eWPLdW+V3vW0jxC)j%X%2jF1TgPM|k8khv z*=AZ`$$L$I!3FR6tbu?lx9N9!Q}vNXN5Bh0HX~(ZSjH{9_`*wB8$w;t-L1eo6-FxQ zf}~NI4+q^L8B11yV~Y=53;Ka-MS-_P9&+GoPFzCu^ZtpYtnOx@@U=}YfXOeO%%1GB zyno`$&%H2i3Z3ovQ>}zuW+eHKHj{ONACL{^WRqJhp93X5^G2TVHr&}_e=QlAv)CeZPg-`WB{=g>-rPJ7{S zYij^PI}x^e!3iP36YT-UQk?)H-X4$2)VHNY#*h6@SER(`;AeRN}Ugn#PZ0-Hb_WVj@{CnMR>NbXf|Y_HM6tK*P5?-LAJXJso7BB=>f! z+a6-@z;G~YSnwX3Sn|Hu@80!p57cIeMljho{JZ^DuhW(e+?bkK5W(eEH$MZEiDa}mP@R55oj<#wA?*lZZq{ z1i5?9+JJQKK)U|6`AyR_BFN`GNZ4lHH?A2Unag!2&VwieCG0qMn~+)Pd^5KC5OCm= z@5Z(Yx0Y@FkK$2)ahxIgIYO{|85{a*y{KkdDF+WSg<$$P7h z)gFSS17;zgOyT+QY`qgiL)D*<4s2FfkSvV|k~sw5P-~z^zzN7Q@Du=&4(=g%FepI3 zb=)LdI5X8XQqGjH+FuwClcgiC{wk!>d-$YllP};>zjNAaDb?yj0Jp+yT=+uIJHUB8Zt|qiZ2E&XsP$RC?z0HunS_MfLARTdv2^CsEFuI=tilSRi@-H(v+`uk zmL=0h#?+5R`3sj2Ou`}G3P6y!0EJc^0Y{Es_@FWYirl#NMt88%>Rt_0JJ^a8Sn9hV z*!`Jarw6zWbP$1vR_=PkkmBwJxJ?TFmMD+Hr3B;OxbYa=0%YDd0l%JlTY{!i zPR~zoZ3aM4w>n{Gr5l_|;g)yytbWO92qZils5%FOJ&vnd!lXsxy!0k^fS<8~m)j)p z^hZWUJ*bQ;B-M=hs}DV71cnsl2zKxDj(NUsu3CQn*P*#+&P#qSdIOXLK(T)E^h18B(OkQmNbO1j?uj3`mx-O=*j4E-m#_4A_XO zGY~4y7MGOwkuM9YVp{SyXWBoo@}T&3(&veQUKKi-^j5v60tG zd&AMndZVC~^oFaI^F~oCPSl3w z0Pv;_5Dpbu(9zYFiXf;5{qZ6d2L8y%Y4v4V#-x*+E(9`i=m}Gcjcbi-Nw2qTxp6fj zduko5u*>5$2JXJzT%T-vT@+VV#k2N0SVC4`3g4bA3_a8rEc|F;=!-4Y>NPPCx0)K} z12=LR4+Bwo0M9018H;&oG0hsX_j2t$GFFd+QftBvflm^4*qmY`96e};!?wnj88_KK zOh4pxpB)bf=Aj801)&^Zrl%FTWP-+z5BR792qS}t-nsF6WDI{a?98Hx-sb&X$T>y7 zS|`yWf%VV(<5y3^b^rqM4FY#{SHLmIJW@jdchVp#6Uej94}Ce?I7N2$xsw-{PoBn% z+#Gf=ip@3E@;^ttVFEn(!B%&0H&Z#FSYfghUmilg2Y5$PlqsMf>B>;iHZVm!x($jy zDVYuOg#?#pg$2uIBkciWcd?4U$_~Fbg~Z>Qg5K$uv)p*>NVijfUtY8PfTBTF5yaNT zhp|)e@~3UGj3&%o2;C8oaHeBGJ;Gk&P@$>gtc4kym z7o+O#tf&LE{!DZTZ8-NJ!j2g`WgzQ02)6Pb%&{XI<>yDm@D5CH25|TG$bQXu?-M(f zXaW6mMioDYt8mRh0cV`AIE&7$+BcsZmIhtX6Hyad&=~bUuhB$Q7 zsj6EO@{0qdZF7aVJYi*tD};_IFOAC+c8A!}hJ(0}z)@{5bJo;g2z#Eh+I}9!E_IqM zzlq?wdJAFQRc|wAnZXwsTxIYj1hJbg8ubqIE+P2qBKRTK*1y=PAJwv0#IU0&Q@zT1 zGgJH|b~JNN?ziJP;AL>@TR~fYNS8K0wkcws8juLd*jn3CB(XS$uB%Tn=Dy|Aj2qom z*I119^tc2QLxOqZg@d@YB4}5yv23~71dD4$kpet+fS-Gk6>i(Vp9UE2CxnxRvW_Ho&z1!gn1h)gbq&dGUSfS97YQU zTZG$(C%-x?IrZ5Zj3u8O z%E~ybiQ&m@KZTDZp}{fb=yIe~hrtGd zMsc+b>jhMlFwTKL>>sgOHV)0!>T0JS{1;Rb6g&sjauI}61u5ANZEO3<1BWlEHf6Tp ztY^gYmq=htw7~)leAmS82=+RB6XpmIlHSP>;{*r_DUP5GmThAcbJ))?cJpATIz_e( 
zCJJ`jM63`tN6xMbMr&+9Jb)-klJ<`4+dE7Zh4|n8A1^H<|1)xWPcUhs-@^oIGkRO@?F*jWR0fo6OQ>kzwZm~k7mY>0e&!v2+Co#za`G}TowP1~h z(YXD*r?$UEvK9eMw*rwo7#~3v)$2lN^ zcwST(Z1!kX278~_jw>bmdd*f?pA`xdW|Hpufv>O=jbax@&mfGNn~iE3)l#idi3U-t zn`lPZ)sqw}2}w2F&8990%c*Rg2r6mdz1i*`_xto~YG6)ii=>+D3|7ZE(&jW>#&a0N z=6C>E^RMvDLZ;+xBuWPa={^L}B0gt6O`-WdNECTh}8g6|2%0uCNR4T#h|LSM!pQDCVik0R}A z`08kmgtY6s0$>4lcgq4^L2<}0Zp<{RBCv}av(2i{UEkvN3!OeqkwRiu1~C3lh#lCh zpmrQ6kgg~6CK5=_@Z5Fjy~-y=`T@|lyS_Uv8R z_XoGiMa7L0cnpD68JGjx#m9+5%Jdare3d*e@7wo@Mr>(Oz!+5@-J-;x4&i+jNL#2% zyHGi_d}M#Xtsw8j_@Cc?mHdDky#h!#&r$7MZKsMe?D%2Ct)o`*{5*`~i7jUHy!VQ? zd2UIIHeo{)d?1D|;iie98t0>dYOlj{0InOEprb*N-T)fO%%ga0MS~c+!mUmloneN< z4xRT!M8;;MjUEs1F&Ve6nKQl=U2e3@=-vg|HqF!ig;8?m-YARVF zJB+bk>an)M%Xmsm5PAq`E3}@_uwTRa>UZ6j+=5$jJ@-iMsAB0huBjiPHh&>app#Za zSPuoe!+I#_-FH{vj{qNqeHKKq3>LsXX5tw=46p&d3kyN(11#oOy%n(z4v|5tcUHo? z;C=O-*RR6UKn#iAPk0xX5nRH|UcSuO#pU-7G{dKQ?7=1{!6+FW0;EiO6C7j{WXHx< z3$1ldHvMChpoia&O@bT+{8H2TafV+ zU7-O>{VaoTF!(eB!P}w&vO8~5sxqru*0O0Fy5Fz4RrtzPfwH$3GtofdF*ZI~sG9zW z4jo*VaD{{w#Imp;B+d?d)T1n-T3BL$jYq&pk_W58HA8&@6#x`J#7%ek=NV(ar;JJF z-N&l4EBqFAV1eF#nc*lAcuR?*`jPv0NPCg-j}_`)NBc{B5iu@>dg+;T$g1D19anr# zf|&ZTQ~ZTrKrkuBA;e`TokqC(jB!KIT(w1%!~86pfhE8E0UQl8QC?9$hi10viX+na zzzy`R0W^X$w>ycSD7DTvQA?YMWBZNQue_^XK}I|68{%U9$<`S`AJ-W*AZRnWEqJ#= z1Gl!IqwEx9&C#v`Y)ex*c8T?xv!IzfkL26Xf%5d8ZNd(O6=g4L^ofz=vLgOF$Y8CF zu_P!I8m}Qz9Xu;Q@~iE0Y2>`hF*^qqiv@R*Ewb5ocD>`n#+;eh^?SH##Qv@A@2dc{ z*qsTp(rLy)i)NGaPXaGFP&WZRr~lmV!<;ge=YGHHZr76eI(YH$lh}WLYsfE^r9(3a z7ZKQ45(BG~7~HS*lF6xGL}~TQ45+vQ#>=;hK~Wa_c*T>nf0mth5z#IvSSPb-$C4GI z?z63NRbd;#F-UjotY1S!5G{~54Mf$ysXZZ7*~WvZUnNO}?;|ov`W+*%Y3}iJYu1|K z+rMcd%uDF}rs)@c9S+jJV`2~7#r`)?E%(InSN3PHOsd=w=YqLM2lhk$%xXoJ%$z@u zCFA-F{$VVcqQB@L!M!9)rwphu!YoGwlJ~Y1?#f5hwLfJPEa<@H;_! zum*;TZA+=}@_H0)g%{4AZK*q*TT27AcDA(=o;`o=Ge5cXnR73_^!zI~ng?ygx7bno z%3|x9JAHsYy07qvYOi-9ydKmIcdu3W*Jyv#{u*2X)(3-ddKZ3G4SL>5s0G1E0qQ4L zPG;5Evbc(d>w{s}*Z#*VL3eN)`{Q@vCMMsYp%}fq#>xPF_EW#D<;+Duq#uXN3tH%Z z7{PaXjP}{i8Td$VMeEqXo@{|zvzFkNJst-rPwn-`c1WJ;ZecofDsvnjH^T2q_Ko2x zJn$yE32hGc8jwgpX9`&Gf_^(l=lqLW)f3{eZZ1~MLa4Bkfo z58hXCqXCm*Caz}dOXP&?fj{`~D51_UVCA|g@kuy+xy`22-vS~txX=|N{3dW6-77zY z+6hDMjP{C&U?z_2dzLx_192XvUf{dP`4ET5?wPlY@VS3%092d522h>m{+jVE`=0qV zw1Z6qY{7nLHr@X5b>ODoGIy%dUJ>oR3`N!i%&u-);cI9mKdPb?=Udob`x@TCTJWo} z?lD$I3~eG-8Mz4IMT4*!&Eg%dUpQvSvl>&ohxQC?c+Bn0;!U&B9Nwg&+0iUZY}D|s zTX+|?X1`|M^wZjCYhL0->Ft8VOAm(ft(d9!2S+8^;hk{tXj0Paa`(qbHDCyJxCmYj+|f4}&1hG5Ty>|G0drhBr( zvd`>(B04de`zB2IP^1nar{*6%YHS+6WU4w+9-cpod-_(~{liBLjLRbaI7&0Ps{RrG z=zW)M-288&Jig$QqcU3fspwOjIgI2nyw&m#!{=$fZ%5DVo&;rK4t7uZ$4B#fsWIb8 zZOqutze>=!eT+W;JU9k{;wJ~v2pU806;iOaJA8|=-$1ba?4@BC0U`?GBpBi;B64nh zq>VVxi_Imew*AhP^hA;eC!L5u8CbN(S4n-T*F%Fi;o_y|yXe*?&{K}L5p(f?k*5zg zEvc+4vN&Uh$oss1Iq`amOM-|5=ftJ)S+rPhHt-!2K2f3satCZ`FOaGj4e#9E_zu;e zH7H~6m3Pb!ag4*~v|uUC8^?`bwr+A~&KkSQ!21iX83bBDj|n9F$cn9{&%|cuBU5W; znLZYmUgpQPaVYSW?}B}Ohq}09%$d%^?dLRphw&oQ7nfd^t*r1BMAB-H?YR0Kw1ut- z(-VI9yU0+#&)^RkWW7-T3d!%|_3B?UUwB;chnt(!FR-L+|Iy_2NGJFm96dRMDWg~1 zF(Zs!$N13TB2GrmlwN?N(rSXveuP_WHtRSA+!TPAxryte3+!nb((orNCLV8Jj;WwD$YaW z!FlE!XgYRD$j$A+AfAy}Lg%60J}UDKZmJ2;eIo_o6TW5?7>y$I68+7w1K)umOc6F` zdEfero#7VtRRev*sjS-*GLL7(?T|2>+J*W(z8c2>a2%%L>T>I08oM1TngHlv2-PEu z5BY=JeJnGa#Lx$^;avy~CxA7P_8{@Mi%VmOug#O=U0*R&E(m$^R25v~{sXJM&EP-s zMdfkbaKdUjo)q|iacFof;NP*9m{PB>YBdN$x(JF(!rd^=%kY3gvKnzw6HCXx6IXOp zD}~BuQ?y@zxc)8UCmCec&u@e3!(C*}RcXb*Nnz|8!iT^*3af_w(2?C{I`P%u9&;Qf z9p_=iuD~X0S=@c5=?6QF_@k3vsaQiTdL_U&A;-|QVHrVL8i}--h$R_w{!gh?v7C=< zb}fh9Oys4f7j$d(_w0w(`wm^t$Sd4}V-)T#d~>GjV1F5JgB1}5tU@0E2VOiD&bPyJ2O`bWz(m^y 
z$*{JsQjNfOpTRby=MRFge-bAglT)Y3kw%CT?jeew)RK1<=hI={m{5vw0+J`K<+&%E zqFz?#(7GnjKS3-m23UJ?ZdiSrPq-e{e`fG!41^s26ftax%YPO~jEgj|#Wv_VRR4zM z{y>Td`NgI2dU~w6`Z+SP=TK=81b}IOh(Eh(&4C!^#M7z-GOU<393HIqU2FT$m;?^; zxuVp{D1a-I21J2vkX$%F2}>n~3?9xy`D53rs z?Zq|n#(?S_-nNM&jd~~Lvv{WdD}%>c>&e9LbCI(w43?I4s)XgtKl@|22|Tn5^vE6m zVc93C>UZ4L+A+lz>d&ZuiCgT7OT<#YFRsgt`82t-I_&GC2VMR*gETJg5cm%tJT}`# zCC-r8sKp0byE;FSK3!T-o%Uw9gpcfpm|qa2@c2f#d6k^;bsnpat34=Pa10FOOk7tL z*d*7!u<6E;eTU|{;S$ctcSFT#Q6h}yCyJ6(_HFPi)u1Jcy_%@bfQ)^4qHFl#qjwRbt?bx(x$#JuevRdv8$))zD zGece2WtxO;6QEF#Hoc@xf|5ZWY7|8a^p4(AG`#>#i}s}t^V)}&=u;b@36k3C_x)#f zcZQ-ABS?xaHD}H_bMEKA|Gzq8V|fF=FTV3P3+Fy!82`#b{Fg@ISv>wXkhq3xHjRpY zn=Q*SOrw>m*ru*Yw=zmP}YL2w>m3(WoGTJIs3azoqnAE45#nyObyj7}{q--~L zwI(VPl212{mQ}GNpJ`6ErYcj&XWd+LcWb&bjr@q4Z|-UBt?V_8bA~(W7Tz-4f;YaN zs_a8~%q^l^l=6OWYU935wi9{LR zFPxrVX*AtxtsXS4)q+N+U2WEGdMYgG`Z`)BYuX*v(luo(%%7?GUNlnvf_HVrYX^;5 zvuuX>>X}+lUz$T}l&p3eY37wzpMT-y3tJY}fnxT;3omo7EmM~Z9PCW3er3MX_Uc@0 z{8xwy{taY2qhh*7#d6I`%C(}ulxw?b*Y<2T<7RJ}c<1z3SIc<0;+cM|Urcgo$3cix?L_uxJH8N)Uz1svkOaP&2=`;l|! z&NMsqE47ON7hQJdS>Y^nl+&+>b~2mKJNTVei~cFevD9c+>r1tE+iUvH!A3hMpK>lP zd8qEL1kT_~XTb?jD?`r?`gT@=9Xq4`J@obi)o{K3q!)B4n&`xh6Hcw|I>&Atb9nQe z<_bZ=5s;X5p7$1NE6w1Pb8NVnZq0QY?Zy6h=c8h5z&i&T?8I#}1?Kv_pYNC7TlYoP zS?LbW@0>2hx_*CKskS;iADj~|rN1Akiw%8`P4RjMCpEL`sg6H0JTKp?))#PGbAWLN zz~VOQ{maLg3ysAUrH>?<%=aA34NM&lk(hwv3_o3O1WVO=*Eu-f=`^wY*U`Pcntp6yJTJX*ZupSX=@PI&DNZ@h$+qPn1byD?(Q@a9xUTvV7Flci z&Whd^raJBp&qN58oOyKCd~tH@D(Dc-an&pJKp-C_+yPEWwIZlb9LC^YImW@@X4m`R z%$d2FLyiLNYbejJE~=XQ!Ha6eD-Txy;yT>>4m%za-}gG_Yu?MR&%|bMy>7E}b8zi% zEP0?Cire!65;!1|5yCKTSvI?(JbX>;O^?w61(%k0ze`+rY(E65X2TEOE!z#x<_=(%HK{QW`7kZWGj}$$&dj}ShBnVTw7K{Cc#JhLPwtSy@(3m zh*``lmil-PJWAeZ!*?)=@yvD0wf7llIn_?N^6nYSyD?U}2aa}&YnzrDaJE}q^&Z)g zsXiI(C6DV_4YQZ~b@Oxlwl5lEU$DPdSZB?Bt^v+9GPlJ8x>z0wM`PS4EX3JKIC{lX zU`kcLvFe2xzzTRpn2m^hnClblFr#U2XwNrleprYa2tlbIjj%AxXh2ssyB6mardg^guUk{KmZJCVL(aFu zeDo2#a*zkVRD{4Vhm{DkR6Of2g*nUPq3wIkg?sNoJ&5+|F(xxi4l*e-d7Q};NWxqn z@qh~>)oQ(2^Zja7J&bSPdGd(rbb=$TR#$mKlM`7YzJFvs5=i_b*zL8&#o6vn^$0p2 z!sEY&q%=NlWvq<3)1Pfw=1zb4|1K4C=7gEC`Ym$r6w^6t8Z||0bxI3EA}!7KFIZ9F ztMmN(KOTD#&vSTWDS@$Wtl2$#!*tC6D7p+JSO$77r-`=986XsEvJm}Jw?;N0KTBIX z0KzTOYM{1ZC5;-pZ|6pEZ^Z zHQ6(lO*I)<8!3{pO=~UHwwYTu*HWJ~Htb$%6S-arXPla=^KSb;4$8qN@%R-aGGGp! 
zaEY5xN>H{)n^nm**UWYrUvSwN&O{CuyvaANebd}y^zg`%(&%BTC-4PefVNi8!qnEB zfK@pw$f5|;>M)ZdOpY=U^g4!IITdC^)b=amFa?7J+$w@S15`T^Y8~9iiERjH@aRdXEd7cz_x_K#GsSFTAt9*OUfS> zm1as_;3=@E(n4l6=v4JF{u-9=6HLJs%o54e?LrFafN1q|xo_q3d9z>v zee|ETdVd@)w-1~E&>SWpQ566UkO9~TXl|Ms2%|D+1j?y3J4k_rq!s}rUj_JrairED zkKtQ-4ZyRUR4}UFV5U-_uh|#nhh^Ub8*-h7f5%|_N+1}@ z?r+<|3b!ZZ0N|PFYqjQz_qHWuIik(sSQRTC2vbAo56!b-3R0wy?66c7u?_GF4(|*=(e7pU7#rD5V~y%b?mlB9=hEDTzfJJsDPwJ_WQ+yi^*+t<^z}^K#~P1?tok(2}~5*_70qhEefVLhb9?~GD8^WR{7(M z_duXK@kgdh;>(=606^Hfjh~hwN7Rp?Lj4$%4myN`po?+Am$JDNVvtUe|{@ zaa9Fj7Lw>vt?QLXwIq3k8$Kn967yMYqe_7*Fg8x$61r|12-%X9o;^$ z&+8Iby&W?yqRT(y@hP?DMr|_}`zu&`A$R7i)qQc|oj~;jau6{nJr9usq6H|hxgQc< z0F*!;bk+sRED$Mdi1XGh(C~oN*;qFr8Wh%S7s3Hh0|+n;QN&Fx*iRc*_d#IDN}rsF zrBDYjj(v-0mFlIJ^UJmf9cgIvY0~dn8p!b+4$V{G?J?62%_kZkKyD7tw?P|e;I6~0 zH2|@@<3+nYJFtKscVYDz;6ta~gguSehA{zlgv-dpI~aFo#E1wNmU9oht3~Ui=|xXN zh+rSsLkAORs0NDwmiO}_vXFASlw~woe+bu99@7dF)tE3XEJ>A+*MPeuwNRrMz0e9e z)UU5K>YxT}A@KbobA)e5LnPxv1skFm1#XzFt8SyE=K0ZVR0bp(tKJTXZ=lV8;PI(r z74ju&tGA1(X^?2j5mb1Airj-q z{hA2In&N-upUX|pM$6ES5TlmO9FRU0D3VIbvqp_iM}+9Y#$6IuxavEsD%_QsDlNl$f$~ z3Xe~YLF3DYNu`TI3utl%CK!`8O;|NSaREr6!_^dOGp-GhLp*S?FLscIi3T(q5Bf(o z%&gcUGjocvk!FA?ZO$npmz!pGq6SMXVCep^p@ui0AdhMZQN6_mAHEO+dY`#HTFLOt2X~z*zyTb^qy>opL@T%$9=%f zq346{Ltrvl*WufJSpT7XzZvfJMb6q|z$u+|`iAbl z=iNDf^CIkgX9-BRS;>iy@^~~k!POknMXQn&Z>Gi$6(K`91 z5qNc4m1mt77vR8z0uG-`d+`vwvz-o#v{nrSGzw}nr;ozlICJ{gWqBQEdPb_w9FzR9 zMO}An@uZY>={ReX1D!b@wKyT|PDK4qpw^*RZ|3>qzBe;i7#%T(oJQdIOP!TwQj%Pv z>v-OSUCg=S-Gp5uG1Fn^@yBUE7a`Yw{Bh@?Rut>roA63sf^0mu(wc`g16QNlP%QRf zpTcR>7GrK?9sozX-f-dQUI5?isKj^+%}z~5e1k1u)mwmd3w!w<9^SfgOP1dT_u%2TSY}MjQz9ZMeckdFXZ_>C4#haEhV8-QSM(U)?$D zg%=!&kYR7QrFZBUc&$M!2>ata6E0Y)tv(tJVoG3A5=|Dq_iZarG=t(m%T+QIz;P~L zZZ+C~<=Cu;fNorW6wQ*jnm3PLDqp^wT((0%ry14v0tZfN`0$@R340TP-*x71^3E7# z($|dYa9+g}u9)kxF9;2RM~BZeT7a3sh6e%>>|Gt_#Bec*W%tKo2-2V{;c**DLBK&r zT@ke^aeQ%r#5{}Qe@{;R#U`S21?Ma&yCT%d^hmV>(nMad!m91?73G zWCXSL&6t5Kd4q>zaZeOLhd~`fLP3Pn*qV%J(UBeZxWi;1&mH>9PUEuC_Z;`~Wm2+X ztV-f6e;|rW1cI??ba+Y}^CQpvAVTGJL9_q+z$97gw%8=?vluMxy#~+3Tt*3SqDLtS zH{0fuyY*ll-*HM%#n`=)W=T_yddQ(YqIomP#|3mfC$bfiV?ua2NCTo6C1E=R;Lmje z@09bx!UCiX*f||BO`%pLg$qW^HP9WM8nreuv{l`6GjxAZ+7dJ;9xF#j~s zsNQLHpYfn6eUoEP7^}N98=4({z3U3pEp)+r+@03KJ~zn8WTM&r9O@P@f%F1GkFY2IF^u3`dgC=AKW@>Idv?# zh9YXIaK@6TcAd3yEMWq<2O6a&W*#}TH~2Twcdy~uA#e{$=2BEWI_QSOX)Lo5P$L|<0}^11tUN;gwU<)T$(u>QzOLg zC45UKN||+p;`y&I1kf~?2KgT17lvb8G5n9}071qNNw7%=ZA_LnMmIT2p+Dah)9*xW zvr*emaHTFv0ayD8`3k!Yq8hLtgbp&`a2!qxh7K~+aF>J{PDn`MWDjoV>Xd{X?v}8_ zX$d>rBVmVoCE$>Oedfk}G$d?V!TmTR#7is}UF_P;?=z;bzIAJ@6g+_1@m|SAgaucF z5pwhh_6ly{79%enbjQ|qErL{49}1k|?!~$C!rFw4$*{x;oc|$x%h z#3W{w_E-bIaSdw-S=@NISL#joCVNx8-MwAC@y#rp?R(aCxy9auJGRH*{jKdqy92$w zy*-)lm_GHobP3f=x8Eshp$ZN-G>os6V02 zal}v>j>eReFdOL+;mAO?mk5<`EFp$(k5`=G0!Idd4rXs)3DAnd6f{&F?`e}zM?3e` zGJA|d`M%<5(O&&1YcjQN*K50C9z=jZcR?F_b1%Yd8#IEO<$c=3`+I1t?YzIs-0w5_ z115jST0^k)Mk>%Y?_6&bRAnTVBKR63 zq)ZW|{Hb6;aZ@7~>Uolg?%0@}{V5jz z0?8q8TX8H*5v~A3#Z>-X-Y_7*6XVIh2E4^9(?nvua-*4+!L7 zsW{2~I9Gr>A|LR71~dc3c``^+_fOr?d-m#n@w=sUeC%2Eo0wa;4)~K0Mma*8TBdr5 z-%c|Tp#D4LAk7M#zRFsmeLI{EB4f208kuF@7#|R*w*Pv?S z7`P;m0tk7>HdhPL)@8Z`grL(9pB_hn_$1pIT{D4A2!zLW#v%O4wy(kk2*Jme+qLE{ zLLd4^bkfTe`g5MeCy+p(mpl9?R&0Z7Hd^L-^T_m9nm1SXMC;rhf>|~%mP#a;mXwEDmj}}(i`v15!wY-GDu0h4?IIeJ7 zH^qPNZ2Ggq*zzzfHHt(^ok3E`Nqk?u-4@5Y{I?bzh@9py#TH^o7ZG|lOKfN=5;*yP z5tW(27d?mDtm+I5vZ@pK)$59)ozhz6TovT6pcd z`Bag!3dFB)QR8|9#OZ4cmNSZhkN^`wD9k;rTRa0WGJO6Q91`>ibFWzf+r*!wrIhhg Kr66;7+WcSA%X4Y~ literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/res2net.cpython-310.pyc 
b/mmpretrain/models/backbones/__pycache__/res2net.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e4ab76f1d69026553d09496151a24a33a9c136a GIT binary patch literal 8605 zcmcgyTXP)8b)MVK-WMPNfg&Z_BRXYa%it0hij)|U1{q4eM1mEOic{+-!@*83FvRT4 zvS$_`kXcv7P}zQ=AM%jWgC4-Dyr|0MpOGIiuc=fD&nYJ^*(&BcJ+~c_mP>ia65HFC zb55VWo%5Y;`t5c@!spX3|84jm-<70)r^4)~g2E=g!Ece+k}bPZKlzpWvM4JfwXe!a zoi?f{k}Qq%epS>cZf#WW*GG+hLzGpwIcoJ=qjtY7%9`65&GqMyuh_afKRVMtEAmz3 z7y1jb^nJ;$+4WB(yUxxYtNlfk8+H@rCTpYo5}P|&dO}n!yN%j5TR2va<^DNazAJS* zr_fp2>MHU4_QY{*bKraXrfVHCp4E6hA0;)QYP|2;6PLZ`1Yz8`%N|ac7dn>Pm1F(= zAK!WBy}PF}eulW4m7iY@omx?ET4YF)GI zzm$eD4gHgPU$d)r?GveA`K6>ueI4pI;%3s%I(2kP;!ZD6vWXS~Hi4}^R{tIxW_{X7TcigKTg&U4(`MKLM;qTx z#)bSGjrOq)&iP0>IfI&d)OeymAKk9*DSQx~eXw9_s9!`cwZ|$bUkaCwCH$UCe_syI zllCu5$MPNNf^~EA&N=^~bd*G-m0+%b?3f(z4}q5@~G9=P^oI zdiXGxfl-3G0UauK<8cMOznbdXjGBqQsQX4l`h64Qlgw|0Mucs7jImG-+^` zUI5-$b-XavMpn2RSB9?db5453HF4?&!=1Q6xi!O((Ma)_9vk}YA=W<*( z&4FtLfoXm&eR1tF_x{Qu+8cq3 z#~AWk!xRcsx?KEYjW*rO|8lEd5a<_h!&K;*PiN) zC0Q5Tx~K#1A3D0O=-_C8?`U!6bo8=~G@xbAKD0#irTr7UjYL>Z*;YQOO_g6s53r#l z`GL$ALghq-Et8)pQ`OTb2kWK&N;=UZ^$Bv3iu0%*oq5;im|@!(PP_r_^WK1Ld-lgQnBSHDs}llx0Ul`Xpx>&pTJyAHE~)Q#mh9HBX087Ws21Q4drfY0pYe zY7^G5e!|7k*x(X^u( zo+om~g_ce}LvvZ~!MrU^Wd0kp!u(_Gc9#)P>#e`DWQE7 zH*EjF3#`%DWnDd%AH_A8Oh^e2V-*%DR)g`QSP91b2B<%m`BmcFq~sQoxC65XQ*FYU z(|)>#I^pj%oSp{lZk(Y-?D|FIafXz$$l%K`vnpPQ;v$N3vp7atCD8;SL&RYKf6=ndZZZ zxWk6l#0{?- zYq{pu{*FoK%vj!r!-eV}1H*W7&2FGCJ37s0tKy_AV0Lgmeo@ZN}TZfq4AoPJ* zf)J8zHV$_UBK{MzxSG@p%YSbxR&H+r*J?EA!nEw*?26BT3{CbSBS6+bL_5oH8i;Wu zlOmpGs2nT29W{64NL!Q;i>Oia82LRVIERP_nBeD##-{D4Vk>)U@EY8Z3OD#+r1S5E z+DRqU$hD#sCc>^f)(|sPi3UbPK@B39u!aapJw_R^gF0<{3xY4F1Ml;6%Cx;BO*>KN zq#nt3lO&>-He^#|)v&8k)2^LIu=`mfYEhgfq81hKpkc#>3t(_=IycLY`jk6rMswh8 zd{!%XAM#MdVTb_RQRmkP&20&ewzYNC{lH~_poVZ1;wT}W-N8~}6OWjdk zK46YZcAdp%2L8kg;hhHV#AetVHa~+!N&Gh!1##>Q_$5HazfZ}xDft#9!arZ5oPfTa za`)ZZv+OV8{FvA`k#sw8MOd`B;RIw^EN{R>^kKo5V~l*6Na70@jpv*@MFjK-2-3_tcer)jb|6(nO5nHh zncA<^^?J2ltJ9}p&k1MWv|DiYy4|)rxP1Wt!0k&7XJkIErk2$D4I%Oj?xzgbja$YRJ22jdM|3U;!)@G7qc34qZ-Lnf*&v*7X1u){hU4J+ z<;xa-&BlsR>LJ0w%a}sU*TwOnh}bR?Qn)yV__fSsPiGRj^tO5yYkT9?w!7K(^5 zX2Hx)b{XwHniv{?(W~enpNUPAq4=aUBWzGV}#>GLd=FE&E>@kQEht9#af`P$J=*bOG+Qo6+ zX1g>RD=@O_pR#V{<0-VlU33!d(KNYuH2Nv+q>o{X#v-<)IQNH83QtJRk+@7J13{rB z(G@%USxZ21WqNV&&LeR#Rooriym>P*%*FL?kB*uPy+>o8hnd6AM@<}Qld!?12EmHQ zgw!D}3k-1s=pd)+^?IaV>65^O8&|%Xs=xy`)~|jw>5MzNu}siJ_{EKG5%JO}gt9$4 zPRr#6VQ%S7L=3Qp%X^!=D_qhBJnY7Lnc8LhcpyQSaB{?yh)u7)0H3J3%po>a#zi*| zB@xBqWOFscbR_X1L1kh!hh{jiuNuqJYxRNP{3Loj$JF$Y5|YF@B_a+J;m0Nw z^Qh}}1gs(k(QCTgDn(e0KOD`c$0Uz>N^b@fNYZ5gHNL^CNJ2cV;u#Q^5_o>a<0=NM zMlyx<^aQbmm$*X7H$k5aO-lmLauXil#J#i7do!a=Zf!dwYmbqMOy05Iq#<-bk_3rJ zzf?5+k4HhB=3A}3-O8xHy6T)2T52LQ0t$J?PzKK z6JIeWi0u^pEqeITzf`J4{D}+PGn^>9FY|AZxW7UY*OCM6B|&?(t zPJ26Un#lz?J;w7o^+gY#VzuJAgb(ik5RU&sAci8=b0(o(7T4ZRC~jhzq<|uQyr1EX W?JVBG=J7s&*EV@xo#*;+N&a7_TdkS^ literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/resnest.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/resnest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..851e68b34897eae1327686882ec9b57dcd1f8d94 GIT binary patch literal 10701 zcmeHN-ESP%b)T=Don0=MD^iwi#WkjC5@`*Y6iLZ)1lzIYui9~#K$PQR<90dRnI(tZ zo!Q)(+k#jxTGlmLJ#Guf`>P8M}G(5Dy}+EI`UT?Y8_3L zVST7+imD8aP9=>ohgFWTIyH{b2KC`gXJ**wH27H`%nqBKCc=hW8O#k^oeLZ`5uWeN z4;MNMs`9?#R$c2e#kD+b=i&ps^9*8YZXGf8H0BawX50p18r}t@XnD<(XCG0{S+|MU zrnm4wf1q}rbJcaFJ$H&;C~IvkX>E@Ef!pnc!CrUZ9C{*)2|_WHF>N(5zyE`G-u-C( 
zR7Ea4*6)UKJn#aqw~HdIdtUSrXRRT$=EXl+c6(HQ`O?#e=o$i^(otQdqq%BF|GCmv zUCq@$vpNQi!DmXR@^i&dIwsm_qVV;w9}k^_qlfn#*FUh+M|&8$Qg3ZsU4OY(;lCL{EYXx#kCP`;IM34Erc$G>#YZL|Y;pk0Luw z^}c+XPS(phME#<$mi|VU5r~xs%8B|2OJSl;l&&_>Aaz$+S01ZtZ6m2f;}M41>URCW zkGtKZ!tF{-S#w)YjK~}G#VpDaO^C;8QtNhm11E~Q-N(x3x0gg1#!JKDNO-Yu{9q~= zy51mK+H`umn_=KZOTvrr6EBVqMFWLBgF7-I>Z+;M)uRj97%yh+=}~X8i+V??-^Lw% z3NcYnpa755WAthVlecS#tBB)X?&ut+KQc}XOsnxoJ67Y$jya)8t~|sOQdTFJaC$Sr zVvMbm+Js8ur?$B!Xq5?4xYH-NPN^;EzwY|Ob{zxbA9*}5iLvK<`#d~J4TBIwqcHLk z^}VDT$u*Ny(erI*1&VN^+x^Md8KiHj-L4z< zx?P?LafK?IO=q^}4F-5oG=(Y_)E-fTpv4eb5SC)7jcTjHu{}ggU2Mk?6a5K9S<^_% zaAQ*3ifeiX*_4NB-(n3UYf|e}JX2Z`Sc)pbY&#IP+&Q-e&8)Fz)}fgfl8fhQonv6N z@QasoeYB-Qo~w?2(CuwIL4fs|soOe6&~_p_2o|%9q_}j2}!xV~<_X*2aDy*c0LVVYnx(=oQ+ z@%qkq5Z|#^ax*AAcif|fb^A^a3Y5*zy)52N<6#=lTMUIkrOC@HxLL&aXxr)aF=DRY zi?7?^h^li2m>jgL?{C=@Z$rauN*WHvJWjk(6{Sp-z6OhT-QM5!d)xNVgHf=@5zPtZ zUvY~}yuW?5swt^6@Q>sq{l3a&6>PtxB_nftYCxzZ!$A#GjFxaum% z({NGaPQ|S~tRQxdOF~>;O6$0pmeQIuxfIIRNNX$Sy%5i%_0Zyri0{QetJwA?>`e={pe*D|Un`?0ww#5AI`3tJuN^PS4|UZ>yZTeNoV$iZ%%v z?nDQWC^pr^%50CIJti?|jR=~9q)u~wgROZ|hgPT7Jkf$6G46REeViD=8;ldZ-&>Z6 zmt|r^tgu~9ERpU)?K!#G%#@6sc!g@2DOo77h!_FrsfZgSXxEq9T#yoHU{b~G{{c0Rz{kvALtBog3t%Ox3r{WW=nu|S{v1otcAi}nrU8B%_it!QVr7EA9W3f~MbOp-Kv~`LVkeG; zcool&RUW<4uE-nW`*`Kx{teAsLp!>BZX0+EdSBE4@d5Itm5L83{052dktjA{uI`T! z=TZDe5MNT+@)UBST{}lk!1(Ct3D6T`9pJ+}q-FSL2N=;+f2KXJe5xI5$AqPTqJXb% zP1GG$bUf=@Fb7Bp2u&&fTD_>!mF z*$@OGwP&TqOKGXi-)h(Ds;1Y~md;kQhlEoXZDVvrWWUPbvZmGano+Aj0zxvRB+6O? zsbivAB6STxI^q0fMNtYKX>Sg~-Y)O-Od{7o#~6rA)3=fd&CtjP#NF)7lH0%{@wOky z@`~;LJm~m?ar{-XPQ$E%4KkuAYS{PPc)N=k>2ijwd7n_Ujc8j&qkS@?{mRXQn=Ys) zrqqeOD2H7HXEruut*^gw6JTb#WaR-3*LxLc4(sKiT%;ZH#9e0@{Hp?0Lf3@@&$5UO#DaKghdC-7J>cVgW_(i?LUX zL{^V<4x~@Kp1gE96*-sl!ZU!9O?dvC&U6Y|OSP8rqI;1u1Shetf$w~ueHv3RmhdOz-9E_La{V*?%Dr78dL)$3L>cJ{6Z5fMR&@Aoyn4;+t@?29z zQJTIlqS#(OST2=QOiu=S&LInA6^S^ALuxw`qP?IltR;9NB0;&QDwH_Bn9-@R%3zhO z3*+?_YM?x_Z$+DFkaFTfWn3fd|N7OtYggNOYxaE@j^4<=mM-v*F=V}MIM+rYmLg41 z5XOu4$CAesFZTVxz>cu~?I;`;^GtOPyMJcw@fbBLtIO1lPz1L6|!F5NIYNDLX(L6RofLASi#U{!)e43&^EQB!ox` zkjkzuULyK1saygb+0`QZL**}2qykEyc*x3Sgg|Dj@CIT`mn4W~n0J5{mksz6St z9rs1PbtX}=#2P3OXj1o?_!48*XCHtpalXHbpWAufr83ZNI8~_km-CiT4Ks+($kR8{ zRx~Cv)QXe>l?dYs2Kbav@_X=8-Nmou2FnNmwGx^npo)c9JJCVQRj>j*Fevmu zJ=TAroIq2-5A=x+O0KU>u_h{%ZsJ1G2x6iftH^?1WI^#DGg=yg8#oQ28X4L##EVD+ zN|C-X^AKFYX&TE_#;RCBI;cG2$d{qsR^VjLsbNiYQMW|))J3sDf~HHjBs>z^5J|n* zH=#o|MOoRFDb^`j!Sbk6NfVy<0N5>s-~Wm@_T(E9yF;3c;U$*0G($UjsWc^Nw#CP&drMv0s3Qfs-%jOn@E)MWM@ z|1-6d40A(kTxzP8R;QNDKi#snSr~ZnBg)Rqi|qY<@kjK09%CRQ@iW{JkuzotcEK2$ z|Izv`L-`(FPF<9ZuBTkka5`h3fYezUEGsO{zUEecUFlS6R;^a6)yr2OHB0}pC$3$HIBBjJpV5z$Hewe2__MR%5# z9C6_9Erw!iY2@~oR&Lz5xp?E%@7%hzh{rcJx?O^myj#0)O!$N7Tx#NaBLMyM%m}Ao z`=QviIr6&Q7va$U^IJEr+sikWA#MSbqnhIaC`h=UWq&_Ea$>s+l5jqLU0o?kIAg%G zqP~cKoGCL0BZMAu#8PtA{y$Bik)i7x4!SlTZel?J@$*Td?GK5OWV>exkgpc=6~*2# z-+i*#I+v0X{Pr?vSxn9=rP&F_L+qlcv^YkQPbqx~T*G3grH=o0nR8|w>6|*>$qqxldJUve_@AnQ z5C~Ii8j-X8N=KQsQl1pZv10-h27^Pp%$ulB{GNyo5OdG(`<{E=Nck-0tSPr!ZyjW z$y!Qti-BLgZcmNJ|8?@`dfo^t9BPoUq|2Zz%N$io1=83zG#V4O9;M$lOy59gjE+KcFH?A@{7 zgKT42Njy<8Ch z4EC8t5P5hFon#haTJj*!R9~boT^XR#7FqtBZg-Oo(>!u!ylyZ zcR=J3A@2vgBEQDj5A~C|CCEt8#y$!-+fZs=yDDg$zdR*88{BX1S;1haXGwGD?0Q`+ zLFp0r9(9s+E!N5=j@!F}=M;qG%(}15W|XqCu^r6$zH5nM>I;n7;#~V=n-F1)sO(1f^|1FHf^9 z_TVlCfgnFfXCZrB?2iNK4;>`+A}kNgF3D8*kODlW;GIc@V?>COL_oz^!(nf4v4_%W zs^zixLh-@bqVTqS?1P7bR)1n*oB=ZB5X^K70xBnwv;JpNiLxG&_qKL1>6!~E$b^}hk`e$>tY literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/resnet.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/resnet.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..e252c28b70f312a663e8aabba86e41b9bd8bdd8b GIT binary patch literal 19859 zcmeHvX>43qmR`MAd$CxgNQx9G*?P9*w#t?%k+i&x?UrTPj_o$vvDDoTyPaaO?ju=b zE!2IFS~$gVqoqlA5~zVewi&d0TAEB|Mw1{&kQIX@2m&O?&mb%CkYAY~0|n_HGfpz@ zZItgj_bpWv<#qy05=>H3-Ma7Y%em*C@0|149Ud;F@cG4`{Ik`+zm`h42^SC%Rg&$LF`qf4U_&$bS<$CkzRQu4aDevJbnHw~pRU zA$?p;Abr9;bUl6DT5{s_gK84#lddD_kHqOy>JZWoh3Stf>q4qJbC;{OSj_~bSG%3d z?{w96%MB)1wwo<6%@|v?$*0?e;x7C zxw_|?R`E29nx0?p__b!I=`*`_uI@M17f~Hgxp%&J{d-e+tClLO;Ty%JteaC6HF6_`<;){Kst!oJpvKhrjnq={o2g7{sf1;lkcl-L z=UUyyX8m*~mBLuVM`yLGo%dXC(e-CbB?q5Z^qTjA#Q4iFwT&skyXvvI}lwtrvEFK+vvrK)>%tKRXjkx`Eq*Ih?*YaMj9yxiaP zsdiJv{VXp#SJqwa#)IGtmY4Sw!B~AXh*tIvhNLn#>s;h?@V`FLj&?m)Uv`yK*K6Bt zx8q}NoW6yuca-C;ceh(8(YjK2$YPqu#={s-#4B%a!Hs zIM@RWwBtAZYjIOt)D>OtG}iliS_IYi&+i&L!o<+<^42xKs~hXf4oGdeQD0qkmuH${bdysJ2Zo8|mfufq2i`VL2VO6e@ zkdLqHU<*j`wp>>wQx|sASY2~YsAj_-Twx?|wb7{G<&I%}wi~|89#5-Hj11d2FnM8R zd)7HFAdD>Ti+lLfdte>Mv>La=9gWG9ZJhA0ZMiQ!es1ycsua>g;8u%4KHSOI1N+?K z9V^JN*zZK_Ey%p*e&I_O@7Q__NkP8Z*=p4rZle+v!0#x&ybII#dJb5ppYp9<>bkvQ z_lA{q)3Q=MJlE1a8)2rGMwsoTHnPgzw7u!KQs1zUlb%eeG@c)co@Y`hm+NH^=6fh# zEA;XRi@gHEQm=?`s8>Q*_J{eqkpr{M+)RIsy;OE9l4^tTMrk9vBmOArVJs-imb1OA z%B#Xnws@eIx;2KJO0RO;wo+0O<$5-lwvAH7n>myk4@-^oMx@k4uY8?zJJ@6HWN*ly z+RW%b?H}q5snSj699F3dDS!Ib%x%s`o~lD!=mKw`8g^PXLi5T&YJRKh`K!&VcWpf1 z<&wURt1)NT33BU7fk1;o#3zExO0(`&ZT&EcY6k%tDJtB5wJqIUsU8Wk-ZqZD=8gr0 zn%sM}S}@di2?XVS?i&iH~!& zPQC4#_`Jk5?klvyT5~}ObE>t?y)Q}gn)41a+HGwIrG$qCX?M%hIp*1&PEdF=c>z@e~!W94C0~d1wK|8Ji*{e1}6~&6|-n)qy|Qb$wBy#?K#BUtW3oG z%yWrrwMMJ%d9~V|)DNFIrMq4KRJ*;UUEv`E(XPTpduk;VBfL{!iXGg{Ti5h)w9El` zO9;j)Ig9@))~HpIn0d}wwq3Bw_iQ`IFl}45mBBaqSNNn-984?xu#zvKbip3Ihw$Fc zcygu9MikH>!9^d&x#q9p`*9h84`Rh(u&&z|Kv0W!>>$79`gPye`YB}JvGjBJ(Vvfl zvyA-=0)_9#B;K;UGuqd$tltAW)vvJrIRs!e=NX@6@EU_XoxF&&b*8_L;Jz;A8|*v! zkj(hy+&*SSk+m#k-zetK^O_+vmObnbbwQcFgPy>7pX>0|Y zhq)Zevnl>wFzZvz>r?1U)>_XqHphUIGqn9IVt3Mkbrl>YIv{Br@V>=7iy}b)n^%y$4=nuk?|C;lJ<0TO8HyBw}fxyWBYmhrc2gnTIw`VeBi@5 z^5ba)dk{rh89b3w8Q=(BUsfNUD633}C~g}^t&i%J07Qc2Lo z5GZ{L=pq)#>hI$SC67j!{eKr@Km^+MPdxx>%=d|PPbUIohz3Hi6+Zou#RkidhBD=& z0B^(x%Z~~|z#}lis^hsnh2^-^NebTsf~#4fr~E5jBi0gD5Z^6aT@Z2Fq(Tr!qQ24u zE?{m_YoIX^jH4D-o7(f8xvO)1?ey(gJf>vt{yz$#g5VDeh1})!Y@mo9LG*P^IH3

HO-b^vs+Knyy*0r_Pg(2-a6<&-xffZ~1tl0XjUe;#r`HY%#jrmbI9geJoR zQz{(;SB536;*W%|B&%{EEExqXiBj`%>Hz=~rrJ=_MJ2~}NB!{_tPm~@Q)eU~WC9@M zAW}v{sKRoDDugwFGJr4yHbv-{2y7<9(g#Aw5|t)2Dys5Lj(;jFGu9iuP4ENwBCuv! zV9iXg>>mcK`7QrQ4|*zrHAe;3Jap?}fi?0}tt{#(Yzx2$cM{?~)dB)+B*EaEX=Lfc z42~dp9g@n>rmLZgs(H;F*9htka;jNfV=0l`1>79vqh#48i*fx3f~upBG5%Es#~JJq z|4%S2!2tV-|78IqHN!&!d72NO1j>lqD^jTeG7AZiF#_3X_CZ<9;F<*iF-AgLVAj(N z2wwCV2G1~{tX4f>M4&xGPDU%W_ZhN&flWNmfDk2-R}4y7&?1L6Gy1T^}Oq0*;^MY+{(E7aq5wQ}_f#^M1b zJtWQL<+wM9q7Wk2nwQ}QuzWMw>YS zT+7P|*2NI1Vt+cyh{+OC_I_3$)94A&T0x+@822M7iM$(+T}%>A&!HJ0tRTzRwJ)`{M5)djL86J+Sxgt6lilV zK^JvwWyg2dTusa&gDwW8CX53#_%+-hjqD(gZ51^Z)q>3SwpU~U1c-|S5fm?CrE6P>l^sw!zYiT!hNVeqio=O%957KujWLP1*0x8zToy8aC<2zyytpfpsBR}UM7`_ z9tgL@avGWi0hMUXiFA^mMlZ2p2_%5#`p&8&W}g0*rQ?K>I@|4PX43aHLE;)Fo5?x3#esU17j{#CW3El$h7K~AXI$PIBrZM}v z+#Iw;?#DaWWWo^0kvd%`VMve0boc1Vhy`&Vd-&o2yYbA3FcNMbESSPSlG%}|476n! zOy8Unv(jDz;e!pJ26hWE^r7uO>uLS+8gy~6!}S~cUb(#XE=+$o*Pidz73n6_olsb( zUAQwa)Yw!&W(rHLEM3eO`#OFdTFXS^5E(E2NSXIY27?w2=frBOTNk@&tLYKcdwv%t zf6s3=a3yWQ3QNPsAWg?6#M35yQBYhzN*@QaYLInD9|HJce3|CWgCUCbk2XQ82N42r z)ArN7lAnR_Fy1SzK}$Z7+AaCnUJ0O+Ho=G9g1-z!2bJ0__h5h9OnZ;RZkX9j>-X0r zZFjhr*Pl0e`t!=hbNN=z&uCP3#>$plgq<)!%t5i?He1wtyTO4*cUv4@8m(>R)?h-^e-c|GH*dAD z7b*+x6v+7`3!P*@j0wBLeh782kjSzCv>U9o$v~`cE@DAes4qxk7D2AQwdHn{AmN}G z9`kt>t zBz8ZUT#(y@4P?y|5E2XtLkKkiIZ+>afz{j(Pk)8?{S9=TE0+a`Q0rJnFVdVT6dAaDx{APYw&gVWUl`=NVGA%;WXwAxeViz;Nk6J^OxTPo`xj(QA znNM=}CQ3U8qWeC^)!4O9hb9k1AY9b88#l;88C<|UA<{|`$=b2ciYa- z&OLY1S$O)SbLR9(XYTYIf;02G3D4h04oieKF?~4Ac72U($#{_U-Kq;oGq|tYu?tru z(0%cK?<_6?%ZA=7iD>ampY>z~Z1BFu-f1cY5jGkIxgr}gf(9i1HMehM{jHHjNPaAp zUSMf2Y5}%+u3BI1zM&YVfSQIaHQcWPRD6J|^n!SQ#f|jIJ8ajjH`Xyv4r)MPiEr2C z;wpArtIj~Y9f+l&<#T0i!XSY9G`8VktySw9d z_CFQGcQ{s3g5H1KEr-^8(LKfmI!r#5n_|CLV|LRsZr2tPRd18Vgkq*p`-2J+?u~>vRhRj*eVo z`c~|s-UfS?hF-fWZnd%Y&3XCdm(Bf|_;}5d=;1-#Y;Se7A1YSjVX#7&BaX5I8T3$d zUJ9$6kURUOx#x^qCij+=S!R~>bYd`F7>$S8((EimV#TWiTz3K2$HM$z>6UvLmNnz7 zz)I38mX97kkP_g@;0;!grHv{PLl3l&EXeHaEJx~WXF57ZFIUyJrzwln&6q5R(VT5v59298@hc zFtKyry@!t&yD!2*#P0ML5d`_S>&-S@Jaeytwau$AGsw!N7Zi{RL89ZEZXs=^g5n#x zyS+sYQB4~^-8AS%{~5L^=$t0>=;I(l<9Qli@4rC6j|}H9_>2phrGIStvZW9D83oM# zos_b_i}yR=rAs4vR{VD(zg&8~0&&B0H><1o&1_oUlVVWl^VJ2`h9Ll9;kuPU$1t0p z?d5>;Uu8)vCFSYQOHbY+bO5={l(#6U^zW_oh7gVbf2V4ryX_|@~4qM6>1`8VE8@^AG`Ezn7!AS zf25b*ofOPJ1FgiA|4{EBTmcVJ@p>cWKYZ(`%J(LEhi<3O!=-9>+IP^}q3afK|7%~f zcV~7F!%KN)I<}mZLlKSjhXYm-6UZnP^(QR+NJdK7u zp3q#X0Mxv5%z&sEy(SKZNgboM7&a<~%WQu?wZu_oh|T;8V^1)6l0mG`Feg^d$TyKu zecb33dMrdn5DeGxenSnHypPl91%j3=g0bqlqvuBgluS0e;G% z#O~=Wb}`f^dI#xt{i+NZ#UhbYbM(RRfPB_B4^As6#zIOkcK>B6>lBn_B%v}sQOpFR zwRU~e73ns7ovH*vFi|f^d)w_GEB8!LXh8|P1=oMLhXX_WgOr{sIG082v@Y{t^S>xxd7i?B;hElP&#- zv1CiX%SYMfUuNtO1FEg`_Yf?VAz{P_a%q?sV)8iTcl~;+W_C5LwzU3PRutNby)c4e zJk6jatl0Q}?Biqc@2NFm?a{;V@QgjVCnRCRDUkW z6g+oqvr$*MAcKZ4{cA{jo(GuRp}2~-xn#|6!%x3e+uzPGBXtlc#RsPTPQX znY`P#HsH1!zArM2r-~UX9Jtv2NXWSj$D%AFZ2Dp~m*{)V>mSK+m_(_&v?mT|g_D;= z^fdAykFXqsh8lhnPb;WY<(!>is?UI1oE$d1VgVL!l(eUqlV$J}1K|L?A;j@1D85s_ zN(&BDyLqs;ExyMUdVCAlmSg`jEWU@hZXxX~iE9}V=s2Nsq$V`fmdZ1=qC7wMrD5%RE7qj7GZmefJ=}1Bcu8cOs1Vt2Ac76V*@56QhnS@) zR^TlHxpo=L+)hC#fNunp|5hz~X{>VyC30~Il1y%u27@x)`|m6N#HHhYK?n}s7*jA1 zpyVMK8bDAAZzYJAg)KErpZB-$_NC{Abh3+bX})f5cV2nbk{13c+x*v&5M&57f>MIb zMH)3uE3(JiC?$?e;RXg-DH~XA!!Gx1;IE^2n;Xcxlt%Cp*^_18D?u$@me>ShWL-?n z{3PZdgEz;(7LW|N4I;ae!zQvTmmQ}Wr%>EfC<8)zg**j&lAh!;aJlufIC**5n8Ujk zd4peo73xVWsCZ@dIZ9_MvY?qoBW9Q04d3^1RV0=i!>BZch@dQ> z_KMqFTlYQfGF@(^_Zj0Qg(D#^J$c4&kr2dqLXnk;rx0ZS=NSLn4EEeKe-~*Va)D)GVJvH6Fr3*weSjeDgIcE`5T|!DKDB4^G7RL5U=L#kR#L{*4qd}mejxM`7r 
z{vKmOZN!H9Zz2GL=UN0AYAp68rr$#LFLUz5p*)o&AiVg)3I7;G6kWa2SpXSc&b~-M zK@Kmy;f2M43z_EMm{)#;Yo9d5x2WPHIe~Z@$CEzP6DkWz$?Rr3ulJRBEGz+_s2__< zoC!}F!{lwUY6B=3%w5Y zGBYXkR>nCksd1$K<8UlPy=<5R^D_+Ta^`z2O1%*lB1;f#)Iod^(QX-tw=B6{NEkswP?L zK36*?UZf6=<|w!w!6s!~?_@Kdk~d_GX^?7TjsX~s(~$f=V@{#~Fw*8O4l-_`!}u|} ztDj@+Lk3?*fUK`E&fxAL1`ji!6j-(Ik^^YApvez*DzUqn^cq;R`kx?u(KJkK0BiO4 z@Dq$f2mU7it^xlfgR2be&b0ArZjhm0Lr?;q)xJliwq9k8thd#|-}4wYO#dGuN&h3J z=VaF2Qf8&KIWOTtU@y)u*^3K-wTSnd>sJlhtu<9pFesTZ(H%>gj1{px_eC&5zdw^H zef&Dv=Q;G58#@H{x%Zo~EAR}++ye@{R|aD(*=4-FID@;Tz&CY8I2ywZUbf2VoyqV> z?dhUwU4+Q~570+-Vqdeq2o7K6^y{!`%t`YspJ zg{R76g^|kWk)p^?4KzRS;nm8|#}0yi-vY7H17N{+pAn0E7C1H&6wM-jd9JbZdvSEH zQC|KmNN7T#f3R}B2E2XYT@2MXoNd>5NG|GTV>3J?`*n|torSv};hcy@wNgD9@=OdCmJ6{i8t3E4#3*bML*mihYih(9aFbW# z!g1awH}fzK;p=hvBB^8pe=$MuYWgDxcvJy|{zuuxi?0Lx3qBBa3#BE<5y$YO{}ltF ztG~wBOAJJsIvZttABW-N5}w5Q1@17Vdpl#%bQ`gL?@n%CGM1(vNPz79Kk%?oa3ujw z6lFvYBp6MNKtDmK`YH%G8V(%E!&mdeJ^R0oCir($_Mr(7f{?{$p$X>|nhOp640uTj zqdV!;o%yTt(i#oX#Oa1&jpovs=h?uYX`t7=i)##i7cfqoga>99hPkj1iqIQl4TW%f zh4$RfOotG(NQrysFtd13|7&cY&{sr%|BeZtN`HR<`q2Ld#S*GhG1cjR8#QRCR_OOE z)7bFu4Ukqz(u~-BO#f3B`pXD{g0a4JI>v=<(mdi%^}sJDdBr`*N{asDtoN?eYquMh zXB()8Zc^qIu?%B8p20|zJG&w*$Na5}N*mYU8eVxY7Dr9B&eF_a;;g}$Vo23Ql1q=r z1B<$6a&0@1W9CRZh6c~eVF<%cYe&ujZ9kq1H=T zLcePVMTS*rW_Xb2@iD(2Jo_Tc;wVigJQy3Pu$4@D{8PiHf#xGAQ=Y5r5dTbOu||?7 znUY+N$59x`)NnHm((@8xXP%iU@bY35M9mmd%3gxyvXeaiw9 zeM07D%6FpFbZ&9j^e9sYRcDuvRhIBS_kZ2xdpEoMgX>-1zYc!-AKW@ z)k~Nn89}9N4E6HD@F)o;k}qcU_+2>;irC!Zdn*{?^9`WS3q$J{D+v)~S?;-Y)-|kO z-CThYgKvP{kA#RdI_e&GS(${?K1g<3LA=2}uW9PYprH>$tR5XPlpx2o%4&KmfZZqpUN7HDbhDNZAd6puW5XDPjNnpz@k2Kf0 zgd0avAO^XP;NX%QnQBD*&R7PfKs+Tu-l4fl;x)z84JaE+N0G)nI9QYeU~W02h)sM` z&?tG7-9huCD2_M45W3VKHse}hSjo)h6O@3j#V2_G^g^e{rt?ZXMBSfmUf2ZAh9#ZD#i(Nv}zY7u}Ak{n5SLTQxL9A(DD zGL_+aFrM2Sj(H-}w+A%L18J)nWX~+;69rl5xFGY%!yral7qYWbhCKLMpC>Xv#Lc8hMLoQ z^`) zf@3o`Vzb&m+20v+X3mqEdhZV?NYB{cQ0t+BtL>LtVv$Zxep%IFRW%2?Y<%b<&ogC^ zN#;@wjZ*O2zh-&4Z%=Rm?Z6-17Yp>}cQr#`*7u zyDH1fZj$84+hmfBW>pR?>c~Z5Y{( zM0|rg6s*o20->_q5JPMR&IA)KeyD$6v}TU)ni@vx=yyfqMeR!$eT`G}y=vg_@COU6?4ac~dJTv#h>%Vw1>%T^u;0rxEHqbSg*)?0a|%{Y4Y zh}x*g;vkfrwsHYddW-y1HmKECgkO4#EK;_q)%PjD%QkR*WWOSorJJT@{ek@KaamU~ zE;@BZ9#dXes|L%98)aD8uuzKFtsdW}6_@=aIFR=AslGOTgI@g;JYWDAVfFL=<9Z(3 zetpTk!cKm=X^wLkA?dcbnrM(qTeAo>K(jMruh=PweF^i-TX1k{O`OxYdCu5f=c^xm z>7Le3>!vYtX7w}ov@vVU*cq)Q6E+VWb#3OpY|&cgJiThX8G-?U!1g;^>c`kk{hS^g zk8(?*l$0tk;1V%g>8s1uL?SsFRbE&ttjZ?7JY*`$hAmV&&5B_5tgK^}E?HEzZIMvf zW^0Lep5PV|2%<%N0txh0Jlc$Yb#?_$&$ivadhFzf8;s5ub=bX*M-fR5;QM9UpF05B z+rIx48Eih%@^PexzHh@w{Q~=_>!kC=d4Ps8jxoBjK0bOKMJ`~Yp5jJR1bR;0z0~m< zmoM4=&V|ymA2_`;jp!{An mCe>UCyTh7DRygaG-iMW+|CT;@^)?>c>@wSSw^_4xm3;@BLYd|O literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/resnext.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/resnext.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7eb69be2692fcf3b6aa75bb93d73afceada9747 GIT binary patch literal 5665 zcmdT|TW=f372aKv%Znsiu^qQa19V^%wP?tcDav+W#71H`G14ZA5vNGYwpcEAhvHg$ zVP=QAm40Y#AV2}PuX)Hr0`w(+qL2L%``Rb{f&xj6^gFZM3uPGz`cjnA4rk8Jxy(5; zXTHN|Wu>Lz^ZE0CjO33s?VmI#e`*-Kg(r5=xSFf`TA=s!z~~z~jSasNRQpx*D{j@V z1!ljl`Ze?${YKF2H+Aj4=9+H(spi&s{Yj;7^6HnZuZY@ko1iv%^GW52-d}QC54HBv zF*IpyBUv3zJ>RvQDBQDr`;d!_5=J6ODQ!J5Klt$8JMTX{)`>|^T6ZHUeI9aW7a|%T z@%VkkYNEHzr9-mIpXz)Nuc5=WzV2#$!`1thrqQf4t8V=>ZKS(3*L>RSS3lFLTE7Mx z8^XkN`;Xu*^s%8IIlF8W38s|&T6S73hR@q#C%&b6H1a~r8QWpV{g|zJp={q`?}d}8 zWH~h&F*)|)R9+`%jHdDh&JU;Zhp_I5XgbN&-A_kQL%!WCH0*n>99t7EEX9zS_X&|1 z$Sfu2jO@nN!Ip~^xt?Q79_OagItPPP>+Ks`>+FQ&+gpPHvqP8lHV@wTUID$$7X`dA 
z7!-ELQg|-U%$<*v7x~>eN4Q(xX<-P}mm^KI!xVaAt_=j)67DZTET${X#6u^mi& zXbr>82a!AV`8tax#9{l0sawp5;JQ>3l)=zYdh#$ggM2jtnzo~m=QCn?NP4g? z$Q?`$Wh9*OfMK5v9D6k4gARLd#9}U)jM%05`%CNCM4YA0xGMLfTpTG>=ZRsD(VX5v zPwj-1idBFp_Pv;&kdSjNh$0CiPsEaS54t5eMR?Q+c1AmF4f{kcdI<#bm^dWcF%kAo z9jW#PMk8-WNnB8 z=vg&sVq7PhtGktFmERelHXheY^lDCTK5pdn7V>H~f9Y{kEP-9c6;K+p`d7Gl^0}}VdZ|5!=e&BV;A%W;g6qPZt2t}Dz}14arrUf*D}Hr8Z)w(~ zd9H>W-`W;*`HA_(QlCVz9C-)F28hQMH1D=6$QX)*&QOO#v6jiAwQ6v z$)UIiO_%V*eKfDtP2JQRwR8HaQT@)SwDjfg45QRmsyh7{>S=s)rcS+#l71U{<6mc% z3OdH$o->cm7a*aN#j{-zO=uN#LUUiW$&{kAi z9T{%zi%P%dE-Q#cP_EfG-IlxbROBR91YojZ)n=Q&$4*TA<_e%V9~YNFdQ>yc`-|`i@Q0P+ zTihDG{~lSLkeOdNbVKB~(pF($>6m6#Zcl?Dyaz1fDRk)t_72@nDe^2`pS?m%EP0S0 z#!S$u5R-rf&o0fvq#Grh!fAk10HrzZe3*{rz${DyP_m@NoF*A?KIETHdFb%p;zIpB zuGohP>czCK0(hjr8zgQ!V^{{I#iAB`HW;iSy1l-`o}m4+C|-uD-T4e1!C zT1v5n<9q9D!5{zcu-4^#f*p=ykm0|N_{6sz&TbyuOo1>`LwfquG=ht)m&5IsY|(dZ zKQ3)wdv6B`ANVg*LLl?)y`2YuoDdGdwoCCyPfV(If6T*$nB>I>VMiE6M`xH9%B3)! zP>$_*#KY5%F|ss5oz9@*B$}W+ge_FoQ7yvvY@Y!c@b=640g_s$S6BJ zWD{H9_LE#-*XLR8YPVp|a7kK7uo@Kas;_a zxpU7J?G6?z`7QPi3O+0mmE>JpB28?=yxc6x3*>(f>|pZj5FN@MC5N5}@&CGpDI$Hs zup=&_6#D|Pc`gu8egsOsp5ipnOzdhe4Lo3B+5HQ?!PbW`bzPwJjhIh0^B5<1$j4NU z*>PT(mfn z#H*j@Mlu7+1tAL)V(`dUTXHypZke8Nb=>{k)R6XUn#+&Jk WI}}FcvsHcBSk_nd)r$GxqW&L;`67q_ literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/revvit.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/revvit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7b223df1599a50621b280b1e5323549149b696a GIT binary patch literal 18062 zcmb_^dyHJydEeYSckbMoot+)-uDB#cNmtZ^I~27P*S2g^4~wKoTMk955+&7RE0?o# z@666}9?Ns@EO#ffOkmP*f`*FIJdiduuuPf)LR-T?Q3OTP!fjE*Df&R4K<{5J0wiG2 zxGmCDv20iU{k}8L<*w=$ndO{0_ndRj^Lu~aVS9Qyo51Hce*B&4@Befn@dtbu|D^En z629RdBJmTx(MXi^uTe7PIax~L*KAnLR4HY!F4?e~=~BAslpJ}s8kuIclxzeX@j&xn>0ooAw9q_M zI@CN|I&369oA7h~^zDQ{9TaXQOGogW_h;}t6J+uHAf5~U0X!cF@@q#?@=!3p^YC51 zH|x)#d@h*1mAqw?9`TJg62fcNCFyh;h>a;3etF4;lhKB;P)k5lAL}^rSGoRzj=gO69 zEA3W*XZzJJynNx(8~031plHRjuXY+yeKjciXQL=+@uj$+=fqyUMtLhxaX|%Py&HJz z?a&K2t2h(2z2&I0-UupO%J?UVhnMgT7m);slHn(CkVYvPSib2eZzr$~Dda6bC3)Mo zu_ftv>h)lQ6MYSPQMQdl;(RBJ+D(^b>ny94IX$=9Uz{7Y-EyPRzTqyOExOf4CtR(! zYHl-Vw$+wfN5i(dQC5D|#i!iz-Nkc7w~T$+(EX^O(r&JIqQTQfpu+k}Ltb6Mn0%Lg zxHVPw>)0FVs8ViJIt_VeDJvzfw8Hhe3Ve5EOP*JwXgxf0>Qt>Bt#($HFvzLb+bi|f zIn@bjwLqQ1Iu zN!(2siO5(pdxmdthwmi6Y;IfIsaj&&?wKzq-Z{LT?xlNHFLle*HNEs&vX|UQsHb}N zj@7evQg;)*6w{h{*Dw;-XCu1@aP_Tgrh0C&e%ooyM#fIMm%MA#jGp~l21c3aLCTz7 zV$JC}?_zE}vzPEPm|>Q4sF^ZH6K9_4et^Bu+a9MlDm}P{wi_ z?OMIEG{md7bg2Bv^+9!AG7;d=>1{QltCQ>xvLPBokBDu`6M-kQj^{h$$^;zUS( zLPGgQJpyRQ>Haw!MJFL=;!MvQmf@IrBWoNq%%97dvqr(3H?pQ}%$nUfKnf`Vsl!tL z-3p1=;4_6x8ec(;?;{f>){F_1Ft?KcO7o`$IkxR6*|P=2lj@dTSU@r%_z>!7@cV7%>wCfc?rvWf(Tv%LbM_jG#VK`=?G}*4)5H^h0M6<` zmf2Ce0nVy1BNMbb%|LqjDLk6&8AJ$0qS(;$I)ic?PYLV$Mag{X5e{o*7KSoTp*wGyz8BEqhFKu zJilFGF^FMQuQY>bwe71Dob4x>jOHylPF@{A!qW&z-<*VHm`>I(-Mk?+6%Tr)*Kk(V!ZSk>Akv@*?iIR=fbrrP>Af11 zqYk(*%E2ulw2g2UIOS2?9JYV0tiV=+hFcy8psv)9Ua1Cw->3LqWwng?HimC=Evt3k z4_c$agx+A<{pE7Wl&?Lavb$Jqw7~_>xL_nJ<&}D)9&K^o%Plw1?3)W;3EXa=+JQfs z1-p4UsFoowpK+gAdS z^!V;#w6z|*aQw3uF1>lYIP6~ZneqP5>kj#b^s3dUdRHq1A9XxqgIu~iHiWzbDQz4V zq|8Mbwztwz3IbvupWHV?EuxOo=;3rHRyBgQdaFLROD=Yq9P3M*(K{#I_Bt`S%)@vE z4eRB`U^)HWV>^8oeSX`HX_K?k-e6_ zWva*V>p-%H@=C>+Pip{;HGrCL-Z7yn=L8Uvm;dO_pZxuu_rLS47pcyLkzbrvZ(tmK zFi@v}wl{)$Z8Zv23FUEG%K>4Wt%vm%warSP2-&fHNvLpa@uhfXv%FDCR2zRZ`6rh13NE;12;Yz4V*;T!Vg^G4pt;X4Zr%ldKm_($yH_>?C! 
[... remainder of the preceding base85-encoded GIT binary patch payload omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/seresnext.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/seresnext.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f96e65474b963444f2f71784614542adefe6718
GIT binary patch
literal 5942
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/shufflenet_v1.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/shufflenet_v1.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..abbea01609981c5bc9a08904adc0936f5efb6453
GIT binary patch
literal 9378
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/shufflenet_v2.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/shufflenet_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2e29553215c13821f18841c2ad46c1d02edcaa9
GIT binary patch
literal 7999
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/sparse_resnet.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/sparse_resnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2b0984680bd8145729d83c1591d4aaef98b99ab7
GIT binary patch
literal 5739
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/swin_transformer_v2.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/swin_transformer_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be9a786cf45e2d915b1f5b07613c9e1e8aebdaf9
GIT binary patch
literal 17428
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/timm_backbone.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/timm_backbone.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..16168aafc3fd0a1cb473db894ecbbab06c605710
GIT binary patch
literal 3925
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/tinyvit.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/tinyvit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..65431a48ee3e844cae8e850a57fb24faaa33b373
GIT binary patch
literal 20600
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/tnt.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/tnt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e035ca2297849e0eb560314f7992fdaef33dd3c7
GIT binary patch
literal 11066
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/twins.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/twins.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e71546e2e4e2d2ba2edf244e81710d8e8272e261
GIT binary patch
literal 22166
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/van.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/van.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..28cba010a2c578ffa620bd9cb96892b5a92af222
GIT binary patch
literal 13384
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/vgg.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/vgg.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d817376ea0de624eed72377f8c7682e303996ab
GIT binary patch
literal 5016
[... base85-encoded binary payload omitted ...]

literal 0
HcmV?d00001

diff --git a/mmpretrain/models/backbones/__pycache__/vig.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/vig.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d50fcd0b250f9b6d78b8711fe8ee4c155ed8bf22
GIT binary patch
literal 24289
[... base85-encoded binary payload omitted ...]
zhu!8<1Bwcl209Na3iM3#qKj|#q9;XF_uO{V(}J2_@BaE(^b=fr4OG;XwTcGEDVRDb zYTxXF?=K=VHIa$ZVM|r#qinMU`C0MQ5E`msB%mmg6_h!2zIh=sFHSobE|zP}rmv7I z2WOWm%Aa;F#<>kN6pf;B@&?fPdc2=T#{q?ZKDXQCU&%c1Lv;@duJN8#%FE<8fQ*Af zDC+CT#1!?o33^$B4)#Dtpan8NX{)~kg5`|;Y-+_`Nv~u=(AZjLovn~U(q`L`rc0)p zY$JEKu>$Q2k&zvM;pL^TSkGE3&Pont4g?UcJ(KgYS5j+v2o>n%^;B}NmHaahRtfzz z9njyYUiw211MOTpKWljz&$&W7HCMSGLi-Uvbl*2M<<7vM^jydtT5-u}b*OVYV%$Up zq`ZE5Jl02uZ|;Hxu`PuUNXWf5)QM;2eLSk7;9CM8_14J+n~1pSgGwMao3u`6TB?V> zH$7FtObZj+6Z%50EF~ogY17s>7ZO7wMpjsn~6|%7PRa8unl&w zh}0M15||qEksY=c7nI1@`=ng?i+&|kE>0!})Cv4k?_}~WCOeTt`7TdRJIz=kM42WH z-5|0_`jJ_l&NQ3lns;G3U-g@zubAJ+G9zFa4}!)oFQ73jX;r&0i(3Q-kmL&`a7zJ) zgTGOj9<*FDFh4BXu&?d=6ps4qaY11>N!7Olc^OuZS6Qr4aF$Y)I z@p5nm)0ZGlr)hK!M1_a=G!NBamHXcEhfmhvTX_1cGodbRukIOjWK6fdH|IlMcs}Ik zq2q@^>|4vucuKEPeTcq0nW#piO0|;wm`}xryh~_=VDnXL&02@Br{z$Fw}cc%5>lGm zMmgg_F_Q4D|pVMbCtXV4O)U7?59(a*|R?Wkjo*Ygw2^m}HcsKMKnd zE+o!8Z0d>eS=NUd@s;nw5qR(#V&~4S@!bFO4aO8u_3QP73dF9W(o^C`kn&!@5m2u=hHVzp!&#xn?V0v4n)Wg6Kp)c9Xx$)c30IYc?NImBsnxW5T{vG#y!d5FmmFNf~q%QNrGkK$9ECW)n9i92rn9zAX$)FF4ODO?VB@csCwd?qrg{XC{gCFb4uDb*=9mhqL5mOBtC@V4-wN}eU=E*2Wu&Gjq z+aaRAhn>MivlCNgZ@u_Gb>4IBaC5XT(mpG60EIE6Fh?CW`J zdEb`f7_*f6FfN4vm{MhAdTt=oBs0V*{v3`j8(1YrCVqQ1pbAXbAZO;RT*}J`BfxEd z_DYG?qHCOua)9Y-)dyS6b5o~;r>lf?4kbpZ3-o~cy=0v4^M z#Xu|{v3a1X(;7rc)%_+AmBmHB=>d>Kp5t)bQv>Kg@tva_kx%%7C=`ZUU=5jO!qI}) zf&PP)Uk{=zfYWkyb}n)R)MRc5Uc%yh87RxSvOJ9Kp}WXzL}qO(PW%;g9Ndb;&Xi0W ze|e}t*ev+7Ujh1-9fciJ#4&7IFN|8trOxDa7;_>O&m#!=GN0dt*6p<{;2$~>S>!1;P!H;GDqn*$G3&#wf(-A7b$1pf>k`avZJSlxIbf>I zPQ44$b4xeZJ#ftmB^JrAedf(HQei0h}HUV8aliC(~;RO$?@WJ}E*smugNj=kgPos&oJ zxchCfV@;gUqjThXm9XLx=AN87)u(7EP-sDLeqW;N)o&n$R2C+edgjC&!fF5wBCI&V zU4i{fnFN6+BKl*iY+Ghb`vpxD0QJ+V{%5bk2w>W)yQma;1KZ+UPgN{sv?@2yQO?AR_V&@r1gBr%;#qC^4CTmIb2HJ7lgt&N5Ny zW6V+HNRt65KJ^KlEsu2HVr!x$fp=VK8Vf%*(PDv5$P#M{BwvuD$pt<*+aP0t*C1qA z2eM?B?IlAJok;dmie4&SxI=&HldL0^sZX&=8)4d5?~t1MY1WhBq8!Mze6CVo@`dWQ zBDS3vn_w7?6JoQ?{a|hz)>Y3~CHDGXVb`xuXPlTU2mG(HEzf==ZRJG5K6U>i4=;bX zlYR4I_wJ*2O&&dd{McO@_Is0JHy}#4S?ppFwol}4BZ|}Fn+kzuv>c|>(j`!!63t^w zj&Dn)2RIW&Tvf~z!0egpMGF@2_2p7$NVXJAflf+T@~!`!q{OGwNH*^*d6wse=KdIY5qr#3!Fu@jk%WcqoODmqP?R<#b= zNUJ2%ZE5o;M}%sG7Jtzi#~Q%$j_zo@38Lg=xae)8@GB_Rya57zbSjp8AAID|sW(rW zZ$EZ)^61gGAHPeapO$;CZxK)kB^JSUq`A==Frf)y@`o9?ikUfRIAVVqHX+JJ$SOfj zeir#Ib&5!da!+7&z2O6J*2*rUZCotyE^=L5wAx?fb$^LTzr`@^ppl@YTM6KMIg!*m z32EEVX&~%mRJSHLD=GVlceE`fyfywamL1iAdM*e7jlNbjOXf=wHi0+ z6}WF_0};6E$Uzowq1#2)ZY5hE-MYWM@--yk?q40iL8bK-d;g2HCJmc z-y(0RVFw!9C!Sw%YjAm6p$kxdv8Av;0V_?c+OHDVEoWRRhbMy6Wf zqm9M7kClI1`>;|i6=g7e@Le}CMjz@SAuau!vK4=PfP``g4^Yfbr!Rk4!Y{yE@ z^?wWCntgqFv}bm=<{lf}b-0IW1$-c;HO3E7Vj4#^)!#x*jG}Lp&0<7JB9;{Kz6KxE zLZ)agkMusmo1kS5`IGFY&k%jNv|F)e6J2wqhri%GNP4|Gt+ae?O_{(-uqNf@z0_<@ zJUxa*=BCr$kem&AMVtWz#CjDM`iG;zhc5QKz_R<$MZop1!y;XPTm2lKB(fiQ_^}T# zQi*dsb?W4|Klj)8`>l6Z`CiFSzhlfv_o1}QNkaeN9H2JNIzmQeHHtxDT$36=%D^w4 zV^LV#y=)i%b~N?=0y@;xOXDRny~tF8jYd0S)LT{ug&(lAxj};S8YRH_&oj4yOrK;K z7p&A*m^{s-OANHULr&suWb6W6a~&d=L68ss#B>?!8{;mWQm!OF&TmK7d;%weo!Lau z=;(P^aEI)M`wJxWrVxwx7&pS`I1tl=g?M0J7$s1HxvudpICq=M1W&`2T<%J(IAJz) zWHm5{GN+wI84z7w{%Wa*T7Gt-9i+20cqv!pZ8(cu4iPk${WUFXI2Tud{^aQk8-;`3)h% zHzV~1H)CwNyewd|62RvuQ;CjY7Ovbf62wP!_{7Nw#KeM-8#9IyEo${l`xPbr_%GrS{ zHfpnH_O**^H?Zx1fzrYizEO#iipRuYk7s+;iZTJ>6g&Wgixtnqnr>u0FeO8;@fHsG z91i@x_3kJ;Q>!lFIEh$Ao8fYijcN9yK`dg;0m*79ELsJK<``56 z^qUgS@@~Jyekfa}M|wkT%Aw%&TM6*blD|KQD~d%ev@M7itb)E)uqnV%_Nrx%Nr{uT zt`mSl`Snc7My=H24CU7gDeG0+2Am|aA@f5!jUd45_79eaWB%-lehy^v<3w~!6zUt0 zwy|Uc17rgbIu=%73=T@vfs`=fY6v0VR|B{K!kxWMHuW~@u-(&JCNFsA;Iycop!n=Z#GBesyX3TN# z;QuB^cocPG7%#^fM#?d=U&~n5Ys-5&qqFh4z7;12*LQ}AtJtxIl$9oN*QpJ~4u>9t 
zT}_QiJsGbfab1$@NYsI_$&=M3=Gs}k@|4(vS*_vz6>g#l`=2uR8%%za32Cekb^0Zi z{xXx!80knSCzyw!U0n)$v`fc~{3eHG7*`eH*i)+^ViW^|$cv$5NMcA!NStJyX$ZIky%Sfb26i%e*&N)#>)(M2dNL;_ihP)v+Eh!?l59yJEz zG*)9!Vxq=qQrW9ig1Pt#@a&dXq~a^`Sm4e;RXrRIU`2yh4pM1^skMSAEIA|UM6IjY z_^ZtQVz-ATWbmV*4TB!ZvPn{{ zZDtX?VmXOC5Gv=xe2X-902yu{A+>4Yng{jioEEBi2vz6$ash8ZilgsGTZ_NolSq2m4XmcY zN-`bD2W+QvS>`o(FU*!LKM$HWX{LvBHB&C+6e?Kyl3l`#^c8cc^^G~ z_wjd~F2^^<{pPtyJ`yg?6v3p2%zTwTEP_3EDOsx)ggj39TSHxCVl*D!ssN@@h zP$%6-u@R|Rzu5OQ=UZwaaL>;p)aUeRhB7CC&G+*CPoEZE(>2{7=eVx>P#Zbt=;ZNk zWpA(BOjmN#&ZG8HM*l?ghg)ycU-way9{XOpxA&8kdeAe^9e=Jnz&)LL{vzbEg16S0 zA{Prfoz8nr{%syDY0}vcrZ+~?3GNQr#_`W!FU(^H zhFnJX`3>I<1iK7#6Ps}$EfYETfJmx%FPm=R9)fVjh7_zFxwEwkz}C0F#nHo{IZ8Y& z%?QcQLYFLK6hLtobuV0j>Jd*8dXI2IEp86GtwqW&Y`NBwp4CupwPpRnTIls$zHv;a z5bCNQ@EXmTYrlqs`??v=q~)+$LXo-`N$oyZE!SRvF-8c2+6emssEZI>2{okJZ1@Kbg+uHLR{OC^i!F`KJyMcE$ssJCz!FEA7fp4bQ) zMfv9uLik1OlE{41a5yW}`E5AEdO6?NtaYMZq_f54++&|aeagTxM8T{V^^Cx92 znCcnUE|@{Ni6?w7_RY0RpEtgUkkyZvXWvQxoPbt1c&6C4E}Nb+E?+Akzz6Va=yFz< zty2fO$0ZzJ^sz0U{lq=>iJA>UIl z!qVCZMxemAUQa!QXa2YaLvrXvsey-+kFkoR*70_PqnDjk?AHBwCLCPbi5|wh0o(^h z=ejLxvb)0FmyAp1CF@e^l6@(CDRU`%$+?ufl)p4ETYz~w^r92);T<_U0Osf$jRS@= z%6r>GNcRc2Hadm%2PfeV_1hpKq21-9;$OxV$jxqJi3Y<(x8jCNSj9RVZ%(C~8Db|t z1joxck=!xkiUk6FpZowOb}0SskZU@Ji=L|Ap=q7Z6?| zh0lR_pUVi!Zmb(WZJosiH}NyLgaT8r0d&xsJgV2A`u&e6e-C#G!8TodmyeELV&|jA zG7QSOj>j~445T%7*@&p4)GfV zid%U=lp3Z?5FP!WLFD!y*@OHK0P|JrdztGKH*`@0Aji1 zv#ar7!Up>D0;E~h;w2fap7chJG8K9>*r%i?M#PcF#}n{6J+uBYyO0??&s=9F^u)Zx zGNP|CzZ&*?ji=X{{AVVd)+jC8^E8h99hP>0jw76oXHit#L1e(e)^8m0N0c~!eHiCO z#Fb%*@H&uBi(9TT=6Fv!#yY?vNWV8;%2*||ETnc@H(O3A{|A|!*a%nXW%oXSlq^VL z^B|6w9z%j{8XbTHQWX4z&HpIT^7Oe<9Or~h7|@ck0Jc~RVeu6@ zEdfrl>J{{>XXH{;f1fCP~82k6t<|O%9*o!&gQ965KDX#0S$argjRsSgzz*W@P(tin0aELsjs5|GJ>xx1frJN zA}uX8yku0wHzl}>Hnt{zCV5}NNFMgH31Sk`e8p0e>E(XD2yR$p_;>ppsyX@bgdvHC!Bct%4CPR@c zqO4wO4Al2=rhow%Du>FeF^Kl-EZ|6NTyVjsos7@%y}6c)Ph#M``JDv3_Y2Msfqduq z=5n*E=BcX#+r!>}#Lk~!LekMs#?H52%jJu_h8DqbWPra}`P;~ySlw|bAT>DIFEV*n z{I2tiQy6m;udm|h`whlt!7m}xkI>EG!zEubk*2|a= z{{EDxp^uso~@!%PB#hXd%{?4TfZ#P5jXY5Xqh5xFCd=SuB<@5VkH z@1`jK#6@^5H4j6-^t+HE@o6tom3%Jf&tbj3&s^<^|7CTz4kdXgBMgMc@F z{UE6U_JyFd5u#@+qv7z{Zd|qwiH+{_GAp~n zLG&iL*>f%UdAnEkV19OEp2pi_@_#S9!+q`Dj6C?XsrI$^t=$lh8ouF#^1N)|9|vm4 zF&K%*V4N`v;$Xn!#Ix-T_qX>5c6!Ik4RIegw#VBe?Ol6}&~5KxD=&|k97S}56NmKW z5z@D}y|Z0nD;Te1R|dnIaMidsig>4VT)vA)j@)aPm%rOs+4X7jtc$mjCvEThB(^WW zaru2>lI5G*c;E6Zf(P!NQvV6$-R0Wl+qVM(gZRUz{2pTm1yuiqSA!D`^(`hMDTLz+ zJpC$?J|7g^BhDBhHTAnp1g7~t=C;GkC<@g7K#S#rUE}-5L-EwL&Mp7v>)q1gE&mr< zTsy|(?MQ0(;f)|=>by=~Q{IpXkp6)MAEAYH`E;B58k_thCTx6aVz38T`3V;PJd+LR ziohu{EMH;mXP9(=HTC;AojB0NpOmuH!-B*%@&8~ z9`ief1f}wbXqOFnfTXYk7yB=Ip{#KrHW@%$9YE`k#|s&3TPJ*F3yfvM$9F7Sz%ALs zfjBqdkGn16_^@}q#tfEF!}Y{5=_ z&`B>!rYsIK@sk+t=|Q$`WtGgMP{9w_C=P~9vC3w0f+HsrCS)rE*&WU2Q2P#Y9kw+X z%cY(t`%zGqlC7f-E=wMa;7Sh+B~O=U8Sy7Iz}$wr6RUZccP}Edyr<{x{U}u5=#Whk zU`Y+JR~eo7LqquHHogbPqyq%09Xu6&?0`V3#ImRPVBNvsWsmc5Mvz$^?s*&yE#ceV z2)1O>eGlnE2$OgIITGek^h)fe^cH^G4U_&uD@Ov^3cV?Q&zTqGhF4FRCt=Cmi_51b zjzk%REdb9EszZ1Q1UtGCh)#5++-CO zaS-tx^Z0afM%573qGP9v4a6iM$ibiFB6M8|tWA=P#ydBOk+n(rN(2pmf!C%dy5Dow z7YAsegRPiD@IrE`xIh(+^t4z09N>C&QRP^Z#^)RAftHKAQ6L}Um5qal5Be7{e>nBZtiNU1y1 zlH*KA&ON%>yD(=)a1u!&#ob^0j2~qYza(6+L@K571!vS5bav*4OXH>S;l0k@(!SE( Y(jNW|-&i_OdL(l*Xz`~qw~m?r2hkafu>b%7 literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/vision_transformer.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/vision_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cecc6bdd94abbcdc7e2375d3b1b9807906a88b14 GIT binary 
patch literal 14318 zcmcgzYiu0Xb)K1>-JM-7mlQ>jl>8dg4{2qR5^2Y={MNpt<`=daA4EiHQegq1T0!2_b zNt3wWId|WroU~}W#N4@aALrhC&bjBF$7OwVG^^qF){nkke(M!Y`zC$#|C0Fl0G{wq zkO)oaRjsBMb)E7?)hH(TZ5B;_Cu^xPSwM;R?-)1#i8!3*|a>X2fC#$2i zd@;}YRCTO2UK~e0Ev)LH+C*`pcDQ)BHd&mk9Vs5EO%?Dduz46^+dsI3q@+i%FGdqbLy?B=}E^CFc+nBjl&|~YecXiVXO5Sq8h!Zcq zxEyCMxlw8Dh1#koP?SCAhTh9{v03%vta@Mc!w5Y`Be@xroS-giZq-3S>g89Sf8pZg z+Xg{S#uG1ZRwIASbH&*x@`A{(2XX%LHUFiHuVI)MH$zXvfZ2VJTAS^ zZ+cFn9y%V)AOw0TLI+Wd z_FqDnBI%hTCDONaye)c*OgZ~0t*ncz7`d4#CcTu%iP4)HP?knMFUB};ahq{*=%!W7 zhzW7{rdC8hQ!9=Dt&{Qe73l_H8A$Qu3qh$aJb958=KhYUX%}i(MGd$JZOq&T+Ns{= zmD_$`f5Z=gCA$yALN;sTcUD%yGn}I@#)~8TT4-&==1Sp|*4!ZQs@={s)|xMb7xad*(bxu6 z!!86~lM|NQs^|EDA30lYb1*v4RU|W2-jxvcgCg7Oj~f zE!fX{WfyD}qPu;1q1!^pdc%=!)ENK(FUk6<6eYlvWTMpV&90YW{F8zuh9PdraEk#bLT(M6Gztq_=DD$^m5==H$T=pwJ z#X4732*3%Nz%$2Rdg0=0#|tYfy1jFjCG?mD)d{ruC?&(BqFnwYia9=9Rpvx6-J$23luYS&`f@%cK(7(>N^!3dp&JjT}Z*seJI)(VC^g89c8-qsBWl0rJp_M~o{=}Oc%)7}4Z z1})cF6vU&{6FalF?t$is7o>zB&&OuyRm%$CU1-)Dyp2v0g>k-%H5O%YUSYZ; z+T<+tybno{S4#{hAEZI8&IXgy_5 zSz8THt+w@s@YEKcwhkJ04(x@-4f!zOK7uEF8p%XHW#kR~boH0zxApye#_E+Ic^LWX}y2<;H zi<5+r#g8NVT~fSKM)RIN{b68SZh6N+<>LhS{YWsY^Wbu8d9U6OUbYw|q}Jn}LtxtLDTh1qFK&pHO~dlg>7X4I)si z*W?n7%9tY>$+JjGctVnqc|EV^@J#5&8|IIi58jDd{nITG?m9mvGd_q@!Z{=)R(FBF zUA?96;cr^oMhV2Owws`mmJac2piBpr3?=lSzC}=t3Z^Uo0y7xngel)$$@O9Kr-1-c ziq>gt;?hZdQaAPHeRo<#yL}-?(k5_gp5w$>$5Dt!KIb@BH{EKxCha()UUD3Hf+j^< zF}5I8vHPPN@(h(dLkZzi5(mYj?d@Oks#SFBF)LGmkx!zIo{+#@uqZI~CR zu|$SzvSeHg)WF0*4U7u`lVYF-rUhyy#Szp@4b)7F8PpsV$538{t16)5Y&Wdv=J0#AF=kcBpFNhcMJ}fSZ z4|_3mtT*_UoyAv?wPJC++M$!F}9 zvw?k~=2kr05ABcEC3Nn^$FDCwPI(c+9_yVc$|S-zXm@zkE7dEcpF!({C3KlAB>U;L zC~AafPMvb)b$@H2E-RBZ9vi;GV#J^Ad4bC4zI(%eWrwp%d9Sh3x|7@J!8 z5p-vmhYR+FvJK_HMkhb0%yqSG(mhLS^*XA`vR<=~hmdU5<8$`$Rao2fuDY_q8R7ZS zeB=i=sFXic+W}Z^AljAOzdJ5XbIyjb7p~PetHNG$w_qB0)%rDi!@B`1cL2(~eez^a z@j7|3-{9>Uy0E6p$|x3C(x8e<07!FEhRh&P3;hlf&moSu>8B$y4@XVpiRF)sp#38ieIZZ# z*lClTA!Lw~(Os*D(EzAYy;j36IbiJm^(-wt)>{m$t4m&3I-FBarcU(8+m`KEt_ z#m9RC_`!h=5wH?5p_*#PV=`!yKH399mVqB&CwMTDI}SLYUO97KQJw+bh1b@6>~Lr% z6>o?Y`zJWRIA_;9sIiPksKY=qb?J@B`t|?BLDB<30-}G=VfPErIqaAS|67iUj*-;$ z8+>%l17DD<$aaMo@*n)f?bScmLB|4VZU0Y3dfA6X&i>6it;(gf@p0f*l`n(Vs}xJ@ z%7G2R(7KMkn%fxUu)$Rlld+$q?LMLX%-`Ts)X{!!y9~nx`%=||l*3-d*p4IeLDdG<9Qhaqow`^339`+Q-LgXV1jqH!_%E|`w%5@j7Q z%>5)4)Gp;Adfr9iMSI^+bM^=4?2jHapu1IZh6k-w;UI(ybPLYe3c35*{?pzh25`TB zxPu0g?zQf!knaL*BgYR))lGq2-@hvOCI|hHIEcxlRd@}Dmw8bkbzpM+ygo#z9{Pzg zFqYdi+{cgb5|v<^2k=YxM872og9Q&H^`0UF=jDk1r09N=sq0zT+a$-NIc*fx1v#Wk-6VGgK z4hQ>%{;&My%zg)mK-Q!bZ+6x(ICFBN*BWrClNvWXdpIKp&$G5ls(heIOLK9Y^|K4F z8yq%>LlqeLsfO;j@04fvf6)yZ>mj^lqy+oqvbb%*^z$`%IAJrhpDx?eD`nT!73-_; z20)hHCU=^zA&vEFoMh$MN1RBzJ%ftd<6Mh0*4JWlZL{L}e5IM; zm+!({e}fv3W$bH!(M%~lyqW3hfHVYn%;gF zKRJQi6*+}eK8vK|bWM1TP;Q_gPPcc-i_0b8qx&ONlLL~L89s)z=SFHBW(N_R9+3&h z(OgU4P41C11g<1FMAAF@mKHwrAp~q&`j-%@?Ua5=`?9&Cze!G$bW1-9SBhf=M`7e9 zzM^kWMg%Vdcu%z^$y>dnZ|LDUKrvgB=xK0I@@!k^Z;2%L&vHJ+`4M=a(F!d_BY3vq z?bg=en}+{7(VEC<@Jpw+kF<{L=3C=D|1mM`Ypo++)jvah#?g9w{Sa~!$W4e0!Nfaz zRGZYcr&=TUI=rRJ&qkACq&0$dk$3%YcTK2wj*!RQnQG5F*5kkF1we2 zgGh7k3dUL^Vss-R|3J|C?-CiLcLye*=6QvAeBaaS-I_x0yQgU`9T@k*;dCFqZC=In z`u(kx7`sK#XMw|H^Z;-ew~`foJIUV9sFmCyiX8efLey{S(Zlo&N8rOh zYiv(%&$MPlW057S>1h65B_klRLEDM=+y9L}kD^)jCwctk*|?-$T(H<#;wHx2Hir ztQU_Z4l^ArF^TsW;ulA0E{I5E#F1Op_OaG6(DM+L5jN1{k=^-dfhl{8C`+ZE)2}|( z8pY_RS`!$z7M(`q1MoRAvKS_-AMa~LD%0EQ2fT+?~>@=re3)9tpX=B~G0BHQc92z}k*2qLb6C*W*#np^TKgP`O#@Fq2GRa!(^5)^-TQsm+Ff zP9#~pk0_mi)q;4GPhDCF8PoB&s)MRiuWs^1kIXBtU2(2_?9vOD(CyQ6%xkdeQFtNZivBk?~Tkl(yQRA z3LN*ib0A3JgnmaViG%Qv16S8ZJV9Bgi2e6X($A~dT{|enmjdWIN}RxGlAQ1D=%9!B 
zz;-}5Yow2)h#xd!s|vC=+>$5hP>oZ{`xGbPW~z8`%55~fK&Y_9=zu;fNoydVx5q|f zY+YzO>f>zBlvLh&3em)QhqYa=txgy2QXvSF$ShmbBCBXabdpz5AU{vZL2UIKRIx+J zO-g>9l3%98Mgq*TYXv%9oK?yyseAG*YCy5LIPZla1r=cL(*B2+zt>}gNXMUE80&tV z#0K)N#|hDpXx9Zd95*4Beg{wZ7f8k@vxcRQQ@r8bykQwL2@AQDku!1>flH7xAI^RL zGxgN>vzc*}PwH7X^-bME%T&VDkHOuaB9}kncj#@Jh{T}RyQz0`iM)|DQi#nMXpwr? ze0LlpwVf!_GSNEErDaVTg&{RmEq$4tGG&mPk;3g!qnc&j*1E|J4X&13s~&p$a1 z*bB+YCxC&20&*M`xdxf2!g164M5PlMXVu}eho zpf;}S+oYx%;iFKT3|21^>xgj@Zl*O{Wq4Gz6TrqN=`tJAGPOhRr#bDyK)+TB%3MhJ ztD3dsb{a}ZdN&1i*lJn3X_0KDpm5Q6Xp3C>%Sl$-tSA#@cSpK2$jv~3*})&rZFD`q zor%WOJfQTDxK!cHX1lvItP%W=F4+R(F}1Kj}6!M0+t-B{4|FB6fqvf;w=aplSGL= zqu*^HG70emkw@Z)#gk!9fh8@7LM8f4J@-TNKbjBjXVCqpTnX^IKRVNBvof0!iX!No zut0RuQxQ1Rn-5(e7fBfh8aqDd4h2PlaZ#)WsqU+5cF>R*d9dWZ1|af}DH&QIA@{4a zKpHcjXwL4R?C=H90ntU%q=!UTh|960ScB&vFETLdkas9Gb`$Lz1!i9krXWm;rm7pZ-s>IIT@ z1iFo)6or$>!5zZa9+C`1euXMOOvw=Gzl5^4iS%@o(NUhoahnG><1k0w(kJbe9hOdSLS z)2L&Ia=13ZcZFdenvuDlgw3v(5#FbxsOL>UOA8h}h>HjJ(rzWy#R~{eSa);qwSht zF5DRr)}GlOC4*ic1gVo~Pc_+FV5PS4KC&!*%#G2eNWMm&O!qS7_|VNzZk>`pM}pgh zJCr9n;@~5*Ee(M{vD`R~3pTY)NO;=M;1z@t3&}n*8EbIpDQ%ve{_<}K($N0?Ym|MP zs6#G2Qn|BGgr|rG`YcjXpAO!=15CmO3xpBh6_S4hPy_TK+H~bVahakIHXfXG*wEM# z;dl#I{b#fmC_{9K4067S#3w-~+PuQVMw%5#Py)12ZFY5$+Qa?2eWhuXo`<%ogF@CG z>tfil(BJ4R=!3u`_Pz@l_nSb)OYKNEbMyQ)9}zZnK^q}Bw-=gdU-Gx{J3DaEpGsJ& zGB!H*pec|?3I_$8RfMc9ynFt5?#P--e;1z6!$s?|FX60FD1_6B z0KmSlwJ$x`CstM%ki#vWIlZz{nB&k98g?NQERK*5g4tYE4xH!?z{r4Avz%W9+%7o2 zBhC8_|3ITQBnwVZzb#`~NWP6vY$^7Jkv=IST-1%$l<*{l6rqL&{#T)pQrqqys5^1L z>N3)|E;tuRI6A%<+JW&HX}*0ezel^#29WGn7H0A|G(||rO!j+7)7c-8jI>Pa$B>Tq z@0X54u%O%hJs=gPC25KB5+y%F$*Yu{K!S_OxHm+qx@=Mz9~i$txrZsaOvxEadRO>J z-HmgkUi&4ueb(wi9X+Hf@clkXLPcJqz~cVp^|?zpBO6n zTl2ZW67#Rjm!Rsf-$IQ?sk%$t`Bn!Tt|xv+d!6g}f+Fr2-;*WM&06=}otL!s!rgHS z!F@7Xm40a>TtN7hyHzOXhr&oPA!3@Oi2q zrGS$|HU|BFmmH@t4^hQf*P(~m;2+X><$dhHj6v)WTTiPN&(bywk0F6t2^BMkBWWCI N&bS*7luTzz|1T6l51Rl0 literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/vit_eva02.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/vit_eva02.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f44852abaa50dfba7d1ecbfe9bbb4a241bdd4b8 GIT binary patch literal 9992 zcmdT~-H#ksRhyDjR;K8@?zj|CD3YS*?ri*fJb*<--#NG9dfis=eD6R0toILB73HrfG5eH|xQRFUAp)i_HB{QF{MXu= z{MY+N+fb=Y3rl^oZOXVFmit!Q>Q~wonKnYZUu{dun6%9YVVvJu_`L5DaX$3#6 zv<+IZPnCA*yNaQ-P0ZXD^S9E}k5Y{HQIM{^A3wO`HL-*54Z}2eea-inGpXR@^LHMj zqI2!GlLS#WP8`r5PXxMj&l1YmP9{0k~4;^nzyCch|O;8ZYU34z=!q}r>Zs&XIa4+?pxaXu8 zcC^oq+z`j>r9P+p$;d?Tbd0VSc88%S=ZX#<4Sl}lcoB0p{H;ajw%_xxXo-`?&Xp-8 z10Jt`MU;Re2gB5%9{ny+@!UZlZEU)oz{7%d;yA?Q-|aceeCS_tJjNW)34M=8UMKV& zSql_Dk`vinbe7_@IRA9bOP$BQ(~H9}evIj1-Qr#^=muWs1pPtiW5d#XRUWVT+;>)1 zHst6lD}{v&60A#MnTVw6NMF0IwpYwoW$MRKn9&I>z<+xWE zWMzdof%wb0c+q+N%Hq$htT;i>N&K`p_EH?pj4gL*X2rgE!H#Z5fsR>jC{7b>h$-HP zJL@-A(wKMG7Nck}m#me-oUvy?>R{Ep$U$vv`G{=`1FftaQamf@h4zR;AB|a+(rLWm zM^j;a9E73MDQ@+^=ThG=6d-~&XqUvjwC_FnywxLx;C92r&Bqus-A8LaI^a$>j=B)K z2;@LFbVDy8At7p@0Alcvw_2n_7GmD0Hr0Ws4DK(IQC!&|3R1V*gZg1XH!ZB?GA8nN z9QA@#@G(bM&{$p=Y|9mzvLMO(?9NS@9mnxcaXRpHyvgq%NR?F0lwECI z%WS4TRaGTJ+SM}+p@Fz|Rza+knS-;LvR-1^2}+x3dEG*(%Jh>;R$|6e9l4dXyk}?n zKJ`!?*T~AF(wZzaWNAICpiU#R5Y90Ls=c)IV^vt=HCY8|c>d~74v115G6r#bt0_v! 
z5Zlh{$Q73B%KEObr)pYMCi+)Y#=@1L9m%TDqDYwcf(Qyl=ukVNJkqnGGSR`pAn_8F zi7FJ0BF6}WgrZdyM&gG({sL&>rzrRu1+Pld#wtx%V?8nIkmkX^P8A&rng}rQiVKDW zqq**nm4Ey8Wgf@r<$iy_eQ;C|6{0co!{luz+Q6OzlD;39w1yK-%Du*FZJy9;;| zqGzFMYNl$dP4$#o(-zd4YHGH&efHM)7#bg;i{pjt(rS;NIYcbHZ{tnAkHbDw*Oia; z9eu~xDP_tYc7I>pF*9>d%k<1(*!6XtY5S!;gXs)uI>z@)R0Dj2!~Cg=*5w^*2YbJ6 z@;^XW=D!F20JQpTf?5=ortzL}M%l43l9}0=wy*6}Q){nss6OD&Gu_?QXk~{*04h1R7>DIt^!Of?sQAO%LfGb~=M>;r{hgVQ4g`zdb&rwbFiD(YI zw7ceF)HocH))(qqLR}K7o71B{66%IfHzA{L+BSLY;$P?+qXhniLGsFp*Ylu7U6-LP zAA6ihmc4|Wnrdt3)CDlUsX7pVYwAU`X6M22#h5B*bzQo*!*If6}&K!w?128T>4b zK+}-gjfP5SWi7=}wU;$@_Sg95=1a0x7ZnN*DMosdUqUd8w83aPei{Ukgb{4Tbl+em zu^%&fHHo_|RsrrVF`HF^yG@C^%V4dVSh(}ytt;2=M63(_!|#zr;Keve-W?;V#DO+) zd_ZiJJ4b8d5X*Ca3YgvVy>y6keFCy4P-3Fv$Ef^y7<P@PiUa?8p}Xc!M(W| zPqwVc%ybC3H@+ztPIQ;rLrd+ z88|V>8lH6bFpVL~Ng8xvXboWHQ*lv}W_ppnj0xU`l{P}yguW%rE(mQd$W!U38O>tt zl@)4tfzH5RJb!QLd~0Q8T6qj(kAe2@7O*@S8-((yuW9qcV@=oft~`R4mW6hY@UlU} zcL_s|p9Mtd#MkjA-$O71@aJ-XpXym7Q+Fl6PfG;xGr)TSCjjMkR%O~#0_TAEsm1iH z4*0I^5~Q!pw6rrolMRFa0_vEEqs~q<0*KpbW(I#VYwXO+)O=dS2zu7c=EZFagZMBEbFtywKA4Jrs#b&K*4NWd0W^SVfl>cS!&#g5bjOi`Dx z?+-CXzA+zA>_ZAZLeMhu4fqyid>er@m}yV#V7u#2pImQt%E1%M?g1qW(F-vzdXX*pIl)$D23^B=&ptm@UFKHGb0kdC8ky zR4hEw-;JE)FA&UPz(Dbhz0xZD*gt^bPAJefnYpX7@*f)e+K*H!g?Un0)qnsERz+#; zP$~S;D4j#8fzsxo(s{Ok(i7|?djX}VrlsvNd$Db;DeUwgsqG3o!_KmoM!%PVLchjd z0SZNISLxXjiN4AlAW@rrot*;`tx6$2UKCJ8@k*DDsYiT-2 zZd|_X@h8FNV$4@Bd!6L+wd+@|FJ8TR{rc56SKQ;g$R1H}AxXKDNmY--=P1bz)e2HL z&VYFr$)z_;;Ll1q+)$SahjDi;hMnc~c-(i+!%MYw{*rS(0pJWN(!mt~{f8c3_0LZ_ zA_Z0K2JX6KEgpuQ<8?n3{EL7&nzm)cVonS)c=3mXdv2?$H8hiEY5y*v_ocsJQV1Bn9kw>jzzB% zggDM<9&pY2xdWWs{vF@h_Id2H5#TIr`pk3-xMv1XP0W%*u5{SSTIg+*O|s*H0>AXi}?RQ0SXk}biG1x_95ry@U)boB?2 zv?7|ncyR+hsnz7DoBuu-o+e`M~Lj2tI*Z@Bl}rGHUtzH-*}PgB{04Kz73@dk@*6D9KYK zAP9Fr-lzjYnX4bj=SMfCbMZ}rcW}MIS=09hLg(INQS~-g-EmH3a@WFLRLyOeW&X>E z^It(wxS^o_$u1xxCiy!AS#_Q6By^Z4Fi>E^lvLbVq%dD_YoW7pfEqtke^)!)%eZK zUIiLmufc0;r}GrsRc|Z5erczXHj$D&;~xMDqiLY4lo_CqJiQedQT42GMq%dD+Rj`y z2c&CLdOxSIvA2+(kW|kR)s*|_jx>lYpjWsx0Rs4UFmFy^IY;&m#mF@an@8R@5PZ$8 z9opMg?T}T$e~rqeQ9!p5xp5);e2tP5V=5nl%!AfRIC}>SXGd=Hd`$HSt%^Dk~fe`?N#Wl7s4p6EVN>DjS!wSC@=uZw?`x9b z-%4M|O1pGvVJ_u-d=XQX+Juwa9$&H;fNQ>Jk}mU~Kp*FqwMpv~zl;i^n%^KuSjuC{ zG5se4eBHpWQ%~Qdz^3fVu ziVqs#?MAC1)zm{j5_T`ZO@CfN52LH1O6~;Ql>}Ws;ZJ0bE_8a|OWpkH9akqqe0+eL zgmWn6-=cuNhRLb2Cn_=#1{x&%H>eM2UpU%l?8J$IdW%?d5p`^^UVf7SOMS$EhH8Cg zn>H+Wy5BfWhAU#)CziI|oUk9LqpK2lIi6>(3X3cZG6;l@RTGv}1>13)FInaO2g~mA zcOKk%_+{)`(kOq9H(5sjyH?Y_VL&c&xq^e?-yt>64C_$NzZ!Fga(-f*D&`n}Z@glu z8su^MLmEqgTaxRX#%;X_cg(Pl`_Tj`$sxiTf6TzoAg_J0-|ucNcJXaPhX>t_WRVbE z?%HYe@x{n0?u-|ST|$l{a-`c%Os{Acxxb1Jt@su%p;(k~s}&|(>PQl2={m|8a$!?` zF6GW(3@#6Iy1J2SP|W3>`FwP(BquS$1*Ea%O_8wP$gP?;!49MABDgAQ_%(6IbsBF| LZ+>KUPO1L|>mHKq literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/vit_sam.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/vit_sam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03dee0259e5341a0cc68fc614f836c55330b62b2 GIT binary patch literal 20558 zcmc(H3y>VgdEU(I?Cj(A_MSN0fgm{qAHY$-0XXm>(iBA!Bt<#kks?6RUXoet?abco z-98RIb9cmfFQ#)$mWpz3H%6MHrP1bCX-w|Zjq&D0X+q*oW3qXubO`Z`mu*Zn50?%%r%Th# znbJ)2Na+aDb6&o2NAqauDB?rDvvQ|b@bB`5{S@N}@AB`eSu=)Qj#uyq@K9fkQ+;t= zzUbZM-Hr0cK4EySch5V9caNW4xpya3x(_MGynB&yuRn_W`~5?^59}F8z0bQJsrUOw zcTziM>9}WIG-e;T&Ka9cMcJ3u!g{+^X++tJ{*`sVRrRCvrS-Ljzc6b>spp?xi1II2 z!s_yK%|+isQvR7r;J?`R)*F75*S{C)K{#th!=YMlRm-imYE~L$Bsed=^z3sNE?&3T zlpPISs9g2c0v^wM(dgoOz2Qmba)as7sPcpQmS0|L2W6j)MY*tDo)6iV>!2h9H5S=3CLwX1(RLH_I#;Im!>$RjbPOlFtw#Ib7#)1z$qYF;>}_j1wQX;w zJ7y=hMg*|W9{)Z!dAP&RYe 
zu&nLucCKUWraD$9+cA%z?oKZLP479#ZS2~p!SYg`?WH|u&)H4yA-|VFPP3EVv$yk| zJj&*3R*ln@t$YI`@Nz|kb^K`9J^%vXiRm_H8QPt(s zfxp!BTj8ln>uR`+Do^W*o>oi2<5DH7eak%&wvl`8lHUs2YW8vWldZM&&<)$GNYGt+ z{h5>Q3n$&zPP*scoa>hCn?Uyj-U~%vlR0%cr*W@ek%{eVPbl>D{w7blw@T~J@#dSl z=j`t(_wwaW z8s2571o#5+v~yXkQ!$-69^*nnaF%0GGB|# z^HCb*%FFs^GqPS-i>%kyqD=g%?;7d^O5Aw#v}(7*)6M1@SWQ*xt$}FU^BckG#h5*V z(;M}$98{WfYgeODT~WC^v}33)xC=pXY}6VxN6l$-+?=#*)4o}>EYrFL)i>ghnG4zFY9Ih zgym&Bj)WP8OsUy3;O{+y^@5*smG?2PuW348Z*@mSGj?0mm+DxO-5_3(L71Qt*hz#K9(8o2UsLtz6}>8oYq@mmN$E)UP^}c z0Qa5X(lpy+!u}yXu>ABdE04Rc_9hT~-J3L7h!3uMnx&^fslwOl5M$(PHO1gCf`wUI z&G6dkC>Atx$r(~)VdEbb&92&EAbY)s(&-eZ*Zrsa#2b?7Kop+I7HrHX*dY>N zPj$bOXNL`}BiP+{b9=^&Asqax;NVaAA=t7}3F{kJxZ8p3&R|@@FXT~hZnfH0vK3>? z4wwCd`2*pjcFhI+Qdfo5SN*H9{RKZ*9@mqau-!$nV7D7rdJLrJe1A6fakZBzCWr$# z;FbNu(?2*TE&vS7-foj0+-0-ibUSrl>OM)eZi4r3 z7EVJkAPM7g9(%f0Akm7Jb@xX^U>;9tj0$By^jG=71GL!;whr?qxc+j+z@ zz{4rThdSnp`9^BHusyt8>=0WQkT26IVs{-0Q$)#~kxqUuwLL0#7Vd^{H-?=%9p?o~ z-b;5zv0kLW`?m;d=@|pqeC$@wbehhQ)Ci1#IwuXZ1EdaKw=1t>nfn7n&gUzQ>N=^# z^?V?1Oc7}Mm4r=R?yVvsa_Ln>(E8(R0D0>1%S`~Qjrq%(lD{7a&jP0x8|}r@XCHg? zqmMjt?yP_A+!?R-Snb>+^Yi}051pA`e5mr!*~cpLkDRN_S3i2@v=kxz1JQvFAOJw_ z0Ft{E_$Qql;QObOLT|g79LV@7S@o^}`pELwZOj$1P>T-2Q(ayr1D}8}aizQr zM1G~bsTUC@^V6l)15I@YIav(`*P>aw3Hx)x9aiQ(ccE4#h~2tJ=&1I9%w@g z0F{9zlEpazJQvr%@qM6BHi*9UT94ET3<^hP6RWC>27+Y${+>0-k{;Ky%6e9%9;xVi zV4#8N)>UYLhS~To+oSH}R&f!a{^as%_0*Gyt;YA8$^F8tBTJHAbw7&rMLmWfa{PKL zSZ@kSnjKe9^T9I=h(Oe{47zJ=l)}g`*o;zmHwKU;URlPqDQGD&%Tda!H=~p)Uy0JB zBFIDh+1$UpPT|$cJ?2eT+I2J-5V2+N$eIOyA=wo1owr=9wd1(#l=>7hRB;V_s59dy zs3Gu;lIa;Gh##d?&dQ~7b}sFWd5&*;8C+RE?d5RgeaFYWZxhPQK$qpEYlY7kHPb72 z!|x22vR={8t>mHOLRhoC5pNWlqoL0jwoxizSsRaX-C3(FKxy*gdLyi#TJ|fR+kN0- z*{H7S2|X~CeeCmt1uBB4us=3~1ch7zlIo%?7f+yO;0o0xdJ5`%m2L@WPb!Uu-*9^? zj6ClZxw5)ZUaVJ80t%1@br?1Gl3Mpqx|nab;x+&dT9w6y?@BJx@eNH+PT$uL!xYn&U7ncWe4UQD-guhO4FyNga~DB;Dr~UnA2Uk zPC#&p`orGrMY-;@0N2FB9S!x3OO)?*Ur{C*z!@1N@WOU%1b+iTXzV~H--EIkx;CS1 z0rOjs>>cXuCa`;#Q=z>=EgvvF_I03xYbm5UA!G@r?y(e+y-|h)v6596J5Wono59&1 zHavUB{NpBa=ED3gmEZydM5JCoZ*Azs3YKfq~TMsxf2PQ&O#+*j7kVUjKIa`D>XdN&i%9hLZ z7Bnzr^(r1B71QiTR;v{`7X;^GFU$ukYVmg}vF!mcY zb&0L9R`qEHEUbP6K{P^+(%M_$)_2V)S1wl@l^{Sry7*MRj?7=g72Jm)pLNWvnK!4b zqUl%;VwRxNDbu-WTT^EKrhQ{;yt^XJb>EAq^MJCq3q$`Wo&sL>D|@NFfd~kgX+dFY zV=6I)D=CPXX+TY?dQ4Hrrb$3l|B$FwQY0M74W$o7&^XwA`Vwi&pm(O`W#2RYSYR%elsNP*EK#0v=Gey;#8#Q)^b!*HS_lSLpdzADUPwZ{< zdKYkc&qU0mFuI_X2%=LEO+uF*M5l25L#PSgHPwq4lGzcxi#arT zq|_U{$wAy|1w^NMg1K+?Xd%i3?OI4lNK_P;@esO2?rU1>Ao>TFxA!o(z(AVh{v4UB zk-4GwYaxm*WkeY)NShVq{R?CZm|QfcMITti?)gA!%5tncw1t#Xu%9CB2(CQjnys-O zg~WA0iEVU)n2SD)*JQk zD$#tU<@ybu78hayYz16|eXrXG)}qZ6*s5Ljh^>%i^e_%?$7g!5K$?aOvAnPSi(wn@ z5`^_CQxRi=Lc!^4M-y(CUmk^U-~|} zcKI?ZI1yf5^FMa{6VF|E^*D?;z3$T5Ind5|T`>zu?b;k#-_p&>i1bSDABTa)E-VZ- z)rXRj!TAB8$M3F>msi@)ql44XM{+$V_lJr^Im!Zsy7aIQ6GLrVX(Uq=QwAsS_7l~E zxxdZTm>&URmm-@qLTpEhiL46?&x?j}p$D3x$y==>QLei-M8!c~jWRLU3EYY@F=irE?V%xflKGeNA?8A1rC47HrGV(kzVr$ZXU zEg+bDXUMZwEp>ON&`KfaPz@W+wKVc%21;ascxX;{I(C?cqCV}VufYJ0O~^ssApyae z1q@9#nG6}3*>rDc_T$Oyvsf?Tey}`isjKLFJV=17i%S>_CWWZt*N&e2rllF;Kh|S+IVg4lGk#E5SG`>;H^->6q z3pcjyltcEPouc{GYHV&XBhvp3~trj`~zrCeHM?udt>th$e%C8i*Fe4 zjXLHwsCXB@f|5b;;=pk?Bmw{>7@znHBNUu9U|I^{1cEVvnF{J^6K)}J(AZbUe*^n$i8jG{}jKnVE*B*2m5Dog?Fsy63| zxsPgk4U1`YQ-6v9hYQ3m^FxUz$_C4A6;{_nB1;EYLx2&x7O)F=; zkC*&CYsmWDVxjmA>jbza1+gKe*aAm#;KQ!)6NDJ8eoS3Ad zsQNFb_pHAPl#S=W*LXfewVPMO^O1eeN4+sTABSVuus4b4hkDOT&a&Z6{k&PqK-4>o z=Wq}6ju27fd#8Nw0xG|o&R*9nJUQmuJLuI8+`MWN-hF=7yWh`w58yhE>jbV@Tn{4s zBYxgHiR%;)`W&v)xX$3&S=2X=-*d?G5aJKxdc-gI@KC5dASnM)@1sEZ!xDeYd;Fbj zsp$QX_XLpri1#snbY;wY(jVt9O4#1Vy{Dj7oIn~dnd$bbeZf 
zpt20gk(P@xi7?>4eDxA_@yH1m0YT8fq=`Gq2)+LOtBI`(7|toTG?K&hy=>Kk)y zwRF0&7@VFzcjlqFGxLu;bneN^T_6sjv0VyDq@9I_uF%T;t7}3anu!5c(5cVXs?u6+Q zx2{d}u(zvBD{(Q^J-w|K-}}rl96@&NR={eo-u)sP=&Cg^M6SqGiAZ(ZmX=^mgO?-v z)PkDL^WXA=!Gk|v4X=+&D`Jq<4?Ffr{=%JJV? z)8d!F@0f4If;Q(q<5w#r!OcN}fyQtRdy4B_g(d`ope`w8PP!}9Ne1wApuh==Tu+2k z-Z$GpIPgBOXg0+|k?wz*XXCc}-)DaQq29|tD6KBv_I2c`Hg*QH*qa^%r{PQfIq=R)GbkrKXdYcVm;0Xy{@+o zu@pllgyPl%EGGf24BlW@<{$2RBX$N9Rve0_>EvgXC|wA^C^<9Cl$Ey0}oGE60)vc5%a|2lN} z@-bD}Ja+lA3y~f-EDJ+1611qEsBMtu!ukxKQoS~U<_5@(j$xo4406xWROn4C#OJlg zX`d~Wj75jcW0j4i^8Q9Od%9Ro72X|+l~{Mw>%_jc#lx$wxx|%)t?<)7#N9XUjj4!v zzaMQ-uZsPx^hw`RJ4R=}!YBBDQurLu$MoqOXcnUhL$m^-m-(KnbpMD9Dr~6Uz|Kpr zvP8Y%_O-=T3y*2H>bY9oZkzhM+yZScc8v3(!LO5;@~mBud-{f z^$nE8f}%aF1{*)NSg)?*I@arhEHJmw@d-}q+mBH5U|m$@HrJrmC1C5-C+j@x8(g|$ z5UFQZW3UPrwmvKs?+~rQx6+9C`)WE-yl7hb1Jr^=<|6DRSo7;tG+i%1nyMQJVS*8R z6rEbd7;~MsvC1dP_n@RUCsL^rP22)oMP?&P3mmS)eZja+OV@P@)pY|QT9F1m$J8+f zyp7D|$R={G8_?!lcac1qLPb&v_51B(${ zl-Kkn9*6KH>|%_lz)e2E6VXuLmLFw#!`)Z)G*F*L+uD>KucCeWr?>fRc!0g5(LTlz z-imTvN{{kAA{D(=730?Z(Cv|mGmcm=j!;|SI}>}RX@pRTgsB~P?!%wYhQ~V;AyAz_ zL9%0R7{R@;t0U#-;0>Il{+#i7v9ya4g?{=_EVgDK7pH#S+@6&B;KQpP>r7GwvtzEB z!Be!(cP3HNqK%%48Cda+m5c{pKg6lVg!N0}G{3?)EAgSQ03Sr=9uDdEPxCH(_My;8 zdAV!O_My%p7*@yRJx07c>PF|#Uok(+{G-ST`4+KpG3riiK#>(rdNA?gU7$W)ndrU; zdQv_lD%GhD%9RgyhSV3s>D`&|$jTjFVTtOZ@h>pU{dw~hUy1MUiO1Lpk6}!Ht>Y}2+YZLa3Gdypf`5&1DTeoTFxKeR$men3 z3my#j^Zr@m&HJF-n%X|xIqZ$D8tT8H_I&3sJR_aX)Dwm`%HJ7qSISs5)z|nGBmI60 z?W5MQ&f#RVulxpb8s!JV zr}X6E?J0~u-X9likDMF)PT)7&nPxAbbe=-m(1wXx<~zeJ6D?uX;PYAZCa>kUXF4;O zsd46&5i(XD+&vkdk~y2<9z8|o;e~3H&!POexX#aDb{?V^O4r-t=)zX|X{rde zW(euI`9XT|v}+KgsOoayJ}G8pF~9#TSgEJa-n#cZWG*)1wV4xMYI@6$==o3 zrQlye>Bzy(u`%yKRrpJ+>_?IsqZ}+68?stNL#w`m(Ng>HJJdsX{?_4F;Y26$A0Y4L zWM#SRZXG)5LXol$$h&fA0cuQhZR^yrzD4O+?7RXG_U;)7f_p48Ctdg>yIVQLgAg7f zbD|X8n*3x7prP(je&|Xwy^R2;9d4^`=VGVzrt+Sz@%C*5&tuX3hrIbw27eO)6wbfM z_#wtq@(?GZfLyU%>s(a-%|6^S55NTp9yb6%JPVn^AYtdhkvWC9A4XZuNqMW?j)o0nDsU4nT-zSaElV zjW#~^d5BbKA#x}jO?n%uS&!195`vHEn!=;*274=$&|d_? z2>uP~EKKIDER^>2%P&H=J)NS*zGD?E`j=9zO|>{2O62R@$Y-J8H}g<-+h!K|Xi&0E z+nR!wP@Z2CA)(%xRcu9al=|9q{7`ntYFzUllGi% z6o-$%V+*y)SNFmGPTooSBYpdJKI44dnP7kIn_DAV&wG1271u|NP?!;veK9ty&5QiVvHLJH2i`@}+n5(D{XPM)Ji@vw?W0Sx5iT}Fw z(;vrL1}^tzGqMCk1>wg{Ih`4(e~ePva`%&r{VN7Er)mS%K5O3pz=IiBK>~uoJO-yvy5FY#0e?nflJz9W5>d?!!(Dj-~p=r(NM#OT`0Tadwvh8478bu4dXijRs!9@!Kg)jwwx zXgHAy)qiAJ;kN(4m@KQpuKyXg!2DGfz`<1%Q2&K-v)N}>5{uGrBKaQ9$ODLwm1zAc z()yKRSRi@doU-YF$awZUc53TTLZSm}h<0n6L^T73N74eN37NJ3jtE^2Nt|MS#B9J> zkS0YK7_|$ZoJ57opS`eId9BuOiEDV!4d56>Lcrv_Us3v+-6xTwwX7eQM%7_?9 zeIS|9P8LSfY$v_Tb2j6=BbvJMd^pib@6h4UIbhbb)I)grXp|D2&y!kZt&!9F?xglP z@gay*hHn!Wub)j}PQ zlcXL&B#(6y49+zO6b@M-#^=PfW|?2SR@@Ua}7C#8?y`Q z7f_q}>kQsyAd)$Dn)n>*GGgE=!4?0Dsp1oAQ!NZsMP|Fzfl~EVJa~iS!5x`rNAoZ( zVZcqwDgfS6Ta_d)&;?N%V{;I%DS;T;?v zEs&dL}j~b-r-5j3e zaHKRJ@~;;h5o_T0vXjOkwITTGX25tkVZ33@cq|98DRJ-4FX+vITgk5 zu@pqkD*OxaYtv6Qnyjv_0URdGQ`TG}9dXz#Amx#LPfHUqZ~J{*r@qD>oahF#jQv{# z9;xXJB3zU~Pl85(d%#B;uM})xA|M&!3*eg%3bt*I-n75IHIppe2k`0u)lo&1!zENF zi?%^p!_tX4-L>`%ESB&9$EH}_=AV3Iuw=IB3kAvqeRWJwIL|5 z=eVU}$sO7*bnewa;JZ`;?m?%7YQ2YMSN zct~8=Wtax&n8IWiGX4y^CtC%mlyC#yNqc563e^@MUnj*sl7T7?XCHR(w-&MnVw3P} z(DpHp00T$sbslUcpdWSWKxzi5=jkhj(-Hs$1To+gs!9Me`)ZIDd-wRei{HX{@~}J? 
zLD^tWZPsDX(GJJ3AXa+TfIchu(*yr81uq5?tirnYS!7y#N(*oWAZEYD%A?W*|~ z+M(b3uN|ouxodR=Gp!y%sF+v*#P}wSAUNFMp16EjD#AfUcy)j5%;n3oILM}3>ekG< z#M(Pp&HAGL7mM&VmG$)Yr_Q3@Zgm<`5cBijqjJ?opkZKdYt;LZpt;{eHdq3F7C#a9 z9u)?(r^5dd81P}Hrh_%O#V5{hL*z+WDx}Cz-`hc*K-5;Qd;9j2XoOp1U(U~wfn8Ld zAHlu?RF=g)lFjA6i7=DDL9CXw!J^jIRLq|HDu6yIkB-dF+}1G;j;Q!=CE2;ZjE2;& zGx#SA_)_XGFbEi+E3rc=1xNLp3?5?;G9Y7Xe>oy7*%%d-v&X`IGv1ftYZG_5GXX;dFZ!!)av6E`+R?4k)os7X9VPciK&2A@KJ z|6P+6K)W<=Fa`e?5$pK$;vnPMM?OMGB%dhzgwF9KNXQ8OC<6G3LT)ZV%;DcO@lf+L z_cAJ;@OKgUn*|qi071w4(6;g)=I;bG+@$DZ9hVr>alXmM`KCP1H|61{X=n1^8vn@X KQ_dSx=KlpUR*v`p literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/__pycache__/xcit.cpython-310.pyc b/mmpretrain/models/backbones/__pycache__/xcit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5d6d30e14d24826c4a6c0246916415ea985ef9a7 GIT binary patch literal 23522 zcmeHv36LDud0ux<&$+X+vj-Ny0tB1jg&~0j7NAIqmvE5aA!tdHpv4+e8q7}j?#^P4 zU| zK2*$#k6jzC=Zd-dNO45m@mjtidiP>$eqegFEXE*ACQgFWzn%-X1Tx#^cpaF4qaa8HQ)Zn$^3lWX!@XbJ1-Nf>55Rpu-1m7?oA+-S@V(u2;Ols|ugBKS;$hc( z$tc`$ojoyIFvH|>Sq0T{tq==S&o2knW}{pSQ!jb1uXv4$7sg+1z^h<|nKRACrH?e- zm6{i(o>a}{7s|mR(#Ou8on`RpvhPK~8J${kHN)XRtu!j7MpM;v>&L`P8|Wu3M-QvdYh`RBNszDAmeWJr$;dX6bmavRw17W2hKNl4WtH%ax0B z&4#xcpP7Dm`dApBuQi*BC8*{~!>u+JO7*7eRrsmBzoE$CKL?9v6iwGCTCQ1)rLA-< zZQ~zDrzJewP2x#;aX0NH+>Do)&wR$1H{BsOdu96K>R^U(1VAAI6MN!KbUU-TNzylU1Z`*u2X+TzOGbfsB8sxDU- z7K3`Zadi1=!1^DlvcMy3?wWseuGXA8S}*&7r;Y~Idi`kRf2qnIF9qFdO)p=SGNjUe zjk!9AT%E60q#=AKXgY4wIddv%Fty<8k7o0^)w**C4N`c-Ilt&R^IkbvQ69a$21}Zj zycx5xQZFrfW!G=Vf1#|(wVGFR%AKB(@M-5sZ@#=z3#8=XET**%y`I7(J^Rh}{GAPB=-x2h z>CiCM?wK&%Y1%N|`Tk0>ofL28MY4l=Y51Ro5g38lGS;mntCeug4bwDQR?8^GT4u|d zGS-r}c zSUv37H_R~AZqx--4!<{l8=f}DSRoh2{S{1bwF~}Xs#KDwCAF8%xL+w_VYV8LFmXqpbo$mrZ_K3Xmr#h zT`E<$!j(#I7;imrR5hEy(RzJZd6?4GMxQlW38E$1KYFE74KUl)LF9Q9k3RupER`@* zX2u+|#>_D@Yvs(mxjNamh)%aZv_kUG{t;QQW6$96zXzjbE*Y=I)~vPIn%y!s(NQk& z!>6&V&s%$qEpyXyEts(u-YsLvZXwo^J!Nd!n{n54W3J`eTlQKUY0*hKW((cCW6VSg zahH-4hNMZot29ZZNo=OwxSQCr^2S=Km0HTU2{*Z6!ftF1wNhKOrCQ0nfq0f1+p?lM z+gq`17#2i)D}VoAaOmSM+eXk5Ob$Wz_wQ zX+$Z7iAApfH@3K;+b;H6q zfq7#r#&;G^CN?I1wC7`4jG`$k1JkxvclD2;D9<M1XPp7GwdP6&AB7j>>^?S#UwG5M#H@?G<_`DM1oH>Dxk)V zg|p2SRrOT)E0~1cP^TU2toTGAWza2&=Untu!>hdulrtzZ<<+h-yJkb^_x4-|LZDL} zGG6r^Pa|pDwvM7sP9}0?AzfMu7`f$u*)pb39f_^xoeFvbLKAL~x zc$bQ9%OIlZH^ZX73?6#02jv7_9aCS!s0dk|!nYH1)|^XaRTa!bbb>`d#t3kb9wOap zY$xV3$MEY0w@KXlj2j$Gjq9ZJ*YjA#X2VRkm1Qjqhk8D?LR8TH(N3g*w1$5kMh{A` zHA)D85L{~lXaS^y8&$)#Hxk4Nu0<4p5Fi8+kP=7(%(UQ&j0>Vs8WMV+m{qqSZ&|Hn z-7X};p=zU4Su8g&cl;VcjBI<|75wOrLJ{qu~^g+(`0%v<&gaaq;7 zzDgnyA1GPgWEqz=j`Y2W$0s!yi(6JATH(0PrQXUW6DI!|bM;_%?zh*GXqoBIeDxSI z3C(9h^P?3T*7lD(xHO(Kc>FBZB#>fb$y~CQK>h=J2_zjoW9~Lu=#P!qCa@hydJ=>l z_-!k;nc4!`j^f37;vojpMKMU?;}MKNF9S>nq?nKGWqIk{#y-9?0s#`hj}qD&e?ZIx zOGT<7shK4;Nd{TqOncn|-ZYkmUAqNX!8rSjabnQ^oTF0aZPX&Uk07F5$KLXdPIww6 z=31$~+Ub}~XMZ}zn42S8#=tm)QSCVSEn~qFxMFRNx&l=s@ASLKdr+KP=goSLwbHHB zyyd0fI^v8W;2P|DE}=R6f8Zf=My*!u6L76Ft$0 zlCE)=8b456_`4P$Wi;0>9ECy~iI9{&*-S<@ykx-kKWanQQYyw98f zRGKgy%R$I2iNq})3PCS+x+AZFi>>jHoNJd9p! 
zL^$o*ZXC!*AUAnCgx+p?J_+tl+)H>#ZWe%#62M0Z;G@JmxI5tZ5F-s2Q*Pc%FJ&+u z$;h*dLxP#I7;)oa?hFPGSYnmMr`TiVe}_d$oQ}AOp(2n}2XmncW)I2grDknK_&vhs z!p>eGMn5>H))!>a7tY0DN7!K4RaYBi2?*Q8Ax-Gg*X`u@`0)o0J9Q7NAOSXv>;NRe z`#wC~bjz%Lx2TAzBEC`rij+D#+Dk?zzpI-5{w4u~Ub)Gmsb5Wd%h9Q^OB@C3>#SkPyC$=2W)X71A@^(zaLR!UZxi z4h2`2y~po8J$rBA!i8?+h$8P;62hyOG*{%k6!Y{`=U(nF=!XQU-o%W$o`)gprp)-z zI!(TX%?1c<)@&9`jA+P)neLcUFCbl*>GoEI5Uc$oLGM7W6c*_o$OWQI#tb3Xy1B-k zJtisHGq8~q!v+$pVU{g0D~=tsQAz~KAjJuf9Y6NnZek;`mR>MGFf2ED(NZ4<;YfqP z#6XHuQ-+(~0EYk!AnhxD2+4Fq8hep4g_N)Mr0fk(2apqhaL2z03Bi}ZZT~5=lN;h8 zw|FmFMQ6badgFxVbRpK0I}0uhYwWqq;$+R2VRMfq2>o0U^DSxRHv{Cvn5ZI|Fkp3e`y0e#gF%ORnzjCkS0g z`zVU-<7E+z@FBVMpMW9I27r}geF?AzW1Me{ddnh*t3_^>@Tfr0%q@%YFz#dX*1QE` zWY21-Bjbd_1{9jjr#?Vqk;VtLFF7@e^$D$KmP6~K%RTh?7{gA&coUD$xtTF1$)qEL zu8-DDw}HU$AhI9YTp$(qRDuA2sVKX$L~R8u~pB=P_) zBDln*w40koT_TbJM~6Y|LrLXGs-63CrW+(o1Yd--o$mbTq6hMUc5OOs4CM(gPDdsM zSt30_q`uw&AGjn@h%iZj&YbH1x@{7)Hp^p2T-7f9y+f;Wv-ci$!0>o{#yj%hk203~ zrmTTR{-kDyN6v43D74Pas-HyQ8Y%0?P#q8#2ifV2Q*o+Ukw~Lb!Fa5erhAX)q~_QU*w{ zopK1Q6Ehg5SZ=~tH(NtUZ*M@hQzFYFj`Zh|Zlsk8@|&ZS(M&)VgwU~pzT*v`#MfA( zF+AhIhDK>ZP*_reTYG9Qgs)DQqO{89AL={)14JV$3Lz}tTnBTp~NsvS$EyT zil>C$+K{dKVPc_Nua{?{_i=DT$rjWz8t6@+L02&NbIg*`L6CDSP+I8oi*!#8UT@T2 zWr}U6O!oM)+dsxoekcI120dfBEU;G>e5JmdN>l6uz=bCpV;D$0nTT(vz(fSuzmb4w zD1)#}Y<2$tI@`fqeQ=97lkF&Q>szpN;FcADg9sOdgcu|dw-0Wpcw%^UUBMc(Uc6cWZ#f(eX z)-0tl#KFhd0QVU@Ox75aCn{l3ZWMK5W^rO_&D>zs=QsgL*icXm1P+f}U!f6+BDh85&R1klA&TwjH4+XW=*T z_=jN#18RSFTyhi&;QV9w^$6^8Z|7$eQ{DtPIUV`)7h!A%oB)^D6>gXUH}Q6@`8d>$ zz~}@!Dh_5;+KWfxJM1k*?I`P|meLUjg`yH&^I%N@g4#tWC_#)jgiam{W9MFYwrm1w zo@+umzi`EphVShnGjH>ys|*OECD9e(i)HtL}nO>by3OEnE}LTQ14dD z3lyXhT0unFqt!EwCWv&tkWdRyk82#}1IJ1}z6b3*5xKEaJq$x%t9d=`oqHL8*L6;_ zb7xV92`0MpX zqjO975s|+4=hegLQ>4>Rp2}4YT37G|N4x+CT1X5VJ zBCI7$jPu|u2^%MdH7m3^1PbXMUK9CW>J$8$!~&E)mwn~Xr-M7fcvHEaDkKM`QD5nc z3o3R8J+^2FnMlAE#yRa~^y@f88-k3!%s79YhSZ=_o~TSog$8k)iX?K=NHvyBQLCi~ z?z9O^>f6>(+pgc)@2qCAl6Sz2Fl@S#BFdwn_1LJ8h1C8L->~v$@c2aYk$MyqoYtV; zv`JYRm5XAV7CI(D<`HCSz%al*f?nE~fvlw{P8_j2>FAHH0;>t1)GNb|YcA8n^nHlN zuhDn|2Kqpnjv%31eUb30$LUAb*C1%H%2vxFFl$bM_-4SX0~{D)n?&RvlJ3(?%+Ku+ zIIYupmPU+*bZMEkPt!O_V@C|Ih0xP1_y8<|0rpY_NhrUl>QG*0#d7c5~vqw|&Hyt2rO8Vtxb0AmR)184IAy8YHj-g_0}OT_H|_Zwh)8X_8_WdsPr; zTeO!mhzlJQiIIg}Vu)t)u!LyM>=$oCsam;IqzbpgSs2me^4A$8YwKRx$emEX zL_?O^U#9JO8h678V;FU+K<7RhB9_<>Tbnp=2RM+$!ew|6N&OYB(K!T;fhRDDe-^t% z=q+TiBy1y@e-p9#H~|@WaC3GBkN-O`ZdM}RBK0Gr;r^Z=1}2n-C)Sc{sUWdLy&dWqSZfJ!B_kL7TglB7bPq%cm^*sxhOCaJJLN$)spS~sMUWpMtO|kUD7T^+(W;6 zX&j;v@zOXz1;&NifL{F1?&L+{J#LB7ZgOgDZ&iUk|$va zq(t&W3Dl5is$?O7($Xhx{1ebeNxGwWkKq}|GXag2ylAZOC>b?Yc7v6$2P_0z4v*PA zvZ;Mx3Qea4Qm+6!?j(?6`~I+Kl@L}EfevfAUv6=0A=E8S#{5_c_1=yTb$H6%_WyrT z3N_cz#{@g4kOpuC=Exf1@4_w>?vCvS2 zqK11!QMq6QqNseIC@LY2$spR*I{`+XAQCs63vFr-Ez71Ac8MyWoj zQ52SXC!1i5wghQ1ISFRM>S&t+cA?^{{Jhs-^f6S(TuI{*s91vq-E3?^bhN~`78;>z zb?1sWs2xu63q&{iRGBbZ0K?nDm?#gbee?VmK=_uhSSZ|S#{ry+MQoN|MZu8I$Q)pk zYxpD``*-R5J2ZZS#usQvPqFu)0CYb+9;MOwz(Kqg+d{+U5zR3ZHguvOVxx&ORryX+ zwZYaVbL(O^5$+wM-`8M#ojpVVC~#?Cw~u(-Z;WJiY z6Nn;uqv*?j2s*J&5<6c<#b;H{AE&eJ|dJyo_7$piT@q z&V6v-4|sNuz_Y{d5x}#o*r(m2*HXn{_n13#%_!#F1MZx_MU`&d+Yw zaPFhb-$!*>OS?odx-Z%HQAwU%f%Y~|L(iiGokP~`Z|gQsH@^YL=6kExgXA&9fX`kOy)je_Ax;<2Q`FYcL*)_vooo050rPTB zzk>q>?^3O*i+6wMYGu^A>nWdNHdc$WHqrPJJ*{rL|2cSQ9aO)qsP_i#SA4I;^C!|< zgOtAz!9j%R2UU;%pCo#Ra^BG8_L#-GG+4)LHY zCCqhazIw%T!9%Ao`B-OK$W~mR9D%B*;z@B~isNRFx+D4buS=|muCtss>C2}9PI#0> zxp7q&OQocqU3(7*+)=lW`hl{g6NY?SUjE~)T_2U}+qS-c$3@iZfMNQK!3=HZU|i4A zCX7dJ{Lbss_T2cLReud;!PFec6O8852xtu03Wh^i7aEmW;XDfa7%Fq3llUp7xJ=^; zjTdNKg%QSuf`%j8XrksQceay}${d&5Fejw5v;2p-{xvcj?py6-=P4RcVTzMjPQ48E 
zOy@8&Uxl`Za3#XwzKK{NZ@c~52Mb|#438XO?b)#I?&wGMop+hU%~%&>{%vIB^VExW zt3v^phAj&xPM`(`Gx4RkoIAn3qZDuD-Q*?1pLx!}Nt8T%a3%&j7fRmk)g_zV zoC?yLnP6x$3pO)CBEi9MYZzg<*06dRTU#i>Z(vVT$_MGC(H2m(xW~ zNl2OrNwW)Srux!MB1Nt>;$}B+{1YjDwl$`{66}L7j?kji{lRUm(cr+krM}u4T`<>1 z#eMs_<^R@mIAw#IYDU-DQaOD>C%A)d#5%|~>U#HU*4o~+sn!(Q+fcuUdQY~d@I9lg zy^o;}_}&XGtC(@oRDZzGi>CkI8mI|!A8Ab?=e*AO^?!-DM(NJhIAgi7-Nr@x7ffZM z$Kby-iI`)p@h=)*vZcomt8|xa^v5HC3-(Z&X}_Ag0G06QblwH$(CQrp$6su&)LbB? zOHkd5w3B%0lU(D0DCvUZUGf^WtJ4KLjH&Wv96rUl)p~h394cR05CtDN^rTD-nod0% z@dr?of*G32ID^A+ylqBp-_!p+MmbO8Wg1_Efqm<(6G}`;!{{=}R*xL)nOg@PeH!Vi zQ+E95elqHmgT5V>GfAsy*g=}g?)2)!vkhQ_Dz0DOeci83bRNTKyJNJjWF2 z4vG?ySeT-l>ra@`s!-=}p;DN+QYq_u(5OS%v1SO)H8Kd}`gk&h3bJJuJwzp3pTEI^ zGPrjO4O52Ro#x??ogqtUX!FvhFzq2X-uvLG-)7K1g;Ch2nFa&aS@riB=^xM#X;Eio zRsWEF|A@vvrXkh6*`8p;-#eY8VGZ@~#XL`aMoA#U^5o?MdMX zGnqP>8;Mw!cMbHjgWj4GWv_>%C=)D%B%GuHm#O?%70>Kk8!YEsKA z#b2L5c%l=YxGDTDgeN=UMj*=3xPj4DkIEfSnNmC$0zE^?>lSjpAG&E)Czh#RLA*;h>a4!DAg3pW%$?ss`-QfLrHp53BQ<^>7aU^!t3s=~w4lna`UYaDf5oV= zn%7YOnoeRZ^>1nX8yZ{(A~qnd4PZ{{ci|P9^&S@9G5X|68l^|;<8QF*A3y-5tqDk7 za~SU#+|)%g57{iFvJklMfh|kM-i;|+$H6&BYjs?Y|GpgiWbkCFa$w0oIgEjV1G?yb z4i>|s4=`c^V8qucQ70=la|xKTWkCkd>$W5ej5w4eaN`5PFz(YZmPR)7 zERlK^EnG5&8JU2r-<13{kT~4UUk)xO0~{+^LKp`Y2FNFn|9B*|*I{W6ou~$%yQv1r zs0Lmz(X!Sl=Wk#8C183K%_zY8tZ4S1$DxuB&`U-~XEceB9~Zw-l+RU1#*O;-w8^-U z<(ZU(1A)*xD1n567uItkfMY96K&cwLMLoR!1JiSiscSHxqA4rMql{-Gxv$my{vsp% z6g{&-`l6eIWF)FjF|f-{=2%oGVf-!!;C-<04vR4`h;!CKvGK~UJ*M@k1J)P@-xypI z;!auA{=n)KF{;BHZYIr5N9y+sL*Og29_i*nCA=QJL{d2`f0FrjkafKJPm{7SSe)Tew`cr5k|B02g{@5C}{ugH5 z|FlM||A8g{TZvKYk1*%{yEUHs!)z^;N*&DYpBz=JSK)|y1utBLgPnVTd-C+*{xE@^ zGH)heFX@k$_9-)NN-o)%ZZrfn7suPqX~{#bp~JSC`H5Y96^V)?^?K#fbfwXlo>SGz zMPIHDkn?fjzHLF%J#2J28yDv4IMJ~HJ`UYs=lH#&Y@DgXcg literal 0 HcmV?d00001 diff --git a/mmpretrain/models/backbones/alexnet.py b/mmpretrain/models/backbones/alexnet.py new file mode 100644 index 0000000..f7c2891 --- /dev/null +++ b/mmpretrain/models/backbones/alexnet.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + + +@MODELS.register_module() +class AlexNet(BaseBackbone): + """`AlexNet `_ backbone. + + The input for AlexNet is a 224x224 RGB image. + + Args: + num_classes (int): number of classes for classification. + The default value is -1, which uses the backbone as + a feature extractor without the top classifier. 
+    """
+
+    def __init__(self, num_classes=-1):
+        super(AlexNet, self).__init__()
+        self.num_classes = num_classes
+        self.features = nn.Sequential(
+            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+            nn.Conv2d(64, 192, kernel_size=5, padding=2),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+            nn.Conv2d(192, 384, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(384, 256, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(256, 256, kernel_size=3, padding=1),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(kernel_size=3, stride=2),
+        )
+        if self.num_classes > 0:
+            self.classifier = nn.Sequential(
+                nn.Dropout(),
+                nn.Linear(256 * 6 * 6, 4096),
+                nn.ReLU(inplace=True),
+                nn.Dropout(),
+                nn.Linear(4096, 4096),
+                nn.ReLU(inplace=True),
+                nn.Linear(4096, num_classes),
+            )
+
+    def forward(self, x):
+
+        x = self.features(x)
+        if self.num_classes > 0:
+            x = x.view(x.size(0), 256 * 6 * 6)
+            x = self.classifier(x)
+
+        return (x, )
diff --git a/mmpretrain/models/backbones/base_backbone.py b/mmpretrain/models/backbones/base_backbone.py
new file mode 100644
index 0000000..751aa95
--- /dev/null
+++ b/mmpretrain/models/backbones/base_backbone.py
@@ -0,0 +1,33 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from abc import ABCMeta, abstractmethod
+
+from mmengine.model import BaseModule
+
+
+class BaseBackbone(BaseModule, metaclass=ABCMeta):
+    """Base backbone.
+
+    This class defines the basic functions of a backbone. Any backbone that
+    inherits this class should at least define its own `forward` function.
+    """
+
+    def __init__(self, init_cfg=None):
+        super(BaseBackbone, self).__init__(init_cfg)
+
+    @abstractmethod
+    def forward(self, x):
+        """Forward computation.
+
+        Args:
+            x (tensor | tuple[tensor]): x could be a Torch.tensor or a tuple of
+                Torch.tensor, containing input data for forward computation.
+        """
+        pass
+
+    def train(self, mode=True):
+        """Set module status before forward computation.
+
+        Args:
+            mode (bool): Whether it is train_mode or test_mode
+        """
+        super(BaseBackbone, self).train(mode)
diff --git a/mmpretrain/models/backbones/beit.py b/mmpretrain/models/backbones/beit.py
new file mode 100644
index 0000000..3c7d908
--- /dev/null
+++ b/mmpretrain/models/backbones/beit.py
@@ -0,0 +1,697 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import List, Optional, Sequence, Tuple, Union
+
+import numpy as np
+import torch
+import torch.nn as nn
+from mmcv.cnn.bricks.drop import build_dropout
+from mmcv.cnn.bricks.transformer import FFN, PatchEmbed
+from mmengine.model import BaseModule, ModuleList
+from mmengine.model.weight_init import trunc_normal_
+
+from mmpretrain.registry import MODELS
+from ..utils import (BEiTAttention, build_norm_layer, resize_pos_embed,
+                     resize_relative_position_bias_table, to_2tuple)
+from .base_backbone import BaseBackbone
+from .vision_transformer import TransformerEncoderLayer
+
+
+class RelativePositionBias(BaseModule):
+    """Relative Position Bias.
+
+    This module is copied from
+    https://github.com/microsoft/unilm/blob/master/beit/modeling_finetune.py#L209.
+
+    Args:
+        window_size (Sequence[int]): The window size of the relative
+            position bias.
+        num_heads (int): The number of heads in multi-head attention.
+        with_cls_token (bool): To indicate the backbone has cls_token or not.
+            Defaults to True.
+ """ + + def __init__( + self, + window_size: Sequence[int], + num_heads: int, + with_cls_token: bool = True, + ) -> None: + super().__init__() + self.window_size = window_size + if with_cls_token: + num_extra_tokens = 3 + else: + num_extra_tokens = 0 + # cls to token & token to cls & cls to cls + self.num_relative_distance = (2 * window_size[0] - 1) * ( + 2 * window_size[1] - 1) + num_extra_tokens + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, + num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each + # token inside the window + coords_h = torch.arange(window_size[0]) + coords_w = torch.arange(window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] -\ + coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute( + 1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * window_size[1] - 1 + if with_cls_token: + relative_position_index = torch.zeros( + size=(window_size[0] * window_size[1] + 1, ) * 2, + dtype=relative_coords.dtype) + relative_position_index[1:, 1:] = relative_coords.sum( + -1) # Wh*Ww, Wh*Ww + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + else: + relative_position_index = torch.zeros( + size=(window_size[0] * window_size[1], ) * 2, + dtype=relative_coords.dtype) + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + + self.register_buffer('relative_position_index', + relative_position_index) + + def forward(self) -> torch.Tensor: + # Wh*Ww,Wh*Ww,nH + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1] + 1, + self.window_size[0] * self.window_size[1] + 1, -1) + return relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + + +class BEiTTransformerEncoderLayer(TransformerEncoderLayer): + """Implements one encoder layer in BEiT. + + Comparing with conventional ``TransformerEncoderLayer``, this module + adds weights to the shortcut connection. In addition, ``BEiTAttention`` + is used to replace the original ``MultiheadAttention`` in + ``TransformerEncoderLayer``. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + layer_scale_init_value (float): The initialization value for + the learnable scaling of attention and FFN. 1 means no scaling. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + window_size (tuple[int]): The height and width of the window. + Defaults to None. + use_rel_pos_bias (bool): Whether to use unique relative position bias, + if False, use shared relative position bias defined in backbone. + attn_drop_rate (float): The drop out rate for attention layer. + Defaults to 0.0. + drop_path_rate (float): Stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Defaults to 2. + bias (bool | str): The option to add leanable bias for q, k, v. If bias + is True, it will add leanable bias. 
If bias is 'qv_bias', it will + only add leanable bias for q, v. If bias is False, it will not add + bias for q, k, v. Default to 'qv_bias'. + act_cfg (dict): The activation config for FFNs. + Defaults to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='LN'). + attn_cfg (dict): The configuration for the attention layer. + Defaults to an empty dict. + ffn_cfg (dict): The configuration for the ffn layer. + Defaults to ``dict(add_identity=False)``. + init_cfg (dict or List[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + embed_dims: int, + num_heads: int, + feedforward_channels: int, + layer_scale_init_value: float, + window_size: Tuple[int, int], + use_rel_pos_bias: bool, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + num_fcs: int = 2, + bias: Union[str, bool] = 'qv_bias', + act_cfg: dict = dict(type='GELU'), + norm_cfg: dict = dict(type='LN'), + attn_cfg: dict = dict(), + ffn_cfg: dict = dict(add_identity=False), + init_cfg: Optional[Union[dict, List[dict]]] = None) -> None: + super().__init__( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=feedforward_channels, + attn_drop_rate=attn_drop_rate, + drop_path_rate=0., + drop_rate=0., + num_fcs=num_fcs, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + init_cfg=init_cfg) + + attn_cfg = { + 'window_size': window_size, + 'use_rel_pos_bias': use_rel_pos_bias, + 'qk_scale': None, + 'embed_dims': embed_dims, + 'num_heads': num_heads, + 'attn_drop': attn_drop_rate, + 'proj_drop': drop_rate, + 'bias': bias, + **attn_cfg, + } + self.attn = BEiTAttention(**attn_cfg) + + ffn_cfg = { + 'embed_dims': embed_dims, + 'feedforward_channels': feedforward_channels, + 'num_fcs': num_fcs, + 'ffn_drop': drop_rate, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path_rate), + 'act_cfg': act_cfg, + **ffn_cfg, + } + self.ffn = FFN(**ffn_cfg) + + # NOTE: drop path for stochastic depth, we shall see if + # this is better than dropout here + dropout_layer = dict(type='DropPath', drop_prob=drop_path_rate) + self.drop_path = build_dropout( + dropout_layer) if dropout_layer else nn.Identity() + + if layer_scale_init_value > 0: + self.gamma_1 = nn.Parameter( + layer_scale_init_value * torch.ones((embed_dims)), + requires_grad=True) + self.gamma_2 = nn.Parameter( + layer_scale_init_value * torch.ones((embed_dims)), + requires_grad=True) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x: torch.Tensor, + rel_pos_bias: torch.Tensor) -> torch.Tensor: + if self.gamma_1 is None: + x = x + self.drop_path( + self.attn(self.ln1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.ffn(self.ln2(x))) + else: + x = x + self.drop_path(self.gamma_1 * self.attn( + self.ln1(x), rel_pos_bias=rel_pos_bias)) + x = x + self.drop_path(self.gamma_2 * self.ffn(self.ln2(x))) + return x + + +@MODELS.register_module() +class BEiTViT(BaseBackbone): + """Backbone for BEiT. + + A PyTorch implement of : `BEiT: BERT Pre-Training of Image Transformers + `_ + A PyTorch implement of : `BEiT v2: Masked Image Modeling with + Vector-Quantized Visual Tokenizers `_ + + Args: + arch (str | dict): BEiT architecture. If use string, choose from + 'base', 'large'. If use dict, it should have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of transformer encoder layers. + - **num_heads** (int): The number of heads in attention modules. 
+ - **feedforward_channels** (int): The hidden dimensions in + feedforward modules. + + Defaults to 'base'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + in_channels (int): The num of input channels. Defaults to 3. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + bias (bool | str): The option to add leanable bias for q, k, v. If bias + is True, it will add leanable bias. If bias is 'qv_bias', it will + only add leanable bias for q, v. If bias is False, it will not add + bias for q, k, v. Default to 'qv_bias'. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + out_type (str): The type of output features. Please choose from + + - ``"cls_token"``: The class token tensor with shape (B, C). + - ``"featmap"``: The feature map tensor from the patch tokens + with shape (B, C, H, W). + - ``"avg_featmap"``: The global averaged feature map tensor + with shape (B, C). + - ``"raw"``: The raw feature tensor includes patch tokens and + class tokens with shape (B, L, C). + + Defaults to ``"avg_featmap"``. + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Defaults to True. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + use_abs_pos_emb (bool): Use position embedding like vanilla ViT. + Defaults to False. + use_rel_pos_bias (bool): Use relative position embedding in each + transformer encoder layer. Defaults to True. + use_shared_rel_pos_bias (bool): Use shared relative position embedding, + all transformer encoder layers share the same relative position + embedding. Defaults to False. + layer_scale_init_value (float): The initialization value for + the learnable scaling of attention and FFN. Defaults to 0.1. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
+ """ + arch_zoo = { + **dict.fromkeys( + ['s', 'small'], { + 'embed_dims': 768, + 'num_layers': 8, + 'num_heads': 8, + 'feedforward_channels': 768 * 3, + }), + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 3072 + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 1024, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + }), + **dict.fromkeys( + ['eva-g', 'eva-giant'], + { + # The implementation in EVA + # + 'embed_dims': 1408, + 'num_layers': 40, + 'num_heads': 16, + 'feedforward_channels': 6144 + }), + **dict.fromkeys( + ['deit-t', 'deit-tiny'], { + 'embed_dims': 192, + 'num_layers': 12, + 'num_heads': 3, + 'feedforward_channels': 192 * 4 + }), + **dict.fromkeys( + ['deit-s', 'deit-small'], { + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 6, + 'feedforward_channels': 384 * 4 + }), + **dict.fromkeys( + ['deit-b', 'deit-base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 768 * 4 + }), + } + num_extra_tokens = 1 # class token + OUT_TYPES = {'raw', 'cls_token', 'featmap', 'avg_featmap'} + + def __init__(self, + arch='base', + img_size=224, + patch_size=16, + in_channels=3, + out_indices=-1, + drop_rate=0, + drop_path_rate=0, + bias='qv_bias', + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=False, + out_type='avg_featmap', + with_cls_token=True, + frozen_stages=-1, + use_abs_pos_emb=False, + use_rel_pos_bias=True, + use_shared_rel_pos_bias=False, + interpolate_mode='bicubic', + layer_scale_init_value=0.1, + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None): + super(BEiTViT, self).__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'num_heads', 'feedforward_channels' + } + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.img_size = to_2tuple(img_size) + + # Set patch embedding + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + # Set out type + if out_type not in self.OUT_TYPES: + raise ValueError(f'Unsupported `out_type` {out_type}, please ' + f'choose from {self.OUT_TYPES}') + self.out_type = out_type + + # Set cls token + self.with_cls_token = with_cls_token + if with_cls_token: + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + self.num_extra_tokens = 1 + elif out_type != 'cls_token': + self.cls_token = None + self.num_extra_tokens = 0 + else: + raise ValueError( + 'with_cls_token must be True when `out_type="cls_token"`.') + + # Set position embedding + self.interpolate_mode = interpolate_mode + if use_abs_pos_emb: + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches + self.num_extra_tokens, + self.embed_dims)) + self._register_load_state_dict_pre_hook(self._prepare_pos_embed) + else: + self.pos_embed = None + self.drop_after_pos = 
nn.Dropout(p=drop_rate) + + assert not (use_rel_pos_bias and use_shared_rel_pos_bias), ( + '`use_rel_pos_bias` and `use_shared_rel_pos_bias` cannot be set ' + 'to True at the same time') + self.use_rel_pos_bias = use_rel_pos_bias + + if use_shared_rel_pos_bias: + self.rel_pos_bias = RelativePositionBias( + window_size=self.patch_resolution, + num_heads=self.arch_settings['num_heads']) + else: + self.rel_pos_bias = None + self._register_load_state_dict_pre_hook( + self._prepare_relative_position_bias_table) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_layers + index + assert 0 <= out_indices[i] <= self.num_layers, \ + f'Invalid out_indices {index}' + self.out_indices = out_indices + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, self.num_layers) + + self.layers = ModuleList() + if isinstance(layer_cfgs, dict): + layer_cfgs = [layer_cfgs] * self.num_layers + for i in range(self.num_layers): + _layer_cfg = dict( + embed_dims=self.embed_dims, + num_heads=self.arch_settings['num_heads'], + feedforward_channels=self. + arch_settings['feedforward_channels'], + layer_scale_init_value=layer_scale_init_value, + window_size=self.patch_resolution, + use_rel_pos_bias=use_rel_pos_bias, + drop_rate=drop_rate, + drop_path_rate=dpr[i], + bias=bias, + norm_cfg=norm_cfg) + _layer_cfg.update(layer_cfgs[i]) + self.layers.append(BEiTTransformerEncoderLayer(**_layer_cfg)) + + self.frozen_stages = frozen_stages + self.final_norm = final_norm + if final_norm: + self.ln1 = build_norm_layer(norm_cfg, self.embed_dims) + + if out_type == 'avg_featmap': + self.ln2 = build_norm_layer(norm_cfg, self.embed_dims) + + # freeze stages only when self.frozen_stages > 0 + if self.frozen_stages > 0: + self._freeze_stages() + + @property + def norm1(self): + return self.ln1 + + @property + def norm2(self): + return self.ln2 + + def init_weights(self): + super(BEiTViT, self).init_weights() + + if not (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=0.02) + + def _prepare_pos_embed(self, state_dict, prefix, *args, **kwargs): + name = prefix + 'pos_embed' + if name not in state_dict.keys(): + return + + ckpt_pos_embed_shape = state_dict[name].shape + if (not self.with_cls_token + and ckpt_pos_embed_shape[1] == self.pos_embed.shape[1] + 1): + # Remove cls token from state dict if it's not used. 
+ state_dict[name] = state_dict[name][:, 1:] + ckpt_pos_embed_shape = state_dict[name].shape + + if self.pos_embed.shape != ckpt_pos_embed_shape: + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.info( + f'Resize the pos_embed shape from {ckpt_pos_embed_shape} ' + f'to {self.pos_embed.shape}.') + + ckpt_pos_embed_shape = to_2tuple( + int(np.sqrt(ckpt_pos_embed_shape[1] - self.num_extra_tokens))) + pos_embed_shape = self.patch_embed.init_out_size + + state_dict[name] = resize_pos_embed(state_dict[name], + ckpt_pos_embed_shape, + pos_embed_shape, + self.interpolate_mode, + self.num_extra_tokens) + + @staticmethod + def resize_pos_embed(*args, **kwargs): + """Interface for backward-compatibility.""" + return resize_pos_embed(*args, **kwargs) + + def _freeze_stages(self): + # freeze position embedding + if self.pos_embed is not None: + self.pos_embed.requires_grad = False + # set dropout to eval model + self.drop_after_pos.eval() + # freeze patch embedding + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + # freeze cls_token + if self.with_cls_token: + self.cls_token.requires_grad = False + # freeze layers + for i in range(1, self.frozen_stages + 1): + m = self.layers[i - 1] + m.eval() + for param in m.parameters(): + param.requires_grad = False + # freeze the last layer norm + if self.frozen_stages == len(self.layers): + if self.final_norm: + self.ln1.eval() + for param in self.ln1.parameters(): + param.requires_grad = False + + if self.out_type == 'avg_featmap': + self.ln2.eval() + for param in self.ln2.parameters(): + param.requires_grad = False + + def forward(self, x): + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + + if self.cls_token is not None: + # stole cls_tokens impl from Phil Wang, thanks + cls_token = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_token, x), dim=1) + + if self.pos_embed is not None: + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + x = self.drop_after_pos(x) + + rel_pos_bias = self.rel_pos_bias() \ + if self.rel_pos_bias is not None else None + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x, rel_pos_bias) + + if i == len(self.layers) - 1 and self.final_norm: + x = self.ln1(x) + + if i in self.out_indices: + outs.append(self._format_output(x, patch_resolution)) + + return tuple(outs) + + def _format_output(self, x, hw): + if self.out_type == 'raw': + return x + if self.out_type == 'cls_token': + return x[:, 0] + + patch_token = x[:, self.num_extra_tokens:] + if self.out_type == 'featmap': + B = x.size(0) + # (B, N, C) -> (B, H, W, C) -> (B, C, H, W) + return patch_token.reshape(B, *hw, -1).permute(0, 3, 1, 2) + if self.out_type == 'avg_featmap': + return self.ln2(patch_token.mean(dim=1)) + + def _prepare_relative_position_bias_table(self, state_dict, prefix, *args, + **kwargs): + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + + if self.use_rel_pos_bias and 'rel_pos_bias.relative_position_bias_table' in state_dict: # noqa:E501 + logger.info('Expand the shared relative position embedding to ' + 'each transformer block.') + rel_pos_bias = state_dict[ + 'rel_pos_bias.relative_position_bias_table'] + for i in range(self.num_layers): + state_dict[ + f'layers.{i}.attn.relative_position_bias_table'] = \ + rel_pos_bias.clone() + state_dict.pop('rel_pos_bias.relative_position_bias_table') + 
state_dict.pop('rel_pos_bias.relative_position_index') + + state_dict_model = self.state_dict() + all_keys = list(state_dict_model.keys()) + for key in all_keys: + if 'relative_position_bias_table' in key: + ckpt_key = prefix + key + if ckpt_key not in state_dict: + continue + rel_pos_bias_pretrained = state_dict[ckpt_key] + rel_pos_bias_current = state_dict_model[key] + L1, nH1 = rel_pos_bias_pretrained.size() + L2, nH2 = rel_pos_bias_current.size() + src_size = int((L1 - 3)**0.5) + dst_size = int((L2 - 3)**0.5) + if L1 != L2: + extra_tokens = rel_pos_bias_pretrained[-3:, :] + rel_pos_bias = rel_pos_bias_pretrained[:-3, :] + + new_rel_pos_bias = resize_relative_position_bias_table( + src_size, dst_size, rel_pos_bias, nH1) + new_rel_pos_bias = torch.cat( + (new_rel_pos_bias, extra_tokens), dim=0) + logger.info('Resize the relative_position_bias_table from ' + f'{state_dict[ckpt_key].shape} to ' + f'{new_rel_pos_bias.shape}') + state_dict[ckpt_key] = new_rel_pos_bias + + # The index buffer need to be re-generated. + index_buffer = ckpt_key.replace('bias_table', 'index') + if index_buffer in state_dict: + del state_dict[index_buffer] + + def get_layer_depth(self, param_name: str, prefix: str = ''): + """Get the layer-wise depth of a parameter. + + Args: + param_name (str): The name of the parameter. + prefix (str): The prefix for the parameter. + Defaults to an empty string. + + Returns: + Tuple[int, int]: The layer-wise depth and the num of layers. + + Note: + The first depth is the stem module (``layer_depth=0``), and the + last depth is the subsequent module (``layer_depth=num_layers-1``) + """ + num_layers = self.num_layers + 2 + + if not param_name.startswith(prefix): + # For subsequent module like head + return num_layers - 1, num_layers + + param_name = param_name[len(prefix):] + + if param_name in ('cls_token', 'pos_embed'): + layer_depth = 0 + elif param_name.startswith('patch_embed'): + layer_depth = 0 + elif param_name.startswith('layers'): + layer_id = int(param_name.split('.')[1]) + layer_depth = layer_id + 1 + else: + layer_depth = num_layers - 1 + + return layer_depth, num_layers diff --git a/mmpretrain/models/backbones/conformer.py b/mmpretrain/models/backbones/conformer.py new file mode 100644 index 0000000..eda72b0 --- /dev/null +++ b/mmpretrain/models/backbones/conformer.py @@ -0,0 +1,621 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_activation_layer, build_norm_layer +from mmcv.cnn.bricks.drop import DropPath +from mmcv.cnn.bricks.transformer import AdaptivePadding +from mmengine.model import BaseModule +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone +from .vision_transformer import TransformerEncoderLayer + + +class ConvBlock(BaseModule): + """Basic convluation block used in Conformer. + + This block includes three convluation modules, and supports three new + functions: + 1. Returns the output of both the final layers and the second convluation + module. + 2. Fuses the input of the second convluation module with an extra input + feature map. + 3. Supports to add an extra convluation module to the identity connection. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + stride (int): The stride of the second convluation module. + Defaults to 1. 
+ groups (int): The groups of the second convolution module. + Defaults to 1. + drop_path_rate (float): The rate of the DropPath layer. Defaults to 0. + with_residual_conv (bool): Whether to add an extra convolution module + to the identity connection. Defaults to False. + norm_cfg (dict): The config of normalization layers. + Defaults to ``dict(type='BN', eps=1e-6)``. + act_cfg (dict): The config of activation functions. + Defaults to ``dict(type='ReLU', inplace=True)``. + init_cfg (dict, optional): The extra config to initialize the module. + Defaults to None. + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + groups=1, + drop_path_rate=0., + with_residual_conv=False, + norm_cfg=dict(type='BN', eps=1e-6), + act_cfg=dict(type='ReLU', inplace=True), + init_cfg=None): + super(ConvBlock, self).__init__(init_cfg=init_cfg) + + expansion = 4 + mid_channels = out_channels // expansion + + self.conv1 = nn.Conv2d( + in_channels, + mid_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False) + self.bn1 = build_norm_layer(norm_cfg, mid_channels)[1] + self.act1 = build_activation_layer(act_cfg) + + self.conv2 = nn.Conv2d( + mid_channels, + mid_channels, + kernel_size=3, + stride=stride, + groups=groups, + padding=1, + bias=False) + self.bn2 = build_norm_layer(norm_cfg, mid_channels)[1] + self.act2 = build_activation_layer(act_cfg) + + self.conv3 = nn.Conv2d( + mid_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + bias=False) + self.bn3 = build_norm_layer(norm_cfg, out_channels)[1] + self.act3 = build_activation_layer(act_cfg) + + if with_residual_conv: + self.residual_conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size=1, + stride=stride, + padding=0, + bias=False) + self.residual_bn = build_norm_layer(norm_cfg, out_channels)[1] + + self.with_residual_conv = with_residual_conv + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0.
else nn.Identity() + + def zero_init_last_bn(self): + nn.init.zeros_(self.bn3.weight) + + def forward(self, x, fusion_features=None, out_conv2=True): + identity = x + + x = self.conv1(x) + x = self.bn1(x) + x = self.act1(x) + + x = self.conv2(x) if fusion_features is None else self.conv2( + x + fusion_features) + x = self.bn2(x) + x2 = self.act2(x) + + x = self.conv3(x2) + x = self.bn3(x) + + if self.drop_path is not None: + x = self.drop_path(x) + + if self.with_residual_conv: + identity = self.residual_conv(identity) + identity = self.residual_bn(identity) + + x += identity + x = self.act3(x) + + if out_conv2: + return x, x2 + else: + return x + + +class FCUDown(BaseModule): + """CNN feature maps -> Transformer patch embeddings.""" + + def __init__(self, + in_channels, + out_channels, + down_stride, + with_cls_token=True, + norm_cfg=dict(type='LN', eps=1e-6), + act_cfg=dict(type='GELU'), + init_cfg=None): + super(FCUDown, self).__init__(init_cfg=init_cfg) + self.down_stride = down_stride + self.with_cls_token = with_cls_token + + self.conv_project = nn.Conv2d( + in_channels, out_channels, kernel_size=1, stride=1, padding=0) + self.sample_pooling = nn.AvgPool2d( + kernel_size=down_stride, stride=down_stride) + + self.ln = build_norm_layer(norm_cfg, out_channels)[1] + self.act = build_activation_layer(act_cfg) + + def forward(self, x, x_t): + x = self.conv_project(x) # [N, C, H, W] + + x = self.sample_pooling(x).flatten(2).transpose(1, 2) + x = self.ln(x) + x = self.act(x) + + if self.with_cls_token: + x = torch.cat([x_t[:, 0][:, None, :], x], dim=1) + + return x + + +class FCUUp(BaseModule): + """Transformer patch embeddings -> CNN feature maps.""" + + def __init__(self, + in_channels, + out_channels, + up_stride, + with_cls_token=True, + norm_cfg=dict(type='BN', eps=1e-6), + act_cfg=dict(type='ReLU', inplace=True), + init_cfg=None): + super(FCUUp, self).__init__(init_cfg=init_cfg) + + self.up_stride = up_stride + self.with_cls_token = with_cls_token + + self.conv_project = nn.Conv2d( + in_channels, out_channels, kernel_size=1, stride=1, padding=0) + self.bn = build_norm_layer(norm_cfg, out_channels)[1] + self.act = build_activation_layer(act_cfg) + + def forward(self, x, H, W): + B, _, C = x.shape + # [N, 197, 384] -> [N, 196, 384] -> [N, 384, 196] -> [N, 384, 14, 14] + if self.with_cls_token: + x_r = x[:, 1:].transpose(1, 2).reshape(B, C, H, W) + else: + x_r = x.transpose(1, 2).reshape(B, C, H, W) + + x_r = self.act(self.bn(self.conv_project(x_r))) + + return F.interpolate( + x_r, size=(H * self.up_stride, W * self.up_stride)) + + +class ConvTransBlock(BaseModule): + """Basic module for Conformer. + + This module is a fusion of CNN block transformer encoder block. + + Args: + in_channels (int): The number of input channels in conv blocks. + out_channels (int): The number of output channels in conv blocks. + embed_dims (int): The embedding dimension in transformer blocks. + conv_stride (int): The stride of conv2d layers. Defaults to 1. + groups (int): The groups of conv blocks. Defaults to 1. + with_residual_conv (bool): Whether to add a conv-bn layer to the + identity connect in the conv block. Defaults to False. + down_stride (int): The stride of the downsample pooling layer. + Defaults to 4. + num_heads (int): The number of heads in transformer attention layers. + Defaults to 12. + mlp_ratio (float): The expansion ratio in transformer FFN module. + Defaults to 4. + qkv_bias (bool): Enable bias for qkv if True. Defaults to False. + with_cls_token (bool): Whether use class token or not. 
+ Defaults to True. + drop_rate (float): The dropout rate of the output projection and + FFN in the transformer block. Defaults to 0. + attn_drop_rate (float): The dropout rate after the attention + calculation in the transformer block. Defaults to 0. + drop_path_rate (bloat): The drop path rate in both the conv block + and the transformer block. Defaults to 0. + last_fusion (bool): Whether this block is the last stage. If so, + downsample the fusion feature map. + init_cfg (dict, optional): The extra config to initialize the module. + Defaults to None. + """ + + def __init__(self, + in_channels, + out_channels, + embed_dims, + conv_stride=1, + groups=1, + with_residual_conv=False, + down_stride=4, + num_heads=12, + mlp_ratio=4., + qkv_bias=False, + with_cls_token=True, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + last_fusion=False, + init_cfg=None): + super(ConvTransBlock, self).__init__(init_cfg=init_cfg) + expansion = 4 + self.cnn_block = ConvBlock( + in_channels=in_channels, + out_channels=out_channels, + with_residual_conv=with_residual_conv, + stride=conv_stride, + groups=groups) + + if last_fusion: + self.fusion_block = ConvBlock( + in_channels=out_channels, + out_channels=out_channels, + stride=2, + with_residual_conv=True, + groups=groups, + drop_path_rate=drop_path_rate) + else: + self.fusion_block = ConvBlock( + in_channels=out_channels, + out_channels=out_channels, + groups=groups, + drop_path_rate=drop_path_rate) + + self.squeeze_block = FCUDown( + in_channels=out_channels // expansion, + out_channels=embed_dims, + down_stride=down_stride, + with_cls_token=with_cls_token) + + self.expand_block = FCUUp( + in_channels=embed_dims, + out_channels=out_channels // expansion, + up_stride=down_stride, + with_cls_token=with_cls_token) + + self.trans_block = TransformerEncoderLayer( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=int(embed_dims * mlp_ratio), + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + attn_drop_rate=attn_drop_rate, + qkv_bias=qkv_bias, + norm_cfg=dict(type='LN', eps=1e-6)) + + self.down_stride = down_stride + self.embed_dim = embed_dims + self.last_fusion = last_fusion + + def forward(self, cnn_input, trans_input): + x, x_conv2 = self.cnn_block(cnn_input, out_conv2=True) + + _, _, H, W = x_conv2.shape + + # Convert the feature map of conv2 to transformer embedding + # and concat with class token. + conv2_embedding = self.squeeze_block(x_conv2, trans_input) + + trans_output = self.trans_block(conv2_embedding + trans_input) + + # Convert the transformer output embedding to feature map + trans_features = self.expand_block(trans_output, H // self.down_stride, + W // self.down_stride) + x = self.fusion_block( + x, fusion_features=trans_features, out_conv2=False) + + return x, trans_output + + +@MODELS.register_module() +class Conformer(BaseBackbone): + """Conformer backbone. + + A PyTorch implementation of : `Conformer: Local Features Coupling Global + Representations for Visual Recognition `_ + + Args: + arch (str | dict): Conformer architecture. Defaults to 'tiny'. + patch_size (int): The patch size. Defaults to 16. + base_channels (int): The base number of channels in CNN network. + Defaults to 64. + mlp_ratio (float): The expansion ratio of FFN network in transformer + block. Defaults to 4. + with_cls_token (bool): Whether use class token or not. + Defaults to True. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + out_indices (Sequence | int): Output from which stages. 
+ Defaults to -1, means the last stage. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + arch_zoo = { + **dict.fromkeys(['t', 'tiny'], + {'embed_dims': 384, + 'channel_ratio': 1, + 'num_heads': 6, + 'depths': 12 + }), + **dict.fromkeys(['s', 'small'], + {'embed_dims': 384, + 'channel_ratio': 4, + 'num_heads': 6, + 'depths': 12 + }), + **dict.fromkeys(['b', 'base'], + {'embed_dims': 576, + 'channel_ratio': 6, + 'num_heads': 9, + 'depths': 12 + }), + } # yapf: disable + + _version = 1 + + def __init__(self, + arch='tiny', + patch_size=16, + base_channels=64, + mlp_ratio=4., + qkv_bias=True, + with_cls_token=True, + drop_path_rate=0., + norm_eval=True, + frozen_stages=0, + out_indices=-1, + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'depths', 'num_heads', 'channel_ratio' + } + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.num_features = self.embed_dims = self.arch_settings['embed_dims'] + self.depths = self.arch_settings['depths'] + self.num_heads = self.arch_settings['num_heads'] + self.channel_ratio = self.arch_settings['channel_ratio'] + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.depths + index + 1 + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + + self.norm_eval = norm_eval + self.frozen_stages = frozen_stages + + self.with_cls_token = with_cls_token + if self.with_cls_token: + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + + # stochastic depth decay rule + self.trans_dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, self.depths) + ] + + # Stem stage: get the feature maps by conv block + self.conv1 = nn.Conv2d( + 3, 64, kernel_size=7, stride=2, padding=3, + bias=False) # 1 / 2 [112, 112] + self.bn1 = nn.BatchNorm2d(64) + self.act1 = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d( + kernel_size=3, stride=2, padding=1) # 1 / 4 [56, 56] + + assert patch_size % 16 == 0, 'The patch size of Conformer must ' \ + 'be divisible by 16.' 
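+ # Illustrative token arithmetic, assuming the default patch_size=16 and a
+ # 224x224 input: the stride-2 stem conv and stride-2 max-pooling give a
+ # 56x56 map, trans_down_stride = 16 // 4 = 4, so the transformer branch
+ # starts from 56 // 4 = 14, i.e. 14 * 14 = 196 patch tokens (197 with the
+ # class token, matching the shape comment in FCUUp above).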
+ trans_down_stride = patch_size // 4 + + # To solve the issue #680 + # Auto pad the feature map to be divisible by trans_down_stride + self.auto_pad = AdaptivePadding(trans_down_stride, trans_down_stride) + + # 1 stage + stage1_channels = int(base_channels * self.channel_ratio) + self.conv_1 = ConvBlock( + in_channels=64, + out_channels=stage1_channels, + with_residual_conv=True, + stride=1) + self.trans_patch_conv = nn.Conv2d( + 64, + self.embed_dims, + kernel_size=trans_down_stride, + stride=trans_down_stride, + padding=0) + + self.trans_1 = TransformerEncoderLayer( + embed_dims=self.embed_dims, + num_heads=self.num_heads, + feedforward_channels=int(self.embed_dims * mlp_ratio), + drop_path_rate=self.trans_dpr[0], + qkv_bias=qkv_bias, + norm_cfg=dict(type='LN', eps=1e-6)) + + # 2~4 stage + init_stage = 2 + fin_stage = self.depths // 3 + 1 + for i in range(init_stage, fin_stage): + self.add_module( + f'conv_trans_{i}', + ConvTransBlock( + in_channels=stage1_channels, + out_channels=stage1_channels, + embed_dims=self.embed_dims, + conv_stride=1, + with_residual_conv=False, + down_stride=trans_down_stride, + num_heads=self.num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path_rate=self.trans_dpr[i - 1], + with_cls_token=self.with_cls_token)) + + stage2_channels = int(base_channels * self.channel_ratio * 2) + # 5~8 stage + init_stage = fin_stage # 5 + fin_stage = fin_stage + self.depths // 3 # 9 + for i in range(init_stage, fin_stage): + if i == init_stage: + conv_stride = 2 + in_channels = stage1_channels + else: + conv_stride = 1 + in_channels = stage2_channels + + with_residual_conv = True if i == init_stage else False + self.add_module( + f'conv_trans_{i}', + ConvTransBlock( + in_channels=in_channels, + out_channels=stage2_channels, + embed_dims=self.embed_dims, + conv_stride=conv_stride, + with_residual_conv=with_residual_conv, + down_stride=trans_down_stride // 2, + num_heads=self.num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path_rate=self.trans_dpr[i - 1], + with_cls_token=self.with_cls_token)) + + stage3_channels = int(base_channels * self.channel_ratio * 2 * 2) + # 9~12 stage + init_stage = fin_stage # 9 + fin_stage = fin_stage + self.depths // 3 # 13 + for i in range(init_stage, fin_stage): + if i == init_stage: + conv_stride = 2 + in_channels = stage2_channels + with_residual_conv = True + else: + conv_stride = 1 + in_channels = stage3_channels + with_residual_conv = False + + last_fusion = (i == self.depths) + + self.add_module( + f'conv_trans_{i}', + ConvTransBlock( + in_channels=in_channels, + out_channels=stage3_channels, + embed_dims=self.embed_dims, + conv_stride=conv_stride, + with_residual_conv=with_residual_conv, + down_stride=trans_down_stride // 4, + num_heads=self.num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path_rate=self.trans_dpr[i - 1], + with_cls_token=self.with_cls_token, + last_fusion=last_fusion)) + self.fin_stage = fin_stage + + self.pooling = nn.AdaptiveAvgPool2d(1) + self.trans_norm = nn.LayerNorm(self.embed_dims) + + if self.with_cls_token: + trunc_normal_(self.cls_token, std=.02) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + elif isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_( + m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.BatchNorm2d): + 
nn.init.constant_(m.weight, 1.) + nn.init.constant_(m.bias, 0.) + + if hasattr(m, 'zero_init_last_bn'): + m.zero_init_last_bn() + + def init_weights(self): + super(Conformer, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + self.apply(self._init_weights) + + def forward(self, x): + output = [] + B = x.shape[0] + if self.with_cls_token: + cls_tokens = self.cls_token.expand(B, -1, -1) + + # stem + x_base = self.maxpool(self.act1(self.bn1(self.conv1(x)))) + x_base = self.auto_pad(x_base) + + # 1 stage [N, 64, 56, 56] -> [N, 128, 56, 56] + x = self.conv_1(x_base, out_conv2=False) + x_t = self.trans_patch_conv(x_base).flatten(2).transpose(1, 2) + if self.with_cls_token: + x_t = torch.cat([cls_tokens, x_t], dim=1) + x_t = self.trans_1(x_t) + + # 2 ~ final + for i in range(2, self.fin_stage): + stage = getattr(self, f'conv_trans_{i}') + x, x_t = stage(x, x_t) + if i in self.out_indices: + if self.with_cls_token: + output.append([ + self.pooling(x).flatten(1), + self.trans_norm(x_t)[:, 0] + ]) + else: + # if no class token, use the mean patch token + # as the transformer feature. + output.append([ + self.pooling(x).flatten(1), + self.trans_norm(x_t).mean(dim=1) + ]) + + return tuple(output) diff --git a/mmpretrain/models/backbones/convmixer.py b/mmpretrain/models/backbones/convmixer.py new file mode 100644 index 0000000..480050d --- /dev/null +++ b/mmpretrain/models/backbones/convmixer.py @@ -0,0 +1,176 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import torch +import torch.nn as nn +from mmcv.cnn.bricks import (Conv2dAdaptivePadding, build_activation_layer, + build_norm_layer) +from mmengine.utils import digit_version + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + + +class Residual(nn.Module): + + def __init__(self, fn): + super().__init__() + self.fn = fn + + def forward(self, x): + return self.fn(x) + x + + +@MODELS.register_module() +class ConvMixer(BaseBackbone): + """ConvMixer. . + + A PyTorch implementation of : `Patches Are All You Need? + `_ + + Modified from the `official repo + `_ + and `timm + `_. + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architecture in ``ConvMixer.arch_settings``. And if dict, it + should include the following two keys: + + - embed_dims (int): The dimensions of patch embedding. + - depth (int): Number of repetitions of ConvMixer Layer. + - patch_size (int): The patch size. + - kernel_size (int): The kernel size of depthwise conv layers. + + Defaults to '768/32'. + in_channels (int): Number of input image channels. Defaults to 3. + patch_size (int): The size of one patch in the patch embed layer. + Defaults to 7. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='BN')``. + act_cfg (dict): The config dict for activation after each convolution. + Defaults to ``dict(type='GELU')``. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + init_cfg (dict, optional): Initialization config dict. 
+ """ + arch_settings = { + '768/32': { + 'embed_dims': 768, + 'depth': 32, + 'patch_size': 7, + 'kernel_size': 7 + }, + '1024/20': { + 'embed_dims': 1024, + 'depth': 20, + 'patch_size': 14, + 'kernel_size': 9 + }, + '1536/20': { + 'embed_dims': 1536, + 'depth': 20, + 'patch_size': 7, + 'kernel_size': 9 + }, + } + + def __init__(self, + arch='768/32', + in_channels=3, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='GELU'), + out_indices=-1, + frozen_stages=0, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + essential_keys = { + 'embed_dims', 'depth', 'patch_size', 'kernel_size' + } + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + + self.embed_dims = arch['embed_dims'] + self.depth = arch['depth'] + self.patch_size = arch['patch_size'] + self.kernel_size = arch['kernel_size'] + self.act = build_activation_layer(act_cfg) + + # check out indices and frozen stages + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.depth + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + self.frozen_stages = frozen_stages + + # Set stem layers + self.stem = nn.Sequential( + nn.Conv2d( + in_channels, + self.embed_dims, + kernel_size=self.patch_size, + stride=self.patch_size), self.act, + build_norm_layer(norm_cfg, self.embed_dims)[1]) + + # Set conv2d according to torch version + convfunc = nn.Conv2d + if digit_version(torch.__version__) < digit_version('1.9.0'): + convfunc = Conv2dAdaptivePadding + + # Repetitions of ConvMixer Layer + self.stages = nn.Sequential(*[ + nn.Sequential( + Residual( + nn.Sequential( + convfunc( + self.embed_dims, + self.embed_dims, + self.kernel_size, + groups=self.embed_dims, + padding='same'), self.act, + build_norm_layer(norm_cfg, self.embed_dims)[1])), + nn.Conv2d(self.embed_dims, self.embed_dims, kernel_size=1), + self.act, + build_norm_layer(norm_cfg, self.embed_dims)[1]) + for _ in range(self.depth) + ]) + + self._freeze_stages() + + def forward(self, x): + x = self.stem(x) + outs = [] + for i, stage in enumerate(self.stages): + x = stage(x) + if i in self.out_indices: + outs.append(x) + + # x = self.pooling(x).flatten(1) + return tuple(outs) + + def train(self, mode=True): + super(ConvMixer, self).train(mode) + self._freeze_stages() + + def _freeze_stages(self): + for i in range(self.frozen_stages): + stage = self.stages[i] + stage.eval() + for param in stage.parameters(): + param.requires_grad = False diff --git a/mmpretrain/models/backbones/convnext.py b/mmpretrain/models/backbones/convnext.py new file mode 100644 index 0000000..6a954f5 --- /dev/null +++ b/mmpretrain/models/backbones/convnext.py @@ -0,0 +1,412 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from functools import partial +from itertools import chain +from typing import Sequence + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn.bricks import DropPath +from mmengine.model import BaseModule, ModuleList, Sequential + +from mmpretrain.registry import MODELS +from ..utils import GRN, build_norm_layer +from .base_backbone import BaseBackbone + + +class ConvNeXtBlock(BaseModule): + """ConvNeXt Block. + + Args: + in_channels (int): The number of input channels. + dw_conv_cfg (dict): Config of depthwise convolution. + Defaults to ``dict(kernel_size=7, padding=3)``. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='LN2d', eps=1e-6)``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + mlp_ratio (float): The expansion ratio in both pointwise convolution. + Defaults to 4. + linear_pw_conv (bool): Whether to use linear layer to do pointwise + convolution. More details can be found in the note. + Defaults to True. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + layer_scale_init_value (float): Init value for Layer Scale. + Defaults to 1e-6. + + Note: + There are two equivalent implementations: + + 1. DwConv -> LayerNorm -> 1x1 Conv -> GELU -> 1x1 Conv; + all outputs are in (N, C, H, W). + 2. DwConv -> LayerNorm -> Permute to (N, H, W, C) -> Linear -> GELU + -> Linear; Permute back + + As default, we use the second to align with the official repository. + And it may be slightly faster. + """ + + def __init__(self, + in_channels, + dw_conv_cfg=dict(kernel_size=7, padding=3), + norm_cfg=dict(type='LN2d', eps=1e-6), + act_cfg=dict(type='GELU'), + mlp_ratio=4., + linear_pw_conv=True, + drop_path_rate=0., + layer_scale_init_value=1e-6, + use_grn=False, + with_cp=False): + super().__init__() + self.with_cp = with_cp + + self.depthwise_conv = nn.Conv2d( + in_channels, in_channels, groups=in_channels, **dw_conv_cfg) + + self.linear_pw_conv = linear_pw_conv + self.norm = build_norm_layer(norm_cfg, in_channels) + + mid_channels = int(mlp_ratio * in_channels) + if self.linear_pw_conv: + # Use linear layer to do pointwise conv. + pw_conv = nn.Linear + else: + pw_conv = partial(nn.Conv2d, kernel_size=1) + + self.pointwise_conv1 = pw_conv(in_channels, mid_channels) + self.act = MODELS.build(act_cfg) + self.pointwise_conv2 = pw_conv(mid_channels, in_channels) + + if use_grn: + self.grn = GRN(mid_channels) + else: + self.grn = None + + self.gamma = nn.Parameter( + layer_scale_init_value * torch.ones((in_channels)), + requires_grad=True) if layer_scale_init_value > 0 else None + + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0. 
else nn.Identity() + + def forward(self, x): + + def _inner_forward(x): + shortcut = x + x = self.depthwise_conv(x) + + if self.linear_pw_conv: + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + x = self.norm(x, data_format='channel_last') + x = self.pointwise_conv1(x) + x = self.act(x) + if self.grn is not None: + x = self.grn(x, data_format='channel_last') + x = self.pointwise_conv2(x) + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + else: + x = self.norm(x, data_format='channel_first') + x = self.pointwise_conv1(x) + x = self.act(x) + + if self.grn is not None: + x = self.grn(x, data_format='channel_first') + x = self.pointwise_conv2(x) + + if self.gamma is not None: + x = x.mul(self.gamma.view(1, -1, 1, 1)) + + x = shortcut + self.drop_path(x) + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + return x + + +@MODELS.register_module() +class ConvNeXt(BaseBackbone): + """ConvNeXt v1&v2 backbone. + + A PyTorch implementation of `A ConvNet for the 2020s + `_ and + `ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders + `_ + + Modified from the `official repo + `_ + and `timm + `_. + + To use ConvNeXt v2, please set ``use_grn=True`` and ``layer_scale_init_value=0.``. + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architecture in ``ConvNeXt.arch_settings``. And if dict, it + should include the following two keys: + + - depths (list[int]): Number of blocks at each stage. + - channels (list[int]): The number of channels at each stage. + + Defaults to 'tiny'. + in_channels (int): Number of input image channels. Defaults to 3. + stem_patch_size (int): The size of one patch in the stem layer. + Defaults to 4. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='LN2d', eps=1e-6)``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + linear_pw_conv (bool): Whether to use linear layer to do pointwise + convolution. Defaults to True. + use_grn (bool): Whether to add Global Response Normalization in the + blocks. Defaults to False. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + layer_scale_init_value (float): Init value for Layer Scale. + Defaults to 1e-6. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + gap_before_final_norm (bool): Whether to globally average the feature + map before the final norm layer. In the official repo, it's only + used in classification task. Defaults to True. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. 
+ init_cfg (dict, optional): Initialization config dict + """ # noqa: E501 + arch_settings = { + 'atto': { + 'depths': [2, 2, 6, 2], + 'channels': [40, 80, 160, 320] + }, + 'femto': { + 'depths': [2, 2, 6, 2], + 'channels': [48, 96, 192, 384] + }, + 'pico': { + 'depths': [2, 2, 6, 2], + 'channels': [64, 128, 256, 512] + }, + 'nano': { + 'depths': [2, 2, 8, 2], + 'channels': [80, 160, 320, 640] + }, + 'tiny': { + 'depths': [3, 3, 9, 3], + 'channels': [96, 192, 384, 768] + }, + 'small': { + 'depths': [3, 3, 27, 3], + 'channels': [96, 192, 384, 768] + }, + 'base': { + 'depths': [3, 3, 27, 3], + 'channels': [128, 256, 512, 1024] + }, + 'large': { + 'depths': [3, 3, 27, 3], + 'channels': [192, 384, 768, 1536] + }, + 'xlarge': { + 'depths': [3, 3, 27, 3], + 'channels': [256, 512, 1024, 2048] + }, + 'huge': { + 'depths': [3, 3, 27, 3], + 'channels': [352, 704, 1408, 2816] + } + } + + def __init__(self, + arch='tiny', + in_channels=3, + stem_patch_size=4, + norm_cfg=dict(type='LN2d', eps=1e-6), + act_cfg=dict(type='GELU'), + linear_pw_conv=True, + use_grn=False, + drop_path_rate=0., + layer_scale_init_value=1e-6, + out_indices=-1, + frozen_stages=0, + gap_before_final_norm=True, + with_cp=False, + init_cfg=[ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict( + type='Constant', layer=['LayerNorm'], val=1., + bias=0.), + ]): + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + assert 'depths' in arch and 'channels' in arch, \ + f'The arch dict must have "depths" and "channels", ' \ + f'but got {list(arch.keys())}.' + + self.depths = arch['depths'] + self.channels = arch['channels'] + assert (isinstance(self.depths, Sequence) + and isinstance(self.channels, Sequence) + and len(self.depths) == len(self.channels)), \ + f'The "depths" ({self.depths}) and "channels" ({self.channels}) ' \ + 'should be both sequence with the same length.' + + self.num_stages = len(self.depths) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = 4 + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + + self.frozen_stages = frozen_stages + self.gap_before_final_norm = gap_before_final_norm + + # stochastic depth decay rule + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, sum(self.depths)) + ] + block_idx = 0 + + # 4 downsample layers between stages, including the stem layer. 
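+ # (Illustrative, assuming the default 'tiny' arch and a 224x224 input: the
+ # stride-4 stem gives a 56x56 map with 96 channels, and each later 2x2,
+ # stride-2 downsample layer halves the resolution, so the four stages run
+ # at 56x56, 28x28, 14x14 and 7x7.)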
+ self.downsample_layers = ModuleList() + stem = nn.Sequential( + nn.Conv2d( + in_channels, + self.channels[0], + kernel_size=stem_patch_size, + stride=stem_patch_size), + build_norm_layer(norm_cfg, self.channels[0]), + ) + self.downsample_layers.append(stem) + + # 4 feature resolution stages, each consisting of multiple residual + # blocks + self.stages = nn.ModuleList() + + for i in range(self.num_stages): + depth = self.depths[i] + channels = self.channels[i] + + if i >= 1: + downsample_layer = nn.Sequential( + build_norm_layer(norm_cfg, self.channels[i - 1]), + nn.Conv2d( + self.channels[i - 1], + channels, + kernel_size=2, + stride=2), + ) + self.downsample_layers.append(downsample_layer) + + stage = Sequential(*[ + ConvNeXtBlock( + in_channels=channels, + drop_path_rate=dpr[block_idx + j], + norm_cfg=norm_cfg, + act_cfg=act_cfg, + linear_pw_conv=linear_pw_conv, + layer_scale_init_value=layer_scale_init_value, + use_grn=use_grn, + with_cp=with_cp) for j in range(depth) + ]) + block_idx += depth + + self.stages.append(stage) + + if i in self.out_indices: + norm_layer = build_norm_layer(norm_cfg, channels) + self.add_module(f'norm{i}', norm_layer) + + self._freeze_stages() + + def forward(self, x): + outs = [] + for i, stage in enumerate(self.stages): + x = self.downsample_layers[i](x) + x = stage(x) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + if self.gap_before_final_norm: + gap = x.mean([-2, -1], keepdim=True) + outs.append(norm_layer(gap).flatten(1)) + else: + outs.append(norm_layer(x)) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(self.frozen_stages): + downsample_layer = self.downsample_layers[i] + stage = self.stages[i] + downsample_layer.eval() + stage.eval() + for param in chain(downsample_layer.parameters(), + stage.parameters()): + param.requires_grad = False + + def train(self, mode=True): + super(ConvNeXt, self).train(mode) + self._freeze_stages() + + def get_layer_depth(self, param_name: str, prefix: str = ''): + """Get the layer-wise depth of a parameter. + + Args: + param_name (str): The name of the parameter. + prefix (str): The prefix for the parameter. + Defaults to an empty string. + + Returns: + Tuple[int, int]: The layer-wise depth and the num of layers. + """ + + max_layer_id = 12 if self.depths[-2] > 9 else 6 + + if not param_name.startswith(prefix): + # For subsequent module like head + return max_layer_id + 1, max_layer_id + 2 + + param_name = param_name[len(prefix):] + if param_name.startswith('downsample_layers'): + stage_id = int(param_name.split('.')[1]) + if stage_id == 0: + layer_id = 0 + elif stage_id == 1 or stage_id == 2: + layer_id = stage_id + 1 + else: # stage_id == 3: + layer_id = max_layer_id + + elif param_name.startswith('stages'): + stage_id = int(param_name.split('.')[1]) + block_id = int(param_name.split('.')[2]) + if stage_id == 0 or stage_id == 1: + layer_id = stage_id + 1 + elif stage_id == 2: + layer_id = 3 + block_id // 3 + else: # stage_id == 3: + layer_id = max_layer_id + + # final norm layer + else: + layer_id = max_layer_id + 1 + + return layer_id, max_layer_id + 2 diff --git a/mmpretrain/models/backbones/cspnet.py b/mmpretrain/models/backbones/cspnet.py new file mode 100644 index 0000000..7492e97 --- /dev/null +++ b/mmpretrain/models/backbones/cspnet.py @@ -0,0 +1,679 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
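To make the ConvNeXt V1/V2 switch described in the docstring above concrete, a short hedged sketch (with the default gap_before_final_norm=True the last output is the pooled 768-d feature of the 'tiny' arch; the import path is assumed as in the other examples in this patch):

    >>> import torch
    >>> from mmpretrain.models import ConvNeXt
    >>> # ConvNeXt V2 style: enable GRN and disable layer scale, as noted above.
    >>> model = ConvNeXt(arch='tiny', use_grn=True, layer_scale_init_value=0.)
    >>> inputs = torch.rand(1, 3, 224, 224)
    >>> outs = model(inputs)
    >>> print(tuple(outs[-1].shape))
    (1, 768)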
+import math +from typing import Sequence + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule +from mmcv.cnn.bricks import DropPath +from mmengine.model import BaseModule, Sequential +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.registry import MODELS +from ..utils import to_ntuple +from .resnet import Bottleneck as ResNetBottleneck +from .resnext import Bottleneck as ResNeXtBottleneck + +eps = 1.0e-5 + + +class DarknetBottleneck(BaseModule): + """The basic bottleneck block used in Darknet. Each DarknetBottleneck + consists of two ConvModules and the input is added to the final output. + Each ConvModule is composed of Conv, BN, and LeakyReLU. The first convLayer + has filter size of 1x1 and the second one has the filter size of 3x3. + + Args: + in_channels (int): The input channels of this Module. + out_channels (int): The output channels of this Module. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the input/output channels of conv2. + Defaults to 4. + add_identity (bool): Whether to add identity to the out. + Defaults to True. + use_depthwise (bool): Whether to use depthwise separable convolution. + Defaults to False. + conv_cfg (dict): Config dict for convolution layer. Defaults to None, + which means using conv2d. + drop_path_rate (float): The ratio of the drop path layer. Default: 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN', eps=1e-5)``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='Swish')``. + """ + + def __init__(self, + in_channels, + out_channels, + expansion=2, + add_identity=True, + use_depthwise=False, + conv_cfg=None, + drop_path_rate=0, + norm_cfg=dict(type='BN', eps=1e-5), + act_cfg=dict(type='LeakyReLU', inplace=True), + init_cfg=None): + super().__init__(init_cfg) + hidden_channels = int(out_channels / expansion) + conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule + self.conv1 = ConvModule( + in_channels, + hidden_channels, + 1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv2 = conv( + hidden_channels, + out_channels, + 3, + stride=1, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.add_identity = \ + add_identity and in_channels == out_channels + + self.drop_path = DropPath(drop_prob=drop_path_rate + ) if drop_path_rate > eps else nn.Identity() + + def forward(self, x): + identity = x + out = self.conv1(x) + out = self.conv2(out) + out = self.drop_path(out) + + if self.add_identity: + return out + identity + else: + return out + + +class CSPStage(BaseModule): + """Cross Stage Partial Stage. + + .. code:: text + + Downsample Convolution (optional) + | + | + Expand Convolution + | + | + Split to xa, xb + | \ + | \ + | blocks(xb) + | / + | / transition + | / + Concat xa, blocks(xb) + | + Transition Convolution + + Args: + block_fn (nn.module): The basic block function in the Stage. + in_channels (int): The input channels of the CSP layer. + out_channels (int): The output channels of the CSP layer. + has_downsampler (bool): Whether to add a downsampler in the stage. + Default: False. + down_growth (bool): Whether to expand the channels in the + downsampler layer of the stage. Default: False. + expand_ratio (float): The expand ratio to adjust the number of + channels of the expand conv layer. Default: 0.5 + bottle_ratio (float): Ratio to adjust the number of channels of the + hidden layer. 
Default: 0.5 + block_dpr (float): The ratio of the drop path layer in the + blocks of the stage. Default: 0. + num_blocks (int): Number of blocks. Default: 1 + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN') + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', inplace=True) + """ + + def __init__(self, + block_fn, + in_channels, + out_channels, + has_downsampler=True, + down_growth=False, + expand_ratio=0.5, + bottle_ratio=2, + num_blocks=1, + block_dpr=0, + block_args={}, + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-5), + act_cfg=dict(type='LeakyReLU', inplace=True), + init_cfg=None): + super().__init__(init_cfg) + # grow downsample channels to output channels + down_channels = out_channels if down_growth else in_channels + block_dpr = to_ntuple(num_blocks)(block_dpr) + + if has_downsampler: + self.downsample_conv = ConvModule( + in_channels=in_channels, + out_channels=down_channels, + kernel_size=3, + stride=2, + padding=1, + groups=32 if block_fn is ResNeXtBottleneck else 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + else: + self.downsample_conv = nn.Identity() + + exp_channels = int(down_channels * expand_ratio) + self.expand_conv = ConvModule( + in_channels=down_channels, + out_channels=exp_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg if block_fn is DarknetBottleneck else None) + + assert exp_channels % 2 == 0, \ + 'The channel number before blocks must be divisible by 2.' + block_channels = exp_channels // 2 + blocks = [] + for i in range(num_blocks): + block_cfg = dict( + in_channels=block_channels, + out_channels=block_channels, + expansion=bottle_ratio, + drop_path_rate=block_dpr[i], + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + **block_args) + blocks.append(block_fn(**block_cfg)) + self.blocks = Sequential(*blocks) + self.atfer_blocks_conv = ConvModule( + block_channels, + block_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.final_conv = ConvModule( + 2 * block_channels, + out_channels, + 1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def forward(self, x): + x = self.downsample_conv(x) + x = self.expand_conv(x) + + split = x.shape[1] // 2 + xa, xb = x[:, :split], x[:, split:] + + xb = self.blocks(xb) + xb = self.atfer_blocks_conv(xb).contiguous() + + x_final = torch.cat((xa, xb), dim=1) + return self.final_conv(x_final) + + +class CSPNet(BaseModule): + """The abstract CSP Network class. + + A Pytorch implementation of `CSPNet: A New Backbone that can Enhance + Learning Capability of CNN `_ + + This class is an abstract class because the Cross Stage Partial Network + (CSPNet) is a kind of universal network structure, and you + network block to implement networks like CSPResNet, CSPResNeXt and + CSPDarkNet. + + Args: + arch (dict): The architecture of the CSPNet. + It should have the following keys: + + - block_fn (Callable): A function or class to return a block + module, and it should accept at least ``in_channels``, + ``out_channels``, ``expansion``, ``drop_path_rate``, ``norm_cfg`` + and ``act_cfg``. + - in_channels (Tuple[int]): The number of input channels of each + stage. + - out_channels (Tuple[int]): The number of output channels of each + stage. + - num_blocks (Tuple[int]): The number of blocks in each stage. + - expansion_ratio (float | Tuple[float]): The expansion ratio in + the expand convolution of each stage. Defaults to 0.5. 
+ - bottle_ratio (float | Tuple[float]): The expansion ratio of + blocks in each stage. Defaults to 2. + - has_downsampler (bool | Tuple[bool]): Whether to add a + downsample convolution in each stage. Defaults to True + - down_growth (bool | Tuple[bool]): Whether to expand the channels + in the downsampler layer of each stage. Defaults to False. + - block_args (dict | Tuple[dict], optional): The extra arguments to + the blocks in each stage. Defaults to None. + + stem_fn (Callable): A function or class to return a stem module. + And it should accept ``in_channels``. + in_channels (int): Number of input image channels. Defaults to 3. + out_indices (int | Sequence[int]): Output from which stages. + Defaults to -1, which means the last stage. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + conv_cfg (dict, optional): The config dict for conv layers in blocks. + Defaults to None, which means use Conv2d. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='BN', eps=1e-5)``. + act_cfg (dict): The config dict for activation functions. + Defaults to ``dict(type='LeakyReLU', inplace=True)``. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + init_cfg (dict, optional): The initialization settings. + Defaults to ``dict(type='Kaiming', layer='Conv2d'))``. + + Example: + >>> from functools import partial + >>> import torch + >>> import torch.nn as nn + >>> from mmpretrain.models import CSPNet + >>> from mmpretrain.models.backbones.resnet import Bottleneck + >>> + >>> # A simple example to build CSPNet. + >>> arch = dict( + ... block_fn=Bottleneck, + ... in_channels=[32, 64], + ... out_channels=[64, 128], + ... num_blocks=[3, 4] + ... ) + >>> stem_fn = partial(nn.Conv2d, out_channels=32, kernel_size=3) + >>> model = CSPNet(arch=arch, stem_fn=stem_fn, out_indices=(0, 1)) + >>> inputs = torch.rand(1, 3, 224, 224) + >>> outs = model(inputs) + >>> for out in outs: + ... print(out.shape) + ... + (1, 64, 111, 111) + (1, 128, 56, 56) + """ + + def __init__(self, + arch, + stem_fn, + in_channels=3, + out_indices=-1, + frozen_stages=-1, + drop_path_rate=0., + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-5), + act_cfg=dict(type='LeakyReLU', inplace=True), + norm_eval=False, + init_cfg=dict(type='Kaiming', layer='Conv2d')): + super().__init__(init_cfg=init_cfg) + self.arch = self.expand_arch(arch) + self.num_stages = len(self.arch['in_channels']) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + if frozen_stages not in range(-1, self.num_stages): + raise ValueError('frozen_stages must be in range(-1, ' + f'{self.num_stages}). 
But received ' + f'{frozen_stages}') + self.frozen_stages = frozen_stages + + self.stem = stem_fn(in_channels) + + stages = [] + depths = self.arch['num_blocks'] + dpr = torch.linspace(0, drop_path_rate, sum(depths)).split(depths) + + for i in range(self.num_stages): + stage_cfg = {k: v[i] for k, v in self.arch.items()} + csp_stage = CSPStage( + **stage_cfg, + block_dpr=dpr[i].tolist(), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + init_cfg=init_cfg) + stages.append(csp_stage) + self.stages = Sequential(*stages) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + out_indices = list(out_indices) + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = len(self.stages) + index + assert 0 <= out_indices[i] <= len(self.stages), \ + f'Invalid out_indices {index}.' + self.out_indices = out_indices + + @staticmethod + def expand_arch(arch): + num_stages = len(arch['in_channels']) + + def to_tuple(x, name=''): + if isinstance(x, (list, tuple)): + assert len(x) == num_stages, \ + f'The length of {name} ({len(x)}) does not ' \ + f'equals to the number of stages ({num_stages})' + return tuple(x) + else: + return (x, ) * num_stages + + full_arch = {k: to_tuple(v, k) for k, v in arch.items()} + if 'block_args' not in full_arch: + full_arch['block_args'] = to_tuple({}) + return full_arch + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages + 1): + m = self.stages[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(CSPNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + def forward(self, x): + outs = [] + + x = self.stem(x) + for i, stage in enumerate(self.stages): + x = stage(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + +@MODELS.register_module() +class CSPDarkNet(CSPNet): + """CSP-Darknet backbone used in YOLOv4. + + Args: + depth (int): Depth of CSP-Darknet. Default: 53. + in_channels (int): Number of input image channels. Default: 3. + out_indices (Sequence[int]): Output from which stages. + Default: (3, ). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Default: -1. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + + Example: + >>> from mmpretrain.models import CSPDarkNet + >>> import torch + >>> model = CSPDarkNet(depth=53, out_indices=(0, 1, 2, 3, 4)) + >>> model.eval() + >>> inputs = torch.rand(1, 3, 416, 416) + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... 
+ (1, 64, 208, 208) + (1, 128, 104, 104) + (1, 256, 52, 52) + (1, 512, 26, 26) + (1, 1024, 13, 13) + """ + arch_settings = { + 53: + dict( + block_fn=DarknetBottleneck, + in_channels=(32, 64, 128, 256, 512), + out_channels=(64, 128, 256, 512, 1024), + num_blocks=(1, 2, 8, 8, 4), + expand_ratio=(2, 1, 1, 1, 1), + bottle_ratio=(2, 1, 1, 1, 1), + has_downsampler=True, + down_growth=True, + ), + } + + def __init__(self, + depth, + in_channels=3, + out_indices=(4, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-5), + act_cfg=dict(type='LeakyReLU', inplace=True), + norm_eval=False, + init_cfg=dict( + type='Kaiming', + layer='Conv2d', + a=math.sqrt(5), + distribution='uniform', + mode='fan_in', + nonlinearity='leaky_relu')): + + assert depth in self.arch_settings, 'depth must be one of ' \ + f'{list(self.arch_settings.keys())}, but get {depth}.' + + super().__init__( + arch=self.arch_settings[depth], + stem_fn=self._make_stem_layer, + in_channels=in_channels, + out_indices=out_indices, + frozen_stages=frozen_stages, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + norm_eval=norm_eval, + init_cfg=init_cfg) + + def _make_stem_layer(self, in_channels): + """using a stride=1 conv as the stem in CSPDarknet.""" + # `stem_channels` equals to the `in_channels` in the first stage. + stem_channels = self.arch['in_channels'][0] + stem = ConvModule( + in_channels=in_channels, + out_channels=stem_channels, + kernel_size=3, + padding=1, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + return stem + + +@MODELS.register_module() +class CSPResNet(CSPNet): + """CSP-ResNet backbone. + + Args: + depth (int): Depth of CSP-ResNet. Default: 50. + out_indices (Sequence[int]): Output from which stages. + Default: (4, ). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Default: -1. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Example: + >>> from mmpretrain.models import CSPResNet + >>> import torch + >>> model = CSPResNet(depth=50, out_indices=(0, 1, 2, 3)) + >>> model.eval() + >>> inputs = torch.rand(1, 3, 416, 416) + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... + (1, 128, 104, 104) + (1, 256, 52, 52) + (1, 512, 26, 26) + (1, 1024, 13, 13) + """ + arch_settings = { + 50: + dict( + block_fn=ResNetBottleneck, + in_channels=(64, 128, 256, 512), + out_channels=(128, 256, 512, 1024), + num_blocks=(3, 3, 5, 2), + expand_ratio=4, + bottle_ratio=2, + has_downsampler=(False, True, True, True), + down_growth=False), + } + + def __init__(self, + depth, + in_channels=3, + out_indices=(3, ), + frozen_stages=-1, + deep_stem=False, + conv_cfg=None, + norm_cfg=dict(type='BN', eps=1e-5), + act_cfg=dict(type='LeakyReLU', inplace=True), + norm_eval=False, + init_cfg=dict(type='Kaiming', layer='Conv2d')): + assert depth in self.arch_settings, 'depth must be one of ' \ + f'{list(self.arch_settings.keys())}, but get {depth}.' 
+ self.deep_stem = deep_stem + + super().__init__( + arch=self.arch_settings[depth], + stem_fn=self._make_stem_layer, + in_channels=in_channels, + out_indices=out_indices, + frozen_stages=frozen_stages, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + norm_eval=norm_eval, + init_cfg=init_cfg) + + def _make_stem_layer(self, in_channels): + # `stem_channels` equals to the `in_channels` in the first stage. + stem_channels = self.arch['in_channels'][0] + if self.deep_stem: + stem = nn.Sequential( + ConvModule( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + ConvModule( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + else: + stem = nn.Sequential( + ConvModule( + in_channels, + stem_channels, + kernel_size=7, + stride=2, + padding=3, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg), + nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + return stem + + +@MODELS.register_module() +class CSPResNeXt(CSPResNet): + """CSP-ResNeXt backbone. + + Args: + depth (int): Depth of CSP-ResNeXt. Default: 50. + out_indices (Sequence[int]): Output from which stages. + Default: (4, ). + frozen_stages (int): Stages to be frozen (stop grad and set eval + mode). -1 means not freezing any parameters. Default: -1. + conv_cfg (dict): Config dict for convolution layer. Default: None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='LeakyReLU', negative_slope=0.1). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None. + Example: + >>> from mmpretrain.models import CSPResNeXt + >>> import torch + >>> model = CSPResNeXt(depth=50, out_indices=(0, 1, 2, 3)) + >>> model.eval() + >>> inputs = torch.rand(1, 3, 224, 224) + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + ... + (1, 256, 56, 56) + (1, 512, 28, 28) + (1, 1024, 14, 14) + (1, 2048, 7, 7) + """ + arch_settings = { + 50: + dict( + block_fn=ResNeXtBottleneck, + in_channels=(64, 256, 512, 1024), + out_channels=(256, 512, 1024, 2048), + num_blocks=(3, 3, 5, 2), + expand_ratio=(4, 2, 2, 2), + bottle_ratio=4, + has_downsampler=(False, True, True, True), + down_growth=False, + # the base_channels is changed from 64 to 32 in CSPNet + block_args=dict(base_channels=32), + ), + } + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) diff --git a/mmpretrain/models/backbones/davit.py b/mmpretrain/models/backbones/davit.py new file mode 100644 index 0000000..cf25e2e --- /dev/null +++ b/mmpretrain/models/backbones/davit.py @@ -0,0 +1,834 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
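+# The ``DaViT`` backbone registered at the bottom of this file alternates
+# window-based spatial self-attention (``SpatialBlock``) with channel-group
+# self-attention (``ChannelBlock``) inside every ``DaViTBlock``, and stacks
+# four ``DaViTBlockSequence`` stages behind an overlapping patch embedding.
+# A minimal shape-check sketch (the shapes shown assume the 'tiny' arch
+# defined below and a 224x224 input):
+#
+#     >>> import torch
+#     >>> from mmpretrain.models import DaViT
+#     >>> model = DaViT(arch='tiny', out_indices=(3, ))
+#     >>> feats = model(torch.rand(1, 3, 224, 224))
+#     >>> tuple(feats[-1].shape)
+#     (1, 768, 7, 7)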
+from copy import deepcopy +from typing import Sequence, Tuple + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.cnn.bricks import Conv2d +from mmcv.cnn.bricks.transformer import FFN, AdaptivePadding, PatchEmbed +from mmengine.model import BaseModule, ModuleList +from mmengine.utils import to_2tuple +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.registry import MODELS +from ..utils import ShiftWindowMSA + + +class DaViTWindowMSA(BaseModule): + """Window based multi-head self-attention (W-MSA) module for DaViT. + + The differences between DaViTWindowMSA & WindowMSA: + 1. Without relative position bias. + + Args: + embed_dims (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + attn_drop (float, optional): Dropout ratio of attention weight. + Defaults to 0. + proj_drop (float, optional): Dropout ratio of output. Defaults to 0. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + window_size, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop=0., + proj_drop=0., + init_cfg=None): + + super().__init__(init_cfg) + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop) + + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (num_windows*B, N, C) + mask (tensor, Optional): mask with shape of (num_windows, Wh*Ww, + Wh*Ww), value should be between (-inf, 0]. + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @staticmethod + def double_step_seq(step1, len1, step2, len2): + seq1 = torch.arange(0, step1 * len1, step1) + seq2 = torch.arange(0, step2 * len2, step2) + return (seq1[:, None] + seq2[None, :]).reshape(1, -1) + + +class ConvPosEnc(BaseModule): + """DaViT conv pos encode block. + + Args: + embed_dims (int): Number of input channels. + kernel_size (int): The kernel size of the first convolution. + Defaults to 3. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
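+
+    Example:
+        A minimal shape check; the depth-wise convolution is padded so the
+        token layout is unchanged:
+
+        >>> import torch
+        >>> cpe = ConvPosEnc(embed_dims=96)
+        >>> x = torch.rand(1, 56 * 56, 96)
+        >>> cpe(x, (56, 56)).shape
+        torch.Size([1, 3136, 96])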
+ """ + + def __init__(self, embed_dims, kernel_size=3, init_cfg=None): + super(ConvPosEnc, self).__init__(init_cfg) + self.proj = Conv2d( + embed_dims, + embed_dims, + kernel_size, + stride=1, + padding=kernel_size // 2, + groups=embed_dims) + + def forward(self, x, size: Tuple[int, int]): + B, N, C = x.shape + H, W = size + assert N == H * W + + feat = x.transpose(1, 2).view(B, C, H, W) + feat = self.proj(feat) + feat = feat.flatten(2).transpose(1, 2) + x = x + feat + return x + + +class DaViTDownSample(BaseModule): + """DaViT down sampole block. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + conv_type (str): The type of convolution + to generate patch embedding. Default: "Conv2d". + kernel_size (int): The kernel size of the first convolution. + Defaults to 2. + stride (int): The stride of the second convluation module. + Defaults to 2. + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Defaults to "corner". + dilation (int): Dilation of the convolution layers. Defaults to 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. + """ + + def __init__(self, + in_channels, + out_channels, + conv_type='Conv2d', + kernel_size=2, + stride=2, + padding='same', + dilation=1, + bias=True, + norm_cfg=None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.out_channels = out_channels + if stride is None: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adaptive_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of conv + padding = 0 + else: + self.adaptive_padding = None + padding = to_2tuple(padding) + + self.projection = build_conv_layer( + dict(type=conv_type), + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, in_channels)[1] + else: + self.norm = None + + def forward(self, x, input_size): + if self.adaptive_padding: + x = self.adaptive_padding(x) + H, W = input_size + B, L, C = x.shape + assert L == H * W, 'input feature has wrong size' + + x = self.norm(x) + x = x.reshape(B, H, W, C).permute(0, 3, 1, 2).contiguous() + + x = self.projection(x) + output_size = (x.size(2), x.size(3)) + x = x.flatten(2).transpose(1, 2) + return x, output_size + + +class ChannelAttention(BaseModule): + """DaViT channel attention. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
+ """ + + def __init__(self, embed_dims, num_heads=8, qkv_bias=False, init_cfg=None): + super().__init__(init_cfg) + self.embed_dims = embed_dims + self.num_heads = num_heads + self.head_dims = embed_dims // num_heads + self.scale = self.head_dims**-0.5 + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dims, embed_dims) + + def forward(self, x): + B, N, _ = x.shape + + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + self.head_dims).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + k = k * self.scale + attention = k.transpose(-1, -2) @ v + attention = attention.softmax(dim=-1) + + x = (attention @ q.transpose(-1, -2)).transpose(-1, -2) + x = x.transpose(1, 2).reshape(B, N, self.embed_dims) + x = self.proj(x) + return x + + +class ChannelBlock(BaseModule): + """DaViT channel attention block. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. Defaults to 7. + ffn_ratio (float): The expansion ratio of feedforward network hidden + layer channels. Defaults to 4. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + drop_path (float): The drop path rate after attention and ffn. + Defaults to 0. + ffn_cfgs (dict): The extra config of FFN. Defaults to empty dict. + norm_cfg (dict): The config of norm layers. + Defaults to ``dict(type='LN')``. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + ffn_ratio=4., + qkv_bias=False, + drop_path=0., + ffn_cfgs=dict(), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg) + self.with_cp = with_cp + + self.cpe1 = ConvPosEnc(embed_dims=embed_dims, kernel_size=3) + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = ChannelAttention( + embed_dims, num_heads=num_heads, qkv_bias=qkv_bias) + self.cpe2 = ConvPosEnc(embed_dims=embed_dims, kernel_size=3) + + _ffn_cfgs = { + 'embed_dims': embed_dims, + 'feedforward_channels': int(embed_dims * ffn_ratio), + 'num_fcs': 2, + 'ffn_drop': 0, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'act_cfg': dict(type='GELU'), + **ffn_cfgs + } + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN(**_ffn_cfgs) + + def forward(self, x, hw_shape): + + def _inner_forward(x): + x = self.cpe1(x, hw_shape) + identity = x + x = self.norm1(x) + x = self.attn(x) + x = x + identity + + x = self.cpe2(x, hw_shape) + identity = x + x = self.norm2(x) + x = self.ffn(x, identity=identity) + + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +class SpatialBlock(BaseModule): + """DaViT spatial attention block. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. Defaults to 7. + ffn_ratio (float): The expansion ratio of feedforward network hidden + layer channels. Defaults to 4. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + drop_path (float): The drop path rate after attention and ffn. + Defaults to 0. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. 
If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + attn_cfgs (dict): The extra config of Shift Window-MSA. + Defaults to empty dict. + ffn_cfgs (dict): The extra config of FFN. Defaults to empty dict. + norm_cfg (dict): The config of norm layers. + Defaults to ``dict(type='LN')``. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size=7, + ffn_ratio=4., + qkv_bias=True, + drop_path=0., + pad_small_map=False, + attn_cfgs=dict(), + ffn_cfgs=dict(), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + + super(SpatialBlock, self).__init__(init_cfg) + self.with_cp = with_cp + + self.cpe1 = ConvPosEnc(embed_dims=embed_dims, kernel_size=3) + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + _attn_cfgs = { + 'embed_dims': embed_dims, + 'num_heads': num_heads, + 'shift_size': 0, + 'window_size': window_size, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'qkv_bias': qkv_bias, + 'pad_small_map': pad_small_map, + 'window_msa': DaViTWindowMSA, + **attn_cfgs + } + self.attn = ShiftWindowMSA(**_attn_cfgs) + self.cpe2 = ConvPosEnc(embed_dims=embed_dims, kernel_size=3) + + _ffn_cfgs = { + 'embed_dims': embed_dims, + 'feedforward_channels': int(embed_dims * ffn_ratio), + 'num_fcs': 2, + 'ffn_drop': 0, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'act_cfg': dict(type='GELU'), + **ffn_cfgs + } + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN(**_ffn_cfgs) + + def forward(self, x, hw_shape): + + def _inner_forward(x): + x = self.cpe1(x, hw_shape) + identity = x + x = self.norm1(x) + x = self.attn(x, hw_shape) + x = x + identity + + x = self.cpe2(x, hw_shape) + identity = x + x = self.norm2(x) + x = self.ffn(x, identity=identity) + + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +class DaViTBlock(BaseModule): + """DaViT block. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. Defaults to 7. + ffn_ratio (float): The expansion ratio of feedforward network hidden + layer channels. Defaults to 4. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + drop_path (float): The drop path rate after attention and ffn. + Defaults to 0. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + attn_cfgs (dict): The extra config of Shift Window-MSA. + Defaults to empty dict. + ffn_cfgs (dict): The extra config of FFN. Defaults to empty dict. + norm_cfg (dict): The config of norm layers. + Defaults to ``dict(type='LN')``. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
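+
+    Example:
+        A minimal shape check; the spatial and channel blocks are applied in
+        sequence and keep the token layout unchanged:
+
+        >>> import torch
+        >>> block = DaViTBlock(embed_dims=96, num_heads=3)
+        >>> x = torch.rand(1, 56 * 56, 96)
+        >>> block(x, (56, 56)).shape
+        torch.Size([1, 3136, 96])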
+ """ + + def __init__(self, + embed_dims, + num_heads, + window_size=7, + ffn_ratio=4., + qkv_bias=True, + drop_path=0., + pad_small_map=False, + attn_cfgs=dict(), + ffn_cfgs=dict(), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + + super(DaViTBlock, self).__init__(init_cfg) + self.spatial_block = SpatialBlock( + embed_dims, + num_heads, + window_size=window_size, + ffn_ratio=ffn_ratio, + qkv_bias=qkv_bias, + drop_path=drop_path, + pad_small_map=pad_small_map, + attn_cfgs=attn_cfgs, + ffn_cfgs=ffn_cfgs, + norm_cfg=norm_cfg, + with_cp=with_cp) + self.channel_block = ChannelBlock( + embed_dims, + num_heads, + ffn_ratio=ffn_ratio, + qkv_bias=qkv_bias, + drop_path=drop_path, + ffn_cfgs=ffn_cfgs, + norm_cfg=norm_cfg, + with_cp=False) + + def forward(self, x, hw_shape): + x = self.spatial_block(x, hw_shape) + x = self.channel_block(x, hw_shape) + + return x + + +class DaViTBlockSequence(BaseModule): + """Module with successive DaViT blocks and downsample layer. + + Args: + embed_dims (int): Number of input channels. + depth (int): Number of successive DaViT blocks. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. Defaults to 7. + ffn_ratio (float): The expansion ratio of feedforward network hidden + layer channels. Defaults to 4. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + downsample (bool): Downsample the output of blocks by patch merging. + Defaults to False. + downsample_cfg (dict): The extra config of the patch merging layer. + Defaults to empty dict. + drop_paths (Sequence[float] | float): The drop path rate in each block. + Defaults to 0. + block_cfgs (Sequence[dict] | dict): The extra config of each block. + Defaults to empty dicts. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
+ """ + + def __init__(self, + embed_dims, + depth, + num_heads, + window_size=7, + ffn_ratio=4., + qkv_bias=True, + downsample=False, + downsample_cfg=dict(), + drop_paths=0., + block_cfgs=dict(), + with_cp=False, + pad_small_map=False, + init_cfg=None): + super().__init__(init_cfg) + + if not isinstance(drop_paths, Sequence): + drop_paths = [drop_paths] * depth + + if not isinstance(block_cfgs, Sequence): + block_cfgs = [deepcopy(block_cfgs) for _ in range(depth)] + + self.embed_dims = embed_dims + self.blocks = ModuleList() + for i in range(depth): + _block_cfg = { + 'embed_dims': embed_dims, + 'num_heads': num_heads, + 'window_size': window_size, + 'ffn_ratio': ffn_ratio, + 'qkv_bias': qkv_bias, + 'drop_path': drop_paths[i], + 'with_cp': with_cp, + 'pad_small_map': pad_small_map, + **block_cfgs[i] + } + block = DaViTBlock(**_block_cfg) + self.blocks.append(block) + + if downsample: + _downsample_cfg = { + 'in_channels': embed_dims, + 'out_channels': 2 * embed_dims, + 'norm_cfg': dict(type='LN'), + **downsample_cfg + } + self.downsample = DaViTDownSample(**_downsample_cfg) + else: + self.downsample = None + + def forward(self, x, in_shape, do_downsample=True): + for block in self.blocks: + x = block(x, in_shape) + + if self.downsample is not None and do_downsample: + x, out_shape = self.downsample(x, in_shape) + else: + out_shape = in_shape + return x, out_shape + + @property + def out_channels(self): + if self.downsample: + return self.downsample.out_channels + else: + return self.embed_dims + + +@MODELS.register_module() +class DaViT(BaseBackbone): + """DaViT. + + A PyTorch implement of : `DaViT: Dual Attention Vision Transformers + `_ + + Inspiration from + https://github.com/dingmyu/davit + + Args: + arch (str | dict): DaViT architecture. If use string, choose from + 'tiny', 'small', 'base' and 'large', 'huge', 'giant'. If use dict, + it should have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **depths** (List[int]): The number of blocks in each stage. + - **num_heads** (List[int]): The number of heads in attention + modules of each stage. + + Defaults to 't'. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 4. + in_channels (int): The num of input channels. Defaults to 3. + window_size (int): The height and width of the window. Defaults to 7. + ffn_ratio (float): The expansion ratio of feedforward network hidden + layer channels. Defaults to 4. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.1. + out_after_downsample (bool): Whether to output the feature map of a + stage after the following downsample layer. Defaults to False. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + norm_cfg (dict): Config dict for normalization layer for all output + features. Defaults to ``dict(type='LN')`` + stage_cfgs (Sequence[dict] | dict): Extra config dict for each + stage. Defaults to an empty dict. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. 
+ out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + arch_zoo = { + **dict.fromkeys(['t', 'tiny'], { + 'embed_dims': 96, + 'depths': [1, 1, 3, 1], + 'num_heads': [3, 6, 12, 24] + }), + **dict.fromkeys(['s', 'small'], { + 'embed_dims': 96, + 'depths': [1, 1, 9, 1], + 'num_heads': [3, 6, 12, 24] + }), + **dict.fromkeys(['b', 'base'], { + 'embed_dims': 128, + 'depths': [1, 1, 9, 1], + 'num_heads': [4, 8, 16, 32] + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 192, + 'depths': [1, 1, 9, 1], + 'num_heads': [6, 12, 24, 48] + }), + **dict.fromkeys( + ['h', 'huge'], { + 'embed_dims': 256, + 'depths': [1, 1, 9, 1], + 'num_heads': [8, 16, 32, 64] + }), + **dict.fromkeys( + ['g', 'giant'], { + 'embed_dims': 384, + 'depths': [1, 1, 12, 3], + 'num_heads': [12, 24, 48, 96] + }), + } + + def __init__(self, + arch='t', + patch_size=4, + in_channels=3, + window_size=7, + ffn_ratio=4., + qkv_bias=True, + drop_path_rate=0.1, + out_after_downsample=False, + pad_small_map=False, + norm_cfg=dict(type='LN'), + stage_cfgs=dict(), + frozen_stages=-1, + norm_eval=False, + out_indices=(3, ), + with_cp=False, + init_cfg=None): + super().__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = {'embed_dims', 'depths', 'num_heads'} + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.depths = self.arch_settings['depths'] + self.num_heads = self.arch_settings['num_heads'] + self.num_layers = len(self.depths) + self.out_indices = out_indices + self.out_after_downsample = out_after_downsample + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + + # stochastic depth decay rule + total_depth = sum(self.depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] # stochastic depth decay rule + + _patch_cfg = dict( + in_channels=in_channels, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=7, + stride=patch_size, + padding='same', + norm_cfg=dict(type='LN'), + ) + self.patch_embed = PatchEmbed(**_patch_cfg) + + self.stages = ModuleList() + embed_dims = [self.embed_dims] + for i, (depth, + num_heads) in enumerate(zip(self.depths, self.num_heads)): + if isinstance(stage_cfgs, Sequence): + stage_cfg = stage_cfgs[i] + else: + stage_cfg = deepcopy(stage_cfgs) + downsample = True if i < self.num_layers - 1 else False + _stage_cfg = { + 'embed_dims': embed_dims[-1], + 'depth': depth, + 'num_heads': num_heads, + 'window_size': window_size, + 'ffn_ratio': ffn_ratio, + 'qkv_bias': qkv_bias, + 'downsample': downsample, + 'drop_paths': dpr[:depth], + 'with_cp': with_cp, + 'pad_small_map': pad_small_map, + **stage_cfg + } + + stage = DaViTBlockSequence(**_stage_cfg) + self.stages.append(stage) + + dpr = dpr[depth:] + embed_dims.append(stage.out_channels) + + self.num_features = embed_dims[:-1] + + # add a norm layer for each output + for i in out_indices: + if norm_cfg is not None: + norm_layer = build_norm_layer(norm_cfg, + self.num_features[i])[1] + else: + 
norm_layer = nn.Identity() + + self.add_module(f'norm{i}', norm_layer) + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + for i in range(0, self.frozen_stages + 1): + m = self.stages[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + for i in self.out_indices: + if i <= self.frozen_stages: + for param in getattr(self, f'norm{i}').parameters(): + param.requires_grad = False + + def forward(self, x): + x, hw_shape = self.patch_embed(x) + + outs = [] + for i, stage in enumerate(self.stages): + x, hw_shape = stage( + x, hw_shape, do_downsample=self.out_after_downsample) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(x) + out = out.view(-1, *hw_shape, + self.num_features[i]).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + if stage.downsample is not None and not self.out_after_downsample: + x, hw_shape = stage.downsample(x, hw_shape) + + return tuple(outs) diff --git a/mmpretrain/models/backbones/deit.py b/mmpretrain/models/backbones/deit.py new file mode 100644 index 0000000..9ae3408 --- /dev/null +++ b/mmpretrain/models/backbones/deit.py @@ -0,0 +1,116 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.registry import MODELS +from .vision_transformer import VisionTransformer + + +@MODELS.register_module() +class DistilledVisionTransformer(VisionTransformer): + """Distilled Vision Transformer. + + A PyTorch implement of : `Training data-efficient image transformers & + distillation through attention `_ + + Args: + arch (str | dict): Vision Transformer architecture. If use string, + choose from 'small', 'base', 'large', 'deit-tiny', 'deit-small' + and 'deit-base'. If use dict, it should have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of transformer encoder layers. + - **num_heads** (int): The number of heads in attention modules. + - **feedforward_channels** (int): The hidden dimensions in + feedforward modules. + + Defaults to 'deit-base'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + in_channels (int): The num of input channels. Defaults to 3. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + out_type (str): The type of output features. Please choose from + + - ``"cls_token"``: A tuple with the class token and the + distillation token. The shapes of both tensor are (B, C). 
+ - ``"featmap"``: The feature map tensor from the patch tokens + with shape (B, C, H, W). + - ``"avg_featmap"``: The global averaged feature map tensor + with shape (B, C). + - ``"raw"``: The raw feature tensor includes patch tokens and + class tokens with shape (B, L, C). + + Defaults to ``"cls_token"``. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + num_extra_tokens = 2 # class token and distillation token + + def __init__(self, arch='deit-base', *args, **kwargs): + super(DistilledVisionTransformer, self).__init__( + arch=arch, + with_cls_token=True, + *args, + **kwargs, + ) + self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + + def forward(self, x): + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + dist_token = self.dist_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, dist_token, x), dim=1) + x = x + self.resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + x = self.drop_after_pos(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + + if i == len(self.layers) - 1 and self.final_norm: + x = self.ln1(x) + + if i in self.out_indices: + outs.append(self._format_output(x, patch_resolution)) + + return tuple(outs) + + def _format_output(self, x, hw): + if self.out_type == 'cls_token': + return x[:, 0], x[:, 1] + + return super()._format_output(x, hw) + + def init_weights(self): + super(DistilledVisionTransformer, self).init_weights() + + if not (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + trunc_normal_(self.dist_token, std=0.02) diff --git a/mmpretrain/models/backbones/deit3.py b/mmpretrain/models/backbones/deit3.py new file mode 100644 index 0000000..acedabe --- /dev/null +++ b/mmpretrain/models/backbones/deit3.py @@ -0,0 +1,454 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import numpy as np +import torch +from mmcv.cnn import Linear, build_activation_layer +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.transformer import PatchEmbed +from mmengine.model import BaseModule, ModuleList, Sequential +from mmengine.utils import deprecated_api_warning +from torch import nn + +from mmpretrain.registry import MODELS +from ..utils import (LayerScale, MultiheadAttention, build_norm_layer, + resize_pos_embed, to_2tuple) +from .vision_transformer import VisionTransformer + + +class DeiT3FFN(BaseModule): + """FFN for DeiT3. + + The differences between DeiT3FFN & FFN: + 1. Use LayerScale. + + Args: + embed_dims (int): The feature dimension. Same as + `MultiheadAttention`. Defaults: 256. + feedforward_channels (int): The hidden dimension of FFNs. + Defaults: 1024. + num_fcs (int, optional): The number of fully-connected layers in + FFNs. Default: 2. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='ReLU') + ffn_drop (float, optional): Probability of an element to be + zeroed in FFN. Default 0.0. + add_identity (bool, optional): Whether to add the + identity connection. Default: `True`. 
+ dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + use_layer_scale (bool): Whether to use layer_scale in + DeiT3FFN. Defaults to True. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + @deprecated_api_warning( + { + 'dropout': 'ffn_drop', + 'add_residual': 'add_identity' + }, + cls_name='FFN') + def __init__(self, + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0., + dropout_layer=None, + add_identity=True, + use_layer_scale=True, + init_cfg=None, + **kwargs): + super().__init__(init_cfg) + assert num_fcs >= 2, 'num_fcs should be no less ' \ + f'than 2. got {num_fcs}.' + self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.num_fcs = num_fcs + self.act_cfg = act_cfg + self.activate = build_activation_layer(act_cfg) + + layers = [] + in_channels = embed_dims + for _ in range(num_fcs - 1): + layers.append( + Sequential( + Linear(in_channels, feedforward_channels), self.activate, + nn.Dropout(ffn_drop))) + in_channels = feedforward_channels + layers.append(Linear(feedforward_channels, embed_dims)) + layers.append(nn.Dropout(ffn_drop)) + self.layers = Sequential(*layers) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else torch.nn.Identity() + self.add_identity = add_identity + + if use_layer_scale: + self.gamma2 = LayerScale(embed_dims) + else: + self.gamma2 = nn.Identity() + + @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN') + def forward(self, x, identity=None): + """Forward function for `FFN`. + + The function would add x to the output tensor if residue is None. + """ + out = self.layers(x) + out = self.gamma2(out) + if not self.add_identity: + return self.dropout_layer(out) + if identity is None: + identity = x + return identity + self.dropout_layer(out) + + +class DeiT3TransformerEncoderLayer(BaseModule): + """Implements one encoder layer in DeiT3. + + The differences between DeiT3TransformerEncoderLayer & + TransformerEncoderLayer: + 1. Use LayerScale. + + Args: + embed_dims (int): The feature dimension + num_heads (int): Parallel attention heads + feedforward_channels (int): The hidden dimension for FFNs + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop_rate (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + num_fcs (int): The number of fully-connected layers for FFNs. + Defaults to 2. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + use_layer_scale (bool): Whether to use layer_scale in + DeiT3TransformerEncoderLayer. Defaults to True. + act_cfg (dict): The activation config for FFNs. + Defaults to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
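+
+    Example:
+        A minimal shape check with random tokens (the sizes match the 'small'
+        arch settings defined below):
+
+        >>> import torch
+        >>> layer = DeiT3TransformerEncoderLayer(
+        ...     embed_dims=384, num_heads=6, feedforward_channels=1536)
+        >>> x = torch.rand(1, 197, 384)
+        >>> layer(x).shape
+        torch.Size([1, 197, 384])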
+ """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + use_layer_scale=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(DeiT3TransformerEncoderLayer, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + + self.ln1 = build_norm_layer(norm_cfg, self.embed_dims) + + self.attn = MultiheadAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias, + use_layer_scale=use_layer_scale) + + self.ln2 = build_norm_layer(norm_cfg, self.embed_dims) + + self.ffn = DeiT3FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + use_layer_scale=use_layer_scale) + + def init_weights(self): + super(DeiT3TransformerEncoderLayer, self).init_weights() + for m in self.ffn.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + nn.init.normal_(m.bias, std=1e-6) + + def forward(self, x): + x = x + self.attn(self.ln1(x)) + x = self.ffn(self.ln1(x), identity=x) + return x + + +@MODELS.register_module() +class DeiT3(VisionTransformer): + """DeiT3 backbone. + + A PyTorch implement of : `DeiT III: Revenge of the ViT + `_ + + The differences between DeiT3 & VisionTransformer: + + 1. Use LayerScale. + 2. Concat cls token after adding pos_embed. + + Args: + arch (str | dict): DeiT3 architecture. If use string, + choose from 'small', 'base', 'medium', 'large' and 'huge'. + If use dict, it should have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of transformer encoder layers. + - **num_heads** (int): The number of heads in attention modules. + - **feedforward_channels** (int): The hidden dimensions in + feedforward modules. + + Defaults to 'base'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + in_channels (int): The num of input channels. Defaults to 3. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + out_type (str): The type of output features. Please choose from + + - ``"cls_token"``: The class token tensor with shape (B, C). + - ``"featmap"``: The feature map tensor from the patch tokens + with shape (B, C, H, W). + - ``"avg_featmap"``: The global averaged feature map tensor + with shape (B, C). + - ``"raw"``: The raw feature tensor includes patch tokens and + class tokens with shape (B, L, C). + + Defaults to ``"cls_token"``. + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Defaults to True. + use_layer_scale (bool): Whether to use layer_scale in DeiT3. 
+ Defaults to True. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + arch_zoo = { + **dict.fromkeys( + ['s', 'small'], { + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 6, + 'feedforward_channels': 1536, + }), + **dict.fromkeys( + ['m', 'medium'], { + 'embed_dims': 512, + 'num_layers': 12, + 'num_heads': 8, + 'feedforward_channels': 2048, + }), + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 3072 + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 1024, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + }), + **dict.fromkeys( + ['h', 'huge'], { + 'embed_dims': 1280, + 'num_layers': 32, + 'num_heads': 16, + 'feedforward_channels': 5120 + }), + } + num_extra_tokens = 1 # class token + + def __init__(self, + arch='base', + img_size=224, + patch_size=16, + in_channels=3, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + out_type='cls_token', + with_cls_token=True, + use_layer_scale=True, + interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None): + super(VisionTransformer, self).__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'num_heads', 'feedforward_channels' + } + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.img_size = to_2tuple(img_size) + + # Set patch embedding + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + # Set out type + if out_type not in self.OUT_TYPES: + raise ValueError(f'Unsupported `out_type` {out_type}, please ' + f'choose from {self.OUT_TYPES}') + self.out_type = out_type + + # Set cls token + if with_cls_token: + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + elif out_type != 'cls_token': + self.cls_token = None + self.num_extra_tokens = 0 + else: + raise ValueError( + 'with_cls_token must be True when `out_type="cls_token"`.') + + # Set position embedding + self.interpolate_mode = interpolate_mode + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches, self.embed_dims)) + self._register_load_state_dict_pre_hook(self._prepare_pos_embed) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' 
+ for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_layers + index + assert 0 <= out_indices[i] <= self.num_layers, \ + f'Invalid out_indices {index}' + self.out_indices = out_indices + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, self.num_layers) + + self.layers = ModuleList() + if isinstance(layer_cfgs, dict): + layer_cfgs = [layer_cfgs] * self.num_layers + for i in range(self.num_layers): + _layer_cfg = dict( + embed_dims=self.embed_dims, + num_heads=self.arch_settings['num_heads'], + feedforward_channels=self. + arch_settings['feedforward_channels'], + drop_rate=drop_rate, + drop_path_rate=dpr[i], + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + use_layer_scale=use_layer_scale) + _layer_cfg.update(layer_cfgs[i]) + self.layers.append(DeiT3TransformerEncoderLayer(**_layer_cfg)) + + self.final_norm = final_norm + if final_norm: + self.ln1 = build_norm_layer(norm_cfg, self.embed_dims) + + def forward(self, x): + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=0) + x = self.drop_after_pos(x) + + if self.cls_token is not None: + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + + if i == len(self.layers) - 1 and self.final_norm: + x = self.ln1(x) + + if i in self.out_indices: + outs.append(self._format_output(x, patch_resolution)) + + return tuple(outs) + + def _prepare_pos_embed(self, state_dict, prefix, *args, **kwargs): + name = prefix + 'pos_embed' + if name not in state_dict.keys(): + return + + ckpt_pos_embed_shape = state_dict[name].shape + if self.pos_embed.shape != ckpt_pos_embed_shape: + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.info( + f'Resize the pos_embed shape from {ckpt_pos_embed_shape} ' + f'to {self.pos_embed.shape}.') + + ckpt_pos_embed_shape = to_2tuple( + int(np.sqrt(ckpt_pos_embed_shape[1]))) + pos_embed_shape = self.patch_embed.init_out_size + + state_dict[name] = resize_pos_embed( + state_dict[name], + ckpt_pos_embed_shape, + pos_embed_shape, + self.interpolate_mode, + num_extra_tokens=0, # The cls token adding is after pos_embed + ) diff --git a/mmpretrain/models/backbones/densenet.py b/mmpretrain/models/backbones/densenet.py new file mode 100644 index 0000000..c9f0530 --- /dev/null +++ b/mmpretrain/models/backbones/densenet.py @@ -0,0 +1,332 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
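+# The ``DenseNet`` backbone below keeps a running channel count: every
+# ``DenseBlock`` adds ``depth * growth_rate`` channels and every
+# ``DenseTransition`` multiplies them by ``compression_factor``. A worked
+# sketch for the '121' settings defined below (growth_rate=32,
+# depths=[6, 12, 24, 16], init_channels=64, compression_factor=0.5):
+#
+#     stem:      64
+#     block 1:   64 + 6 * 32  = 256  -> transition -> 128
+#     block 2:  128 + 12 * 32 = 512  -> transition -> 256
+#     block 3:  256 + 24 * 32 = 1024 -> transition -> 512
+#     block 4:  512 + 16 * 32 = 1024 -> final norm + act (no compression)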
+import math +from itertools import chain +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn.bricks import build_activation_layer, build_norm_layer +from torch.jit.annotations import List + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + + +class DenseLayer(BaseBackbone): + """DenseBlock layers.""" + + def __init__(self, + in_channels, + growth_rate, + bn_size, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + drop_rate=0., + memory_efficient=False): + super(DenseLayer, self).__init__() + + self.norm1 = build_norm_layer(norm_cfg, in_channels)[1] + self.conv1 = nn.Conv2d( + in_channels, + bn_size * growth_rate, + kernel_size=1, + stride=1, + bias=False) + self.act = build_activation_layer(act_cfg) + self.norm2 = build_norm_layer(norm_cfg, bn_size * growth_rate)[1] + self.conv2 = nn.Conv2d( + bn_size * growth_rate, + growth_rate, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.drop_rate = float(drop_rate) + self.memory_efficient = memory_efficient + + def bottleneck_fn(self, xs): + # type: (List[torch.Tensor]) -> torch.Tensor + concated_features = torch.cat(xs, 1) + bottleneck_output = self.conv1( + self.act(self.norm1(concated_features))) # noqa: T484 + return bottleneck_output + + # todo: rewrite when torchscript supports any + def any_requires_grad(self, x): + # type: (List[torch.Tensor]) -> bool + for tensor in x: + if tensor.requires_grad: + return True + return False + + # This decorator indicates to the compiler that a function or method + # should be ignored and replaced with the raising of an exception. + # Here this function is incompatible with torchscript. + @torch.jit.unused # noqa: T484 + def call_checkpoint_bottleneck(self, x): + # type: (List[torch.Tensor]) -> torch.Tensor + def closure(*xs): + return self.bottleneck_fn(xs) + + # Here use torch.utils.checkpoint to rerun a forward-pass during + # backward in bottleneck to save memories. 
+ return cp.checkpoint(closure, *x) + + def forward(self, x): # noqa: F811 + # type: (List[torch.Tensor]) -> torch.Tensor + # assert input features is a list of Tensor + assert isinstance(x, list) + + if self.memory_efficient and self.any_requires_grad(x): + if torch.jit.is_scripting(): + raise Exception('Memory Efficient not supported in JIT') + bottleneck_output = self.call_checkpoint_bottleneck(x) + else: + bottleneck_output = self.bottleneck_fn(x) + + new_features = self.conv2(self.act(self.norm2(bottleneck_output))) + if self.drop_rate > 0: + new_features = F.dropout( + new_features, p=self.drop_rate, training=self.training) + return new_features + + +class DenseBlock(nn.Module): + """DenseNet Blocks.""" + + def __init__(self, + num_layers, + in_channels, + bn_size, + growth_rate, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + drop_rate=0., + memory_efficient=False): + super(DenseBlock, self).__init__() + self.block = nn.ModuleList([ + DenseLayer( + in_channels + i * growth_rate, + growth_rate=growth_rate, + bn_size=bn_size, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + drop_rate=drop_rate, + memory_efficient=memory_efficient) for i in range(num_layers) + ]) + + def forward(self, init_features): + features = [init_features] + for layer in self.block: + new_features = layer(features) + features.append(new_features) + return torch.cat(features, 1) + + +class DenseTransition(nn.Sequential): + """DenseNet Transition Layers.""" + + def __init__(self, + in_channels, + out_channels, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU')): + super(DenseTransition, self).__init__() + self.add_module('norm', build_norm_layer(norm_cfg, in_channels)[1]) + self.add_module('act', build_activation_layer(act_cfg)) + self.add_module( + 'conv', + nn.Conv2d( + in_channels, out_channels, kernel_size=1, stride=1, + bias=False)) + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +@MODELS.register_module() +class DenseNet(BaseBackbone): + """DenseNet. + + A PyTorch implementation of : `Densely Connected Convolutional Networks + `_ + + Modified from the `official repo + `_ + and `pytorch + `_. + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architecture in ``DenseNet.arch_settings``. And if dict, it + should include the following two keys: + + - growth_rate (int): Each layer of DenseBlock produce `k` feature + maps. Here refers `k` as the growth rate of the network. + - depths (list[int]): Number of repeated layers in each DenseBlock. + - init_channels (int): The output channels of stem layers. + + Defaults to '121'. + in_channels (int): Number of input image channels. Defaults to 3. + bn_size (int): Refers to channel expansion parameter of 1x1 + convolution layer. Defaults to 4. + drop_rate (float): Drop rate of Dropout Layer. Defaults to 0. + compression_factor (float): The reduction rate of transition layers. + Defaults to 0.5. + memory_efficient (bool): If True, uses checkpointing. Much more memory + efficient, but slower. Defaults to False. + See `"paper" `_. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='BN')``. + act_cfg (dict): The config dict for activation after each convolution. + Defaults to ``dict(type='ReLU')``. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + init_cfg (dict, optional): Initialization config dict. 
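+            Defaults to None.
+
+    Example:
+        A minimal shape check (the 1024 output channels of the '121' arch
+        come from 512 + 16 * 32 in the last dense block):
+
+        >>> import torch
+        >>> from mmpretrain.models import DenseNet
+        >>> model = DenseNet(arch='121', out_indices=(3, ))
+        >>> feats = model(torch.rand(1, 3, 224, 224))
+        >>> tuple(feats[-1].shape)
+        (1, 1024, 7, 7)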
+ """ + arch_settings = { + '121': { + 'growth_rate': 32, + 'depths': [6, 12, 24, 16], + 'init_channels': 64, + }, + '169': { + 'growth_rate': 32, + 'depths': [6, 12, 32, 32], + 'init_channels': 64, + }, + '201': { + 'growth_rate': 32, + 'depths': [6, 12, 48, 32], + 'init_channels': 64, + }, + '161': { + 'growth_rate': 48, + 'depths': [6, 12, 36, 24], + 'init_channels': 96, + }, + } + + def __init__(self, + arch='121', + in_channels=3, + bn_size=4, + drop_rate=0, + compression_factor=0.5, + memory_efficient=False, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + out_indices=-1, + frozen_stages=0, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + essential_keys = {'growth_rate', 'depths', 'init_channels'} + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + + self.growth_rate = arch['growth_rate'] + self.depths = arch['depths'] + self.init_channels = arch['init_channels'] + self.act = build_activation_layer(act_cfg) + + self.num_stages = len(self.depths) + + # check out indices and frozen stages + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_stages + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + self.frozen_stages = frozen_stages + + # Set stem layers + self.stem = nn.Sequential( + nn.Conv2d( + in_channels, + self.init_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False), + build_norm_layer(norm_cfg, self.init_channels)[1], self.act, + nn.MaxPool2d(kernel_size=3, stride=2, padding=1)) + + # Repetitions of DenseNet Blocks + self.stages = nn.ModuleList() + self.transitions = nn.ModuleList() + + channels = self.init_channels + for i in range(self.num_stages): + depth = self.depths[i] + + stage = DenseBlock( + num_layers=depth, + in_channels=channels, + bn_size=bn_size, + growth_rate=self.growth_rate, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + drop_rate=drop_rate, + memory_efficient=memory_efficient) + self.stages.append(stage) + channels += depth * self.growth_rate + + if i != self.num_stages - 1: + transition = DenseTransition( + in_channels=channels, + out_channels=math.floor(channels * compression_factor), + norm_cfg=norm_cfg, + act_cfg=act_cfg, + ) + channels = math.floor(channels * compression_factor) + else: + # Final layers after dense block is just bn with act. + # Unlike the paper, the original repo also put this in + # transition layer, whereas torchvision take this out. + # We reckon this as transition layer here. 
+ transition = nn.Sequential( + build_norm_layer(norm_cfg, channels)[1], + self.act, + ) + self.transitions.append(transition) + + self._freeze_stages() + + def forward(self, x): + x = self.stem(x) + outs = [] + for i in range(self.num_stages): + x = self.stages[i](x) + x = self.transitions[i](x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(self.frozen_stages): + downsample_layer = self.transitions[i] + stage = self.stages[i] + downsample_layer.eval() + stage.eval() + for param in chain(downsample_layer.parameters(), + stage.parameters()): + param.requires_grad = False + + def train(self, mode=True): + super(DenseNet, self).train(mode) + self._freeze_stages() diff --git a/mmpretrain/models/backbones/edgenext.py b/mmpretrain/models/backbones/edgenext.py new file mode 100644 index 0000000..ad4e768 --- /dev/null +++ b/mmpretrain/models/backbones/edgenext.py @@ -0,0 +1,398 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from itertools import chain +from typing import Sequence + +import torch +import torch.nn as nn +from mmcv.cnn.bricks import DropPath +from mmengine.model import BaseModule, ModuleList, Sequential + +from mmpretrain.registry import MODELS +from ..utils import (ChannelMultiheadAttention, PositionEncodingFourier, + build_norm_layer) +from .base_backbone import BaseBackbone +from .convnext import ConvNeXtBlock + + +class SDTAEncoder(BaseModule): + """A PyTorch implementation of split depth-wise transpose attention (SDTA) + encoder. + + Inspiration from + https://github.com/mmaaz60/EdgeNeXt + Args: + in_channel (int): Number of input channels. + drop_path_rate (float): Stochastic depth dropout rate. + Defaults to 0. + layer_scale_init_value (float): Initial value of layer scale. + Defaults to 1e-6. + mlp_ratio (int): Number of channels ratio in the MLP. + Defaults to 4. + use_pos_emb (bool): Whether to use position encoding. + Defaults to True. + num_heads (int): Number of heads in the multihead attention. + Defaults to 8. + qkv_bias (bool): Whether to use bias in the multihead attention. + Defaults to True. + attn_drop (float): Dropout rate of the attention. + Defaults to 0. + proj_drop (float): Dropout rate of the projection. + Defaults to 0. + layer_scale_init_value (float): Initial value of layer scale. + Defaults to 1e-6. + norm_cfg (dict): Dictionary to construct normalization layer. + Defaults to ``dict(type='LN')``. + act_cfg (dict): Dictionary to construct activation layer. + Defaults to ``dict(type='GELU')``. + scales (int): Number of scales. Default to 1. 
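+        init_cfg (dict, optional): Initialization config dict.
+            Defaults to None.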
+ """ + + def __init__(self, + in_channel, + drop_path_rate=0., + layer_scale_init_value=1e-6, + mlp_ratio=4, + use_pos_emb=True, + num_heads=8, + qkv_bias=True, + attn_drop=0., + proj_drop=0., + norm_cfg=dict(type='LN'), + act_cfg=dict(type='GELU'), + scales=1, + init_cfg=None): + super(SDTAEncoder, self).__init__(init_cfg=init_cfg) + conv_channels = max( + int(math.ceil(in_channel / scales)), + int(math.floor(in_channel // scales))) + self.conv_channels = conv_channels + self.num_convs = scales if scales == 1 else scales - 1 + + self.conv_modules = ModuleList() + for i in range(self.num_convs): + self.conv_modules.append( + nn.Conv2d( + conv_channels, + conv_channels, + kernel_size=3, + padding=1, + groups=conv_channels)) + + self.pos_embed = PositionEncodingFourier( + embed_dims=in_channel) if use_pos_emb else None + + self.norm_csa = build_norm_layer(norm_cfg, in_channel) + self.gamma_csa = nn.Parameter( + layer_scale_init_value * torch.ones(in_channel), + requires_grad=True) if layer_scale_init_value > 0 else None + self.csa = ChannelMultiheadAttention( + embed_dims=in_channel, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=proj_drop) + + self.norm = build_norm_layer(norm_cfg, in_channel) + self.pointwise_conv1 = nn.Linear(in_channel, mlp_ratio * in_channel) + self.act = MODELS.build(act_cfg) + self.pointwise_conv2 = nn.Linear(mlp_ratio * in_channel, in_channel) + self.gamma = nn.Parameter( + layer_scale_init_value * torch.ones(in_channel), + requires_grad=True) if layer_scale_init_value > 0 else None + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0. else nn.Identity() + + def forward(self, x): + shortcut = x + spx = torch.split(x, self.conv_channels, dim=1) + for i in range(self.num_convs): + if i == 0: + sp = spx[i] + else: + sp = sp + spx[i] + sp = self.conv_modules[i](sp) + if i == 0: + out = sp + else: + out = torch.cat((out, sp), 1) + + x = torch.cat((out, spx[self.num_convs]), 1) + + # Channel Self-attention + B, C, H, W = x.shape + x = x.reshape(B, C, H * W).permute(0, 2, 1) + if self.pos_embed: + pos_encoding = self.pos_embed((B, H, W)) + pos_encoding = pos_encoding.reshape(B, -1, + x.shape[1]).permute(0, 2, 1) + x += pos_encoding + + x = x + self.drop_path(self.gamma_csa * self.csa(self.norm_csa(x))) + x = x.reshape(B, H, W, C) + + # Inverted Bottleneck + x = self.norm(x) + x = self.pointwise_conv1(x) + x = self.act(x) + x = self.pointwise_conv2(x) + + if self.gamma is not None: + x = self.gamma * x + x = x.permute(0, 3, 1, 2) # (B, H, W, C) -> (B, C, H, W) + + x = shortcut + self.drop_path(x) + + return x + + +@MODELS.register_module() +class EdgeNeXt(BaseBackbone): + """EdgeNeXt. + + A PyTorch implementation of: `EdgeNeXt: Efficiently Amalgamated + CNN-Transformer Architecture for Mobile Vision Applications + `_ + + Inspiration from + https://github.com/mmaaz60/EdgeNeXt + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architectures in ``EdgeNeXt.arch_settings``. + And if dict, it should include the following keys: + + - channels (list[int]): The number of channels at each stage. + - depths (list[int]): The number of blocks at each stage. + - num_heads (list[int]): The number of heads at each stage. + + Defaults to 'xxsmall'. + in_channels (int): The number of input channels. + Defaults to 3. + global_blocks (list[int]): The number of global blocks. + Defaults to [0, 1, 1, 1]. + global_block_type (list[str]): The type of global blocks. + Defaults to ['None', 'SDTA', 'SDTA', 'SDTA']. 
+ drop_path_rate (float): Stochastic depth dropout rate. + Defaults to 0. + layer_scale_init_value (float): Initial value of layer scale. + Defaults to 1e-6. + linear_pw_conv (bool): Whether to use linear layer to do pointwise + convolution. Defaults to False. + mlp_ratio (int): The number of channel ratio in MLP layers. + Defaults to 4. + conv_kernel_size (list[int]): The kernel size of convolutional layers + at each stage. Defaults to [3, 5, 7, 9]. + use_pos_embd_csa (list[bool]): Whether to use positional embedding in + Channel Self-Attention. Defaults to [False, True, False, False]. + use_pos_emebd_global (bool): Whether to use positional embedding for + whole network. Defaults to False. + d2_scales (list[int]): The number of channel groups used for SDTA at + each stage. Defaults to [2, 2, 3, 4]. + norm_cfg (dict): The config of normalization layer. + Defaults to ``dict(type='LN2d', eps=1e-6)``. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + gap_before_final_norm (bool): Whether to globally average the feature + map before the final norm layer. Defaults to True. + act_cfg (dict): The config of activation layer. + Defaults to ``dict(type='GELU')``. + init_cfg (dict, optional): Config for initialization. + Defaults to None. + """ + arch_settings = { + 'xxsmall': { # parameters: 1.3M + 'channels': [24, 48, 88, 168], + 'depths': [2, 2, 6, 2], + 'num_heads': [4, 4, 4, 4] + }, + 'xsmall': { # parameters: 2.3M + 'channels': [32, 64, 100, 192], + 'depths': [3, 3, 9, 3], + 'num_heads': [4, 4, 4, 4] + }, + 'small': { # parameters: 5.6M + 'channels': [48, 96, 160, 304], + 'depths': [3, 3, 9, 3], + 'num_heads': [8, 8, 8, 8] + }, + 'base': { # parameters: 18.51M + 'channels': [80, 160, 288, 584], + 'depths': [3, 3, 9, 3], + 'num_heads': [8, 8, 8, 8] + }, + } + + def __init__(self, + arch='xxsmall', + in_channels=3, + global_blocks=[0, 1, 1, 1], + global_block_type=['None', 'SDTA', 'SDTA', 'SDTA'], + drop_path_rate=0., + layer_scale_init_value=1e-6, + linear_pw_conv=True, + mlp_ratio=4, + conv_kernel_sizes=[3, 5, 7, 9], + use_pos_embd_csa=[False, True, False, False], + use_pos_embd_global=False, + d2_scales=[2, 2, 3, 4], + norm_cfg=dict(type='LN2d', eps=1e-6), + out_indices=-1, + frozen_stages=0, + gap_before_final_norm=True, + act_cfg=dict(type='GELU'), + init_cfg=None): + super(EdgeNeXt, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in self.arch_settings, \ + f'Arch {arch} is not in default archs ' \ + f'{set(self.arch_settings)}' + self.arch_settings = self.arch_settings[arch] + elif isinstance(arch, dict): + essential_keys = {'channels', 'depths', 'num_heads'} + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.channels = self.arch_settings['channels'] + self.depths = self.arch_settings['depths'] + self.num_heads = self.arch_settings['num_heads'] + self.num_layers = len(self.depths) + self.use_pos_embd_global = use_pos_embd_global + + for g in global_block_type: + assert g in ['None', + 'SDTA'], f'Global block type {g} is not supported' + + self.num_stages = len(self.depths) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} 
instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = 4 + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + + self.frozen_stages = frozen_stages + self.gap_before_final_norm = gap_before_final_norm + + if self.use_pos_embd_global: + self.pos_embed = PositionEncodingFourier( + embed_dims=self.channels[0]) + else: + self.pos_embed = None + + # stochastic depth decay rule + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, sum(self.depths)) + ] + + self.downsample_layers = ModuleList() + stem = nn.Sequential( + nn.Conv2d(in_channels, self.channels[0], kernel_size=4, stride=4), + build_norm_layer(norm_cfg, self.channels[0]), + ) + self.downsample_layers.append(stem) + + self.stages = ModuleList() + block_idx = 0 + for i in range(self.num_stages): + depth = self.depths[i] + channels = self.channels[i] + + if i >= 1: + downsample_layer = nn.Sequential( + build_norm_layer(norm_cfg, self.channels[i - 1]), + nn.Conv2d( + self.channels[i - 1], + channels, + kernel_size=2, + stride=2, + )) + self.downsample_layers.append(downsample_layer) + + stage_blocks = [] + for j in range(depth): + if j > depth - global_blocks[i] - 1: + stage_blocks.append( + SDTAEncoder( + in_channel=channels, + drop_path_rate=dpr[block_idx + j], + mlp_ratio=mlp_ratio, + scales=d2_scales[i], + use_pos_emb=use_pos_embd_csa[i], + num_heads=self.num_heads[i], + )) + else: + dw_conv_cfg = dict( + kernel_size=conv_kernel_sizes[i], + padding=conv_kernel_sizes[i] // 2, + ) + stage_blocks.append( + ConvNeXtBlock( + in_channels=channels, + dw_conv_cfg=dw_conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + linear_pw_conv=linear_pw_conv, + drop_path_rate=dpr[block_idx + j], + layer_scale_init_value=layer_scale_init_value, + )) + block_idx += depth + + stage_blocks = Sequential(*stage_blocks) + self.stages.append(stage_blocks) + + if i in self.out_indices: + out_norm_cfg = dict(type='LN') if self.gap_before_final_norm \ + else norm_cfg + norm_layer = build_norm_layer(out_norm_cfg, channels) + self.add_module(f'norm{i}', norm_layer) + + def init_weights(self) -> None: + # TODO: need to be implemented in the future + return super().init_weights() + + def forward(self, x): + outs = [] + for i, stage in enumerate(self.stages): + x = self.downsample_layers[i](x) + x = stage(x) + if self.pos_embed and i == 0: + B, _, H, W = x.shape + x += self.pos_embed((B, H, W)) + + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + if self.gap_before_final_norm: + gap = x.mean([-2, -1], keepdim=True) + outs.append(norm_layer(gap.flatten(1))) + else: + # The output of LayerNorm2d may be discontiguous, which + # may cause some problem in the downstream tasks + outs.append(norm_layer(x).contiguous()) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(self.frozen_stages): + downsample_layer = self.downsample_layers[i] + stage = self.stages[i] + downsample_layer.eval() + stage.eval() + for param in chain(downsample_layer.parameters(), + stage.parameters()): + param.requires_grad = False + + def train(self, mode=True): + super(EdgeNeXt, self).train(mode) + self._freeze_stages() diff --git a/mmpretrain/models/backbones/efficientformer.py b/mmpretrain/models/backbones/efficientformer.py new file mode 100644 index 0000000..c2525c8 --- /dev/null +++ b/mmpretrain/models/backbones/efficientformer.py @@ -0,0 +1,606 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import itertools +from typing import Optional, Sequence + +import torch +import torch.nn as nn +from mmcv.cnn.bricks import (ConvModule, DropPath, build_activation_layer, + build_norm_layer) +from mmengine.model import BaseModule, ModuleList, Sequential + +from mmpretrain.registry import MODELS +from ..utils import LayerScale +from .base_backbone import BaseBackbone +from .poolformer import Pooling + + +class AttentionWithBias(BaseModule): + """Multi-head Attention Module with attention_bias. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. Defaults to 8. + key_dim (int): The dimension of q, k. Defaults to 32. + attn_ratio (float): The dimension of v equals to + ``key_dim * attn_ratio``. Defaults to 4. + resolution (int): The height and width of attention_bias. + Defaults to 7. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads=8, + key_dim=32, + attn_ratio=4., + resolution=7, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.num_heads = num_heads + self.scale = key_dim**-0.5 + self.attn_ratio = attn_ratio + self.key_dim = key_dim + self.nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + h = self.dh + self.nh_kd * 2 + self.qkv = nn.Linear(embed_dims, h) + self.proj = nn.Linear(self.dh, embed_dims) + + points = list(itertools.product(range(resolution), range(resolution))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = nn.Parameter( + torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', + torch.LongTensor(idxs).view(N, N)) + + @torch.no_grad() + def train(self, mode=True): + """change the mode of model.""" + super().train(mode) + if mode and hasattr(self, 'ab'): + del self.ab + else: + self.ab = self.attention_biases[:, self.attention_bias_idxs] + + def forward(self, x): + """forward function. + + Args: + x (tensor): input features with shape of (B, N, C) + """ + B, N, _ = x.shape + qkv = self.qkv(x) + qkv = qkv.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) + q, k, v = qkv.split([self.key_dim, self.key_dim, self.d], dim=-1) + + attn = ((q @ k.transpose(-2, -1)) * self.scale + + (self.attention_biases[:, self.attention_bias_idxs] + if self.training else self.ab)) + attn = attn.softmax(dim=-1) + x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh) + x = self.proj(x) + return x + + +class Flat(nn.Module): + """Flat the input from (B, C, H, W) to (B, H*W, C).""" + + def __init__(self, ): + super().__init__() + + def forward(self, x: torch.Tensor): + x = x.flatten(2).transpose(1, 2) + return x + + +class LinearMlp(BaseModule): + """Mlp implemented with linear. + + The shape of input and output tensor are (B, N, C). + + Args: + in_features (int): Dimension of input features. + hidden_features (int): Dimension of hidden features. + out_features (int): Dimension of output features. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN')``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + drop (float): Dropout rate. Defaults to 0.0. 
+ init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, + in_features: int, + hidden_features: Optional[int] = None, + out_features: Optional[int] = None, + act_cfg=dict(type='GELU'), + drop=0., + init_cfg=None): + super().__init__(init_cfg=init_cfg) + out_features = out_features or in_features + hidden_features = hidden_features or in_features + + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = build_activation_layer(act_cfg) + self.drop1 = nn.Dropout(drop) + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop2 = nn.Dropout(drop) + + def forward(self, x): + """ + Args: + x (torch.Tensor): input tensor with shape (B, N, C). + + Returns: + torch.Tensor: output tensor with shape (B, N, C). + """ + x = self.drop1(self.act(self.fc1(x))) + x = self.drop2(self.fc2(x)) + return x + + +class ConvMlp(BaseModule): + """Mlp implemented with 1*1 convolutions. + + Args: + in_features (int): Dimension of input features. + hidden_features (int): Dimension of hidden features. + out_features (int): Dimension of output features. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN')``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + drop (float): Dropout rate. Defaults to 0.0. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='GELU'), + drop=0., + init_cfg=None): + super().__init__(init_cfg=init_cfg) + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2d(in_features, hidden_features, 1) + self.act = build_activation_layer(act_cfg) + self.fc2 = nn.Conv2d(hidden_features, out_features, 1) + self.norm1 = build_norm_layer(norm_cfg, hidden_features)[1] + self.norm2 = build_norm_layer(norm_cfg, out_features)[1] + + self.drop = nn.Dropout(drop) + + def forward(self, x): + """ + Args: + x (torch.Tensor): input tensor with shape (B, C, H, W). + + Returns: + torch.Tensor: output tensor with shape (B, C, H, W). + """ + + x = self.act(self.norm1(self.fc1(x))) + x = self.drop(x) + x = self.norm2(self.fc2(x)) + x = self.drop(x) + return x + + +class Meta3D(BaseModule): + """Meta Former block using 3 dimensions inputs, ``torch.Tensor`` with shape + (B, N, C).""" + + def __init__(self, + dim, + mlp_ratio=4., + norm_cfg=dict(type='LN'), + act_cfg=dict(type='GELU'), + drop=0., + drop_path=0., + use_layer_scale=True, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.norm1 = build_norm_layer(norm_cfg, dim)[1] + self.token_mixer = AttentionWithBias(dim) + self.norm2 = build_norm_layer(norm_cfg, dim)[1] + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = LinearMlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_cfg=act_cfg, + drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. 
\ + else nn.Identity() + if use_layer_scale: + self.ls1 = LayerScale(dim) + self.ls2 = LayerScale(dim) + else: + self.ls1, self.ls2 = nn.Identity(), nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.ls1(self.token_mixer(self.norm1(x)))) + x = x + self.drop_path(self.ls2(self.mlp(self.norm2(x)))) + return x + + +class Meta4D(BaseModule): + """Meta Former block using 4 dimensions inputs, ``torch.Tensor`` with shape + (B, C, H, W).""" + + def __init__(self, + dim, + pool_size=3, + mlp_ratio=4., + act_cfg=dict(type='GELU'), + drop=0., + drop_path=0., + use_layer_scale=True, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.token_mixer = Pooling(pool_size=pool_size) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = ConvMlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_cfg=act_cfg, + drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. \ + else nn.Identity() + if use_layer_scale: + self.ls1 = LayerScale(dim, data_format='channels_first') + self.ls2 = LayerScale(dim, data_format='channels_first') + else: + self.ls1, self.ls2 = nn.Identity(), nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.ls1(self.token_mixer(x))) + x = x + self.drop_path(self.ls2(self.mlp(x))) + return x + + +def basic_blocks(in_channels, + out_channels, + index, + layers, + pool_size=3, + mlp_ratio=4., + act_cfg=dict(type='GELU'), + drop_rate=.0, + drop_path_rate=0., + use_layer_scale=True, + vit_num=1, + has_downsamper=False): + """generate EfficientFormer blocks for a stage.""" + blocks = [] + if has_downsamper: + blocks.append( + ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=True, + norm_cfg=dict(type='BN'), + act_cfg=None)) + if index == 3 and vit_num == layers[index]: + blocks.append(Flat()) + for block_idx in range(layers[index]): + block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / ( + sum(layers) - 1) + if index == 3 and layers[index] - block_idx <= vit_num: + blocks.append( + Meta3D( + out_channels, + mlp_ratio=mlp_ratio, + act_cfg=act_cfg, + drop=drop_rate, + drop_path=block_dpr, + use_layer_scale=use_layer_scale, + )) + else: + blocks.append( + Meta4D( + out_channels, + pool_size=pool_size, + act_cfg=act_cfg, + drop=drop_rate, + drop_path=block_dpr, + use_layer_scale=use_layer_scale)) + if index == 3 and layers[index] - block_idx - 1 == vit_num: + blocks.append(Flat()) + blocks = nn.Sequential(*blocks) + return blocks + + +@MODELS.register_module() +class EfficientFormer(BaseBackbone): + """EfficientFormer. + + A PyTorch implementation of EfficientFormer introduced by: + `EfficientFormer: Vision Transformers at MobileNet Speed `_ + + Modified from the `official repo + `. + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architecture in ``EfficientFormer.arch_settings``. And if dict, + it should include the following 4 keys: + + - layers (list[int]): Number of blocks at each stage. + - embed_dims (list[int]): The number of channels at each stage. + - downsamples (list[int]): Has downsample or not in the four stages. + - vit_num (int): The num of vit blocks in the last stage. + + Defaults to 'l1'. + + in_channels (int): The num of input channels. Defaults to 3. + pool_size (int): The pooling size of ``Meta4D`` blocks. Defaults to 3. + mlp_ratios (int): The dimension ratio of multi-head attention mechanism + in ``Meta4D`` blocks. Defaults to 3. 
+ reshape_last_feat (bool): Whether to reshape the feature map from + (B, N, C) to (B, C, H, W) in the last stage, when the ``vit-num`` + in ``arch`` is not 0. Defaults to False. Usually set to True + in downstream tasks. + out_indices (Sequence[int]): Output from which stages. + Defaults to -1. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + drop_rate (float): Dropout rate. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + use_layer_scale (bool): Whether to use use_layer_scale in MetaFormer + block. Defaults to True. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + + Example: + >>> from mmpretrain.models import EfficientFormer + >>> import torch + >>> inputs = torch.rand((1, 3, 224, 224)) + >>> # build EfficientFormer backbone for classification task + >>> model = EfficientFormer(arch="l1") + >>> model.eval() + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 448, 49) + >>> # build EfficientFormer backbone for downstream task + >>> model = EfficientFormer( + >>> arch="l3", + >>> out_indices=(0, 1, 2, 3), + >>> reshape_last_feat=True) + >>> model.eval() + >>> level_outputs = model(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 56, 56) + (1, 128, 28, 28) + (1, 320, 14, 14) + (1, 512, 7, 7) + """ # noqa: E501 + + # --layers: [x,x,x,x], numbers of layers for the four stages + # --embed_dims: [x,x,x,x], embedding dims for the four stages + # --downsamples: [x,x,x,x], has downsample or not in the four stages + # --vit_num:(int), the num of vit blocks in the last stage + arch_settings = { + 'l1': { + 'layers': [3, 2, 6, 4], + 'embed_dims': [48, 96, 224, 448], + 'downsamples': [False, True, True, True], + 'vit_num': 1, + }, + 'l3': { + 'layers': [4, 4, 12, 6], + 'embed_dims': [64, 128, 320, 512], + 'downsamples': [False, True, True, True], + 'vit_num': 4, + }, + 'l7': { + 'layers': [6, 6, 18, 8], + 'embed_dims': [96, 192, 384, 768], + 'downsamples': [False, True, True, True], + 'vit_num': 8, + }, + } + + def __init__(self, + arch='l1', + in_channels=3, + pool_size=3, + mlp_ratios=4, + reshape_last_feat=False, + out_indices=-1, + frozen_stages=-1, + act_cfg=dict(type='GELU'), + drop_rate=0., + drop_path_rate=0., + use_layer_scale=True, + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + self.num_extra_tokens = 0 # no cls_token, no dist_token + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + default_keys = set(self.arch_settings['l1'].keys()) + assert set(arch.keys()) == default_keys, \ + f'The arch dict must have {default_keys}, ' \ + f'but got {list(arch.keys())}.' + + self.layers = arch['layers'] + self.embed_dims = arch['embed_dims'] + self.downsamples = arch['downsamples'] + assert isinstance(self.layers, list) and isinstance( + self.embed_dims, list) and isinstance(self.downsamples, list) + assert len(self.layers) == len(self.embed_dims) == len( + self.downsamples) + + self.vit_num = arch['vit_num'] + self.reshape_last_feat = reshape_last_feat + + assert self.vit_num >= 0, "'vit_num' must be an integer " \ + 'greater than or equal to 0.' 
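+        # Illustration of how `vit_num` shapes the last stage, assuming the
+        # default arch 'l1' (layers=[3, 2, 6, 4], vit_num=1): stage 3 is built
+        # as [downsample conv, Meta4D, Meta4D, Meta4D, Flat, Meta3D], i.e.
+        # only the final block runs on the flattened (B, N, C) tokens.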
+ assert self.vit_num <= self.layers[-1], ( + "'vit_num' must be an integer smaller than layer number") + + self._make_stem(in_channels, self.embed_dims[0]) + + # set the main block in network + network = [] + for i in range(len(self.layers)): + if i != 0: + in_channels = self.embed_dims[i - 1] + else: + in_channels = self.embed_dims[i] + out_channels = self.embed_dims[i] + stage = basic_blocks( + in_channels, + out_channels, + i, + self.layers, + pool_size=pool_size, + mlp_ratio=mlp_ratios, + act_cfg=act_cfg, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + vit_num=self.vit_num, + use_layer_scale=use_layer_scale, + has_downsamper=self.downsamples[i]) + network.append(stage) + + self.network = ModuleList(network) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = 4 + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + + self.out_indices = out_indices + for i_layer in self.out_indices: + if not self.reshape_last_feat and \ + i_layer == 3 and self.vit_num > 0: + layer = build_norm_layer( + dict(type='LN'), self.embed_dims[i_layer])[1] + else: + # use GN with 1 group as channel-first LN2D + layer = build_norm_layer( + dict(type='GN', num_groups=1), self.embed_dims[i_layer])[1] + + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + + self.frozen_stages = frozen_stages + self._freeze_stages() + + def _make_stem(self, in_channels: int, stem_channels: int): + """make 2-ConvBNReLu stem layer.""" + self.patch_embed = Sequential( + ConvModule( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + bias=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=2, + padding=1, + bias=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + inplace=True)) + + def forward_tokens(self, x): + outs = [] + for idx, block in enumerate(self.network): + if idx == len(self.network) - 1: + N, _, H, W = x.shape + if self.downsamples[idx]: + H, W = H // 2, W // 2 + x = block(x) + if idx in self.out_indices: + norm_layer = getattr(self, f'norm{idx}') + + if idx == len(self.network) - 1 and x.dim() == 3: + # when ``vit-num`` > 0 and in the last stage, + # if `self.reshape_last_feat`` is True, reshape the + # features to `BCHW` format before the final normalization. + # if `self.reshape_last_feat`` is False, do + # normalization directly and permute the features to `BCN`. + if self.reshape_last_feat: + x = x.permute((0, 2, 1)).reshape(N, -1, H, W) + x_out = norm_layer(x) + else: + x_out = norm_layer(x).permute((0, 2, 1)) + else: + x_out = norm_layer(x) + + outs.append(x_out.contiguous()) + return tuple(outs) + + def forward(self, x): + # input embedding + x = self.patch_embed(x) + # through stages + x = self.forward_tokens(x) + return x + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages): + # Include both block and downsample layer. 
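+                # For example, frozen_stages=2 freezes the stem (patch_embed)
+                # plus network[0] and network[1], together with their output
+                # norm layers when those stages are in out_indices.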
+ module = self.network[i] + module.eval() + for param in module.parameters(): + param.requires_grad = False + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + norm_layer.eval() + for param in norm_layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(EfficientFormer, self).train(mode) + self._freeze_stages() diff --git a/mmpretrain/models/backbones/efficientnet.py b/mmpretrain/models/backbones/efficientnet.py new file mode 100644 index 0000000..9ec7ee8 --- /dev/null +++ b/mmpretrain/models/backbones/efficientnet.py @@ -0,0 +1,410 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import math +from functools import partial + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn.bricks import ConvModule, DropPath +from mmengine.model import BaseModule, Sequential + +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.models.utils import InvertedResidual, SELayer, make_divisible +from mmpretrain.registry import MODELS + + +class EdgeResidual(BaseModule): + """Edge Residual Block. + + Args: + in_channels (int): The input channels of this module. + out_channels (int): The output channels of this module. + mid_channels (int): The input channels of the second convolution. + kernel_size (int): The kernel size of the first convolution. + Defaults to 3. + stride (int): The stride of the first convolution. Defaults to 1. + se_cfg (dict, optional): Config dict for se layer. Defaults to None, + which means no se layer. + with_residual (bool): Use residual connection. Defaults to True. + conv_cfg (dict, optional): Config dict for convolution layer. + Defaults to None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN')``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='ReLU')``. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict | list[dict], optional): Initialization config dict. 
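+
+    Example:
+        >>> import torch
+        >>> # A minimal shape check; the channel numbers are arbitrary and
+        >>> # only meant as an illustration.
+        >>> block = EdgeResidual(24, 24, mid_channels=72, kernel_size=3)
+        >>> block(torch.rand(1, 24, 56, 56)).shape
+        torch.Size([1, 24, 56, 56])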
+ """ + + def __init__(self, + in_channels, + out_channels, + mid_channels, + kernel_size=3, + stride=1, + se_cfg=None, + with_residual=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + drop_path_rate=0., + with_cp=False, + init_cfg=None): + super(EdgeResidual, self).__init__(init_cfg=init_cfg) + assert stride in [1, 2] + self.with_cp = with_cp + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.with_se = se_cfg is not None + self.with_residual = ( + stride == 1 and in_channels == out_channels and with_residual) + + if self.with_se: + assert isinstance(se_cfg, dict) + + self.conv1 = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + padding=kernel_size // 2, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + if self.with_se: + self.se = SELayer(**se_cfg) + + self.conv2 = ConvModule( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=None, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x): + + def _inner_forward(x): + out = x + out = self.conv1(out) + + if self.with_se: + out = self.se(out) + + out = self.conv2(out) + + if self.with_residual: + return x + self.drop_path(out) + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +def model_scaling(layer_setting, arch_setting): + """Scaling operation to the layer's parameters according to the + arch_setting.""" + # scale width + new_layer_setting = copy.deepcopy(layer_setting) + for layer_cfg in new_layer_setting: + for block_cfg in layer_cfg: + block_cfg[1] = make_divisible(block_cfg[1] * arch_setting[0], 8) + + # scale depth + split_layer_setting = [new_layer_setting[0]] + for layer_cfg in new_layer_setting[1:-1]: + tmp_index = [0] + for i in range(len(layer_cfg) - 1): + if layer_cfg[i + 1][1] != layer_cfg[i][1]: + tmp_index.append(i + 1) + tmp_index.append(len(layer_cfg)) + for i in range(len(tmp_index) - 1): + split_layer_setting.append(layer_cfg[tmp_index[i]:tmp_index[i + + 1]]) + split_layer_setting.append(new_layer_setting[-1]) + + num_of_layers = [len(layer_cfg) for layer_cfg in split_layer_setting[1:-1]] + new_layers = [ + int(math.ceil(arch_setting[1] * num)) for num in num_of_layers + ] + + merge_layer_setting = [split_layer_setting[0]] + for i, layer_cfg in enumerate(split_layer_setting[1:-1]): + if new_layers[i] <= num_of_layers[i]: + tmp_layer_cfg = layer_cfg[:new_layers[i]] + else: + tmp_layer_cfg = copy.deepcopy(layer_cfg) + [layer_cfg[-1]] * ( + new_layers[i] - num_of_layers[i]) + if tmp_layer_cfg[0][3] == 1 and i != 0: + merge_layer_setting[-1] += tmp_layer_cfg.copy() + else: + merge_layer_setting.append(tmp_layer_cfg.copy()) + merge_layer_setting.append(split_layer_setting[-1]) + + return merge_layer_setting + + +@MODELS.register_module() +class EfficientNet(BaseBackbone): + """EfficientNet backbone. + + Args: + arch (str): Architecture of efficientnet. Defaults to b0. + out_indices (Sequence[int]): Output from which stages. + Defaults to (6, ). + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. + Defaults to None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN'). + act_cfg (dict): Config dict for activation layer. 
+ Defaults to dict(type='Swish'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + """ + + # Parameters to build layers. + # 'b' represents the architecture of normal EfficientNet family includes + # 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8'. + # 'e' represents the architecture of EfficientNet-EdgeTPU including 'es', + # 'em', 'el'. + # 6 parameters are needed to construct a layer, From left to right: + # - kernel_size: The kernel size of the block + # - out_channel: The number of out_channels of the block + # - se_ratio: The sequeeze ratio of SELayer. + # - stride: The stride of the block + # - expand_ratio: The expand_ratio of the mid_channels + # - block_type: -1: Not a block, 0: InvertedResidual, 1: EdgeResidual + layer_settings = { + 'b': [[[3, 32, 0, 2, 0, -1]], + [[3, 16, 4, 1, 1, 0]], + [[3, 24, 4, 2, 6, 0], + [3, 24, 4, 1, 6, 0]], + [[5, 40, 4, 2, 6, 0], + [5, 40, 4, 1, 6, 0]], + [[3, 80, 4, 2, 6, 0], + [3, 80, 4, 1, 6, 0], + [3, 80, 4, 1, 6, 0], + [5, 112, 4, 1, 6, 0], + [5, 112, 4, 1, 6, 0], + [5, 112, 4, 1, 6, 0]], + [[5, 192, 4, 2, 6, 0], + [5, 192, 4, 1, 6, 0], + [5, 192, 4, 1, 6, 0], + [5, 192, 4, 1, 6, 0], + [3, 320, 4, 1, 6, 0]], + [[1, 1280, 0, 1, 0, -1]] + ], + 'e': [[[3, 32, 0, 2, 0, -1]], + [[3, 24, 0, 1, 3, 1]], + [[3, 32, 0, 2, 8, 1], + [3, 32, 0, 1, 8, 1]], + [[3, 48, 0, 2, 8, 1], + [3, 48, 0, 1, 8, 1], + [3, 48, 0, 1, 8, 1], + [3, 48, 0, 1, 8, 1]], + [[5, 96, 0, 2, 8, 0], + [5, 96, 0, 1, 8, 0], + [5, 96, 0, 1, 8, 0], + [5, 96, 0, 1, 8, 0], + [5, 96, 0, 1, 8, 0], + [5, 144, 0, 1, 8, 0], + [5, 144, 0, 1, 8, 0], + [5, 144, 0, 1, 8, 0], + [5, 144, 0, 1, 8, 0]], + [[5, 192, 0, 2, 8, 0], + [5, 192, 0, 1, 8, 0]], + [[1, 1280, 0, 1, 0, -1]] + ] + } # yapf: disable + + # Parameters to build different kinds of architecture. + # From left to right: scaling factor for width, scaling factor for depth, + # resolution. + arch_settings = { + 'b0': (1.0, 1.0, 224), + 'b1': (1.0, 1.1, 240), + 'b2': (1.1, 1.2, 260), + 'b3': (1.2, 1.4, 300), + 'b4': (1.4, 1.8, 380), + 'b5': (1.6, 2.2, 456), + 'b6': (1.8, 2.6, 528), + 'b7': (2.0, 3.1, 600), + 'b8': (2.2, 3.6, 672), + 'l2': (4.3, 5.3, 800), + 'es': (1.0, 1.0, 224), + 'em': (1.0, 1.1, 240), + 'el': (1.2, 1.4, 300) + } + + def __init__(self, + arch='b0', + drop_path_rate=0., + out_indices=(6, ), + frozen_stages=0, + conv_cfg=dict(type='Conv2dAdaptivePadding'), + norm_cfg=dict(type='BN', eps=1e-3), + act_cfg=dict(type='Swish'), + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + layer=['_BatchNorm', 'GroupNorm'], + val=1) + ]): + super(EfficientNet, self).__init__(init_cfg) + assert arch in self.arch_settings, \ + f'"{arch}" is not one of the arch_settings ' \ + f'({", ".join(self.arch_settings.keys())})' + self.arch_setting = self.arch_settings[arch] + # layer_settings of arch='l2' is 'b' + self.layer_setting = self.layer_settings['b' if arch == + 'l2' else arch[:1]] + for index in out_indices: + if index not in range(0, len(self.layer_setting)): + raise ValueError('the item in out_indices must in ' + f'range(0, {len(self.layer_setting)}). 
' + f'But received {index}') + + if frozen_stages not in range(len(self.layer_setting) + 1): + raise ValueError('frozen_stages must be in range(0, ' + f'{len(self.layer_setting) + 1}). ' + f'But received {frozen_stages}') + self.drop_path_rate = drop_path_rate + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.layer_setting = model_scaling(self.layer_setting, + self.arch_setting) + block_cfg_0 = self.layer_setting[0][0] + block_cfg_last = self.layer_setting[-1][0] + self.in_channels = make_divisible(block_cfg_0[1], 8) + self.out_channels = block_cfg_last[1] + self.layers = nn.ModuleList() + self.layers.append( + ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=block_cfg_0[0], + stride=block_cfg_0[3], + padding=block_cfg_0[0] // 2, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + self.make_layer() + self.layers.append( + ConvModule( + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=block_cfg_last[0], + stride=block_cfg_last[3], + padding=block_cfg_last[0] // 2, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + def make_layer(self): + # Without the first and the final conv block. + layer_setting = self.layer_setting[1:-1] + + total_num_blocks = sum([len(x) for x in layer_setting]) + block_idx = 0 + dpr = [ + x.item() + for x in torch.linspace(0, self.drop_path_rate, total_num_blocks) + ] # stochastic depth decay rule + + for layer_cfg in layer_setting: + layer = [] + for i, block_cfg in enumerate(layer_cfg): + (kernel_size, out_channels, se_ratio, stride, expand_ratio, + block_type) = block_cfg + + mid_channels = int(self.in_channels * expand_ratio) + out_channels = make_divisible(out_channels, 8) + if se_ratio <= 0: + se_cfg = None + else: + se_cfg = dict( + channels=mid_channels, + ratio=expand_ratio * se_ratio, + divisor=1, + act_cfg=(self.act_cfg, dict(type='Sigmoid'))) + if block_type == 1: # edge tpu + if i > 0 and expand_ratio == 3: + with_residual = False + expand_ratio = 4 + else: + with_residual = True + mid_channels = int(self.in_channels * expand_ratio) + if se_cfg is not None: + se_cfg = dict( + channels=mid_channels, + ratio=se_ratio * expand_ratio, + divisor=1, + act_cfg=(self.act_cfg, dict(type='Sigmoid'))) + block = partial(EdgeResidual, with_residual=with_residual) + else: + block = InvertedResidual + layer.append( + block( + in_channels=self.in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + drop_path_rate=dpr[block_idx], + with_cp=self.with_cp)) + self.in_channels = out_channels + block_idx += 1 + self.layers.append(Sequential(*layer)) + + def forward(self, x): + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(self.frozen_stages): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(EfficientNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() diff --git a/mmpretrain/models/backbones/efficientnet_v2.py 
b/mmpretrain/models/backbones/efficientnet_v2.py new file mode 100644 index 0000000..fec002a --- /dev/null +++ b/mmpretrain/models/backbones/efficientnet_v2.py @@ -0,0 +1,343 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence, Tuple + +import torch +import torch.nn as nn +from mmcv.cnn.bricks import ConvModule, DropPath +from mmengine.model import Sequential +from torch import Tensor + +from mmpretrain.registry import MODELS +from ..utils import InvertedResidual as MBConv +from .base_backbone import BaseBackbone +from .efficientnet import EdgeResidual as FusedMBConv + + +class EnhancedConvModule(ConvModule): + """ConvModule with short-cut and droppath. + + Args: + in_channels (int): Number of channels in the input feature map. + Same as that in ``nn._ConvNd``. + out_channels (int): Number of channels produced by the convolution. + Same as that in ``nn._ConvNd``. + kernel_size (int | tuple[int]): Size of the convolving kernel. + Same as that in ``nn._ConvNd``. + stride (int | tuple[int]): Stride of the convolution. + Same as that in ``nn._ConvNd``. + has_skip (bool): Whether there is short-cut. Defaults to False. + drop_path_rate (float): Stochastic depth rate. Default 0.0. + padding (int | tuple[int]): Zero-padding added to both sides of + the input. Same as that in ``nn._ConvNd``. + dilation (int | tuple[int]): Spacing between kernel elements. + Same as that in ``nn._ConvNd``. + groups (int): Number of blocked connections from input channels to + output channels. Same as that in ``nn._ConvNd``. + bias (bool | str): If specified as `auto`, it will be decided by the + norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise + False. Default: "auto". + conv_cfg (dict): Config dict for convolution layer. Default: None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. Default: None. + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + inplace (bool): Whether to use inplace mode for activation. + Default: True. + with_spectral_norm (bool): Whether use spectral norm in conv module. + Default: False. + padding_mode (str): If the `padding_mode` has not been supported by + current `Conv2d` in PyTorch, we will use our own padding layer + instead. Currently, we support ['zeros', 'circular'] with official + implementation and ['reflect'] with our own implementation. + Default: 'zeros'. + order (tuple[str]): The order of conv/norm/activation layers. It is a + sequence of "conv", "norm" and "act". Common examples are + ("conv", "norm", "act") and ("act", "conv", "norm"). + Default: ('conv', 'norm', 'act'). + """ + + def __init__(self, *args, has_skip=False, drop_path_rate=0, **kwargs): + super().__init__(*args, **kwargs) + self.has_skip = has_skip + if self.has_skip and (self.in_channels != self.out_channels + or self.stride != (1, 1)): + raise ValueError('the stride must be 1 and the `in_channels` and' + ' `out_channels` must be the same , when ' + '`has_skip` is True in `EnhancedConvModule` .') + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate else nn.Identity() + + def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor: + short_cut = x + x = super().forward(x, **kwargs) + if self.has_skip: + x = self.drop_path(x) + short_cut + return x + + +@MODELS.register_module() +class EfficientNetV2(BaseBackbone): + """EfficientNetV2 backbone. 
+ + A PyTorch implementation of EfficientNetV2 introduced by: + `EfficientNetV2: Smaller Models and Faster Training + `_ + + Args: + arch (str): Architecture of efficientnetv2. Defaults to s. + in_channels (int): Number of input image channels. Defaults to 3. + drop_path_rate (float): The ratio of the stochastic depth. + Defaults to 0.0. + out_indices (Sequence[int]): Output from which stages. + Defaults to (-1, ). + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + conv_cfg (dict): Config dict for convolution layer. + Defaults to None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Defaults to dict(type='Swish'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + """ + + # Parameters to build layers. From left to right: + # - repeat (int): The repeat number of the block in the layer + # - kernel_size (int): The kernel size of the layer + # - stride (int): The stride of the first block of the layer + # - expand_ratio (int, float): The expand_ratio of the mid_channels + # - in_channel (int): The number of in_channels of the layer + # - out_channel (int): The number of out_channels of the layer + # - se_ratio (float): The sequeeze ratio of SELayer. + # - block_type (int): -2: ConvModule, -1: EnhancedConvModule, + # 0: FusedMBConv, 1: MBConv + arch_settings = { + **dict.fromkeys(['small', 's'], [[2, 3, 1, 1, 24, 24, 0.0, -1], + [4, 3, 2, 4, 24, 48, 0.0, 0], + [4, 3, 2, 4, 48, 64, 0.0, 0], + [6, 3, 2, 4, 64, 128, 0.25, 1], + [9, 3, 1, 6, 128, 160, 0.25, 1], + [15, 3, 2, 6, 160, 256, 0.25, 1], + [1, 1, 1, 1, 256, 1280, 0.0, -2]]), + **dict.fromkeys(['m', 'medium'], [[3, 3, 1, 1, 24, 24, 0.0, -1], + [5, 3, 2, 4, 24, 48, 0.0, 0], + [5, 3, 2, 4, 48, 80, 0.0, 0], + [7, 3, 2, 4, 80, 160, 0.25, 1], + [14, 3, 1, 6, 160, 176, 0.25, 1], + [18, 3, 2, 6, 176, 304, 0.25, 1], + [5, 3, 1, 6, 304, 512, 0.25, 1], + [1, 1, 1, 1, 512, 1280, 0.0, -2]]), + **dict.fromkeys(['l', 'large'], [[4, 3, 1, 1, 32, 32, 0.0, -1], + [7, 3, 2, 4, 32, 64, 0.0, 0], + [7, 3, 2, 4, 64, 96, 0.0, 0], + [10, 3, 2, 4, 96, 192, 0.25, 1], + [19, 3, 1, 6, 192, 224, 0.25, 1], + [25, 3, 2, 6, 224, 384, 0.25, 1], + [7, 3, 1, 6, 384, 640, 0.25, 1], + [1, 1, 1, 1, 640, 1280, 0.0, -2]]), + **dict.fromkeys(['xl'], [[4, 3, 1, 1, 32, 32, 0.0, -1], + [8, 3, 2, 4, 32, 64, 0.0, 0], + [8, 3, 2, 4, 64, 96, 0.0, 0], + [16, 3, 2, 4, 96, 192, 0.25, 1], + [24, 3, 1, 6, 192, 256, 0.25, 1], + [32, 3, 2, 6, 256, 512, 0.25, 1], + [8, 3, 1, 6, 512, 640, 0.25, 1], + [1, 1, 1, 1, 640, 1280, 0.0, -2]]), + **dict.fromkeys(['b0'], [[1, 3, 1, 1, 32, 16, 0.0, -1], + [2, 3, 2, 4, 16, 32, 0.0, 0], + [2, 3, 2, 4, 32, 48, 0.0, 0], + [3, 3, 2, 4, 48, 96, 0.25, 1], + [5, 3, 1, 6, 96, 112, 0.25, 1], + [8, 3, 2, 6, 112, 192, 0.25, 1], + [1, 1, 1, 1, 192, 1280, 0.0, -2]]), + **dict.fromkeys(['b1'], [[2, 3, 1, 1, 32, 16, 0.0, -1], + [3, 3, 2, 4, 16, 32, 0.0, 0], + [3, 3, 2, 4, 32, 48, 0.0, 0], + [4, 3, 2, 4, 48, 96, 0.25, 1], + [6, 3, 1, 6, 96, 112, 0.25, 1], + [9, 3, 2, 6, 112, 192, 0.25, 1], + [1, 1, 1, 1, 192, 1280, 0.0, -2]]), + **dict.fromkeys(['b2'], [[2, 3, 1, 1, 32, 16, 0.0, -1], + [3, 3, 2, 4, 16, 32, 0.0, 0], + [3, 
3, 2, 4, 32, 56, 0.0, 0], + [4, 3, 2, 4, 56, 104, 0.25, 1], + [6, 3, 1, 6, 104, 120, 0.25, 1], + [10, 3, 2, 6, 120, 208, 0.25, 1], + [1, 1, 1, 1, 208, 1408, 0.0, -2]]), + **dict.fromkeys(['b3'], [[2, 3, 1, 1, 40, 16, 0.0, -1], + [3, 3, 2, 4, 16, 40, 0.0, 0], + [3, 3, 2, 4, 40, 56, 0.0, 0], + [5, 3, 2, 4, 56, 112, 0.25, 1], + [7, 3, 1, 6, 112, 136, 0.25, 1], + [12, 3, 2, 6, 136, 232, 0.25, 1], + [1, 1, 1, 1, 232, 1536, 0.0, -2]]) + } + + def __init__(self, + arch: str = 's', + in_channels: int = 3, + drop_path_rate: float = 0., + out_indices: Sequence[int] = (-1, ), + frozen_stages: int = 0, + conv_cfg=dict(type='Conv2dAdaptivePadding'), + norm_cfg=dict(type='BN', eps=1e-3, momentum=0.1), + act_cfg=dict(type='Swish'), + norm_eval: bool = False, + with_cp: bool = False, + init_cfg=[ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + layer=['_BatchNorm', 'GroupNorm'], + val=1) + ]): + super(EfficientNetV2, self).__init__(init_cfg) + assert arch in self.arch_settings, \ + f'"{arch}" is not one of the arch_settings ' \ + f'({", ".join(self.arch_settings.keys())})' + self.arch = self.arch_settings[arch] + if frozen_stages not in range(len(self.arch) + 1): + raise ValueError('frozen_stages must be in range(0, ' + f'{len(self.arch)}), but get {frozen_stages}') + self.drop_path_rate = drop_path_rate + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.layers = nn.ModuleList() + assert self.arch[-1][-1] == -2, \ + f'the last block_type of `arch_setting` must be -2 ,' \ + f'but get `{self.arch[-1][-1]}`' + self.in_channels = in_channels + self.out_channels = self.arch[-1][5] + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self.make_layers() + + # there len(slef.arch) + 2 layers in the backbone + # including: the first + len(self.arch) layers + the last + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + out_indices = list(out_indices) + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = len(self.layers) + index + assert 0 <= out_indices[i] <= len(self.layers), \ + f'Invalid out_indices {index}.' 
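+        # For instance, with the default arch 's' and out_indices=(-1, ), the
+        # -1 resolves to the index of the last layer (the final 1x1 ConvModule
+        # appended in make_layers), whose output has 1280 channels.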
+ self.out_indices = out_indices + + def make_layers(self, ): + # make the first layer + self.layers.append( + ConvModule( + in_channels=self.in_channels, + out_channels=self.arch[0][4], + kernel_size=3, + stride=2, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + in_channels = self.arch[0][4] + layer_setting = self.arch[:-1] + + total_num_blocks = sum([x[0] for x in layer_setting]) + block_idx = 0 + dpr = [ + x.item() + for x in torch.linspace(0, self.drop_path_rate, total_num_blocks) + ] # stochastic depth decay rule + + for layer_cfg in layer_setting: + layer = [] + (repeat, kernel_size, stride, expand_ratio, _, out_channels, + se_ratio, block_type) = layer_cfg + for i in range(repeat): + stride = stride if i == 0 else 1 + if block_type == -1: + has_skip = stride == 1 and in_channels == out_channels + droppath_rate = dpr[block_idx] if has_skip else 0.0 + layer.append( + EnhancedConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + has_skip=has_skip, + drop_path_rate=droppath_rate, + stride=stride, + padding=1, + conv_cfg=None, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + in_channels = out_channels + else: + mid_channels = int(in_channels * expand_ratio) + se_cfg = None + if block_type != 0 and se_ratio > 0: + se_cfg = dict( + channels=mid_channels, + ratio=expand_ratio * (1.0 / se_ratio), + divisor=1, + act_cfg=(self.act_cfg, dict(type='Sigmoid'))) + block = FusedMBConv if block_type == 0 else MBConv + conv_cfg = self.conv_cfg if stride == 2 else None + layer.append( + block( + in_channels=in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + conv_cfg=conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + drop_path_rate=dpr[block_idx], + with_cp=self.with_cp)) + in_channels = out_channels + block_idx += 1 + self.layers.append(Sequential(*layer)) + + # make the last layer + self.layers.append( + ConvModule( + in_channels=in_channels, + out_channels=self.out_channels, + kernel_size=self.arch[-1][1], + stride=self.arch[-1][2], + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + + def forward(self, x: Tensor) -> Tuple[Tensor]: + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(self.frozen_stages): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(EfficientNetV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() diff --git a/mmpretrain/models/backbones/hivit.py b/mmpretrain/models/backbones/hivit.py new file mode 100644 index 0000000..981cbf8 --- /dev/null +++ b/mmpretrain/models/backbones/hivit.py @@ -0,0 +1,656 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from mmcv.cnn.bricks import DropPath +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.registry import MODELS +from ..utils import build_norm_layer, to_2tuple +from .base_backbone import BaseBackbone + + +class Mlp(nn.Module): + """MLP block. + + Args: + in_features (int): Number of input dims. + hidden_features (int): Number of hidden dims. + out_feature (int): Number of out dims. + act_layer: MLP activation layer. + drop (float): MLP dropout rate. 
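+
+    Example:
+        >>> import torch
+        >>> # Illustrative shape check; 384 and 1536 are arbitrary dims.
+        >>> mlp = Mlp(in_features=384, hidden_features=1536)
+        >>> mlp(torch.rand(2, 196, 384)).shape
+        torch.Size([2, 196, 384])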
+ """ + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + """Attention. + + Args: + input size (int): Input size. + dim (int): Number of input dims. + num_heads (int): Number of attention heads. + qkv_bias (bool): Enable bias for qkv projections if True. + qk_scale (float): The number of divider after q@k. Default to None. + attn_drop (float): The drop out rate for attention output weights. + Defaults to 0. + proj_drop (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + rpe (bool): If True, add relative position embedding to + the patch embedding. + """ + + def __init__(self, + input_size, + dim, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop=0., + proj_drop=0., + rpe=True): + super().__init__() + self.input_size = input_size + self.dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim**-0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * input_size - 1) * + (2 * input_size - 1), num_heads)) if rpe else None + if rpe: + coords_h = torch.arange(input_size) + coords_w = torch.arange(input_size) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) + coords_flatten = torch.flatten(coords, 1) + relative_coords = coords_flatten[:, :, + None] - coords_flatten[:, None, :] + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + relative_coords[:, :, 0] += input_size - 1 + relative_coords[:, :, 1] += input_size - 1 + relative_coords[:, :, 0] *= 2 * input_size - 1 + relative_position_index = relative_coords.sum(-1) + self.register_buffer('relative_position_index', + relative_position_index) + + trunc_normal_(self.relative_position_bias_table, std=.02) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, rpe_index=None, mask=None): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if rpe_index is not None: + rpe_index = self.relative_position_index.view(-1) + S = int(math.sqrt(rpe_index.size(-1))) + relative_position_bias = self.relative_position_bias_table[ + rpe_index].view(-1, S, S, self.num_heads) + relative_position_bias = relative_position_bias.permute( + 0, 3, 1, 2).contiguous() + attn = attn + relative_position_bias + if mask is not None: + mask = mask.bool() + attn = attn.masked_fill(~mask[:, None, None, :], float('-inf')) + attn = self.softmax(attn) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class BlockWithRPE(nn.Module): + """HiViT block. + + Args: + input_size (int): Input size. + dim (int): Number of input dims. 
+ num_heads (int): Number of attention heads. + mlp_ratio (int): Ratio of MLP hidden dim to embedding dim. + qkv_bias (bool): Enable bias for qkv projections if True. + qk_scale (float): The number of divider after q@k. Default to None. + drop (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path (float): Stochastic depth rate. Defaults to 0. + rpe (bool): If True, add relative position embedding to + the patch embedding. + layer_scale_init_value (float): Layer-scale init values. Defaults to 0. + act_layer: MLP activation layer. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + """ + + def __init__(self, + input_size, + dim, + num_heads=0., + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + rpe=True, + layer_scale_init_value=0.0, + act_layer=nn.GELU, + norm_cfg=dict(type='LN')): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.mlp_ratio = mlp_ratio + + with_attn = num_heads > 0. + + self.norm1 = build_norm_layer(norm_cfg, dim) if with_attn else None + self.attn = Attention( + input_size, + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + rpe=rpe, + ) if with_attn else None + + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = build_norm_layer(norm_cfg, dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + if layer_scale_init_value > 0: + self.gamma_1 = nn.Parameter( + layer_scale_init_value * torch.ones( + (dim)), requires_grad=True) if with_attn else None + self.gamma_2 = nn.Parameter( + layer_scale_init_value * torch.ones((dim)), requires_grad=True) + else: + self.gamma_1, self.gamma_2 = None, None + + def forward(self, x, rpe_index=None, mask=None): + if self.attn is not None: + if self.gamma_1 is not None: + x = x + self.drop_path( + self.gamma_1 * self.attn(self.norm1(x), rpe_index, mask)) + else: + x = x + self.drop_path( + self.attn(self.norm1(x), rpe_index, mask)) + if self.gamma_2 is not None: + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class PatchEmbed(nn.Module): + """PatchEmbed for HiViT. + + Args: + img_size (int): Input image size. + patch_size (int): Patch size. Defaults to 16. + inner_patches (int): Inner patch. Defaults to 4. + in_chans (int): Number of image input channels. + embed_dim (int): Transformer embedding dimension. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + kernel_size (int): Kernel size. + pad_size (int): Pad size. 
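+
+    For example, with the defaults (``img_size=224``, ``patch_size=16``,
+    ``inner_patches=4``, ``embed_dim=128``), the projection produces a tensor
+    of shape ``(B, 196, 4, 4, 128)``: 14 x 14 = 196 outer patches, each
+    further divided into 4 x 4 inner tokens of dimension 128.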
+ """ + + def __init__(self, + img_size=224, + patch_size=16, + inner_patches=4, + in_chans=3, + embed_dim=128, + norm_cfg=None, + kernel_size=None, + pad_size=None): + super().__init__() + img_size = to_2tuple(img_size) if not isinstance(img_size, + tuple) else img_size + patch_size = to_2tuple(patch_size) + patches_resolution = [ + img_size[0] // patch_size[0], img_size[1] // patch_size[1] + ] + self.img_size = img_size + self.patch_size = patch_size + self.inner_patches = inner_patches + self.patches_resolution = patches_resolution + self.num_patches = patches_resolution[0] * patches_resolution[1] + + self.in_chans = in_chans + self.embed_dim = embed_dim + + conv_size = [size // inner_patches for size in patch_size] + kernel_size = kernel_size or conv_size + pad_size = pad_size or 0 + self.proj = nn.Conv2d( + in_chans, + embed_dim, + kernel_size=kernel_size, + stride=conv_size, + padding=pad_size) + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dim) + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + patches_resolution = (H // self.patch_size[0], W // self.patch_size[1]) + num_patches = patches_resolution[0] * patches_resolution[1] + x = self.proj(x).view( + B, + -1, + patches_resolution[0], + self.inner_patches, + patches_resolution[1], + self.inner_patches, + ).permute(0, 2, 4, 3, 5, 1).reshape(B, num_patches, self.inner_patches, + self.inner_patches, -1) + if self.norm is not None: + x = self.norm(x) + return x + + +class PatchMerge(nn.Module): + """PatchMerge for HiViT. + + Args: + dim (int): Number of input channels. + norm_cfg (dict): Config dict for normalization layer. + """ + + def __init__(self, dim, norm_cfg): + super().__init__() + self.norm = build_norm_layer(norm_cfg, dim * 4) + self.reduction = nn.Linear(dim * 4, dim * 2, bias=False) + + def forward(self, x, *args, **kwargs): + is_main_stage = len(x.shape) == 3 + if is_main_stage: + B, N, C = x.shape + S = int(math.sqrt(N)) + x = x.reshape(B, S // 2, 2, S // 2, 2, C) \ + .permute(0, 1, 3, 2, 4, 5) \ + .reshape(B, -1, 2, 2, C) + x0 = x[..., 0::2, 0::2, :] + x1 = x[..., 1::2, 0::2, :] + x2 = x[..., 0::2, 1::2, :] + x3 = x[..., 1::2, 1::2, :] + + x = torch.cat([x0, x1, x2, x3], dim=-1) + x = self.norm(x) + x = self.reduction(x) + + if is_main_stage: + x = x[:, :, 0, 0, :] + return x + + +@MODELS.register_module() +class HiViT(BaseBackbone): + """HiViT. + + A PyTorch implement of: `HiViT: A Simple and More Efficient Design + of Hierarchical Vision Transformer `_. + + Args: + arch (str | dict): Swin Transformer architecture. If use string, choose + from 'tiny', 'small', and'base'. If use dict, it should + have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **depths** (List[int]): The number of blocks in each stage. + - **num_heads** (int): The number of heads in attention + modules of each stage. + + Defaults to 'tiny'. + img_size (int): Input image size. + patch_size (int): Patch size. Defaults to 16. + inner_patches (int): Inner patch. Defaults to 4. + in_chans (int): Number of image input channels. + embed_dim (int): Transformer embedding dimension. + depths (list[int]): Number of successive HiViT blocks. + num_heads (int): Number of attention heads. + stem_mlp_ratio (int): Ratio of MLP hidden dim to embedding dim + in the first two stages. + mlp_ratio (int): Ratio of MLP hidden dim to embedding dim in + the last stage. + qkv_bias (bool): Enable bias for qkv projections if True. + qk_scale (float): The number of divider after q@k. Default to None. 
+ drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop_rate (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + ape (bool): If True, add absolute position embedding to + the patch embedding. + rpe (bool): If True, add relative position embedding to + the patch embedding. + patch_norm (bool): If True, use norm_cfg for normalization layer. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + kernel_size (int): Kernel size. + pad_size (int): Pad size. + layer_scale_init_value (float): Layer-scale init values. Defaults to 0. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. + """ + arch_zoo = { + **dict.fromkeys(['t', 'tiny'], + {'embed_dims': 384, + 'depths': [1, 1, 10], + 'num_heads': 6}), + **dict.fromkeys(['s', 'small'], + {'embed_dims': 384, + 'depths': [2, 2, 20], + 'num_heads': 6}), + **dict.fromkeys(['b', 'base'], + {'embed_dims': 512, + 'depths': [2, 2, 24], + 'num_heads': 8}), + **dict.fromkeys(['l', 'large'], + {'embed_dims': 768, + 'depths': [2, 2, 40], + 'num_heads': 12}), + } # yapf: disable + + num_extra_tokens = 0 + + def __init__(self, + arch='base', + img_size=224, + patch_size=16, + inner_patches=4, + in_chans=3, + stem_mlp_ratio=3., + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.0, + norm_cfg=dict(type='LN'), + out_indices=[23], + ape=True, + rpe=False, + patch_norm=True, + frozen_stages=-1, + kernel_size=None, + pad_size=None, + layer_scale_init_value=0.0, + init_cfg=None): + super(HiViT, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = {'embed_dims', 'depths', 'num_heads'} + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + self.embed_dims = self.arch_settings['embed_dims'] + self.depths = self.arch_settings['depths'] + self.num_heads = self.arch_settings['num_heads'] + + self.num_stages = len(self.depths) + self.ape = ape + self.rpe = rpe + self.patch_size = patch_size + self.num_features = self.embed_dims + self.mlp_ratio = mlp_ratio + self.num_main_blocks = self.depths[-1] + self.out_indices = out_indices + self.out_indices[-1] = self.depths[-1] - 1 + + img_size = to_2tuple(img_size) if not isinstance(img_size, + tuple) else img_size + + embed_dim = self.embed_dims // 2**(self.num_stages - 1) + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + img_size=img_size, + patch_size=patch_size, + inner_patches=inner_patches, + in_chans=in_chans, + embed_dim=embed_dim, + norm_cfg=norm_cfg if patch_norm else None, + kernel_size=kernel_size, + pad_size=pad_size) + num_patches = self.patch_embed.num_patches + Hp, Wp = self.patch_embed.patches_resolution + + if rpe: + assert Hp == Wp, 'If you use relative position, make sure H == W ' + 'of input size' + + # absolute position embedding + if ape: + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches, self.num_features)) + trunc_normal_(self.pos_embed, std=.02) + if rpe: + # get 
pair-wise relative position index for each token inside the + # window + coords_h = torch.arange(Hp) + coords_w = torch.arange(Wp) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) + coords_flatten = torch.flatten(coords, 1) + relative_coords = coords_flatten[:, :, + None] - coords_flatten[:, None, :] + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + relative_coords[:, :, 0] += Hp - 1 + relative_coords[:, :, 1] += Wp - 1 + relative_coords[:, :, 0] *= 2 * Wp - 1 + relative_position_index = relative_coords.sum(-1) + self.register_buffer('relative_position_index', + relative_position_index) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = iter( + x.item() + for x in torch.linspace(0, drop_path_rate, + sum(self.depths) + sum(self.depths[:-1]))) + + # build blocks + self.blocks = nn.ModuleList() + for stage_i, stage_depth in enumerate(self.depths): + is_main_stage = embed_dim == self.num_features + nhead = self.num_heads if is_main_stage else 0 + ratio = mlp_ratio if is_main_stage else stem_mlp_ratio + # every block not in main stage includes two mlp blocks + stage_depth = stage_depth if is_main_stage else stage_depth * 2 + for _ in range(stage_depth): + self.blocks.append( + BlockWithRPE( + Hp, + embed_dim, + nhead, + ratio, + qkv_bias, + qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=next(dpr), + rpe=rpe, + norm_cfg=norm_cfg, + layer_scale_init_value=layer_scale_init_value, + )) + if stage_i + 1 < self.num_stages: + self.blocks.append(PatchMerge(embed_dim, norm_cfg)) + embed_dim *= 2 + + self.frozen_stages = frozen_stages + if self.frozen_stages > 0: + self._freeze_stages() + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def interpolate_pos_encoding(self, x, h, w): + npatch = x.shape[1] + N = self.pos_embed.shape[1] + if npatch == N and w == h: + return self.pos_embed + patch_pos_embed = self.pos_embed + dim = x.shape[-1] + w0 = w // self.patch_size + h0 = h // self.patch_size + # we add a small number to avoid floating point error in interpolation + # see discussion at https://github.com/facebookresearch/dino/issues/8 + w0, h0 = w0 + 0.1, h0 + 0.1 + patch_pos_embed = nn.functional.interpolate( + patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), + dim).permute(0, 3, 1, 2), + scale_factor=(h0 / math.sqrt(N), w0 / math.sqrt(N)), + mode='bicubic', + ) + assert int(h0) == patch_pos_embed.shape[-2] and int( + w0) == patch_pos_embed.shape[-1] + patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return patch_pos_embed + + def forward(self, x): + B, C, H, W = x.shape + Hp, Wp = H // self.patch_size, W // self.patch_size + + x = self.patch_embed(x) + + outs = [] + for i, blk in enumerate(self.blocks[:-self.num_main_blocks]): + x = blk(x) + if i in self.out_indices: + x = x.reshape(B, Hp, Wp, *x.shape[-3:]).permute( + 0, 5, 1, 3, 2, 4).reshape(B, -1, Hp * x.shape[-3], + Wp * x.shape[-2]).contiguous() + outs.append(x) + + x = x[..., 0, 0, :] + if self.ape: + x = x + self.interpolate_pos_encoding(x, H, W) + x = self.pos_drop(x) + + rpe_index = True if self.rpe else None + + for i, blk in enumerate(self.blocks[-self.num_main_blocks:]): + x = blk(x, rpe_index) + if i in self.out_indices: + x = x.transpose(1, 2).view(B, -1, 
Hp, Wp).contiguous() + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + # freeze position embedding + if self.pos_embed is not None: + self.pos_embed.requires_grad = False + # set dropout to eval model + self.pos_drop.eval() + # freeze patch embedding + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + # freeze layers + for i in range(1, self.frozen_stages + 1): + m = self.blocks[i - 1] + m.eval() + for param in m.parameters(): + param.requires_grad = False + # freeze the last layer norm + for param in self.fc_norm.parameters(): + param.requires_grad = False + + def get_layer_depth(self, param_name: str, prefix: str = ''): + """Get the layer-wise depth of a parameter. + + Args: + param_name (str): The name of the parameter. + prefix (str): The prefix for the parameter. + Defaults to an empty string. + + Returns: + Tuple[int, int]: The layer-wise depth and the num of layers. + + Note: + The first depth is the stem module (``layer_depth=0``), and the + last depth is the subsequent module (``layer_depth=num_layers-1``) + """ + self.num_layers = len(self.blocks) + num_layers = self.num_layers + 2 + + if not param_name.startswith(prefix): + # For subsequent module like head + return num_layers - 1, num_layers + + param_name = param_name[len(prefix):] + + if param_name in 'pos_embed': + layer_depth = 0 + elif param_name.startswith('patch_embed'): + layer_depth = 0 + elif param_name.startswith('layers'): + layer_id = int(param_name.split('.')[1]) + layer_depth = layer_id + 1 + else: + layer_depth = num_layers - 1 + + return layer_depth, num_layers diff --git a/mmpretrain/models/backbones/hornet.py b/mmpretrain/models/backbones/hornet.py new file mode 100644 index 0000000..460f2dc --- /dev/null +++ b/mmpretrain/models/backbones/hornet.py @@ -0,0 +1,500 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Adapted from official impl at https://github.com/raoyongming/HorNet. +try: + import torch.fft + fft = True +except ImportError: + fft = None + +import copy +from functools import partial +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from mmcv.cnn.bricks import DropPath + +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.registry import MODELS +from ..utils import LayerScale + + +def get_dwconv(dim, kernel_size, bias=True): + """build a pepth-wise convolution.""" + return nn.Conv2d( + dim, + dim, + kernel_size=kernel_size, + padding=(kernel_size - 1) // 2, + bias=bias, + groups=dim) + + +class HorNetLayerNorm(nn.Module): + """An implementation of LayerNorm of HorNet. + + The differences between HorNetLayerNorm & torch LayerNorm: + 1. Supports two data formats channels_last or channels_first. + Args: + normalized_shape (int or list or torch.Size): input shape from an + expected input of size. + eps (float): a value added to the denominator for numerical stability. + Defaults to 1e-5. + data_format (str): The ordering of the dimensions in the inputs. + channels_last corresponds to inputs with shape (batch_size, height, + width, channels) while channels_first corresponds to inputs with + shape (batch_size, channels, height, width). + Defaults to 'channels_last'. 
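+
+    A minimal usage sketch of the ``channels_first`` branch (illustrative
+    only; the normalization keeps the input shape):
+
+    Example:
+        >>> import torch
+        >>> norm = HorNetLayerNorm(64, data_format='channels_first')
+        >>> x = torch.rand(2, 64, 14, 14)
+        >>> norm(x).shape
+        torch.Size([2, 64, 14, 14])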
+ """ + + def __init__(self, + normalized_shape, + eps=1e-6, + data_format='channels_last'): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.data_format = data_format + if self.data_format not in ['channels_last', 'channels_first']: + raise ValueError( + 'data_format must be channels_last or channels_first') + self.normalized_shape = (normalized_shape, ) + + def forward(self, x): + if self.data_format == 'channels_last': + return F.layer_norm(x, self.normalized_shape, self.weight, + self.bias, self.eps) + elif self.data_format == 'channels_first': + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x + + +class GlobalLocalFilter(nn.Module): + """A GlobalLocalFilter of HorNet. + + Args: + dim (int): Number of input channels. + h (int): Height of complex_weight. + Defaults to 14. + w (int): Width of complex_weight. + Defaults to 8. + """ + + def __init__(self, dim, h=14, w=8): + super().__init__() + self.dw = nn.Conv2d( + dim // 2, + dim // 2, + kernel_size=3, + padding=1, + bias=False, + groups=dim // 2) + self.complex_weight = nn.Parameter( + torch.randn(dim // 2, h, w, 2, dtype=torch.float32) * 0.02) + self.pre_norm = HorNetLayerNorm( + dim, eps=1e-6, data_format='channels_first') + self.post_norm = HorNetLayerNorm( + dim, eps=1e-6, data_format='channels_first') + + def forward(self, x): + x = self.pre_norm(x) + x1, x2 = torch.chunk(x, 2, dim=1) + x1 = self.dw(x1) + + x2 = x2.to(torch.float32) + B, C, a, b = x2.shape + x2 = torch.fft.rfft2(x2, dim=(2, 3), norm='ortho') + + weight = self.complex_weight + if not weight.shape[1:3] == x2.shape[2:4]: + weight = F.interpolate( + weight.permute(3, 0, 1, 2), + size=x2.shape[2:4], + mode='bilinear', + align_corners=True).permute(1, 2, 3, 0) + + weight = torch.view_as_complex(weight.contiguous()) + + x2 = x2 * weight + x2 = torch.fft.irfft2(x2, s=(a, b), dim=(2, 3), norm='ortho') + + x = torch.cat([x1.unsqueeze(2), x2.unsqueeze(2)], + dim=2).reshape(B, 2 * C, a, b) + x = self.post_norm(x) + return x + + +class gnConv(nn.Module): + """A gnConv of HorNet. + + Args: + dim (int): Number of input channels. + order (int): Order of gnConv. + Defaults to 5. + dw_cfg (dict): The Config for dw conv. + Defaults to ``dict(type='DW', kernel_size=7)``. + scale (float): Scaling parameter of gflayer outputs. + Defaults to 1.0. 
+ """ + + def __init__(self, + dim, + order=5, + dw_cfg=dict(type='DW', kernel_size=7), + scale=1.0): + super().__init__() + self.order = order + self.dims = [dim // 2**i for i in range(order)] + self.dims.reverse() + self.proj_in = nn.Conv2d(dim, 2 * dim, 1) + + cfg = copy.deepcopy(dw_cfg) + dw_type = cfg.pop('type') + assert dw_type in ['DW', 'GF'],\ + 'dw_type should be `DW` or `GF`' + if dw_type == 'DW': + self.dwconv = get_dwconv(sum(self.dims), **cfg) + elif dw_type == 'GF': + self.dwconv = GlobalLocalFilter(sum(self.dims), **cfg) + + self.proj_out = nn.Conv2d(dim, dim, 1) + + self.projs = nn.ModuleList([ + nn.Conv2d(self.dims[i], self.dims[i + 1], 1) + for i in range(order - 1) + ]) + + self.scale = scale + + def forward(self, x): + x = self.proj_in(x) + y, x = torch.split(x, (self.dims[0], sum(self.dims)), dim=1) + + x = self.dwconv(x) * self.scale + + dw_list = torch.split(x, self.dims, dim=1) + x = y * dw_list[0] + + for i in range(self.order - 1): + x = self.projs[i](x) * dw_list[i + 1] + + x = self.proj_out(x) + + return x + + +class HorNetBlock(nn.Module): + """A block of HorNet. + + Args: + dim (int): Number of input channels. + order (int): Order of gnConv. + Defaults to 5. + dw_cfg (dict): The Config for dw conv. + Defaults to ``dict(type='DW', kernel_size=7)``. + scale (float): Scaling parameter of gflayer outputs. + Defaults to 1.0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + use_layer_scale (bool): Whether to use use_layer_scale in HorNet + block. Defaults to True. + """ + + def __init__(self, + dim, + order=5, + dw_cfg=dict(type='DW', kernel_size=7), + scale=1.0, + drop_path_rate=0., + use_layer_scale=True): + super().__init__() + self.out_channels = dim + + self.norm1 = HorNetLayerNorm( + dim, eps=1e-6, data_format='channels_first') + self.gnconv = gnConv(dim, order, dw_cfg, scale) + self.norm2 = HorNetLayerNorm(dim, eps=1e-6) + self.pwconv1 = nn.Linear(dim, 4 * dim) + self.act = nn.GELU() + self.pwconv2 = nn.Linear(4 * dim, dim) + + if use_layer_scale: + self.gamma1 = LayerScale(dim, data_format='channels_first') + self.gamma2 = LayerScale(dim) + else: + self.gamma1, self.gamma2 = nn.Identity(), nn.Identity() + + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0. else nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.gamma1(self.gnconv(self.norm1(x)))) + + input = x + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + x = self.norm2(x) + x = self.pwconv1(x) + x = self.act(x) + x = self.pwconv2(x) + x = self.gamma2(x) + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + + x = input + self.drop_path(x) + return x + + +@MODELS.register_module() +class HorNet(BaseBackbone): + """HorNet backbone. + + A PyTorch implementation of paper `HorNet: Efficient High-Order Spatial + Interactions with Recursive Gated Convolutions + `_ . + Inspiration from https://github.com/raoyongming/HorNet + + Args: + arch (str | dict): HorNet architecture. + + If use string, choose from 'tiny', 'small', 'base' and 'large'. + If use dict, it should have below keys: + + - **base_dim** (int): The base dimensions of embedding. + - **depths** (List[int]): The number of blocks in each stage. + - **orders** (List[int]): The number of order of gnConv in each + stage. + - **dw_cfg** (List[dict]): The Config for dw conv. + + Defaults to 'tiny'. + in_channels (int): Number of input image channels. Defaults to 3. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + scale (float): Scaling parameter of gflayer outputs. 
Defaults to 1/3. + use_layer_scale (bool): Whether to use use_layer_scale in HorNet + block. Defaults to True. + out_indices (Sequence[int]): Output from which stages. + Default: ``(3, )``. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + gap_before_final_norm (bool): Whether to globally average the feature + map before the final norm layer. In the official repo, it's only + used in classification task. Defaults to True. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + arch_zoo = { + **dict.fromkeys(['t', 'tiny'], + {'base_dim': 64, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [dict(type='DW', kernel_size=7)] * 4}), + **dict.fromkeys(['t-gf', 'tiny-gf'], + {'base_dim': 64, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=14, w=8), + dict(type='GF', h=7, w=4)]}), + **dict.fromkeys(['s', 'small'], + {'base_dim': 96, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [dict(type='DW', kernel_size=7)] * 4}), + **dict.fromkeys(['s-gf', 'small-gf'], + {'base_dim': 96, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=14, w=8), + dict(type='GF', h=7, w=4)]}), + **dict.fromkeys(['b', 'base'], + {'base_dim': 128, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [dict(type='DW', kernel_size=7)] * 4}), + **dict.fromkeys(['b-gf', 'base-gf'], + {'base_dim': 128, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=14, w=8), + dict(type='GF', h=7, w=4)]}), + **dict.fromkeys(['b-gf384', 'base-gf384'], + {'base_dim': 128, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=24, w=12), + dict(type='GF', h=13, w=7)]}), + **dict.fromkeys(['l', 'large'], + {'base_dim': 192, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [dict(type='DW', kernel_size=7)] * 4}), + **dict.fromkeys(['l-gf', 'large-gf'], + {'base_dim': 192, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=14, w=8), + dict(type='GF', h=7, w=4)]}), + **dict.fromkeys(['l-gf384', 'large-gf384'], + {'base_dim': 192, + 'depths': [2, 3, 18, 2], + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=24, w=12), + dict(type='GF', h=13, w=7)]}), + } # yapf: disable + + def __init__(self, + arch='tiny', + in_channels=3, + drop_path_rate=0., + scale=1 / 3, + use_layer_scale=True, + out_indices=(3, ), + frozen_stages=-1, + with_cp=False, + gap_before_final_norm=True, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + if fft is None: + raise RuntimeError( + 'Failed to import torch.fft. 
Please install "torch>=1.7".') + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = {'base_dim', 'depths', 'orders', 'dw_cfg'} + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.scale = scale + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.with_cp = with_cp + self.gap_before_final_norm = gap_before_final_norm + + base_dim = self.arch_settings['base_dim'] + dims = list(map(lambda x: 2**x * base_dim, range(4))) + + self.downsample_layers = nn.ModuleList() + stem = nn.Sequential( + nn.Conv2d(in_channels, dims[0], kernel_size=4, stride=4), + HorNetLayerNorm(dims[0], eps=1e-6, data_format='channels_first')) + self.downsample_layers.append(stem) + for i in range(3): + downsample_layer = nn.Sequential( + HorNetLayerNorm( + dims[i], eps=1e-6, data_format='channels_first'), + nn.Conv2d(dims[i], dims[i + 1], kernel_size=2, stride=2), + ) + self.downsample_layers.append(downsample_layer) + + total_depth = sum(self.arch_settings['depths']) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] # stochastic depth decay rule + + cur_block_idx = 0 + self.stages = nn.ModuleList() + for i in range(4): + stage = nn.Sequential(*[ + HorNetBlock( + dim=dims[i], + order=self.arch_settings['orders'][i], + dw_cfg=self.arch_settings['dw_cfg'][i], + scale=self.scale, + drop_path_rate=dpr[cur_block_idx + j], + use_layer_scale=use_layer_scale) + for j in range(self.arch_settings['depths'][i]) + ]) + self.stages.append(stage) + cur_block_idx += self.arch_settings['depths'][i] + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + out_indices = list(out_indices) + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = len(self.stages) + index + assert 0 <= out_indices[i] <= len(self.stages), \ + f'Invalid out_indices {index}.' 
+ self.out_indices = out_indices + + norm_layer = partial( + HorNetLayerNorm, eps=1e-6, data_format='channels_first') + for i_layer in out_indices: + layer = norm_layer(dims[i_layer]) + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + + def train(self, mode=True): + super(HorNet, self).train(mode) + self._freeze_stages() + + def _freeze_stages(self): + for i in range(0, self.frozen_stages + 1): + # freeze patch embed + m = self.downsample_layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + # freeze blocks + m = self.stages[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + if i in self.out_indices: + # freeze norm + m = getattr(self, f'norm{i + 1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + outs = [] + for i in range(4): + x = self.downsample_layers[i](x) + if self.with_cp: + x = checkpoint.checkpoint_sequential(self.stages[i], + len(self.stages[i]), x) + else: + x = self.stages[i](x) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + if self.gap_before_final_norm: + gap = x.mean([-2, -1], keepdim=True) + outs.append(norm_layer(gap).flatten(1)) + else: + # The output of LayerNorm2d may be discontiguous, which + # may cause some problem in the downstream tasks + outs.append(norm_layer(x).contiguous()) + return tuple(outs) diff --git a/mmpretrain/models/backbones/hrnet.py b/mmpretrain/models/backbones/hrnet.py new file mode 100644 index 0000000..99afa90 --- /dev/null +++ b/mmpretrain/models/backbones/hrnet.py @@ -0,0 +1,563 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule, ModuleList, Sequential +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.registry import MODELS +from .resnet import BasicBlock, Bottleneck, ResLayer, get_expansion + + +class HRModule(BaseModule): + """High-Resolution Module for HRNet. + + In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange + is in this module. + + Args: + num_branches (int): The number of branches. + block (``BaseModule``): Convolution block module. + num_blocks (tuple): The number of blocks in each branch. + The length must be equal to ``num_branches``. + num_channels (tuple): The number of base channels in each branch. + The length must be equal to ``num_branches``. + multiscale_output (bool): Whether to output multi-level features + produced by multiple branches. If False, only the first level + feature will be output. Defaults to True. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + conv_cfg (dict, optional): Dictionary to construct and config conv + layer. Defaults to None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to ``dict(type='BN')``. + block_init_cfg (dict, optional): The initialization configs of every + blocks. Defaults to None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
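+
+    A minimal usage sketch (branch channels and resolutions are
+    illustrative; ``BasicBlock`` is taken from the ResNet backbone in this
+    repo):
+
+    Example:
+        >>> import torch
+        >>> from mmpretrain.models.backbones.resnet import BasicBlock
+        >>> module = HRModule(
+        ...     num_branches=2,
+        ...     block=BasicBlock,
+        ...     num_blocks=(2, 2),
+        ...     in_channels=(32, 64),
+        ...     num_channels=(32, 64))
+        >>> inputs = [torch.rand(1, 32, 16, 16), torch.rand(1, 64, 8, 8)]
+        >>> outputs = module(inputs)
+        >>> [tuple(out.shape) for out in outputs]
+        [(1, 32, 16, 16), (1, 64, 8, 8)]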
+ """ + + def __init__(self, + num_branches, + block, + num_blocks, + in_channels, + num_channels, + multiscale_output=True, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + block_init_cfg=None, + init_cfg=None): + super(HRModule, self).__init__(init_cfg) + self.block_init_cfg = block_init_cfg + self._check_branches(num_branches, num_blocks, in_channels, + num_channels) + + self.in_channels = in_channels + self.num_branches = num_branches + + self.multiscale_output = multiscale_output + self.norm_cfg = norm_cfg + self.conv_cfg = conv_cfg + self.with_cp = with_cp + self.branches = self._make_branches(num_branches, block, num_blocks, + num_channels) + self.fuse_layers = self._make_fuse_layers() + self.relu = nn.ReLU(inplace=False) + + def _check_branches(self, num_branches, num_blocks, in_channels, + num_channels): + if num_branches != len(num_blocks): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_BLOCKS({len(num_blocks)})' + raise ValueError(error_msg) + + if num_branches != len(num_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_CHANNELS({len(num_channels)})' + raise ValueError(error_msg) + + if num_branches != len(in_channels): + error_msg = f'NUM_BRANCHES({num_branches}) ' \ + f'!= NUM_INCHANNELS({len(in_channels)})' + raise ValueError(error_msg) + + def _make_branches(self, num_branches, block, num_blocks, num_channels): + branches = [] + + for i in range(num_branches): + out_channels = num_channels[i] * get_expansion(block) + branches.append( + ResLayer( + block=block, + num_blocks=num_blocks[i], + in_channels=self.in_channels[i], + out_channels=out_channels, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + with_cp=self.with_cp, + init_cfg=self.block_init_cfg, + )) + + return ModuleList(branches) + + def _make_fuse_layers(self): + if self.num_branches == 1: + return None + + num_branches = self.num_branches + in_channels = self.in_channels + fuse_layers = [] + num_out_branches = num_branches if self.multiscale_output else 1 + for i in range(num_out_branches): + fuse_layer = [] + for j in range(num_branches): + if j > i: + # Upsample the feature maps of smaller scales. + fuse_layer.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=1, + stride=1, + padding=0, + bias=False), + build_norm_layer(self.norm_cfg, in_channels[i])[1], + nn.Upsample( + scale_factor=2**(j - i), mode='nearest'))) + elif j == i: + # Keep the feature map with the same scale. + fuse_layer.append(None) + else: + # Downsample the feature maps of larger scales. + conv_downsamples = [] + for k in range(i - j): + # Use stacked convolution layers to downsample. 
+ if k == i - j - 1: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[i], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[i])[1])) + else: + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels[j], + in_channels[j], + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + in_channels[j])[1], + nn.ReLU(inplace=False))) + fuse_layer.append(nn.Sequential(*conv_downsamples)) + fuse_layers.append(nn.ModuleList(fuse_layer)) + + return nn.ModuleList(fuse_layers) + + def forward(self, x): + """Forward function.""" + if self.num_branches == 1: + return [self.branches[0](x[0])] + + for i in range(self.num_branches): + x[i] = self.branches[i](x[i]) + + x_fuse = [] + for i in range(len(self.fuse_layers)): + y = 0 + for j in range(self.num_branches): + if i == j: + y += x[j] + else: + y += self.fuse_layers[i][j](x[j]) + x_fuse.append(self.relu(y)) + return x_fuse + + +@MODELS.register_module() +class HRNet(BaseModule): + """HRNet backbone. + + `High-Resolution Representations for Labeling Pixels and Regions + `_. + + Args: + arch (str): The preset HRNet architecture, includes 'w18', 'w30', + 'w32', 'w40', 'w44', 'w48', 'w64'. It will only be used if + extra is ``None``. Defaults to 'w32'. + extra (dict, optional): Detailed configuration for each stage of HRNet. + There must be 4 stages, the configuration for each stage must have + 5 keys: + + - num_modules (int): The number of HRModule in this stage. + - num_branches (int): The number of branches in the HRModule. + - block (str): The type of convolution block. Please choose between + 'BOTTLENECK' and 'BASIC'. + - num_blocks (tuple): The number of blocks in each branch. + The length must be equal to num_branches. + - num_channels (tuple): The number of base channels in each branch. + The length must be equal to num_branches. + + Defaults to None. + in_channels (int): Number of input image channels. Defaults to 3. + conv_cfg (dict, optional): Dictionary to construct and config conv + layer. Defaults to None. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to ``dict(type='BN')``. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Defaults to False. + multiscale_output (bool): Whether to output multi-level features + produced by multiple branches. If False, only the first level + feature will be output. Defaults to True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ + Example: + >>> import torch + >>> from mmpretrain.models import HRNet + >>> extra = dict( + >>> stage1=dict( + >>> num_modules=1, + >>> num_branches=1, + >>> block='BOTTLENECK', + >>> num_blocks=(4, ), + >>> num_channels=(64, )), + >>> stage2=dict( + >>> num_modules=1, + >>> num_branches=2, + >>> block='BASIC', + >>> num_blocks=(4, 4), + >>> num_channels=(32, 64)), + >>> stage3=dict( + >>> num_modules=4, + >>> num_branches=3, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4), + >>> num_channels=(32, 64, 128)), + >>> stage4=dict( + >>> num_modules=3, + >>> num_branches=4, + >>> block='BASIC', + >>> num_blocks=(4, 4, 4, 4), + >>> num_channels=(32, 64, 128, 256))) + >>> self = HRNet(extra, in_channels=1) + >>> self.eval() + >>> inputs = torch.rand(1, 1, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 32, 8, 8) + (1, 64, 4, 4) + (1, 128, 2, 2) + (1, 256, 1, 1) + """ + + blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck} + arch_zoo = { + # num_modules, num_branches, block, num_blocks, num_channels + 'w18': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (18, 36)], + [4, 3, 'BASIC', (4, 4, 4), (18, 36, 72)], + [3, 4, 'BASIC', (4, 4, 4, 4), (18, 36, 72, 144)]], + 'w30': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (30, 60)], + [4, 3, 'BASIC', (4, 4, 4), (30, 60, 120)], + [3, 4, 'BASIC', (4, 4, 4, 4), (30, 60, 120, 240)]], + 'w32': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (32, 64)], + [4, 3, 'BASIC', (4, 4, 4), (32, 64, 128)], + [3, 4, 'BASIC', (4, 4, 4, 4), (32, 64, 128, 256)]], + 'w40': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (40, 80)], + [4, 3, 'BASIC', (4, 4, 4), (40, 80, 160)], + [3, 4, 'BASIC', (4, 4, 4, 4), (40, 80, 160, 320)]], + 'w44': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (44, 88)], + [4, 3, 'BASIC', (4, 4, 4), (44, 88, 176)], + [3, 4, 'BASIC', (4, 4, 4, 4), (44, 88, 176, 352)]], + 'w48': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (48, 96)], + [4, 3, 'BASIC', (4, 4, 4), (48, 96, 192)], + [3, 4, 'BASIC', (4, 4, 4, 4), (48, 96, 192, 384)]], + 'w64': [[1, 1, 'BOTTLENECK', (4, ), (64, )], + [1, 2, 'BASIC', (4, 4), (64, 128)], + [4, 3, 'BASIC', (4, 4, 4), (64, 128, 256)], + [3, 4, 'BASIC', (4, 4, 4, 4), (64, 128, 256, 512)]], + } # yapf:disable + + def __init__(self, + arch='w32', + extra=None, + in_channels=3, + conv_cfg=None, + norm_cfg=dict(type='BN'), + norm_eval=False, + with_cp=False, + zero_init_residual=False, + multiscale_output=True, + init_cfg=[ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + super(HRNet, self).__init__(init_cfg) + + extra = self.parse_arch(arch, extra) + + # Assert configurations of 4 stages are in extra + for i in range(1, 5): + assert f'stage{i}' in extra, f'Missing stage{i} config in "extra".' 
+ # Assert whether the length of `num_blocks` and `num_channels` are + # equal to `num_branches` + cfg = extra[f'stage{i}'] + assert len(cfg['num_blocks']) == cfg['num_branches'] and \ + len(cfg['num_channels']) == cfg['num_branches'] + + self.extra = extra + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + self.zero_init_residual = zero_init_residual + + # -------------------- stem net -------------------- + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + out_channels=64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1) + self.add_module(self.norm1_name, norm1) + + self.conv2 = build_conv_layer( + self.conv_cfg, + in_channels=64, + out_channels=64, + kernel_size=3, + stride=2, + padding=1, + bias=False) + + self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2) + self.add_module(self.norm2_name, norm2) + self.relu = nn.ReLU(inplace=True) + + # -------------------- stage 1 -------------------- + self.stage1_cfg = self.extra['stage1'] + base_channels = self.stage1_cfg['num_channels'] + block_type = self.stage1_cfg['block'] + num_blocks = self.stage1_cfg['num_blocks'] + + block = self.blocks_dict[block_type] + num_channels = [ + channel * get_expansion(block) for channel in base_channels + ] + # To align with the original code, use layer1 instead of stage1 here. + self.layer1 = ResLayer( + block, + in_channels=64, + out_channels=num_channels[0], + num_blocks=num_blocks[0]) + pre_num_channels = num_channels + + # -------------------- stage 2~4 -------------------- + for i in range(2, 5): + stage_cfg = self.extra[f'stage{i}'] + base_channels = stage_cfg['num_channels'] + block = self.blocks_dict[stage_cfg['block']] + multiscale_output_ = multiscale_output if i == 4 else True + + num_channels = [ + channel * get_expansion(block) for channel in base_channels + ] + # The transition layer from layer1 to stage2 + transition = self._make_transition_layer(pre_num_channels, + num_channels) + self.add_module(f'transition{i-1}', transition) + stage = self._make_stage( + stage_cfg, num_channels, multiscale_output=multiscale_output_) + self.add_module(f'stage{i}', stage) + + pre_num_channels = num_channels + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: the normalization layer named "norm2" """ + return getattr(self, self.norm2_name) + + def _make_transition_layer(self, num_channels_pre_layer, + num_channels_cur_layer): + num_branches_cur = len(num_channels_cur_layer) + num_branches_pre = len(num_channels_pre_layer) + + transition_layers = [] + for i in range(num_branches_cur): + if i < num_branches_pre: + # For existing scale branches, + # add conv block when the channels are not the same. + if num_channels_cur_layer[i] != num_channels_pre_layer[i]: + transition_layers.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + num_channels_pre_layer[i], + num_channels_cur_layer[i], + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, + num_channels_cur_layer[i])[1], + nn.ReLU(inplace=True))) + else: + transition_layers.append(nn.Identity()) + else: + # For new scale branches, add stacked downsample conv blocks. + # For example, num_branches_pre = 2, for the 4th branch, add + # stacked two downsample conv blocks. 
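+                # Each conv in the stack halves the resolution; only the
+                # last one switches to the channel number of the new branch.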
+ conv_downsamples = [] + for j in range(i + 1 - num_branches_pre): + in_channels = num_channels_pre_layer[-1] + out_channels = num_channels_cur_layer[i] \ + if j == i - num_branches_pre else in_channels + conv_downsamples.append( + nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, out_channels)[1], + nn.ReLU(inplace=True))) + transition_layers.append(nn.Sequential(*conv_downsamples)) + + return nn.ModuleList(transition_layers) + + def _make_stage(self, layer_config, in_channels, multiscale_output=True): + num_modules = layer_config['num_modules'] + num_branches = layer_config['num_branches'] + num_blocks = layer_config['num_blocks'] + num_channels = layer_config['num_channels'] + block = self.blocks_dict[layer_config['block']] + + hr_modules = [] + block_init_cfg = None + if self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', val=0, override=dict(name='norm3')) + + for i in range(num_modules): + # multi_scale_output is only used for the last module + if not multiscale_output and i == num_modules - 1: + reset_multiscale_output = False + else: + reset_multiscale_output = True + + hr_modules.append( + HRModule( + num_branches, + block, + num_blocks, + in_channels, + num_channels, + reset_multiscale_output, + with_cp=self.with_cp, + norm_cfg=self.norm_cfg, + conv_cfg=self.conv_cfg, + block_init_cfg=block_init_cfg)) + + return Sequential(*hr_modules) + + def forward(self, x): + """Forward function.""" + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.relu(x) + x = self.layer1(x) + + x_list = [x] + + for i in range(2, 5): + # Apply transition + transition = getattr(self, f'transition{i-1}') + inputs = [] + for j, layer in enumerate(transition): + if j < len(x_list): + inputs.append(layer(x_list[j])) + else: + inputs.append(layer(x_list[-1])) + # Forward HRModule + stage = getattr(self, f'stage{i}') + x_list = stage(inputs) + + return tuple(x_list) + + def train(self, mode=True): + """Convert the model into training mode will keeping the normalization + layer freezed.""" + super(HRNet, self).train(mode) + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def parse_arch(self, arch, extra=None): + if extra is not None: + return extra + + assert arch in self.arch_zoo, \ + ('Invalid arch, please choose arch from ' + f'{list(self.arch_zoo.keys())}, or specify `extra` ' + 'argument directly.') + + extra = dict() + for i, stage_setting in enumerate(self.arch_zoo[arch], start=1): + extra[f'stage{i}'] = dict( + num_modules=stage_setting[0], + num_branches=stage_setting[1], + block=stage_setting[2], + num_blocks=stage_setting[3], + num_channels=stage_setting[4], + ) + + return extra diff --git a/mmpretrain/models/backbones/inception_v3.py b/mmpretrain/models/backbones/inception_v3.py new file mode 100644 index 0000000..1d6c04b --- /dev/null +++ b/mmpretrain/models/backbones/inception_v3.py @@ -0,0 +1,501 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Optional, Tuple + +import torch +import torch.nn as nn +from mmcv.cnn import build_conv_layer +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + + +class BasicConv2d(BaseModule): + """A basic convolution block including convolution, batch norm and ReLU. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + conv_cfg (dict, optional): The config of convolution layer. + Defaults to None, which means to use ``nn.Conv2d``. + init_cfg (dict, optional): The config of initialization. + Defaults to None. + **kwargs: Other keyword arguments of the convolution layer. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + conv_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None, + **kwargs) -> None: + super().__init__(init_cfg=init_cfg) + self.conv = build_conv_layer( + conv_cfg, in_channels, out_channels, bias=False, **kwargs) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + x = self.conv(x) + x = self.bn(x) + return self.relu(x) + + +class InceptionA(BaseModule): + """Type-A Inception block. + + Args: + in_channels (int): The number of input channels. + pool_features (int): The number of channels in pooling branch. + conv_cfg (dict, optional): The convolution layer config in the + :class:`BasicConv2d` block. Defaults to None. + init_cfg (dict, optional): The config of initialization. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + pool_features: int, + conv_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super().__init__(init_cfg=init_cfg) + self.branch1x1 = BasicConv2d( + in_channels, 64, kernel_size=1, conv_cfg=conv_cfg) + + self.branch5x5_1 = BasicConv2d( + in_channels, 48, kernel_size=1, conv_cfg=conv_cfg) + self.branch5x5_2 = BasicConv2d( + 48, 64, kernel_size=5, padding=2, conv_cfg=conv_cfg) + + self.branch3x3dbl_1 = BasicConv2d( + in_channels, 64, kernel_size=1, conv_cfg=conv_cfg) + self.branch3x3dbl_2 = BasicConv2d( + 64, 96, kernel_size=3, padding=1, conv_cfg=conv_cfg) + self.branch3x3dbl_3 = BasicConv2d( + 96, 96, kernel_size=3, padding=1, conv_cfg=conv_cfg) + + self.branch_pool_downsample = nn.AvgPool2d( + kernel_size=3, stride=1, padding=1) + self.branch_pool = BasicConv2d( + in_channels, pool_features, kernel_size=1, conv_cfg=conv_cfg) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + branch1x1 = self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = self.branch_pool_downsample(x) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return torch.cat(outputs, 1) + + +class InceptionB(BaseModule): + """Type-B Inception block. + + Args: + in_channels (int): The number of input channels. + conv_cfg (dict, optional): The convolution layer config in the + :class:`BasicConv2d` block. Defaults to None. + init_cfg (dict, optional): The config of initialization. + Defaults to None. 
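+
+    A minimal usage sketch (illustrative only; the input shape matches the
+    ``Mixed_6a`` stage of :class:`InceptionV3`):
+
+    Example:
+        >>> import torch
+        >>> block = InceptionB(288)
+        >>> x = torch.rand(1, 288, 35, 35)
+        >>> block(x).shape
+        torch.Size([1, 768, 17, 17])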
+ """ + + def __init__(self, + in_channels: int, + conv_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super().__init__(init_cfg=init_cfg) + self.branch3x3 = BasicConv2d( + in_channels, 384, kernel_size=3, stride=2, conv_cfg=conv_cfg) + + self.branch3x3dbl_1 = BasicConv2d( + in_channels, 64, kernel_size=1, conv_cfg=conv_cfg) + self.branch3x3dbl_2 = BasicConv2d( + 64, 96, kernel_size=3, padding=1, conv_cfg=conv_cfg) + self.branch3x3dbl_3 = BasicConv2d( + 96, 96, kernel_size=3, stride=2, conv_cfg=conv_cfg) + + self.branch_pool = nn.MaxPool2d(kernel_size=3, stride=2) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = self.branch_pool(x) + + outputs = [branch3x3, branch3x3dbl, branch_pool] + return torch.cat(outputs, 1) + + +class InceptionC(BaseModule): + """Type-C Inception block. + + Args: + in_channels (int): The number of input channels. + channels_7x7 (int): The number of channels in 7x7 convolution branch. + conv_cfg (dict, optional): The convolution layer config in the + :class:`BasicConv2d` block. Defaults to None. + init_cfg (dict, optional): The config of initialization. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + channels_7x7: int, + conv_cfg: Optional[dict] = None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.branch1x1 = BasicConv2d( + in_channels, 192, kernel_size=1, conv_cfg=conv_cfg) + + c7 = channels_7x7 + self.branch7x7_1 = BasicConv2d( + in_channels, c7, kernel_size=1, conv_cfg=conv_cfg) + self.branch7x7_2 = BasicConv2d( + c7, c7, kernel_size=(1, 7), padding=(0, 3), conv_cfg=conv_cfg) + self.branch7x7_3 = BasicConv2d( + c7, 192, kernel_size=(7, 1), padding=(3, 0), conv_cfg=conv_cfg) + + self.branch7x7dbl_1 = BasicConv2d( + in_channels, c7, kernel_size=1, conv_cfg=conv_cfg) + self.branch7x7dbl_2 = BasicConv2d( + c7, c7, kernel_size=(7, 1), padding=(3, 0), conv_cfg=conv_cfg) + self.branch7x7dbl_3 = BasicConv2d( + c7, c7, kernel_size=(1, 7), padding=(0, 3), conv_cfg=conv_cfg) + self.branch7x7dbl_4 = BasicConv2d( + c7, c7, kernel_size=(7, 1), padding=(3, 0), conv_cfg=conv_cfg) + self.branch7x7dbl_5 = BasicConv2d( + c7, 192, kernel_size=(1, 7), padding=(0, 3), conv_cfg=conv_cfg) + + self.branch_pool_downsample = nn.AvgPool2d( + kernel_size=3, stride=1, padding=1) + self.branch_pool = BasicConv2d( + in_channels, 192, kernel_size=1, conv_cfg=conv_cfg) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = self.branch_pool_downsample(x) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return torch.cat(outputs, 1) + + +class InceptionD(BaseModule): + """Type-D Inception block. + + Args: + in_channels (int): The number of input channels. + conv_cfg (dict, optional): The convolution layer config in the + :class:`BasicConv2d` block. Defaults to None. 
+ init_cfg (dict, optional): The config of initialization. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + conv_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super().__init__(init_cfg=init_cfg) + self.branch3x3_1 = BasicConv2d( + in_channels, 192, kernel_size=1, conv_cfg=conv_cfg) + self.branch3x3_2 = BasicConv2d( + 192, 320, kernel_size=3, stride=2, conv_cfg=conv_cfg) + + self.branch7x7x3_1 = BasicConv2d( + in_channels, 192, kernel_size=1, conv_cfg=conv_cfg) + self.branch7x7x3_2 = BasicConv2d( + 192, 192, kernel_size=(1, 7), padding=(0, 3), conv_cfg=conv_cfg) + self.branch7x7x3_3 = BasicConv2d( + 192, 192, kernel_size=(7, 1), padding=(3, 0), conv_cfg=conv_cfg) + self.branch7x7x3_4 = BasicConv2d( + 192, 192, kernel_size=3, stride=2, conv_cfg=conv_cfg) + + self.branch_pool = nn.MaxPool2d(kernel_size=3, stride=2) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + branch3x3 = self.branch3x3_1(x) + branch3x3 = self.branch3x3_2(branch3x3) + + branch7x7x3 = self.branch7x7x3_1(x) + branch7x7x3 = self.branch7x7x3_2(branch7x7x3) + branch7x7x3 = self.branch7x7x3_3(branch7x7x3) + branch7x7x3 = self.branch7x7x3_4(branch7x7x3) + + branch_pool = self.branch_pool(x) + outputs = [branch3x3, branch7x7x3, branch_pool] + return torch.cat(outputs, 1) + + +class InceptionE(BaseModule): + """Type-E Inception block. + + Args: + in_channels (int): The number of input channels. + conv_cfg (dict, optional): The convolution layer config in the + :class:`BasicConv2d` block. Defaults to None. + init_cfg (dict, optional): The config of initialization. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + conv_cfg: Optional[dict] = None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.branch1x1 = BasicConv2d( + in_channels, 320, kernel_size=1, conv_cfg=conv_cfg) + + self.branch3x3_1 = BasicConv2d( + in_channels, 384, kernel_size=1, conv_cfg=conv_cfg) + self.branch3x3_2a = BasicConv2d( + 384, 384, kernel_size=(1, 3), padding=(0, 1), conv_cfg=conv_cfg) + self.branch3x3_2b = BasicConv2d( + 384, 384, kernel_size=(3, 1), padding=(1, 0), conv_cfg=conv_cfg) + + self.branch3x3dbl_1 = BasicConv2d( + in_channels, 448, kernel_size=1, conv_cfg=conv_cfg) + self.branch3x3dbl_2 = BasicConv2d( + 448, 384, kernel_size=3, padding=1, conv_cfg=conv_cfg) + self.branch3x3dbl_3a = BasicConv2d( + 384, 384, kernel_size=(1, 3), padding=(0, 1), conv_cfg=conv_cfg) + self.branch3x3dbl_3b = BasicConv2d( + 384, 384, kernel_size=(3, 1), padding=(1, 0), conv_cfg=conv_cfg) + + self.branch_pool_downsample = nn.AvgPool2d( + kernel_size=3, stride=1, padding=1) + self.branch_pool = BasicConv2d( + in_channels, 192, kernel_size=1, conv_cfg=conv_cfg) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + branch_pool = self.branch_pool_downsample(x) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return torch.cat(outputs, 1) + + +class InceptionAux(BaseModule): + """The Inception block for the auxiliary classification branch. 
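+
+    It takes the 768-channel feature map produced after ``Mixed_6e``,
+    downsamples it and predicts class logits. In :class:`InceptionV3` the
+    auxiliary branch is only evaluated in training mode when
+    ``aux_logits=True``.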
+ + Args: + in_channels (int): The number of input channels. + num_classes (int): The number of categroies. + conv_cfg (dict, optional): The convolution layer config in the + :class:`BasicConv2d` block. Defaults to None. + init_cfg (dict, optional): The config of initialization. + Defaults to use trunc normal with ``std=0.01`` for Conv2d layers + and use trunc normal with ``std=0.001`` for Linear layers.. + """ + + def __init__(self, + in_channels: int, + num_classes: int, + conv_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = [ + dict(type='TruncNormal', layer='Conv2d', std=0.01), + dict(type='TruncNormal', layer='Linear', std=0.001) + ]): + super().__init__(init_cfg=init_cfg) + self.downsample = nn.AvgPool2d(kernel_size=5, stride=3) + self.conv0 = BasicConv2d( + in_channels, 128, kernel_size=1, conv_cfg=conv_cfg) + self.conv1 = BasicConv2d(128, 768, kernel_size=5, conv_cfg=conv_cfg) + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(768, num_classes) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + # N x 768 x 17 x 17 + x = self.downsample(x) + # N x 768 x 5 x 5 + x = self.conv0(x) + # N x 128 x 5 x 5 + x = self.conv1(x) + # N x 768 x 1 x 1 + # Adaptive average pooling + x = self.gap(x) + # N x 768 x 1 x 1 + x = torch.flatten(x, 1) + # N x 768 + x = self.fc(x) + # N x 1000 + return x + + +@MODELS.register_module() +class InceptionV3(BaseBackbone): + """Inception V3 backbone. + + A PyTorch implementation of `Rethinking the Inception Architecture for + Computer Vision `_ + + This implementation is modified from + https://github.com/pytorch/vision/blob/main/torchvision/models/inception.py. + Licensed under the BSD 3-Clause License. + + Args: + num_classes (int): The number of categroies. Defaults to 1000. + aux_logits (bool): Whether to enable the auxiliary branch. If False, + the auxiliary logits output will be None. Defaults to False. + dropout (float): Dropout rate. Defaults to 0.5. + init_cfg (dict, optional): The config of initialization. Defaults + to use trunc normal with ``std=0.1`` for all Conv2d and Linear + layers and constant with ``val=1`` for all BatchNorm2d layers. + + Example: + >>> import torch + >>> from mmpretrain.models import build_backbone + >>> + >>> inputs = torch.rand(2, 3, 299, 299) + >>> cfg = dict(type='InceptionV3', num_classes=100) + >>> backbone = build_backbone(cfg) + >>> aux_out, out = backbone(inputs) + >>> # The auxiliary branch is disabled by default. 
+ >>> assert aux_out is None + >>> print(out.shape) + torch.Size([2, 100]) + >>> cfg = dict(type='InceptionV3', num_classes=100, aux_logits=True) + >>> backbone = build_backbone(cfg) + >>> aux_out, out = backbone(inputs) + >>> print(aux_out.shape, out.shape) + torch.Size([2, 100]) torch.Size([2, 100]) + """ + + def __init__( + self, + num_classes: int = 1000, + aux_logits: bool = False, + dropout: float = 0.5, + init_cfg: Optional[dict] = [ + dict(type='TruncNormal', layer=['Conv2d', 'Linear'], std=0.1), + dict(type='Constant', layer='BatchNorm2d', val=1) + ], + ) -> None: + super().__init__(init_cfg=init_cfg) + + self.aux_logits = aux_logits + self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2) + self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3) + self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1) + self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1) + self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3) + self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Mixed_5b = InceptionA(192, pool_features=32) + self.Mixed_5c = InceptionA(256, pool_features=64) + self.Mixed_5d = InceptionA(288, pool_features=64) + self.Mixed_6a = InceptionB(288) + self.Mixed_6b = InceptionC(768, channels_7x7=128) + self.Mixed_6c = InceptionC(768, channels_7x7=160) + self.Mixed_6d = InceptionC(768, channels_7x7=160) + self.Mixed_6e = InceptionC(768, channels_7x7=192) + self.AuxLogits: Optional[nn.Module] = None + if aux_logits: + self.AuxLogits = InceptionAux(768, num_classes) + self.Mixed_7a = InceptionD(768) + self.Mixed_7b = InceptionE(1280) + self.Mixed_7c = InceptionE(2048) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.dropout = nn.Dropout(p=dropout) + self.fc = nn.Linear(2048, num_classes) + + def forward( + self, + x: torch.Tensor) -> Tuple[Optional[torch.Tensor], torch.Tensor]: + """Forward function.""" + # N x 3 x 299 x 299 + x = self.Conv2d_1a_3x3(x) + # N x 32 x 149 x 149 + x = self.Conv2d_2a_3x3(x) + # N x 32 x 147 x 147 + x = self.Conv2d_2b_3x3(x) + # N x 64 x 147 x 147 + x = self.maxpool1(x) + # N x 64 x 73 x 73 + x = self.Conv2d_3b_1x1(x) + # N x 80 x 73 x 73 + x = self.Conv2d_4a_3x3(x) + # N x 192 x 71 x 71 + x = self.maxpool2(x) + # N x 192 x 35 x 35 + x = self.Mixed_5b(x) + # N x 256 x 35 x 35 + x = self.Mixed_5c(x) + # N x 288 x 35 x 35 + x = self.Mixed_5d(x) + # N x 288 x 35 x 35 + x = self.Mixed_6a(x) + # N x 768 x 17 x 17 + x = self.Mixed_6b(x) + # N x 768 x 17 x 17 + x = self.Mixed_6c(x) + # N x 768 x 17 x 17 + x = self.Mixed_6d(x) + # N x 768 x 17 x 17 + x = self.Mixed_6e(x) + # N x 768 x 17 x 17 + aux: Optional[torch.Tensor] = None + if self.aux_logits and self.training: + aux = self.AuxLogits(x) + # N x 768 x 17 x 17 + x = self.Mixed_7a(x) + # N x 1280 x 8 x 8 + x = self.Mixed_7b(x) + # N x 2048 x 8 x 8 + x = self.Mixed_7c(x) + # N x 2048 x 8 x 8 + # Adaptive average pooling + x = self.avgpool(x) + # N x 2048 x 1 x 1 + x = self.dropout(x) + # N x 2048 x 1 x 1 + x = torch.flatten(x, 1) + # N x 2048 + x = self.fc(x) + # N x 1000 (num_classes) + return aux, x diff --git a/mmpretrain/models/backbones/lenet.py b/mmpretrain/models/backbones/lenet.py new file mode 100644 index 0000000..8e423c0 --- /dev/null +++ b/mmpretrain/models/backbones/lenet.py @@ -0,0 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
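+#
+# A minimal usage sketch of the LeNet5 backbone defined below (assuming the
+# standard 32x32 single-channel input):
+#
+#     import torch
+#     model = LeNet5(num_classes=10)
+#     cls_score = model(torch.rand(2, 1, 32, 32))[0]  # shape: (2, 10)
+#
+# With the default ``num_classes=-1`` the classifier head is omitted and the
+# backbone returns the 120-channel convolutional features instead.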
+import torch.nn as nn + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + + +@MODELS.register_module() +class LeNet5(BaseBackbone): + """`LeNet5 `_ backbone. + + The input for LeNet-5 is a 32×32 grayscale image. + + Args: + num_classes (int): number of classes for classification. + The default value is -1, which uses the backbone as + a feature extractor without the top classifier. + """ + + def __init__(self, num_classes=-1): + super(LeNet5, self).__init__() + self.num_classes = num_classes + self.features = nn.Sequential( + nn.Conv2d(1, 6, kernel_size=5, stride=1), nn.Tanh(), + nn.AvgPool2d(kernel_size=2), + nn.Conv2d(6, 16, kernel_size=5, stride=1), nn.Tanh(), + nn.AvgPool2d(kernel_size=2), + nn.Conv2d(16, 120, kernel_size=5, stride=1), nn.Tanh()) + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Linear(120, 84), + nn.Tanh(), + nn.Linear(84, num_classes), + ) + + def forward(self, x): + + x = self.features(x) + if self.num_classes > 0: + x = self.classifier(x.squeeze()) + + return (x, ) diff --git a/mmpretrain/models/backbones/levit.py b/mmpretrain/models/backbones/levit.py new file mode 100644 index 0000000..5f7aa32 --- /dev/null +++ b/mmpretrain/models/backbones/levit.py @@ -0,0 +1,522 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import itertools + +import torch +import torch.nn as nn +from mmcv.cnn import build_activation_layer, fuse_conv_bn +from mmcv.cnn.bricks import DropPath +from mmengine.model import BaseModule, ModuleList, Sequential + +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.registry import MODELS +from ..utils import build_norm_layer + + +class HybridBackbone(BaseModule): + + def __init__( + self, + embed_dim, + kernel_size=3, + stride=2, + pad=1, + dilation=1, + groups=1, + act_cfg=dict(type='HSwish'), + conv_cfg=None, + norm_cfg=dict(type='BN'), + init_cfg=None, + ): + super(HybridBackbone, self).__init__(init_cfg=init_cfg) + + self.input_channels = [ + 3, embed_dim // 8, embed_dim // 4, embed_dim // 2 + ] + self.output_channels = [ + embed_dim // 8, embed_dim // 4, embed_dim // 2, embed_dim + ] + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.patch_embed = Sequential() + + for i in range(len(self.input_channels)): + conv_bn = ConvolutionBatchNorm( + self.input_channels[i], + self.output_channels[i], + kernel_size=kernel_size, + stride=stride, + pad=pad, + dilation=dilation, + groups=groups, + norm_cfg=norm_cfg, + ) + self.patch_embed.add_module('%d' % (2 * i), conv_bn) + if i < len(self.input_channels) - 1: + self.patch_embed.add_module('%d' % (i * 2 + 1), + build_activation_layer(act_cfg)) + + def forward(self, x): + x = self.patch_embed(x) + return x + + +class ConvolutionBatchNorm(BaseModule): + + def __init__( + self, + in_channel, + out_channel, + kernel_size=3, + stride=2, + pad=1, + dilation=1, + groups=1, + norm_cfg=dict(type='BN'), + ): + super(ConvolutionBatchNorm, self).__init__() + self.conv = nn.Conv2d( + in_channel, + out_channel, + kernel_size=kernel_size, + stride=stride, + padding=pad, + dilation=dilation, + groups=groups, + bias=False) + self.bn = build_norm_layer(norm_cfg, out_channel) + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + return x + + @torch.no_grad() + def fuse(self): + return fuse_conv_bn(self).conv + + +class LinearBatchNorm(BaseModule): + + def __init__(self, in_feature, out_feature, norm_cfg=dict(type='BN1d')): + super(LinearBatchNorm, self).__init__() + self.linear = nn.Linear(in_feature, out_feature, 
bias=False) + self.bn = build_norm_layer(norm_cfg, out_feature) + + def forward(self, x): + x = self.linear(x) + x = self.bn(x.flatten(0, 1)).reshape_as(x) + return x + + @torch.no_grad() + def fuse(self): + w = self.bn.weight / (self.bn.running_var + self.bn.eps)**0.5 + w = self.linear.weight * w[:, None] + b = self.bn.bias - self.bn.running_mean * self.bn.weight / \ + (self.bn.running_var + self.bn.eps) ** 0.5 + + factory_kwargs = { + 'device': self.linear.weight.device, + 'dtype': self.linear.weight.dtype + } + bias = nn.Parameter( + torch.empty(self.linear.out_features, **factory_kwargs)) + self.linear.register_parameter('bias', bias) + self.linear.weight.data.copy_(w) + self.linear.bias.data.copy_(b) + return self.linear + + +class Residual(BaseModule): + + def __init__(self, block, drop_path_rate=0.): + super(Residual, self).__init__() + self.block = block + if drop_path_rate > 0: + self.drop_path = DropPath(drop_path_rate) + else: + self.drop_path = nn.Identity() + + def forward(self, x): + x = x + self.drop_path(self.block(x)) + return x + + +class Attention(BaseModule): + + def __init__( + self, + dim, + key_dim, + num_heads=8, + attn_ratio=4, + act_cfg=dict(type='HSwish'), + resolution=14, + ): + super(Attention, self).__init__() + self.num_heads = num_heads + self.scale = key_dim**-0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + h = self.dh + nh_kd * 2 + self.qkv = LinearBatchNorm(dim, h) + self.proj = nn.Sequential( + build_activation_layer(act_cfg), LinearBatchNorm(self.dh, dim)) + + points = list(itertools.product(range(resolution), range(resolution))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = torch.nn.Parameter( + torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', + torch.LongTensor(idxs).view(N, N)) + + @torch.no_grad() + def train(self, mode=True): + """change the mode of model.""" + super(Attention, self).train(mode) + if mode and hasattr(self, 'ab'): + del self.ab + else: + self.ab = self.attention_biases[:, self.attention_bias_idxs] + + def forward(self, x): # x (B,N,C) + B, N, C = x.shape # 2 196 128 + qkv = self.qkv(x) # 2 196 128 + q, k, v = qkv.view(B, N, self.num_heads, -1).split( + [self.key_dim, self.key_dim, self.d], + dim=3) # q 2 196 4 16 ; k 2 196 4 16; v 2 196 4 32 + q = q.permute(0, 2, 1, 3) # 2 4 196 16 + k = k.permute(0, 2, 1, 3) + v = v.permute(0, 2, 1, 3) + + attn = ((q @ k.transpose(-2, -1)) * + self.scale # 2 4 196 16 * 2 4 16 196 -> 2 4 196 196 + + (self.attention_biases[:, self.attention_bias_idxs] + if self.training else self.ab)) + attn = attn.softmax(dim=-1) # 2 4 196 196 -> 2 4 196 196 + x = (attn @ v).transpose(1, 2).reshape( + B, N, + self.dh) # 2 4 196 196 * 2 4 196 32 -> 2 4 196 32 -> 2 196 128 + x = self.proj(x) + return x + + +class MLP(nn.Sequential): + + def __init__(self, embed_dim, mlp_ratio, act_cfg=dict(type='HSwish')): + super(MLP, self).__init__() + h = embed_dim * mlp_ratio + self.linear1 = LinearBatchNorm(embed_dim, h) + self.activation = build_activation_layer(act_cfg) + self.linear2 = LinearBatchNorm(h, embed_dim) + + def forward(self, x): + x = self.linear1(x) + x = self.activation(x) 
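+ # project back from the hidden dimension to the embedding dimension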
+ x = self.linear2(x) + return x + + +class Subsample(BaseModule): + + def __init__(self, stride, resolution): + super(Subsample, self).__init__() + self.stride = stride + self.resolution = resolution + + def forward(self, x): + B, _, C = x.shape + # B, N, C -> B, H, W, C + x = x.view(B, self.resolution, self.resolution, C) + x = x[:, ::self.stride, ::self.stride] + x = x.reshape(B, -1, C) # B, H', W', C -> B, N', C + return x + + +class AttentionSubsample(nn.Sequential): + + def __init__(self, + in_dim, + out_dim, + key_dim, + num_heads=8, + attn_ratio=2, + act_cfg=dict(type='HSwish'), + stride=2, + resolution=14): + super(AttentionSubsample, self).__init__() + self.num_heads = num_heads + self.scale = key_dim**-0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * self.num_heads + self.attn_ratio = attn_ratio + self.sub_resolution = (resolution - 1) // stride + 1 + h = self.dh + nh_kd + self.kv = LinearBatchNorm(in_dim, h) + + self.q = nn.Sequential( + Subsample(stride, resolution), LinearBatchNorm(in_dim, nh_kd)) + self.proj = nn.Sequential( + build_activation_layer(act_cfg), LinearBatchNorm(self.dh, out_dim)) + + self.stride = stride + self.resolution = resolution + points = list(itertools.product(range(resolution), range(resolution))) + sub_points = list( + itertools.product( + range(self.sub_resolution), range(self.sub_resolution))) + N = len(points) + N_sub = len(sub_points) + attention_offsets = {} + idxs = [] + for p1 in sub_points: + for p2 in points: + size = 1 + offset = (abs(p1[0] * stride - p2[0] + (size - 1) / 2), + abs(p1[1] * stride - p2[1] + (size - 1) / 2)) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = torch.nn.Parameter( + torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer('attention_bias_idxs', + torch.LongTensor(idxs).view(N_sub, N)) + + @torch.no_grad() + def train(self, mode=True): + super(AttentionSubsample, self).train(mode) + if mode and hasattr(self, 'ab'): + del self.ab + else: + self.ab = self.attention_biases[:, self.attention_bias_idxs] + + def forward(self, x): + B, N, C = x.shape + k, v = self.kv(x).view(B, N, self.num_heads, + -1).split([self.key_dim, self.d], dim=3) + k = k.permute(0, 2, 1, 3) # BHNC + v = v.permute(0, 2, 1, 3) # BHNC + q = self.q(x).view(B, self.sub_resolution**2, self.num_heads, + self.key_dim).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) * self.scale + \ + (self.attention_biases[:, self.attention_bias_idxs] + if self.training else self.ab) + attn = attn.softmax(dim=-1) + + x = (attn @ v).transpose(1, 2).reshape(B, -1, self.dh) + x = self.proj(x) + return x + + +@MODELS.register_module() +class LeViT(BaseBackbone): + """LeViT backbone. + + A PyTorch implementation of `LeViT: A Vision Transformer in ConvNet's + Clothing for Faster Inference `_ + + Modified from the official implementation: + https://github.com/facebookresearch/LeViT + + Args: + arch (str | dict): LeViT architecture. + + If use string, choose from '128s', '128', '192', '256' and '384'. + If use dict, it should have below keys: + + - **embed_dims** (List[int]): The embed dimensions of each stage. + - **key_dims** (List[int]): The embed dimensions of the key in the + attention layers of each stage. + - **num_heads** (List[int]): The number of heads in each stage. + - **depths** (List[int]): The number of blocks in each stage. 
+ + img_size (int): Input image size + patch_size (int | tuple): The patch size. Deault to 16 + attn_ratio (int): Ratio of hidden dimensions of the value in attention + layers. Defaults to 2. + mlp_ratio (int): Ratio of hidden dimensions in MLP layers. + Defaults to 2. + act_cfg (dict): The config of activation functions. + Defaults to ``dict(type='HSwish')``. + hybrid_backbone (callable): A callable object to build the patch embed + module. Defaults to use :class:`HybridBackbone`. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + deploy (bool): Whether to switch the model structure to + deployment mode. Defaults to False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + arch_zoo = { + '128s': { + 'embed_dims': [128, 256, 384], + 'num_heads': [4, 6, 8], + 'depths': [2, 3, 4], + 'key_dims': [16, 16, 16], + }, + '128': { + 'embed_dims': [128, 256, 384], + 'num_heads': [4, 8, 12], + 'depths': [4, 4, 4], + 'key_dims': [16, 16, 16], + }, + '192': { + 'embed_dims': [192, 288, 384], + 'num_heads': [3, 5, 6], + 'depths': [4, 4, 4], + 'key_dims': [32, 32, 32], + }, + '256': { + 'embed_dims': [256, 384, 512], + 'num_heads': [4, 6, 8], + 'depths': [4, 4, 4], + 'key_dims': [32, 32, 32], + }, + '384': { + 'embed_dims': [384, 512, 768], + 'num_heads': [6, 9, 12], + 'depths': [4, 4, 4], + 'key_dims': [32, 32, 32], + }, + } + + def __init__(self, + arch, + img_size=224, + patch_size=16, + attn_ratio=2, + mlp_ratio=2, + act_cfg=dict(type='HSwish'), + hybrid_backbone=HybridBackbone, + out_indices=-1, + deploy=False, + drop_path_rate=0, + init_cfg=None): + super(LeViT, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch = self.arch_zoo[arch] + elif isinstance(arch, dict): + essential_keys = {'embed_dim', 'num_heads', 'depth', 'key_dim'} + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch = arch + else: + raise TypeError('Expect "arch" to be either a string ' + f'or a dict, got {type(arch)}') + + self.embed_dims = self.arch['embed_dims'] + self.num_heads = self.arch['num_heads'] + self.key_dims = self.arch['key_dims'] + self.depths = self.arch['depths'] + self.num_stages = len(self.embed_dims) + self.drop_path_rate = drop_path_rate + + self.patch_embed = hybrid_backbone(self.embed_dims[0]) + + self.resolutions = [] + resolution = img_size // patch_size + self.stages = ModuleList() + for i, (embed_dims, key_dims, depth, num_heads) in enumerate( + zip(self.embed_dims, self.key_dims, self.depths, + self.num_heads)): + blocks = [] + if i > 0: + downsample = AttentionSubsample( + in_dim=self.embed_dims[i - 1], + out_dim=embed_dims, + key_dim=key_dims, + num_heads=self.embed_dims[i - 1] // key_dims, + attn_ratio=4, + act_cfg=act_cfg, + stride=2, + resolution=resolution) + blocks.append(downsample) + resolution = downsample.sub_resolution + if mlp_ratio > 0: # mlp_ratio + blocks.append( + Residual( + MLP(embed_dims, mlp_ratio, act_cfg=act_cfg), + self.drop_path_rate)) + self.resolutions.append(resolution) + for _ in range(depth): + blocks.append( + Residual( + Attention( + embed_dims, + key_dims, + num_heads, + attn_ratio=attn_ratio, + act_cfg=act_cfg, + resolution=resolution, + ), self.drop_path_rate)) + if mlp_ratio > 0: + blocks.append( + Residual( + MLP(embed_dims, mlp_ratio, 
act_cfg=act_cfg), + self.drop_path_rate)) + + self.stages.append(Sequential(*blocks)) + + if isinstance(out_indices, int): + out_indices = [out_indices] + elif isinstance(out_indices, tuple): + out_indices = list(out_indices) + elif not isinstance(out_indices, list): + raise TypeError('"out_indices" must by a list, tuple or int, ' + f'get {type(out_indices)} instead.') + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_stages + index + assert 0 <= out_indices[i] < self.num_stages, \ + f'Invalid out_indices {index}.' + self.out_indices = out_indices + + self.deploy = False + if deploy: + self.switch_to_deploy() + + def switch_to_deploy(self): + if self.deploy: + return + fuse_parameters(self) + self.deploy = True + + def forward(self, x): + x = self.patch_embed(x) + x = x.flatten(2).transpose(1, 2) # B, C, H, W -> B, L, C + outs = [] + for i, stage in enumerate(self.stages): + x = stage(x) + B, _, C = x.shape + if i in self.out_indices: + out = x.reshape(B, self.resolutions[i], self.resolutions[i], C) + out = out.permute(0, 3, 1, 2).contiguous() + outs.append(out) + + return tuple(outs) + + +def fuse_parameters(module): + for child_name, child in module.named_children(): + if hasattr(child, 'fuse'): + setattr(module, child_name, child.fuse()) + else: + fuse_parameters(child) diff --git a/mmpretrain/models/backbones/mixmim.py b/mmpretrain/models/backbones/mixmim.py new file mode 100644 index 0000000..2c67aa0 --- /dev/null +++ b/mmpretrain/models/backbones/mixmim.py @@ -0,0 +1,533 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import torch +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.drop import DropPath +from mmcv.cnn.bricks.transformer import PatchEmbed, PatchMerging +from mmengine.model import BaseModule +from torch import nn +from torch.utils.checkpoint import checkpoint + +from mmpretrain.registry import MODELS +from ..utils import WindowMSA, to_2tuple +from .base_backbone import BaseBackbone +from .vision_transformer import TransformerEncoderLayer + + +class MixMIMWindowAttention(WindowMSA): + """MixMIM Window Attention. + + Compared with WindowMSA, we add some modifications + in ``forward`` to meet the requirement of MixMIM during + pretraining. + + Implements one windown attention in MixMIM. + Args: + embed_dims (int): The feature dimension. + window_size (list): The height and width of the window. + num_heads (int): The number of head in attention. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + attn_drop_rate (float): attention drop rate. + Defaults to 0. + proj_drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
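+
+ Note:
+ When a mix ``mask`` is passed to ``forward``, a large negative bias is
+ added to the attention logits between tokens that come from different
+ mixed images, so each token only attends to tokens of its own image.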
+ """ + + def __init__(self, + embed_dims, + window_size, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + init_cfg=None): + + super().__init__( + embed_dims=embed_dims, + window_size=window_size, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop_rate, + proj_drop=proj_drop_rate, + init_cfg=init_cfg) + + def forward(self, x, mask=None): + + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + mask = mask.reshape(B_, 1, 1, N) + mask_new = mask * mask.transpose( + 2, 3) + (1 - mask) * (1 - mask).transpose(2, 3) + mask_new = 1 - mask_new + + if mask_new.dtype == torch.float16: + attn = attn - 65500 * mask_new + else: + attn = attn - 1e30 * mask_new + + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class MixMIMBlock(TransformerEncoderLayer): + """MixMIM Block. Implements one block in MixMIM. + + Args: + embed_dims (int): The feature dimension. + input_resolution (tuple): Input resolution of this layer. + num_heads (int): The number of head in attention, + window_size (list): The height and width of the window. + mlp_ratio (int): The MLP ration in FFN. + num_fcs (int): The number of linear layers in a block. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + proj_drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + attn_drop_rate (float): attention drop rate. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. + Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + embed_dims, + input_resolution, + num_heads, + window_size=7, + mlp_ratio=4., + num_fcs=2, + qkv_bias=True, + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + + super().__init__( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=int(mlp_ratio * embed_dims), + drop_rate=proj_drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + num_fcs=num_fcs, + qkv_bias=qkv_bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + init_cfg=init_cfg) + + self.embed_dims = embed_dims + self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.mlp_ratio = mlp_ratio + + if min(self.input_resolution) <= self.window_size: + self.window_size = min(self.input_resolution) + + self.attn = MixMIMWindowAttention( + embed_dims=embed_dims, + window_size=to_2tuple(self.window_size), + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop_rate=attn_drop_rate, + proj_drop_rate=proj_drop_rate) + + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0. else nn.Identity() + + @staticmethod + def window_reverse(windows, H, W, window_size): + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, + window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + @staticmethod + def window_partition(x, window_size): + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, + window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() + windows = windows.view(-1, window_size, window_size, C) + return windows + + def forward(self, x, attn_mask=None): + H, W = self.input_resolution + B, L, C = x.shape + + shortcut = x + x = self.ln1(x) + x = x.view(B, H, W, C) + + # partition windows + x_windows = self.window_partition( + x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, + C) # nW*B, window_size*window_size, C + if attn_mask is not None: + attn_mask = attn_mask.repeat(B, 1, 1) # B, N, 1 + attn_mask = attn_mask.view(B, H, W, 1) + attn_mask = self.window_partition(attn_mask, self.window_size) + attn_mask = attn_mask.view(-1, self.window_size * self.window_size, + 1) + + # W-MSA/SW-MSA + attn_windows = self.attn( + x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, + self.window_size, C) + x = self.window_reverse(attn_windows, H, W, + self.window_size) # B H' W' C + + x = x.view(B, H * W, C) + + x = shortcut + self.drop_path(x) + + x = self.ffn(self.norm2(x), identity=x) # ffn contains DropPath + + return x + + +class MixMIMLayer(BaseModule): + """Implements one MixMIM layer, which may contains several MixMIM blocks. + + Args: + embed_dims (int): The feature dimension. + input_resolution (tuple): Input resolution of this layer. + depth (int): The number of blocks in this layer. + num_heads (int): The number of head in attention, + window_size (list): The height and width of the window. + mlp_ratio (int): The MLP ration in FFN. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + proj_drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + attn_drop_rate (float): attention drop rate. + Defaults to 0. 
+ drop_path_rate (float): stochastic depth rate. + Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + downsample (class, optional): Downsample the output of blocks b + y patch merging.Defaults to None. + use_checkpoint (bool): Whether use the checkpoint to + reduce GPU memory cost. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + embed_dims: int, + input_resolution: int, + depth: int, + num_heads: int, + window_size: int, + mlp_ratio=4., + qkv_bias=True, + proj_drop_rate=0., + attn_drop_rate=0., + drop_path_rate=[0.], + norm_cfg=dict(type='LN'), + downsample=None, + use_checkpoint=False, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.embed_dims = embed_dims + self.input_resolution = input_resolution + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList() + for i in range(depth): + self.blocks.append( + MixMIMBlock( + embed_dims=embed_dims, + input_resolution=input_resolution, + num_heads=num_heads, + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + proj_drop_rate=proj_drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate[i], + norm_cfg=norm_cfg)) + # patch merging layer + if downsample is not None: + self.downsample = downsample( + in_channels=embed_dims, + out_channels=2 * embed_dims, + norm_cfg=norm_cfg) + else: + self.downsample = None + + def forward(self, x, attn_mask=None): + for blk in self.blocks: + if self.use_checkpoint: + x = checkpoint(blk, x, attn_mask) + else: + x = blk(x, attn_mask=attn_mask) + if self.downsample is not None: + x, _ = self.downsample(x, self.input_resolution) + return x + + def extra_repr(self) -> str: + return f'dim={self.embed_dims}, \ + input_resolution={self.input_resolution}, depth={self.depth}' + + +@MODELS.register_module() +class MixMIMTransformer(BaseBackbone): + """MixMIM backbone. + + A PyTorch implement of : ` MixMIM: Mixed and Masked Image + Modeling for Efficient Visual Representation Learning + `_ + + Args: + arch (str | dict): MixMIM architecture. If use string, + choose from 'base','large' and 'huge'. + If use dict, it should have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **depths** (int): The number of transformer encoder layers. + - **num_heads** (int): The number of heads in attention modules. + + Defaults to 'base'. + mlp_ratio (int): The mlp ratio in FFN. Defaults to 4. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to mlp_ratio + the most common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + in_channels (int): The num of input channels. Defaults to 3. + window_size (list): The height and width of the window. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + patch_cfg (dict): Extra config dict for patch embedding. + Defaults to an empty dict. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + attn_drop_rate (float): attention drop rate. Defaults to 0. + use_checkpoint (bool): Whether use the checkpoint to + reduce GPU memory cost. 
+ init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + arch_zoo = { + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims': 128, + 'depths': [2, 2, 18, 2], + 'num_heads': [4, 8, 16, 32] + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 192, + 'depths': [2, 2, 18, 2], + 'num_heads': [6, 12, 24, 48] + }), + **dict.fromkeys( + ['h', 'huge'], { + 'embed_dims': 352, + 'depths': [2, 2, 18, 2], + 'num_heads': [11, 22, 44, 88] + }), + } + + def __init__( + self, + arch='base', + mlp_ratio=4, + img_size=224, + patch_size=4, + in_channels=3, + window_size=[14, 14, 14, 7], + qkv_bias=True, + patch_cfg=dict(), + norm_cfg=dict(type='LN'), + drop_rate=0.0, + drop_path_rate=0.0, + attn_drop_rate=0.0, + use_checkpoint=False, + init_cfg: Optional[dict] = None, + ) -> None: + super(MixMIMTransformer, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = {'embed_dims', 'depths', 'num_heads'} + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.depths = self.arch_settings['depths'] + self.num_heads = self.arch_settings['num_heads'] + + self.encoder_stride = 32 + + self.num_layers = len(self.depths) + self.qkv_bias = qkv_bias + self.drop_rate = drop_rate + self.attn_drop_rate = attn_drop_rate + self.use_checkpoint = use_checkpoint + self.mlp_ratio = mlp_ratio + self.window_size = window_size + + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + norm_cfg=dict(type='LN'), + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + + self.dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, sum(self.depths)) + ] + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + self.layers.append( + MixMIMLayer( + embed_dims=int(self.embed_dims * 2**i_layer), + input_resolution=(self.patch_resolution[0] // (2**i_layer), + self.patch_resolution[1] // + (2**i_layer)), + depth=self.depths[i_layer], + num_heads=self.num_heads[i_layer], + window_size=self.window_size[i_layer], + mlp_ratio=self.mlp_ratio, + qkv_bias=self.qkv_bias, + proj_drop_rate=self.drop_rate, + attn_drop_rate=self.attn_drop_rate, + drop_path_rate=self.dpr[sum(self.depths[:i_layer] + ):sum(self.depths[:i_layer + + 1])], + norm_cfg=norm_cfg, + downsample=PatchMerging if + (i_layer < self.num_layers - 1) else None, + use_checkpoint=self.use_checkpoint)) + + self.num_features = int(self.embed_dims * 2**(self.num_layers - 1)) + self.drop_after_pos = nn.Dropout(p=self.drop_rate) + + self.avgpool = nn.AdaptiveAvgPool1d(1) + self.num_patches = self.patch_resolution[0] * self.patch_resolution[1] + self.absolute_pos_embed = nn.Parameter( + torch.zeros(1, self.num_patches, self.embed_dims), + requires_grad=False) + + _, self.norm = build_norm_layer(norm_cfg, self.num_features) + + def forward(self, x: torch.Tensor): + x, _ = self.patch_embed(x) + + x = x + self.absolute_pos_embed + x = self.drop_after_pos(x) + + for layer in self.layers: + x = layer(x, attn_mask=None) + + x = self.norm(x) + x = self.avgpool(x.transpose(1, 2)) # B C 1 + x = 
torch.flatten(x, 1) + + return (x, ) + + def get_layer_depth(self, param_name: str, prefix: str = ''): + """Get the layer-wise depth of a parameter. + + Args: + param_name (str): The name of the parameter. + prefix (str): The prefix for the parameter. + Defaults to an empty string. + + Returns: + Tuple[int, int]: The layer-wise depth and the num of layers. + + Note: + The first depth is the stem module (``layer_depth=0``), and the + last depth is the subsequent module (``layer_depth=num_layers-1``) + """ + num_layers = sum(self.depths) + 2 + + if not param_name.startswith(prefix): + # For subsequent module like neck and head + if param_name.startswith('neck'): + return num_layers - 2, num_layers + else: + return num_layers - 1, num_layers + + param_name = param_name[len(prefix):] + + stem_layers = ('patch_embed', 'absolute_pos_embed', 'pos_embed') + if any(stem in param_name for stem in stem_layers): + layer_depth = 0 + elif param_name.startswith('layers'): + layer_id = int(param_name.split('.')[1]) + block_id = param_name.split('.')[3] + + if block_id in ('downsample', 'reduction', 'norm'): + layer_depth = sum(self.depths[:layer_id + 1]) + else: + layer_depth = sum(self.depths[:layer_id]) + int(block_id) + 1 + else: + layer_depth = num_layers - 2 + + return layer_depth, num_layers diff --git a/mmpretrain/models/backbones/mlp_mixer.py b/mmpretrain/models/backbones/mlp_mixer.py new file mode 100644 index 0000000..26fb8ce --- /dev/null +++ b/mmpretrain/models/backbones/mlp_mixer.py @@ -0,0 +1,263 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed +from mmengine.model import BaseModule, ModuleList + +from mmpretrain.registry import MODELS +from ..utils import to_2tuple +from .base_backbone import BaseBackbone + + +class MixerBlock(BaseModule): + """Mlp-Mixer basic block. + + Basic module of `MLP-Mixer: An all-MLP Architecture for Vision + `_ + + Args: + num_tokens (int): The number of patched tokens + embed_dims (int): The feature dimension + tokens_mlp_dims (int): The hidden dimension for tokens FFNs + channels_mlp_dims (int): The hidden dimension for channels FFNs + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + num_fcs (int): The number of fully-connected layers for FFNs. + Defaults to 2. + act_cfg (dict): The activation config for FFNs. + Defaults to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
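+
+ Note:
+ The block first applies the token-mixing FFN on the transposed
+ sequence (mixing information across patches) and then the
+ channel-mixing FFN, each preceded by layer normalization and wrapped
+ in a residual connection.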
+ """ + + def __init__(self, + num_tokens, + embed_dims, + tokens_mlp_dims, + channels_mlp_dims, + drop_rate=0., + drop_path_rate=0., + num_fcs=2, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(MixerBlock, self).__init__(init_cfg=init_cfg) + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + self.token_mix = FFN( + embed_dims=num_tokens, + feedforward_channels=tokens_mlp_dims, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=False) + + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, embed_dims, postfix=2) + self.add_module(self.norm2_name, norm2) + self.channel_mix = FFN( + embed_dims=embed_dims, + feedforward_channels=channels_mlp_dims, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def init_weights(self): + super(MixerBlock, self).init_weights() + for m in self.token_mix.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + nn.init.normal_(m.bias, std=1e-6) + for m in self.channel_mix.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + nn.init.normal_(m.bias, std=1e-6) + + def forward(self, x): + out = self.norm1(x).transpose(1, 2) + x = x + self.token_mix(out).transpose(1, 2) + x = self.channel_mix(self.norm2(x), identity=x) + return x + + +@MODELS.register_module() +class MlpMixer(BaseBackbone): + """Mlp-Mixer backbone. + + Pytorch implementation of `MLP-Mixer: An all-MLP Architecture for Vision + `_ + + Args: + arch (str | dict): MLP Mixer architecture. If use string, choose from + 'small', 'base' and 'large'. If use dict, it should have below + keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of MLP blocks. + - **tokens_mlp_dims** (int): The hidden dimensions for tokens FFNs. + - **channels_mlp_dims** (int): The The hidden dimensions for + channels FFNs. + + Defaults to 'base'. + img_size (int | tuple): The input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + out_indices (Sequence | int): Output from which layer. + Defaults to -1, means the last layer. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + act_cfg (dict): The activation config for FFNs. Default GELU. + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each mixer block layer. + Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
+ """ + + arch_zoo = { + **dict.fromkeys( + ['s', 'small'], { + 'embed_dims': 512, + 'num_layers': 8, + 'tokens_mlp_dims': 256, + 'channels_mlp_dims': 2048, + }), + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'tokens_mlp_dims': 384, + 'channels_mlp_dims': 3072, + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 1024, + 'num_layers': 24, + 'tokens_mlp_dims': 512, + 'channels_mlp_dims': 4096, + }), + } + + def __init__(self, + arch='base', + img_size=224, + patch_size=16, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN'), + act_cfg=dict(type='GELU'), + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None): + super(MlpMixer, self).__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'tokens_mlp_dims', + 'channels_mlp_dims' + } + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.tokens_mlp_dims = self.arch_settings['tokens_mlp_dims'] + self.channels_mlp_dims = self.arch_settings['channels_mlp_dims'] + + self.img_size = to_2tuple(img_size) + + _patch_cfg = dict( + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must be a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_layers + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + else: + assert index >= self.num_layers, f'Invalid out_indices {index}' + self.out_indices = out_indices + + self.layers = ModuleList() + if isinstance(layer_cfgs, dict): + layer_cfgs = [layer_cfgs] * self.num_layers + for i in range(self.num_layers): + _layer_cfg = dict( + num_tokens=num_patches, + embed_dims=self.embed_dims, + tokens_mlp_dims=self.tokens_mlp_dims, + channels_mlp_dims=self.channels_mlp_dims, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + ) + _layer_cfg.update(layer_cfgs[i]) + self.layers.append(MixerBlock(**_layer_cfg)) + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.embed_dims, postfix=1) + self.add_module(self.norm1_name, norm1) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def forward(self, x): + assert x.shape[2:] == self.img_size, \ + "The MLP-Mixer doesn't support dynamic input shape. 
" \ + f'Please input images with shape {self.img_size}' + x, _ = self.patch_embed(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + + if i == len(self.layers) - 1: + x = self.norm1(x) + + if i in self.out_indices: + out = x.transpose(1, 2) + outs.append(out) + + return tuple(outs) diff --git a/mmpretrain/models/backbones/mobilenet_v2.py b/mmpretrain/models/backbones/mobilenet_v2.py new file mode 100644 index 0000000..bca1418 --- /dev/null +++ b/mmpretrain/models/backbones/mobilenet_v2.py @@ -0,0 +1,264 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.utils import make_divisible +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + + +class InvertedResidual(BaseModule): + """InvertedResidual block for MobileNetV2. + + Args: + in_channels (int): The input channels of the InvertedResidual block. + out_channels (int): The output channels of the InvertedResidual block. + stride (int): Stride of the middle (first) 3x3 convolution. + expand_ratio (int): adjusts number of channels of the hidden layer + in InvertedResidual by this amount. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor + """ + + def __init__(self, + in_channels, + out_channels, + stride, + expand_ratio, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + with_cp=False, + init_cfg=None): + super(InvertedResidual, self).__init__(init_cfg) + self.stride = stride + assert stride in [1, 2], f'stride must in [1, 2]. ' \ + f'But received {stride}.' + self.with_cp = with_cp + self.use_res_connect = self.stride == 1 and in_channels == out_channels + hidden_dim = int(round(in_channels * expand_ratio)) + + layers = [] + if expand_ratio != 1: + layers.append( + ConvModule( + in_channels=in_channels, + out_channels=hidden_dim, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + layers.extend([ + ConvModule( + in_channels=hidden_dim, + out_channels=hidden_dim, + kernel_size=3, + stride=stride, + padding=1, + groups=hidden_dim, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=hidden_dim, + out_channels=out_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + ]) + self.conv = nn.Sequential(*layers) + + def forward(self, x): + + def _inner_forward(x): + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@MODELS.register_module() +class MobileNetV2(BaseBackbone): + """MobileNetV2 backbone. + + Args: + widen_factor (float): Width multiplier, multiply number of + channels in each layer by this amount. Default: 1.0. + out_indices (None or Sequence[int]): Output from which stages. + Default: (7, ). + frozen_stages (int): Stages to be frozen (all param fixed). 
+ Default: -1, which means not freezing any parameters. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU6'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + # Parameters to build layers. 4 parameters are needed to construct a + # layer, from left to right: expand_ratio, channel, num_blocks, stride. + arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], + [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], + [6, 320, 1, 1]] + + def __init__(self, + widen_factor=1., + out_indices=(7, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU6'), + norm_eval=False, + with_cp=False, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + super(MobileNetV2, self).__init__(init_cfg) + self.widen_factor = widen_factor + self.out_indices = out_indices + for index in out_indices: + if index not in range(0, 8): + raise ValueError('the item in out_indices must in ' + f'range(0, 8). But received {index}') + + if frozen_stages not in range(-1, 8): + raise ValueError('frozen_stages must be in range(-1, 8). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.in_channels = make_divisible(32 * widen_factor, 8) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + + self.layers = [] + + for i, layer_cfg in enumerate(self.arch_settings): + expand_ratio, channel, num_blocks, stride = layer_cfg + out_channels = make_divisible(channel * widen_factor, 8) + inverted_res_layer = self.make_layer( + out_channels=out_channels, + num_blocks=num_blocks, + stride=stride, + expand_ratio=expand_ratio) + layer_name = f'layer{i + 1}' + self.add_module(layer_name, inverted_res_layer) + self.layers.append(layer_name) + + if widen_factor > 1.0: + self.out_channel = int(1280 * widen_factor) + else: + self.out_channel = 1280 + + layer = ConvModule( + in_channels=self.in_channels, + out_channels=self.out_channel, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.add_module('conv2', layer) + self.layers.append('conv2') + + def make_layer(self, out_channels, num_blocks, stride, expand_ratio): + """Stack InvertedResidual blocks to build a layer for MobileNetV2. + + Args: + out_channels (int): out_channels of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + expand_ratio (int): Expand the number of channels of the + hidden layer in InvertedResidual by this ratio. Default: 6. 
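+
+ Note:
+ Only the first block in the layer uses the given ``stride``; all
+ following blocks use stride 1.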
+ """ + layers = [] + for i in range(num_blocks): + if i >= 1: + stride = 1 + layers.append( + InvertedResidual( + self.in_channels, + out_channels, + stride, + expand_ratio=expand_ratio, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(1, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileNetV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpretrain/models/backbones/mobilenet_v3.py b/mmpretrain/models/backbones/mobilenet_v3.py new file mode 100644 index 0000000..577dba9 --- /dev/null +++ b/mmpretrain/models/backbones/mobilenet_v3.py @@ -0,0 +1,217 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import ConvModule +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.registry import MODELS +from ..utils import InvertedResidual +from .base_backbone import BaseBackbone + + +@MODELS.register_module() +class MobileNetV3(BaseBackbone): + """MobileNetV3 backbone. + + Args: + arch (str): Architecture of mobilnetv3, from {small, large}. + Default: small. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + out_indices (None or Sequence[int]): Output from which stages. + Default: None, which means output tensors from final stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save + some memory while slowing down the training speed. + Default: False. 
+ """ + # Parameters to build each block: + # [kernel size, mid channels, out channels, with_se, act type, stride] + arch_settings = { + 'small': [[3, 16, 16, True, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 2], + [3, 88, 24, False, 'ReLU', 1], + [5, 96, 40, True, 'HSwish', 2], + [5, 240, 40, True, 'HSwish', 1], + [5, 240, 40, True, 'HSwish', 1], + [5, 120, 48, True, 'HSwish', 1], + [5, 144, 48, True, 'HSwish', 1], + [5, 288, 96, True, 'HSwish', 2], + [5, 576, 96, True, 'HSwish', 1], + [5, 576, 96, True, 'HSwish', 1]], + 'small_075': [[3, 16, 16, True, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 2], + [3, 88, 24, False, 'ReLU', 1], + [5, 96, 32, True, 'HSwish', 2], + [5, 192, 32, True, 'HSwish', 1], + [5, 192, 32, True, 'HSwish', 1], + [5, 96, 40, True, 'HSwish', 1], + [5, 120, 40, True, 'HSwish', 1], + [5, 240, 72, True, 'HSwish', 2], + [5, 432, 72, True, 'HSwish', 1], + [5, 432, 72, True, 'HSwish', 1]], + 'small_050': [[3, 16, 8, True, 'ReLU', 2], + [3, 40, 16, False, 'ReLU', 2], + [3, 56, 16, False, 'ReLU', 1], + [5, 64, 24, True, 'HSwish', 2], + [5, 144, 24, True, 'HSwish', 1], + [5, 144, 24, True, 'HSwish', 1], + [5, 72, 24, True, 'HSwish', 1], + [5, 72, 24, True, 'HSwish', 1], + [5, 144, 48, True, 'HSwish', 2], + [5, 288, 48, True, 'HSwish', 1], + [5, 288, 48, True, 'HSwish', 1]], + 'large': [[3, 16, 16, False, 'ReLU', 1], + [3, 64, 24, False, 'ReLU', 2], + [3, 72, 24, False, 'ReLU', 1], + [5, 72, 40, True, 'ReLU', 2], + [5, 120, 40, True, 'ReLU', 1], + [5, 120, 40, True, 'ReLU', 1], + [3, 240, 80, False, 'HSwish', 2], + [3, 200, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 184, 80, False, 'HSwish', 1], + [3, 480, 112, True, 'HSwish', 1], + [3, 672, 112, True, 'HSwish', 1], + [5, 672, 160, True, 'HSwish', 2], + [5, 960, 160, True, 'HSwish', 1], + [5, 960, 160, True, 'HSwish', 1]] + } # yapf: disable + + def __init__(self, + arch='small', + conv_cfg=None, + norm_cfg=dict(type='BN', eps=0.001, momentum=0.01), + out_indices=None, + frozen_stages=-1, + norm_eval=False, + with_cp=False, + init_cfg=[ + dict( + type='Kaiming', + layer=['Conv2d'], + nonlinearity='leaky_relu'), + dict(type='Normal', layer=['Linear'], std=0.01), + dict(type='Constant', layer=['BatchNorm2d'], val=1) + ]): + super(MobileNetV3, self).__init__(init_cfg) + assert arch in self.arch_settings + if out_indices is None: + out_indices = (12, ) if 'small' in arch else (16, ) + for order, index in enumerate(out_indices): + if index not in range(0, len(self.arch_settings[arch]) + 2): + raise ValueError( + 'the item in out_indices must in ' + f'range(0, {len(self.arch_settings[arch]) + 2}). ' + f'But received {index}') + + if frozen_stages not in range(-1, len(self.arch_settings[arch]) + 2): + raise ValueError('frozen_stages must be in range(-1, ' + f'{len(self.arch_settings[arch]) + 2}). 
' + f'But received {frozen_stages}') + self.arch = arch + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + self.with_cp = with_cp + + self.layers = self._make_layer() + self.feat_dim = self.arch_settings[arch][-1][1] + + def _make_layer(self): + layers = [] + layer_setting = self.arch_settings[self.arch] + in_channels = 16 + + layer = ConvModule( + in_channels=3, + out_channels=in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + self.add_module('layer0', layer) + layers.append('layer0') + + for i, params in enumerate(layer_setting): + (kernel_size, mid_channels, out_channels, with_se, act, + stride) = params + if with_se: + se_cfg = dict( + channels=mid_channels, + ratio=4, + act_cfg=(dict(type='ReLU'), + dict( + type='HSigmoid', + bias=3, + divisor=6, + min_value=0, + max_value=1))) + else: + se_cfg = None + + layer = InvertedResidual( + in_channels=in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + se_cfg=se_cfg, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type=act), + with_cp=self.with_cp) + in_channels = out_channels + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, layer) + layers.append(layer_name) + + # Build the last layer before pooling + # TODO: No dilation + layer = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=dict(type='HSwish')) + layer_name = 'layer{}'.format(len(layer_setting) + 1) + self.add_module(layer_name, layer) + layers.append(layer_name) + + return layers + + def forward(self, x): + outs = [] + for i, layer_name in enumerate(self.layers): + layer = getattr(self, layer_name) + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(0, self.frozen_stages + 1): + layer = getattr(self, f'layer{i}') + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileNetV3, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpretrain/models/backbones/mobileone.py b/mmpretrain/models/backbones/mobileone.py new file mode 100644 index 0000000..1111441 --- /dev/null +++ b/mmpretrain/models/backbones/mobileone.py @@ -0,0 +1,515 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from official impl https://github.com/apple/ml-mobileone/blob/main/mobileone.py # noqa: E501 +from typing import Optional, Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_activation_layer, build_conv_layer, build_norm_layer +from mmengine.model import BaseModule, ModuleList, Sequential +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.registry import MODELS +from ..utils.se_layer import SELayer +from .base_backbone import BaseBackbone + + +class MobileOneBlock(BaseModule): + """MobileOne block for MobileOne backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + kernel_size (int): The kernel size of the convs in the block. 
If the + kernel size is large than 1, there will be a ``branch_scale`` in + the block. + num_convs (int): Number of the convolution branches in the block. + stride (int): Stride of convolution layers. Defaults to 1. + padding (int): Padding of the convolution layers. Defaults to 1. + dilation (int): Dilation of the convolution layers. Defaults to 1. + groups (int): Groups of the convolution layers. Defaults to 1. + se_cfg (None or dict): The configuration of the se module. + Defaults to None. + norm_cfg (dict): Configuration to construct and config norm layer. + Defaults to ``dict(type='BN')``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='ReLU')``. + deploy (bool): Whether the model structure is in the deployment mode. + Defaults to False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int, + num_convs: int, + stride: int = 1, + padding: int = 1, + dilation: int = 1, + groups: int = 1, + se_cfg: Optional[dict] = None, + conv_cfg: Optional[dict] = None, + norm_cfg: Optional[dict] = dict(type='BN'), + act_cfg: Optional[dict] = dict(type='ReLU'), + deploy: bool = False, + init_cfg: Optional[dict] = None): + super(MobileOneBlock, self).__init__(init_cfg) + + assert se_cfg is None or isinstance(se_cfg, dict) + if se_cfg is not None: + self.se = SELayer(channels=out_channels, **se_cfg) + else: + self.se = nn.Identity() + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.num_conv_branches = num_convs + self.stride = stride + self.padding = padding + self.se_cfg = se_cfg + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.deploy = deploy + self.groups = groups + self.dilation = dilation + + if deploy: + self.branch_reparam = build_conv_layer( + conv_cfg, + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + groups=self.groups, + stride=stride, + padding=padding, + dilation=dilation, + bias=True) + else: + # judge if input shape and output shape are the same. + # If true, add a normalized identity shortcut. 
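+ # Besides this normalized identity shortcut, the training-time block also
+ # builds a 1x1 "scale" conv-bn branch (only when kernel_size > 1) and
+ # ``num_convs`` full k x k conv-bn branches below; their outputs are summed
+ # in ``forward`` and later fused into a single convolution by
+ # ``switch_to_deploy``.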
+ if out_channels == in_channels and stride == 1: + self.branch_norm = build_norm_layer(norm_cfg, in_channels)[1] + else: + self.branch_norm = None + + self.branch_scale = None + if kernel_size > 1: + self.branch_scale = self.create_conv_bn(kernel_size=1) + + self.branch_conv_list = ModuleList() + for _ in range(num_convs): + self.branch_conv_list.append( + self.create_conv_bn( + kernel_size=kernel_size, + padding=padding, + dilation=dilation)) + + self.act = build_activation_layer(act_cfg) + + def create_conv_bn(self, kernel_size, dilation=1, padding=0): + """cearte a (conv + bn) Sequential layer.""" + conv_bn = Sequential() + conv_bn.add_module( + 'conv', + build_conv_layer( + self.conv_cfg, + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=kernel_size, + groups=self.groups, + stride=self.stride, + dilation=dilation, + padding=padding, + bias=False)) + conv_bn.add_module( + 'norm', + build_norm_layer(self.norm_cfg, num_features=self.out_channels)[1]) + + return conv_bn + + def forward(self, x): + + def _inner_forward(inputs): + if self.deploy: + return self.branch_reparam(inputs) + + inner_out = 0 + if self.branch_norm is not None: + inner_out = self.branch_norm(inputs) + + if self.branch_scale is not None: + inner_out += self.branch_scale(inputs) + + for branch_conv in self.branch_conv_list: + inner_out += branch_conv(inputs) + + return inner_out + + return self.act(self.se(_inner_forward(x))) + + def switch_to_deploy(self): + """Switch the model structure from training mode to deployment mode.""" + if self.deploy: + return + assert self.norm_cfg['type'] == 'BN', \ + "Switch is not allowed when norm_cfg['type'] != 'BN'." + + reparam_weight, reparam_bias = self.reparameterize() + self.branch_reparam = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.out_channels, + kernel_size=self.kernel_size, + stride=self.stride, + padding=self.padding, + dilation=self.dilation, + groups=self.groups, + bias=True) + self.branch_reparam.weight.data = reparam_weight + self.branch_reparam.bias.data = reparam_bias + + for param in self.parameters(): + param.detach_() + delattr(self, 'branch_conv_list') + if hasattr(self, 'branch_scale'): + delattr(self, 'branch_scale') + delattr(self, 'branch_norm') + + self.deploy = True + + def reparameterize(self): + """Fuse all the parameters of all branches. + + Returns: + tuple[torch.Tensor, torch.Tensor]: Parameters after fusion of all + branches. the first element is the weights and the second is + the bias. + """ + weight_conv, bias_conv = 0, 0 + for branch_conv in self.branch_conv_list: + weight, bias = self._fuse_conv_bn(branch_conv) + weight_conv += weight + bias_conv += bias + + weight_scale, bias_scale = 0, 0 + if self.branch_scale is not None: + weight_scale, bias_scale = self._fuse_conv_bn(self.branch_scale) + # Pad scale branch kernel to match conv branch kernel size. + pad = self.kernel_size // 2 + weight_scale = F.pad(weight_scale, [pad, pad, pad, pad]) + + weight_norm, bias_norm = 0, 0 + if self.branch_norm: + tmp_conv_bn = self._norm_to_conv(self.branch_norm) + weight_norm, bias_norm = self._fuse_conv_bn(tmp_conv_bn) + + return (weight_conv + weight_scale + weight_norm, + bias_conv + bias_scale + bias_norm) + + def _fuse_conv_bn(self, branch): + """Fuse the parameters in a branch with a conv and bn. + + Args: + branch (mmcv.runner.Sequential): A branch with conv and bn. + + Returns: + tuple[torch.Tensor, torch.Tensor]: The parameters obtained after + fusing the parameters of conv and bn in one branch. 
+ The first element is the weight and the second is the bias. + """ + if branch is None: + return 0, 0 + kernel = branch.conv.weight + running_mean = branch.norm.running_mean + running_var = branch.norm.running_var + gamma = branch.norm.weight + beta = branch.norm.bias + eps = branch.norm.eps + + std = (running_var + eps).sqrt() + fused_weight = (gamma / std).reshape(-1, 1, 1, 1) * kernel + fused_bias = beta - running_mean * gamma / std + + return fused_weight, fused_bias + + def _norm_to_conv(self, branch_nrom): + """Convert a norm layer to a conv-bn sequence towards + ``self.kernel_size``. + + Args: + branch (nn.BatchNorm2d): A branch only with bn in the block. + + Returns: + (mmcv.runner.Sequential): a sequential with conv and bn. + """ + input_dim = self.in_channels // self.groups + conv_weight = torch.zeros( + (self.in_channels, input_dim, self.kernel_size, self.kernel_size), + dtype=branch_nrom.weight.dtype) + + for i in range(self.in_channels): + conv_weight[i, i % input_dim, self.kernel_size // 2, + self.kernel_size // 2] = 1 + conv_weight = conv_weight.to(branch_nrom.weight.device) + + tmp_conv = self.create_conv_bn(kernel_size=self.kernel_size) + tmp_conv.conv.weight.data = conv_weight + tmp_conv.norm = branch_nrom + return tmp_conv + + +@MODELS.register_module() +class MobileOne(BaseBackbone): + """MobileOne backbone. + + A PyTorch impl of : `An Improved One millisecond Mobile Backbone + `_ + + Args: + arch (str | dict): MobileOne architecture. If use string, choose + from 's0', 's1', 's2', 's3' and 's4'. If use dict, it should + have below keys: + + - num_blocks (Sequence[int]): Number of blocks in each stage. + - width_factor (Sequence[float]): Width factor in each stage. + - num_conv_branches (Sequence[int]): Number of conv branches + in each stage. + - num_se_blocks (Sequence[int]): Number of SE layers in each + stage, all the SE layers are placed in the subsequent order + in each stage. + + Defaults to 's0'. + in_channels (int): Number of input image channels. Default: 3. + out_indices (Sequence[int] | int): Output from which stages. + Defaults to ``(3, )``. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. Defaults to -1. + conv_cfg (dict | None): The config dict for conv layers. + Defaults to None. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='BN')``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='ReLU')``. + deploy (bool): Whether to switch the model structure to deployment + mode. Defaults to False. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + init_cfg (dict or list[dict], optional): Initialization config dict. + + Example: + >>> from mmpretrain.models import MobileOne + >>> import torch + >>> x = torch.rand(1, 3, 224, 224) + >>> model = MobileOne("s0", out_indices=(0, 1, 2, 3)) + >>> model.eval() + >>> outputs = model(x) + >>> for out in outputs: + ... 
print(tuple(out.shape)) + (1, 48, 56, 56) + (1, 128, 28, 28) + (1, 256, 14, 14) + (1, 1024, 7, 7) + """ + + arch_zoo = { + 's0': + dict( + num_blocks=[2, 8, 10, 1], + width_factor=[0.75, 1.0, 1.0, 2.0], + num_conv_branches=[4, 4, 4, 4], + num_se_blocks=[0, 0, 0, 0]), + 's1': + dict( + num_blocks=[2, 8, 10, 1], + width_factor=[1.5, 1.5, 2.0, 2.5], + num_conv_branches=[1, 1, 1, 1], + num_se_blocks=[0, 0, 0, 0]), + 's2': + dict( + num_blocks=[2, 8, 10, 1], + width_factor=[1.5, 2.0, 2.5, 4.0], + num_conv_branches=[1, 1, 1, 1], + num_se_blocks=[0, 0, 0, 0]), + 's3': + dict( + num_blocks=[2, 8, 10, 1], + width_factor=[2.0, 2.5, 3.0, 4.0], + num_conv_branches=[1, 1, 1, 1], + num_se_blocks=[0, 0, 0, 0]), + 's4': + dict( + num_blocks=[2, 8, 10, 1], + width_factor=[3.0, 3.5, 3.5, 4.0], + num_conv_branches=[1, 1, 1, 1], + num_se_blocks=[0, 0, 5, 1]) + } + + def __init__(self, + arch, + in_channels=3, + out_indices=(3, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + se_cfg=dict(ratio=16), + deploy=False, + norm_eval=False, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict(type='Constant', val=1, layer=['_BatchNorm']) + ]): + super(MobileOne, self).__init__(init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_zoo, f'"arch": "{arch}"' \ + f' is not one of the {list(self.arch_zoo.keys())}' + arch = self.arch_zoo[arch] + elif not isinstance(arch, dict): + raise TypeError('Expect "arch" to be either a string ' + f'or a dict, got {type(arch)}') + + self.arch = arch + for k, value in self.arch.items(): + assert isinstance(value, list) and len(value) == 4, \ + f'the value of {k} in arch must be list with 4 items.' + + self.in_channels = in_channels + self.deploy = deploy + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.se_cfg = se_cfg + self.act_cfg = act_cfg + + base_channels = [64, 128, 256, 512] + channels = min(64, + int(base_channels[0] * self.arch['width_factor'][0])) + self.stage0 = MobileOneBlock( + self.in_channels, + channels, + stride=2, + kernel_size=3, + num_convs=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + deploy=deploy) + + self.in_planes = channels + self.stages = [] + for i, num_blocks in enumerate(self.arch['num_blocks']): + planes = int(base_channels[i] * self.arch['width_factor'][i]) + + stage = self._make_stage(planes, num_blocks, + arch['num_se_blocks'][i], + arch['num_conv_branches'][i]) + + stage_name = f'stage{i + 1}' + self.add_module(stage_name, stage) + self.stages.append(stage_name) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + out_indices = list(out_indices) + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = len(self.stages) + index + assert 0 <= out_indices[i] <= len(self.stages), \ + f'Invalid out_indices {index}.' 
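+ # With the default ``out_indices=(3, )`` only the feature map of the last
+ # stage (stage4) is returned from ``forward``; e.g. ``out_indices=(0, 1, 2, 3)``
+ # returns the outputs of all four stages, as in the class docstring example.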
+ self.out_indices = out_indices + + def _make_stage(self, planes, num_blocks, num_se, num_conv_branches): + strides = [2] + [1] * (num_blocks - 1) + if num_se > num_blocks: + raise ValueError('Number of SE blocks cannot ' + 'exceed number of layers.') + blocks = [] + for i in range(num_blocks): + use_se = False + if i >= (num_blocks - num_se): + use_se = True + + blocks.append( + # Depthwise conv + MobileOneBlock( + in_channels=self.in_planes, + out_channels=self.in_planes, + kernel_size=3, + num_convs=num_conv_branches, + stride=strides[i], + padding=1, + groups=self.in_planes, + se_cfg=self.se_cfg if use_se else None, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + deploy=self.deploy)) + + blocks.append( + # Pointwise conv + MobileOneBlock( + in_channels=self.in_planes, + out_channels=planes, + kernel_size=1, + num_convs=num_conv_branches, + stride=1, + padding=0, + se_cfg=self.se_cfg if use_se else None, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + deploy=self.deploy)) + + self.in_planes = planes + + return Sequential(*blocks) + + def forward(self, x): + x = self.stage0(x) + outs = [] + for i, stage_name in enumerate(self.stages): + stage = getattr(self, stage_name) + x = stage(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.stage0.eval() + for param in self.stage0.parameters(): + param.requires_grad = False + for i in range(self.frozen_stages): + stage = getattr(self, f'stage{i+1}') + stage.eval() + for param in stage.parameters(): + param.requires_grad = False + + def train(self, mode=True): + """switch the mobile to train mode or not.""" + super(MobileOne, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + def switch_to_deploy(self): + """switch the model to deploy mode, which has smaller amount of + parameters and calculations.""" + for m in self.modules(): + if isinstance(m, MobileOneBlock): + m.switch_to_deploy() + self.deploy = True diff --git a/mmpretrain/models/backbones/mobilevit.py b/mmpretrain/models/backbones/mobilevit.py new file mode 100644 index 0000000..9e4043f --- /dev/null +++ b/mmpretrain/models/backbones/mobilevit.py @@ -0,0 +1,431 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Callable, Optional, Sequence + +import torch +import torch.nn.functional as F +from mmcv.cnn import ConvModule, build_norm_layer +from torch import nn + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone +from .mobilenet_v2 import InvertedResidual +from .vision_transformer import TransformerEncoderLayer + + +class MobileVitBlock(nn.Module): + """MobileViT block. + + According to the paper, the MobileViT block has a local representation. + a transformer-as-convolution layer which consists of a global + representation with unfolding and folding, and a final fusion layer. + + Args: + in_channels (int): Number of input image channels. + transformer_dim (int): Number of transformer channels. + ffn_dim (int): Number of ffn channels in transformer block. + out_channels (int): Number of channels in output. + conv_ksize (int): Conv kernel size in local representation + and fusion. Defaults to 3. + conv_cfg (dict, optional): Config dict for convolution layer. + Defaults to None, which means using conv2d. + norm_cfg (dict, optional): Config dict for normalization layer. 
+ Defaults to dict(type='BN'). + act_cfg (dict, optional): Config dict for activation layer. + Defaults to dict(type='Swish'). + num_transformer_blocks (int): Number of transformer blocks in + a MobileViT block. Defaults to 2. + patch_size (int): Patch size for unfolding and folding. + Defaults to 2. + num_heads (int): Number of heads in global representation. + Defaults to 4. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop_rate (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + no_fusion (bool): Whether to remove the fusion layer. + Defaults to False. + transformer_norm_cfg (dict, optional): Config dict for normalization + layer in transformer. Defaults to dict(type='LN'). + """ + + def __init__( + self, + in_channels: int, + transformer_dim: int, + ffn_dim: int, + out_channels: int, + conv_ksize: int = 3, + conv_cfg: Optional[dict] = None, + norm_cfg: Optional[dict] = dict(type='BN'), + act_cfg: Optional[dict] = dict(type='Swish'), + num_transformer_blocks: int = 2, + patch_size: int = 2, + num_heads: int = 4, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + no_fusion: bool = False, + transformer_norm_cfg: Callable = dict(type='LN'), + ): + super(MobileVitBlock, self).__init__() + + self.local_rep = nn.Sequential( + ConvModule( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=conv_ksize, + padding=int((conv_ksize - 1) / 2), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + in_channels=in_channels, + out_channels=transformer_dim, + kernel_size=1, + bias=False, + conv_cfg=conv_cfg, + norm_cfg=None, + act_cfg=None), + ) + + global_rep = [ + TransformerEncoderLayer( + embed_dims=transformer_dim, + num_heads=num_heads, + feedforward_channels=ffn_dim, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + qkv_bias=True, + act_cfg=dict(type='Swish'), + norm_cfg=transformer_norm_cfg) + for _ in range(num_transformer_blocks) + ] + global_rep.append( + build_norm_layer(transformer_norm_cfg, transformer_dim)[1]) + self.global_rep = nn.Sequential(*global_rep) + + self.conv_proj = ConvModule( + in_channels=transformer_dim, + out_channels=out_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + if no_fusion: + self.conv_fusion = None + else: + self.conv_fusion = ConvModule( + in_channels=in_channels + out_channels, + out_channels=out_channels, + kernel_size=conv_ksize, + padding=int((conv_ksize - 1) / 2), + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.patch_size = (patch_size, patch_size) + self.patch_area = self.patch_size[0] * self.patch_size[1] + + def forward(self, x: torch.Tensor) -> torch.Tensor: + shortcut = x + + # Local representation + x = self.local_rep(x) + + # Unfold (feature map -> patches) + patch_h, patch_w = self.patch_size + B, C, H, W = x.shape + new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil( + W / patch_w) * patch_w + num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w # noqa + num_patches = num_patch_h * num_patch_w # N + interpolate = False + if new_h != H or new_w != W: + # Note: Padding can be done, but then it needs to be handled in attention function. 
# noqa + x = F.interpolate( + x, size=(new_h, new_w), mode='bilinear', align_corners=False) + interpolate = True + + # [B, C, H, W] --> [B * C * n_h, n_w, p_h, p_w] + x = x.reshape(B * C * num_patch_h, patch_h, num_patch_w, + patch_w).transpose(1, 2) + # [B * C * n_h, n_w, p_h, p_w] --> [BP, N, C] where P = p_h * p_w and N = n_h * n_w # noqa + x = x.reshape(B, C, num_patches, + self.patch_area).transpose(1, 3).reshape( + B * self.patch_area, num_patches, -1) + + # Global representations + x = self.global_rep(x) + + # Fold (patch -> feature map) + # [B, P, N, C] --> [B*C*n_h, n_w, p_h, p_w] + x = x.contiguous().view(B, self.patch_area, num_patches, -1) + x = x.transpose(1, 3).reshape(B * C * num_patch_h, num_patch_w, + patch_h, patch_w) + # [B*C*n_h, n_w, p_h, p_w] --> [B*C*n_h, p_h, n_w, p_w] --> [B, C, H, W] # noqa + x = x.transpose(1, 2).reshape(B, C, num_patch_h * patch_h, + num_patch_w * patch_w) + if interpolate: + x = F.interpolate( + x, size=(H, W), mode='bilinear', align_corners=False) + + x = self.conv_proj(x) + if self.conv_fusion is not None: + x = self.conv_fusion(torch.cat((shortcut, x), dim=1)) + return x + + +@MODELS.register_module() +class MobileViT(BaseBackbone): + """MobileViT backbone. + + A PyTorch implementation of : `MobileViT: Light-weight, General-purpose, + and Mobile-friendly Vision Transformer `_ + + Modified from the `official repo + `_ + and `timm + `_. + + Args: + arch (str | List[list]): Architecture of MobileViT. + + - If a string, choose from "small", "x_small" and "xx_small". + + - If a list, every item should be also a list, and the first item + of the sub-list can be chosen from "moblienetv2" and "mobilevit", + which indicates the type of this layer sequence. If "mobilenetv2", + the other items are the arguments of :attr:`~MobileViT.make_mobilenetv2_layer` + (except ``in_channels``) and if "mobilevit", the other items are + the arguments of :attr:`~MobileViT.make_mobilevit_layer` + (except ``in_channels``). + + Defaults to "small". + in_channels (int): Number of input image channels. Defaults to 3. + stem_channels (int): Channels of stem layer. Defaults to 16. + last_exp_factor (int): Channels expand factor of last layer. + Defaults to 4. + out_indices (Sequence[int]): Output from which stages. + Defaults to (4, ). + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to -1, which means not freezing any parameters. + conv_cfg (dict, optional): Config dict for convolution layer. + Defaults to None, which means using conv2d. + norm_cfg (dict, optional): Config dict for normalization layer. + Defaults to dict(type='BN'). + act_cfg (dict, optional): Config dict for activation layer. + Defaults to dict(type='Swish'). + init_cfg (dict, optional): Initialization config dict. + """ # noqa + + # Parameters to build layers. The first param is the type of layer. + # For `mobilenetv2` layer, the rest params from left to right are: + # out channels, stride, num of blocks, expand_ratio. + # For `mobilevit` layer, the rest params from left to right are: + # out channels, stride, transformer_channels, ffn channels, + # num of transformer blocks, expand_ratio. 
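+ # For example, the 'small' entry ['mobilevit', 96, 2, 144, 288, 2, 4]
+ # builds a stage with 96 output channels, stride 2, transformer dim 144,
+ # ffn dim 288, 2 transformer blocks and an expand_ratio of 4.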
+ arch_settings = { + 'small': [ + ['mobilenetv2', 32, 1, 1, 4], + ['mobilenetv2', 64, 2, 3, 4], + ['mobilevit', 96, 2, 144, 288, 2, 4], + ['mobilevit', 128, 2, 192, 384, 4, 4], + ['mobilevit', 160, 2, 240, 480, 3, 4], + ], + 'x_small': [ + ['mobilenetv2', 32, 1, 1, 4], + ['mobilenetv2', 48, 2, 3, 4], + ['mobilevit', 64, 2, 96, 192, 2, 4], + ['mobilevit', 80, 2, 120, 240, 4, 4], + ['mobilevit', 96, 2, 144, 288, 3, 4], + ], + 'xx_small': [ + ['mobilenetv2', 16, 1, 1, 2], + ['mobilenetv2', 24, 2, 3, 2], + ['mobilevit', 48, 2, 64, 128, 2, 2], + ['mobilevit', 64, 2, 80, 160, 4, 2], + ['mobilevit', 80, 2, 96, 192, 3, 2], + ] + } + + def __init__(self, + arch='small', + in_channels=3, + stem_channels=16, + last_exp_factor=4, + out_indices=(4, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='Swish'), + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + super(MobileViT, self).__init__(init_cfg) + if isinstance(arch, str): + arch = arch.lower() + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a list.' + arch = self.arch_settings[arch] + + self.arch = arch + self.num_stages = len(arch) + + # check out indices and frozen stages + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_stages + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + + if frozen_stages not in range(-1, self.num_stages): + raise ValueError('frozen_stages must be in range(-1, ' + f'{self.num_stages}). ' + f'But received {frozen_stages}') + self.frozen_stages = frozen_stages + + _make_layer_func = { + 'mobilenetv2': self.make_mobilenetv2_layer, + 'mobilevit': self.make_mobilevit_layer, + } + + self.stem = ConvModule( + in_channels=in_channels, + out_channels=stem_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + in_channels = stem_channels + layers = [] + for i, layer_settings in enumerate(arch): + layer_type, settings = layer_settings[0], layer_settings[1:] + layer, out_channels = _make_layer_func[layer_type](in_channels, + *settings) + layers.append(layer) + in_channels = out_channels + self.layers = nn.Sequential(*layers) + + self.conv_1x1_exp = ConvModule( + in_channels=in_channels, + out_channels=last_exp_factor * in_channels, + kernel_size=1, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + @staticmethod + def make_mobilevit_layer(in_channels, + out_channels, + stride, + transformer_dim, + ffn_dim, + num_transformer_blocks, + expand_ratio=4): + """Build mobilevit layer, which consists of one InvertedResidual and + one MobileVitBlock. + + Args: + in_channels (int): The input channels. + out_channels (int): The output channels. + stride (int): The stride of the first 3x3 convolution in the + ``InvertedResidual`` layers. + transformer_dim (int): The channels of the transformer layers. + ffn_dim (int): The mid-channels of the feedforward network in + transformer layers. + num_transformer_blocks (int): The number of transformer blocks. + expand_ratio (int): adjusts number of channels of the hidden layer + in ``InvertedResidual`` by this amount. Defaults to 4. 
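+ Returns:
+ tuple[nn.Sequential, int]: The built layer and its output channels.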
+ """ + layer = [] + layer.append( + InvertedResidual( + in_channels=in_channels, + out_channels=out_channels, + stride=stride, + expand_ratio=expand_ratio, + act_cfg=dict(type='Swish'), + )) + layer.append( + MobileVitBlock( + in_channels=out_channels, + transformer_dim=transformer_dim, + ffn_dim=ffn_dim, + out_channels=out_channels, + num_transformer_blocks=num_transformer_blocks, + )) + return nn.Sequential(*layer), out_channels + + @staticmethod + def make_mobilenetv2_layer(in_channels, + out_channels, + stride, + num_blocks, + expand_ratio=4): + """Build mobilenetv2 layer, which consists of several InvertedResidual + layers. + + Args: + in_channels (int): The input channels. + out_channels (int): The output channels. + stride (int): The stride of the first 3x3 convolution in the + ``InvertedResidual`` layers. + num_blocks (int): The number of ``InvertedResidual`` blocks. + expand_ratio (int): adjusts number of channels of the hidden layer + in ``InvertedResidual`` by this amount. Defaults to 4. + """ + layer = [] + for i in range(num_blocks): + stride = stride if i == 0 else 1 + + layer.append( + InvertedResidual( + in_channels=in_channels, + out_channels=out_channels, + stride=stride, + expand_ratio=expand_ratio, + act_cfg=dict(type='Swish'), + )) + in_channels = out_channels + return nn.Sequential(*layer), out_channels + + def _freeze_stages(self): + for i in range(0, self.frozen_stages): + layer = self.layers[i] + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(MobileViT, self).train(mode) + self._freeze_stages() + + def forward(self, x): + x = self.stem(x) + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i == len(self.layers) - 1: + x = self.conv_1x1_exp(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) diff --git a/mmpretrain/models/backbones/mvit.py b/mmpretrain/models/backbones/mvit.py new file mode 100644 index 0000000..68aee97 --- /dev/null +++ b/mmpretrain/models/backbones/mvit.py @@ -0,0 +1,700 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_activation_layer, build_norm_layer +from mmcv.cnn.bricks import DropPath +from mmcv.cnn.bricks.transformer import PatchEmbed +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import trunc_normal_ +from mmengine.utils import to_2tuple + +from ..builder import BACKBONES +from ..utils import resize_pos_embed +from .base_backbone import BaseBackbone + + +def resize_decomposed_rel_pos(rel_pos, q_size, k_size): + """Get relative positional embeddings according to the relative positions + of query and key sizes. + + Args: + q_size (int): size of query q. + k_size (int): size of key k. + rel_pos (Tensor): relative position embeddings (L, C). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + resized = F.interpolate( + # (L, C) -> (1, C, L) + rel_pos.transpose(0, 1).unsqueeze(0), + size=max_rel_dist, + mode='linear', + ) + # (1, C, L) -> (L, C) + resized = resized.squeeze(0).transpose(0, 1) + else: + resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. 
+ q_h_ratio = max(k_size / q_size, 1.0) + k_h_ratio = max(q_size / k_size, 1.0) + q_coords = torch.arange(q_size)[:, None] * q_h_ratio + k_coords = torch.arange(k_size)[None, :] * k_h_ratio + relative_coords = (q_coords - k_coords) + (k_size - 1) * k_h_ratio + + return resized[relative_coords.long()] + + +def add_decomposed_rel_pos(attn, + q, + q_shape, + k_shape, + rel_pos_h, + rel_pos_w, + has_cls_token=False): + """Spatial Relative Positional Embeddings.""" + sp_idx = 1 if has_cls_token else 0 + B, num_heads, _, C = q.shape + q_h, q_w = q_shape + k_h, k_w = k_shape + + Rh = resize_decomposed_rel_pos(rel_pos_h, q_h, k_h) + Rw = resize_decomposed_rel_pos(rel_pos_w, q_w, k_w) + + r_q = q[:, :, sp_idx:].reshape(B, num_heads, q_h, q_w, C) + rel_h = torch.einsum('byhwc,hkc->byhwk', r_q, Rh) + rel_w = torch.einsum('byhwc,wkc->byhwk', r_q, Rw) + rel_pos_embed = rel_h[:, :, :, :, :, None] + rel_w[:, :, :, :, None, :] + + attn_map = attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_h, q_w, k_h, k_w) + attn_map += rel_pos_embed + attn[:, :, sp_idx:, sp_idx:] = attn_map.view(B, -1, q_h * q_w, k_h * k_w) + + return attn + + +class MLP(BaseModule): + """Two-layer multilayer perceptron. + + Comparing with :class:`mmcv.cnn.bricks.transformer.FFN`, this class allows + different input and output channel numbers. + + Args: + in_channels (int): The number of input channels. + hidden_channels (int, optional): The number of hidden layer channels. + If None, same as the ``in_channels``. Defaults to None. + out_channels (int, optional): The number of output channels. If None, + same as the ``in_channels``. Defaults to None. + act_cfg (dict): The config of activation function. + Defaults to ``dict(type='GELU')``. + init_cfg (dict, optional): The config of weight initialization. + Defaults to None. + """ + + def __init__(self, + in_channels, + hidden_channels=None, + out_channels=None, + act_cfg=dict(type='GELU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + out_channels = out_channels or in_channels + hidden_channels = hidden_channels or in_channels + self.fc1 = nn.Linear(in_channels, hidden_channels) + self.act = build_activation_layer(act_cfg) + self.fc2 = nn.Linear(hidden_channels, out_channels) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + return x + + +def attention_pool(x: torch.Tensor, + pool: nn.Module, + in_size: tuple, + norm: Optional[nn.Module] = None): + """Pooling the feature tokens. + + Args: + x (torch.Tensor): The input tensor, should be with shape + ``(B, num_heads, L, C)`` or ``(B, L, C)``. + pool (nn.Module): The pooling module. + in_size (Tuple[int]): The shape of the input feature map. + norm (nn.Module, optional): The normalization module. + Defaults to None. + """ + ndim = x.ndim + if ndim == 4: + B, num_heads, L, C = x.shape + elif ndim == 3: + num_heads = 1 + B, L, C = x.shape + else: + raise RuntimeError(f'Unsupported input dimension {x.shape}') + + H, W = in_size + assert L == H * W + + # (B, num_heads, H*W, C) -> (B*num_heads, C, H, W) + x = x.reshape(B * num_heads, H, W, C).permute(0, 3, 1, 2).contiguous() + x = pool(x) + out_size = x.shape[-2:] + + # (B*num_heads, C, H', W') -> (B, num_heads, H'*W', C) + x = x.reshape(B, num_heads, C, -1).transpose(2, 3) + + if norm is not None: + x = norm(x) + + if ndim == 3: + x = x.squeeze(1) + + return x, out_size + + +class MultiScaleAttention(BaseModule): + """Multiscale Multi-head Attention block. + + Args: + in_dims (int): Number of input channels. + out_dims (int): Number of output channels. 
+ num_heads (int): Number of attention heads. + qkv_bias (bool): If True, add a learnable bias to query, key and + value. Defaults to True. + norm_cfg (dict): The config of normalization layers. + Defaults to ``dict(type='LN')``. + pool_kernel (tuple): kernel size for qkv pooling layers. + Defaults to (3, 3). + stride_q (int): stride size for q pooling layer. Defaults to 1. + stride_kv (int): stride size for kv pooling layer. Defaults to 1. + rel_pos_spatial (bool): Whether to enable the spatial relative + position embedding. Defaults to True. + residual_pooling (bool): Whether to enable the residual connection + after attention pooling. Defaults to True. + input_size (Tuple[int], optional): The input resolution, necessary + if enable the ``rel_pos_spatial``. Defaults to None. + rel_pos_zero_init (bool): If True, zero initialize relative + positional parameters. Defaults to False. + init_cfg (dict, optional): The config of weight initialization. + Defaults to None. + """ + + def __init__(self, + in_dims, + out_dims, + num_heads, + qkv_bias=True, + norm_cfg=dict(type='LN'), + pool_kernel=(3, 3), + stride_q=1, + stride_kv=1, + rel_pos_spatial=False, + residual_pooling=True, + input_size=None, + rel_pos_zero_init=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.num_heads = num_heads + self.in_dims = in_dims + self.out_dims = out_dims + + head_dim = out_dims // num_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(in_dims, out_dims * 3, bias=qkv_bias) + self.proj = nn.Linear(out_dims, out_dims) + + # qkv pooling + pool_padding = [k // 2 for k in pool_kernel] + pool_dims = out_dims // num_heads + + def build_pooling(stride): + pool = nn.Conv2d( + pool_dims, + pool_dims, + pool_kernel, + stride=stride, + padding=pool_padding, + groups=pool_dims, + bias=False, + ) + norm = build_norm_layer(norm_cfg, pool_dims)[1] + return pool, norm + + self.pool_q, self.norm_q = build_pooling(stride_q) + self.pool_k, self.norm_k = build_pooling(stride_kv) + self.pool_v, self.norm_v = build_pooling(stride_kv) + + self.residual_pooling = residual_pooling + + self.rel_pos_spatial = rel_pos_spatial + self.rel_pos_zero_init = rel_pos_zero_init + if self.rel_pos_spatial: + # initialize relative positional embeddings + assert input_size[0] == input_size[1] + + size = input_size[0] + rel_dim = 2 * max(size // stride_q, size // stride_kv) - 1 + self.rel_pos_h = nn.Parameter(torch.zeros(rel_dim, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(rel_dim, head_dim)) + + def init_weights(self): + """Weight initialization.""" + super().init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress rel_pos_zero_init if use pretrained model. 
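+ # The pretrained checkpoint is expected to already contain trained
+ # rel_pos_h / rel_pos_w values, so they are left untouched here.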
+ return + + if not self.rel_pos_zero_init: + trunc_normal_(self.rel_pos_h, std=0.02) + trunc_normal_(self.rel_pos_w, std=0.02) + + def forward(self, x, in_size): + """Forward the MultiScaleAttention.""" + B, N, _ = x.shape # (B, H*W, C) + + # qkv: (B, H*W, 3, num_heads, C) + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1) + # q, k, v: (B, num_heads, H*W, C) + q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0) + + q, q_shape = attention_pool(q, self.pool_q, in_size, norm=self.norm_q) + k, k_shape = attention_pool(k, self.pool_k, in_size, norm=self.norm_k) + v, v_shape = attention_pool(v, self.pool_v, in_size, norm=self.norm_v) + + attn = (q * self.scale) @ k.transpose(-2, -1) + if self.rel_pos_spatial: + attn = add_decomposed_rel_pos(attn, q, q_shape, k_shape, + self.rel_pos_h, self.rel_pos_w) + + attn = attn.softmax(dim=-1) + x = attn @ v + + if self.residual_pooling: + x = x + q + + # (B, num_heads, H'*W', C'//num_heads) -> (B, H'*W', C') + x = x.transpose(1, 2).reshape(B, -1, self.out_dims) + x = self.proj(x) + + return x, q_shape + + +class MultiScaleBlock(BaseModule): + """Multiscale Transformer blocks. + + Args: + in_dims (int): Number of input channels. + out_dims (int): Number of output channels. + num_heads (int): Number of attention heads. + mlp_ratio (float): Ratio of hidden dimensions in MLP layers. + Defaults to 4.0. + qkv_bias (bool): If True, add a learnable bias to query, key and + value. Defaults to True. + drop_path (float): Stochastic depth rate. Defaults to 0. + norm_cfg (dict): The config of normalization layers. + Defaults to ``dict(type='LN')``. + act_cfg (dict): The config of activation function. + Defaults to ``dict(type='GELU')``. + qkv_pool_kernel (tuple): kernel size for qkv pooling layers. + Defaults to (3, 3). + stride_q (int): stride size for q pooling layer. Defaults to 1. + stride_kv (int): stride size for kv pooling layer. Defaults to 1. + rel_pos_spatial (bool): Whether to enable the spatial relative + position embedding. Defaults to True. + residual_pooling (bool): Whether to enable the residual connection + after attention pooling. Defaults to True. + dim_mul_in_attention (bool): Whether to multiply the ``embed_dims`` in + attention layers. If False, multiply it in MLP layers. + Defaults to True. + input_size (Tuple[int], optional): The input resolution, necessary + if enable the ``rel_pos_spatial``. Defaults to None. + rel_pos_zero_init (bool): If True, zero initialize relative + positional parameters. Defaults to False. + init_cfg (dict, optional): The config of weight initialization. + Defaults to None. 
+ """ + + def __init__( + self, + in_dims, + out_dims, + num_heads, + mlp_ratio=4.0, + qkv_bias=True, + drop_path=0.0, + norm_cfg=dict(type='LN'), + act_cfg=dict(type='GELU'), + qkv_pool_kernel=(3, 3), + stride_q=1, + stride_kv=1, + rel_pos_spatial=True, + residual_pooling=True, + dim_mul_in_attention=True, + input_size=None, + rel_pos_zero_init=False, + init_cfg=None, + ): + super().__init__(init_cfg=init_cfg) + self.in_dims = in_dims + self.out_dims = out_dims + self.norm1 = build_norm_layer(norm_cfg, in_dims)[1] + self.dim_mul_in_attention = dim_mul_in_attention + + attn_dims = out_dims if dim_mul_in_attention else in_dims + self.attn = MultiScaleAttention( + in_dims, + attn_dims, + num_heads=num_heads, + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + pool_kernel=qkv_pool_kernel, + stride_q=stride_q, + stride_kv=stride_kv, + rel_pos_spatial=rel_pos_spatial, + residual_pooling=residual_pooling, + input_size=input_size, + rel_pos_zero_init=rel_pos_zero_init) + self.drop_path = DropPath( + drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = build_norm_layer(norm_cfg, attn_dims)[1] + + self.mlp = MLP( + in_channels=attn_dims, + hidden_channels=int(attn_dims * mlp_ratio), + out_channels=out_dims, + act_cfg=act_cfg) + + if in_dims != out_dims: + self.proj = nn.Linear(in_dims, out_dims) + else: + self.proj = None + + if stride_q > 1: + kernel_skip = stride_q + 1 + padding_skip = int(kernel_skip // 2) + self.pool_skip = nn.MaxPool2d( + kernel_skip, stride_q, padding_skip, ceil_mode=False) + + if input_size is not None: + input_size = to_2tuple(input_size) + out_size = [size // stride_q for size in input_size] + self.init_out_size = out_size + else: + self.init_out_size = None + else: + self.pool_skip = None + self.init_out_size = input_size + + def forward(self, x, in_size): + x_norm = self.norm1(x) + x_attn, out_size = self.attn(x_norm, in_size) + + if self.dim_mul_in_attention and self.proj is not None: + skip = self.proj(x_norm) + else: + skip = x + + if self.pool_skip is not None: + skip, _ = attention_pool(skip, self.pool_skip, in_size) + + x = skip + self.drop_path(x_attn) + x_norm = self.norm2(x) + x_mlp = self.mlp(x_norm) + + if not self.dim_mul_in_attention and self.proj is not None: + skip = self.proj(x_norm) + else: + skip = x + + x = skip + self.drop_path(x_mlp) + + return x, out_size + + +@BACKBONES.register_module() +class MViT(BaseBackbone): + """Multi-scale ViT v2. + + A PyTorch implement of : `MViTv2: Improved Multiscale Vision Transformers + for Classification and Detection `_ + + Inspiration from `the official implementation + `_ and `the detectron2 + implementation `_ + + Args: + arch (str | dict): MViT architecture. If use string, choose + from 'tiny', 'small', 'base' and 'large'. If use dict, it should + have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of layers. + - **num_heads** (int): The number of heads in attention + modules of the initial layer. + - **downscale_indices** (List[int]): The layer indices to downscale + the feature map. + + Defaults to 'base'. + img_size (int): The expected input image shape. Defaults to 224. + in_channels (int): The num of input channels. Defaults to 3. + out_scales (int | Sequence[int]): The output scale indices. + They should not exceed the length of ``downscale_indices``. + Defaults to -1, which means the last scale. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.1. 
+ use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults to False. + interpolate_mode (str): Select the interpolate mode for absolute + position embedding vector resize. Defaults to "bicubic". + pool_kernel (tuple): kernel size for qkv pooling layers. + Defaults to (3, 3). + dim_mul (int): The magnification for ``embed_dims`` in the downscale + layers. Defaults to 2. + head_mul (int): The magnification for ``num_heads`` in the downscale + layers. Defaults to 2. + adaptive_kv_stride (int): The stride size for kv pooling in the initial + layer. Defaults to 4. + rel_pos_spatial (bool): Whether to enable the spatial relative position + embedding. Defaults to True. + residual_pooling (bool): Whether to enable the residual connection + after attention pooling. Defaults to True. + dim_mul_in_attention (bool): Whether to multiply the ``embed_dims`` in + attention layers. If False, multiply it in MLP layers. + Defaults to True. + rel_pos_zero_init (bool): If True, zero initialize relative + positional parameters. Defaults to False. + mlp_ratio (float): Ratio of hidden dimensions in MLP layers. + Defaults to 4.0. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + norm_cfg (dict): Config dict for normalization layer for all output + features. Defaults to ``dict(type='LN', eps=1e-6)``. + patch_cfg (dict): Config dict for the patch embedding layer. + Defaults to ``dict(kernel_size=7, stride=4, padding=3)``. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + + Examples: + >>> import torch + >>> from mmpretrain.models import build_backbone + >>> + >>> cfg = dict(type='MViT', arch='tiny', out_scales=[0, 1, 2, 3]) + >>> model = build_backbone(cfg) + >>> inputs = torch.rand(1, 3, 224, 224) + >>> outputs = model(inputs) + >>> for i, output in enumerate(outputs): + >>> print(f'scale{i}: {output.shape}') + scale0: torch.Size([1, 96, 56, 56]) + scale1: torch.Size([1, 192, 28, 28]) + scale2: torch.Size([1, 384, 14, 14]) + scale3: torch.Size([1, 768, 7, 7]) + """ + arch_zoo = { + 'tiny': { + 'embed_dims': 96, + 'num_layers': 10, + 'num_heads': 1, + 'downscale_indices': [1, 3, 8] + }, + 'small': { + 'embed_dims': 96, + 'num_layers': 16, + 'num_heads': 1, + 'downscale_indices': [1, 3, 14] + }, + 'base': { + 'embed_dims': 96, + 'num_layers': 24, + 'num_heads': 1, + 'downscale_indices': [2, 5, 21] + }, + 'large': { + 'embed_dims': 144, + 'num_layers': 48, + 'num_heads': 2, + 'downscale_indices': [2, 8, 44] + }, + } + num_extra_tokens = 0 + + def __init__(self, + arch='base', + img_size=224, + in_channels=3, + out_scales=-1, + drop_path_rate=0., + use_abs_pos_embed=False, + interpolate_mode='bicubic', + pool_kernel=(3, 3), + dim_mul=2, + head_mul=2, + adaptive_kv_stride=4, + rel_pos_spatial=True, + residual_pooling=True, + dim_mul_in_attention=True, + rel_pos_zero_init=False, + mlp_ratio=4., + qkv_bias=True, + norm_cfg=dict(type='LN', eps=1e-6), + patch_cfg=dict(kernel_size=7, stride=4, padding=3), + init_cfg=None): + super().__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'num_heads', 'downscale_indices' + } + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = 
self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.num_heads = self.arch_settings['num_heads'] + self.downscale_indices = self.arch_settings['downscale_indices'] + self.num_scales = len(self.downscale_indices) + 1 + self.stage_indices = { + index - 1: i + for i, index in enumerate(self.downscale_indices) + } + self.stage_indices[self.num_layers - 1] = self.num_scales - 1 + self.use_abs_pos_embed = use_abs_pos_embed + self.interpolate_mode = interpolate_mode + + if isinstance(out_scales, int): + out_scales = [out_scales] + assert isinstance(out_scales, Sequence), \ + f'"out_scales" must by a sequence or int, ' \ + f'get {type(out_scales)} instead.' + for i, index in enumerate(out_scales): + if index < 0: + out_scales[i] = self.num_scales + index + assert 0 <= out_scales[i] <= self.num_scales, \ + f'Invalid out_scales {index}' + self.out_scales = sorted(list(out_scales)) + + # Set patch embedding + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + + # Set absolute position embedding + if self.use_abs_pos_embed: + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches, self.embed_dims)) + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, self.num_layers) + + self.blocks = ModuleList() + out_dims_list = [self.embed_dims] + num_heads = self.num_heads + stride_kv = adaptive_kv_stride + input_size = self.patch_resolution + for i in range(self.num_layers): + if i in self.downscale_indices: + num_heads *= head_mul + stride_q = 2 + stride_kv = max(stride_kv // 2, 1) + else: + stride_q = 1 + + # Set output embed_dims + if dim_mul_in_attention and i in self.downscale_indices: + # multiply embed_dims in downscale layers. + out_dims = out_dims_list[-1] * dim_mul + elif not dim_mul_in_attention and i + 1 in self.downscale_indices: + # multiply embed_dims before downscale layers. + out_dims = out_dims_list[-1] * dim_mul + else: + out_dims = out_dims_list[-1] + + attention_block = MultiScaleBlock( + in_dims=out_dims_list[-1], + out_dims=out_dims, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path=dpr[i], + norm_cfg=norm_cfg, + qkv_pool_kernel=pool_kernel, + stride_q=stride_q, + stride_kv=stride_kv, + rel_pos_spatial=rel_pos_spatial, + residual_pooling=residual_pooling, + dim_mul_in_attention=dim_mul_in_attention, + input_size=input_size, + rel_pos_zero_init=rel_pos_zero_init) + self.blocks.append(attention_block) + + input_size = attention_block.init_out_size + out_dims_list.append(out_dims) + + if i in self.stage_indices: + stage_index = self.stage_indices[i] + if stage_index in self.out_scales: + norm_layer = build_norm_layer(norm_cfg, out_dims)[1] + self.add_module(f'norm{stage_index}', norm_layer) + + def init_weights(self): + super().init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. 
+ return + + if self.use_abs_pos_embed: + trunc_normal_(self.pos_embed, std=0.02) + + def forward(self, x): + """Forward the MViT.""" + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + + if self.use_abs_pos_embed: + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + + outs = [] + for i, block in enumerate(self.blocks): + x, patch_resolution = block(x, patch_resolution) + + if i in self.stage_indices: + stage_index = self.stage_indices[i] + if stage_index in self.out_scales: + B, _, C = x.shape + x = getattr(self, f'norm{stage_index}')(x) + out = x.transpose(1, 2).reshape(B, C, *patch_resolution) + outs.append(out.contiguous()) + + return tuple(outs) diff --git a/mmpretrain/models/backbones/poolformer.py b/mmpretrain/models/backbones/poolformer.py new file mode 100644 index 0000000..e2ad670 --- /dev/null +++ b/mmpretrain/models/backbones/poolformer.py @@ -0,0 +1,416 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import torch +import torch.nn as nn +from mmcv.cnn.bricks import DropPath, build_activation_layer, build_norm_layer +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + + +class PatchEmbed(nn.Module): + """Patch Embedding module implemented by a layer of convolution. + + Input: tensor in shape [B, C, H, W] + Output: tensor in shape [B, C, H/stride, W/stride] + Args: + patch_size (int): Patch size of the patch embedding. Defaults to 16. + stride (int): Stride of the patch embedding. Defaults to 16. + padding (int): Padding of the patch embedding. Defaults to 0. + in_chans (int): Input channels. Defaults to 3. + embed_dim (int): Output dimension of the patch embedding. + Defaults to 768. + norm_layer (module): Normalization module. Defaults to None (not use). + """ + + def __init__(self, + patch_size=16, + stride=16, + padding=0, + in_chans=3, + embed_dim=768, + norm_layer=None): + super().__init__() + self.proj = nn.Conv2d( + in_chans, + embed_dim, + kernel_size=patch_size, + stride=stride, + padding=padding) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def forward(self, x): + x = self.proj(x) + x = self.norm(x) + return x + + +class Pooling(nn.Module): + """Pooling module. + + Args: + pool_size (int): Pooling size. Defaults to 3. + """ + + def __init__(self, pool_size=3): + super().__init__() + self.pool = nn.AvgPool2d( + pool_size, + stride=1, + padding=pool_size // 2, + count_include_pad=False) + + def forward(self, x): + return self.pool(x) - x + + +class Mlp(nn.Module): + """Mlp implemented by with 1*1 convolutions. + + Input: Tensor with shape [B, C, H, W]. + Output: Tensor with shape [B, C, H, W]. + Args: + in_features (int): Dimension of input features. + hidden_features (int): Dimension of hidden features. + out_features (int): Dimension of output features. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + drop (float): Dropout rate. Defaults to 0.0. 
+ """ + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_cfg=dict(type='GELU'), + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Conv2d(in_features, hidden_features, 1) + self.act = build_activation_layer(act_cfg) + self.fc2 = nn.Conv2d(hidden_features, out_features, 1) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class PoolFormerBlock(BaseModule): + """PoolFormer Block. + + Args: + dim (int): Embedding dim. + pool_size (int): Pooling size. Defaults to 3. + mlp_ratio (float): Mlp expansion ratio. Defaults to 4. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='GN', num_groups=1)``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + drop (float): Dropout rate. Defaults to 0. + drop_path (float): Stochastic depth rate. Defaults to 0. + layer_scale_init_value (float): Init value for Layer Scale. + Defaults to 1e-5. + """ + + def __init__(self, + dim, + pool_size=3, + mlp_ratio=4., + norm_cfg=dict(type='GN', num_groups=1), + act_cfg=dict(type='GELU'), + drop=0., + drop_path=0., + layer_scale_init_value=1e-5): + + super().__init__() + + self.norm1 = build_norm_layer(norm_cfg, dim)[1] + self.token_mixer = Pooling(pool_size=pool_size) + self.norm2 = build_norm_layer(norm_cfg, dim)[1] + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_cfg=act_cfg, + drop=drop) + + # The following two techniques are useful to train deep PoolFormers. + self.drop_path = DropPath(drop_path) if drop_path > 0. \ + else nn.Identity() + self.layer_scale_1 = nn.Parameter( + layer_scale_init_value * torch.ones((dim)), requires_grad=True) + self.layer_scale_2 = nn.Parameter( + layer_scale_init_value * torch.ones((dim)), requires_grad=True) + + def forward(self, x): + x = x + self.drop_path( + self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * + self.token_mixer(self.norm1(x))) + x = x + self.drop_path( + self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * + self.mlp(self.norm2(x))) + return x + + +def basic_blocks(dim, + index, + layers, + pool_size=3, + mlp_ratio=4., + norm_cfg=dict(type='GN', num_groups=1), + act_cfg=dict(type='GELU'), + drop_rate=.0, + drop_path_rate=0., + layer_scale_init_value=1e-5): + """ + generate PoolFormer blocks for a stage + return: PoolFormer blocks + """ + blocks = [] + for block_idx in range(layers[index]): + block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / ( + sum(layers) - 1) + blocks.append( + PoolFormerBlock( + dim, + pool_size=pool_size, + mlp_ratio=mlp_ratio, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + drop=drop_rate, + drop_path=block_dpr, + layer_scale_init_value=layer_scale_init_value, + )) + blocks = nn.Sequential(*blocks) + + return blocks + + +@MODELS.register_module() +class PoolFormer(BaseBackbone): + """PoolFormer. + + A PyTorch implementation of PoolFormer introduced by: + `MetaFormer is Actually What You Need for Vision `_ + + Modified from the `official repo + `. + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architecture in ``PoolFormer.arch_settings``. And if dict, it + should include the following two keys: + + - layers (list[int]): Number of blocks at each stage. 
+ - embed_dims (list[int]): The number of channels at each stage. + - mlp_ratios (list[int]): Expansion ratio of MLPs. + - layer_scale_init_value (float): Init value for Layer Scale. + + Defaults to 'S12'. + + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='LN2d', eps=1e-6)``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + in_patch_size (int): The patch size of input image patch embedding. + Defaults to 7. + in_stride (int): The stride of input image patch embedding. + Defaults to 4. + in_pad (int): The padding of input image patch embedding. + Defaults to 2. + down_patch_size (int): The patch size of downsampling patch embedding. + Defaults to 3. + down_stride (int): The stride of downsampling patch embedding. + Defaults to 2. + down_pad (int): The padding of downsampling patch embedding. + Defaults to 1. + drop_rate (float): Dropout rate. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + out_indices (Sequence | int): Output from which network position. + Index 0-6 respectively corresponds to + [stage1, downsampling, stage2, downsampling, stage3, downsampling, stage4] + Defaults to -1, means the last stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + init_cfg (dict, optional): Initialization config dict + """ # noqa: E501 + + # --layers: [x,x,x,x], numbers of layers for the four stages + # --embed_dims, --mlp_ratios: + # embedding dims and mlp ratios for the four stages + # --downsamples: flags to apply downsampling or not in four blocks + arch_settings = { + 's12': { + 'layers': [2, 2, 6, 2], + 'embed_dims': [64, 128, 320, 512], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-5, + }, + 's24': { + 'layers': [4, 4, 12, 4], + 'embed_dims': [64, 128, 320, 512], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-5, + }, + 's36': { + 'layers': [6, 6, 18, 6], + 'embed_dims': [64, 128, 320, 512], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-6, + }, + 'm36': { + 'layers': [6, 6, 18, 6], + 'embed_dims': [96, 192, 384, 768], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-6, + }, + 'm48': { + 'layers': [8, 8, 24, 8], + 'embed_dims': [96, 192, 384, 768], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-6, + }, + } + + def __init__(self, + arch='s12', + pool_size=3, + norm_cfg=dict(type='GN', num_groups=1), + act_cfg=dict(type='GELU'), + in_patch_size=7, + in_stride=4, + in_pad=2, + down_patch_size=3, + down_stride=2, + down_pad=1, + drop_rate=0., + drop_path_rate=0., + out_indices=-1, + frozen_stages=0, + init_cfg=None): + + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + assert 'layers' in arch and 'embed_dims' in arch, \ + f'The arch dict must have "layers" and "embed_dims", ' \ + f'but got {list(arch.keys())}.' 
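+ # A custom arch dict mirroring the built-in 's12' setting would look like
+ # (illustrative only):
+ # arch = dict(layers=[2, 2, 6, 2], embed_dims=[64, 128, 320, 512],
+ # mlp_ratios=[4, 4, 4, 4], layer_scale_init_value=1e-5)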
+ + layers = arch['layers'] + embed_dims = arch['embed_dims'] + mlp_ratios = arch['mlp_ratios'] \ + if 'mlp_ratios' in arch else [4, 4, 4, 4] + layer_scale_init_value = arch['layer_scale_init_value'] \ + if 'layer_scale_init_value' in arch else 1e-5 + + self.patch_embed = PatchEmbed( + patch_size=in_patch_size, + stride=in_stride, + padding=in_pad, + in_chans=3, + embed_dim=embed_dims[0]) + + # set the main block in network + network = [] + for i in range(len(layers)): + stage = basic_blocks( + embed_dims[i], + i, + layers, + pool_size=pool_size, + mlp_ratio=mlp_ratios[i], + norm_cfg=norm_cfg, + act_cfg=act_cfg, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value) + network.append(stage) + if i >= len(layers) - 1: + break + if embed_dims[i] != embed_dims[i + 1]: + # downsampling between two stages + network.append( + PatchEmbed( + patch_size=down_patch_size, + stride=down_stride, + padding=down_pad, + in_chans=embed_dims[i], + embed_dim=embed_dims[i + 1])) + + self.network = nn.ModuleList(network) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = 7 + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + if self.out_indices: + for i_layer in self.out_indices: + layer = build_norm_layer(norm_cfg, + embed_dims[(i_layer + 1) // 2])[1] + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + + self.frozen_stages = frozen_stages + self._freeze_stages() + + def forward_embeddings(self, x): + x = self.patch_embed(x) + return x + + def forward_tokens(self, x): + outs = [] + for idx, block in enumerate(self.network): + x = block(x) + if idx in self.out_indices: + norm_layer = getattr(self, f'norm{idx}') + x_out = norm_layer(x) + outs.append(x_out) + return tuple(outs) + + def forward(self, x): + # input embedding + x = self.forward_embeddings(x) + # through backbone + x = self.forward_tokens(x) + return x + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages): + # Include both block and downsample layer. + module = self.network[i] + module.eval() + for param in module.parameters(): + param.requires_grad = False + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + norm_layer.eval() + for param in norm_layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(PoolFormer, self).train(mode) + self._freeze_stages() diff --git a/mmpretrain/models/backbones/regnet.py b/mmpretrain/models/backbones/regnet.py new file mode 100644 index 0000000..85dbdef --- /dev/null +++ b/mmpretrain/models/backbones/regnet.py @@ -0,0 +1,312 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer + +from mmpretrain.registry import MODELS +from .resnet import ResNet +from .resnext import Bottleneck + + +@MODELS.register_module() +class RegNet(ResNet): + """RegNet backbone. + + More details can be found in `paper `_ . + + Args: + arch (dict): The parameter of RegNets. 
+ - w0 (int): initial width + - wa (float): slope of width + - wm (float): quantization parameter to quantize the width + - depth (int): depth of the backbone + - group_w (int): width of group + - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck. + strides (Sequence[int]): Strides of the first block of each stage. + base_channels (int): Base channels after stem layer. + in_channels (int): Number of input image channels. Default: 3. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Default: "pytorch". + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. Default: -1. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + + Example: + >>> from mmpretrain.models import RegNet + >>> import torch + >>> self = RegNet( + arch=dict( + w0=88, + wa=26.31, + wm=2.25, + group_w=48, + depth=25, + bot_mul=1.0)) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 96, 8, 8) + (1, 192, 4, 4) + (1, 432, 2, 2) + (1, 1008, 1, 1) + """ + arch_settings = { + 'regnetx_400mf': + dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0), + 'regnetx_800mf': + dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0), + 'regnetx_1.6gf': + dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0), + 'regnetx_3.2gf': + dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0), + 'regnetx_4.0gf': + dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0), + 'regnetx_6.4gf': + dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0), + 'regnetx_8.0gf': + dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0), + 'regnetx_12gf': + dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0), + } + + def __init__(self, + arch, + in_channels=3, + stem_channels=32, + base_channels=32, + strides=(2, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(3, ), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + init_cfg=None): + super(ResNet, self).__init__(init_cfg) + + # Generate RegNet parameters first + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'"arch": "{arch}" is not one of the' \ + ' arch_settings' + arch = self.arch_settings[arch] + elif not isinstance(arch, dict): + raise TypeError('Expect "arch" to be either a string ' + f'or a dict, got {type(arch)}') + + widths, num_stages = self.generate_regnet( + arch['w0'], + arch['wa'], + arch['wm'], + arch['depth'], + ) + # Convert to per stage format + stage_widths, stage_blocks = self.get_stages_from_blocks(widths) + # 
Generate group widths and bot muls + group_widths = [arch['group_w'] for _ in range(num_stages)] + self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)] + # Adjust the compatibility of stage_widths and group_widths + stage_widths, group_widths = self.adjust_width_group( + stage_widths, self.bottleneck_ratio, group_widths) + + # Group params by stage + self.stage_widths = stage_widths + self.group_widths = group_widths + self.depth = sum(stage_blocks) + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + if self.deep_stem: + raise NotImplementedError( + 'deep_stem has not been implemented for RegNet') + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.stage_blocks = stage_blocks[:num_stages] + + self._make_stem_layer(in_channels, stem_channels) + + _in_channels = stem_channels + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = self.strides[i] + dilation = self.dilations[i] + group_width = self.group_widths[i] + width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i])) + stage_groups = width // group_width + + res_layer = self.make_res_layer( + block=Bottleneck, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=self.stage_widths[i], + expansion=1, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + base_channels=self.stage_widths[i], + groups=stage_groups, + width_per_group=group_width) + _in_channels = self.stage_widths[i] + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = stage_widths[-1] + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=2, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def generate_regnet(self, + initial_width, + width_slope, + width_parameter, + depth, + divisor=8): + """Generates per block width from RegNet parameters. + + Args: + initial_width ([int]): Initial width of the backbone + width_slope ([float]): Slope of the quantized linear function + width_parameter ([int]): Parameter used to quantize the width. + depth ([int]): Depth of the backbone. + divisor (int): The divisor of channels. Defaults to 8. + + Returns: + tuple: tuple containing: + - list: Widths of each stage. + - int: The number of stages. 
+ """ + assert width_slope >= 0 + assert initial_width > 0 + assert width_parameter > 1 + assert initial_width % divisor == 0 + widths_cont = np.arange(depth) * width_slope + initial_width + ks = np.round( + np.log(widths_cont / initial_width) / np.log(width_parameter)) + widths = initial_width * np.power(width_parameter, ks) + widths = np.round(np.divide(widths, divisor)) * divisor + num_stages = len(np.unique(widths)) + widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist() + return widths, num_stages + + @staticmethod + def quantize_float(number, divisor): + """Converts a float to closest non-zero int divisible by divior. + + Args: + number (int): Original number to be quantized. + divisor (int): Divisor used to quantize the number. + + Returns: + int: quantized number that is divisible by devisor. + """ + return int(round(number / divisor) * divisor) + + def adjust_width_group(self, widths, bottleneck_ratio, groups): + """Adjusts the compatibility of widths and groups. + + Args: + widths (list[int]): Width of each stage. + bottleneck_ratio (float): Bottleneck ratio. + groups (int): number of groups in each stage + + Returns: + tuple(list): The adjusted widths and groups of each stage. + """ + bottleneck_width = [ + int(w * b) for w, b in zip(widths, bottleneck_ratio) + ] + groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)] + bottleneck_width = [ + self.quantize_float(w_bot, g) + for w_bot, g in zip(bottleneck_width, groups) + ] + widths = [ + int(w_bot / b) + for w_bot, b in zip(bottleneck_width, bottleneck_ratio) + ] + return widths, groups + + def get_stages_from_blocks(self, widths): + """Gets widths/stage_blocks of network at each stage. + + Args: + widths (list[int]): Width in each stage. + + Returns: + tuple(list): width and depth of each stage + """ + width_diff = [ + width != width_prev + for width, width_prev in zip(widths + [0], [0] + widths) + ] + stage_widths = [ + width for width, diff in zip(widths, width_diff[:-1]) if diff + ] + stage_blocks = np.diff([ + depth for depth, diff in zip(range(len(width_diff)), width_diff) + if diff + ]).tolist() + return stage_widths, stage_blocks + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) diff --git a/mmpretrain/models/backbones/replknet.py b/mmpretrain/models/backbones/replknet.py new file mode 100644 index 0000000..4dce415 --- /dev/null +++ b/mmpretrain/models/backbones/replknet.py @@ -0,0 +1,668 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint +from mmcv.cnn import build_activation_layer, build_norm_layer +from mmcv.cnn.bricks import DropPath +from mmengine.model import BaseModule +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + + +def conv_bn(in_channels, + out_channels, + kernel_size, + stride, + padding, + groups, + dilation=1, + norm_cfg=dict(type='BN')): + """Construct a sequential conv and bn. + + Args: + in_channels (int): Dimension of input features. + out_channels (int): Dimension of output features. + kernel_size (int): kernel_size of the convolution. + stride (int): stride of the convolution. + padding (int): stride of the convolution. + groups (int): groups of the convolution. 
+ dilation (int): dilation of the convolution. Default to 1. + norm_cfg (dict): dictionary to construct and config norm layer. + Default to ``dict(type='BN', requires_grad=True)``. + + Returns: + nn.Sequential(): A conv layer and a batch norm layer. + """ + if padding is None: + padding = kernel_size // 2 + result = nn.Sequential() + result.add_module( + 'conv', + nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=False)) + result.add_module('bn', build_norm_layer(norm_cfg, out_channels)[1]) + return result + + +def conv_bn_relu(in_channels, + out_channels, + kernel_size, + stride, + padding, + groups, + dilation=1): + """Construct a sequential conv, bn and relu. + + Args: + in_channels (int): Dimension of input features. + out_channels (int): Dimension of output features. + kernel_size (int): kernel_size of the convolution. + stride (int): stride of the convolution. + padding (int): stride of the convolution. + groups (int): groups of the convolution. + dilation (int): dilation of the convolution. Default to 1. + + Returns: + nn.Sequential(): A conv layer, batch norm layer and a relu function. + """ + + if padding is None: + padding = kernel_size // 2 + result = conv_bn( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=groups, + dilation=dilation) + result.add_module('nonlinear', nn.ReLU()) + return result + + +def fuse_bn(conv, bn): + """Fuse the parameters in a branch with a conv and bn. + + Args: + conv (nn.Conv2d): The convolution module to fuse. + bn (nn.BatchNorm2d): The batch normalization to fuse. + + Returns: + tuple[torch.Tensor, torch.Tensor]: The parameters obtained after + fusing the parameters of conv and bn in one branch. + The first element is the weight and the second is the bias. + """ + kernel = conv.weight + running_mean = bn.running_mean + running_var = bn.running_var + gamma = bn.weight + beta = bn.bias + eps = bn.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + + +class ReparamLargeKernelConv(BaseModule): + """Super large kernel implemented by with large convolutions. + + Input: Tensor with shape [B, C, H, W]. + Output: Tensor with shape [B, C, H, W]. + + Args: + in_channels (int): Dimension of input features. + out_channels (int): Dimension of output features. + kernel_size (int): kernel_size of the large convolution. + stride (int): stride of the large convolution. + groups (int): groups of the large convolution. + small_kernel (int): kernel_size of the small convolution. + small_kernel_merged (bool): Whether to switch the model structure to + deployment mode (merge the small kernel to the large kernel). + Default to False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + groups, + small_kernel, + small_kernel_merged=False, + init_cfg=None): + super(ReparamLargeKernelConv, self).__init__(init_cfg) + self.kernel_size = kernel_size + self.small_kernel = small_kernel + self.small_kernel_merged = small_kernel_merged + # We assume the conv does not change the feature map size, + # so padding = k//2. + # Otherwise, you may configure padding as you wish, + # and change the padding of small_conv accordingly. 
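+        # For example, the 31x31 large kernel used by RepLKNet-31B gets
+        # padding = 15, so the stride-1 large-kernel conv preserves H and W,
+        # and the parallel 5x5 small_conv (padding = 2) stays centre-aligned
+        # with it.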
+ padding = kernel_size // 2 + if small_kernel_merged: + self.lkb_reparam = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=1, + groups=groups, + bias=True) + else: + self.lkb_origin = conv_bn( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=1, + groups=groups) + if small_kernel is not None: + assert small_kernel <= kernel_size + self.small_conv = conv_bn( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=small_kernel, + stride=stride, + padding=small_kernel // 2, + groups=groups, + dilation=1) + + def forward(self, inputs): + if hasattr(self, 'lkb_reparam'): + out = self.lkb_reparam(inputs) + else: + out = self.lkb_origin(inputs) + if hasattr(self, 'small_conv'): + out += self.small_conv(inputs) + return out + + def get_equivalent_kernel_bias(self): + eq_k, eq_b = fuse_bn(self.lkb_origin.conv, self.lkb_origin.bn) + if hasattr(self, 'small_conv'): + small_k, small_b = fuse_bn(self.small_conv.conv, + self.small_conv.bn) + eq_b += small_b + # add to the central part + eq_k += nn.functional.pad( + small_k, [(self.kernel_size - self.small_kernel) // 2] * 4) + return eq_k, eq_b + + def merge_kernel(self): + """Switch the model structure from training mode to deployment mode.""" + if self.small_kernel_merged: + return + eq_k, eq_b = self.get_equivalent_kernel_bias() + self.lkb_reparam = nn.Conv2d( + in_channels=self.lkb_origin.conv.in_channels, + out_channels=self.lkb_origin.conv.out_channels, + kernel_size=self.lkb_origin.conv.kernel_size, + stride=self.lkb_origin.conv.stride, + padding=self.lkb_origin.conv.padding, + dilation=self.lkb_origin.conv.dilation, + groups=self.lkb_origin.conv.groups, + bias=True) + + self.lkb_reparam.weight.data = eq_k + self.lkb_reparam.bias.data = eq_b + self.__delattr__('lkb_origin') + if hasattr(self, 'small_conv'): + self.__delattr__('small_conv') + + self.small_kernel_merged = True + + +class ConvFFN(BaseModule): + """Mlp implemented by with 1*1 convolutions. + + Input: Tensor with shape [B, C, H, W]. + Output: Tensor with shape [B, C, H, W]. + + Args: + in_channels (int): Dimension of input features. + internal_channels (int): Dimension of hidden features. + out_channels (int): Dimension of output features. + drop_path (float): Stochastic depth rate. Defaults to 0. + norm_cfg (dict): dictionary to construct and config norm layer. + Default to ``dict(type='BN', requires_grad=True)``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels, + internal_channels, + out_channels, + drop_path, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='GELU'), + init_cfg=None): + super(ConvFFN, self).__init__(init_cfg) + self.drop_path = DropPath( + drop_prob=drop_path) if drop_path > 0. 
else nn.Identity() + self.preffn_bn = build_norm_layer(norm_cfg, in_channels)[1] + self.pw1 = conv_bn( + in_channels=in_channels, + out_channels=internal_channels, + kernel_size=1, + stride=1, + padding=0, + groups=1) + self.pw2 = conv_bn( + in_channels=internal_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + groups=1) + self.nonlinear = build_activation_layer(act_cfg) + + def forward(self, x): + out = self.preffn_bn(x) + out = self.pw1(out) + out = self.nonlinear(out) + out = self.pw2(out) + return x + self.drop_path(out) + + +class RepLKBlock(BaseModule): + """RepLKBlock for RepLKNet backbone. + + Args: + in_channels (int): The input channels of the block. + dw_channels (int): The intermediate channels of the block, + i.e., input channels of the large kernel convolution. + block_lk_size (int): size of the super large kernel. Defaults: 31. + small_kernel (int): size of the parallel small kernel. Defaults: 5. + drop_path (float): Stochastic depth rate. Defaults: 0. + small_kernel_merged (bool): Whether to switch the model structure to + deployment mode (merge the small kernel to the large kernel). + Default to False. + norm_cfg (dict): dictionary to construct and config norm layer. + Default to ``dict(type='BN', requires_grad=True)``. + act_cfg (dict): Config dict for activation layer. + Default to ``dict(type='ReLU')``. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default to None + """ + + def __init__(self, + in_channels, + dw_channels, + block_lk_size, + small_kernel, + drop_path, + small_kernel_merged=False, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super(RepLKBlock, self).__init__(init_cfg) + self.pw1 = conv_bn_relu(in_channels, dw_channels, 1, 1, 0, groups=1) + self.pw2 = conv_bn(dw_channels, in_channels, 1, 1, 0, groups=1) + self.large_kernel = ReparamLargeKernelConv( + in_channels=dw_channels, + out_channels=dw_channels, + kernel_size=block_lk_size, + stride=1, + groups=dw_channels, + small_kernel=small_kernel, + small_kernel_merged=small_kernel_merged) + self.lk_nonlinear = build_activation_layer(act_cfg) + self.prelkb_bn = build_norm_layer(norm_cfg, in_channels)[1] + self.drop_path = DropPath( + drop_prob=drop_path) if drop_path > 0. else nn.Identity() + # print('drop path:', self.drop_path) + + def forward(self, x): + out = self.prelkb_bn(x) + out = self.pw1(out) + out = self.large_kernel(out) + out = self.lk_nonlinear(out) + out = self.pw2(out) + return x + self.drop_path(out) + + +class RepLKNetStage(BaseModule): + """ + generate RepLKNet blocks for a stage + return: RepLKNet blocks + + Args: + channels (int): The input channels of the stage. + num_blocks (int): The number of blocks of the stage. + stage_lk_size (int): size of the super large kernel. Defaults: 31. + drop_path (float): Stochastic depth rate. Defaults: 0. + small_kernel (int): size of the parallel small kernel. Defaults: 5. + dw_ratio (float): The intermediate channels + expansion ratio of the block. Defaults: 1. + ffn_ratio (float): Mlp expansion ratio. Defaults to 4. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default to False. + small_kernel_merged (bool): Whether to switch the model structure to + deployment mode (merge the small kernel to the large kernel). + Default to False. + norm_intermediate_features (bool): Construct and config norm layer + or not. 
+ Using True will normalize the intermediate features for + downstream dense prediction tasks. + norm_cfg (dict): dictionary to construct and config norm layer. + Default to ``dict(type='BN', requires_grad=True)``. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default to None + """ + + def __init__( + self, + channels, + num_blocks, + stage_lk_size, + drop_path, + small_kernel, + dw_ratio=1, + ffn_ratio=4, + with_cp=False, # train with torch.utils.checkpoint to save memory + small_kernel_merged=False, + norm_intermediate_features=False, + norm_cfg=dict(type='BN'), + init_cfg=None): + super(RepLKNetStage, self).__init__(init_cfg) + self.with_cp = with_cp + blks = [] + for i in range(num_blocks): + block_drop_path = drop_path[i] if isinstance(drop_path, + list) else drop_path + # Assume all RepLK Blocks within a stage share the same lk_size. + # You may tune it on your own model. + replk_block = RepLKBlock( + in_channels=channels, + dw_channels=int(channels * dw_ratio), + block_lk_size=stage_lk_size, + small_kernel=small_kernel, + drop_path=block_drop_path, + small_kernel_merged=small_kernel_merged) + convffn_block = ConvFFN( + in_channels=channels, + internal_channels=int(channels * ffn_ratio), + out_channels=channels, + drop_path=block_drop_path) + blks.append(replk_block) + blks.append(convffn_block) + self.blocks = nn.ModuleList(blks) + if norm_intermediate_features: + self.norm = build_norm_layer(norm_cfg, channels)[1] + else: + self.norm = nn.Identity() + + def forward(self, x): + for blk in self.blocks: + if self.with_cp: + x = checkpoint.checkpoint(blk, x) # Save training memory + else: + x = blk(x) + return x + + +@MODELS.register_module() +class RepLKNet(BaseBackbone): + """RepLKNet backbone. + + A PyTorch impl of : + `Scaling Up Your Kernels to 31x31: Revisiting Large Kernel Design in CNNs + `_ + + Args: + arch (str | dict): The parameter of RepLKNet. + If it's a dict, it should contain the following keys: + + - large_kernel_sizes (Sequence[int]): + Large kernel size in each stage. + - layers (Sequence[int]): Number of blocks in each stage. + - channels (Sequence[int]): Number of channels in each stage. + - small_kernel (int): size of the parallel small kernel. + - dw_ratio (float): The intermediate channels + expansion ratio of the block. + in_channels (int): Number of input image channels. Default to 3. + ffn_ratio (float): Mlp expansion ratio. Defaults to 4. + out_indices (Sequence[int]): Output from which stages. + Default to (3, ). + strides (Sequence[int]): Strides of the first block of each stage. + Default to (2, 2, 2, 2). + dilations (Sequence[int]): Dilation of each stage. + Default to (1, 1, 1, 1). + frozen_stages (int): Stages to be frozen + (all param fixed). -1 means not freezing any parameters. + Default to -1. + conv_cfg (dict | None): The config dict for conv layers. + Default to None. + norm_cfg (dict): The config dict for norm layers. + Default to ``dict(type='BN')``. + act_cfg (dict): Config dict for activation layer. + Default to ``dict(type='ReLU')``. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default to False. + deploy (bool): Whether to switch the model structure to deployment + mode. Default to False. + norm_intermediate_features (bool): Construct and + config norm layer or not. + Using True will normalize the intermediate features + for downstream dense prediction tasks. 
+ norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default to False. + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + arch_settings = { + '31B': + dict( + large_kernel_sizes=[31, 29, 27, 13], + layers=[2, 2, 18, 2], + channels=[128, 256, 512, 1024], + small_kernel=5, + dw_ratio=1), + '31L': + dict( + large_kernel_sizes=[31, 29, 27, 13], + layers=[2, 2, 18, 2], + channels=[192, 384, 768, 1536], + small_kernel=5, + dw_ratio=1), + 'XL': + dict( + large_kernel_sizes=[27, 27, 27, 13], + layers=[2, 2, 18, 2], + channels=[256, 512, 1024, 2048], + small_kernel=None, + dw_ratio=1.5), + } + + def __init__(self, + arch, + in_channels=3, + ffn_ratio=4, + out_indices=(3, ), + strides=(2, 2, 2, 2), + dilations=(1, 1, 1, 1), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + drop_path_rate=0.3, + small_kernel_merged=False, + norm_intermediate_features=False, + norm_eval=False, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ]): + super(RepLKNet, self).__init__(init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'"arch": "{arch}" is not one of the arch_settings' + arch = self.arch_settings[arch] + elif not isinstance(arch, dict): + raise TypeError('Expect "arch" to be either a string ' + f'or a dict, got {type(arch)}') + + assert len(arch['layers']) == len( + arch['channels']) == len(strides) == len(dilations) + assert max(out_indices) < len(arch['layers']) + + self.arch = arch + self.in_channels = in_channels + self.out_indices = out_indices + self.strides = strides + self.dilations = dilations + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.with_cp = with_cp + self.drop_path_rate = drop_path_rate + self.small_kernel_merged = small_kernel_merged + self.norm_eval = norm_eval + self.norm_intermediate_features = norm_intermediate_features + + self.out_indices = out_indices + + base_width = self.arch['channels'][0] + self.norm_intermediate_features = norm_intermediate_features + self.num_stages = len(self.arch['layers']) + self.stem = nn.ModuleList([ + conv_bn_relu( + in_channels=in_channels, + out_channels=base_width, + kernel_size=3, + stride=2, + padding=1, + groups=1), + conv_bn_relu( + in_channels=base_width, + out_channels=base_width, + kernel_size=3, + stride=1, + padding=1, + groups=base_width), + conv_bn_relu( + in_channels=base_width, + out_channels=base_width, + kernel_size=1, + stride=1, + padding=0, + groups=1), + conv_bn_relu( + in_channels=base_width, + out_channels=base_width, + kernel_size=3, + stride=2, + padding=1, + groups=base_width) + ]) + # stochastic depth. We set block-wise drop-path rate. + # The higher level blocks are more likely to be dropped. + # This implementation follows Swin. 
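+        # For example, with the default drop_path_rate=0.3 and the '31B'
+        # layout layers=[2, 2, 18, 2] (24 blocks in total), torch.linspace
+        # yields dpr[0] = 0.0 and dpr[23] = 0.3, and each stage below takes
+        # its contiguous slice: dpr[0:2], dpr[2:4], dpr[4:22], dpr[22:24].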
+ dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, + sum(self.arch['layers'])) + ] + self.stages = nn.ModuleList() + self.transitions = nn.ModuleList() + for stage_idx in range(self.num_stages): + layer = RepLKNetStage( + channels=self.arch['channels'][stage_idx], + num_blocks=self.arch['layers'][stage_idx], + stage_lk_size=self.arch['large_kernel_sizes'][stage_idx], + drop_path=dpr[sum(self.arch['layers'][:stage_idx] + ):sum(self.arch['layers'][:stage_idx + 1])], + small_kernel=self.arch['small_kernel'], + dw_ratio=self.arch['dw_ratio'], + ffn_ratio=ffn_ratio, + with_cp=with_cp, + small_kernel_merged=small_kernel_merged, + norm_intermediate_features=(stage_idx in out_indices)) + self.stages.append(layer) + if stage_idx < len(self.arch['layers']) - 1: + transition = nn.Sequential( + conv_bn_relu( + self.arch['channels'][stage_idx], + self.arch['channels'][stage_idx + 1], + 1, + 1, + 0, + groups=1), + conv_bn_relu( + self.arch['channels'][stage_idx + 1], + self.arch['channels'][stage_idx + 1], + 3, + stride=2, + padding=1, + groups=self.arch['channels'][stage_idx + 1])) + self.transitions.append(transition) + + def forward_features(self, x): + x = self.stem[0](x) + for stem_layer in self.stem[1:]: + if self.with_cp: + x = checkpoint.checkpoint(stem_layer, x) # save memory + else: + x = stem_layer(x) + + # Need the intermediate feature maps + outs = [] + for stage_idx in range(self.num_stages): + x = self.stages[stage_idx](x) + if stage_idx in self.out_indices: + outs.append(self.stages[stage_idx].norm(x)) + # For RepLKNet-XL normalize the features + # before feeding them into the heads + if stage_idx < self.num_stages - 1: + x = self.transitions[stage_idx](x) + return outs + + def forward(self, x): + x = self.forward_features(x) + return tuple(x) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + for i in range(self.frozen_stages): + stage = self.stages[i] + stage.eval() + for param in stage.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(RepLKNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + def switch_to_deploy(self): + for m in self.modules(): + if hasattr(m, 'merge_kernel'): + m.merge_kernel() + self.small_kernel_merged = True diff --git a/mmpretrain/models/backbones/repmlp.py b/mmpretrain/models/backbones/repmlp.py new file mode 100644 index 0000000..f7c06c4 --- /dev/null +++ b/mmpretrain/models/backbones/repmlp.py @@ -0,0 +1,578 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Adapted from official impl at https://github.com/DingXiaoH/RepMLP. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import (ConvModule, build_activation_layer, build_conv_layer, + build_norm_layer) +from mmcv.cnn.bricks.transformer import PatchEmbed as _PatchEmbed +from mmengine.model import BaseModule, ModuleList, Sequential + +from mmpretrain.models.utils import SELayer, to_2tuple +from mmpretrain.registry import MODELS + + +def fuse_bn(conv_or_fc, bn): + """fuse conv and bn.""" + std = (bn.running_var + bn.eps).sqrt() + tmp_weight = bn.weight / std + tmp_weight = tmp_weight.reshape(-1, 1, 1, 1) + + if len(tmp_weight) == conv_or_fc.weight.size(0): + return (conv_or_fc.weight * tmp_weight, + bn.bias - bn.running_mean * bn.weight / std) + else: + # in RepMLPBlock, dim0 of fc3 weights and fc3_bn weights + # are different. 
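+        # For example (numbers purely illustrative): with num_sharesets=4
+        # and 8x8 patches, fc3 has 4 * 8 * 8 = 256 output channels while
+        # fc3_bn only has 4, so every BN scale/shift is repeated 64 times
+        # along dim 0 before fusing.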
+ repeat_times = conv_or_fc.weight.size(0) // len(tmp_weight) + repeated = tmp_weight.repeat_interleave(repeat_times, 0) + fused_weight = conv_or_fc.weight * repeated + bias = bn.bias - bn.running_mean * bn.weight / std + fused_bias = (bias).repeat_interleave(repeat_times, 0) + return (fused_weight, fused_bias) + + +class PatchEmbed(_PatchEmbed): + """Image to Patch Embedding. + + Compared with default Patch Embedding(in ViT), Patch Embedding of RepMLP + have ReLu and do not convert output tensor into shape (N, L, C). + + Args: + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_type (str): The type of convolution + to generate patch embedding. Default: "Conv2d". + kernel_size (int): The kernel_size of embedding conv. Default: 16. + stride (int): The slide stride of embedding conv. + Default: 16. + padding (int | tuple | string): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int): The dilation rate of embedding conv. Default: 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + input_size (int | tuple | None): The size of input, which will be + used to calculate the out size. Only works when `dynamic_size` + is False. Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. + """ + + def __init__(self, *args, **kwargs): + super(PatchEmbed, self).__init__(*args, **kwargs) + self.relu = nn.ReLU() + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. + Returns: + tuple: Contains merged results and its spatial shape. + - x (Tensor): The output tensor. + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_h, out_w). + """ + + if self.adaptive_padding: + x = self.adaptive_padding(x) + + x = self.projection(x) + if self.norm is not None: + x = self.norm(x) + x = self.relu(x) + out_size = (x.shape[2], x.shape[3]) + return x, out_size + + +class GlobalPerceptron(SELayer): + """GlobalPerceptron implemented by using ``mmpretrain.modes.SELayer``. + + Args: + input_channels (int): The number of input (and output) channels + in the GlobalPerceptron. + ratio (int): Squeeze ratio in GlobalPerceptron, the intermediate + channel will be ``make_divisible(channels // ratio, divisor)``. + """ + + def __init__(self, input_channels: int, ratio: int, **kwargs) -> None: + super(GlobalPerceptron, self).__init__( + channels=input_channels, + ratio=ratio, + return_weight=True, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), + **kwargs) + + +class RepMLPBlock(BaseModule): + """Basic RepMLPNet, consists of PartitionPerceptron and GlobalPerceptron. + + Args: + channels (int): The number of input and the output channels of the + block. + path_h (int): The height of patches. + path_w (int): The weidth of patches. + reparam_conv_kernels (Squeue(int) | None): The conv kernels in the + GlobalPerceptron. Default: None. + globalperceptron_ratio (int): The reducation ratio in the + GlobalPerceptron. Default: 4. + num_sharesets (int): The number of sharesets in the + PartitionPerceptron. Default 1. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). 
+ deploy (bool): Whether to switch the model structure to + deployment mode. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + channels, + path_h, + path_w, + reparam_conv_kernels=None, + globalperceptron_ratio=4, + num_sharesets=1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + deploy=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.deploy = deploy + self.channels = channels + self.num_sharesets = num_sharesets + self.path_h, self.path_w = path_h, path_w + # the input channel of fc3 + self._path_vec_channles = path_h * path_w * num_sharesets + + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.gp = GlobalPerceptron( + input_channels=channels, ratio=globalperceptron_ratio) + + # using a conv layer to implement a fc layer + self.fc3 = build_conv_layer( + conv_cfg, + in_channels=self._path_vec_channles, + out_channels=self._path_vec_channles, + kernel_size=1, + stride=1, + padding=0, + bias=deploy, + groups=num_sharesets) + if deploy: + self.fc3_bn = nn.Identity() + else: + norm_layer = build_norm_layer(norm_cfg, num_sharesets)[1] + self.add_module('fc3_bn', norm_layer) + + self.reparam_conv_kernels = reparam_conv_kernels + if not deploy and reparam_conv_kernels is not None: + for k in reparam_conv_kernels: + conv_branch = ConvModule( + in_channels=num_sharesets, + out_channels=num_sharesets, + kernel_size=k, + stride=1, + padding=k // 2, + norm_cfg=dict(type='BN', requires_grad=True), + groups=num_sharesets, + act_cfg=None) + self.__setattr__('repconv{}'.format(k), conv_branch) + + def partition(self, x, h_parts, w_parts): + # convert (N, C, H, W) to (N, h_parts, w_parts, C, path_h, path_w) + x = x.reshape(-1, self.channels, h_parts, self.path_h, w_parts, + self.path_w) + x = x.permute(0, 2, 4, 1, 3, 5) + return x + + def partition_affine(self, x, h_parts, w_parts): + """perform Partition Perceptron.""" + fc_inputs = x.reshape(-1, self._path_vec_channles, 1, 1) + out = self.fc3(fc_inputs) + out = out.reshape(-1, self.num_sharesets, self.path_h, self.path_w) + out = self.fc3_bn(out) + out = out.reshape(-1, h_parts, w_parts, self.num_sharesets, + self.path_h, self.path_w) + return out + + def forward(self, inputs): + # Global Perceptron + global_vec = self.gp(inputs) + + origin_shape = inputs.size() + h_parts = origin_shape[2] // self.path_h + w_parts = origin_shape[3] // self.path_w + + partitions = self.partition(inputs, h_parts, w_parts) + + # Channel Perceptron + fc3_out = self.partition_affine(partitions, h_parts, w_parts) + + # perform Local Perceptron + if self.reparam_conv_kernels is not None and not self.deploy: + conv_inputs = partitions.reshape(-1, self.num_sharesets, + self.path_h, self.path_w) + conv_out = 0 + for k in self.reparam_conv_kernels: + conv_branch = self.__getattr__('repconv{}'.format(k)) + conv_out += conv_branch(conv_inputs) + conv_out = conv_out.reshape(-1, h_parts, w_parts, + self.num_sharesets, self.path_h, + self.path_w) + fc3_out += conv_out + + # N, h_parts, w_parts, num_sharesets, out_h, out_w + fc3_out = fc3_out.permute(0, 3, 1, 4, 2, 5) + out = fc3_out.reshape(*origin_shape) + out = out * global_vec + return out + + def get_equivalent_fc3(self): + """get the equivalent fc3 weight and bias.""" + fc_weight, fc_bias = fuse_bn(self.fc3, self.fc3_bn) + if self.reparam_conv_kernels is not None: + largest_k = max(self.reparam_conv_kernels) + largest_branch = self.__getattr__('repconv{}'.format(largest_k)) + total_kernel, 
total_bias = fuse_bn(largest_branch.conv, + largest_branch.bn) + for k in self.reparam_conv_kernels: + if k != largest_k: + k_branch = self.__getattr__('repconv{}'.format(k)) + kernel, bias = fuse_bn(k_branch.conv, k_branch.bn) + total_kernel += F.pad(kernel, [(largest_k - k) // 2] * 4) + total_bias += bias + rep_weight, rep_bias = self._convert_conv_to_fc( + total_kernel, total_bias) + final_fc3_weight = rep_weight.reshape_as(fc_weight) + fc_weight + final_fc3_bias = rep_bias + fc_bias + else: + final_fc3_weight = fc_weight + final_fc3_bias = fc_bias + return final_fc3_weight, final_fc3_bias + + def local_inject(self): + """inject the Local Perceptron into Partition Perceptron.""" + self.deploy = True + # Locality Injection + fc3_weight, fc3_bias = self.get_equivalent_fc3() + # Remove Local Perceptron + if self.reparam_conv_kernels is not None: + for k in self.reparam_conv_kernels: + self.__delattr__('repconv{}'.format(k)) + self.__delattr__('fc3') + self.__delattr__('fc3_bn') + self.fc3 = build_conv_layer( + self.conv_cfg, + self._path_vec_channles, + self._path_vec_channles, + 1, + 1, + 0, + bias=True, + groups=self.num_sharesets) + self.fc3_bn = nn.Identity() + self.fc3.weight.data = fc3_weight + self.fc3.bias.data = fc3_bias + + def _convert_conv_to_fc(self, conv_kernel, conv_bias): + """convert conv_k1 to fc, which is still a conv_k2, and the k2 > k1.""" + in_channels = torch.eye(self.path_h * self.path_w).repeat( + 1, self.num_sharesets).reshape(self.path_h * self.path_w, + self.num_sharesets, self.path_h, + self.path_w).to(conv_kernel.device) + fc_k = F.conv2d( + in_channels, + conv_kernel, + padding=(conv_kernel.size(2) // 2, conv_kernel.size(3) // 2), + groups=self.num_sharesets) + fc_k = fc_k.reshape(self.path_w * self.path_w, self.num_sharesets * + self.path_h * self.path_w).t() + fc_bias = conv_bias.repeat_interleave(self.path_h * self.path_w) + return fc_k, fc_bias + + +class RepMLPNetUnit(BaseModule): + """A basic unit in RepMLPNet : [REPMLPBlock + BN + ConvFFN + BN]. + + Args: + channels (int): The number of input and the output channels of the + unit. + path_h (int): The height of patches. + path_w (int): The weidth of patches. + reparam_conv_kernels (Squeue(int) | None): The conv kernels in the + GlobalPerceptron. Default: None. + globalperceptron_ratio (int): The reducation ratio in the + GlobalPerceptron. Default: 4. + num_sharesets (int): The number of sharesets in the + PartitionPerceptron. Default 1. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + deploy (bool): Whether to switch the model structure to + deployment mode. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ Default: None + """ + + def __init__(self, + channels, + path_h, + path_w, + reparam_conv_kernels, + globalperceptron_ratio, + norm_cfg=dict(type='BN', requires_grad=True), + ffn_expand=4, + num_sharesets=1, + deploy=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.repmlp_block = RepMLPBlock( + channels=channels, + path_h=path_h, + path_w=path_w, + reparam_conv_kernels=reparam_conv_kernels, + globalperceptron_ratio=globalperceptron_ratio, + num_sharesets=num_sharesets, + deploy=deploy) + self.ffn_block = ConvFFN(channels, channels * ffn_expand) + norm1 = build_norm_layer(norm_cfg, channels)[1] + self.add_module('norm1', norm1) + norm2 = build_norm_layer(norm_cfg, channels)[1] + self.add_module('norm2', norm2) + + def forward(self, x): + y = x + self.repmlp_block(self.norm1(x)) + out = y + self.ffn_block(self.norm2(y)) + return out + + +class ConvFFN(nn.Module): + """ConvFFN implemented by using point-wise convs.""" + + def __init__(self, + in_channels, + hidden_channels=None, + out_channels=None, + norm_cfg=dict(type='BN', requires_grad=True), + act_cfg=dict(type='GELU')): + super().__init__() + out_features = out_channels or in_channels + hidden_features = hidden_channels or in_channels + self.ffn_fc1 = ConvModule( + in_channels=in_channels, + out_channels=hidden_features, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=norm_cfg, + act_cfg=None) + self.ffn_fc2 = ConvModule( + in_channels=hidden_features, + out_channels=out_features, + kernel_size=1, + stride=1, + padding=0, + norm_cfg=norm_cfg, + act_cfg=None) + self.act = build_activation_layer(act_cfg) + + def forward(self, x): + x = self.ffn_fc1(x) + x = self.act(x) + x = self.ffn_fc2(x) + return x + + +@MODELS.register_module() +class RepMLPNet(BaseModule): + """RepMLPNet backbone. + + A PyTorch impl of : `RepMLP: Re-parameterizing Convolutions into + Fully-connected Layers for Image Recognition + `_ + + Args: + arch (str | dict): RepMLP architecture. If use string, choose + from 'base' and 'b'. If use dict, it should have below keys: + + - channels (List[int]): Number of blocks in each stage. + - depths (List[int]): The number of blocks in each branch. + - sharesets_nums (List[int]): RepVGG Block that declares + the need to apply group convolution. + + img_size (int | tuple): The size of input image. Defaults: 224. + in_channels (int): Number of input image channels. Default: 3. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 4. + out_indices (Sequence[int]): Output from which stages. + Default: ``(3, )``. + reparam_conv_kernels (Squeue(int) | None): The conv kernels in the + GlobalPerceptron. Default: None. + globalperceptron_ratio (int): The reducation ratio in the + GlobalPerceptron. Default: 4. + num_sharesets (int): The number of sharesets in the + PartitionPerceptron. Default 1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + Default: dict(type='BN', requires_grad=True). + patch_cfg (dict): Extra config dict for patch embedding. + Defaults to an empty dict. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + deploy (bool): Whether to switch the model structure to deployment + mode. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
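+
+    Example:
+        A minimal forward sketch (the printed shape assumes the default
+        224x224 input and the 'b' arch; weights are randomly initialized):
+
+        >>> import torch
+        >>> from mmpretrain.models import RepMLPNet
+        >>> model = RepMLPNet(arch='b', img_size=224, out_indices=(3, ))
+        >>> model.eval()
+        >>> out = model(torch.rand(1, 3, 224, 224))
+        >>> print(out[0].shape)
+        torch.Size([1, 768, 7, 7])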
+ """ + arch_zoo = { + **dict.fromkeys(['b', 'base'], + {'channels': [96, 192, 384, 768], + 'depths': [2, 2, 12, 2], + 'sharesets_nums': [1, 4, 32, 128]}), + } # yapf: disable + + num_extra_tokens = 0 # there is no cls-token in RepMLP + + def __init__(self, + arch, + img_size=224, + in_channels=3, + patch_size=4, + out_indices=(3, ), + reparam_conv_kernels=(3, ), + globalperceptron_ratio=4, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + patch_cfg=dict(), + final_norm=True, + deploy=False, + init_cfg=None): + super(RepMLPNet, self).__init__(init_cfg=init_cfg) + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = {'channels', 'depths', 'sharesets_nums'} + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}.' + self.arch_settings = arch + + self.img_size = to_2tuple(img_size) + self.patch_size = to_2tuple(patch_size) + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.num_stage = len(self.arch_settings['channels']) + for value in self.arch_settings.values(): + assert isinstance(value, list) and len(value) == self.num_stage, ( + 'Length of setting item in arch dict must be type of list and' + ' have the same length.') + + self.channels = self.arch_settings['channels'] + self.depths = self.arch_settings['depths'] + self.sharesets_nums = self.arch_settings['sharesets_nums'] + + _patch_cfg = dict( + in_channels=in_channels, + input_size=self.img_size, + embed_dims=self.channels[0], + conv_type='Conv2d', + kernel_size=self.patch_size, + stride=self.patch_size, + norm_cfg=self.norm_cfg, + bias=False) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + + self.patch_hs = [ + self.patch_resolution[0] // 2**i for i in range(self.num_stage) + ] + self.patch_ws = [ + self.patch_resolution[1] // 2**i for i in range(self.num_stage) + ] + + self.stages = ModuleList() + self.downsample_layers = ModuleList() + for stage_idx in range(self.num_stage): + # make stage layers + _stage_cfg = dict( + channels=self.channels[stage_idx], + path_h=self.patch_hs[stage_idx], + path_w=self.patch_ws[stage_idx], + reparam_conv_kernels=reparam_conv_kernels, + globalperceptron_ratio=globalperceptron_ratio, + norm_cfg=self.norm_cfg, + ffn_expand=4, + num_sharesets=self.sharesets_nums[stage_idx], + deploy=deploy) + stage_blocks = [ + RepMLPNetUnit(**_stage_cfg) + for _ in range(self.depths[stage_idx]) + ] + self.stages.append(Sequential(*stage_blocks)) + + # make downsample layers + if stage_idx < self.num_stage - 1: + self.downsample_layers.append( + ConvModule( + in_channels=self.channels[stage_idx], + out_channels=self.channels[stage_idx + 1], + kernel_size=2, + stride=2, + padding=0, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True)) + + self.out_indice = out_indices + + if final_norm: + norm_layer = build_norm_layer(norm_cfg, self.channels[-1])[1] + else: + norm_layer = nn.Identity() + self.add_module('final_norm', norm_layer) + + def forward(self, x): + assert x.shape[2:] == self.img_size, \ + "The Rep-MLP doesn't support dynamic input shape. 
" \ + f'Please input images with shape {self.img_size}' + + outs = [] + + x, _ = self.patch_embed(x) + for i, stage in enumerate(self.stages): + x = stage(x) + + # downsample after each stage except last stage + if i < len(self.stages) - 1: + downsample = self.downsample_layers[i] + x = downsample(x) + + if i in self.out_indice: + if self.final_norm and i == len(self.stages) - 1: + out = self.final_norm(x) + else: + out = x + outs.append(out) + + return tuple(outs) + + def switch_to_deploy(self): + for m in self.modules(): + if hasattr(m, 'local_inject'): + m.local_inject() diff --git a/mmpretrain/models/backbones/repvgg.py b/mmpretrain/models/backbones/repvgg.py new file mode 100644 index 0000000..67c9d14 --- /dev/null +++ b/mmpretrain/models/backbones/repvgg.py @@ -0,0 +1,622 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import (ConvModule, build_activation_layer, build_conv_layer, + build_norm_layer) +from mmengine.model import BaseModule, Sequential +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm +from torch import nn + +from mmpretrain.registry import MODELS +from ..utils.se_layer import SELayer +from .base_backbone import BaseBackbone + + +class RepVGGBlock(BaseModule): + """RepVGG block for RepVGG backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + stride (int): Stride of the 3x3 and 1x1 convolution layer. Default: 1. + padding (int): Padding of the 3x3 convolution layer. + dilation (int): Dilation of the 3x3 convolution layer. + groups (int): Groups of the 3x3 and 1x1 convolution layer. Default: 1. + padding_mode (str): Padding mode of the 3x3 convolution layer. + Default: 'zeros'. + se_cfg (None or dict): The configuration of the se module. + Default: None. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN', requires_grad=True). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + deploy (bool): Whether to switch the model structure to + deployment mode. Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + padding=1, + dilation=1, + groups=1, + padding_mode='zeros', + se_cfg=None, + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + deploy=False, + init_cfg=None): + super(RepVGGBlock, self).__init__(init_cfg) + + assert se_cfg is None or isinstance(se_cfg, dict) + + self.in_channels = in_channels + self.out_channels = out_channels + self.stride = stride + self.padding = padding + self.dilation = dilation + self.groups = groups + self.se_cfg = se_cfg + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.deploy = deploy + + if deploy: + self.branch_reparam = build_conv_layer( + conv_cfg, + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=True, + padding_mode=padding_mode) + else: + # judge if input shape and output shape are the same. 
+ # If true, add a normalized identity shortcut. + if out_channels == in_channels and stride == 1 and \ + padding == dilation: + self.branch_norm = build_norm_layer(norm_cfg, in_channels)[1] + else: + self.branch_norm = None + + self.branch_3x3 = self.create_conv_bn( + kernel_size=3, + dilation=dilation, + padding=padding, + ) + self.branch_1x1 = self.create_conv_bn(kernel_size=1) + + if se_cfg is not None: + self.se_layer = SELayer(channels=out_channels, **se_cfg) + else: + self.se_layer = None + + self.act = build_activation_layer(act_cfg) + + def create_conv_bn(self, kernel_size, dilation=1, padding=0): + conv_bn = Sequential() + conv_bn.add_module( + 'conv', + build_conv_layer( + self.conv_cfg, + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=kernel_size, + stride=self.stride, + dilation=dilation, + padding=padding, + groups=self.groups, + bias=False)) + conv_bn.add_module( + 'norm', + build_norm_layer(self.norm_cfg, num_features=self.out_channels)[1]) + + return conv_bn + + def forward(self, x): + + def _inner_forward(inputs): + if self.deploy: + return self.branch_reparam(inputs) + + if self.branch_norm is None: + branch_norm_out = 0 + else: + branch_norm_out = self.branch_norm(inputs) + + inner_out = self.branch_3x3(inputs) + self.branch_1x1( + inputs) + branch_norm_out + + if self.se_cfg is not None: + inner_out = self.se_layer(inner_out) + + return inner_out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.act(out) + + return out + + def switch_to_deploy(self): + """Switch the model structure from training mode to deployment mode.""" + if self.deploy: + return + assert self.norm_cfg['type'] == 'BN', \ + "Switch is not allowed when norm_cfg['type'] != 'BN'." + + reparam_weight, reparam_bias = self.reparameterize() + self.branch_reparam = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.out_channels, + kernel_size=3, + stride=self.stride, + padding=self.padding, + dilation=self.dilation, + groups=self.groups, + bias=True) + self.branch_reparam.weight.data = reparam_weight + self.branch_reparam.bias.data = reparam_bias + + for param in self.parameters(): + param.detach_() + delattr(self, 'branch_3x3') + delattr(self, 'branch_1x1') + delattr(self, 'branch_norm') + + self.deploy = True + + def reparameterize(self): + """Fuse all the parameters of all branches. + + Returns: + tuple[torch.Tensor, torch.Tensor]: Parameters after fusion of all + branches. the first element is the weights and the second is + the bias. + """ + weight_3x3, bias_3x3 = self._fuse_conv_bn(self.branch_3x3) + weight_1x1, bias_1x1 = self._fuse_conv_bn(self.branch_1x1) + # pad a conv1x1 weight to a conv3x3 weight + weight_1x1 = F.pad(weight_1x1, [1, 1, 1, 1], value=0) + + weight_norm, bias_norm = 0, 0 + if self.branch_norm: + tmp_conv_bn = self._norm_to_conv3x3(self.branch_norm) + weight_norm, bias_norm = self._fuse_conv_bn(tmp_conv_bn) + + return (weight_3x3 + weight_1x1 + weight_norm, + bias_3x3 + bias_1x1 + bias_norm) + + def _fuse_conv_bn(self, branch): + """Fuse the parameters in a branch with a conv and bn. + + Args: + branch (mmcv.runner.Sequential): A branch with conv and bn. + + Returns: + tuple[torch.Tensor, torch.Tensor]: The parameters obtained after + fusing the parameters of conv and bn in one branch. + The first element is the weight and the second is the bias. 
+ """ + if branch is None: + return 0, 0 + conv_weight = branch.conv.weight + running_mean = branch.norm.running_mean + running_var = branch.norm.running_var + gamma = branch.norm.weight + beta = branch.norm.bias + eps = branch.norm.eps + + std = (running_var + eps).sqrt() + fused_weight = (gamma / std).reshape(-1, 1, 1, 1) * conv_weight + fused_bias = -running_mean * gamma / std + beta + + return fused_weight, fused_bias + + def _norm_to_conv3x3(self, branch_nrom): + """Convert a norm layer to a conv3x3-bn sequence. + + Args: + branch (nn.BatchNorm2d): A branch only with bn in the block. + + Returns: + tmp_conv3x3 (mmcv.runner.Sequential): a sequential with conv3x3 and + bn. + """ + input_dim = self.in_channels // self.groups + conv_weight = torch.zeros((self.in_channels, input_dim, 3, 3), + dtype=branch_nrom.weight.dtype) + + for i in range(self.in_channels): + conv_weight[i, i % input_dim, 1, 1] = 1 + conv_weight = conv_weight.to(branch_nrom.weight.device) + + tmp_conv3x3 = self.create_conv_bn(kernel_size=3) + tmp_conv3x3.conv.weight.data = conv_weight + tmp_conv3x3.norm = branch_nrom + return tmp_conv3x3 + + +class MTSPPF(BaseModule): + """MTSPPF block for YOLOX-PAI RepVGG backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + kernel_size (int): Kernel size of pooling. Default: 5. + """ + + def __init__(self, + in_channels, + out_channels, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + kernel_size=5): + super().__init__() + hidden_features = in_channels // 2 # hidden channels + self.conv1 = ConvModule( + in_channels, + hidden_features, + 1, + stride=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.conv2 = ConvModule( + hidden_features * 4, + out_channels, + 1, + stride=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.maxpool = nn.MaxPool2d( + kernel_size=kernel_size, stride=1, padding=kernel_size // 2) + + def forward(self, x): + x = self.conv1(x) + y1 = self.maxpool(x) + y2 = self.maxpool(y1) + return self.conv2(torch.cat([x, y1, y2, self.maxpool(y2)], 1)) + + +@MODELS.register_module() +class RepVGG(BaseBackbone): + """RepVGG backbone. + + A PyTorch impl of : `RepVGG: Making VGG-style ConvNets Great Again + `_ + + Args: + arch (str | dict): RepVGG architecture. If use string, choose from + 'A0', 'A1`', 'A2', 'B0', 'B1', 'B1g2', 'B1g4', 'B2', 'B2g2', + 'B2g4', 'B3', 'B3g2', 'B3g4' or 'D2se'. If use dict, it should + have below keys: + + - **num_blocks** (Sequence[int]): Number of blocks in each stage. + - **width_factor** (Sequence[float]): Width deflator in each stage. + - **group_layer_map** (dict | None): RepVGG Block that declares + the need to apply group convolution. + - **se_cfg** (dict | None): SE Layer config. + - **stem_channels** (int, optional): The stem channels, the final + stem channels will be + ``min(stem_channels, base_channels*width_factor[0])``. + If not set here, 64 is used by default in the code. + + in_channels (int): Number of input image channels. Defaults to 3. + base_channels (int): Base channels of RepVGG backbone, work with + width_factor together. Defaults to 64. + out_indices (Sequence[int]): Output from which stages. + Defaults to ``(3, )``. + strides (Sequence[int]): Strides of the first block of each stage. + Defaults to ``(2, 2, 2, 2)``. 
+ dilations (Sequence[int]): Dilation of each stage. + Defaults to ``(1, 1, 1, 1)``. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. Defaults to -1. + conv_cfg (dict | None): The config dict for conv layers. + Defaults to None. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='BN')``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='ReLU')``. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + deploy (bool): Whether to switch the model structure to deployment + mode. Defaults to False. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + add_ppf (bool): Whether to use the MTSPPF block. Defaults to False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + groupwise_layers = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26] + g2_layer_map = {layer: 2 for layer in groupwise_layers} + g4_layer_map = {layer: 4 for layer in groupwise_layers} + + arch_settings = { + 'A0': + dict( + num_blocks=[2, 4, 14, 1], + width_factor=[0.75, 0.75, 0.75, 2.5], + group_layer_map=None, + se_cfg=None), + 'A1': + dict( + num_blocks=[2, 4, 14, 1], + width_factor=[1, 1, 1, 2.5], + group_layer_map=None, + se_cfg=None), + 'A2': + dict( + num_blocks=[2, 4, 14, 1], + width_factor=[1.5, 1.5, 1.5, 2.75], + group_layer_map=None, + se_cfg=None), + 'B0': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[1, 1, 1, 2.5], + group_layer_map=None, + se_cfg=None, + stem_channels=64), + 'B1': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2, 2, 2, 4], + group_layer_map=None, + se_cfg=None), + 'B1g2': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2, 2, 2, 4], + group_layer_map=g2_layer_map, + se_cfg=None), + 'B1g4': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2, 2, 2, 4], + group_layer_map=g4_layer_map, + se_cfg=None), + 'B2': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2.5, 2.5, 2.5, 5], + group_layer_map=None, + se_cfg=None), + 'B2g2': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2.5, 2.5, 2.5, 5], + group_layer_map=g2_layer_map, + se_cfg=None), + 'B2g4': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[2.5, 2.5, 2.5, 5], + group_layer_map=g4_layer_map, + se_cfg=None), + 'B3': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[3, 3, 3, 5], + group_layer_map=None, + se_cfg=None), + 'B3g2': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[3, 3, 3, 5], + group_layer_map=g2_layer_map, + se_cfg=None), + 'B3g4': + dict( + num_blocks=[4, 6, 16, 1], + width_factor=[3, 3, 3, 5], + group_layer_map=g4_layer_map, + se_cfg=None), + 'D2se': + dict( + num_blocks=[8, 14, 24, 1], + width_factor=[2.5, 2.5, 2.5, 5], + group_layer_map=None, + se_cfg=dict(ratio=16, divisor=1)), + 'yolox-pai-small': + dict( + num_blocks=[3, 5, 7, 3], + width_factor=[1, 1, 1, 1], + group_layer_map=None, + se_cfg=None, + stem_channels=32), + } + + def __init__(self, + arch, + in_channels=3, + base_channels=64, + out_indices=(3, ), + strides=(2, 2, 2, 2), + dilations=(1, 1, 1, 1), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + deploy=False, + norm_eval=False, + add_ppf=False, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 
'GroupNorm']) + ]): + super(RepVGG, self).__init__(init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'"arch": "{arch}" is not one of the arch_settings' + arch = self.arch_settings[arch] + elif not isinstance(arch, dict): + raise TypeError('Expect "arch" to be either a string ' + f'or a dict, got {type(arch)}') + + assert len(arch['num_blocks']) == len( + arch['width_factor']) == len(strides) == len(dilations) + assert max(out_indices) < len(arch['num_blocks']) + if arch['group_layer_map'] is not None: + assert max(arch['group_layer_map'].keys()) <= sum( + arch['num_blocks']) + + if arch['se_cfg'] is not None: + assert isinstance(arch['se_cfg'], dict) + + self.base_channels = base_channels + self.arch = arch + self.in_channels = in_channels + self.out_indices = out_indices + self.strides = strides + self.dilations = dilations + self.deploy = deploy + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + + # defaults to 64 to prevert BC-breaking if stem_channels + # not in arch dict; + # the stem channels should not be larger than that of stage1. + channels = min( + arch.get('stem_channels', 64), + int(self.base_channels * self.arch['width_factor'][0])) + self.stem = RepVGGBlock( + self.in_channels, + channels, + stride=2, + se_cfg=arch['se_cfg'], + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + deploy=deploy) + + next_create_block_idx = 1 + self.stages = [] + for i in range(len(arch['num_blocks'])): + num_blocks = self.arch['num_blocks'][i] + stride = self.strides[i] + dilation = self.dilations[i] + out_channels = int(self.base_channels * 2**i * + self.arch['width_factor'][i]) + + stage, next_create_block_idx = self._make_stage( + channels, out_channels, num_blocks, stride, dilation, + next_create_block_idx, init_cfg) + stage_name = f'stage_{i + 1}' + self.add_module(stage_name, stage) + self.stages.append(stage_name) + + channels = out_channels + + if add_ppf: + self.ppf = MTSPPF( + out_channels, + out_channels, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + kernel_size=5) + else: + self.ppf = nn.Identity() + + def _make_stage(self, in_channels, out_channels, num_blocks, stride, + dilation, next_create_block_idx, init_cfg): + strides = [stride] + [1] * (num_blocks - 1) + dilations = [dilation] * num_blocks + + blocks = [] + for i in range(num_blocks): + groups = self.arch['group_layer_map'].get( + next_create_block_idx, + 1) if self.arch['group_layer_map'] is not None else 1 + blocks.append( + RepVGGBlock( + in_channels, + out_channels, + stride=strides[i], + padding=dilations[i], + dilation=dilations[i], + groups=groups, + se_cfg=self.arch['se_cfg'], + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + deploy=self.deploy, + init_cfg=init_cfg)) + in_channels = out_channels + next_create_block_idx += 1 + + return Sequential(*blocks), next_create_block_idx + + def forward(self, x): + x = self.stem(x) + outs = [] + for i, stage_name in enumerate(self.stages): + stage = getattr(self, stage_name) + x = stage(x) + if i + 1 == len(self.stages): + x = self.ppf(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + for i in range(self.frozen_stages): + stage = getattr(self, f'stage_{i+1}') + stage.eval() + for 
param in stage.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(RepVGG, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() + + def switch_to_deploy(self): + for m in self.modules(): + if isinstance(m, RepVGGBlock): + m.switch_to_deploy() + self.deploy = True diff --git a/mmpretrain/models/backbones/res2net.py b/mmpretrain/models/backbones/res2net.py new file mode 100644 index 0000000..6e9bb6d --- /dev/null +++ b/mmpretrain/models/backbones/res2net.py @@ -0,0 +1,317 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import ModuleList, Sequential + +from mmpretrain.registry import MODELS +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResNet + + +class Bottle2neck(_Bottleneck): + expansion = 4 + + def __init__(self, + in_channels, + out_channels, + scales=4, + base_width=26, + base_channels=64, + stage_type='normal', + **kwargs): + """Bottle2neck block for Res2Net.""" + super(Bottle2neck, self).__init__(in_channels, out_channels, **kwargs) + assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.' + + mid_channels = out_channels // self.expansion + width = int(math.floor(mid_channels * (base_width / base_channels))) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, width * scales, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + width * scales, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + + if stage_type == 'stage': + self.pool = nn.AvgPool2d( + kernel_size=3, stride=self.conv2_stride, padding=1) + + self.convs = ModuleList() + self.bns = ModuleList() + for i in range(scales - 1): + self.convs.append( + build_conv_layer( + self.conv_cfg, + width, + width, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + bias=False)) + self.bns.append( + build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1]) + + self.conv3 = build_conv_layer( + self.conv_cfg, + width * scales, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.stage_type = stage_type + self.scales = scales + self.width = width + delattr(self, 'conv2') + delattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + spx = torch.split(out, self.width, 1) + sp = self.convs[0](spx[0].contiguous()) + sp = self.relu(self.bns[0](sp)) + out = sp + for i in range(1, self.scales - 1): + if self.stage_type == 'stage': + sp = spx[i] + else: + sp = sp + spx[i] + sp = self.convs[i](sp.contiguous()) + sp = self.relu(self.bns[i](sp)) + out = torch.cat((out, sp), 1) + + if self.stage_type == 'normal' and self.scales != 1: + out = torch.cat((out, spx[self.scales - 1]), 1) + elif self.stage_type == 'stage' and self.scales != 1: + out = torch.cat((out, self.pool(spx[self.scales - 1])), 1) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = 
cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Res2Layer(Sequential): + """Res2Layer to build Res2Net style backbone. + + Args: + block (nn.Module): block used to build ResLayer. + inplanes (int): inplanes of block. + planes (int): planes of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottle2neck. Defaults to True. + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + scales (int): Scales used in Res2Net. Default: 4 + base_width (int): Basic width of each scale. Default: 26 + drop_path_rate (float or np.ndarray): stochastic depth rate. + Default: 0. + """ + + def __init__(self, + block, + in_channels, + out_channels, + num_blocks, + stride=1, + avg_down=True, + conv_cfg=None, + norm_cfg=dict(type='BN'), + scales=4, + base_width=26, + drop_path_rate=0.0, + **kwargs): + self.block = block + + if isinstance(drop_path_rate, float): + drop_path_rate = [drop_path_rate] * num_blocks + + assert len(drop_path_rate + ) == num_blocks, 'Please check the length of drop_path_rate' + + downsample = None + if stride != 1 or in_channels != out_channels: + if avg_down: + downsample = nn.Sequential( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False), + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=1, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1], + ) + else: + downsample = nn.Sequential( + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=stride, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1], + ) + + layers = [] + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + scales=scales, + base_width=base_width, + stage_type='stage', + drop_path_rate=drop_path_rate[0], + **kwargs)) + in_channels = out_channels + for i in range(1, num_blocks): + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + scales=scales, + base_width=base_width, + drop_path_rate=drop_path_rate[i], + **kwargs)) + super(Res2Layer, self).__init__(*layers) + + +@MODELS.register_module() +class Res2Net(ResNet): + """Res2Net backbone. + + A PyTorch implement of : `Res2Net: A New Multi-scale Backbone + Architecture `_ + + Args: + depth (int): Depth of Res2Net, choose from {50, 101, 152}. + scales (int): Scales used in Res2Net. Defaults to 4. + base_width (int): Basic width of each scale. Defaults to 26. + in_channels (int): Number of input image channels. Defaults to 3. + num_stages (int): Number of Res2Net stages. Defaults to 4. + strides (Sequence[int]): Strides of the first block of each stage. + Defaults to ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Defaults to ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. + Defaults to ``(3, )``. + style (str): "pytorch" or "caffe". If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. Defaults to "pytorch". + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Defaults to True. 
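The multi-scale behaviour of ``Bottle2neck`` comes from splitting the 1x1 output into ``scales`` groups of ``width`` channels and passing each group through its own 3x3 conv, with the previous group's output added in. A stripped-down sketch of that hierarchical pattern (plain PyTorch, ignoring BN/ReLU and the 'stage' pooling for brevity):

    import torch
    import torch.nn as nn

    scales, width = 4, 8
    convs = nn.ModuleList(
        [nn.Conv2d(width, width, 3, padding=1) for _ in range(scales - 1)])

    x = torch.randn(1, scales * width, 14, 14)
    spx = torch.split(x, width, dim=1)   # `scales` groups of `width` channels

    sp = convs[0](spx[0])
    outs = [sp]
    for i in range(1, scales - 1):
        sp = convs[i](sp + spx[i])       # hierarchical connection between splits
        outs.append(sp)
    outs.append(spx[-1])                 # the last split bypasses the 3x3 convs
    out = torch.cat(outs, dim=1)
    print(out.shape)                     # torch.Size([1, 32, 14, 14])

In the real block each group additionally passes through BN and ReLU, and in the first block of a stage (``stage_type='stage'``) the groups are processed independently while the bypassed split is average-pooled to match the stride.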
+ avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottle2neck. Defaults to True. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to ``dict(type='BN', requires_grad=True)``. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Defaults to True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + + Example: + >>> from mmpretrain.models import Res2Net + >>> import torch + >>> model = Res2Net(depth=50, + ... scales=4, + ... base_width=26, + ... out_indices=(0, 1, 2, 3)) + >>> model.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = model.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 256, 8, 8) + (1, 512, 4, 4) + (1, 1024, 2, 2) + (1, 2048, 1, 1) + """ + + arch_settings = { + 50: (Bottle2neck, (3, 4, 6, 3)), + 101: (Bottle2neck, (3, 4, 23, 3)), + 152: (Bottle2neck, (3, 8, 36, 3)) + } + + def __init__(self, + scales=4, + base_width=26, + style='pytorch', + deep_stem=True, + avg_down=True, + init_cfg=None, + **kwargs): + self.scales = scales + self.base_width = base_width + super(Res2Net, self).__init__( + style=style, + deep_stem=deep_stem, + avg_down=avg_down, + init_cfg=init_cfg, + **kwargs) + + def make_res_layer(self, **kwargs): + return Res2Layer( + scales=self.scales, + base_width=self.base_width, + base_channels=self.base_channels, + **kwargs) diff --git a/mmpretrain/models/backbones/resnest.py b/mmpretrain/models/backbones/resnest.py new file mode 100644 index 0000000..4bb438f --- /dev/null +++ b/mmpretrain/models/backbones/resnest.py @@ -0,0 +1,339 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import build_conv_layer, build_norm_layer + +from mmpretrain.registry import MODELS +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResLayer, ResNetV1d + + +class RSoftmax(nn.Module): + """Radix Softmax module in ``SplitAttentionConv2d``. + + Args: + radix (int): Radix of input. + groups (int): Groups of input. + """ + + def __init__(self, radix, groups): + super().__init__() + self.radix = radix + self.groups = groups + + def forward(self, x): + batch = x.size(0) + if self.radix > 1: + x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2) + x = F.softmax(x, dim=1) + x = x.reshape(batch, -1) + else: + x = torch.sigmoid(x) + return x + + +class SplitAttentionConv2d(nn.Module): + """Split-Attention Conv2d. + + Args: + in_channels (int): Same as nn.Conv2d. + out_channels (int): Same as nn.Conv2d. + kernel_size (int | tuple[int]): Same as nn.Conv2d. + stride (int | tuple[int]): Same as nn.Conv2d. + padding (int | tuple[int]): Same as nn.Conv2d. + dilation (int | tuple[int]): Same as nn.Conv2d. + groups (int): Same as nn.Conv2d. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. 
+ conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + """ + + def __init__(self, + in_channels, + channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + radix=2, + reduction_factor=4, + conv_cfg=None, + norm_cfg=dict(type='BN')): + super(SplitAttentionConv2d, self).__init__() + inter_channels = max(in_channels * radix // reduction_factor, 32) + self.radix = radix + self.groups = groups + self.channels = channels + self.conv = build_conv_layer( + conv_cfg, + in_channels, + channels * radix, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups * radix, + bias=False) + self.norm0_name, norm0 = build_norm_layer( + norm_cfg, channels * radix, postfix=0) + self.add_module(self.norm0_name, norm0) + self.relu = nn.ReLU(inplace=True) + self.fc1 = build_conv_layer( + None, channels, inter_channels, 1, groups=self.groups) + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, inter_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.fc2 = build_conv_layer( + None, inter_channels, channels * radix, 1, groups=self.groups) + self.rsoftmax = RSoftmax(radix, groups) + + @property + def norm0(self): + return getattr(self, self.norm0_name) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def forward(self, x): + x = self.conv(x) + x = self.norm0(x) + x = self.relu(x) + + batch, rchannel = x.shape[:2] + if self.radix > 1: + splits = x.view(batch, self.radix, -1, *x.shape[2:]) + gap = splits.sum(dim=1) + else: + gap = x + gap = F.adaptive_avg_pool2d(gap, 1) + gap = self.fc1(gap) + + gap = self.norm1(gap) + gap = self.relu(gap) + + atten = self.fc2(gap) + atten = self.rsoftmax(atten).view(batch, -1, 1, 1) + + if self.radix > 1: + attens = atten.view(batch, self.radix, -1, *atten.shape[2:]) + out = torch.sum(attens * splits, dim=1) + else: + out = atten * x + return out.contiguous() + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeSt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. 
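Functionally, ``SplitAttentionConv2d`` produces ``radix`` feature splits, pools their sum, and predicts per-channel weights with a two-layer bottleneck; ``RSoftmax`` normalises those weights over the radix dimension before the weighted sum of splits is taken. A compact functional sketch of that attention step (plain PyTorch, starting from already-computed splits, with random stand-ins for fc1/fc2 and ``groups=1`` for simplicity):

    import torch
    import torch.nn.functional as F

    batch, radix, channels, h, w = 2, 2, 16, 8, 8
    splits = torch.randn(batch, radix, channels, h, w)  # the radix cardinal splits

    gap = splits.sum(dim=1)                              # fuse the splits
    gap = F.adaptive_avg_pool2d(gap, 1).flatten(1)       # (batch, channels)

    # two-layer bottleneck predicting one logit per (radix, channel)
    inter = max(channels * radix // 4, 32)
    w1, w2 = torch.randn(inter, channels), torch.randn(radix * channels, inter)
    logits = F.relu(gap @ w1.t()) @ w2.t()               # (batch, radix * channels)

    # RSoftmax with groups=1: softmax over the radix dimension
    atten = logits.view(batch, radix, channels).softmax(dim=1)
    out = (atten.view(batch, radix, channels, 1, 1) * splits).sum(dim=1)
    print(out.shape)                                     # torch.Size([2, 16, 8, 8])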
+ """ + + def __init__(self, + in_channels, + out_channels, + groups=1, + width_per_group=4, + base_channels=64, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + super(Bottleneck, self).__init__(in_channels, out_channels, **kwargs) + + self.groups = groups + self.width_per_group = width_per_group + + # For ResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for ResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. + if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.avg_down_stride = avg_down_stride and self.conv2_stride > 1 + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = SplitAttentionConv2d( + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=1 if self.avg_down_stride else self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + radix=radix, + reduction_factor=reduction_factor, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg) + delattr(self, self.norm2_name) + + if self.avg_down_stride: + self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1) + + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + + if self.avg_down_stride: + out = self.avd_layer(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@MODELS.register_module() +class ResNeSt(ResNetV1d): + """ResNeSt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152, 200}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + radix (int): Radix of SpltAtConv2d. Default: 2 + reduction_factor (int): Reduction factor of SplitAttentionConv2d. + Default: 4. + avg_down_stride (bool): Whether to use average pool for stride in + Bottleneck. Default: True. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. 
If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)), + 200: (Bottleneck, (3, 24, 36, 3)), + 269: (Bottleneck, (3, 30, 48, 8)) + } + + def __init__(self, + depth, + groups=1, + width_per_group=4, + radix=2, + reduction_factor=4, + avg_down_stride=True, + **kwargs): + self.groups = groups + self.width_per_group = width_per_group + self.radix = radix + self.reduction_factor = reduction_factor + self.avg_down_stride = avg_down_stride + super(ResNeSt, self).__init__(depth=depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + radix=self.radix, + reduction_factor=self.reduction_factor, + avg_down_stride=self.avg_down_stride, + **kwargs) diff --git a/mmpretrain/models/backbones/resnet.py b/mmpretrain/models/backbones/resnet.py new file mode 100644 index 0000000..4a254f7 --- /dev/null +++ b/mmpretrain/models/backbones/resnet.py @@ -0,0 +1,768 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import (ConvModule, build_activation_layer, build_conv_layer, + build_norm_layer) +from mmcv.cnn.bricks import DropPath +from mmengine.model import BaseModule +from mmengine.model.weight_init import constant_init +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + +eps = 1.0e-5 + + +class BasicBlock(BaseModule): + """BasicBlock for ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the output channels of conv1. This is a + reserved argument in BasicBlock and should always be 1. Default: 1. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None. + style (str): `pytorch` or `caffe`. It is unused and reserved for + unified API with Bottleneck. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. 
Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + """ + + def __init__(self, + in_channels, + out_channels, + expansion=1, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + drop_path_rate=0.0, + act_cfg=dict(type='ReLU', inplace=True), + init_cfg=None): + super(BasicBlock, self).__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert self.expansion == 1 + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, out_channels, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + 3, + padding=1, + bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = build_activation_layer(act_cfg) + self.downsample = downsample + self.drop_path = DropPath(drop_prob=drop_path_rate + ) if drop_path_rate > eps else nn.Identity() + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out = self.drop_path(out) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(BaseModule): + """Bottleneck block for ResNet. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int): The ratio of ``out_channels/mid_channels`` where + ``mid_channels`` is the input/output channels of conv2. Default: 4. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None. + style (str): ``"pytorch"`` or ``"caffe"``. If set to "pytorch", the + stride-two layer is the 3x3 conv layer, otherwise the stride-two + layer is the first 1x1 conv layer. Default: "pytorch". + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. 
+ Default: dict(type='BN') + """ + + def __init__(self, + in_channels, + out_channels, + expansion=4, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU', inplace=True), + drop_path_rate=0.0, + init_cfg=None): + super(Bottleneck, self).__init__(init_cfg=init_cfg) + assert style in ['pytorch', 'caffe'] + + self.in_channels = in_channels + self.out_channels = out_channels + self.expansion = expansion + assert out_channels % expansion == 0 + self.mid_channels = out_channels // expansion + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, out_channels, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + self.mid_channels, + out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = build_activation_layer(act_cfg) + self.downsample = downsample + self.drop_path = DropPath(drop_prob=drop_path_rate + ) if drop_path_rate > eps else nn.Identity() + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + @property + def norm2(self): + return getattr(self, self.norm2_name) + + @property + def norm3(self): + return getattr(self, self.norm3_name) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out = self.drop_path(out) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +def get_expansion(block, expansion=None): + """Get the expansion of a residual block. + + The block expansion will be obtained by the following order: + + 1. If ``expansion`` is given, just return it. + 2. If ``block`` has the attribute ``expansion``, then return + ``block.expansion``. + 3. Return the default value according the the block type: + 1 for ``BasicBlock`` and 4 for ``Bottleneck``. + + Args: + block (class): The block class. + expansion (int | None): The given expansion ratio. + + Returns: + int: The expansion of the block. 
+ """ + if isinstance(expansion, int): + assert expansion > 0 + elif expansion is None: + if hasattr(block, 'expansion'): + expansion = block.expansion + elif issubclass(block, BasicBlock): + expansion = 1 + elif issubclass(block, Bottleneck): + expansion = 4 + else: + raise TypeError(f'expansion is not specified for {block.__name__}') + else: + raise TypeError('expansion must be an integer or None') + + return expansion + + +class ResLayer(nn.Sequential): + """ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): Residual block used to build ResLayer. + num_blocks (int): Number of blocks. + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + expansion (int, optional): The expansion for BasicBlock/Bottleneck. + If not specified, it will firstly be obtained via + ``block.expansion``. If the block has no attribute "expansion", + the following default values will be used: 1 for BasicBlock and + 4 for Bottleneck. Default: None. + stride (int): stride of the first block. Default: 1. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + drop_path_rate (float or list): stochastic depth rate. + Default: 0. + """ + + def __init__(self, + block, + num_blocks, + in_channels, + out_channels, + expansion=None, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + drop_path_rate=0.0, + **kwargs): + self.block = block + self.expansion = get_expansion(block, expansion) + + if isinstance(drop_path_rate, float): + drop_path_rate = [drop_path_rate] * num_blocks + + assert len(drop_path_rate + ) == num_blocks, 'Please check the length of drop_path_rate' + + downsample = None + if stride != 1 or in_channels != out_channels: + downsample = [] + conv_stride = stride + if avg_down and stride != 1: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + in_channels, + out_channels, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, out_channels)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + drop_path_rate=drop_path_rate[0], + **kwargs)) + in_channels = out_channels + for i in range(1, num_blocks): + layers.append( + block( + in_channels=in_channels, + out_channels=out_channels, + expansion=self.expansion, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + drop_path_rate=drop_path_rate[i], + **kwargs)) + super(ResLayer, self).__init__(*layers) + + +@MODELS.register_module() +class ResNet(BaseBackbone): + """ResNet backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + base_channels (int): Middle channels of the first stage. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. 
+ dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. + Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + + Example: + >>> from mmpretrain.models import ResNet + >>> import torch + >>> self = ResNet(depth=18) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=64, + base_channels=64, + expansion=None, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(3, ), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=False, + with_cp=False, + zero_init_residual=True, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ], + drop_path_rate=0.0): + super(ResNet, self).__init__(init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + self.depth = depth + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.zero_init_residual = zero_init_residual + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.expansion = get_expansion(self.block, expansion) + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + _in_channels = stem_channels + _out_channels = base_channels * self.expansion + + # stochastic depth decay rule + total_depth = sum(stage_blocks) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] + + 
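For example, with ``depth=50`` (stage blocks ``(3, 4, 6, 3)``) and ``drop_path_rate=0.1``, the per-block rates ramp linearly from 0 to 0.1 across all 16 blocks and are consumed stage by stage, mirroring the slicing in the loop that follows:

    import torch

    stage_blocks = (3, 4, 6, 3)          # ResNet-50
    drop_path_rate = 0.1
    total_depth = sum(stage_blocks)

    dpr = [x.item() for x in torch.linspace(0, drop_path_rate, total_depth)]
    for num_blocks in stage_blocks:
        stage_rates, dpr = dpr[:num_blocks], dpr[num_blocks:]
        print([round(r, 3) for r in stage_rates])
    # prints (approximately):
    # [0.0, 0.007, 0.013]
    # [0.02, 0.027, 0.033, 0.04]
    # [0.047, 0.053, 0.06, 0.067, 0.073, 0.08]
    # [0.087, 0.093, 0.1]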
for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + res_layer = self.make_res_layer( + block=self.block, + num_blocks=num_blocks, + in_channels=_in_channels, + out_channels=_out_channels, + expansion=self.expansion, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + drop_path_rate=dpr[:num_blocks]) + _in_channels = _out_channels + _out_channels *= 2 + dpr = dpr[num_blocks:] + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = res_layer[-1].out_channels + + def make_res_layer(self, **kwargs): + return ResLayer(**kwargs) + + @property + def norm1(self): + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels): + if self.deep_stem: + self.stem = nn.Sequential( + ConvModule( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True), + ConvModule( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + super(ResNet, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress zero_init_residual if use pretrained model. + return + + if self.zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + constant_init(m.norm3, 0) + elif isinstance(m, BasicBlock): + constant_init(m.norm2, 0) + + def forward(self, x): + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + super(ResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def get_layer_depth(self, param_name: str, prefix: str = ''): + """Get the layer id to set the different learning rates for ResNet. 
+ + ResNet stages: + 50 : [3, 4, 6, 3] + 101 : [3, 4, 23, 3] + 152 : [3, 8, 36, 3] + 200 : [3, 24, 36, 3] + eca269d: [3, 30, 48, 8] + + Args: + param_name (str): The name of the parameter. + prefix (str): The prefix for the parameter. + Defaults to an empty string. + + Returns: + Tuple[int, int]: The layer-wise depth and the num of layers. + """ + depths = self.stage_blocks + if depths[1] == 4 and depths[2] == 6: + blk2, blk3 = 2, 3 + elif depths[1] == 4 and depths[2] == 23: + blk2, blk3 = 2, 3 + elif depths[1] == 8 and depths[2] == 36: + blk2, blk3 = 4, 4 + elif depths[1] == 24 and depths[2] == 36: + blk2, blk3 = 4, 4 + elif depths[1] == 30 and depths[2] == 48: + blk2, blk3 = 5, 6 + else: + raise NotImplementedError + + N2, N3 = math.ceil(depths[1] / blk2 - + 1e-5), math.ceil(depths[2] / blk3 - 1e-5) + N = 2 + N2 + N3 # r50: 2 + 2 + 2 = 6 + max_layer_id = N + 1 # r50: 2 + 2 + 2 + 1(like head) = 7 + + if not param_name.startswith(prefix): + # For subsequent module like head + return max_layer_id, max_layer_id + 1 + + if param_name.startswith('backbone.layer'): + stage_id = int(param_name.split('.')[1][5:]) + block_id = int(param_name.split('.')[2]) + + if stage_id == 1: + layer_id = 1 + elif stage_id == 2: + layer_id = 2 + block_id // blk2 # r50: 2, 3 + elif stage_id == 3: + layer_id = 2 + N2 + block_id // blk3 # r50: 4, 5 + else: # stage_id == 4 + layer_id = N # r50: 6 + return layer_id, max_layer_id + 1 + + else: + return 0, max_layer_id + 1 + + +@MODELS.register_module() +class ResNetV1c(ResNet): + """ResNetV1c backbone. + + This variant is described in `Bag of Tricks. + `_. + + Compared with default ResNet(ResNetV1b), ResNetV1c replaces the 7x7 conv + in the input stem with three 3x3 convs. + """ + + def __init__(self, **kwargs): + super(ResNetV1c, self).__init__( + deep_stem=True, avg_down=False, **kwargs) + + +@MODELS.register_module() +class ResNetV1d(ResNet): + """ResNetV1d backbone. + + This variant is described in `Bag of Tricks. + `_. + + Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in + the input stem with three 3x3 convs. And in the downsampling block, a 2x2 + avg_pool with stride 2 is added before conv, whose stride is changed to 1. + """ + + def __init__(self, **kwargs): + super(ResNetV1d, self).__init__( + deep_stem=True, avg_down=True, **kwargs) diff --git a/mmpretrain/models/backbones/resnet_cifar.py b/mmpretrain/models/backbones/resnet_cifar.py new file mode 100644 index 0000000..9f17f92 --- /dev/null +++ b/mmpretrain/models/backbones/resnet_cifar.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import build_conv_layer, build_norm_layer + +from mmpretrain.registry import MODELS +from .resnet import ResNet + + +@MODELS.register_module() +class ResNet_CIFAR(ResNet): + """ResNet backbone for CIFAR. + + Compared to standard ResNet, it uses `kernel_size=3` and `stride=1` in + conv1, and does not apply MaxPoolinng after stem. It has been proven to + be more efficient than standard ResNet in other public codebase, e.g., + `https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py`. + + Args: + depth (int): Network depth, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + base_channels (int): Middle channels of the first stage. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. 
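To make the grouping in ``get_layer_depth`` concrete: for ResNet-50 the rule gives N2 = N3 = 2, so the stem maps to id 0, the four stages fill ids 1 to 6, and anything outside the backbone (for example the head) gets id 7, with 8 groups reported in total. A worked example (assuming the conventional ``backbone.`` / ``head.`` parameter prefixes, which are not fixed by this file):

    from mmpretrain.models.backbones.resnet import ResNet

    model = ResNet(depth=50)
    print(model.get_layer_depth('backbone.conv1.weight', prefix='backbone.'))
    # (0, 8)                               stem parameters
    print(model.get_layer_depth('backbone.layer3.4.conv1.weight',
                                prefix='backbone.'))
    # (5, 8)                               2 + N2 + 4 // 3
    print(model.get_layer_depth('head.fc.weight', prefix='backbone.'))
    # (7, 8)                               parameters outside the backbone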
+ Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): This network has specific designed stem, thus it is + asserted to be False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + """ + + def __init__(self, depth, deep_stem=False, **kwargs): + super(ResNet_CIFAR, self).__init__( + depth, deep_stem=deep_stem, **kwargs) + assert not self.deep_stem, 'ResNet_CIFAR do not support deep_stem' + + def _make_stem_layer(self, in_channels, base_channels): + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + base_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, base_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) diff --git a/mmpretrain/models/backbones/resnext.py b/mmpretrain/models/backbones/resnext.py new file mode 100644 index 0000000..8858b7d --- /dev/null +++ b/mmpretrain/models/backbones/resnext.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_conv_layer, build_norm_layer + +from mmpretrain.registry import MODELS +from .resnet import Bottleneck as _Bottleneck +from .resnet import ResLayer, ResNet + + +class Bottleneck(_Bottleneck): + """Bottleneck block for ResNeXt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. 
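Compared with the standard stem, the CIFAR variant keeps the full 32x32 resolution until the stride-2 stages, so the final feature map is 4x4 instead of 1x1. A quick check (assuming the package built from this patch is importable):

    import torch
    from mmpretrain.models.backbones.resnet_cifar import ResNet_CIFAR

    model = ResNet_CIFAR(depth=18, out_indices=(0, 1, 2, 3))
    model.eval()
    with torch.no_grad():
        outs = model(torch.rand(1, 3, 32, 32))
    print([tuple(o.shape) for o in outs])
    # [(1, 64, 32, 32), (1, 128, 16, 16), (1, 256, 8, 8), (1, 512, 4, 4)]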
Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + """ + + def __init__(self, + in_channels, + out_channels, + base_channels=64, + groups=32, + width_per_group=4, + **kwargs): + super(Bottleneck, self).__init__(in_channels, out_channels, **kwargs) + self.groups = groups + self.width_per_group = width_per_group + + # For ResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for ResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. + if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@MODELS.register_module() +class ResNeXt(ResNet): + """ResNeXt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. 
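The widening rule above can be read off directly: with the default ``groups=32, width_per_group=4``, a stage whose output is 256 channels (64 mid channels in plain ResNet) uses 32 * 4 * 64 / 64 = 128 mid channels, i.e. the grouped 3x3 convolution in that stage works on 32 groups of 4 channels. A small sketch of that arithmetic (the helper name is illustrative only):

    def resnext_mid_channels(out_channels, expansion=4, groups=32,
                             width_per_group=4, base_channels=64):
        """Mirror of the mid-channel computation in the Bottleneck above."""
        mid = out_channels // expansion
        if groups != 1:
            mid = groups * width_per_group * mid // base_channels
        return mid

    # stage outputs of ResNeXt-50 (32x4d): 256, 512, 1024, 2048
    print([resnext_mid_channels(c) for c in (256, 512, 1024, 2048)])
    # [128, 256, 512, 1024]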
Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + """ + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, groups=32, width_per_group=4, **kwargs): + self.groups = groups + self.width_per_group = width_per_group + super(ResNeXt, self).__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + **kwargs) diff --git a/mmpretrain/models/backbones/revvit.py b/mmpretrain/models/backbones/revvit.py new file mode 100644 index 0000000..f2e6c28 --- /dev/null +++ b/mmpretrain/models/backbones/revvit.py @@ -0,0 +1,671 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import sys + +import numpy as np +import torch +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import trunc_normal_ +from torch import nn +from torch.autograd import Function as Function + +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.registry import MODELS +from ..utils import (MultiheadAttention, build_norm_layer, resize_pos_embed, + to_2tuple) + + +class RevBackProp(Function): + """Custom Backpropagation function to allow (A) flushing memory in forward + and (B) activation recomputation reversibly in backward for gradient + calculation. + + Inspired by + https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py + """ + + @staticmethod + def forward( + ctx, + x, + layers, + buffer_layers, # List of layer ids for int activation to buffer + ): + """Reversible Forward pass. + + Any intermediate activations from `buffer_layers` are cached in ctx for + forward pass. This is not necessary for standard usecases. Each + reversible layer implements its own forward pass logic. + """ + buffer_layers.sort() + x1, x2 = torch.chunk(x, 2, dim=-1) + intermediate = [] + + for layer in layers: + x1, x2 = layer(x1, x2) + if layer.layer_id in buffer_layers: + intermediate.extend([x1.detach(), x2.detach()]) + + if len(buffer_layers) == 0: + all_tensors = [x1.detach(), x2.detach()] + else: + intermediate = [torch.LongTensor(buffer_layers), *intermediate] + all_tensors = [x1.detach(), x2.detach(), *intermediate] + + ctx.save_for_backward(*all_tensors) + ctx.layers = layers + + return torch.cat([x1, x2], dim=-1) + + @staticmethod + def backward(ctx, dx): + """Reversible Backward pass. + + Any intermediate activations from `buffer_layers` are recovered from + ctx. Each layer implements its own loic for backward pass (both + activation recomputation and grad calculation). 
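+
+        In short, each layer computed ``y1 = x1 + F(x2)`` and
+        ``y2 = x2 + G(y1)`` in its forward pass, so its inputs are
+        recomputed here as ``x2 = y2 - G(y1)`` and ``x1 = y1 - F(x2)``
+        while iterating the layers in reverse, instead of being read from
+        stored activations.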
+ """ + d_x1, d_x2 = torch.chunk(dx, 2, dim=-1) + # retrieve params from ctx for backward + x1, x2, *int_tensors = ctx.saved_tensors + # no buffering + if len(int_tensors) != 0: + buffer_layers = int_tensors[0].tolist() + else: + buffer_layers = [] + + layers = ctx.layers + + for _, layer in enumerate(layers[::-1]): + if layer.layer_id in buffer_layers: + x1, x2, d_x1, d_x2 = layer.backward_pass( + y1=int_tensors[buffer_layers.index(layer.layer_id) * 2 + + 1], + y2=int_tensors[buffer_layers.index(layer.layer_id) * 2 + + 2], + d_y1=d_x1, + d_y2=d_x2, + ) + else: + x1, x2, d_x1, d_x2 = layer.backward_pass( + y1=x1, + y2=x2, + d_y1=d_x1, + d_y2=d_x2, + ) + + dx = torch.cat([d_x1, d_x2], dim=-1) + + del int_tensors + del d_x1, d_x2, x1, x2 + + return dx, None, None + + +class RevTransformerEncoderLayer(BaseModule): + """Reversible Transformer Encoder Layer. + + This module is a building block of Reversible Transformer Encoder, + which support backpropagation without storing activations. + The residual connection is not applied to the FFN layer. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed. + Default: 0.0 + attn_drop_rate (float): The drop out rate for attention layer. + Default: 0.0 + drop_path_rate (float): stochastic depth rate. + Default 0.0 + num_fcs (int): The number of linear in FFN + Default: 2 + qkv_bias (bool): enable bias for qkv if True. + Default: True + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU') + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN') + layer_id (int): The layer id of current layer. Used in RevBackProp. + Default: 0 + init_cfg (dict or list[dict], optional): Initialization config dict. 
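+
+    The layer keeps two activation streams and couples them reversibly:
+    ``y1 = x1 + DropPath(Attn(LN1(x2)))`` and
+    ``y2 = x2 + DropPath(FFN(LN2(y1)))``, so that ``(x1, x2)`` can be
+    recovered from ``(y1, y2)`` in ``backward_pass`` without storing
+    intermediate activations.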
+ """ + + def __init__(self, + embed_dims: int, + num_heads: int, + feedforward_channels: int, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + num_fcs: int = 2, + qkv_bias: bool = True, + act_cfg: dict = dict(type='GELU'), + norm_cfg: dict = dict(type='LN'), + layer_id: int = 0, + init_cfg=None): + super(RevTransformerEncoderLayer, self).__init__(init_cfg=init_cfg) + + self.drop_path_cfg = dict(type='DropPath', drop_prob=drop_path_rate) + self.embed_dims = embed_dims + + self.ln1 = build_norm_layer(norm_cfg, self.embed_dims) + + self.attn = MultiheadAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + qkv_bias=qkv_bias) + + self.ln2 = build_norm_layer(norm_cfg, self.embed_dims) + + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + act_cfg=act_cfg, + add_identity=False) + + self.layer_id = layer_id + self.seeds = {} + + def init_weights(self): + super(RevTransformerEncoderLayer, self).init_weights() + for m in self.ffn.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + nn.init.normal_(m.bias, std=1e-6) + + def seed_cuda(self, key): + """Fix seeds to allow for stochastic elements such as dropout to be + reproduced exactly in activation recomputation in the backward pass.""" + # randomize seeds + # use cuda generator if available + if (hasattr(torch.cuda, 'default_generators') + and len(torch.cuda.default_generators) > 0): + # GPU + device_idx = torch.cuda.current_device() + seed = torch.cuda.default_generators[device_idx].seed() + else: + # CPU + seed = int(torch.seed() % sys.maxsize) + + self.seeds[key] = seed + torch.manual_seed(self.seeds[key]) + + def forward(self, x1, x2): + """ + Implementation of Reversible TransformerEncoderLayer + + ` + x = x + self.attn(self.ln1(x)) + x = self.ffn(self.ln2(x), identity=x) + ` + """ + self.seed_cuda('attn') + # attention output + f_x2 = self.attn(self.ln1(x2)) + # apply droppath on attention output + self.seed_cuda('droppath') + f_x2_dropped = build_dropout(self.drop_path_cfg)(f_x2) + y1 = x1 + f_x2_dropped + + # free memory + if self.training: + del x1 + + # ffn output + self.seed_cuda('ffn') + g_y1 = self.ffn(self.ln2(y1)) + # apply droppath on ffn output + torch.manual_seed(self.seeds['droppath']) + g_y1_dropped = build_dropout(self.drop_path_cfg)(g_y1) + # final output + y2 = x2 + g_y1_dropped + + # free memory + if self.training: + del x2 + + return y1, y2 + + def backward_pass(self, y1, y2, d_y1, d_y2): + """Activation re-compute with the following equation. 
+ + x2 = y2 - g(y1), g = FFN + x1 = y1 - f(x2), f = MSHA + """ + + # temporarily record intermediate activation for G + # and use them for gradient calculation of G + with torch.enable_grad(): + y1.requires_grad = True + + torch.manual_seed(self.seeds['ffn']) + g_y1 = self.ffn(self.ln2(y1)) + + torch.manual_seed(self.seeds['droppath']) + g_y1 = build_dropout(self.drop_path_cfg)(g_y1) + + g_y1.backward(d_y2, retain_graph=True) + + # activate recomputation is by design and not part of + # the computation graph in forward pass + with torch.no_grad(): + x2 = y2 - g_y1 + del g_y1 + + d_y1 = d_y1 + y1.grad + y1.grad = None + + # record F activation and calculate gradients on F + with torch.enable_grad(): + x2.requires_grad = True + + torch.manual_seed(self.seeds['attn']) + f_x2 = self.attn(self.ln1(x2)) + + torch.manual_seed(self.seeds['droppath']) + f_x2 = build_dropout(self.drop_path_cfg)(f_x2) + + f_x2.backward(d_y1, retain_graph=True) + + # propagate reverse computed activations at the + # start of the previous block + with torch.no_grad(): + x1 = y1 - f_x2 + del f_x2, y1 + + d_y2 = d_y2 + x2.grad + + x2.grad = None + x2 = x2.detach() + + return x1, x2, d_y1, d_y2 + + +class TwoStreamFusion(nn.Module): + """A general constructor for neural modules fusing two equal sized tensors + in forward. + + Args: + mode (str): The mode of fusion. Options are 'add', 'max', 'min', + 'avg', 'concat'. + """ + + def __init__(self, mode: str): + super().__init__() + self.mode = mode + + if mode == 'add': + self.fuse_fn = lambda x: torch.stack(x).sum(dim=0) + elif mode == 'max': + self.fuse_fn = lambda x: torch.stack(x).max(dim=0).values + elif mode == 'min': + self.fuse_fn = lambda x: torch.stack(x).min(dim=0).values + elif mode == 'avg': + self.fuse_fn = lambda x: torch.stack(x).mean(dim=0) + elif mode == 'concat': + self.fuse_fn = lambda x: torch.cat(x, dim=-1) + else: + raise NotImplementedError + + def forward(self, x): + # split the tensor into two halves in the channel dimension + x = torch.chunk(x, 2, dim=2) + return self.fuse_fn(x) + + +@MODELS.register_module() +class RevVisionTransformer(BaseBackbone): + """Reversible Vision Transformer. + + A PyTorch implementation of : `Reversible Vision Transformers + `_ # noqa: E501 + + Args: + arch (str | dict): Vision Transformer architecture. If use string, + choose from 'small', 'base', 'large', 'deit-tiny', 'deit-small' + and 'deit-base'. If use dict, it should have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of transformer encoder layers. + - **num_heads** (int): The number of heads in attention modules. + - **feedforward_channels** (int): The hidden dimensions in + feedforward modules. + + Defaults to 'base'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + in_channels (int): The num of input channels. Defaults to 3. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + out_type (str): The type of output features. 
Please choose from + + - ``"cls_token"``: The class token tensor with shape (B, C). + - ``"featmap"``: The feature map tensor from the patch tokens + with shape (B, C, H, W). + - ``"avg_featmap"``: The global averaged feature map tensor + with shape (B, C). + - ``"raw"``: The raw feature tensor includes patch tokens and + class tokens with shape (B, L, C). + + Defaults to ``"avg_featmap"``. + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Defaults to False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + fusion_mode (str): The fusion mode of transformer layers. + Defaults to 'concat'. + no_custom_backward (bool): Whether to use custom backward. + Defaults to False. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + arch_zoo = { + **dict.fromkeys( + ['s', 'small'], { + 'embed_dims': 768, + 'num_layers': 8, + 'num_heads': 8, + 'feedforward_channels': 768 * 3, + }), + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 3072 + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 1024, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + }), + **dict.fromkeys( + ['h', 'huge'], + { + # The same as the implementation in MAE + # + 'embed_dims': 1280, + 'num_layers': 32, + 'num_heads': 16, + 'feedforward_channels': 5120 + }), + **dict.fromkeys( + ['deit-t', 'deit-tiny'], { + 'embed_dims': 192, + 'num_layers': 12, + 'num_heads': 3, + 'feedforward_channels': 192 * 4 + }), + **dict.fromkeys( + ['deit-s', 'deit-small'], { + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 6, + 'feedforward_channels': 384 * 4 + }), + **dict.fromkeys( + ['deit-b', 'deit-base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 768 * 4 + }), + } + num_extra_tokens = 0 # The official RevViT doesn't have class token + OUT_TYPES = {'raw', 'cls_token', 'featmap', 'avg_featmap'} + + def __init__(self, + arch='base', + img_size=224, + patch_size=16, + in_channels=3, + drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + out_type='avg_featmap', + with_cls_token=False, + frozen_stages=-1, + interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + fusion_mode='concat', + no_custom_backward=False, + init_cfg=None): + super(RevVisionTransformer, self).__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'num_heads', 'feedforward_channels' + } + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.img_size = to_2tuple(img_size) + self.no_custom_backward = no_custom_backward + + # Set patch embedding + _patch_cfg = dict( + 
in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + # Set out type + if out_type not in self.OUT_TYPES: + raise ValueError(f'Unsupported `out_type` {out_type}, please ' + f'choose from {self.OUT_TYPES}') + self.out_type = out_type + + # Set cls token + if with_cls_token: + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + self.num_extra_tokens = 1 + elif out_type != 'cls_token': + self.cls_token = None + self.num_extra_tokens = 0 + else: + raise ValueError( + 'with_cls_token must be True when `out_type="cls_token"`.') + + # Set position embedding + self.interpolate_mode = interpolate_mode + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches + self.num_extra_tokens, + self.embed_dims)) + self._register_load_state_dict_pre_hook(self._prepare_pos_embed) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, self.num_layers) + + self.layers = ModuleList() + if isinstance(layer_cfgs, dict): + layer_cfgs = [layer_cfgs] * self.num_layers + for i in range(self.num_layers): + _layer_cfg = dict( + embed_dims=self.embed_dims, + num_heads=self.arch_settings['num_heads'], + feedforward_channels=self. + arch_settings['feedforward_channels'], + drop_rate=drop_rate, + drop_path_rate=dpr[i], + qkv_bias=qkv_bias, + layer_id=i, + norm_cfg=norm_cfg) + _layer_cfg.update(layer_cfgs[i]) + self.layers.append(RevTransformerEncoderLayer(**_layer_cfg)) + + # fusion operation for the final output + self.fusion_layer = TwoStreamFusion(mode=fusion_mode) + + self.frozen_stages = frozen_stages + self.final_norm = final_norm + if final_norm: + self.ln1 = build_norm_layer(norm_cfg, self.embed_dims * 2) + + # freeze stages only when self.frozen_stages > 0 + if self.frozen_stages > 0: + self._freeze_stages() + + def init_weights(self): + super(RevVisionTransformer, self).init_weights() + if not (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + trunc_normal_(self.pos_embed, std=0.02) + + def _prepare_pos_embed(self, state_dict, prefix, *args, **kwargs): + name = prefix + 'pos_embed' + if name not in state_dict.keys(): + return + + ckpt_pos_embed_shape = state_dict[name].shape + if self.pos_embed.shape != ckpt_pos_embed_shape: + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.info( + f'Resize the pos_embed shape from {ckpt_pos_embed_shape} ' + f'to {self.pos_embed.shape}.') + + ckpt_pos_embed_shape = to_2tuple( + int(np.sqrt(ckpt_pos_embed_shape[1] - self.num_extra_tokens))) + pos_embed_shape = self.patch_embed.init_out_size + + state_dict[name] = resize_pos_embed(state_dict[name], + ckpt_pos_embed_shape, + pos_embed_shape, + self.interpolate_mode, + self.num_extra_tokens) + + @staticmethod + def resize_pos_embed(*args, **kwargs): + """Interface for backward-compatibility.""" + return resize_pos_embed(*args, **kwargs) + + def _freeze_stages(self): + # freeze position embedding + self.pos_embed.requires_grad = False + # set dropout to eval model + self.drop_after_pos.eval() + # freeze patch embedding + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + # freeze cls_token + if self.cls_token is not None: + 
self.cls_token.requires_grad = False + # freeze layers + for i in range(1, self.frozen_stages + 1): + m = self.layers[i - 1] + m.eval() + for param in m.parameters(): + param.requires_grad = False + # freeze the last layer norm + if self.frozen_stages == len(self.layers) and self.final_norm: + self.ln1.eval() + for param in self.ln1.parameters(): + param.requires_grad = False + + def forward(self, x): + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + + if self.cls_token is not None: + cls_token = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_token, x), dim=1) + + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + x = self.drop_after_pos(x) + + x = torch.cat([x, x], dim=-1) + + # forward with different conditions + if not self.training or self.no_custom_backward: + # in eval/inference model + executing_fn = RevVisionTransformer._forward_vanilla_bp + else: + # use custom backward when self.training=True. + executing_fn = RevBackProp.apply + + x = executing_fn(x, self.layers, []) + + if self.final_norm: + x = self.ln1(x) + x = self.fusion_layer(x) + + return (self._format_output(x, patch_resolution), ) + + @staticmethod + def _forward_vanilla_bp(hidden_state, layers, buffer=[]): + """Using reversible layers without reversible backpropagation. + + Debugging purpose only. Activated with self.no_custom_backward + """ + # split into ffn state(ffn_out) and attention output(attn_out) + ffn_out, attn_out = torch.chunk(hidden_state, 2, dim=-1) + del hidden_state + + for _, layer in enumerate(layers): + attn_out, ffn_out = layer(attn_out, ffn_out) + + return torch.cat([attn_out, ffn_out], dim=-1) + + def _format_output(self, x, hw): + if self.out_type == 'raw': + return x + if self.out_type == 'cls_token': + return x[:, 0] + + patch_token = x[:, self.num_extra_tokens:] + if self.out_type == 'featmap': + B = x.size(0) + # (B, N, C) -> (B, H, W, C) -> (B, C, H, W) + return patch_token.reshape(B, *hw, -1).permute(0, 3, 1, 2) + if self.out_type == 'avg_featmap': + return patch_token.mean(dim=1) diff --git a/mmpretrain/models/backbones/riformer.py b/mmpretrain/models/backbones/riformer.py new file mode 100644 index 0000000..ad7cb4d --- /dev/null +++ b/mmpretrain/models/backbones/riformer.py @@ -0,0 +1,390 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + +import torch +import torch.nn as nn +from mmcv.cnn.bricks import DropPath, build_norm_layer +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone +from .poolformer import Mlp, PatchEmbed + + +class Affine(nn.Module): + """Affine Transformation module. + + Args: + in_features (int): Input dimension. + """ + + def __init__(self, in_features): + super().__init__() + self.affine = nn.Conv2d( + in_features, + in_features, + kernel_size=1, + stride=1, + padding=0, + groups=in_features, + bias=True) + + def forward(self, x): + return self.affine(x) - x + + +class RIFormerBlock(BaseModule): + """RIFormer Block. + + Args: + dim (int): Embedding dim. + mlp_ratio (float): Mlp expansion ratio. Defaults to 4. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='GN', num_groups=1)``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + drop (float): Dropout rate. Defaults to 0. + drop_path (float): Stochastic depth rate. Defaults to 0. 
+ layer_scale_init_value (float): Init value for Layer Scale. + Defaults to 1e-5. + deploy (bool): Whether to switch the model structure to + deployment mode. Default: False. + """ + + def __init__(self, + dim, + mlp_ratio=4., + norm_cfg=dict(type='GN', num_groups=1), + act_cfg=dict(type='GELU'), + drop=0., + drop_path=0., + layer_scale_init_value=1e-5, + deploy=False): + + super().__init__() + + if deploy: + self.norm_reparam = build_norm_layer(norm_cfg, dim)[1] + else: + self.norm1 = build_norm_layer(norm_cfg, dim)[1] + self.token_mixer = Affine(in_features=dim) + self.norm2 = build_norm_layer(norm_cfg, dim)[1] + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, + hidden_features=mlp_hidden_dim, + act_cfg=act_cfg, + drop=drop) + + # The following two techniques are useful to train deep RIFormers. + self.drop_path = DropPath(drop_path) if drop_path > 0. \ + else nn.Identity() + self.layer_scale_1 = nn.Parameter( + layer_scale_init_value * torch.ones((dim)), requires_grad=True) + self.layer_scale_2 = nn.Parameter( + layer_scale_init_value * torch.ones((dim)), requires_grad=True) + self.norm_cfg = norm_cfg + self.dim = dim + self.deploy = deploy + + def forward(self, x): + if hasattr(self, 'norm_reparam'): + x = x + self.drop_path( + self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * + self.norm_reparam(x)) + x = x + self.drop_path( + self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * + self.mlp(self.norm2(x))) + else: + x = x + self.drop_path( + self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * + self.token_mixer(self.norm1(x))) + x = x + self.drop_path( + self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * + self.mlp(self.norm2(x))) + return x + + def fuse_affine(self, norm, token_mixer): + gamma_affn = token_mixer.affine.weight.reshape(-1) + gamma_affn = gamma_affn - torch.ones_like(gamma_affn) + beta_affn = token_mixer.affine.bias + gamma_ln = norm.weight + beta_ln = norm.bias + return (gamma_ln * gamma_affn), (beta_ln * gamma_affn + beta_affn) + + def get_equivalent_scale_bias(self): + eq_s, eq_b = self.fuse_affine(self.norm1, self.token_mixer) + return eq_s, eq_b + + def switch_to_deploy(self): + if self.deploy: + return + eq_s, eq_b = self.get_equivalent_scale_bias() + self.norm_reparam = build_norm_layer(self.norm_cfg, self.dim)[1] + self.norm_reparam.weight.data = eq_s + self.norm_reparam.bias.data = eq_b + self.__delattr__('norm1') + if hasattr(self, 'token_mixer'): + self.__delattr__('token_mixer') + self.deploy = True + + +def basic_blocks(dim, + index, + layers, + mlp_ratio=4., + norm_cfg=dict(type='GN', num_groups=1), + act_cfg=dict(type='GELU'), + drop_rate=.0, + drop_path_rate=0., + layer_scale_init_value=1e-5, + deploy=False): + """generate RIFormer blocks for a stage.""" + blocks = [] + for block_idx in range(layers[index]): + block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / ( + sum(layers) - 1) + blocks.append( + RIFormerBlock( + dim, + mlp_ratio=mlp_ratio, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + drop=drop_rate, + drop_path=block_dpr, + layer_scale_init_value=layer_scale_init_value, + deploy=deploy, + )) + blocks = nn.Sequential(*blocks) + + return blocks + + +@MODELS.register_module() +class RIFormer(BaseBackbone): + """RIFormer. + + A PyTorch implementation of RIFormer introduced by: + `RIFormer: Keep Your Vision Backbone Effective But Removing Token Mixer `_ + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architecture in ``RIFormer.arch_settings``. 
And if dict, it + should include the following two keys: + + - layers (list[int]): Number of blocks at each stage. + - embed_dims (list[int]): The number of channels at each stage. + - mlp_ratios (list[int]): Expansion ratio of MLPs. + - layer_scale_init_value (float): Init value for Layer Scale. + + Defaults to 'S12'. + + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='LN2d', eps=1e-6)``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + in_patch_size (int): The patch size of/? input image patch embedding. + Defaults to 7. + in_stride (int): The stride of input image patch embedding. + Defaults to 4. + in_pad (int): The padding of input image patch embedding. + Defaults to 2. + down_patch_size (int): The patch size of downsampling patch embedding. + Defaults to 3. + down_stride (int): The stride of downsampling patch embedding. + Defaults to 2. + down_pad (int): The padding of downsampling patch embedding. + Defaults to 1. + drop_rate (float): Dropout rate. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + out_indices (Sequence | int): Output from which network position. + Index 0-6 respectively corresponds to + [stage1, downsampling, stage2, downsampling, stage3, downsampling, stage4] + Defaults to -1, means the last stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to -1, which means not freezing any parameters. + deploy (bool): Whether to switch the model structure to + deployment mode. Default: False. + init_cfg (dict, optional): Initialization config dict + """ # noqa: E501 + + # --layers: [x,x,x,x], numbers of layers for the four stages + # --embed_dims, --mlp_ratios: + # embedding dims and mlp ratios for the four stages + # --downsamples: flags to apply downsampling or not in four blocks + arch_settings = { + 's12': { + 'layers': [2, 2, 6, 2], + 'embed_dims': [64, 128, 320, 512], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-5, + }, + 's24': { + 'layers': [4, 4, 12, 4], + 'embed_dims': [64, 128, 320, 512], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-5, + }, + 's36': { + 'layers': [6, 6, 18, 6], + 'embed_dims': [64, 128, 320, 512], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-6, + }, + 'm36': { + 'layers': [6, 6, 18, 6], + 'embed_dims': [96, 192, 384, 768], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-6, + }, + 'm48': { + 'layers': [8, 8, 24, 8], + 'embed_dims': [96, 192, 384, 768], + 'mlp_ratios': [4, 4, 4, 4], + 'layer_scale_init_value': 1e-6, + }, + } + + def __init__(self, + arch='s12', + in_channels=3, + norm_cfg=dict(type='GN', num_groups=1), + act_cfg=dict(type='GELU'), + in_patch_size=7, + in_stride=4, + in_pad=2, + down_patch_size=3, + down_stride=2, + down_pad=1, + drop_rate=0., + drop_path_rate=0., + out_indices=-1, + frozen_stages=-1, + init_cfg=None, + deploy=False): + + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + assert 'layers' in arch and 'embed_dims' in arch, \ + f'The arch dict must have "layers" and "embed_dims", ' \ + f'but got {list(arch.keys())}.' 
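+            # An illustrative custom arch dict (values mirror the built-in
+            # 's12' setting and are only an example):
+            #   arch = dict(layers=[2, 2, 6, 2],
+            #               embed_dims=[64, 128, 320, 512])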
+ + layers = arch['layers'] + embed_dims = arch['embed_dims'] + mlp_ratios = arch['mlp_ratios'] \ + if 'mlp_ratios' in arch else [4, 4, 4, 4] + layer_scale_init_value = arch['layer_scale_init_value'] \ + if 'layer_scale_init_value' in arch else 1e-5 + + self.patch_embed = PatchEmbed( + patch_size=in_patch_size, + stride=in_stride, + padding=in_pad, + in_chans=in_channels, + embed_dim=embed_dims[0]) + + # set the main block in network + network = [] + for i in range(len(layers)): + stage = basic_blocks( + embed_dims[i], + i, + layers, + mlp_ratio=mlp_ratios[i], + norm_cfg=norm_cfg, + act_cfg=act_cfg, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value, + deploy=deploy) + network.append(stage) + if i >= len(layers) - 1: + break + if embed_dims[i] != embed_dims[i + 1]: + # downsampling between two stages + network.append( + PatchEmbed( + patch_size=down_patch_size, + stride=down_stride, + padding=down_pad, + in_chans=embed_dims[i], + embed_dim=embed_dims[i + 1])) + + self.network = nn.ModuleList(network) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = 7 + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + if self.out_indices: + for i_layer in self.out_indices: + layer = build_norm_layer(norm_cfg, + embed_dims[(i_layer + 1) // 2])[1] + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + + self.frozen_stages = frozen_stages + self._freeze_stages() + self.deploy = deploy + + def forward_embeddings(self, x): + x = self.patch_embed(x) + return x + + def forward_tokens(self, x): + outs = [] + for idx, block in enumerate(self.network): + x = block(x) + if idx in self.out_indices: + norm_layer = getattr(self, f'norm{idx}') + x_out = norm_layer(x) + outs.append(x_out) + return tuple(outs) + + def forward(self, x): + # input embedding + x = self.forward_embeddings(x) + # through backbone + x = self.forward_tokens(x) + return x + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + for i in range(0, self.frozen_stages + 1): + # Include both block and downsample layer. + module = self.network[i] + module.eval() + for param in module.parameters(): + param.requires_grad = False + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + norm_layer.eval() + for param in norm_layer.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(RIFormer, self).train(mode) + self._freeze_stages() + return self + + def switch_to_deploy(self): + for m in self.modules(): + if isinstance(m, RIFormerBlock): + m.switch_to_deploy() + self.deploy = True diff --git a/mmpretrain/models/backbones/seresnet.py b/mmpretrain/models/backbones/seresnet.py new file mode 100644 index 0000000..4437c17 --- /dev/null +++ b/mmpretrain/models/backbones/seresnet.py @@ -0,0 +1,125 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.utils.checkpoint as cp + +from mmpretrain.registry import MODELS +from ..utils.se_layer import SELayer +from .resnet import Bottleneck, ResLayer, ResNet + + +class SEBottleneck(Bottleneck): + """SEBottleneck block for SEResNet. + + Args: + in_channels (int): The input channels of the SEBottleneck block. 
+ out_channels (int): The output channel of the SEBottleneck block. + se_ratio (int): Squeeze ratio in SELayer. Default: 16 + """ + + def __init__(self, in_channels, out_channels, se_ratio=16, **kwargs): + super(SEBottleneck, self).__init__(in_channels, out_channels, **kwargs) + self.se_layer = SELayer(out_channels, ratio=se_ratio) + + def forward(self, x): + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.norm3(out) + + out = self.se_layer(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +@MODELS.register_module() +class SEResNet(ResNet): + """SEResNet backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + se_ratio (int): Squeeze ratio in SELayer. Default: 16. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. + + Example: + >>> from mmpretrain.models import SEResNet + >>> import torch + >>> self = SEResNet(depth=50) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 224, 224) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... 
print(tuple(level_out.shape)) + (1, 64, 56, 56) + (1, 128, 28, 28) + (1, 256, 14, 14) + (1, 512, 7, 7) + """ + + arch_settings = { + 50: (SEBottleneck, (3, 4, 6, 3)), + 101: (SEBottleneck, (3, 4, 23, 3)), + 152: (SEBottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, se_ratio=16, **kwargs): + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for SEResNet') + self.se_ratio = se_ratio + super(SEResNet, self).__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer(se_ratio=self.se_ratio, **kwargs) diff --git a/mmpretrain/models/backbones/seresnext.py b/mmpretrain/models/backbones/seresnext.py new file mode 100644 index 0000000..6a28380 --- /dev/null +++ b/mmpretrain/models/backbones/seresnext.py @@ -0,0 +1,155 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmcv.cnn import build_conv_layer, build_norm_layer + +from mmpretrain.registry import MODELS +from .resnet import ResLayer +from .seresnet import SEBottleneck as _SEBottleneck +from .seresnet import SEResNet + + +class SEBottleneck(_SEBottleneck): + """SEBottleneck block for SEResNeXt. + + Args: + in_channels (int): Input channels of this block. + out_channels (int): Output channels of this block. + base_channels (int): Middle channels of the first stage. Default: 64. + groups (int): Groups of conv2. + width_per_group (int): Width per group of conv2. 64x4d indicates + ``groups=64, width_per_group=4`` and 32x8d indicates + ``groups=32, width_per_group=8``. + stride (int): stride of the block. Default: 1 + dilation (int): dilation of convolution. Default: 1 + downsample (nn.Module, optional): downsample operation on identity + branch. Default: None + se_ratio (int): Squeeze ratio in SELayer. Default: 16 + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + conv_cfg (dict, optional): dictionary to construct and config conv + layer. Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. + """ + + def __init__(self, + in_channels, + out_channels, + base_channels=64, + groups=32, + width_per_group=4, + se_ratio=16, + **kwargs): + super(SEBottleneck, self).__init__(in_channels, out_channels, se_ratio, + **kwargs) + self.groups = groups + self.width_per_group = width_per_group + + # We follow the same rational of ResNext to compute mid_channels. + # For SEResNet bottleneck, middle channels are determined by expansion + # and out_channels, but for SEResNeXt bottleneck, it is determined by + # groups and width_per_group and the stage it is located in. 
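+        # For example, with out_channels=256 (so mid_channels=64 from the
+        # default bottleneck expansion of 4), groups=32 and
+        # width_per_group=4 give mid_channels = 32 * 4 * 64 // 64 = 128.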
+ if groups != 1: + assert self.mid_channels % base_channels == 0 + self.mid_channels = ( + groups * width_per_group * self.mid_channels // base_channels) + + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=1) + self.norm2_name, norm2 = build_norm_layer( + self.norm_cfg, self.mid_channels, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + self.norm_cfg, self.out_channels, postfix=3) + + self.conv1 = build_conv_layer( + self.conv_cfg, + self.in_channels, + self.mid_channels, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.mid_channels, + kernel_size=3, + stride=self.conv2_stride, + padding=self.dilation, + dilation=self.dilation, + groups=groups, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + self.conv_cfg, + self.mid_channels, + self.out_channels, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + +@MODELS.register_module() +class SEResNeXt(SEResNet): + """SEResNeXt backbone. + + Please refer to the `paper `__ for + details. + + Args: + depth (int): Network depth, from {50, 101, 152}. + groups (int): Groups of conv2 in Bottleneck. Default: 32. + width_per_group (int): Width per group of conv2 in Bottleneck. + Default: 4. + se_ratio (int): Squeeze ratio in SELayer. Default: 16. + in_channels (int): Number of input image channels. Default: 3. + stem_channels (int): Output channels of the stem layer. Default: 64. + num_stages (int): Stages of the network. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + Default: ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Default: ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. If only one + stage is specified, a single tensor (feature map) is returned, + otherwise multiple stages are specified, a tuple of tensors will + be returned. Default: ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Default: False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Default: -1. + conv_cfg (dict | None): The config dict for conv layers. Default: None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Default: True. 
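+
+    A minimal usage sketch, mirroring the ``SEResNet`` example above
+    (output shapes are illustrative for a 224x224 input):
+
+    Example:
+        >>> from mmpretrain.models import SEResNeXt
+        >>> import torch
+        >>> self = SEResNeXt(depth=50, out_indices=(0, 1, 2, 3))
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 3, 224, 224)
+        >>> level_outputs = self.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 256, 56, 56)
+        (1, 512, 28, 28)
+        (1, 1024, 14, 14)
+        (1, 2048, 7, 7)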
+ """ + + arch_settings = { + 50: (SEBottleneck, (3, 4, 6, 3)), + 101: (SEBottleneck, (3, 4, 23, 3)), + 152: (SEBottleneck, (3, 8, 36, 3)) + } + + def __init__(self, depth, groups=32, width_per_group=4, **kwargs): + self.groups = groups + self.width_per_group = width_per_group + super(SEResNeXt, self).__init__(depth, **kwargs) + + def make_res_layer(self, **kwargs): + return ResLayer( + groups=self.groups, + width_per_group=self.width_per_group, + base_channels=self.base_channels, + **kwargs) diff --git a/mmpretrain/models/backbones/shufflenet_v1.py b/mmpretrain/models/backbones/shufflenet_v1.py new file mode 100644 index 0000000..2cc3617 --- /dev/null +++ b/mmpretrain/models/backbones/shufflenet_v1.py @@ -0,0 +1,321 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule, build_activation_layer +from mmengine.model import BaseModule +from mmengine.model.weight_init import constant_init, normal_init +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.utils import channel_shuffle, make_divisible +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + + +class ShuffleUnit(BaseModule): + """ShuffleUnit block. + + ShuffleNet unit with pointwise group convolution (GConv) and channel + shuffle. + + Args: + in_channels (int): The input channels of the ShuffleUnit. + out_channels (int): The output channels of the ShuffleUnit. + groups (int): The number of groups to be used in grouped 1x1 + convolutions in each ShuffleUnit. Default: 3 + first_block (bool): Whether it is the first ShuffleUnit of a + sequential ShuffleUnits. Default: True, which means not using the + grouped 1x1 convolution. + combine (str): The ways to combine the input and output + branches. Default: 'add'. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Default: False. + + Returns: + Tensor: The output tensor. + """ + + def __init__(self, + in_channels, + out_channels, + groups=3, + first_block=True, + combine='add', + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False): + super(ShuffleUnit, self).__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.first_block = first_block + self.combine = combine + self.groups = groups + self.bottleneck_channels = self.out_channels // 4 + self.with_cp = with_cp + + if self.combine == 'add': + self.depthwise_stride = 1 + self._combine_func = self._add + assert in_channels == out_channels, ( + 'in_channels must be equal to out_channels when combine ' + 'is add') + elif self.combine == 'concat': + self.depthwise_stride = 2 + self._combine_func = self._concat + self.out_channels -= self.in_channels + self.avgpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1) + else: + raise ValueError(f'Cannot combine tensors with {self.combine}. 
' + 'Only "add" and "concat" are supported') + + self.first_1x1_groups = 1 if first_block else self.groups + self.g_conv_1x1_compress = ConvModule( + in_channels=self.in_channels, + out_channels=self.bottleneck_channels, + kernel_size=1, + groups=self.first_1x1_groups, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.depthwise_conv3x3_bn = ConvModule( + in_channels=self.bottleneck_channels, + out_channels=self.bottleneck_channels, + kernel_size=3, + stride=self.depthwise_stride, + padding=1, + groups=self.bottleneck_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.g_conv_1x1_expand = ConvModule( + in_channels=self.bottleneck_channels, + out_channels=self.out_channels, + kernel_size=1, + groups=self.groups, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + self.act = build_activation_layer(act_cfg) + + @staticmethod + def _add(x, out): + # residual connection + return x + out + + @staticmethod + def _concat(x, out): + # concatenate along channel axis + return torch.cat((x, out), 1) + + def forward(self, x): + + def _inner_forward(x): + residual = x + + out = self.g_conv_1x1_compress(x) + out = self.depthwise_conv3x3_bn(out) + + if self.groups > 1: + out = channel_shuffle(out, self.groups) + + out = self.g_conv_1x1_expand(out) + + if self.combine == 'concat': + residual = self.avgpool(residual) + out = self.act(out) + out = self._combine_func(residual, out) + else: + out = self._combine_func(residual, out) + out = self.act(out) + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@MODELS.register_module() +class ShuffleNetV1(BaseBackbone): + """ShuffleNetV1 backbone. + + Args: + groups (int): The number of groups to be used in grouped 1x1 + convolutions in each ShuffleUnit. Default: 3. + widen_factor (float): Width multiplier - adjusts the number + of channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int]): Output from which stages. + Default: (2, ) + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + groups=3, + widen_factor=1.0, + out_indices=(2, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + norm_eval=False, + with_cp=False, + init_cfg=None): + super(ShuffleNetV1, self).__init__(init_cfg) + self.init_cfg = init_cfg + self.stage_blocks = [4, 8, 4] + self.groups = groups + + for index in out_indices: + if index not in range(0, 3): + raise ValueError('the item in out_indices must in ' + f'range(0, 3). But received {index}') + + if frozen_stages not in range(-1, 3): + raise ValueError('frozen_stages must be in range(-1, 3). 
' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + if groups == 1: + channels = (144, 288, 576) + elif groups == 2: + channels = (200, 400, 800) + elif groups == 3: + channels = (240, 480, 960) + elif groups == 4: + channels = (272, 544, 1088) + elif groups == 8: + channels = (384, 768, 1536) + else: + raise ValueError(f'{groups} groups is not supported for 1x1 ' + 'Grouped Convolutions') + + channels = [make_divisible(ch * widen_factor, 8) for ch in channels] + + self.in_channels = int(24 * widen_factor) + + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layers = nn.ModuleList() + for i, num_blocks in enumerate(self.stage_blocks): + first_block = True if i == 0 else False + layer = self.make_layer(channels[i], num_blocks, first_block) + self.layers.append(layer) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + for i in range(self.frozen_stages): + layer = self.layers[i] + layer.eval() + for param in layer.parameters(): + param.requires_grad = False + + def init_weights(self): + super(ShuffleNetV1, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + if 'conv1' in name: + normal_init(m, mean=0, std=0.01) + else: + normal_init(m, mean=0, std=1.0 / m.weight.shape[1]) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m, val=1, bias=0.0001) + if isinstance(m, _BatchNorm): + if m.running_mean is not None: + nn.init.constant_(m.running_mean, 0) + + def make_layer(self, out_channels, num_blocks, first_block=False): + """Stack ShuffleUnit blocks to make a layer. + + Args: + out_channels (int): out_channels of the block. + num_blocks (int): Number of blocks. + first_block (bool): Whether is the first ShuffleUnit of a + sequential ShuffleUnits. Default: False, which means using + the grouped 1x1 convolution. + """ + layers = [] + for i in range(num_blocks): + first_block = first_block if i == 0 else False + combine_mode = 'concat' if i == 0 else 'add' + layers.append( + ShuffleUnit( + self.in_channels, + out_channels, + groups=self.groups, + first_block=first_block, + combine=combine_mode, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.maxpool(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def train(self, mode=True): + super(ShuffleNetV1, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpretrain/models/backbones/shufflenet_v2.py b/mmpretrain/models/backbones/shufflenet_v2.py new file mode 100644 index 0000000..02f9c74 --- /dev/null +++ b/mmpretrain/models/backbones/shufflenet_v2.py @@ -0,0 +1,305 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from mmengine.model.weight_init import constant_init, normal_init +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.utils import channel_shuffle +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + + +class InvertedResidual(BaseModule): + """InvertedResidual block for ShuffleNetV2 backbone. + + Args: + in_channels (int): The input channels of the block. + out_channels (int): The output channels of the block. + stride (int): Stride of the 3x3 convolution layer. Default: 1 + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + + Returns: + Tensor: The output tensor. + """ + + def __init__(self, + in_channels, + out_channels, + stride=1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + with_cp=False, + init_cfg=None): + super(InvertedResidual, self).__init__(init_cfg) + self.stride = stride + self.with_cp = with_cp + + branch_features = out_channels // 2 + if self.stride == 1: + assert in_channels == branch_features * 2, ( + f'in_channels ({in_channels}) should equal to ' + f'branch_features * 2 ({branch_features * 2}) ' + 'when stride is 1') + + if in_channels != branch_features * 2: + assert self.stride != 1, ( + f'stride ({self.stride}) should not equal 1 when ' + f'in_channels != branch_features * 2') + + if self.stride > 1: + self.branch1 = nn.Sequential( + ConvModule( + in_channels, + in_channels, + kernel_size=3, + stride=self.stride, + padding=1, + groups=in_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + in_channels, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ) + + self.branch2 = nn.Sequential( + ConvModule( + in_channels if (self.stride > 1) else branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg), + ConvModule( + branch_features, + branch_features, + kernel_size=3, + stride=self.stride, + padding=1, + groups=branch_features, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None), + ConvModule( + branch_features, + branch_features, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def forward(self, x): + + def _inner_forward(x): + if self.stride > 1: + out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) + else: + # Channel Split operation. using these lines of code to replace + # ``chunk(x, 2, dim=1)`` can make it easier to deploy a + # shufflenetv2 model by using mmdeploy. + channels = x.shape[1] + c = channels // 2 + channels % 2 + x1 = x[:, :c, :, :] + x2 = x[:, c:, :, :] + + out = torch.cat((x1, self.branch2(x2)), dim=1) + + out = channel_shuffle(out, 2) + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out + + +@MODELS.register_module() +class ShuffleNetV2(BaseBackbone): + """ShuffleNetV2 backbone. 
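+
+    Please refer to the `paper <https://arxiv.org/abs/1807.11164>`__ for
+    details.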
+ + Args: + widen_factor (float): Width multiplier - adjusts the number of + channels in each layer by this amount. Default: 1.0. + out_indices (Sequence[int]): Output from which stages. + Default: (0, 1, 2, 3). + frozen_stages (int): Stages to be frozen (all param fixed). + Default: -1, which means not freezing any parameters. + conv_cfg (dict, optional): Config dict for convolution layer. + Default: None, which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='BN'). + act_cfg (dict): Config dict for activation layer. + Default: dict(type='ReLU'). + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Default: False. + """ + + def __init__(self, + widen_factor=1.0, + out_indices=(3, ), + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + norm_eval=False, + with_cp=False, + init_cfg=None): + super(ShuffleNetV2, self).__init__(init_cfg) + self.stage_blocks = [4, 8, 4] + for index in out_indices: + if index not in range(0, 4): + raise ValueError('the item in out_indices must in ' + f'range(0, 4). But received {index}') + + if frozen_stages not in range(-1, 4): + raise ValueError('frozen_stages must be in range(-1, 4). ' + f'But received {frozen_stages}') + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + self.norm_eval = norm_eval + self.with_cp = with_cp + + if widen_factor == 0.5: + channels = [48, 96, 192, 1024] + elif widen_factor == 1.0: + channels = [116, 232, 464, 1024] + elif widen_factor == 1.5: + channels = [176, 352, 704, 1024] + elif widen_factor == 2.0: + channels = [244, 488, 976, 2048] + else: + raise ValueError('widen_factor must be in [0.5, 1.0, 1.5, 2.0]. ' + f'But received {widen_factor}') + + self.in_channels = 24 + self.conv1 = ConvModule( + in_channels=3, + out_channels=self.in_channels, + kernel_size=3, + stride=2, + padding=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layers = nn.ModuleList() + for i, num_blocks in enumerate(self.stage_blocks): + layer = self._make_layer(channels[i], num_blocks) + self.layers.append(layer) + + output_channels = channels[-1] + self.layers.append( + ConvModule( + in_channels=self.in_channels, + out_channels=output_channels, + kernel_size=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg)) + + def _make_layer(self, out_channels, num_blocks): + """Stack blocks to make a layer. + + Args: + out_channels (int): out_channels of the block. + num_blocks (int): number of blocks. 
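+
+        The first block in the layer downsamples with ``stride=2``; the
+        remaining blocks use ``stride=1``.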
+ """ + layers = [] + for i in range(num_blocks): + stride = 2 if i == 0 else 1 + layers.append( + InvertedResidual( + in_channels=self.in_channels, + out_channels=out_channels, + stride=stride, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg, + with_cp=self.with_cp)) + self.in_channels = out_channels + + return nn.Sequential(*layers) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + for param in self.conv1.parameters(): + param.requires_grad = False + + for i in range(self.frozen_stages): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def init_weights(self): + super(ShuffleNetV2, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + + for name, m in self.named_modules(): + if isinstance(m, nn.Conv2d): + if 'conv1' in name: + normal_init(m, mean=0, std=0.01) + else: + normal_init(m, mean=0, std=1.0 / m.weight.shape[1]) + elif isinstance(m, (_BatchNorm, nn.GroupNorm)): + constant_init(m.weight, val=1, bias=0.0001) + if isinstance(m, _BatchNorm): + if m.running_mean is not None: + nn.init.constant_(m.running_mean, 0) + + def forward(self, x): + x = self.conv1(x) + x = self.maxpool(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def train(self, mode=True): + super(ShuffleNetV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + if isinstance(m, nn.BatchNorm2d): + m.eval() diff --git a/mmpretrain/models/backbones/sparse_convnext.py b/mmpretrain/models/backbones/sparse_convnext.py new file mode 100644 index 0000000..8f36136 --- /dev/null +++ b/mmpretrain/models/backbones/sparse_convnext.py @@ -0,0 +1,298 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Sequence, Union + +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmengine.model import ModuleList, Sequential + +from mmpretrain.registry import MODELS +from ..utils import (SparseAvgPooling, SparseConv2d, SparseHelper, + SparseMaxPooling, build_norm_layer) +from .convnext import ConvNeXt, ConvNeXtBlock + + +class SparseConvNeXtBlock(ConvNeXtBlock): + """Sparse ConvNeXt Block. + + Note: + There are two equivalent implementations: + 1. DwConv -> SparseLayerNorm -> 1x1 Conv -> GELU -> 1x1 Conv; + all outputs are in (N, C, H, W). + 2. DwConv -> SparseLayerNorm -> Permute to (N, H, W, C) -> Linear -> + GELU -> Linear; Permute back + As default, we use the second to align with the official repository. + And it may be slightly faster. 
+ """ + + def forward(self, x): + + def _inner_forward(x): + shortcut = x + x = self.depthwise_conv(x) + + if self.linear_pw_conv: + x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C) + x = self.norm(x, data_format='channel_last') + x = self.pointwise_conv1(x) + x = self.act(x) + if self.grn is not None: + x = self.grn(x, data_format='channel_last') + x = self.pointwise_conv2(x) + x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W) + else: + x = self.norm(x, data_format='channel_first') + x = self.pointwise_conv1(x) + x = self.act(x) + + if self.grn is not None: + x = self.grn(x, data_format='channel_first') + x = self.pointwise_conv2(x) + + if self.gamma is not None: + x = x.mul(self.gamma.view(1, -1, 1, 1)) + + x *= SparseHelper._get_active_map_or_index( + H=x.shape[2], returning_active_map=True) + + x = shortcut + self.drop_path(x) + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + return x + + +@MODELS.register_module() +class SparseConvNeXt(ConvNeXt): + """ConvNeXt with sparse module conversion function. + + Modified from + https://github.com/keyu-tian/SparK/blob/main/models/convnext.py + and + https://github.com/keyu-tian/SparK/blob/main/encoder.py + To use ConvNeXt v2, please set ``use_grn=True`` and ``layer_scale_init_value=0.``. + + Args: + arch (str | dict): The model's architecture. If string, it should be + one of architecture in ``ConvNeXt.arch_settings``. And if dict, it + should include the following two keys: + - depths (list[int]): Number of blocks at each stage. + - channels (list[int]): The number of channels at each stage. + Defaults to 'tiny'. + in_channels (int): Number of input image channels. Defaults to 3. + stem_patch_size (int): The size of one patch in the stem layer. + Defaults to 4. + norm_cfg (dict): The config dict for norm layers. + Defaults to ``dict(type='SparseLN2d', eps=1e-6)``. + act_cfg (dict): The config dict for activation between pointwise + convolution. Defaults to ``dict(type='GELU')``. + linear_pw_conv (bool): Whether to use linear layer to do pointwise + convolution. Defaults to True. + use_grn (bool): Whether to add Global Response Normalization in the + blocks. Defaults to False. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + layer_scale_init_value (float): Init value for Layer Scale. + Defaults to 1e-6. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + gap_before_output (bool): Whether to globally average the feature + map before the final norm layer. In the official repo, it's only + used in classification task. Defaults to True. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict, optional): Initialization config dict. 
+ """ # noqa: E501 + + def __init__(self, + arch: str = 'small', + in_channels: int = 3, + stem_patch_size: int = 4, + norm_cfg: dict = dict(type='SparseLN2d', eps=1e-6), + act_cfg: dict = dict(type='GELU'), + linear_pw_conv: bool = True, + use_grn: bool = False, + drop_path_rate: float = 0, + layer_scale_init_value: float = 1e-6, + out_indices: int = -1, + frozen_stages: int = 0, + gap_before_output: bool = True, + with_cp: bool = False, + init_cfg: Optional[Union[dict, List[dict]]] = [ + dict( + type='TruncNormal', + layer=['Conv2d', 'Linear'], + std=.02, + bias=0.), + dict( + type='Constant', layer=['LayerNorm'], val=1., + bias=0.), + ]): + super(ConvNeXt, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavailable arch, please choose from ' \ + f'({set(self.arch_settings)}) or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + assert 'depths' in arch and 'channels' in arch, \ + f'The arch dict must have "depths" and "channels", ' \ + f'but got {list(arch.keys())}.' + + self.depths = arch['depths'] + self.channels = arch['channels'] + assert (isinstance(self.depths, Sequence) + and isinstance(self.channels, Sequence) + and len(self.depths) == len(self.channels)), \ + f'The "depths" ({self.depths}) and "channels" ({self.channels}) ' \ + 'should be both sequence with the same length.' + + self.num_stages = len(self.depths) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = 4 + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + + self.frozen_stages = frozen_stages + self.gap_before_output = gap_before_output + + # 4 downsample layers between stages, including the stem layer. 
+ self.downsample_layers = ModuleList() + stem = nn.Sequential( + nn.Conv2d( + in_channels, + self.channels[0], + kernel_size=stem_patch_size, + stride=stem_patch_size), + build_norm_layer(norm_cfg, self.channels[0]), + ) + self.downsample_layers.append(stem) + + # stochastic depth decay rule + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, sum(self.depths)) + ] + block_idx = 0 + + # 4 feature resolution stages, each consisting of multiple residual + # blocks + self.stages = nn.ModuleList() + for i in range(self.num_stages): + depth = self.depths[i] + channels = self.channels[i] + + if i >= 1: + downsample_layer = nn.Sequential( + build_norm_layer(norm_cfg, self.channels[i - 1]), + nn.Conv2d( + self.channels[i - 1], + channels, + kernel_size=2, + stride=2), + ) + self.downsample_layers.append(downsample_layer) + + stage = Sequential(*[ + SparseConvNeXtBlock( + in_channels=channels, + drop_path_rate=dpr[block_idx + j], + norm_cfg=norm_cfg, + act_cfg=act_cfg, + linear_pw_conv=linear_pw_conv, + layer_scale_init_value=layer_scale_init_value, + use_grn=use_grn, + with_cp=with_cp) for j in range(depth) + ]) + block_idx += depth + + self.stages.append(stage) + + self.dense_model_to_sparse(m=self) + + def forward(self, x): + outs = [] + for i, stage in enumerate(self.stages): + x = self.downsample_layers[i](x) + x = stage(x) + if i in self.out_indices: + if self.gap_before_output: + gap = x.mean([-2, -1], keepdim=True) + outs.append(gap.flatten(1)) + else: + outs.append(x) + + return tuple(outs) + + def dense_model_to_sparse(self, m: nn.Module) -> nn.Module: + """Convert regular dense modules to sparse modules.""" + output = m + if isinstance(m, nn.Conv2d): + m: nn.Conv2d + bias = m.bias is not None + output = SparseConv2d( + m.in_channels, + m.out_channels, + kernel_size=m.kernel_size, + stride=m.stride, + padding=m.padding, + dilation=m.dilation, + groups=m.groups, + bias=bias, + padding_mode=m.padding_mode, + ) + output.weight.data.copy_(m.weight.data) + if bias: + output.bias.data.copy_(m.bias.data) + + elif isinstance(m, nn.MaxPool2d): + m: nn.MaxPool2d + output = SparseMaxPooling( + m.kernel_size, + stride=m.stride, + padding=m.padding, + dilation=m.dilation, + return_indices=m.return_indices, + ceil_mode=m.ceil_mode) + + elif isinstance(m, nn.AvgPool2d): + m: nn.AvgPool2d + output = SparseAvgPooling( + m.kernel_size, + m.stride, + m.padding, + ceil_mode=m.ceil_mode, + count_include_pad=m.count_include_pad, + divisor_override=m.divisor_override) + + # elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)): + # m: nn.BatchNorm2d + # output = (SparseSyncBatchNorm2d + # if enable_sync_bn else SparseBatchNorm2d)( + # m.weight.shape[0], + # eps=m.eps, + # momentum=m.momentum, + # affine=m.affine, + # track_running_stats=m.track_running_stats) + # output.weight.data.copy_(m.weight.data) + # output.bias.data.copy_(m.bias.data) + # output.running_mean.data.copy_(m.running_mean.data) + # output.running_var.data.copy_(m.running_var.data) + # output.num_batches_tracked.data.copy_(m.num_batches_tracked.data) + + for name, child in m.named_children(): + output.add_module(name, self.dense_model_to_sparse(child)) + del m + return output diff --git a/mmpretrain/models/backbones/sparse_resnet.py b/mmpretrain/models/backbones/sparse_resnet.py new file mode 100644 index 0000000..67597f1 --- /dev/null +++ b/mmpretrain/models/backbones/sparse_resnet.py @@ -0,0 +1,179 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import re +from typing import Optional, Tuple + +import torch.nn as nn + +from mmpretrain.models.utils.sparse_modules import (SparseAvgPooling, + SparseBatchNorm2d, + SparseConv2d, + SparseMaxPooling, + SparseSyncBatchNorm2d) +from mmpretrain.registry import MODELS +from .resnet import ResNet + + +@MODELS.register_module() +class SparseResNet(ResNet): + """ResNet with sparse module conversion function. + + Modified from https://github.com/keyu-tian/SparK/blob/main/encoder.py + + Args: + depth (int): Network depth, from {18, 34, 50, 101, 152}. + in_channels (int): Number of input image channels. Defaults to 3. + stem_channels (int): Output channels of the stem layer. Defaults to 64. + base_channels (int): Middle channels of the first stage. + Defaults to 64. + num_stages (int): Stages of the network. Defaults to 4. + strides (Sequence[int]): Strides of the first block of each stage. + Defaults to ``(1, 2, 2, 2)``. + dilations (Sequence[int]): Dilation of each stage. + Defaults to ``(1, 1, 1, 1)``. + out_indices (Sequence[int]): Output from which stages. + Defaults to ``(3, )``. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv. + Defaults to False. + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Defaults to False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + conv_cfg (dict | None): The config dict for conv layers. + Defaults to None. + norm_cfg (dict): The config dict for norm layers. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. Defaults to True. + drop_path_rate (float): stochastic depth rate. Defaults to 0. 
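+
+    The sketch below only illustrates the module conversion; as with
+    ``SparseConvNeXt``, running the forward pass requires the SparK mask to
+    be set up first.
+
+    Example:
+        >>> from mmpretrain.models import SparseResNet
+        >>> from mmpretrain.models.utils.sparse_modules import SparseConv2d
+        >>> # The default norm type contains 'Sync', so BN layers become
+        >>> # ``SparseSyncBatchNorm2d`` and convolutions ``SparseConv2d``.
+        >>> model = SparseResNet(depth=50)
+        >>> any(isinstance(m, SparseConv2d) for m in model.modules())
+        True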
+ """ + + def __init__(self, + depth: int, + in_channels: int = 3, + stem_channels: int = 64, + base_channels: int = 64, + expansion: Optional[int] = None, + num_stages: int = 4, + strides: Tuple[int] = (1, 2, 2, 2), + dilations: Tuple[int] = (1, 1, 1, 1), + out_indices: Tuple[int] = (3, ), + style: str = 'pytorch', + deep_stem: bool = False, + avg_down: bool = False, + frozen_stages: int = -1, + conv_cfg: Optional[dict] = None, + norm_cfg: dict = dict(type='SparseSyncBatchNorm2d'), + norm_eval: bool = False, + with_cp: bool = False, + zero_init_residual: bool = False, + init_cfg: Optional[dict] = [ + dict(type='Kaiming', layer=['Conv2d']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ], + drop_path_rate: float = 0, + **kwargs): + super().__init__( + depth=depth, + in_channels=in_channels, + stem_channels=stem_channels, + base_channels=base_channels, + expansion=expansion, + num_stages=num_stages, + strides=strides, + dilations=dilations, + out_indices=out_indices, + style=style, + deep_stem=deep_stem, + avg_down=avg_down, + frozen_stages=frozen_stages, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + norm_eval=norm_eval, + with_cp=with_cp, + zero_init_residual=zero_init_residual, + init_cfg=init_cfg, + drop_path_rate=drop_path_rate, + **kwargs) + norm_type = norm_cfg['type'] + enable_sync_bn = False + if re.search('Sync', norm_type) is not None: + enable_sync_bn = True + self.dense_model_to_sparse(m=self, enable_sync_bn=enable_sync_bn) + + def dense_model_to_sparse(self, m: nn.Module, + enable_sync_bn: bool) -> nn.Module: + """Convert regular dense modules to sparse modules.""" + output = m + if isinstance(m, nn.Conv2d): + m: nn.Conv2d + bias = m.bias is not None + output = SparseConv2d( + m.in_channels, + m.out_channels, + kernel_size=m.kernel_size, + stride=m.stride, + padding=m.padding, + dilation=m.dilation, + groups=m.groups, + bias=bias, + padding_mode=m.padding_mode, + ) + output.weight.data.copy_(m.weight.data) + if bias: + output.bias.data.copy_(m.bias.data) + + elif isinstance(m, nn.MaxPool2d): + m: nn.MaxPool2d + output = SparseMaxPooling( + m.kernel_size, + stride=m.stride, + padding=m.padding, + dilation=m.dilation, + return_indices=m.return_indices, + ceil_mode=m.ceil_mode) + + elif isinstance(m, nn.AvgPool2d): + m: nn.AvgPool2d + output = SparseAvgPooling( + m.kernel_size, + m.stride, + m.padding, + ceil_mode=m.ceil_mode, + count_include_pad=m.count_include_pad, + divisor_override=m.divisor_override) + + elif isinstance(m, (nn.BatchNorm2d, nn.SyncBatchNorm)): + m: nn.BatchNorm2d + output = (SparseSyncBatchNorm2d + if enable_sync_bn else SparseBatchNorm2d)( + m.weight.shape[0], + eps=m.eps, + momentum=m.momentum, + affine=m.affine, + track_running_stats=m.track_running_stats) + output.weight.data.copy_(m.weight.data) + output.bias.data.copy_(m.bias.data) + output.running_mean.data.copy_(m.running_mean.data) + output.running_var.data.copy_(m.running_var.data) + output.num_batches_tracked.data.copy_(m.num_batches_tracked.data) + + elif isinstance(m, (nn.Conv1d, )): + raise NotImplementedError + + for name, child in m.named_children(): + output.add_module( + name, + self.dense_model_to_sparse( + child, enable_sync_bn=enable_sync_bn)) + del m + return output diff --git a/mmpretrain/models/backbones/swin_transformer.py b/mmpretrain/models/backbones/swin_transformer.py new file mode 100644 index 0000000..559fd5e --- /dev/null +++ b/mmpretrain/models/backbones/swin_transformer.py @@ -0,0 +1,585 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from copy import deepcopy +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed, PatchMerging +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import trunc_normal_ +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.registry import MODELS +from ..utils import (ShiftWindowMSA, resize_pos_embed, + resize_relative_position_bias_table, to_2tuple) +from .base_backbone import BaseBackbone + + +class SwinBlock(BaseModule): + """Swin Transformer block. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. Defaults to 7. + shift (bool): Shift the attention window or not. Defaults to False. + ffn_ratio (float): The expansion ratio of feedforward network hidden + layer channels. Defaults to 4. + drop_path (float): The drop path rate after attention and ffn. + Defaults to 0. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + attn_cfgs (dict): The extra config of Shift Window-MSA. + Defaults to empty dict. + ffn_cfgs (dict): The extra config of FFN. Defaults to empty dict. + norm_cfg (dict): The config of norm layers. + Defaults to ``dict(type='LN')``. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size=7, + shift=False, + ffn_ratio=4., + drop_path=0., + pad_small_map=False, + attn_cfgs=dict(), + ffn_cfgs=dict(), + norm_cfg=dict(type='LN'), + with_cp=False, + init_cfg=None): + + super(SwinBlock, self).__init__(init_cfg) + self.with_cp = with_cp + + _attn_cfgs = { + 'embed_dims': embed_dims, + 'num_heads': num_heads, + 'shift_size': window_size // 2 if shift else 0, + 'window_size': window_size, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'pad_small_map': pad_small_map, + **attn_cfgs + } + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = ShiftWindowMSA(**_attn_cfgs) + + _ffn_cfgs = { + 'embed_dims': embed_dims, + 'feedforward_channels': int(embed_dims * ffn_ratio), + 'num_fcs': 2, + 'ffn_drop': 0, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'act_cfg': dict(type='GELU'), + **ffn_cfgs + } + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN(**_ffn_cfgs) + + def forward(self, x, hw_shape): + + def _inner_forward(x): + identity = x + x = self.norm1(x) + x = self.attn(x, hw_shape) + x = x + identity + + identity = x + x = self.norm2(x) + x = self.ffn(x, identity=identity) + + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +class SwinBlockSequence(BaseModule): + """Module with successive Swin Transformer blocks and downsample layer. + + Args: + embed_dims (int): Number of input channels. + depth (int): Number of successive swin transformer blocks. + num_heads (int): Number of attention heads. 
+ window_size (int): The height and width of the window. Defaults to 7. + downsample (bool): Downsample the output of blocks by patch merging. + Defaults to False. + downsample_cfg (dict): The extra config of the patch merging layer. + Defaults to empty dict. + drop_paths (Sequence[float] | float): The drop path rate in each block. + Defaults to 0. + block_cfgs (Sequence[dict] | dict): The extra config of each block. + Defaults to empty dicts. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + depth, + num_heads, + window_size=7, + downsample=False, + downsample_cfg=dict(), + drop_paths=0., + block_cfgs=dict(), + with_cp=False, + pad_small_map=False, + init_cfg=None): + super().__init__(init_cfg) + + if not isinstance(drop_paths, Sequence): + drop_paths = [drop_paths] * depth + + if not isinstance(block_cfgs, Sequence): + block_cfgs = [deepcopy(block_cfgs) for _ in range(depth)] + + self.embed_dims = embed_dims + self.blocks = ModuleList() + for i in range(depth): + _block_cfg = { + 'embed_dims': embed_dims, + 'num_heads': num_heads, + 'window_size': window_size, + 'shift': False if i % 2 == 0 else True, + 'drop_path': drop_paths[i], + 'with_cp': with_cp, + 'pad_small_map': pad_small_map, + **block_cfgs[i] + } + block = SwinBlock(**_block_cfg) + self.blocks.append(block) + + if downsample: + _downsample_cfg = { + 'in_channels': embed_dims, + 'out_channels': 2 * embed_dims, + 'norm_cfg': dict(type='LN'), + **downsample_cfg + } + self.downsample = PatchMerging(**_downsample_cfg) + else: + self.downsample = None + + def forward(self, x, in_shape, do_downsample=True): + for block in self.blocks: + x = block(x, in_shape) + + if self.downsample is not None and do_downsample: + x, out_shape = self.downsample(x, in_shape) + else: + out_shape = in_shape + return x, out_shape + + @property + def out_channels(self): + if self.downsample: + return self.downsample.out_channels + else: + return self.embed_dims + + +@MODELS.register_module() +class SwinTransformer(BaseBackbone): + """Swin Transformer. + + A PyTorch implement of : `Swin Transformer: + Hierarchical Vision Transformer using Shifted Windows + `_ + + Inspiration from + https://github.com/microsoft/Swin-Transformer + + Args: + arch (str | dict): Swin Transformer architecture. If use string, choose + from 'tiny', 'small', 'base' and 'large'. If use dict, it should + have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **depths** (List[int]): The number of blocks in each stage. + - **num_heads** (List[int]): The number of heads in attention + modules of each stage. + + Defaults to 'tiny'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 4. + in_channels (int): The num of input channels. Defaults to 3. + window_size (int): The height and width of the window. Defaults to 7. + drop_rate (float): Dropout rate after embedding. 
Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.1. + out_after_downsample (bool): Whether to output the feature map of a + stage after the following downsample layer. Defaults to False. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults to False. + interpolate_mode (str): Select the interpolate mode for absolute + position embeding vector resize. Defaults to "bicubic". + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + norm_cfg (dict): Config dict for normalization layer for all output + features. Defaults to ``dict(type='LN')`` + stage_cfgs (Sequence[dict] | dict): Extra config dict for each + stage. Defaults to an empty dict. + patch_cfg (dict): Extra config dict for patch embedding. + Defaults to an empty dict. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + + Examples: + >>> from mmpretrain.models import SwinTransformer + >>> import torch + >>> extra_config = dict( + >>> arch='tiny', + >>> stage_cfgs=dict(downsample_cfg={'kernel_size': 3, + >>> 'expansion_ratio': 3})) + >>> self = SwinTransformer(**extra_config) + >>> inputs = torch.rand(1, 3, 224, 224) + >>> output = self.forward(inputs) + >>> print(output.shape) + (1, 2592, 4) + """ + arch_zoo = { + **dict.fromkeys(['t', 'tiny'], + {'embed_dims': 96, + 'depths': [2, 2, 6, 2], + 'num_heads': [3, 6, 12, 24]}), + **dict.fromkeys(['s', 'small'], + {'embed_dims': 96, + 'depths': [2, 2, 18, 2], + 'num_heads': [3, 6, 12, 24]}), + **dict.fromkeys(['b', 'base'], + {'embed_dims': 128, + 'depths': [2, 2, 18, 2], + 'num_heads': [4, 8, 16, 32]}), + **dict.fromkeys(['l', 'large'], + {'embed_dims': 192, + 'depths': [2, 2, 18, 2], + 'num_heads': [6, 12, 24, 48]}), + } # yapf: disable + + _version = 3 + num_extra_tokens = 0 + + def __init__(self, + arch='tiny', + img_size=224, + patch_size=4, + in_channels=3, + window_size=7, + drop_rate=0., + drop_path_rate=0.1, + out_indices=(3, ), + out_after_downsample=False, + use_abs_pos_embed=False, + interpolate_mode='bicubic', + with_cp=False, + frozen_stages=-1, + norm_eval=False, + pad_small_map=False, + norm_cfg=dict(type='LN'), + stage_cfgs=dict(), + patch_cfg=dict(), + init_cfg=None): + super(SwinTransformer, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = {'embed_dims', 'depths', 'num_heads'} + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.depths = self.arch_settings['depths'] + self.num_heads = self.arch_settings['num_heads'] + self.num_layers = 
len(self.depths) + self.out_indices = out_indices + self.out_after_downsample = out_after_downsample + self.use_abs_pos_embed = use_abs_pos_embed + self.interpolate_mode = interpolate_mode + self.frozen_stages = frozen_stages + + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + norm_cfg=dict(type='LN'), + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + + if self.use_abs_pos_embed: + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + self.absolute_pos_embed = nn.Parameter( + torch.zeros(1, num_patches, self.embed_dims)) + self._register_load_state_dict_pre_hook( + self._prepare_abs_pos_embed) + + self._register_load_state_dict_pre_hook( + self._prepare_relative_position_bias_table) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + self.norm_eval = norm_eval + + # stochastic depth + total_depth = sum(self.depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] # stochastic depth decay rule + + self.stages = ModuleList() + embed_dims = [self.embed_dims] + for i, (depth, + num_heads) in enumerate(zip(self.depths, self.num_heads)): + if isinstance(stage_cfgs, Sequence): + stage_cfg = stage_cfgs[i] + else: + stage_cfg = deepcopy(stage_cfgs) + downsample = True if i < self.num_layers - 1 else False + _stage_cfg = { + 'embed_dims': embed_dims[-1], + 'depth': depth, + 'num_heads': num_heads, + 'window_size': window_size, + 'downsample': downsample, + 'drop_paths': dpr[:depth], + 'with_cp': with_cp, + 'pad_small_map': pad_small_map, + **stage_cfg + } + + stage = SwinBlockSequence(**_stage_cfg) + self.stages.append(stage) + + dpr = dpr[depth:] + embed_dims.append(stage.out_channels) + + if self.out_after_downsample: + self.num_features = embed_dims[1:] + else: + self.num_features = embed_dims[:-1] + + for i in out_indices: + if norm_cfg is not None: + norm_layer = build_norm_layer(norm_cfg, + self.num_features[i])[1] + else: + norm_layer = nn.Identity() + + self.add_module(f'norm{i}', norm_layer) + + def init_weights(self): + super(SwinTransformer, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + + if self.use_abs_pos_embed: + trunc_normal_(self.absolute_pos_embed, std=0.02) + + def forward(self, x): + x, hw_shape = self.patch_embed(x) + if self.use_abs_pos_embed: + x = x + resize_pos_embed( + self.absolute_pos_embed, self.patch_resolution, hw_shape, + self.interpolate_mode, self.num_extra_tokens) + x = self.drop_after_pos(x) + + outs = [] + for i, stage in enumerate(self.stages): + x, hw_shape = stage( + x, hw_shape, do_downsample=self.out_after_downsample) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(x) + out = out.view(-1, *hw_shape, + self.num_features[i]).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + if stage.downsample is not None and not self.out_after_downsample: + x, hw_shape = stage.downsample(x, hw_shape) + + return tuple(outs) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, *args, + **kwargs): + """load checkpoints.""" + # Names of some parameters in has been changed. 
+ version = local_metadata.get('version', None) + if (version is None + or version < 2) and self.__class__ is SwinTransformer: + final_stage_num = len(self.stages) - 1 + state_dict_keys = list(state_dict.keys()) + for k in state_dict_keys: + if k.startswith('norm.') or k.startswith('backbone.norm.'): + convert_key = k.replace('norm.', f'norm{final_stage_num}.') + state_dict[convert_key] = state_dict[k] + del state_dict[k] + if (version is None + or version < 3) and self.__class__ is SwinTransformer: + state_dict_keys = list(state_dict.keys()) + for k in state_dict_keys: + if 'attn_mask' in k: + del state_dict[k] + + super()._load_from_state_dict(state_dict, prefix, local_metadata, + *args, **kwargs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + for i in range(0, self.frozen_stages + 1): + m = self.stages[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + for i in self.out_indices: + if i <= self.frozen_stages: + for param in getattr(self, f'norm{i}').parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(SwinTransformer, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def _prepare_abs_pos_embed(self, state_dict, prefix, *args, **kwargs): + name = prefix + 'absolute_pos_embed' + if name not in state_dict.keys(): + return + + ckpt_pos_embed_shape = state_dict[name].shape + if self.absolute_pos_embed.shape != ckpt_pos_embed_shape: + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.info( + 'Resize the absolute_pos_embed shape from ' + f'{ckpt_pos_embed_shape} to {self.absolute_pos_embed.shape}.') + + ckpt_pos_embed_shape = to_2tuple( + int(np.sqrt(ckpt_pos_embed_shape[1] - self.num_extra_tokens))) + pos_embed_shape = self.patch_embed.init_out_size + + state_dict[name] = resize_pos_embed(state_dict[name], + ckpt_pos_embed_shape, + pos_embed_shape, + self.interpolate_mode, + self.num_extra_tokens) + + def _prepare_relative_position_bias_table(self, state_dict, prefix, *args, + **kwargs): + state_dict_model = self.state_dict() + all_keys = list(state_dict_model.keys()) + for key in all_keys: + if 'relative_position_bias_table' in key: + ckpt_key = prefix + key + if ckpt_key not in state_dict: + continue + relative_position_bias_table_pretrained = state_dict[ckpt_key] + relative_position_bias_table_current = state_dict_model[key] + L1, nH1 = relative_position_bias_table_pretrained.size() + L2, nH2 = relative_position_bias_table_current.size() + if L1 != L2: + src_size = int(L1**0.5) + dst_size = int(L2**0.5) + new_rel_pos_bias = resize_relative_position_bias_table( + src_size, dst_size, + relative_position_bias_table_pretrained, nH1) + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.info('Resize the relative_position_bias_table from ' + f'{state_dict[ckpt_key].shape} to ' + f'{new_rel_pos_bias.shape}') + state_dict[ckpt_key] = new_rel_pos_bias + + # The index buffer need to be re-generated. + index_buffer = ckpt_key.replace('bias_table', 'index') + del state_dict[index_buffer] + + def get_layer_depth(self, param_name: str, prefix: str = ''): + """Get the layer-wise depth of a parameter. + + Args: + param_name (str): The name of the parameter. + prefix (str): The prefix for the parameter. 
+ Defaults to an empty string. + + Returns: + Tuple[int, int]: The layer-wise depth and the num of layers. + + Note: + The first depth is the stem module (``layer_depth=0``), and the + last depth is the subsequent module (``layer_depth=num_layers-1``) + """ + num_layers = sum(self.depths) + 2 + + if not param_name.startswith(prefix): + # For subsequent module like head + return num_layers - 1, num_layers + + param_name = param_name[len(prefix):] + + if param_name.startswith('patch_embed'): + layer_depth = 0 + elif param_name.startswith('stages'): + stage_id = int(param_name.split('.')[1]) + block_id = param_name.split('.')[3] + if block_id in ('reduction', 'norm'): + layer_depth = sum(self.depths[:stage_id + 1]) + else: + layer_depth = sum(self.depths[:stage_id]) + int(block_id) + 1 + else: + layer_depth = num_layers - 1 + + return layer_depth, num_layers diff --git a/mmpretrain/models/backbones/swin_transformer_v2.py b/mmpretrain/models/backbones/swin_transformer_v2.py new file mode 100644 index 0000000..142505a --- /dev/null +++ b/mmpretrain/models/backbones/swin_transformer_v2.py @@ -0,0 +1,567 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import trunc_normal_ +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from ..builder import MODELS +from ..utils import (PatchMerging, ShiftWindowMSA, WindowMSAV2, + resize_pos_embed, to_2tuple) +from .base_backbone import BaseBackbone + + +class SwinBlockV2(BaseModule): + """Swin Transformer V2 block. Use post normalization. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. Defaults to 7. + shift (bool): Shift the attention window or not. Defaults to False. + extra_norm (bool): Whether add extra norm at the end of main branch. + ffn_ratio (float): The expansion ratio of feedforward network hidden + layer channels. Defaults to 4. + drop_path (float): The drop path rate after attention and ffn. + Defaults to 0. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + attn_cfgs (dict): The extra config of Shift Window-MSA. + Defaults to empty dict. + ffn_cfgs (dict): The extra config of FFN. Defaults to empty dict. + norm_cfg (dict): The config of norm layers. + Defaults to ``dict(type='LN')``. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + pretrained_window_size (int): Window size in pretrained. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
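+
+    Compared with ``SwinBlock``, the residual branches here are post-normed:
+    the attention output passes through ``norm1`` and the FFN output through
+    ``norm2`` before being added back to the identity, and an optional
+    ``norm3`` is appended when ``extra_norm`` is True (used every n blocks in
+    the huge and giant variants).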
+ """ + + def __init__(self, + embed_dims, + num_heads, + window_size=8, + shift=False, + extra_norm=False, + ffn_ratio=4., + drop_path=0., + pad_small_map=False, + attn_cfgs=dict(), + ffn_cfgs=dict(), + norm_cfg=dict(type='LN'), + with_cp=False, + pretrained_window_size=0, + init_cfg=None): + + super(SwinBlockV2, self).__init__(init_cfg) + self.with_cp = with_cp + self.extra_norm = extra_norm + + _attn_cfgs = { + 'embed_dims': embed_dims, + 'num_heads': num_heads, + 'shift_size': window_size // 2 if shift else 0, + 'window_size': window_size, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'pad_small_map': pad_small_map, + **attn_cfgs + } + # use V2 attention implementation + _attn_cfgs.update( + window_msa=WindowMSAV2, + pretrained_window_size=to_2tuple(pretrained_window_size)) + self.attn = ShiftWindowMSA(**_attn_cfgs) + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + + _ffn_cfgs = { + 'embed_dims': embed_dims, + 'feedforward_channels': int(embed_dims * ffn_ratio), + 'num_fcs': 2, + 'ffn_drop': 0, + 'dropout_layer': dict(type='DropPath', drop_prob=drop_path), + 'act_cfg': dict(type='GELU'), + 'add_identity': False, + **ffn_cfgs + } + self.ffn = FFN(**_ffn_cfgs) + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + + # add extra norm for every n blocks in huge and giant model + if self.extra_norm: + self.norm3 = build_norm_layer(norm_cfg, embed_dims)[1] + + def forward(self, x, hw_shape): + + def _inner_forward(x): + # Use post normalization + identity = x + x = self.attn(x, hw_shape) + x = self.norm1(x) + x = x + identity + + identity = x + x = self.ffn(x) + x = self.norm2(x) + x = x + identity + + if self.extra_norm: + x = self.norm3(x) + + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + + return x + + +class SwinBlockV2Sequence(BaseModule): + """Module with successive Swin Transformer blocks and downsample layer. + + Args: + embed_dims (int): Number of input channels. + depth (int): Number of successive swin transformer blocks. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. Defaults to 7. + downsample (bool): Downsample the output of blocks by patch merging. + Defaults to False. + downsample_cfg (dict): The extra config of the patch merging layer. + Defaults to empty dict. + drop_paths (Sequence[float] | float): The drop path rate in each block. + Defaults to 0. + block_cfgs (Sequence[dict] | dict): The extra config of each block. + Defaults to empty dicts. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + extra_norm_every_n_blocks (int): Add extra norm at the end of main + branch every n blocks. Defaults to 0, which means no needs for + extra norm layer. + pretrained_window_size (int): Window size in pretrained. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
+ """ + + def __init__(self, + embed_dims, + depth, + num_heads, + window_size=8, + downsample=False, + downsample_cfg=dict(), + drop_paths=0., + block_cfgs=dict(), + with_cp=False, + pad_small_map=False, + extra_norm_every_n_blocks=0, + pretrained_window_size=0, + init_cfg=None): + super().__init__(init_cfg) + + if not isinstance(drop_paths, Sequence): + drop_paths = [drop_paths] * depth + + if not isinstance(block_cfgs, Sequence): + block_cfgs = [deepcopy(block_cfgs) for _ in range(depth)] + + if downsample: + self.out_channels = 2 * embed_dims + _downsample_cfg = { + 'in_channels': embed_dims, + 'out_channels': self.out_channels, + 'norm_cfg': dict(type='LN'), + **downsample_cfg + } + self.downsample = PatchMerging(**_downsample_cfg) + else: + self.out_channels = embed_dims + self.downsample = None + + self.blocks = ModuleList() + for i in range(depth): + extra_norm = True if extra_norm_every_n_blocks and \ + (i + 1) % extra_norm_every_n_blocks == 0 else False + _block_cfg = { + 'embed_dims': self.out_channels, + 'num_heads': num_heads, + 'window_size': window_size, + 'shift': False if i % 2 == 0 else True, + 'extra_norm': extra_norm, + 'drop_path': drop_paths[i], + 'with_cp': with_cp, + 'pad_small_map': pad_small_map, + 'pretrained_window_size': pretrained_window_size, + **block_cfgs[i] + } + block = SwinBlockV2(**_block_cfg) + self.blocks.append(block) + + def forward(self, x, in_shape): + if self.downsample: + x, out_shape = self.downsample(x, in_shape) + else: + out_shape = in_shape + + for block in self.blocks: + x = block(x, out_shape) + + return x, out_shape + + +@MODELS.register_module() +class SwinTransformerV2(BaseBackbone): + """Swin Transformer V2. + + A PyTorch implement of : `Swin Transformer V2: + Scaling Up Capacity and Resolution + `_ + + Inspiration from + https://github.com/microsoft/Swin-Transformer + + Args: + arch (str | dict): Swin Transformer architecture. If use string, choose + from 'tiny', 'small', 'base' and 'large'. If use dict, it should + have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **depths** (List[int]): The number of blocks in each stage. + - **num_heads** (List[int]): The number of heads in attention + modules of each stage. + - **extra_norm_every_n_blocks** (int): Add extra norm at the end + of main branch every n blocks. + + Defaults to 'tiny'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 4. + in_channels (int): The num of input channels. Defaults to 3. + window_size (int | Sequence): The height and width of the window. + Defaults to 7. + drop_rate (float): Dropout rate after embedding. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.1. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults to False. + interpolate_mode (str): Select the interpolate mode for absolute + position embeding vector resize. Defaults to "bicubic". + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). 
Note: Effect on Batch Norm + and its variants only. Defaults to False. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + norm_cfg (dict): Config dict for normalization layer for all output + features. Defaults to ``dict(type='LN')`` + stage_cfgs (Sequence[dict] | dict): Extra config dict for each + stage. Defaults to an empty dict. + patch_cfg (dict): Extra config dict for patch embedding. + Defaults to an empty dict. + pretrained_window_sizes (tuple(int)): Pretrained window sizes of + each layer. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + + Examples: + >>> from mmpretrain.models import SwinTransformerV2 + >>> import torch + >>> extra_config = dict( + >>> arch='tiny', + >>> stage_cfgs=dict(downsample_cfg={'kernel_size': 3, + >>> 'padding': 'same'})) + >>> self = SwinTransformerV2(**extra_config) + >>> inputs = torch.rand(1, 3, 224, 224) + >>> output = self.forward(inputs) + >>> print(output.shape) + (1, 2592, 4) + """ + arch_zoo = { + **dict.fromkeys(['t', 'tiny'], + {'embed_dims': 96, + 'depths': [2, 2, 6, 2], + 'num_heads': [3, 6, 12, 24], + 'extra_norm_every_n_blocks': 0}), + **dict.fromkeys(['s', 'small'], + {'embed_dims': 96, + 'depths': [2, 2, 18, 2], + 'num_heads': [3, 6, 12, 24], + 'extra_norm_every_n_blocks': 0}), + **dict.fromkeys(['b', 'base'], + {'embed_dims': 128, + 'depths': [2, 2, 18, 2], + 'num_heads': [4, 8, 16, 32], + 'extra_norm_every_n_blocks': 0}), + **dict.fromkeys(['l', 'large'], + {'embed_dims': 192, + 'depths': [2, 2, 18, 2], + 'num_heads': [6, 12, 24, 48], + 'extra_norm_every_n_blocks': 0}), + # head count not certain for huge, and is employed for another + # parallel study about self-supervised learning. 
+ **dict.fromkeys(['h', 'huge'], + {'embed_dims': 352, + 'depths': [2, 2, 18, 2], + 'num_heads': [8, 16, 32, 64], + 'extra_norm_every_n_blocks': 6}), + **dict.fromkeys(['g', 'giant'], + {'embed_dims': 512, + 'depths': [2, 2, 42, 4], + 'num_heads': [16, 32, 64, 128], + 'extra_norm_every_n_blocks': 6}), + } # yapf: disable + + _version = 1 + num_extra_tokens = 0 + + def __init__(self, + arch='tiny', + img_size=256, + patch_size=4, + in_channels=3, + window_size=8, + drop_rate=0., + drop_path_rate=0.1, + out_indices=(3, ), + use_abs_pos_embed=False, + interpolate_mode='bicubic', + with_cp=False, + frozen_stages=-1, + norm_eval=False, + pad_small_map=False, + norm_cfg=dict(type='LN'), + stage_cfgs=dict(), + patch_cfg=dict(), + pretrained_window_sizes=[0, 0, 0, 0], + init_cfg=None): + super(SwinTransformerV2, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'depths', 'num_heads', + 'extra_norm_every_n_blocks' + } + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.depths = self.arch_settings['depths'] + self.num_heads = self.arch_settings['num_heads'] + self.extra_norm_every_n_blocks = self.arch_settings[ + 'extra_norm_every_n_blocks'] + self.num_layers = len(self.depths) + self.out_indices = out_indices + self.use_abs_pos_embed = use_abs_pos_embed + self.interpolate_mode = interpolate_mode + self.frozen_stages = frozen_stages + + if isinstance(window_size, int): + self.window_sizes = [window_size for _ in range(self.num_layers)] + elif isinstance(window_size, Sequence): + assert len(window_size) == self.num_layers, \ + f'Length of window_sizes {len(window_size)} is not equal to '\ + f'length of stages {self.num_layers}.' 
+ self.window_sizes = window_size + else: + raise TypeError('window_size should be a Sequence or int.') + + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + norm_cfg=dict(type='LN'), + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + + if self.use_abs_pos_embed: + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + self.absolute_pos_embed = nn.Parameter( + torch.zeros(1, num_patches, self.embed_dims)) + self._register_load_state_dict_pre_hook( + self._prepare_abs_pos_embed) + + self._register_load_state_dict_pre_hook(self._delete_reinit_params) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + self.norm_eval = norm_eval + + # stochastic depth + total_depth = sum(self.depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] # stochastic depth decay rule + + self.stages = ModuleList() + embed_dims = [self.embed_dims] + for i, (depth, + num_heads) in enumerate(zip(self.depths, self.num_heads)): + if isinstance(stage_cfgs, Sequence): + stage_cfg = stage_cfgs[i] + else: + stage_cfg = deepcopy(stage_cfgs) + downsample = True if i > 0 else False + _stage_cfg = { + 'embed_dims': embed_dims[-1], + 'depth': depth, + 'num_heads': num_heads, + 'window_size': self.window_sizes[i], + 'downsample': downsample, + 'drop_paths': dpr[:depth], + 'with_cp': with_cp, + 'pad_small_map': pad_small_map, + 'extra_norm_every_n_blocks': self.extra_norm_every_n_blocks, + 'pretrained_window_size': pretrained_window_sizes[i], + 'downsample_cfg': dict(use_post_norm=True), + **stage_cfg + } + + stage = SwinBlockV2Sequence(**_stage_cfg) + self.stages.append(stage) + + dpr = dpr[depth:] + embed_dims.append(stage.out_channels) + + for i in out_indices: + if norm_cfg is not None: + norm_layer = build_norm_layer(norm_cfg, embed_dims[i + 1])[1] + else: + norm_layer = nn.Identity() + + self.add_module(f'norm{i}', norm_layer) + + def init_weights(self): + super(SwinTransformerV2, self).init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. 
+ return + + if self.use_abs_pos_embed: + trunc_normal_(self.absolute_pos_embed, std=0.02) + + def forward(self, x): + x, hw_shape = self.patch_embed(x) + + if self.use_abs_pos_embed: + x = x + resize_pos_embed( + self.absolute_pos_embed, self.patch_resolution, hw_shape, + self.interpolate_mode, self.num_extra_tokens) + x = self.drop_after_pos(x) + + outs = [] + for i, stage in enumerate(self.stages): + x, hw_shape = stage(x, hw_shape) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(x) + out = out.view(-1, *hw_shape, + stage.out_channels).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + + return tuple(outs) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + for i in range(0, self.frozen_stages + 1): + m = self.stages[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + for i in self.out_indices: + if i <= self.frozen_stages: + for param in getattr(self, f'norm{i}').parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(SwinTransformerV2, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def _prepare_abs_pos_embed(self, state_dict, prefix, *args, **kwargs): + name = prefix + 'absolute_pos_embed' + if name not in state_dict.keys(): + return + + ckpt_pos_embed_shape = state_dict[name].shape + if self.absolute_pos_embed.shape != ckpt_pos_embed_shape: + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.info( + 'Resize the absolute_pos_embed shape from ' + f'{ckpt_pos_embed_shape} to {self.absolute_pos_embed.shape}.') + + ckpt_pos_embed_shape = to_2tuple( + int(np.sqrt(ckpt_pos_embed_shape[1] - self.num_extra_tokens))) + pos_embed_shape = self.patch_embed.init_out_size + + state_dict[name] = resize_pos_embed(state_dict[name], + ckpt_pos_embed_shape, + pos_embed_shape, + self.interpolate_mode, + self.num_extra_tokens) + + def _delete_reinit_params(self, state_dict, prefix, *args, **kwargs): + # delete relative_position_index since we always re-init it + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.info( + 'Delete `relative_position_index` and `relative_coords_table` ' + 'since we always re-init these params according to the ' + '`window_size`, which might cause unwanted but unworried ' + 'warnings when loading checkpoint.') + relative_position_index_keys = [ + k for k in state_dict.keys() if 'relative_position_index' in k + ] + for k in relative_position_index_keys: + del state_dict[k] + + # delete relative_coords_table since we always re-init it + relative_position_index_keys = [ + k for k in state_dict.keys() if 'relative_coords_table' in k + ] + for k in relative_position_index_keys: + del state_dict[k] diff --git a/mmpretrain/models/backbones/t2t_vit.py b/mmpretrain/models/backbones/t2t_vit.py new file mode 100644 index 0000000..a57b95e --- /dev/null +++ b/mmpretrain/models/backbones/t2t_vit.py @@ -0,0 +1,447 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
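
A minimal usage sketch for the ``SwinTransformerV2`` backbone defined above, showing the per-stage window sizes it accepts (a plain int is broadcast to every stage, while a list must match the number of stages). The import path follows the convention of the docstring examples, and the printed shape assumes the 'tiny' arch with a 256x256 input:

    import torch
    from mmpretrain.models import SwinTransformerV2

    # One window size per stage; stages 0-2 use larger windows than stage 3.
    model = SwinTransformerV2(arch='tiny', img_size=256,
                              window_size=[16, 16, 16, 8])
    out = model(torch.rand(1, 3, 256, 256))[0]
    print(out.shape)  # expected: torch.Size([1, 768, 8, 8])
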
+from copy import deepcopy +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn.bricks.transformer import FFN +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.registry import MODELS +from ..utils import (MultiheadAttention, build_norm_layer, resize_pos_embed, + to_2tuple) +from .base_backbone import BaseBackbone + + +class T2TTransformerLayer(BaseModule): + """Transformer Layer for T2T_ViT. + + Comparing with :obj:`TransformerEncoderLayer` in ViT, it supports + different ``input_dims`` and ``embed_dims``. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs + input_dims (int, optional): The input token dimension. + Defaults to None. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop_rate (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + num_fcs (int): The number of fully-connected layers for FFNs. + Defaults to 2. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``(input_dims // num_heads) ** -0.5`` if set. Defaults to None. + act_cfg (dict): The activation config for FFNs. + Defaults to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + + Notes: + In general, ``qk_scale`` should be ``head_dims ** -0.5``, i.e. + ``(embed_dims // num_heads) ** -0.5``. However, in the official + code, it uses ``(input_dims // num_heads) ** -0.5``, so here we + keep the same with the official implementation. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + input_dims=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=False, + qk_scale=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(T2TTransformerLayer, self).__init__(init_cfg=init_cfg) + + self.v_shortcut = True if input_dims is not None else False + input_dims = input_dims or embed_dims + + self.ln1 = build_norm_layer(norm_cfg, input_dims) + + self.attn = MultiheadAttention( + input_dims=input_dims, + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias, + qk_scale=qk_scale or (input_dims // num_heads)**-0.5, + v_shortcut=self.v_shortcut) + + self.ln2 = build_norm_layer(norm_cfg, embed_dims) + + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + def forward(self, x): + if self.v_shortcut: + x = self.attn(self.ln1(x)) + else: + x = x + self.attn(self.ln1(x)) + x = self.ffn(self.ln2(x), identity=x) + return x + + +class T2TModule(BaseModule): + """Tokens-to-Token module. + + "Tokens-to-Token module" (T2T Module) can model the local structure + information of images and reduce the length of tokens progressively. 
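+
+    For the default 224x224 input, the three soft splits (with strides 4, 2
+    and 2) shrink the token grid from 224x224 to 56x56, 28x28 and finally
+    14x14, i.e. ``num_patches = 196`` tokens of dimension ``embed_dims``.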
+ + Args: + img_size (int): Input image size + in_channels (int): Number of input channels + embed_dims (int): Embedding dimension + token_dims (int): Tokens dimension in T2TModuleAttention. + use_performer (bool): If True, use Performer version self-attention to + adopt regular self-attention. Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Default: None. + + Notes: + Usually, ``token_dim`` is set as a small value (32 or 64) to reduce + MACs + """ + + def __init__( + self, + img_size=224, + in_channels=3, + embed_dims=384, + token_dims=64, + use_performer=False, + init_cfg=None, + ): + super(T2TModule, self).__init__(init_cfg) + + self.embed_dims = embed_dims + + self.soft_split0 = nn.Unfold( + kernel_size=(7, 7), stride=(4, 4), padding=(2, 2)) + self.soft_split1 = nn.Unfold( + kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) + self.soft_split2 = nn.Unfold( + kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)) + + if not use_performer: + self.attention1 = T2TTransformerLayer( + input_dims=in_channels * 7 * 7, + embed_dims=token_dims, + num_heads=1, + feedforward_channels=token_dims) + + self.attention2 = T2TTransformerLayer( + input_dims=token_dims * 3 * 3, + embed_dims=token_dims, + num_heads=1, + feedforward_channels=token_dims) + + self.project = nn.Linear(token_dims * 3 * 3, embed_dims) + else: + raise NotImplementedError("Performer hasn't been implemented.") + + # there are 3 soft split, stride are 4,2,2 separately + out_side = img_size // (4 * 2 * 2) + self.init_out_size = [out_side, out_side] + self.num_patches = out_side**2 + + @staticmethod + def _get_unfold_size(unfold: nn.Unfold, input_size): + h, w = input_size + kernel_size = to_2tuple(unfold.kernel_size) + stride = to_2tuple(unfold.stride) + padding = to_2tuple(unfold.padding) + dilation = to_2tuple(unfold.dilation) + + h_out = (h + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + w_out = (w + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + return (h_out, w_out) + + def forward(self, x): + # step0: soft split + hw_shape = self._get_unfold_size(self.soft_split0, x.shape[2:]) + x = self.soft_split0(x).transpose(1, 2) + + for step in [1, 2]: + # re-structurization/reconstruction + attn = getattr(self, f'attention{step}') + x = attn(x).transpose(1, 2) + B, C, _ = x.shape + x = x.reshape(B, C, hw_shape[0], hw_shape[1]) + + # soft split + soft_split = getattr(self, f'soft_split{step}') + hw_shape = self._get_unfold_size(soft_split, hw_shape) + x = soft_split(x).transpose(1, 2) + + # final tokens + x = self.project(x) + return x, hw_shape + + +def get_sinusoid_encoding(n_position, embed_dims): + """Generate sinusoid encoding table. + + Sinusoid encoding is a kind of relative position encoding method came from + `Attention Is All You Need`_. + + Args: + n_position (int): The length of the input token. + embed_dims (int): The position embedding dimension. + + Returns: + :obj:`torch.FloatTensor`: The sinusoid encoding table. 
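+            The returned tensor has shape ``(1, n_position, embed_dims)``.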
+ """ + + def get_position_angle_vec(position): + return [ + position / np.power(10000, 2 * (i // 2) / embed_dims) + for i in range(embed_dims) + ] + + sinusoid_table = np.array( + [get_position_angle_vec(pos) for pos in range(n_position)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.FloatTensor(sinusoid_table).unsqueeze(0) + + +@MODELS.register_module() +class T2T_ViT(BaseBackbone): + """Tokens-to-Token Vision Transformer (T2T-ViT) + + A PyTorch implementation of `Tokens-to-Token ViT: Training Vision + Transformers from Scratch on ImageNet `_ + + Args: + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + in_channels (int): Number of input channels. + embed_dims (int): Embedding dimension. + num_layers (int): Num of transformer layers in encoder. + Defaults to 14. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Dropout rate after position embedding. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. Defaults to + ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + out_type (str): The type of output features. Please choose from + + - ``"cls_token"``: The class token tensor with shape (B, C). + - ``"featmap"``: The feature map tensor from the patch tokens + with shape (B, C, H, W). + - ``"avg_featmap"``: The global averaged feature map tensor + with shape (B, C). + - ``"raw"``: The raw feature tensor includes patch tokens and + class tokens with shape (B, L, C). + + Defaults to ``"cls_token"``. + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Defaults to True. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + t2t_cfg (dict): Extra config of Tokens-to-Token module. + Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
+ """ + OUT_TYPES = {'raw', 'cls_token', 'featmap', 'avg_featmap'} + + def __init__(self, + img_size=224, + in_channels=3, + embed_dims=384, + num_layers=14, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN'), + final_norm=True, + out_type='cls_token', + with_cls_token=True, + interpolate_mode='bicubic', + t2t_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None): + super().__init__(init_cfg) + + # Token-to-Token Module + self.tokens_to_token = T2TModule( + img_size=img_size, + in_channels=in_channels, + embed_dims=embed_dims, + **t2t_cfg) + self.patch_resolution = self.tokens_to_token.init_out_size + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + # Set out type + if out_type not in self.OUT_TYPES: + raise ValueError(f'Unsupported `out_type` {out_type}, please ' + f'choose from {self.OUT_TYPES}') + self.out_type = out_type + + # Set cls token + if with_cls_token: + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims)) + self.num_extra_tokens = 1 + elif out_type != 'cls_token': + self.cls_token = None + self.num_extra_tokens = 0 + else: + raise ValueError( + 'with_cls_token must be True when `out_type="cls_token"`.') + + # Set position embedding + self.interpolate_mode = interpolate_mode + sinusoid_table = get_sinusoid_encoding( + num_patches + self.num_extra_tokens, embed_dims) + self.register_buffer('pos_embed', sinusoid_table) + self._register_load_state_dict_pre_hook(self._prepare_pos_embed) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must be a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = num_layers + index + assert 0 <= out_indices[i] <= num_layers, \ + f'Invalid out_indices {index}' + self.out_indices = out_indices + + # stochastic depth decay rule + dpr = [x for x in np.linspace(0, drop_path_rate, num_layers)] + + self.encoder = ModuleList() + for i in range(num_layers): + if isinstance(layer_cfgs, Sequence): + layer_cfg = layer_cfgs[i] + else: + layer_cfg = deepcopy(layer_cfgs) + layer_cfg = { + 'embed_dims': embed_dims, + 'num_heads': 6, + 'feedforward_channels': 3 * embed_dims, + 'drop_path_rate': dpr[i], + 'qkv_bias': False, + 'norm_cfg': norm_cfg, + **layer_cfg + } + + layer = T2TTransformerLayer(**layer_cfg) + self.encoder.append(layer) + + self.final_norm = final_norm + if final_norm: + self.norm = build_norm_layer(norm_cfg, embed_dims) + else: + self.norm = nn.Identity() + + def init_weights(self): + super().init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress custom init if use pretrained model. 
+ return + + trunc_normal_(self.cls_token, std=.02) + + def _prepare_pos_embed(self, state_dict, prefix, *args, **kwargs): + name = prefix + 'pos_embed' + if name not in state_dict.keys(): + return + + ckpt_pos_embed_shape = state_dict[name].shape + if self.pos_embed.shape != ckpt_pos_embed_shape: + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.info( + f'Resize the pos_embed shape from {ckpt_pos_embed_shape} ' + f'to {self.pos_embed.shape}.') + + ckpt_pos_embed_shape = to_2tuple( + int(np.sqrt(ckpt_pos_embed_shape[1] - self.num_extra_tokens))) + pos_embed_shape = self.tokens_to_token.init_out_size + + state_dict[name] = resize_pos_embed(state_dict[name], + ckpt_pos_embed_shape, + pos_embed_shape, + self.interpolate_mode, + self.num_extra_tokens) + + def forward(self, x): + B = x.shape[0] + x, patch_resolution = self.tokens_to_token(x) + + if self.cls_token is not None: + # stole cls_tokens impl from Phil Wang, thanks + cls_token = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_token, x), dim=1) + + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + x = self.drop_after_pos(x) + + outs = [] + for i, layer in enumerate(self.encoder): + x = layer(x) + + if i == len(self.encoder) - 1 and self.final_norm: + x = self.norm(x) + + if i in self.out_indices: + outs.append(self._format_output(x, patch_resolution)) + + return tuple(outs) + + def _format_output(self, x, hw): + if self.out_type == 'raw': + return x + if self.out_type == 'cls_token': + return x[:, 0] + + patch_token = x[:, self.num_extra_tokens:] + if self.out_type == 'featmap': + B = x.size(0) + # (B, N, C) -> (B, H, W, C) -> (B, C, H, W) + return patch_token.reshape(B, *hw, -1).permute(0, 3, 1, 2) + if self.out_type == 'avg_featmap': + return patch_token.mean(dim=1) diff --git a/mmpretrain/models/backbones/timm_backbone.py b/mmpretrain/models/backbones/timm_backbone.py new file mode 100644 index 0000000..51ecbdb --- /dev/null +++ b/mmpretrain/models/backbones/timm_backbone.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +from mmengine.logging import MMLogger + +from mmpretrain.registry import MODELS +from mmpretrain.utils import require +from .base_backbone import BaseBackbone + + +def print_timm_feature_info(feature_info): + """Print feature_info of timm backbone to help development and debug. + + Args: + feature_info (list[dict] | timm.models.features.FeatureInfo | None): + feature_info of timm backbone. + """ + logger = MMLogger.get_current_instance() + if feature_info is None: + logger.warning('This backbone does not have feature_info') + elif isinstance(feature_info, list): + for feat_idx, each_info in enumerate(feature_info): + logger.info(f'backbone feature_info[{feat_idx}]: {each_info}') + else: + try: + logger.info(f'backbone out_indices: {feature_info.out_indices}') + logger.info(f'backbone out_channels: {feature_info.channels()}') + logger.info(f'backbone out_strides: {feature_info.reduction()}') + except AttributeError: + logger.warning('Unexpected format of backbone feature_info') + + +@MODELS.register_module() +class TIMMBackbone(BaseBackbone): + """Wrapper to use backbones from timm library. + + More details can be found in + `timm `_. + See especially the document for `feature extraction + `_. + + Args: + model_name (str): Name of timm model to instantiate. 
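+            For example ``'resnet50'`` or ``'vit_base_patch16_224'``.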
+ features_only (bool): Whether to extract feature pyramid (multi-scale + feature maps from the deepest layer at each stride). For Vision + Transformer models that do not support this argument, + set this False. Defaults to False. + pretrained (bool): Whether to load pretrained weights. + Defaults to False. + checkpoint_path (str): Path of checkpoint to load at the last of + ``timm.create_model``. Defaults to empty string, which means + not loading. + in_channels (int): Number of input image channels. Defaults to 3. + init_cfg (dict or list[dict], optional): Initialization config dict of + OpenMMLab projects. Defaults to None. + **kwargs: Other timm & model specific arguments. + """ + + @require('timm') + def __init__(self, + model_name, + features_only=False, + pretrained=False, + checkpoint_path='', + in_channels=3, + init_cfg=None, + **kwargs): + import timm + + if not isinstance(pretrained, bool): + raise TypeError('pretrained must be bool, not str for model path') + if features_only and checkpoint_path: + warnings.warn( + 'Using both features_only and checkpoint_path will cause error' + ' in timm. See ' + 'https://github.com/rwightman/pytorch-image-models/issues/488') + + super(TIMMBackbone, self).__init__(init_cfg) + if 'norm_layer' in kwargs: + norm_class = MODELS.get(kwargs['norm_layer']) + + def build_norm(*args, **kwargs): + return norm_class(*args, **kwargs) + + kwargs['norm_layer'] = build_norm + self.timm_model = timm.create_model( + model_name=model_name, + features_only=features_only, + pretrained=pretrained, + in_chans=in_channels, + checkpoint_path=checkpoint_path, + **kwargs) + + # reset classifier + if hasattr(self.timm_model, 'reset_classifier'): + self.timm_model.reset_classifier(0, '') + + # Hack to use pretrained weights from timm + if pretrained or checkpoint_path: + self._is_init = True + + feature_info = getattr(self.timm_model, 'feature_info', None) + print_timm_feature_info(feature_info) + + def forward(self, x): + features = self.timm_model(x) + if isinstance(features, (list, tuple)): + features = tuple(features) + else: + features = (features, ) + return features diff --git a/mmpretrain/models/backbones/tinyvit.py b/mmpretrain/models/backbones/tinyvit.py new file mode 100644 index 0000000..5279832 --- /dev/null +++ b/mmpretrain/models/backbones/tinyvit.py @@ -0,0 +1,769 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence, Tuple + +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint +from mmcv.cnn.bricks import DropPath, build_activation_layer, build_norm_layer +from mmengine.model import BaseModule, ModuleList, Sequential +from torch.nn import functional as F + +from mmpretrain.registry import MODELS +from ..utils import LeAttention +from .base_backbone import BaseBackbone + + +class ConvBN2d(Sequential): + """An implementation of Conv2d + BatchNorm2d with support of fusion. + + Modified from + https://github.com/microsoft/Cream/blob/main/TinyViT/models/tiny_vit.py + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + kernel_size (int): The size of the convolution kernel. + Default: 1. + stride (int): The stride of the convolution. + Default: 1. + padding (int): The padding of the convolution. + Default: 0. + dilation (int): The dilation of the convolution. + Default: 1. + groups (int): The number of groups in the convolution. + Default: 1. + bn_weight_init (float): The initial value of the weight of + the nn.BatchNorm2d layer. Default: 1.0. 
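+            Setting it to ``0.0`` makes the block output zeros at
+            initialization, which is used for the last ``ConvBN2d`` in
+            ``MBConvBlock``.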
+ init_cfg (dict): The initialization config of the module. + Default: None. + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0, + dilation=1, + groups=1, + bn_weight_init=1.0, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.add_module( + 'conv2d', + nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=False)) + bn2d = nn.BatchNorm2d(num_features=out_channels) + # bn initialization + torch.nn.init.constant_(bn2d.weight, bn_weight_init) + torch.nn.init.constant_(bn2d.bias, 0) + self.add_module('bn2d', bn2d) + + @torch.no_grad() + def fuse(self): + conv2d, bn2d = self._modules.values() + w = bn2d.weight / (bn2d.running_var + bn2d.eps)**0.5 + w = conv2d.weight * w[:, None, None, None] + b = bn2d.bias - bn2d.running_mean * bn2d.weight / \ + (bn2d.running_var + bn2d.eps)**0.5 + + m = nn.Conv2d( + in_channels=w.size(1) * self.c.groups, + out_channels=w.size(0), + kernel_size=w.shape[2:], + stride=self.conv2d.stride, + padding=self.conv2d.padding, + dilation=self.conv2d.dilation, + groups=self.conv2d.groups) + m.weight.data.copy_(w) + m.bias.data.copy_(b) + return m + + +class PatchEmbed(BaseModule): + """Patch Embedding for Vision Transformer. + + Adapted from + https://github.com/microsoft/Cream/blob/main/TinyViT/models/tiny_vit.py + + Different from `mmcv.cnn.bricks.transformer.PatchEmbed`, this module use + Conv2d and BatchNorm2d to implement PatchEmbedding, and output shape is + (N, C, H, W). + + Args: + in_channels (int): The number of input channels. + embed_dim (int): The embedding dimension. + resolution (Tuple[int, int]): The resolution of the input feature. + act_cfg (dict): The activation config of the module. + Default: dict(type='GELU'). + """ + + def __init__(self, + in_channels, + embed_dim, + resolution, + act_cfg=dict(type='GELU')): + super().__init__() + img_size: Tuple[int, int] = resolution + self.patches_resolution = (img_size[0] // 4, img_size[1] // 4) + self.num_patches = self.patches_resolution[0] * \ + self.patches_resolution[1] + self.in_channels = in_channels + self.embed_dim = embed_dim + self.seq = nn.Sequential( + ConvBN2d( + in_channels, + embed_dim // 2, + kernel_size=3, + stride=2, + padding=1), + build_activation_layer(act_cfg), + ConvBN2d( + embed_dim // 2, embed_dim, kernel_size=3, stride=2, padding=1), + ) + + def forward(self, x): + return self.seq(x) + + +class PatchMerging(nn.Module): + """Patch Merging for TinyViT. + + Adapted from + https://github.com/microsoft/Cream/blob/main/TinyViT/models/tiny_vit.py + + Different from `mmpretrain.models.utils.PatchMerging`, this module use + Conv2d and BatchNorm2d to implement PatchMerging. + + Args: + in_channels (int): The number of input channels. + resolution (Tuple[int, int]): The resolution of the input feature. + out_channels (int): The number of output channels. + act_cfg (dict): The activation config of the module. + Default: dict(type='GELU'). 
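+
+    Notes:
+        The spatial resolution is halved by a strided depth-wise
+        convolution, i.e. ``out_resolution = (resolution[0] // 2,
+        resolution[1] // 2)``, and the output is flattened back to the
+        ``(B, L, C)`` token format.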
+ """ + + def __init__(self, + resolution, + in_channels, + out_channels, + act_cfg=dict(type='GELU')): + super().__init__() + + self.img_size = resolution + + self.act = build_activation_layer(act_cfg) + self.conv1 = ConvBN2d(in_channels, out_channels, kernel_size=1) + self.conv2 = ConvBN2d( + out_channels, + out_channels, + kernel_size=3, + stride=2, + padding=1, + groups=out_channels) + self.conv3 = ConvBN2d(out_channels, out_channels, kernel_size=1) + self.out_resolution = (resolution[0] // 2, resolution[1] // 2) + + def forward(self, x): + if len(x.shape) == 3: + H, W = self.img_size + B = x.shape[0] + x = x.view(B, H, W, -1).permute(0, 3, 1, 2) + x = self.conv1(x) + x = self.act(x) + x = self.conv2(x) + x = self.act(x) + x = self.conv3(x) + + x = x.flatten(2).transpose(1, 2) + return x + + +class MBConvBlock(nn.Module): + """Mobile Inverted Residual Bottleneck Block for TinyViT. Adapted from + https://github.com/microsoft/Cream/blob/main/TinyViT/models/tiny_vit.py. + + Args: + in_channels (int): The number of input channels. + out_channels (int): The number of output channels. + expand_ratio (int): The expand ratio of the hidden channels. + drop_rate (float): The drop rate of the block. + act_cfg (dict): The activation config of the module. + Default: dict(type='GELU'). + """ + + def __init__(self, + in_channels, + out_channels, + expand_ratio, + drop_path, + act_cfg=dict(type='GELU')): + super().__init__() + self.in_channels = in_channels + hidden_channels = int(in_channels * expand_ratio) + + # linear + self.conv1 = ConvBN2d(in_channels, hidden_channels, kernel_size=1) + self.act = build_activation_layer(act_cfg) + # depthwise conv + self.conv2 = ConvBN2d( + in_channels=hidden_channels, + out_channels=hidden_channels, + kernel_size=3, + stride=1, + padding=1, + groups=hidden_channels) + # linear + self.conv3 = ConvBN2d( + hidden_channels, out_channels, kernel_size=1, bn_weight_init=0.0) + + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x + + x = self.conv1(x) + x = self.act(x) + + x = self.conv2(x) + x = self.act(x) + + x = self.conv3(x) + + x = self.drop_path(x) + + x += shortcut + x = self.act(x) + + return x + + +class ConvStage(BaseModule): + """Convolution Stage for TinyViT. + + Adapted from + https://github.com/microsoft/Cream/blob/main/TinyViT/models/tiny_vit.py + + Args: + in_channels (int): The number of input channels. + resolution (Tuple[int, int]): The resolution of the input feature. + depth (int): The number of blocks in the stage. + act_cfg (dict): The activation config of the module. + drop_path (float): The drop path of the block. + downsample (None | nn.Module): The downsample operation. + Default: None. + use_checkpoint (bool): Whether to use checkpointing to save memory. + out_channels (int): The number of output channels. + conv_expand_ratio (int): The expand ratio of the hidden channels. + Default: 4. + init_cfg (dict | list[dict], optional): Initialization config dict. + Default: None. 
+ """ + + def __init__(self, + in_channels, + resolution, + depth, + act_cfg, + drop_path=0., + downsample=None, + use_checkpoint=False, + out_channels=None, + conv_expand_ratio=4., + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.use_checkpoint = use_checkpoint + # build blocks + self.blocks = ModuleList([ + MBConvBlock( + in_channels=in_channels, + out_channels=in_channels, + expand_ratio=conv_expand_ratio, + drop_path=drop_path[i] + if isinstance(drop_path, list) else drop_path) + for i in range(depth) + ]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample( + resolution=resolution, + in_channels=in_channels, + out_channels=out_channels, + act_cfg=act_cfg) + self.resolution = self.downsample.out_resolution + else: + self.downsample = None + self.resolution = resolution + + def forward(self, x): + for block in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(block, x) + else: + x = block(x) + + if self.downsample is not None: + x = self.downsample(x) + return x + + +class MLP(BaseModule): + """MLP module for TinyViT. + + Args: + in_channels (int): The number of input channels. + hidden_channels (int, optional): The number of hidden channels. + Default: None. + out_channels (int, optional): The number of output channels. + Default: None. + act_cfg (dict): The activation config of the module. + Default: dict(type='GELU'). + drop (float): Probability of an element to be zeroed. + Default: 0. + init_cfg (dict | list[dict], optional): Initialization config dict. + Default: None. + """ + + def __init__(self, + in_channels, + hidden_channels=None, + out_channels=None, + act_cfg=dict(type='GELU'), + drop=0., + init_cfg=None): + super().__init__(init_cfg=init_cfg) + out_channels = out_channels or in_channels + hidden_channels = hidden_channels or in_channels + self.norm = nn.LayerNorm(in_channels) + self.fc1 = nn.Linear(in_channels, hidden_channels) + self.fc2 = nn.Linear(hidden_channels, out_channels) + self.act = build_activation_layer(act_cfg) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.norm(x) + + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class TinyViTBlock(BaseModule): + """TinViT Block. + + Args: + in_channels (int): The number of input channels. + resolution (Tuple[int, int]): The resolution of the input feature. + num_heads (int): The number of heads in the multi-head attention. + window_size (int): The size of the window. + Default: 7. + mlp_ratio (float): The ratio of mlp hidden dim to embedding dim. + Default: 4. + drop (float): Probability of an element to be zeroed. + Default: 0. + drop_path (float): The drop path of the block. + Default: 0. + local_conv_size (int): The size of the local convolution. + Default: 3. + act_cfg (dict): The activation config of the module. + Default: dict(type='GELU'). + """ + + def __init__(self, + in_channels, + resolution, + num_heads, + window_size=7, + mlp_ratio=4., + drop=0., + drop_path=0., + local_conv_size=3, + act_cfg=dict(type='GELU')): + super().__init__() + self.in_channels = in_channels + self.img_size = resolution + self.num_heads = num_heads + assert window_size > 0, 'window_size must be greater than 0' + self.window_size = window_size + self.mlp_ratio = mlp_ratio + + self.drop_path = DropPath( + drop_path) if drop_path > 0. 
else nn.Identity() + + assert in_channels % num_heads == 0, \ + 'dim must be divisible by num_heads' + head_dim = in_channels // num_heads + + window_resolution = (window_size, window_size) + self.attn = LeAttention( + in_channels, + head_dim, + num_heads, + attn_ratio=1, + resolution=window_resolution) + + mlp_hidden_dim = int(in_channels * mlp_ratio) + self.mlp = MLP( + in_channels=in_channels, + hidden_channels=mlp_hidden_dim, + act_cfg=act_cfg, + drop=drop) + + self.local_conv = ConvBN2d( + in_channels=in_channels, + out_channels=in_channels, + kernel_size=local_conv_size, + stride=1, + padding=local_conv_size // 2, + groups=in_channels) + + def forward(self, x): + H, W = self.img_size + B, L, C = x.shape + assert L == H * W, 'input feature has wrong size' + res_x = x + if H == self.window_size and W == self.window_size: + x = self.attn(x) + else: + x = x.view(B, H, W, C) + pad_b = (self.window_size - + H % self.window_size) % self.window_size + pad_r = (self.window_size - + W % self.window_size) % self.window_size + padding = pad_b > 0 or pad_r > 0 + + if padding: + x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b)) + + pH, pW = H + pad_b, W + pad_r + nH = pH // self.window_size + nW = pW // self.window_size + # window partition + x = x.view(B, nH, self.window_size, nW, self.window_size, + C).transpose(2, 3).reshape( + B * nH * nW, self.window_size * self.window_size, C) + x = self.attn(x) + # window reverse + x = x.view(B, nH, nW, self.window_size, self.window_size, + C).transpose(2, 3).reshape(B, pH, pW, C) + + if padding: + x = x[:, :H, :W].contiguous() + + x = x.view(B, L, C) + + x = res_x + self.drop_path(x) + + x = x.transpose(1, 2).reshape(B, C, H, W) + x = self.local_conv(x) + x = x.view(B, C, L).transpose(1, 2) + + x = x + self.drop_path(self.mlp(x)) + return x + + +class BasicStage(BaseModule): + """Basic Stage for TinyViT. + + Args: + in_channels (int): The number of input channels. + resolution (Tuple[int, int]): The resolution of the input feature. + depth (int): The number of blocks in the stage. + num_heads (int): The number of heads in the multi-head attention. + window_size (int): The size of the window. + mlp_ratio (float): The ratio of mlp hidden dim to embedding dim. + Default: 4. + drop (float): Probability of an element to be zeroed. + Default: 0. + drop_path (float): The drop path of the block. + Default: 0. + downsample (None | nn.Module): The downsample operation. + Default: None. + use_checkpoint (bool): Whether to use checkpointing to save memory. + Default: False. + act_cfg (dict): The activation config of the module. + Default: dict(type='GELU'). + init_cfg (dict | list[dict], optional): Initialization config dict. + Default: None. 
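+        local_conv_size (int): The size of the depth-wise convolution
+            applied in each :class:`TinyViTBlock`. Default: 3.
+        out_channels (int, optional): The number of output channels of
+            the downsample layer. Default: None.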
+ """ + + def __init__(self, + in_channels, + resolution, + depth, + num_heads, + window_size, + mlp_ratio=4., + drop=0., + drop_path=0., + downsample=None, + use_checkpoint=False, + local_conv_size=3, + out_channels=None, + act_cfg=dict(type='GELU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.use_checkpoint = use_checkpoint + # build blocks + self.blocks = ModuleList([ + TinyViTBlock( + in_channels=in_channels, + resolution=resolution, + num_heads=num_heads, + window_size=window_size, + mlp_ratio=mlp_ratio, + drop=drop, + local_conv_size=local_conv_size, + act_cfg=act_cfg, + drop_path=drop_path[i] + if isinstance(drop_path, list) else drop_path) + for i in range(depth) + ]) + + # build patch merging layer + if downsample is not None: + self.downsample = downsample( + resolution=resolution, + in_channels=in_channels, + out_channels=out_channels, + act_cfg=act_cfg) + self.resolution = self.downsample.out_resolution + else: + self.downsample = None + self.resolution = resolution + + def forward(self, x): + for block in self.blocks: + if self.use_checkpoint: + x = checkpoint.checkpoint(block, x) + else: + x = block(x) + + if self.downsample is not None: + x = self.downsample(x) + return x + + +@MODELS.register_module() +class TinyViT(BaseBackbone): + """TinyViT. + A PyTorch implementation of : `TinyViT: Fast Pretraining Distillation + for Small Vision Transformers`_ + + Inspiration from + https://github.com/microsoft/Cream/blob/main/TinyViT + + Args: + arch (str | dict): The architecture of TinyViT. + Default: '5m'. + img_size (tuple | int): The resolution of the input image. + Default: (224, 224) + window_size (list): The size of the window. + Default: [7, 7, 14, 7] + in_channels (int): The number of input channels. + Default: 3. + depths (list[int]): The depth of each stage. + Default: [2, 2, 6, 2]. + mlp_ratio (list[int]): The ratio of mlp hidden dim to embedding dim. + Default: 4. + drop_rate (float): Probability of an element to be zeroed. + Default: 0. + drop_path_rate (float): The drop path of the block. + Default: 0.1. + use_checkpoint (bool): Whether to use checkpointing to save memory. + Default: False. + mbconv_expand_ratio (int): The expand ratio of the mbconv. + Default: 4.0 + local_conv_size (int): The size of the local conv. + Default: 3. + layer_lr_decay (float): The layer lr decay. + Default: 1.0 + out_indices (int | list[int]): Output from which stages. + Default: -1 + frozen_stages (int | list[int]): Stages to be frozen (all param fixed). + Default: -0 + gap_before_final_nrom (bool): Whether to add a gap before the final + norm. Default: True. + act_cfg (dict): The activation config of the module. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + init_cfg (dict | list[dict], optional): Initialization config dict. + Default: None. 
+ """ + arch_settings = { + '5m': { + 'channels': [64, 128, 160, 320], + 'num_heads': [2, 4, 5, 10], + 'depths': [2, 2, 6, 2], + }, + '11m': { + 'channels': [64, 128, 256, 448], + 'num_heads': [2, 4, 8, 14], + 'depths': [2, 2, 6, 2], + }, + '21m': { + 'channels': [96, 192, 384, 576], + 'num_heads': [3, 6, 12, 18], + 'depths': [2, 2, 6, 2], + }, + } + + def __init__(self, + arch='5m', + img_size=(224, 224), + window_size=[7, 7, 14, 7], + in_channels=3, + mlp_ratio=4., + drop_rate=0., + drop_path_rate=0.1, + use_checkpoint=False, + mbconv_expand_ratio=4.0, + local_conv_size=3, + layer_lr_decay=1.0, + out_indices=-1, + frozen_stages=0, + gap_before_final_norm=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + assert arch in self.arch_settings, \ + f'Unavaiable arch, please choose from ' \ + f'({set(self.arch_settings)} or pass a dict.' + arch = self.arch_settings[arch] + elif isinstance(arch, dict): + assert 'channels' in arch and 'num_heads' in arch and \ + 'depths' in arch, 'The arch dict must have' \ + f'"channels", "num_heads", "window_sizes" ' \ + f'keys, but got {arch.keys()}' + + self.channels = arch['channels'] + self.num_heads = arch['num_heads'] + self.widow_sizes = window_size + self.img_size = img_size + self.depths = arch['depths'] + + self.num_stages = len(self.channels) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = 4 + index + assert out_indices[i] >= 0, f'Invalid out_indices {index}' + self.out_indices = out_indices + + self.frozen_stages = frozen_stages + self.gap_before_final_norm = gap_before_final_norm + self.layer_lr_decay = layer_lr_decay + + self.patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dim=self.channels[0], + resolution=self.img_size, + act_cfg=dict(type='GELU')) + patches_resolution = self.patch_embed.patches_resolution + + # stochastic depth decay rule + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, sum(self.depths)) + ] + + # build stages + self.stages = ModuleList() + for i in range(self.num_stages): + depth = self.depths[i] + channel = self.channels[i] + curr_resolution = (patches_resolution[0] // (2**i), + patches_resolution[1] // (2**i)) + drop_path = dpr[sum(self.depths[:i]):sum(self.depths[:i + 1])] + downsample = PatchMerging if (i < self.num_stages - 1) else None + out_channels = self.channels[min(i + 1, self.num_stages - 1)] + if i >= 1: + stage = BasicStage( + in_channels=channel, + resolution=curr_resolution, + depth=depth, + num_heads=self.num_heads[i], + window_size=self.widow_sizes[i], + mlp_ratio=mlp_ratio, + drop=drop_rate, + drop_path=drop_path, + downsample=downsample, + use_checkpoint=use_checkpoint, + local_conv_size=local_conv_size, + out_channels=out_channels, + act_cfg=act_cfg) + else: + stage = ConvStage( + in_channels=channel, + resolution=curr_resolution, + depth=depth, + act_cfg=act_cfg, + drop_path=drop_path, + downsample=downsample, + use_checkpoint=use_checkpoint, + out_channels=out_channels, + conv_expand_ratio=mbconv_expand_ratio) + self.stages.append(stage) + + # add output norm + if i in self.out_indices: + norm_layer = build_norm_layer(norm_cfg, out_channels)[1] + self.add_module(f'norm{i}', norm_layer) + + def set_layer_lr_decay(self, layer_lr_decay): + # TODO: add 
layer_lr_decay + pass + + def forward(self, x): + outs = [] + x = self.patch_embed(x) + + for i, stage in enumerate(self.stages): + x = stage(x) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + if self.gap_before_final_norm: + gap = x.mean(1) + outs.append(norm_layer(gap)) + else: + out = norm_layer(x) + # convert the (B,L,C) format into (B,C,H,W) format + # which would be better for the downstream tasks. + B, L, C = out.shape + out = out.view(B, *stage.resolution, C) + outs.append(out.permute(0, 3, 1, 2)) + + return tuple(outs) + + def _freeze_stages(self): + for i in range(self.frozen_stages): + stage = self.stages[i] + stage.eval() + for param in stage.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(TinyViT, self).train(mode) + self._freeze_stages() diff --git a/mmpretrain/models/backbones/tnt.py b/mmpretrain/models/backbones/tnt.py new file mode 100644 index 0000000..e1b241c --- /dev/null +++ b/mmpretrain/models/backbones/tnt.py @@ -0,0 +1,368 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.registry import MODELS +from ..utils import to_2tuple +from .base_backbone import BaseBackbone + + +class TransformerBlock(BaseModule): + """Implement a transformer block in TnTLayer. + + Args: + embed_dims (int): The feature dimension + num_heads (int): Parallel attention heads + ffn_ratio (int): A ratio to calculate the hidden_dims in ffn layer. + Default: 4 + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default 0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0. + drop_path_rate (float): stochastic depth rate. Default 0. + num_fcs (int): The number of fully-connected layers for FFNs. Default 2 + qkv_bias (bool): Enable bias for qkv if True. Default False + act_cfg (dict): The activation config for FFNs. Defaults to GELU. + norm_cfg (dict): Config dict for normalization layer. Default + layer normalization + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) or (n, batch, embed_dim). + (batch, n, embed_dim) is common case in CV. Defaults to False + init_cfg (dict, optional): Initialization config dict. 
Defaults to None + """ + + def __init__(self, + embed_dims, + num_heads, + ffn_ratio=4, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=False, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + batch_first=True, + init_cfg=None): + super(TransformerBlock, self).__init__(init_cfg=init_cfg) + + self.norm_attn = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = MultiheadAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + batch_first=batch_first) + + self.norm_ffn = build_norm_layer(norm_cfg, embed_dims)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=embed_dims * ffn_ratio, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + if not qkv_bias: + self.attn.attn.in_proj_bias = None + + def forward(self, x): + x = self.attn(self.norm_attn(x), identity=x) + x = self.ffn(self.norm_ffn(x), identity=x) + return x + + +class TnTLayer(BaseModule): + """Implement one encoder layer in Transformer in Transformer. + + Args: + num_pixel (int): The pixel number in target patch transformed with + a linear projection in inner transformer + embed_dims_inner (int): Feature dimension in inner transformer block + embed_dims_outer (int): Feature dimension in outer transformer block + num_heads_inner (int): Parallel attention heads in inner transformer. + num_heads_outer (int): Parallel attention heads in outer transformer. + inner_block_cfg (dict): Extra config of inner transformer block. + Defaults to empty dict. + outer_block_cfg (dict): Extra config of outer transformer block. + Defaults to empty dict. + norm_cfg (dict): Config dict for normalization layer. Default + layer normalization + init_cfg (dict, optional): Initialization config dict. Defaults to None + """ + + def __init__(self, + num_pixel, + embed_dims_inner, + embed_dims_outer, + num_heads_inner, + num_heads_outer, + inner_block_cfg=dict(), + outer_block_cfg=dict(), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(TnTLayer, self).__init__(init_cfg=init_cfg) + + self.inner_block = TransformerBlock( + embed_dims=embed_dims_inner, + num_heads=num_heads_inner, + **inner_block_cfg) + + self.norm_proj = build_norm_layer(norm_cfg, embed_dims_inner)[1] + self.projection = nn.Linear( + embed_dims_inner * num_pixel, embed_dims_outer, bias=True) + + self.outer_block = TransformerBlock( + embed_dims=embed_dims_outer, + num_heads=num_heads_outer, + **outer_block_cfg) + + def forward(self, pixel_embed, patch_embed): + pixel_embed = self.inner_block(pixel_embed) + + B, N, C = patch_embed.size() + patch_embed[:, 1:] = patch_embed[:, 1:] + self.projection( + self.norm_proj(pixel_embed).reshape(B, N - 1, -1)) + patch_embed = self.outer_block(patch_embed) + + return pixel_embed, patch_embed + + +class PixelEmbed(BaseModule): + """Image to Pixel Embedding. + + Args: + img_size (int | tuple): The size of input image + patch_size (int): The size of one patch + in_channels (int): The num of input channels + embed_dims_inner (int): The num of channels of the target patch + transformed with a linear projection in inner transformer + stride (int): The stride of the conv2d layer. We use a conv2d layer + and a unfold layer to implement image to pixel embedding. 
+ init_cfg (dict, optional): Initialization config dict + """ + + def __init__(self, + img_size=224, + patch_size=16, + in_channels=3, + embed_dims_inner=48, + stride=4, + init_cfg=None): + super(PixelEmbed, self).__init__(init_cfg=init_cfg) + img_size = to_2tuple(img_size) + patch_size = to_2tuple(patch_size) + # patches_resolution property necessary for resizing + # positional embedding + patches_resolution = [ + img_size[0] // patch_size[0], img_size[1] // patch_size[1] + ] + num_patches = patches_resolution[0] * patches_resolution[1] + + self.img_size = img_size + self.num_patches = num_patches + self.embed_dims_inner = embed_dims_inner + + new_patch_size = [math.ceil(ps / stride) for ps in patch_size] + self.new_patch_size = new_patch_size + + self.proj = nn.Conv2d( + in_channels, + self.embed_dims_inner, + kernel_size=7, + padding=3, + stride=stride) + self.unfold = nn.Unfold( + kernel_size=new_patch_size, stride=new_patch_size) + + def forward(self, x, pixel_pos): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't match model " \ + f'({self.img_size[0]}*{self.img_size[1]}).' + x = self.proj(x) + x = self.unfold(x) + x = x.transpose(1, + 2).reshape(B * self.num_patches, self.embed_dims_inner, + self.new_patch_size[0], + self.new_patch_size[1]) + x = x + pixel_pos + x = x.reshape(B * self.num_patches, self.embed_dims_inner, + -1).transpose(1, 2) + return x + + +@MODELS.register_module() +class TNT(BaseBackbone): + """Transformer in Transformer. + + A PyTorch implement of: `Transformer in Transformer + `_ + + Inspiration from + https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/tnt.py + + Args: + arch (str | dict): Vision Transformer architecture + Default: 'b' + img_size (int | tuple): Input image size. Defaults to 224 + patch_size (int | tuple): The patch size. Deault to 16 + in_channels (int): Number of input channels. Defaults to 3 + ffn_ratio (int): A ratio to calculate the hidden_dims in ffn layer. + Default: 4 + qkv_bias (bool): Enable bias for qkv if True. Default False + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default 0. + attn_drop_rate (float): The drop out rate for attention layer. + Default 0. + drop_path_rate (float): stochastic depth rate. Default 0. + act_cfg (dict): The activation config for FFNs. Defaults to GELU. + norm_cfg (dict): Config dict for normalization layer. Default + layer normalization + first_stride (int): The stride of the conv2d layer. We use a conv2d + layer and a unfold layer to implement image to pixel embedding. + num_fcs (int): The number of fully-connected layers for FFNs. Default 2 + init_cfg (dict, optional): Initialization config dict + """ + arch_zoo = { + **dict.fromkeys( + ['s', 'small'], { + 'embed_dims_outer': 384, + 'embed_dims_inner': 24, + 'num_layers': 12, + 'num_heads_outer': 6, + 'num_heads_inner': 4 + }), + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims_outer': 640, + 'embed_dims_inner': 40, + 'num_layers': 12, + 'num_heads_outer': 10, + 'num_heads_inner': 4 + }) + } + + def __init__(self, + arch='b', + img_size=224, + patch_size=16, + in_channels=3, + ffn_ratio=4, + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + first_stride=4, + num_fcs=2, + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) 
+ ]): + super(TNT, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims_outer', 'embed_dims_inner', 'num_layers', + 'num_heads_inner', 'num_heads_outer' + } + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims_inner = self.arch_settings['embed_dims_inner'] + self.embed_dims_outer = self.arch_settings['embed_dims_outer'] + # embed_dims for consistency with other models + self.embed_dims = self.embed_dims_outer + self.num_layers = self.arch_settings['num_layers'] + self.num_heads_inner = self.arch_settings['num_heads_inner'] + self.num_heads_outer = self.arch_settings['num_heads_outer'] + + self.pixel_embed = PixelEmbed( + img_size=img_size, + patch_size=patch_size, + in_channels=in_channels, + embed_dims_inner=self.embed_dims_inner, + stride=first_stride) + num_patches = self.pixel_embed.num_patches + self.num_patches = num_patches + new_patch_size = self.pixel_embed.new_patch_size + num_pixel = new_patch_size[0] * new_patch_size[1] + + self.norm1_proj = build_norm_layer(norm_cfg, num_pixel * + self.embed_dims_inner)[1] + self.projection = nn.Linear(num_pixel * self.embed_dims_inner, + self.embed_dims_outer) + self.norm2_proj = build_norm_layer(norm_cfg, self.embed_dims_outer)[1] + + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims_outer)) + self.patch_pos = nn.Parameter( + torch.zeros(1, num_patches + 1, self.embed_dims_outer)) + self.pixel_pos = nn.Parameter( + torch.zeros(1, self.embed_dims_inner, new_patch_size[0], + new_patch_size[1])) + self.drop_after_pos = nn.Dropout(p=drop_rate) + + dpr = [ + x.item() + for x in torch.linspace(0, drop_path_rate, self.num_layers) + ] # stochastic depth decay rule + self.layers = ModuleList() + for i in range(self.num_layers): + block_cfg = dict( + ffn_ratio=ffn_ratio, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[i], + num_fcs=num_fcs, + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + batch_first=True) + self.layers.append( + TnTLayer( + num_pixel=num_pixel, + embed_dims_inner=self.embed_dims_inner, + embed_dims_outer=self.embed_dims_outer, + num_heads_inner=self.num_heads_inner, + num_heads_outer=self.num_heads_outer, + inner_block_cfg=block_cfg, + outer_block_cfg=block_cfg, + norm_cfg=norm_cfg)) + + self.norm = build_norm_layer(norm_cfg, self.embed_dims_outer)[1] + + trunc_normal_(self.cls_token, std=.02) + trunc_normal_(self.patch_pos, std=.02) + trunc_normal_(self.pixel_pos, std=.02) + + def forward(self, x): + B = x.shape[0] + pixel_embed = self.pixel_embed(x, self.pixel_pos) + + patch_embed = self.norm2_proj( + self.projection( + self.norm1_proj(pixel_embed.reshape(B, self.num_patches, -1)))) + patch_embed = torch.cat( + (self.cls_token.expand(B, -1, -1), patch_embed), dim=1) + patch_embed = patch_embed + self.patch_pos + patch_embed = self.drop_after_pos(patch_embed) + + for layer in self.layers: + pixel_embed, patch_embed = layer(pixel_embed, patch_embed) + + patch_embed = self.norm(patch_embed) + return (patch_embed[:, 0], ) diff --git a/mmpretrain/models/backbones/twins.py b/mmpretrain/models/backbones/twins.py new file mode 100644 index 0000000..be55c02 --- /dev/null +++ b/mmpretrain/models/backbones/twins.py @@ -0,0 +1,721 @@ +# Copyright (c) OpenMMLab. 
All rights reserved. +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import Conv2d, build_norm_layer +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import (constant_init, normal_init, + trunc_normal_init) +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.registry import MODELS +from ..utils import ConditionalPositionEncoding, MultiheadAttention + + +class GlobalSubsampledAttention(MultiheadAttention): + """Global Sub-sampled Attention (GSA) module. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + input_dims (int, optional): The input dimension, and if None, + use ``embed_dims``. Defaults to None. + attn_drop (float): Dropout rate of the dropout layer after the + attention calculation of query and key. Defaults to 0. + proj_drop (float): Dropout rate of the dropout layer after the + output projection. Defaults to 0. + dropout_layer (dict): The dropout config before adding the shortcut. + Defaults to ``dict(type='Dropout', drop_prob=0.)``. + qkv_bias (bool): If True, add a learnable bias to q, k, v. + Defaults to True. + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + proj_bias (bool) If True, add a learnable bias to output projection. + Defaults to True. + v_shortcut (bool): Add a shortcut from value to output. It's usually + used if ``input_dims`` is different from ``embed_dims``. + Defaults to False. + sr_ratio (float): The ratio of spatial reduction in attention modules. + Defaults to 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + norm_cfg=dict(type='LN'), + qkv_bias=True, + sr_ratio=1, + **kwargs): + super(GlobalSubsampledAttention, + self).__init__(embed_dims, num_heads, **kwargs) + + self.qkv_bias = qkv_bias + self.q = nn.Linear(self.input_dims, embed_dims, bias=qkv_bias) + self.kv = nn.Linear(self.input_dims, embed_dims * 2, bias=qkv_bias) + + # remove self.qkv, here split into self.q, self.kv + delattr(self, 'qkv') + + self.sr_ratio = sr_ratio + if sr_ratio > 1: + # use a conv as the spatial-reduction operation, the kernel_size + # and stride in conv are equal to the sr_ratio. + self.sr = Conv2d( + in_channels=embed_dims, + out_channels=embed_dims, + kernel_size=sr_ratio, + stride=sr_ratio) + # The ret[0] of build_norm_layer is norm name. + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + + def forward(self, x, hw_shape): + B, N, C = x.shape + H, W = hw_shape + assert H * W == N, 'The product of h and w of hw_shape must be N, ' \ + 'which is the 2nd dim number of the input Tensor x.' + + q = self.q(x).reshape(B, N, self.num_heads, + C // self.num_heads).permute(0, 2, 1, 3) + + if self.sr_ratio > 1: + x = x.permute(0, 2, 1).reshape(B, C, *hw_shape) # BNC_2_BCHW + x = self.sr(x) + x = x.reshape(B, C, -1).permute(0, 2, 1) # BCHW_2_BNC + x = self.norm(x) + + kv = self.kv(x).reshape(B, -1, 2, self.num_heads, + self.head_dims).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] + + attn_drop = self.attn_drop if self.training else 0. 
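+        # attention dropout is only active in training mode; the scaled
+        # dot-product helper inherited from ``MultiheadAttention`` attends
+        # the full-resolution queries to the sub-sampled keys and values.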
+ x = self.scaled_dot_product_attention(q, k, v, dropout_p=attn_drop) + x = x.transpose(1, 2).reshape(B, N, self.embed_dims) + + x = self.proj(x) + x = self.out_drop(self.proj_drop(x)) + + if self.v_shortcut: + x = v.squeeze(1) + x + return x + + +class GSAEncoderLayer(BaseModule): + """Implements one encoder layer with GlobalSubsampledAttention(GSA). + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default: 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Default: 0.0. + drop_path_rate (float): Stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qkv_bias (bool): Enable bias for qkv if True. Default: True + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. + Default: dict(type='LN'). + sr_ratio (float): The ratio of spatial reduction in attention modules. + Defaults to 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + sr_ratio=1., + init_cfg=None): + super(GSAEncoderLayer, self).__init__(init_cfg=init_cfg) + + self.norm1 = build_norm_layer(norm_cfg, embed_dims, postfix=1)[1] + self.attn = GlobalSubsampledAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + sr_ratio=sr_ratio) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims, postfix=2)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=False) + + self.drop_path = build_dropout( + dict(type='DropPath', drop_prob=drop_path_rate) + ) if drop_path_rate > 0. else nn.Identity() + + def forward(self, x, hw_shape): + x = x + self.drop_path(self.attn(self.norm1(x), hw_shape)) + x = x + self.drop_path(self.ffn(self.norm2(x))) + return x + + +class LocallyGroupedSelfAttention(BaseModule): + """Locally-grouped Self Attention (LSA) module. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. Default: 8 + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: False. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float, optional): Dropout ratio of output. Default: 0. + window_size(int): Window size of LSA. Default: 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
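+
+    Notes:
+        The feature map is padded to a multiple of ``window_size`` and
+        self-attention is computed independently inside each
+        non-overlapping window; padded positions are masked with a large
+        negative bias before the softmax.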
+ """ + + def __init__(self, + embed_dims, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + window_size=1, + init_cfg=None): + super(LocallyGroupedSelfAttention, self).__init__(init_cfg=init_cfg) + + assert embed_dims % num_heads == 0, \ + f'dim {embed_dims} should be divided by num_heads {num_heads}' + + self.embed_dims = embed_dims + self.num_heads = num_heads + head_dim = embed_dims // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + self.window_size = window_size + + def forward(self, x, hw_shape): + B, N, C = x.shape + H, W = hw_shape + x = x.view(B, H, W, C) + + # pad feature maps to multiples of Local-groups + pad_l = pad_t = 0 + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + + # calculate attention mask for LSA + Hp, Wp = x.shape[1:-1] + _h, _w = Hp // self.window_size, Wp // self.window_size + mask = torch.zeros((1, Hp, Wp), device=x.device) + mask[:, -pad_b:, :].fill_(1) + mask[:, :, -pad_r:].fill_(1) + + # [B, _h, _w, window_size, window_size, C] + x = x.reshape(B, _h, self.window_size, _w, self.window_size, + C).transpose(2, 3) + mask = mask.reshape(1, _h, self.window_size, _w, + self.window_size).transpose(2, 3).reshape( + 1, _h * _w, + self.window_size * self.window_size) + # [1, _h*_w, window_size*window_size, window_size*window_size] + attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3) + attn_mask = attn_mask.masked_fill(attn_mask != 0, + float(-1000.0)).masked_fill( + attn_mask == 0, float(0.0)) + + # [3, B, _w*_h, nhead, window_size*window_size, dim] + qkv = self.qkv(x).reshape(B, _h * _w, + self.window_size * self.window_size, 3, + self.num_heads, C // self.num_heads).permute( + 3, 0, 1, 4, 2, 5) + q, k, v = qkv[0], qkv[1], qkv[2] + # [B, _h*_w, n_head, window_size*window_size, window_size*window_size] + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn + attn_mask.unsqueeze(2) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + attn = (attn @ v).transpose(2, 3).reshape(B, _h, _w, self.window_size, + self.window_size, C) + x = attn.transpose(2, 3).reshape(B, _h * self.window_size, + _w * self.window_size, C) + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + + x = x.reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class LSAEncoderLayer(BaseModule): + """Implements one encoder layer with LocallyGroupedSelfAttention(LSA). + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Default: 0.0. + attn_drop_rate (float, optional): Dropout ratio of attention weight. + Default: 0.0 + drop_path_rate (float): Stochastic depth rate. Default 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Default: 2. + qkv_bias (bool): Enable bias for qkv if True. Default: True + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + act_cfg (dict): The activation config for FFNs. + Default: dict(type='GELU'). + norm_cfg (dict): Config dict for normalization layer. 
+ Default: dict(type='LN'). + window_size (int): Window size of LSA. Default: 1. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + qk_scale=None, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + window_size=1, + init_cfg=None): + + super(LSAEncoderLayer, self).__init__(init_cfg=init_cfg) + + self.norm1 = build_norm_layer(norm_cfg, embed_dims, postfix=1)[1] + self.attn = LocallyGroupedSelfAttention(embed_dims, num_heads, + qkv_bias, qk_scale, + attn_drop_rate, drop_rate, + window_size) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims, postfix=2)[1] + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + add_identity=False) + + self.drop_path = build_dropout( + dict(type='DropPath', drop_prob=drop_path_rate) + ) if drop_path_rate > 0. else nn.Identity() + + def forward(self, x, hw_shape): + x = x + self.drop_path(self.attn(self.norm1(x), hw_shape)) + x = x + self.drop_path(self.ffn(self.norm2(x))) + return x + + +@MODELS.register_module() +class PCPVT(BaseModule): + """The backbone of Twins-PCPVT. + + This backbone is the implementation of `Twins: Revisiting the Design + of Spatial Attention in Vision Transformers + `_. + + Args: + arch (dict, str): PCPVT architecture, a str value in arch zoo or a + detailed configuration dict with 7 keys, and the length of all the + values in dict should be the same: + + - depths (List[int]): The number of encoder layers in each stage. + - embed_dims (List[int]): Embedding dimension in each stage. + - patch_sizes (List[int]): The patch sizes in each stage. + - num_heads (List[int]): Numbers of attention head in each stage. + - strides (List[int]): The strides in each stage. + - mlp_ratios (List[int]): The ratios of mlp in each stage. + - sr_ratios (List[int]): The ratios of GSA-encoder layers in each + stage. + + in_channels (int): Number of input channels. Defaults to 3. + out_indices (tuple[int]): Output from which stages. + Defaults to ``(3, )``. + qkv_bias (bool): Enable bias for qkv if True. Defaults to False. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + attn_drop_rate (float): The drop out rate for attention layer. + Defaults to 0.0 + drop_path_rate (float): Stochastic depth rate. Defaults to 0.0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + norm_after_stage(bool, List[bool]): Add extra norm after each stage. + Defaults to False. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
+ + Examples: + >>> from mmpretrain.models import PCPVT + >>> import torch + >>> pcpvt_cfg = {'arch': "small", + >>> 'norm_after_stage': [False, False, False, True]} + >>> model = PCPVT(**pcpvt_cfg) + >>> x = torch.rand(1, 3, 224, 224) + >>> outputs = model(x) + >>> print(outputs[-1].shape) + torch.Size([1, 512, 7, 7]) + >>> pcpvt_cfg['norm_after_stage'] = [True, True, True, True] + >>> pcpvt_cfg['out_indices'] = (0, 1, 2, 3) + >>> model = PCPVT(**pcpvt_cfg) + >>> outputs = model(x) + >>> for feat in outputs: + >>> print(feat.shape) + torch.Size([1, 64, 56, 56]) + torch.Size([1, 128, 28, 28]) + torch.Size([1, 320, 14, 14]) + torch.Size([1, 512, 7, 7]) + """ + arch_zoo = { + **dict.fromkeys(['s', 'small'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [3, 4, 6, 3], + 'num_heads': [1, 2, 5, 8], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [8, 8, 4, 4], + 'sr_ratios': [8, 4, 2, 1]}), + **dict.fromkeys(['b', 'base'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [3, 4, 18, 3], + 'num_heads': [1, 2, 5, 8], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [8, 8, 4, 4], + 'sr_ratios': [8, 4, 2, 1]}), + **dict.fromkeys(['l', 'large'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [3, 8, 27, 3], + 'num_heads': [1, 2, 5, 8], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [8, 8, 4, 4], + 'sr_ratios': [8, 4, 2, 1]}), + } # yapf: disable + + essential_keys = { + 'embed_dims', 'depths', 'num_heads', 'patch_sizes', 'strides', + 'mlp_ratios', 'sr_ratios' + } + + def __init__(self, + arch, + in_channels=3, + out_indices=(3, ), + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN'), + norm_after_stage=False, + init_cfg=None): + super(PCPVT, self).__init__(init_cfg=init_cfg) + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + assert isinstance(arch, dict) and ( + set(arch) == self.essential_keys + ), f'Custom arch needs a dict with keys {self.essential_keys}.' 
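        # A custom ``arch`` must provide exactly the seven keys listed in
        # ``essential_keys``, each a list with one entry per stage; an
        # illustrative sketch (not one of the predefined archs above):
        #   arch = dict(embed_dims=[64, 128, 320, 512], depths=[2, 2, 2, 2],
        #               num_heads=[1, 2, 5, 8], patch_sizes=[4, 2, 2, 2],
        #               strides=[4, 2, 2, 2], mlp_ratios=[8, 8, 4, 4],
        #               sr_ratios=[8, 4, 2, 1])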
+ self.arch_settings = arch + + self.depths = self.arch_settings['depths'] + self.embed_dims = self.arch_settings['embed_dims'] + self.patch_sizes = self.arch_settings['patch_sizes'] + self.strides = self.arch_settings['strides'] + self.mlp_ratios = self.arch_settings['mlp_ratios'] + self.num_heads = self.arch_settings['num_heads'] + self.sr_ratios = self.arch_settings['sr_ratios'] + + self.num_extra_tokens = 0 # there is no cls-token in Twins + self.num_stage = len(self.depths) + for key, value in self.arch_settings.items(): + assert isinstance(value, list) and len(value) == self.num_stage, ( + 'Length of setting item in arch dict must be type of list and' + ' have the same length.') + + # patch_embeds + self.patch_embeds = ModuleList() + self.position_encoding_drops = ModuleList() + self.stages = ModuleList() + + for i in range(self.num_stage): + # use in_channels of the model in the first stage + if i == 0: + stage_in_channels = in_channels + else: + stage_in_channels = self.embed_dims[i - 1] + + self.patch_embeds.append( + PatchEmbed( + in_channels=stage_in_channels, + embed_dims=self.embed_dims[i], + conv_type='Conv2d', + kernel_size=self.patch_sizes[i], + stride=self.strides[i], + padding='corner', + norm_cfg=dict(type='LN'))) + + self.position_encoding_drops.append(nn.Dropout(p=drop_rate)) + + # PEGs + self.position_encodings = ModuleList([ + ConditionalPositionEncoding(embed_dim, embed_dim) + for embed_dim in self.embed_dims + ]) + + # stochastic depth + total_depth = sum(self.depths) + self.dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] # stochastic depth decay rule + cur = 0 + + for k in range(len(self.depths)): + _block = ModuleList([ + GSAEncoderLayer( + embed_dims=self.embed_dims[k], + num_heads=self.num_heads[k], + feedforward_channels=self.mlp_ratios[k] * + self.embed_dims[k], + attn_drop_rate=attn_drop_rate, + drop_rate=drop_rate, + drop_path_rate=self.dpr[cur + i], + num_fcs=2, + qkv_bias=qkv_bias, + act_cfg=dict(type='GELU'), + norm_cfg=norm_cfg, + sr_ratio=self.sr_ratios[k]) for i in range(self.depths[k]) + ]) + self.stages.append(_block) + cur += self.depths[k] + + self.out_indices = out_indices + + assert isinstance(norm_after_stage, (bool, list)) + if isinstance(norm_after_stage, bool): + self.norm_after_stage = [norm_after_stage] * self.num_stage + else: + self.norm_after_stage = norm_after_stage + assert len(self.norm_after_stage) == self.num_stage, \ + (f'Number of norm_after_stage({len(self.norm_after_stage)}) should' + f' be equal to the number of stages({self.num_stage}).') + + for i, has_norm in enumerate(self.norm_after_stage): + assert isinstance(has_norm, bool), 'norm_after_stage should be ' \ + 'bool or List[bool].' + if has_norm and norm_cfg is not None: + norm_layer = build_norm_layer(norm_cfg, self.embed_dims[i])[1] + else: + norm_layer = nn.Identity() + + self.add_module(f'norm_after_stage{i}', norm_layer) + + def init_weights(self): + if self.init_cfg is not None: + super(PCPVT, self).init_weights() + else: + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=.02, bias=0.) + elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)): + constant_init(m, val=1.0, bias=0.) 
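                # The Conv2d branch below applies He (fan-out) normal
                # initialization: std = sqrt(2 / fan_out), with
                # fan_out = kernel_h * kernel_w * out_channels / groups.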
+ elif isinstance(m, nn.Conv2d): + fan_out = m.kernel_size[0] * m.kernel_size[ + 1] * m.out_channels + fan_out //= m.groups + normal_init( + m, mean=0, std=math.sqrt(2.0 / fan_out), bias=0) + + def forward(self, x): + outputs = list() + + b = x.shape[0] + + for i in range(self.num_stage): + x, hw_shape = self.patch_embeds[i](x) + h, w = hw_shape + x = self.position_encoding_drops[i](x) + for j, blk in enumerate(self.stages[i]): + x = blk(x, hw_shape) + if j == 0: + x = self.position_encodings[i](x, hw_shape) + + norm_layer = getattr(self, f'norm_after_stage{i}') + x = norm_layer(x) + x = x.reshape(b, h, w, -1).permute(0, 3, 1, 2).contiguous() + + if i in self.out_indices: + outputs.append(x) + + return tuple(outputs) + + +@MODELS.register_module() +class SVT(PCPVT): + """The backbone of Twins-SVT. + + This backbone is the implementation of `Twins: Revisiting the Design + of Spatial Attention in Vision Transformers + `_. + + Args: + arch (dict, str): SVT architecture, a str value in arch zoo or a + detailed configuration dict with 8 keys, and the length of all the + values in dict should be the same: + + - depths (List[int]): The number of encoder layers in each stage. + - embed_dims (List[int]): Embedding dimension in each stage. + - patch_sizes (List[int]): The patch sizes in each stage. + - num_heads (List[int]): Numbers of attention head in each stage. + - strides (List[int]): The strides in each stage. + - mlp_ratios (List[int]): The ratios of mlp in each stage. + - sr_ratios (List[int]): The ratios of GSA-encoder layers in each + stage. + - windiow_sizes (List[int]): The window sizes in LSA-encoder layers + in each stage. + + in_channels (int): Number of input channels. Defaults to 3. + out_indices (tuple[int]): Output from which stages. + Defaults to (3, ). + qkv_bias (bool): Enable bias for qkv if True. Defaults to False. + drop_rate (float): Dropout rate. Defaults to 0. + attn_drop_rate (float): Dropout ratio of attention weight. + Defaults to 0.0 + drop_path_rate (float): Stochastic depth rate. Defaults to 0.2. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + norm_after_stage(bool, List[bool]): Add extra norm after each stage. + Defaults to False. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
+ + Examples: + >>> from mmpretrain.models import SVT + >>> import torch + >>> svt_cfg = {'arch': "small", + >>> 'norm_after_stage': [False, False, False, True]} + >>> model = SVT(**svt_cfg) + >>> x = torch.rand(1, 3, 224, 224) + >>> outputs = model(x) + >>> print(outputs[-1].shape) + torch.Size([1, 512, 7, 7]) + >>> svt_cfg["out_indices"] = (0, 1, 2, 3) + >>> svt_cfg["norm_after_stage"] = [True, True, True, True] + >>> model = SVT(**svt_cfg) + >>> output = model(x) + >>> for feat in output: + >>> print(feat.shape) + torch.Size([1, 64, 56, 56]) + torch.Size([1, 128, 28, 28]) + torch.Size([1, 320, 14, 14]) + torch.Size([1, 512, 7, 7]) + """ + arch_zoo = { + **dict.fromkeys(['s', 'small'], + {'embed_dims': [64, 128, 256, 512], + 'depths': [2, 2, 10, 4], + 'num_heads': [2, 4, 8, 16], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [4, 4, 4, 4], + 'sr_ratios': [8, 4, 2, 1], + 'window_sizes': [7, 7, 7, 7]}), + **dict.fromkeys(['b', 'base'], + {'embed_dims': [96, 192, 384, 768], + 'depths': [2, 2, 18, 2], + 'num_heads': [3, 6, 12, 24], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [4, 4, 4, 4], + 'sr_ratios': [8, 4, 2, 1], + 'window_sizes': [7, 7, 7, 7]}), + **dict.fromkeys(['l', 'large'], + {'embed_dims': [128, 256, 512, 1024], + 'depths': [2, 2, 18, 2], + 'num_heads': [4, 8, 16, 32], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [4, 4, 4, 4], + 'sr_ratios': [8, 4, 2, 1], + 'window_sizes': [7, 7, 7, 7]}), + } # yapf: disable + + essential_keys = { + 'embed_dims', 'depths', 'num_heads', 'patch_sizes', 'strides', + 'mlp_ratios', 'sr_ratios', 'window_sizes' + } + + def __init__(self, + arch, + in_channels=3, + out_indices=(3, ), + qkv_bias=False, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.0, + norm_cfg=dict(type='LN'), + norm_after_stage=False, + init_cfg=None): + super(SVT, self).__init__(arch, in_channels, out_indices, qkv_bias, + drop_rate, attn_drop_rate, drop_path_rate, + norm_cfg, norm_after_stage, init_cfg) + + self.window_sizes = self.arch_settings['window_sizes'] + + for k in range(self.num_stage): + for i in range(self.depths[k]): + # in even-numbered layers of each stage, replace GSA with LSA + if i % 2 == 0: + ffn_channels = self.mlp_ratios[k] * self.embed_dims[k] + self.stages[k][i] = \ + LSAEncoderLayer( + embed_dims=self.embed_dims[k], + num_heads=self.num_heads[k], + feedforward_channels=ffn_channels, + drop_rate=drop_rate, + norm_cfg=norm_cfg, + attn_drop_rate=attn_drop_rate, + drop_path_rate=self.dpr[sum(self.depths[:k])+i], + qkv_bias=qkv_bias, + window_size=self.window_sizes[k]) diff --git a/mmpretrain/models/backbones/van.py b/mmpretrain/models/backbones/van.py new file mode 100644 index 0000000..c34dc33 --- /dev/null +++ b/mmpretrain/models/backbones/van.py @@ -0,0 +1,434 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmcv.cnn import Conv2d, build_activation_layer, build_norm_layer +from mmcv.cnn.bricks import DropPath +from mmcv.cnn.bricks.transformer import PatchEmbed +from mmengine.model import BaseModule, ModuleList +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + + +class MixFFN(BaseModule): + """An implementation of MixFFN of VAN. Refer to + mmdetection/mmdet/models/backbones/pvt.py. + + The differences between MixFFN & FFN: + 1. Use 1X1 Conv to replace Linear layer. + 2. 
Introduce 3X3 Depth-wise Conv to encode positional information. + + Args: + embed_dims (int): The feature dimension. Same as + `MultiheadAttention`. + feedforward_channels (int): The hidden dimension of FFNs. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='GELU'). + ffn_drop (float, optional): Probability of an element to be + zeroed in FFN. Default 0.0. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims, + feedforward_channels, + act_cfg=dict(type='GELU'), + ffn_drop=0., + init_cfg=None): + super(MixFFN, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.act_cfg = act_cfg + + self.fc1 = Conv2d( + in_channels=embed_dims, + out_channels=feedforward_channels, + kernel_size=1) + self.dwconv = Conv2d( + in_channels=feedforward_channels, + out_channels=feedforward_channels, + kernel_size=3, + stride=1, + padding=1, + bias=True, + groups=feedforward_channels) + self.act = build_activation_layer(act_cfg) + self.fc2 = Conv2d( + in_channels=feedforward_channels, + out_channels=embed_dims, + kernel_size=1) + self.drop = nn.Dropout(ffn_drop) + + def forward(self, x): + x = self.fc1(x) + x = self.dwconv(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class LKA(BaseModule): + """Large Kernel Attention(LKA) of VAN. + + .. code:: text + DW_conv (depth-wise convolution) + | + | + DW_D_conv (depth-wise dilation convolution) + | + | + Transition Convolution (1×1 convolution) + + Args: + embed_dims (int): Number of input channels. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, embed_dims, init_cfg=None): + super(LKA, self).__init__(init_cfg=init_cfg) + + # a spatial local convolution (depth-wise convolution) + self.DW_conv = Conv2d( + in_channels=embed_dims, + out_channels=embed_dims, + kernel_size=5, + padding=2, + groups=embed_dims) + + # a spatial long-range convolution (depth-wise dilation convolution) + self.DW_D_conv = Conv2d( + in_channels=embed_dims, + out_channels=embed_dims, + kernel_size=7, + stride=1, + padding=9, + groups=embed_dims, + dilation=3) + + self.conv1 = Conv2d( + in_channels=embed_dims, out_channels=embed_dims, kernel_size=1) + + def forward(self, x): + u = x.clone() + attn = self.DW_conv(x) + attn = self.DW_D_conv(attn) + attn = self.conv1(attn) + + return u * attn + + +class SpatialAttention(BaseModule): + """Basic attention module in VANBloack. + + Args: + embed_dims (int): Number of input channels. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='GELU'). + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, embed_dims, act_cfg=dict(type='GELU'), init_cfg=None): + super(SpatialAttention, self).__init__(init_cfg=init_cfg) + + self.proj_1 = Conv2d( + in_channels=embed_dims, out_channels=embed_dims, kernel_size=1) + self.activation = build_activation_layer(act_cfg) + self.spatial_gating_unit = LKA(embed_dims) + self.proj_2 = Conv2d( + in_channels=embed_dims, out_channels=embed_dims, kernel_size=1) + + def forward(self, x): + shorcut = x.clone() + x = self.proj_1(x) + x = self.activation(x) + x = self.spatial_gating_unit(x) + x = self.proj_2(x) + x = x + shorcut + return x + + +class VANBlock(BaseModule): + """A block of VAN. + + Args: + embed_dims (int): Number of input channels. 
+ ffn_ratio (float): The expansion ratio of feedforward network hidden + layer channels. Defaults to 4. + drop_rate (float): Dropout rate after embedding. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.1. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='GELU'). + layer_scale_init_value (float): Init value for Layer Scale. + Defaults to 1e-2. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims, + ffn_ratio=4., + drop_rate=0., + drop_path_rate=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='BN', eps=1e-5), + layer_scale_init_value=1e-2, + init_cfg=None): + super(VANBlock, self).__init__(init_cfg=init_cfg) + self.out_channels = embed_dims + + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = SpatialAttention(embed_dims, act_cfg=act_cfg) + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0. else nn.Identity() + + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + mlp_hidden_dim = int(embed_dims * ffn_ratio) + self.mlp = MixFFN( + embed_dims=embed_dims, + feedforward_channels=mlp_hidden_dim, + act_cfg=act_cfg, + ffn_drop=drop_rate) + self.layer_scale_1 = nn.Parameter( + layer_scale_init_value * torch.ones((embed_dims)), + requires_grad=True) if layer_scale_init_value > 0 else None + self.layer_scale_2 = nn.Parameter( + layer_scale_init_value * torch.ones((embed_dims)), + requires_grad=True) if layer_scale_init_value > 0 else None + + def forward(self, x): + identity = x + x = self.norm1(x) + x = self.attn(x) + if self.layer_scale_1 is not None: + x = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * x + x = identity + self.drop_path(x) + + identity = x + x = self.norm2(x) + x = self.mlp(x) + if self.layer_scale_2 is not None: + x = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * x + x = identity + self.drop_path(x) + + return x + + +class VANPatchEmbed(PatchEmbed): + """Image to Patch Embedding of VAN. + + The differences between VANPatchEmbed & PatchEmbed: + 1. Use BN. + 2. Do not use 'flatten' and 'transpose'. + """ + + def __init__(self, *args, norm_cfg=dict(type='BN'), **kwargs): + super(VANPatchEmbed, self).__init__(*args, norm_cfg=norm_cfg, **kwargs) + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, H, W). In most case, C is 3. + Returns: + tuple: Contains merged results and its spatial shape. + - x (Tensor): Has shape (B, out_h * out_w, embed_dims) + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_h, out_w). + """ + + if self.adaptive_padding: + x = self.adaptive_padding(x) + + x = self.projection(x) + out_size = (x.shape[2], x.shape[3]) + if self.norm is not None: + x = self.norm(x) + return x, out_size + + +@MODELS.register_module() +class VAN(BaseBackbone): + """Visual Attention Network. + + A PyTorch implement of : `Visual Attention Network + `_ + + Inspiration from + https://github.com/Visual-Attention-Network/VAN-Classification + + Args: + arch (str | dict): Visual Attention Network architecture. + If use string, choose from 'tiny', 'small', 'base' and 'large'. + If use dict, it should have below keys: + + - **embed_dims** (List[int]): The dimensions of embedding. + - **depths** (List[int]): The number of blocks in each stage. + - **ffn_ratios** (List[int]): The number of expansion ratio of + feedforward network hidden layer channels. + + Defaults to 'tiny'. + patch_sizes (List[int | tuple]): The patch size in patch embeddings. + Defaults to [7, 3, 3, 3]. 
+ in_channels (int): The num of input channels. Defaults to 3. + drop_rate (float): Dropout rate after embedding. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.1. + out_indices (Sequence[int]): Output from which stages. + Default: ``(3, )``. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + norm_cfg (dict): Config dict for normalization layer for all output + features. Defaults to ``dict(type='LN')`` + block_cfgs (Sequence[dict] | dict): The extra config of each block. + Defaults to empty dicts. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + + Examples: + >>> from mmpretrain.models import VAN + >>> import torch + >>> cfg = dict(arch='tiny') + >>> model = VAN(**cfg) + >>> inputs = torch.rand(1, 3, 224, 224) + >>> outputs = model(inputs) + >>> for out in outputs: + >>> print(out.size()) + (1, 256, 7, 7) + """ + arch_zoo = { + **dict.fromkeys(['t', 'tiny'], + {'embed_dims': [32, 64, 160, 256], + 'depths': [3, 3, 5, 2], + 'ffn_ratios': [8, 8, 4, 4]}), + **dict.fromkeys(['s', 'small'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [2, 2, 4, 2], + 'ffn_ratios': [8, 8, 4, 4]}), + **dict.fromkeys(['b', 'base'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [3, 3, 12, 3], + 'ffn_ratios': [8, 8, 4, 4]}), + **dict.fromkeys(['l', 'large'], + {'embed_dims': [64, 128, 320, 512], + 'depths': [3, 5, 27, 3], + 'ffn_ratios': [8, 8, 4, 4]}), + } # yapf: disable + + def __init__(self, + arch='tiny', + patch_sizes=[7, 3, 3, 3], + in_channels=3, + drop_rate=0., + drop_path_rate=0., + out_indices=(3, ), + frozen_stages=-1, + norm_eval=False, + norm_cfg=dict(type='LN'), + block_cfgs=dict(), + init_cfg=None): + super(VAN, self).__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = {'embed_dims', 'depths', 'ffn_ratios'} + assert isinstance(arch, dict) and set(arch) == essential_keys, \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.depths = self.arch_settings['depths'] + self.ffn_ratios = self.arch_settings['ffn_ratios'] + self.num_stages = len(self.depths) + self.out_indices = out_indices + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + + total_depth = sum(self.depths) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] # stochastic depth decay rule + + cur_block_idx = 0 + for i, depth in enumerate(self.depths): + patch_embed = VANPatchEmbed( + in_channels=in_channels if i == 0 else self.embed_dims[i - 1], + input_size=None, + embed_dims=self.embed_dims[i], + kernel_size=patch_sizes[i], + stride=patch_sizes[i] // 2 + 1, + padding=(patch_sizes[i] // 2, patch_sizes[i] // 2), + norm_cfg=dict(type='BN')) + + blocks = ModuleList([ + VANBlock( + embed_dims=self.embed_dims[i], + ffn_ratio=self.ffn_ratios[i], + drop_rate=drop_rate, + drop_path_rate=dpr[cur_block_idx + j], + **block_cfgs) for j in range(depth) + ]) + cur_block_idx += depth + norm = build_norm_layer(norm_cfg, self.embed_dims[i])[1] + + self.add_module(f'patch_embed{i + 1}', patch_embed) + 
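            # With the default patch_sizes [7, 3, 3, 3], the strides computed
            # above (patch_size // 2 + 1) are [4, 2, 2, 2], so a 224x224 input
            # yields stage resolutions of 56x56, 28x28, 14x14 and 7x7.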
self.add_module(f'blocks{i + 1}', blocks) + self.add_module(f'norm{i + 1}', norm) + + def train(self, mode=True): + super(VAN, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + def _freeze_stages(self): + for i in range(0, self.frozen_stages + 1): + # freeze patch embed + m = getattr(self, f'patch_embed{i + 1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + # freeze blocks + m = getattr(self, f'blocks{i + 1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + # freeze norm + m = getattr(self, f'norm{i + 1}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + outs = [] + for i in range(self.num_stages): + patch_embed = getattr(self, f'patch_embed{i + 1}') + blocks = getattr(self, f'blocks{i + 1}') + norm = getattr(self, f'norm{i + 1}') + x, hw_shape = patch_embed(x) + for block in blocks: + x = block(x) + x = x.flatten(2).transpose(1, 2) + x = norm(x) + x = x.reshape(-1, *hw_shape, + block.out_channels).permute(0, 3, 1, 2).contiguous() + if i in self.out_indices: + outs.append(x) + + return tuple(outs) diff --git a/mmpretrain/models/backbones/vgg.py b/mmpretrain/models/backbones/vgg.py new file mode 100644 index 0000000..026b916 --- /dev/null +++ b/mmpretrain/models/backbones/vgg.py @@ -0,0 +1,183 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.registry import MODELS +from .base_backbone import BaseBackbone + + +def make_vgg_layer(in_channels, + out_channels, + num_blocks, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + dilation=1, + with_norm=False, + ceil_mode=False): + layers = [] + for _ in range(num_blocks): + layer = ConvModule( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + dilation=dilation, + padding=dilation, + bias=True, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + layers.append(layer) + in_channels = out_channels + layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode)) + + return layers + + +@MODELS.register_module() +class VGG(BaseBackbone): + """VGG backbone. + + Args: + depth (int): Depth of vgg, from {11, 13, 16, 19}. + with_norm (bool): Use BatchNorm or not. + num_classes (int): number of classes for classification. + num_stages (int): VGG stages, normally 5. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int], optional): Output from which stages. + When it is None, the default behavior depends on whether + num_classes is specified. If num_classes <= 0, the default value is + (4, ), output the last feature map before classifier. If + num_classes > 0, the default value is (5, ), output the + classification score. Default: None. + frozen_stages (int): Stages to be frozen (all param fixed). -1 means + not freezing any parameters. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Default: False. + ceil_mode (bool): Whether to use ceil_mode of MaxPool. Default: False. + with_last_pool (bool): Whether to keep the last pooling before + classifier. Default: True. + """ + + # Parameters to build layers. Each element specifies the number of conv in + # each stage. 
For example, VGG11 contains 11 layers with learnable + # parameters. 11 is computed as 11 = (1 + 1 + 2 + 2 + 2) + 3, + # where 3 indicates the last three fully-connected layers. + arch_settings = { + 11: (1, 1, 2, 2, 2), + 13: (2, 2, 2, 2, 2), + 16: (2, 2, 3, 3, 3), + 19: (2, 2, 4, 4, 4) + } + + def __init__(self, + depth, + num_classes=-1, + num_stages=5, + dilations=(1, 1, 1, 1, 1), + out_indices=None, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=None, + act_cfg=dict(type='ReLU'), + norm_eval=False, + ceil_mode=False, + with_last_pool=True, + init_cfg=[ + dict(type='Kaiming', layer=['Conv2d']), + dict(type='Constant', val=1., layer=['_BatchNorm']), + dict(type='Normal', std=0.01, layer=['Linear']) + ]): + super(VGG, self).__init__(init_cfg) + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for vgg') + assert num_stages >= 1 and num_stages <= 5 + stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + assert len(dilations) == num_stages + + self.num_classes = num_classes + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + with_norm = norm_cfg is not None + + if out_indices is None: + out_indices = (5, ) if num_classes > 0 else (4, ) + assert max(out_indices) <= num_stages + self.out_indices = out_indices + + self.in_channels = 3 + start_idx = 0 + vgg_layers = [] + self.range_sub_modules = [] + for i, num_blocks in enumerate(self.stage_blocks): + num_modules = num_blocks + 1 + end_idx = start_idx + num_modules + dilation = dilations[i] + out_channels = 64 * 2**i if i < 4 else 512 + vgg_layer = make_vgg_layer( + self.in_channels, + out_channels, + num_blocks, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + dilation=dilation, + with_norm=with_norm, + ceil_mode=ceil_mode) + vgg_layers.extend(vgg_layer) + self.in_channels = out_channels + self.range_sub_modules.append([start_idx, end_idx]) + start_idx = end_idx + if not with_last_pool: + vgg_layers.pop(-1) + self.range_sub_modules[-1][1] -= 1 + self.module_name = 'features' + self.add_module(self.module_name, nn.Sequential(*vgg_layers)) + + if self.num_classes > 0: + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + + def forward(self, x): + outs = [] + vgg_layers = getattr(self, self.module_name) + for i in range(len(self.stage_blocks)): + for j in range(*self.range_sub_modules[i]): + vgg_layer = vgg_layers[j] + x = vgg_layer(x) + if i in self.out_indices: + outs.append(x) + if self.num_classes > 0: + x = x.view(x.size(0), -1) + x = self.classifier(x) + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + vgg_layers = getattr(self, self.module_name) + for i in range(self.frozen_stages): + for j in range(*self.range_sub_modules[i]): + m = vgg_layers[j] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(VGG, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpretrain/models/backbones/vig.py b/mmpretrain/models/backbones/vig.py new file mode 100644 index 0000000..c1a7879 --- /dev/null +++ b/mmpretrain/models/backbones/vig.py @@ -0,0 +1,852 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
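# The sin-cos helpers below build a dense relative-position term that is added
# to the pairwise distances when constructing the KNN graph. For example, with
# the 'tiny' Vig setting (channels=192) and the 14x14 token grid produced by
# the stem, get_2d_relative_pos_embed(192, 14) returns a (196, 196) array
# (196 = 14 * 14 tokens).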
+# modified from +# https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_activation_layer +from mmcv.cnn.bricks import DropPath +from mmengine.model import ModuleList, Sequential +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.registry import MODELS +from ..utils import build_norm_layer + + +def get_2d_relative_pos_embed(embed_dim, grid_size): + """ + grid_size: int of the grid height and width + return: + pos_embed: [grid_size*grid_size, grid_size*grid_size] + """ + pos_embed = get_2d_sincos_pos_embed(embed_dim, grid_size) + relative_pos = 2 * np.matmul(pos_embed, + pos_embed.transpose()) / pos_embed.shape[1] + return relative_pos + + +def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False): + """ + grid_size: int of the grid height and width + return: + pos_embed: [grid_size*grid_size, embed_dim] or + [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + grid_h = np.arange(grid_size, dtype=np.float32) + grid_w = np.arange(grid_size, dtype=np.float32) + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) + + grid = grid.reshape([2, 1, grid_size, grid_size]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + if cls_token: + pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], + axis=0) + return pos_embed + + +def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): + assert embed_dim % 2 == 0 + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, + grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, + grid[1]) # (H*W, D/2) + + emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) + return emb + + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position + pos: a list of positions to be encoded: size (M,) + out: (M, D) + """ + assert embed_dim % 2 == 0 + omega = np.arange(embed_dim // 2, dtype=np.float32) + omega /= embed_dim / 2. + omega = 1. / 10000**omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = np.einsum('m,d->md', pos, omega) # (M, D/2), outer product + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) + return emb + + +def xy_pairwise_distance(x, y): + """Compute pairwise distance of a point cloud. + + Args: + x: tensor (batch_size, num_points, num_dims) + y: tensor (batch_size, num_points, num_dims) + Returns: + pairwise distance: (batch_size, num_points, num_points) + """ + with torch.no_grad(): + xy_inner = -2 * torch.matmul(x, y.transpose(2, 1)) + x_square = torch.sum(torch.mul(x, x), dim=-1, keepdim=True) + y_square = torch.sum(torch.mul(y, y), dim=-1, keepdim=True) + return x_square + xy_inner + y_square.transpose(2, 1) + + +def xy_dense_knn_matrix(x, y, k=16, relative_pos=None): + """Get KNN based on the pairwise distance. 
+ + Args: + x: (batch_size, num_dims, num_points, 1) + y: (batch_size, num_dims, num_points, 1) + k: int + relative_pos:Whether to use relative_pos + Returns: + nearest neighbors: + (batch_size, num_points, k) (batch_size, num_points, k) + """ + with torch.no_grad(): + x = x.transpose(2, 1).squeeze(-1) + y = y.transpose(2, 1).squeeze(-1) + batch_size, n_points, n_dims = x.shape + dist = xy_pairwise_distance(x.detach(), y.detach()) + if relative_pos is not None: + dist += relative_pos + _, nn_idx = torch.topk(-dist, k=k) + center_idx = torch.arange( + 0, n_points, device=x.device).repeat(batch_size, k, + 1).transpose(2, 1) + return torch.stack((nn_idx, center_idx), dim=0) + + +class DenseDilated(nn.Module): + """Find dilated neighbor from neighbor list. + + edge_index: (2, batch_size, num_points, k) + """ + + def __init__(self, k=9, dilation=1, use_stochastic=False, epsilon=0.0): + super(DenseDilated, self).__init__() + self.dilation = dilation + self.use_stochastic = use_stochastic + self.epsilon = epsilon + self.k = k + + def forward(self, edge_index): + if self.use_stochastic: + if torch.rand(1) < self.epsilon and self.training: + num = self.k * self.dilation + randnum = torch.randperm(num)[:self.k] + edge_index = edge_index[:, :, :, randnum] + else: + edge_index = edge_index[:, :, :, ::self.dilation] + else: + edge_index = edge_index[:, :, :, ::self.dilation] + return edge_index + + +class DenseDilatedKnnGraph(nn.Module): + """Find the neighbors' indices based on dilated knn.""" + + def __init__(self, k=9, dilation=1, use_stochastic=False, epsilon=0.0): + super(DenseDilatedKnnGraph, self).__init__() + self.dilation = dilation + self.use_stochastic = use_stochastic + self.epsilon = epsilon + self.k = k + self._dilated = DenseDilated(k, dilation, use_stochastic, epsilon) + + def forward(self, x, y=None, relative_pos=None): + if y is not None: + x = F.normalize(x, p=2.0, dim=1) + y = F.normalize(y, p=2.0, dim=1) + + edge_index = xy_dense_knn_matrix(x, y, self.k * self.dilation, + relative_pos) + else: + x = F.normalize(x, p=2.0, dim=1) + y = x.clone() + + edge_index = xy_dense_knn_matrix(x, y, self.k * self.dilation, + relative_pos) + return self._dilated(edge_index) + + +class BasicConv(Sequential): + + def __init__(self, + channels, + act_cfg, + norm_cfg=None, + graph_conv_bias=True, + drop=0.): + m = [] + for i in range(1, len(channels)): + m.append( + nn.Conv2d( + channels[i - 1], + channels[i], + 1, + bias=graph_conv_bias, + groups=4)) + if norm_cfg is not None: + m.append(build_norm_layer(norm_cfg, channels[-1])) + if act_cfg is not None: + m.append(build_activation_layer(act_cfg)) + if drop > 0: + m.append(nn.Dropout2d(drop)) + + super(BasicConv, self).__init__(*m) + + +def batched_index_select(x, idx): + r"""fetches neighbors features from a given neighbor idx + + Args: + x (Tensor): input feature Tensor + :math: + `\mathbf{X} \in \mathbb{R}^{B \times C \times N \times 1}`. + idx (Tensor): edge_idx + :math:`\mathbf{X} \in \mathbb{R}^{B \times N \times l}`. + Returns: + Tensor: output neighbors features + :math:`\mathbf{X} \in \mathbb{R}^{B \times C \times N \times k}`. 
+ """ + batch_size, num_dims, num_vertices_reduced = x.shape[:3] + _, num_vertices, k = idx.shape + idx_base = torch.arange( + 0, batch_size, device=idx.device).view(-1, 1, 1) * num_vertices_reduced + idx = idx + idx_base + idx = idx.contiguous().view(-1) + + x = x.transpose(2, 1) + feature = x.contiguous().view(batch_size * num_vertices_reduced, + -1)[idx, :] + feature = feature.view(batch_size, num_vertices, k, + num_dims).permute(0, 3, 1, 2).contiguous() + return feature + + +class MRConv2d(nn.Module): + """Max-Relative Graph Convolution (Paper: https://arxiv.org/abs/1904.03751) + for dense data type.""" + + def __init__(self, + in_channels, + out_channels, + act_cfg, + norm_cfg=None, + graph_conv_bias=True): + super(MRConv2d, self).__init__() + self.nn = BasicConv([in_channels * 2, out_channels], act_cfg, norm_cfg, + graph_conv_bias) + + def forward(self, x, edge_index, y=None): + x_i = batched_index_select(x, edge_index[1]) + if y is not None: + x_j = batched_index_select(y, edge_index[0]) + else: + x_j = batched_index_select(x, edge_index[0]) + x_j, _ = torch.max(x_j - x_i, -1, keepdim=True) + b, c, n, _ = x.shape + x = torch.cat([x.unsqueeze(2), x_j.unsqueeze(2)], + dim=2).reshape(b, 2 * c, n, _) + return self.nn(x) + + +class EdgeConv2d(nn.Module): + """Edge convolution layer (with activation, batch normalization) for dense + data type.""" + + def __init__(self, + in_channels, + out_channels, + act_cfg, + norm_cfg=None, + graph_conv_bias=True): + super(EdgeConv2d, self).__init__() + self.nn = BasicConv([in_channels * 2, out_channels], act_cfg, norm_cfg, + graph_conv_bias) + + def forward(self, x, edge_index, y=None): + x_i = batched_index_select(x, edge_index[1]) + if y is not None: + x_j = batched_index_select(y, edge_index[0]) + else: + x_j = batched_index_select(x, edge_index[0]) + max_value, _ = torch.max( + self.nn(torch.cat([x_i, x_j - x_i], dim=1)), -1, keepdim=True) + return max_value + + +class GraphSAGE(nn.Module): + """GraphSAGE Graph Convolution (Paper: https://arxiv.org/abs/1706.02216) + for dense data type.""" + + def __init__(self, + in_channels, + out_channels, + act_cfg, + norm_cfg=None, + graph_conv_bias=True): + super(GraphSAGE, self).__init__() + self.nn1 = BasicConv([in_channels, in_channels], act_cfg, norm_cfg, + graph_conv_bias) + self.nn2 = BasicConv([in_channels * 2, out_channels], act_cfg, + norm_cfg, graph_conv_bias) + + def forward(self, x, edge_index, y=None): + if y is not None: + x_j = batched_index_select(y, edge_index[0]) + else: + x_j = batched_index_select(x, edge_index[0]) + x_j, _ = torch.max(self.nn1(x_j), -1, keepdim=True) + return self.nn2(torch.cat([x, x_j], dim=1)) + + +class GINConv2d(nn.Module): + """GIN Graph Convolution (Paper: https://arxiv.org/abs/1810.00826) for + dense data type.""" + + def __init__(self, + in_channels, + out_channels, + act_cfg, + norm_cfg=None, + graph_conv_bias=True): + super(GINConv2d, self).__init__() + self.nn = BasicConv([in_channels, out_channels], act_cfg, norm_cfg, + graph_conv_bias) + eps_init = 0.0 + self.eps = nn.Parameter(torch.Tensor([eps_init])) + + def forward(self, x, edge_index, y=None): + if y is not None: + x_j = batched_index_select(y, edge_index[0]) + else: + x_j = batched_index_select(x, edge_index[0]) + x_j = torch.sum(x_j, -1, keepdim=True) + return self.nn((1 + self.eps) * x + x_j) + + +class GraphConv2d(nn.Module): + """Static graph convolution layer.""" + + def __init__(self, + in_channels, + out_channels, + graph_conv_type, + act_cfg, + norm_cfg=None, + graph_conv_bias=True): + 
super(GraphConv2d, self).__init__() + if graph_conv_type == 'edge': + self.gconv = EdgeConv2d(in_channels, out_channels, act_cfg, + norm_cfg, graph_conv_bias) + elif graph_conv_type == 'mr': + self.gconv = MRConv2d(in_channels, out_channels, act_cfg, norm_cfg, + graph_conv_bias) + elif graph_conv_type == 'sage': + self.gconv = GraphSAGE(in_channels, out_channels, act_cfg, + norm_cfg, graph_conv_bias) + elif graph_conv_type == 'gin': + self.gconv = GINConv2d(in_channels, out_channels, act_cfg, + norm_cfg, graph_conv_bias) + else: + raise NotImplementedError( + 'graph_conv_type:{} is not supported'.format(graph_conv_type)) + + def forward(self, x, edge_index, y=None): + return self.gconv(x, edge_index, y) + + +class DyGraphConv2d(GraphConv2d): + """Dynamic graph convolution layer.""" + + def __init__(self, + in_channels, + out_channels, + k=9, + dilation=1, + graph_conv_type='mr', + act_cfg=dict(type='GELU'), + norm_cfg=None, + graph_conv_bias=True, + use_stochastic=False, + epsilon=0.2, + r=1): + super(DyGraphConv2d, + self).__init__(in_channels, out_channels, graph_conv_type, + act_cfg, norm_cfg, graph_conv_bias) + self.k = k + self.d = dilation + self.r = r + self.dilated_knn_graph = DenseDilatedKnnGraph(k, dilation, + use_stochastic, epsilon) + + def forward(self, x, relative_pos=None): + B, C, H, W = x.shape + y = None + if self.r > 1: + y = F.avg_pool2d(x, self.r, self.r) + y = y.reshape(B, C, -1, 1).contiguous() + x = x.reshape(B, C, -1, 1).contiguous() + edge_index = self.dilated_knn_graph(x, y, relative_pos) + x = super(DyGraphConv2d, self).forward(x, edge_index, y) + return x.reshape(B, -1, H, W).contiguous() + + +class Grapher(nn.Module): + """Grapher module with graph convolution and fc layers.""" + + def __init__(self, + in_channels, + k=9, + dilation=1, + graph_conv_type='mr', + act_cfg=dict(type='GELU'), + norm_cfg=None, + graph_conv_bias=True, + use_stochastic=False, + epsilon=0.2, + r=1, + n=196, + drop_path=0.0, + relative_pos=False): + super(Grapher, self).__init__() + self.channels = in_channels + self.n = n + self.r = r + self.fc1 = Sequential( + nn.Conv2d(in_channels, in_channels, 1, stride=1, padding=0), + build_norm_layer(dict(type='BN'), in_channels), + ) + self.graph_conv = DyGraphConv2d(in_channels, in_channels * 2, k, + dilation, graph_conv_type, act_cfg, + norm_cfg, graph_conv_bias, + use_stochastic, epsilon, r) + self.fc2 = Sequential( + nn.Conv2d(in_channels * 2, in_channels, 1, stride=1, padding=0), + build_norm_layer(dict(type='BN'), in_channels), + ) + self.drop_path = DropPath( + drop_path) if drop_path > 0. 
else nn.Identity() + + self.relative_pos = None + if relative_pos: + relative_pos_tensor = torch.from_numpy( + np.float32( + get_2d_relative_pos_embed(in_channels, int( + n**0.5)))).unsqueeze(0).unsqueeze(1) + relative_pos_tensor = F.interpolate( + relative_pos_tensor, + size=(n, n // (r * r)), + mode='bicubic', + align_corners=False) + self.relative_pos = nn.Parameter( + -relative_pos_tensor.squeeze(1), requires_grad=False) + + def _get_relative_pos(self, relative_pos, H, W): + if relative_pos is None or H * W == self.n: + return relative_pos + else: + N = H * W + N_reduced = N // (self.r * self.r) + return F.interpolate( + relative_pos.unsqueeze(0), size=(N, N_reduced), + mode='bicubic').squeeze(0) + + def forward(self, x): + B, C, H, W = x.shape + relative_pos = self._get_relative_pos(self.relative_pos, H, W) + shortcut = x + x = self.fc1(x) + x = self.graph_conv(x, relative_pos) + x = self.fc2(x) + x = self.drop_path(x) + shortcut + return x + + +class FFN(nn.Module): + """"out_features = out_features or in_features\n + hidden_features = hidden_features or in_features""" + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_cfg=dict(type='GELU'), + drop_path=0.0): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = Sequential( + nn.Conv2d(in_features, hidden_features, 1, stride=1, padding=0), + build_norm_layer(dict(type='BN'), hidden_features), + ) + self.act = build_activation_layer(act_cfg) + self.fc2 = Sequential( + nn.Conv2d(hidden_features, out_features, 1, stride=1, padding=0), + build_norm_layer(dict(type='BN'), out_features), + ) + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x): + shortcut = x + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + x = self.drop_path(x) + shortcut + return x + + +@MODELS.register_module() +class Vig(BaseBackbone): + """Vision GNN backbone. + + A PyTorch implementation of `Vision GNN: An Image is Worth Graph of Nodes + `_. + + Modified from the official implementation + https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch + + Args: + arch(str): Vision GNN architecture, + choose from 'tiny', 'small' and 'base'. + in_channels (int): The number of channels of input images. + Defaults to 3. + k (int): The number of KNN's k. Defaults to 9. + out_indices (Sequence | int): Output from which blocks. + Defaults to -1, means the last block. + act_cfg (dict): The config of activative functions. + Defaults to ``dict(type='GELU'))``. + norm_cfg (dict): The config of normalization layers. + Defaults to ``dict(type='BN', eps=1e-6)``. + graph_conv_bias (bool): Whether to use bias in the convolution + layers in Grapher. Defaults to True. + graph_conv_type (str): The type of graph convolution,choose + from 'edge', 'mr', 'sage' and 'gin'. Defaults to 'mr'. + epsilon (float): Probability of random arrangement in KNN. It only + works when ``use_dilation=True`` and ``use_stochastic=True``. + Defaults to 0.2. + use_dilation(bool): Whether to use dilation in KNN. Defaults to True. + use_stochastic(bool): Whether to use stochastic in KNN. + Defaults to False. + drop_path (float): stochastic depth rate. Default 0.0 + relative_pos(bool): Whether to use relative position embedding. + Defaults to False. + norm_eval (bool): Whether to set the normalization layer to eval mode. + Defaults to False. + frozen_stages (int): Blocks to be frozen (all param fixed). 
+ Defaults to 0, which means not freezing any parameters. + init_cfg (dict, optional): The initialization configs. + Defaults to None. + """ # noqa: E501 + + arch_settings = { + 'tiny': dict(num_blocks=12, channels=192), + 'small': dict(num_blocks=16, channels=320), + 'base': dict(num_blocks=16, channels=640), + } + + def __init__(self, + arch, + in_channels=3, + k=9, + out_indices=-1, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='BN'), + graph_conv_bias=True, + graph_conv_type='mr', + epsilon=0.2, + use_dilation=True, + use_stochastic=False, + drop_path=0., + relative_pos=False, + norm_eval=False, + frozen_stages=0, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + arch = self.arch_settings[arch] + self.num_blocks = arch['num_blocks'] + channels = arch['channels'] + + if isinstance(out_indices, int): + out_indices = [out_indices] + elif isinstance(out_indices, tuple): + out_indices = list(out_indices) + elif not isinstance(out_indices, list): + raise TypeError('"out_indices" must by a tuple, list or int, ' + f'get {type(out_indices)} instead.') + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_blocks + index + assert 0 <= out_indices[i] <= self.num_blocks, \ + f'Invalid out_indices {index}' + self.out_indices = out_indices + + self.stem = Sequential( + nn.Conv2d(in_channels, channels // 8, 3, stride=2, padding=1), + build_norm_layer(norm_cfg, channels // 8), + build_activation_layer(act_cfg), + nn.Conv2d(channels // 8, channels // 4, 3, stride=2, padding=1), + build_norm_layer(norm_cfg, channels // 4), + build_activation_layer(act_cfg), + nn.Conv2d(channels // 4, channels // 2, 3, stride=2, padding=1), + build_norm_layer(norm_cfg, channels // 2), + build_activation_layer(act_cfg), + nn.Conv2d(channels // 2, channels, 3, stride=2, padding=1), + build_norm_layer(norm_cfg, channels), + build_activation_layer(act_cfg), + nn.Conv2d(channels, channels, 3, stride=1, padding=1), + build_norm_layer(norm_cfg, channels), + ) + + # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, drop_path, self.num_blocks)] + # number of knn's k + num_knn = [ + int(x.item()) for x in torch.linspace(k, 2 * k, self.num_blocks) + ] + max_dilation = 196 // max(num_knn) + + self.pos_embed = nn.Parameter(torch.zeros(1, channels, 14, 14)) + + self.blocks = ModuleList([ + Sequential( + Grapher( + in_channels=channels, + k=num_knn[i], + dilation=min(i // 4 + + 1, max_dilation) if use_dilation else 1, + graph_conv_type=graph_conv_type, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + graph_conv_bias=graph_conv_bias, + use_stochastic=use_stochastic, + epsilon=epsilon, + drop_path=dpr[i], + relative_pos=relative_pos), + FFN(in_features=channels, + hidden_features=channels * 4, + act_cfg=act_cfg, + drop_path=dpr[i])) for i in range(self.num_blocks) + ]) + + self.norm_eval = norm_eval + self.frozen_stages = frozen_stages + + def forward(self, inputs): + outs = [] + x = self.stem(inputs) + self.pos_embed + + for i, block in enumerate(self.blocks): + x = block(x) + + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + self.stem.eval() + for i in range(self.frozen_stages): + m = self.blocks[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(Vig, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +@MODELS.register_module() 
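# Registering the class with MODELS lets a config refer to it by name; an
# illustrative backbone entry (not a config shipped in this commit):
#   backbone=dict(type='PyramidVig', arch='tiny', k=9, out_indices=(3, ))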
+class PyramidVig(BaseBackbone): + """Pyramid Vision GNN backbone. + + A PyTorch implementation of `Vision GNN: An Image is Worth Graph of Nodes + `_. + + Modified from the official implementation + https://github.com/huawei-noah/Efficient-AI-Backbones/tree/master/vig_pytorch + + Args: + arch (str): Vision GNN architecture, choose from 'tiny', + 'small' and 'base'. + in_channels (int): The number of channels of input images. + Defaults to 3. + k (int): The number of KNN's k. Defaults to 9. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + act_cfg (dict): The config of activative functions. + Defaults to ``dict(type='GELU'))``. + norm_cfg (dict): The config of normalization layers. + Defaults to ``dict(type='BN')``. + graph_conv_bias (bool): Whether to use bias in the convolution + layers in Grapher. Defaults to True. + graph_conv_type (str): The type of graph convolution,choose + from 'edge', 'mr', 'sage' and 'gin'. Defaults to 'mr'. + epsilon (float): Probability of random arrangement in KNN. It only + works when ``use_stochastic=True``. Defaults to 0.2. + use_stochastic (bool): Whether to use stochastic in KNN. + Defaults to False. + drop_path (float): stochastic depth rate. Default 0.0 + norm_eval (bool): Whether to set the normalization layer to eval mode. + Defaults to False. + frozen_stages (int): Stages to be frozen (all param fixed). + Defaults to 0, which means not freezing any parameters. + init_cfg (dict, optional): The initialization configs. + Defaults to None. + """ # noqa: E501 + arch_settings = { + 'tiny': dict(blocks=[2, 2, 6, 2], channels=[48, 96, 240, 384]), + 'small': dict(blocks=[2, 2, 6, 2], channels=[80, 160, 400, 640]), + 'medium': dict(blocks=[2, 2, 16, 2], channels=[96, 192, 384, 768]), + 'base': dict(blocks=[2, 2, 18, 2], channels=[128, 256, 512, 1024]), + } + + def __init__(self, + arch, + in_channels=3, + k=9, + out_indices=-1, + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='BN'), + graph_conv_bias=True, + graph_conv_type='mr', + epsilon=0.2, + use_stochastic=False, + drop_path=0., + norm_eval=False, + frozen_stages=0, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + arch = self.arch_settings[arch] + self.blocks = arch['blocks'] + self.num_blocks = sum(self.blocks) + self.num_stages = len(self.blocks) + channels = arch['channels'] + self.channels = channels + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' 
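        # Negative indices are normalized below: with the four-stage archs
        # defined above, the default out_indices=-1 becomes [3], i.e. only the
        # last stage is returned.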
+ for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_stages + index + assert 0 <= out_indices[i] <= self.num_stages, \ + f'Invalid out_indices {index}' + self.out_indices = out_indices + + self.stem = Sequential( + nn.Conv2d(in_channels, channels[0] // 2, 3, stride=2, padding=1), + build_norm_layer(norm_cfg, channels[0] // 2), + build_activation_layer(act_cfg), + nn.Conv2d(channels[0] // 2, channels[0], 3, stride=2, padding=1), + build_norm_layer(norm_cfg, channels[0]), + build_activation_layer(act_cfg), + nn.Conv2d(channels[0], channels[0], 3, stride=1, padding=1), + build_norm_layer(norm_cfg, channels[0]), + ) + + # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, drop_path, self.num_blocks)] + # number of knn's k + num_knn = [ + int(x.item()) for x in torch.linspace(k, k, self.num_blocks) + ] + max_dilation = 49 // max(num_knn) + + self.pos_embed = nn.Parameter( + torch.zeros(1, channels[0], 224 // 4, 224 // 4)) + HW = 224 // 4 * 224 // 4 + reduce_ratios = [4, 2, 1, 1] + + self.stages = ModuleList() + block_idx = 0 + for stage_idx, num_blocks in enumerate(self.blocks): + mid_channels = channels[stage_idx] + reduce_ratio = reduce_ratios[stage_idx] + blocks = [] + if stage_idx > 0: + blocks.append( + Sequential( + nn.Conv2d( + self.channels[stage_idx - 1], + mid_channels, + kernel_size=3, + stride=2, + padding=1), + build_norm_layer(norm_cfg, mid_channels), + )) + HW = HW // 4 + for _ in range(num_blocks): + blocks.append( + Sequential( + Grapher( + in_channels=mid_channels, + k=num_knn[block_idx], + dilation=min(block_idx // 4 + 1, max_dilation), + graph_conv_type=graph_conv_type, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + graph_conv_bias=graph_conv_bias, + use_stochastic=use_stochastic, + epsilon=epsilon, + r=reduce_ratio, + n=HW, + drop_path=dpr[block_idx], + relative_pos=True), + FFN(in_features=mid_channels, + hidden_features=mid_channels * 4, + act_cfg=act_cfg, + drop_path=dpr[block_idx]))) + block_idx += 1 + self.stages.append(Sequential(*blocks)) + + self.norm_eval = norm_eval + self.frozen_stages = frozen_stages + + def forward(self, inputs): + outs = [] + x = self.stem(inputs) + self.pos_embed + + for i, blocks in enumerate(self.stages): + x = blocks(x) + + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + def _freeze_stages(self): + self.stem.eval() + for i in range(self.frozen_stages): + m = self.stages[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def train(self, mode=True): + super(PyramidVig, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() diff --git a/mmpretrain/models/backbones/vision_transformer.py b/mmpretrain/models/backbones/vision_transformer.py new file mode 100644 index 0000000..a54053c --- /dev/null +++ b/mmpretrain/models/backbones/vision_transformer.py @@ -0,0 +1,537 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
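# A minimal usage sketch of the backbone defined in this file, in the same
# style as the Examples blocks of the other backbones in this commit. The
# output shape assumes the default 'base' arch and the 'cls_token' output type
# described in the class docstring:
#   >>> import torch
#   >>> from mmpretrain.models import VisionTransformer
#   >>> model = VisionTransformer(arch='base', img_size=224, patch_size=16)
#   >>> feats = model(torch.rand(1, 3, 224, 224))
#   >>> feats[-1].shape
#   torch.Size([1, 768])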
+from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.registry import MODELS +from ..utils import (MultiheadAttention, SwiGLUFFNFused, build_norm_layer, + resize_pos_embed, to_2tuple) +from .base_backbone import BaseBackbone + + +class TransformerEncoderLayer(BaseModule): + """Implements one encoder layer in Vision Transformer. + + Args: + embed_dims (int): The feature dimension + num_heads (int): Parallel attention heads + feedforward_channels (int): The hidden dimension for FFNs + layer_scale_init_value (float or torch.Tensor): Init value of layer + scale. Defaults to 0. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop_rate (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + num_fcs (int): The number of fully-connected layers for FFNs. + Defaults to 2. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + ffn_type (str): Select the type of ffn layers. Defaults to 'origin'. + act_cfg (dict): The activation config for FFNs. + Defaults to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + layer_scale_init_value=0., + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0., + num_fcs=2, + qkv_bias=True, + ffn_type='origin', + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(TransformerEncoderLayer, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + + self.ln1 = build_norm_layer(norm_cfg, self.embed_dims) + + self.attn = MultiheadAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias, + layer_scale_init_value=layer_scale_init_value) + + self.ln2 = build_norm_layer(norm_cfg, self.embed_dims) + + if ffn_type == 'origin': + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg, + layer_scale_init_value=layer_scale_init_value) + elif ffn_type == 'swiglu_fused': + self.ffn = SwiGLUFFNFused( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + layer_scale_init_value=layer_scale_init_value) + else: + raise NotImplementedError + + @property + def norm1(self): + return self.ln1 + + @property + def norm2(self): + return self.ln2 + + def init_weights(self): + super(TransformerEncoderLayer, self).init_weights() + for m in self.ffn.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + nn.init.normal_(m.bias, std=1e-6) + + def forward(self, x): + x = x + self.attn(self.ln1(x)) + x = self.ffn(self.ln2(x), identity=x) + return x + + +@MODELS.register_module() +class VisionTransformer(BaseBackbone): + """Vision Transformer. + + A PyTorch implement of : `An Image is Worth 16x16 Words: Transformers + for Image Recognition at Scale `_ + + Args: + arch (str | dict): Vision Transformer architecture. 
If use string, + choose from 'small', 'base', 'large', 'deit-tiny', 'deit-small' + and 'deit-base'. If use dict, it should have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of transformer encoder layers. + - **num_heads** (int): The number of heads in attention modules. + - **feedforward_channels** (int): The hidden dimensions in + feedforward modules. + + Defaults to 'base'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + in_channels (int): The num of input channels. Defaults to 3. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + out_type (str): The type of output features. Please choose from + + - ``"cls_token"``: The class token tensor with shape (B, C). + - ``"featmap"``: The feature map tensor from the patch tokens + with shape (B, C, H, W). + - ``"avg_featmap"``: The global averaged feature map tensor + with shape (B, C). + - ``"raw"``: The raw feature tensor includes patch tokens and + class tokens with shape (B, L, C). + + Defaults to ``"cls_token"``. + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Defaults to True. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + layer_scale_init_value (float or torch.Tensor): Init value of layer + scale. Defaults to 0. + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
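+
+    Examples:
+        A minimal usage sketch (illustrative only; it assumes the class is
+        exported from ``mmpretrain.models`` and relies on the default
+        ``out_type='cls_token'``, so each output is the class token):
+
+        >>> import torch
+        >>> from mmpretrain.models import VisionTransformer
+        >>> model = VisionTransformer(arch='base', img_size=224, patch_size=16)
+        >>> inputs = torch.rand(1, 3, 224, 224)
+        >>> outputs = model(inputs)
+        >>> # one entry per index in ``out_indices`` (default: the last layer)
+        >>> outputs[-1].shape
+        torch.Size([1, 768])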
+ """ + arch_zoo = { + **dict.fromkeys( + ['s', 'small'], { + 'embed_dims': 768, + 'num_layers': 8, + 'num_heads': 8, + 'feedforward_channels': 768 * 3, + }), + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 3072 + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 1024, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + }), + **dict.fromkeys( + ['h', 'huge'], + { + # The same as the implementation in MAE + # + 'embed_dims': 1280, + 'num_layers': 32, + 'num_heads': 16, + 'feedforward_channels': 5120 + }), + **dict.fromkeys( + ['eva-g', 'eva-giant'], + { + # The implementation in EVA + # + 'embed_dims': 1408, + 'num_layers': 40, + 'num_heads': 16, + 'feedforward_channels': 6144 + }), + **dict.fromkeys( + ['deit-t', 'deit-tiny'], { + 'embed_dims': 192, + 'num_layers': 12, + 'num_heads': 3, + 'feedforward_channels': 192 * 4 + }), + **dict.fromkeys( + ['deit-s', 'deit-small', 'dinov2-s', 'dinov2-small'], { + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 6, + 'feedforward_channels': 384 * 4 + }), + **dict.fromkeys( + ['deit-b', 'deit-base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 768 * 4 + }), + **dict.fromkeys( + ['dinov2-g', 'dinov2-giant'], { + 'embed_dims': 1536, + 'num_layers': 40, + 'num_heads': 24, + 'feedforward_channels': 6144 + }), + } + num_extra_tokens = 1 # class token + OUT_TYPES = {'raw', 'cls_token', 'featmap', 'avg_featmap'} + + def __init__(self, + arch='base', + img_size=224, + patch_size=16, + in_channels=3, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + out_type='cls_token', + with_cls_token=True, + frozen_stages=-1, + interpolate_mode='bicubic', + layer_scale_init_value=0., + patch_cfg=dict(), + layer_cfgs=dict(), + pre_norm=False, + init_cfg=None): + super(VisionTransformer, self).__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'num_heads', 'feedforward_channels' + } + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.img_size = to_2tuple(img_size) + + # Set patch embedding + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + bias=not pre_norm, # disable bias if pre_norm is used(e.g., CLIP) + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + # Set out type + if out_type not in self.OUT_TYPES: + raise ValueError(f'Unsupported `out_type` {out_type}, please ' + f'choose from {self.OUT_TYPES}') + self.out_type = out_type + + # Set cls token + self.with_cls_token = with_cls_token + if with_cls_token: + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + elif out_type != 'cls_token': + self.cls_token = None + self.num_extra_tokens = 0 + else: + raise ValueError( + 'with_cls_token must be True when 
`out_type="cls_token"`.') + + # Set position embedding + self.interpolate_mode = interpolate_mode + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches + self.num_extra_tokens, + self.embed_dims)) + self._register_load_state_dict_pre_hook(self._prepare_pos_embed) + + self.drop_after_pos = nn.Dropout(p=drop_rate) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_layers + index + assert 0 <= out_indices[i] <= self.num_layers, \ + f'Invalid out_indices {index}' + self.out_indices = out_indices + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, self.num_layers) + + self.layers = ModuleList() + if isinstance(layer_cfgs, dict): + layer_cfgs = [layer_cfgs] * self.num_layers + for i in range(self.num_layers): + _layer_cfg = dict( + embed_dims=self.embed_dims, + num_heads=self.arch_settings['num_heads'], + feedforward_channels=self. + arch_settings['feedforward_channels'], + layer_scale_init_value=layer_scale_init_value, + drop_rate=drop_rate, + drop_path_rate=dpr[i], + qkv_bias=qkv_bias, + norm_cfg=norm_cfg) + _layer_cfg.update(layer_cfgs[i]) + self.layers.append(TransformerEncoderLayer(**_layer_cfg)) + + self.frozen_stages = frozen_stages + if pre_norm: + self.pre_norm = build_norm_layer(norm_cfg, self.embed_dims) + else: + self.pre_norm = nn.Identity() + + self.final_norm = final_norm + if final_norm: + self.ln1 = build_norm_layer(norm_cfg, self.embed_dims) + if self.out_type == 'avg_featmap': + self.ln2 = build_norm_layer(norm_cfg, self.embed_dims) + + # freeze stages only when self.frozen_stages > 0 + if self.frozen_stages > 0: + self._freeze_stages() + + @property + def norm1(self): + return self.ln1 + + @property + def norm2(self): + return self.ln2 + + def init_weights(self): + super(VisionTransformer, self).init_weights() + + if not (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=0.02) + + def _prepare_pos_embed(self, state_dict, prefix, *args, **kwargs): + name = prefix + 'pos_embed' + if name not in state_dict.keys(): + return + + ckpt_pos_embed_shape = state_dict[name].shape + if (not self.with_cls_token + and ckpt_pos_embed_shape[1] == self.pos_embed.shape[1] + 1): + # Remove cls token from state dict if it's not used. 
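+            # The checkpoint carries one extra leading token (its cls token);
+            # dropping it keeps the remaining patch position embeddings
+            # aligned with this cls-token-free model.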
+ state_dict[name] = state_dict[name][:, 1:] + ckpt_pos_embed_shape = state_dict[name].shape + + if self.pos_embed.shape != ckpt_pos_embed_shape: + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.info( + f'Resize the pos_embed shape from {ckpt_pos_embed_shape} ' + f'to {self.pos_embed.shape}.') + + ckpt_pos_embed_shape = to_2tuple( + int(np.sqrt(ckpt_pos_embed_shape[1] - self.num_extra_tokens))) + pos_embed_shape = self.patch_embed.init_out_size + + state_dict[name] = resize_pos_embed(state_dict[name], + ckpt_pos_embed_shape, + pos_embed_shape, + self.interpolate_mode, + self.num_extra_tokens) + + @staticmethod + def resize_pos_embed(*args, **kwargs): + """Interface for backward-compatibility.""" + return resize_pos_embed(*args, **kwargs) + + def _freeze_stages(self): + # freeze position embedding + if self.pos_embed is not None: + self.pos_embed.requires_grad = False + # set dropout to eval model + self.drop_after_pos.eval() + # freeze patch embedding + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + # freeze pre-norm + for param in self.pre_norm.parameters(): + param.requires_grad = False + # freeze cls_token + if self.cls_token is not None: + self.cls_token.requires_grad = False + # freeze layers + for i in range(1, self.frozen_stages + 1): + m = self.layers[i - 1] + m.eval() + for param in m.parameters(): + param.requires_grad = False + # freeze the last layer norm + if self.frozen_stages == len(self.layers): + if self.final_norm: + self.ln1.eval() + for param in self.ln1.parameters(): + param.requires_grad = False + + if self.out_type == 'avg_featmap': + self.ln2.eval() + for param in self.ln2.parameters(): + param.requires_grad = False + + def forward(self, x): + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + + if self.cls_token is not None: + # stole cls_tokens impl from Phil Wang, thanks + cls_token = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_token, x), dim=1) + + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + x = self.drop_after_pos(x) + + x = self.pre_norm(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + + if i == len(self.layers) - 1 and self.final_norm: + x = self.ln1(x) + + if i in self.out_indices: + outs.append(self._format_output(x, patch_resolution)) + + return tuple(outs) + + def _format_output(self, x, hw): + if self.out_type == 'raw': + return x + if self.out_type == 'cls_token': + return x[:, 0] + + patch_token = x[:, self.num_extra_tokens:] + if self.out_type == 'featmap': + B = x.size(0) + # (B, N, C) -> (B, H, W, C) -> (B, C, H, W) + return patch_token.reshape(B, *hw, -1).permute(0, 3, 1, 2) + if self.out_type == 'avg_featmap': + return self.ln2(patch_token.mean(dim=1)) + + def get_layer_depth(self, param_name: str, prefix: str = ''): + """Get the layer-wise depth of a parameter. + + Args: + param_name (str): The name of the parameter. + prefix (str): The prefix for the parameter. + Defaults to an empty string. + + Returns: + Tuple[int, int]: The layer-wise depth and the num of layers. 
+ + Note: + The first depth is the stem module (``layer_depth=0``), and the + last depth is the subsequent module (``layer_depth=num_layers-1``) + """ + num_layers = self.num_layers + 2 + + if not param_name.startswith(prefix): + # For subsequent module like head + return num_layers - 1, num_layers + + param_name = param_name[len(prefix):] + + if param_name in ('cls_token', 'pos_embed'): + layer_depth = 0 + elif param_name.startswith('patch_embed'): + layer_depth = 0 + elif param_name.startswith('layers'): + layer_id = int(param_name.split('.')[1]) + layer_depth = layer_id + 1 + else: + layer_depth = num_layers - 1 + + return layer_depth, num_layers diff --git a/mmpretrain/models/backbones/vit_eva02.py b/mmpretrain/models/backbones/vit_eva02.py new file mode 100644 index 0000000..20ec4b2 --- /dev/null +++ b/mmpretrain/models/backbones/vit_eva02.py @@ -0,0 +1,350 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn.bricks.drop import build_dropout +from mmengine.model import BaseModule, ModuleList + +from mmpretrain.registry import MODELS +from ..utils import (RotaryEmbeddingFast, SwiGLUFFN, build_norm_layer, + resize_pos_embed) +from .vision_transformer import VisionTransformer + + +class AttentionWithRoPE(BaseModule): + """Multi-head Attention Module with 2D sincos position embedding (RoPE). + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + attn_drop (float): Dropout rate of the dropout layer after the + attention calculation of query and key. Defaults to 0. + proj_drop (float): Dropout rate of the dropout layer after the + output projection. Defaults to 0. + qkv_bias (bool): If True, add a learnable bias to q and v. Note + that we follows the official implementation where ``k_bias`` + is 0. Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + proj_bias (bool) If True, add a learnable bias to output projection. + Defaults to True. + rope (:obj:`torch.nn.Module`, optional): If it is an object of the + ``RotaryEmbedding``, the rotation of the token position will be + performed before the softmax. Defaults to None. + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Defaults to True. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
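+
+    Examples:
+        A rough usage sketch (illustrative only; ``RotaryEmbeddingFast`` is
+        assumed to be exported from ``mmpretrain.models.utils``, matching the
+        relative import at the top of this file, and the shapes assume a
+        14x14 patch grid with a prepended cls token):
+
+        >>> import torch
+        >>> from mmpretrain.models.utils import RotaryEmbeddingFast
+        >>> rope = RotaryEmbeddingFast(embed_dims=64, patch_resolution=(14, 14))
+        >>> attn = AttentionWithRoPE(embed_dims=768, num_heads=12, rope=rope)
+        >>> tokens = torch.rand(1, 1 + 14 * 14, 768)
+        >>> attn(tokens, (14, 14)).shape
+        torch.Size([1, 197, 768])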
+ """ + + def __init__(self, + embed_dims, + num_heads, + attn_drop=0., + proj_drop=0., + qkv_bias=True, + qk_scale=None, + proj_bias=True, + rope=None, + with_cls_token=True, + init_cfg=None): + super(AttentionWithRoPE, self).__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + self.num_heads = num_heads + self.head_dims = embed_dims // num_heads + self.scale = qk_scale or self.head_dims**-0.5 + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(embed_dims, embed_dims, bias=proj_bias) + self.proj_drop = nn.Dropout(proj_drop) + + self.with_cls_token = with_cls_token + + self.rope = rope + + def forward(self, x, patch_resolution): + B, N, _ = x.shape + + qkv = self.qkv(x) + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(dim=0) + + if self.rope: + if self.with_cls_token: + q_t = q[:, :, 1:, :] + ro_q_t = self.rope(q_t, patch_resolution) + q = torch.cat((q[:, :, :1, :], ro_q_t), -2).type_as(v) + + k_t = k[:, :, 1:, :] if self.with_cls_token else k + ro_k_t = self.rope(k_t, patch_resolution) + k = torch.cat((k[:, :, :1, :], ro_k_t), -2).type_as(v) + else: + q = self.rope(q, patch_resolution) + k = self.rope(k, patch_resolution) + + q = q * self.scale + + attn = (q @ k.transpose(-2, -1)) + attn = attn.softmax(dim=-1).type_as(x) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class EVA02EndcoderLayer(BaseModule): + """Implements one encoder EVA02EndcoderLayer in EVA02. + + Args: + embed_dims (int): The feature dimension + num_heads (int): Parallel attention heads + feedforward_channels (int): The hidden dimension of FFNs. + sub_ln (bool): Whether to add the sub layer normalization + in the attention module. Defaults to False. + attn_drop (float): Dropout rate of the dropout layer after the + attention calculation of query and key. Defaults to 0. + proj_drop (float): Dropout rate of the dropout layer after the + output projection. Defaults to 0. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + proj_bias (bool): enable bias for projection in the attention module + if True. Defaults to True. + rope (:obj:`torch.nn.Module`, optional): RotaryEmbedding object + in the attention module. Defaults to None. + drop_rate (float): Dropout rate in the mlp module. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
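+
+    Examples:
+        An illustrative sketch (not a reference configuration; the numbers
+        mirror the 'tiny' settings of :class:`ViTEVA02` below, i.e. 192 dims,
+        3 heads and a SwiGLU hidden size of ``int(192 * 4 * 2 / 3) == 512``):
+
+        >>> import torch
+        >>> from mmpretrain.models.utils import RotaryEmbeddingFast
+        >>> rope = RotaryEmbeddingFast(embed_dims=64, patch_resolution=(14, 14))
+        >>> layer = EVA02EndcoderLayer(
+        ...     embed_dims=192, num_heads=3, feedforward_channels=512, rope=rope)
+        >>> tokens = torch.rand(1, 1 + 14 * 14, 192)
+        >>> layer(tokens, (14, 14)).shape
+        torch.Size([1, 197, 192])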
+ """ + + def __init__(self, + embed_dims, + num_heads, + feedforward_channels, + sub_ln=False, + attn_drop=0., + proj_drop=0., + qkv_bias=False, + qk_scale=None, + proj_bias=True, + rope=None, + with_cls_token=True, + drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN'), + init_cfg=None): + super(EVA02EndcoderLayer, self).__init__(init_cfg=init_cfg) + + self.norm1 = build_norm_layer(norm_cfg, embed_dims) + + self.attn = AttentionWithRoPE( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop, + proj_drop=proj_drop, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + proj_bias=proj_bias, + rope=rope, + with_cls_token=with_cls_token) + + self.drop_path = build_dropout( + dict(type='DropPath', drop_prob=drop_path_rate)) + + self.norm2 = build_norm_layer(norm_cfg, embed_dims) + + if drop_rate > 0: + dropout_layer = dict(type='Dropout', drop_prob=drop_rate) + else: + dropout_layer = None + + if sub_ln: + ffn_norm = norm_cfg + else: + ffn_norm = None + + self.mlp = SwiGLUFFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + dropout_layer=dropout_layer, + norm_cfg=ffn_norm, + add_identity=False, + ) + + def forward(self, x, patch_resolution): + inputs = x + x = self.norm1(x) + x = self.attn(x, patch_resolution) + x = self.drop_path(x) + x = inputs + x + + inputs = x + x = self.norm2(x) + x = self.mlp(x) + x = self.drop_path(x) + x = inputs + x + + return x + + +@MODELS.register_module() +class ViTEVA02(VisionTransformer): + """EVA02 Vision Transformer. + + A PyTorch implement of : `EVA-02: A Visual Representation for Neon Genesis + `_ + + Args: + arch (str | dict): Vision Transformer architecture. If use string, + choose from 'tiny', 'small', 'base', 'large'. If use dict, + it should have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of transformer encoder layers. + - **num_heads** (int): The number of heads in attention modules. + - **mlp_ratio** (float): The ratio of the mlp module. + + Defaults to 'tiny'. + + sub_ln (bool): Whether to add the sub layer normalization in swiglu. + Defaults to False. + drop_rate (float): Probability of an element to be zeroed in the + mlp module. Defaults to 0. + attn_drop_rate (float): Probability of an element to be zeroed after + the softmax in the attention. Defaults to 0. + proj_drop_rate (float): Probability of an element to be zeroed after + projection in the attention. Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Defaults to True. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + **kwargs(dict, optional): Other args for Vision Transformer. 
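+
+    Examples:
+        A minimal usage sketch (illustrative only; it assumes the class is
+        exported from ``mmpretrain.models`` and uses the default
+        ``out_type='cls_token'`` inherited from ``VisionTransformer``):
+
+        >>> import torch
+        >>> from mmpretrain.models import ViTEVA02
+        >>> model = ViTEVA02(arch='tiny')
+        >>> inputs = torch.rand(1, 3, 224, 224)
+        >>> outputs = model(inputs)
+        >>> outputs[-1].shape
+        torch.Size([1, 192])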
+ """ + arch_zoo = { + **dict.fromkeys( + ['t', 'ti', 'tiny'], { + 'embed_dims': 192, + 'num_layers': 12, + 'num_heads': 3, + 'feedforward_channels': int(192 * 4 * 2 / 3) + }), + **dict.fromkeys( + ['s', 'small'], { + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 6, + 'feedforward_channels': int(384 * 4 * 2 / 3) + }), + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': int(768 * 4 * 2 / 3) + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 1024, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': int(1024 * 4 * 2 / 3) + }) + } + num_extra_tokens = 1 # class token + OUT_TYPES = {'raw', 'cls_token', 'featmap', 'avg_featmap'} + + def __init__(self, + arch='tiny', + sub_ln=False, + drop_rate=0., + attn_drop_rate=0., + proj_drop_rate=0., + drop_path_rate=0., + qkv_bias=True, + norm_cfg=dict(type='LN'), + with_cls_token=True, + layer_cfgs=dict(), + **kwargs): + # set essential args for Vision Transformer + kwargs.update( + arch=arch, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + norm_cfg=norm_cfg, + with_cls_token=with_cls_token) + super(ViTEVA02, self).__init__(**kwargs) + + self.num_heads = self.arch_settings['num_heads'] + + # Set RoPE + head_dim = self.embed_dims // self.num_heads + self.rope = RotaryEmbeddingFast( + embed_dims=head_dim, patch_resolution=self.patch_resolution) + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, self.num_layers) + self.layers = ModuleList() + if isinstance(layer_cfgs, dict): + layer_cfgs = [layer_cfgs] * self.num_layers + for i in range(self.num_layers): + _layer_cfg = dict( + embed_dims=self.embed_dims, + num_heads=self.num_heads, + feedforward_channels=self. + arch_settings['feedforward_channels'], + sub_ln=sub_ln, + norm_cfg=norm_cfg, + proj_drop=proj_drop_rate, + attn_drop=attn_drop_rate, + drop_rate=drop_rate, + qkv_bias=qkv_bias, + rope=self.rope, + with_cls_token=with_cls_token, + drop_path_rate=dpr[i]) + _layer_cfg.update(layer_cfgs[i]) + self.layers.append(EVA02EndcoderLayer(**_layer_cfg)) + + def forward(self, x): + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + + if self.cls_token is not None: + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + x = self.drop_after_pos(x) + + x = self.pre_norm(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x, patch_resolution) + + if i == len(self.layers) - 1 and self.final_norm: + x = self.ln1(x) + + if i in self.out_indices: + outs.append(self._format_output(x, patch_resolution)) + + return tuple(outs) diff --git a/mmpretrain/models/backbones/vit_sam.py b/mmpretrain/models/backbones/vit_sam.py new file mode 100644 index 0000000..0eb46a7 --- /dev/null +++ b/mmpretrain/models/backbones/vit_sam.py @@ -0,0 +1,697 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Optional, Sequence, Tuple + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.registry import MODELS +from ..utils import LayerNorm2d, build_norm_layer, resize_pos_embed, to_2tuple +from .base_backbone import BaseBackbone + + +def window_partition(x: torch.Tensor, + window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]: + """Partition into non-overlapping windows with padding if needed. + + Borrowed from https://github.com/facebookresearch/segment-anything/ + + Args: + x (torch.Tensor): Input tokens with [B, H, W, C]. + window_size (int): Window size. + + Returns: + Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]] + + - ``windows``: Windows after partition with + [B * num_windows, window_size, window_size, C]. + - ``(Hp, Wp)``: Padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, + window_size, C) + windows = x.permute(0, 1, 3, 2, 4, + 5).contiguous().view(-1, window_size, window_size, C) + return windows, (Hp, Wp) + + +def window_unpartition(windows: torch.Tensor, window_size: int, + pad_hw: Tuple[int, int], + hw: Tuple[int, int]) -> torch.Tensor: + """Window unpartition into original sequences and removing padding. + + Borrowed from https://github.com/facebookresearch/segment-anything/ + + Args: + x (torch.Tensor): Input tokens with + [B * num_windows, window_size, window_size, C]. + window_size (int): Window size. + pad_hw (tuple): Padded height and width (Hp, Wp). + hw (tuple): Original height and width (H, W) before padding. + + Returns: + torch.Tensor: Unpartitioned sequences with [B, H, W, C]. + """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.view(B, Hp // window_size, Wp // window_size, window_size, + window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :].contiguous() + return x + + +def get_rel_pos(q_size: int, k_size: int, + rel_pos: torch.Tensor) -> torch.Tensor: + """Get relative positional embeddings according to the relative positions + of query and key sizes. + + Borrowed from https://github.com/facebookresearch/segment-anything/ + + Args: + q_size (int): Size of query q. + k_size (int): Size of key k. + rel_pos (torch.Tensor): Relative position embeddings (L, C). + + Returns: + torch.Tensor: Extracted positional embeddings according to relative + positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode='linear', + ) + rel_pos_resized = rel_pos_resized.reshape(-1, + max_rel_dist).permute(1, 0) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. 
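+    # For example, with q_size == k_size == 14 the expression below maps each
+    # (query, key) pair to an index in [0, 26], i.e. into the
+    # (2 * 14 - 1)-row relative position table.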
+ q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - + k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + +def add_decomposed_rel_pos( + attn: torch.Tensor, + q: torch.Tensor, + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], +) -> torch.Tensor: + """Borrowed from https://github.com/facebookresearch/segment-anything/ + + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py + + Args: + attn (torch.Tensor): Attention map. + q (torch.Tensor): Query q in the attention layer with shape + (B, q_h * q_w, C). + rel_pos_h (torch.Tensor): Relative position embeddings (Lh, C) for + height axis. + rel_pos_w (torch.Tensor): Relative position embeddings (Lw, C) for + width axis. + q_size (tuple): Spatial sequence size of query q with (q_h, q_w). + k_size (tuple): Spatial sequence size of key k with (k_h, k_w). + + Returns: + torch.Tensor: Attention map with added relative positional embeddings. + """ + q_h, q_w = q_size + k_h, k_w = k_size + Rh = get_rel_pos(q_h, k_h, rel_pos_h) + Rw = get_rel_pos(q_w, k_w, rel_pos_w) + + B, _, dim = q.shape + r_q = q.reshape(B, q_h, q_w, dim) + rel_h = torch.einsum('bhwc,hkc->bhwk', r_q, Rh) + rel_w = torch.einsum('bhwc,wkc->bhwk', r_q, Rw) + + attn = (attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + + rel_w[:, :, :, None, :]).view(B, q_h * q_w, k_h * k_w) + + return attn + + +class Attention(nn.Module): + """Multi-head Attention block with relative position embeddings. + + Borrowed from https://github.com/facebookresearch/segment-anything/ + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + qkv_bias (bool): If True, add a learnable bias to q, k, v. + Defaults to True. + use_rel_pos (bool):Whether to use relative position embedding. + Defaults to False. + input_size (int, optional): Input resolution for calculating the + relative positional parameter size. Defaults to None. + """ + + def __init__( + self, + embed_dims: int, + num_heads: int = 8, + qkv_bias: bool = True, + use_rel_pos: bool = False, + input_size: Optional[Tuple[int, int]] = None, + ) -> None: + super().__init__() + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = head_embed_dims**-0.5 + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.proj = nn.Linear(embed_dims, embed_dims) + + self.use_rel_pos = use_rel_pos + if self.use_rel_pos: + assert (input_size is not None), \ + 'Input size must be provided if using relative position embed.' 
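+            # The tables below hold one learnable embedding per relative
+            # offset along each axis, hence the (2 * size - 1) leading dim.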
+ # initialize relative positional embeddings + self.rel_pos_h = nn.Parameter( + torch.zeros(2 * input_size[0] - 1, head_embed_dims)) + self.rel_pos_w = nn.Parameter( + torch.zeros(2 * input_size[1] - 1, head_embed_dims)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, H, W, _ = x.shape + # qkv with shape (3, B, nHead, H * W, C) + qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, + -1).permute(2, 0, 3, 1, 4) + # q, k, v with shape (B * nHead, H * W, C) + q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0) + + attn = (q * self.scale) @ k.transpose(-2, -1) + + if self.use_rel_pos: + attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, + self.rel_pos_w, (H, W), (H, W)) + + attn = attn.softmax(dim=-1) + x = (attn @ v).view(B, self.num_heads, H, W, + -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1) + x = self.proj(x) + + return x + + +class TransformerEncoderLayer(BaseModule): + """Encoder layer with window attention in Vision Transformer. + + Args: + embed_dims (int): The feature dimension + num_heads (int): Parallel attention heads + feedforward_channels (int): The hidden dimension for FFNs + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + num_fcs (int): The number of fully-connected layers for FFNs. + Defaults to 2. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + act_cfg (dict): The activation config for FFNs. + Defaults to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + use_rel_pos (bool):Whether to use relative position embedding. + Defaults to False. + window_size (int): Window size for window attention. Defaults to 0. + input_size (int, optional): Input resolution for calculating the + relative positional parameter size. Defaults to None. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
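+
+    Examples:
+        A rough usage sketch (illustrative only; the layer consumes and
+        returns ``(B, H, W, C)`` feature maps, here a 14x14 token grid
+        processed with 14x14 window attention):
+
+        >>> import torch
+        >>> layer = TransformerEncoderLayer(
+        ...     embed_dims=768, num_heads=12, feedforward_channels=3072,
+        ...     window_size=14, use_rel_pos=True)
+        >>> x = torch.rand(1, 14, 14, 768)
+        >>> layer(x).shape
+        torch.Size([1, 14, 14, 768])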
+ """ + + def __init__(self, + embed_dims: int, + num_heads: int, + feedforward_channels: int, + drop_rate: float = 0., + drop_path_rate: float = 0., + num_fcs: int = 2, + qkv_bias: bool = True, + act_cfg: dict = dict(type='GELU'), + norm_cfg: dict = dict(type='LN'), + use_rel_pos: bool = False, + window_size: int = 0, + input_size: Optional[Tuple[int, int]] = None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + self.window_size = window_size + + self.ln1 = build_norm_layer(norm_cfg, self.embed_dims) + + self.attn = Attention( + embed_dims=embed_dims, + num_heads=num_heads, + qkv_bias=qkv_bias, + use_rel_pos=use_rel_pos, + input_size=input_size if window_size == 0 else + (window_size, window_size), + ) + + self.ln2 = build_norm_layer(norm_cfg, self.embed_dims) + + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + act_cfg=act_cfg) + + @property + def norm1(self): + return self.ln1 + + @property + def norm2(self): + return self.ln2 + + def forward(self, x): + shortcut = x + x = self.ln1(x) + # Window partition + if self.window_size > 0: + H, W = x.shape[1], x.shape[2] + x, pad_hw = window_partition(x, self.window_size) + + x = self.attn(x) + # Reverse window partition + if self.window_size > 0: + x = window_unpartition(x, self.window_size, pad_hw, (H, W)) + x = shortcut + x + + x = self.ffn(self.ln2(x), identity=x) + return x + + +@MODELS.register_module() +class ViTSAM(BaseBackbone): + """Vision Transformer as image encoder used in SAM. + + A PyTorch implement of backbone: `Segment Anything + `_ + + Args: + arch (str | dict): Vision Transformer architecture. If use string, + choose from 'base', 'large', 'huge'. If use dict, it should have + below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of transformer encoder layers. + - **num_heads** (int): The number of heads in attention modules. + - **feedforward_channels** (int): The hidden dimensions in + feedforward modules. + - **global_attn_indexes** (int): The index of layers with global + attention. + + Defaults to 'base'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + in_channels (int): The num of input channels. Defaults to 3. + out_channels (int): The num of output channels, if equal to 0, the + channel reduction layer is disabled. Defaults to 256. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + out_type (str): The type of output features. Please choose from + + - ``"raw"`` or ``"featmap"``: The feature map tensor from the + patch tokens with shape (B, C, H, W). + - ``"avg_featmap"``: The global averaged feature map tensor + with shape (B, C). + + Defaults to ``"raw"``. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + use_abs_pos (bool): Whether to use absolute position embedding. + Defaults to True. + use_rel_pos (bool):Whether to use relative position embedding. + Defaults to True. + window_size (int): Window size for window attention. Defaults to 14. 
+ norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + arch_zoo = { + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims': 768, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 3072, + 'global_attn_indexes': [2, 5, 8, 11] + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 1024, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096, + 'global_attn_indexes': [5, 11, 17, 23] + }), + **dict.fromkeys( + ['h', 'huge'], { + 'embed_dims': 1280, + 'num_layers': 32, + 'num_heads': 16, + 'feedforward_channels': 5120, + 'global_attn_indexes': [7, 15, 23, 31] + }), + } + OUT_TYPES = {'raw', 'featmap', 'avg_featmap'} + + def __init__(self, + arch: str = 'base', + img_size: int = 224, + patch_size: int = 16, + in_channels: int = 3, + out_channels: int = 256, + out_indices: int = -1, + out_type: str = 'raw', + drop_rate: float = 0., + drop_path_rate: float = 0., + qkv_bias: bool = True, + use_abs_pos: bool = True, + use_rel_pos: bool = True, + window_size: int = 14, + norm_cfg: dict = dict(type='LN', eps=1e-6), + frozen_stages: int = -1, + interpolate_mode: str = 'bicubic', + patch_cfg: dict = dict(), + layer_cfgs: dict = dict(), + init_cfg: Optional[dict] = None): + super().__init__(init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'num_heads', 'feedforward_channels' + } + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.global_attn_indexes = self.arch_settings['global_attn_indexes'] + self.img_size = to_2tuple(img_size) + + # Set patch embedding + _patch_cfg = dict( + in_channels=in_channels, + input_size=img_size, + embed_dims=self.embed_dims, + conv_type='Conv2d', + kernel_size=patch_size, + stride=patch_size, + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + + # Set out type + if out_type not in self.OUT_TYPES: + raise ValueError(f'Unsupported `out_type` {out_type}, please ' + f'choose from {self.OUT_TYPES}') + self.out_type = out_type + + self.use_abs_pos = use_abs_pos + self.interpolate_mode = interpolate_mode + if use_abs_pos: + # Set position embedding + self.pos_embed = nn.Parameter( + torch.zeros(1, *self.patch_resolution, self.embed_dims)) + self.drop_after_pos = nn.Dropout(p=drop_rate) + self._register_load_state_dict_pre_hook(self._prepare_pos_embed) + + if use_rel_pos: + self._register_load_state_dict_pre_hook( + self._prepare_relative_position) + + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' 
\ + f'get {type(out_indices)} instead.' + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_layers + index + assert 0 <= out_indices[i] <= self.num_layers, \ + f'Invalid out_indices {index}' + self.out_indices = out_indices + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, self.num_layers) + + self.layers = ModuleList() + if isinstance(layer_cfgs, dict): + layer_cfgs = [layer_cfgs] * self.num_layers + for i in range(self.num_layers): + _layer_cfg = dict( + embed_dims=self.embed_dims, + num_heads=self.arch_settings['num_heads'], + feedforward_channels=self. + arch_settings['feedforward_channels'], + drop_rate=drop_rate, + drop_path_rate=dpr[i], + qkv_bias=qkv_bias, + window_size=window_size + if i not in self.global_attn_indexes else 0, + input_size=self.patch_resolution, + use_rel_pos=use_rel_pos, + norm_cfg=norm_cfg) + _layer_cfg.update(layer_cfgs[i]) + self.layers.append(TransformerEncoderLayer(**_layer_cfg)) + + self.out_channels = out_channels + if self.out_channels > 0: + self.channel_reduction = nn.Sequential( + nn.Conv2d( + self.embed_dims, + out_channels, + kernel_size=1, + bias=False, + ), + LayerNorm2d(out_channels, eps=1e-6), + nn.Conv2d( + out_channels, + out_channels, + kernel_size=3, + padding=1, + bias=False, + ), + LayerNorm2d(out_channels, eps=1e-6), + ) + + # freeze stages only when self.frozen_stages > 0 + self.frozen_stages = frozen_stages + if self.frozen_stages > 0: + self._freeze_stages() + + def init_weights(self): + super().init_weights() + + if not (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + if self.pos_embed is not None: + trunc_normal_(self.pos_embed, std=0.02) + + def _freeze_stages(self): + # freeze position embedding + if self.pos_embed is not None: + self.pos_embed.requires_grad = False + # set dropout to eval model + self.drop_after_pos.eval() + # freeze patch embedding + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + # freeze layers + for i in range(1, self.frozen_stages + 1): + m = self.layers[i - 1] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + # freeze channel_reduction module + if self.frozen_stages == self.num_layers and self.out_channels > 0: + m = self.channel_reduction + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]: + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + x = x.view(B, patch_resolution[0], patch_resolution[1], + self.embed_dims) + + if self.use_abs_pos: + # 'resize_pos_embed' only supports 'pos_embed' with ndim==3, but + # in ViTSAM, the 'pos_embed' has 4 dimensions (1, H, W, C), so it + # is flattened. Besides, ViTSAM doesn't have any extra token. 
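+            # After resizing, the flattened embedding is reshaped back to
+            # (1, H', W', C) so it can be added to the (B, H', W', C) tokens.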
+ resized_pos_embed = resize_pos_embed( + self.pos_embed.flatten(1, 2), + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=0) + x = x + resized_pos_embed.view(1, *patch_resolution, + self.embed_dims) + x = self.drop_after_pos(x) + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x) + + if i in self.out_indices: + # (B, H, W, C) -> (B, C, H, W) + x_reshape = x.permute(0, 3, 1, 2) + + if self.out_channels > 0: + x_reshape = self.channel_reduction(x_reshape) + outs.append(self._format_output(x_reshape)) + + return tuple(outs) + + def _format_output(self, x) -> torch.Tensor: + if self.out_type == 'raw' or self.out_type == 'featmap': + return x + elif self.out_type == 'avg_featmap': + # (B, C, H, W) -> (B, C, N) -> (B, N, C) + x = x.flatten(2).permute(0, 2, 1) + return x.mean(dim=1) + + def _prepare_pos_embed(self, state_dict, prefix, *args, **kwargs): + name = prefix + 'pos_embed' + if name not in state_dict.keys(): + return + + ckpt_pos_embed_shape = state_dict[name].shape + if self.pos_embed.shape != ckpt_pos_embed_shape: + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.info( + f'Resize the pos_embed shape from {ckpt_pos_embed_shape} ' + f'to {self.pos_embed.shape}.') + + ckpt_pos_embed_shape = ckpt_pos_embed_shape[1:3] + pos_embed_shape = self.patch_embed.init_out_size + + flattened_pos_embed = state_dict[name].flatten(1, 2) + resized_pos_embed = resize_pos_embed(flattened_pos_embed, + ckpt_pos_embed_shape, + pos_embed_shape, + self.interpolate_mode, 0) + state_dict[name] = resized_pos_embed.view(1, *pos_embed_shape, + self.embed_dims) + + def _prepare_relative_position(self, state_dict, prefix, *args, **kwargs): + state_dict_model = self.state_dict() + all_keys = list(state_dict_model.keys()) + for key in all_keys: + if 'rel_pos_' in key: + ckpt_key = prefix + key + if ckpt_key not in state_dict: + continue + relative_position_pretrained = state_dict[ckpt_key] + relative_position_current = state_dict_model[key] + L1, _ = relative_position_pretrained.size() + L2, _ = relative_position_current.size() + if L1 != L2: + new_rel_pos = F.interpolate( + relative_position_pretrained.reshape(1, L1, + -1).permute( + 0, 2, 1), + size=L2, + mode='linear', + ) + new_rel_pos = new_rel_pos.reshape(-1, L2).permute(1, 0) + from mmengine.logging import MMLogger + logger = MMLogger.get_current_instance() + logger.info(f'Resize the {ckpt_key} from ' + f'{state_dict[ckpt_key].shape} to ' + f'{new_rel_pos.shape}') + state_dict[ckpt_key] = new_rel_pos + + def get_layer_depth(self, param_name: str, prefix: str = ''): + """Get the layer-wise depth of a parameter. + + Args: + param_name (str): The name of the parameter. + prefix (str): The prefix for the parameter. + Defaults to an empty string. + + Returns: + Tuple[int, int]: The layer-wise depth and the num of layers. 
+ + Note: + The first depth is the stem module (``layer_depth=0``), and the + last depth is the subsequent module (``layer_depth=num_layers-1``) + """ + num_layers = self.num_layers + 2 + + if not param_name.startswith(prefix): + # For subsequent module like head + return num_layers - 1, num_layers + + param_name = param_name[len(prefix):] + + if param_name in ('cls_token', 'pos_embed'): + layer_depth = 0 + elif param_name.startswith('patch_embed'): + layer_depth = 0 + elif param_name.startswith('layers'): + layer_id = int(param_name.split('.')[1]) + layer_depth = layer_id + 1 + else: + layer_depth = num_layers - 1 + + return layer_depth, num_layers diff --git a/mmpretrain/models/backbones/xcit.py b/mmpretrain/models/backbones/xcit.py new file mode 100644 index 0000000..392ebbe --- /dev/null +++ b/mmpretrain/models/backbones/xcit.py @@ -0,0 +1,770 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from functools import partial +from typing import Optional, Sequence, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn.bricks import ConvModule, DropPath +from mmcv.cnn.bricks.transformer import FFN +from mmengine.model import BaseModule, Sequential +from mmengine.model.weight_init import trunc_normal_ +from mmengine.utils import digit_version + +from mmpretrain.registry import MODELS +from ..utils import build_norm_layer, to_2tuple +from .base_backbone import BaseBackbone + +if digit_version(torch.__version__) < digit_version('1.8.0'): + floor_div = torch.floor_divide +else: + floor_div = partial(torch.div, rounding_mode='floor') + + +class ClassAttntion(BaseModule): + """Class Attention Module. + + A PyTorch implementation of Class Attention Module introduced by: + `Going deeper with Image Transformers `_ + + taken from + https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + with slight modifications to do CA + + Args: + dim (int): The feature dimension. + num_heads (int): Parallel attention heads. Defaults to 8. + qkv_bias (bool): enable bias for qkv if True. Defaults to False. + attn_drop (float): The drop out rate for attention output weights. + Defaults to 0. + proj_drop (float): The drop out rate for linear output weights. + Defaults to 0. + init_cfg (dict | list[dict], optional): Initialization config dict. + Defaults to None. + """ # noqa: E501 + + def __init__(self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = False, + attn_drop: float = 0., + proj_drop: float = 0., + init_cfg=None): + + super(ClassAttntion, self).__init__(init_cfg=init_cfg) + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim**-0.5 + + self.q = nn.Linear(dim, dim, bias=qkv_bias) + self.k = nn.Linear(dim, dim, bias=qkv_bias) + self.v = nn.Linear(dim, dim, bias=qkv_bias) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + # We only need to calculate query of cls token. 
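+        # Keeping only the cls-token query makes the attention map
+        # (B, num_heads, 1, N) instead of (B, num_heads, N, N), so class
+        # attention scales linearly with the token count.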
+ q = self.q(x[:, 0]).unsqueeze(1).reshape(B, 1, self.num_heads, + C // self.num_heads).permute( + 0, 2, 1, 3) + k = self.k(x).reshape(B, N, self.num_heads, + C // self.num_heads).permute(0, 2, 1, 3) + + q = q * self.scale + v = self.v(x).reshape(B, N, self.num_heads, + C // self.num_heads).permute(0, 2, 1, 3) + + attn = (q @ k.transpose(-2, -1)) + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x_cls = (attn @ v).transpose(1, 2).reshape(B, 1, C) + x_cls = self.proj(x_cls) + x_cls = self.proj_drop(x_cls) + + return x_cls + + +class PositionalEncodingFourier(BaseModule): + """Positional Encoding using a fourier kernel. + + A PyTorch implementation of Positional Encoding relying on + a fourier kernel introduced by: + `Attention is all you Need `_ + + Based on the `official XCiT code + `_ + + Args: + hidden_dim (int): The hidden feature dimension. Defaults to 32. + dim (int): The output feature dimension. Defaults to 768. + temperature (int): A control variable for position encoding. + Defaults to 10000. + init_cfg (dict | list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + hidden_dim: int = 32, + dim: int = 768, + temperature: int = 10000, + init_cfg=None): + super(PositionalEncodingFourier, self).__init__(init_cfg=init_cfg) + + self.token_projection = ConvModule( + in_channels=hidden_dim * 2, + out_channels=dim, + kernel_size=1, + conv_cfg=None, + norm_cfg=None, + act_cfg=None) + self.scale = 2 * math.pi + self.temperature = temperature + self.hidden_dim = hidden_dim + self.dim = dim + self.eps = 1e-6 + + def forward(self, B: int, H: int, W: int): + device = self.token_projection.conv.weight.device + y_embed = torch.arange( + 1, H + 1, device=device).unsqueeze(1).repeat(1, 1, W).float() + x_embed = torch.arange(1, W + 1, device=device).repeat(1, H, 1).float() + y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale + + dim_t = torch.arange(self.hidden_dim, device=device).float() + dim_t = floor_div(dim_t, 2) + dim_t = self.temperature**(2 * dim_t / self.hidden_dim) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + [pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()], + dim=4).flatten(3) + pos_y = torch.stack( + [pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()], + dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + pos = self.token_projection(pos) + return pos.repeat(B, 1, 1, 1) # (B, C, H, W) + + +class ConvPatchEmbed(BaseModule): + """Patch Embedding using multiple convolution layers. + + Args: + img_size (int, tuple): input image size. + Defaults to 224, means the size is 224*224. + patch_size (int): The patch size in conv patch embedding. + Defaults to 16. + in_channels (int): The input channels of this module. + Defaults to 3. + embed_dims (int): The feature dimension + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN')``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='GELU')``. + init_cfg (dict | list[dict], optional): Initialization config dict. + Defaults to None. 
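+
+    Examples:
+        A minimal sketch (illustrative only; with ``patch_size=16`` the four
+        stride-2 convolutions reduce a 224x224 input to a 14x14 token grid):
+
+        >>> import torch
+        >>> patch_embed = ConvPatchEmbed(img_size=224, patch_size=16,
+        ...                              embed_dims=768)
+        >>> tokens, (Hp, Wp) = patch_embed(torch.rand(1, 3, 224, 224))
+        >>> tokens.shape, (Hp, Wp)
+        (torch.Size([1, 196, 768]), (14, 14))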
+ """ + + def __init__(self, + img_size: Union[int, tuple] = 224, + patch_size: int = 16, + in_channels: int = 3, + embed_dims: int = 768, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='GELU'), + init_cfg=None): + super(ConvPatchEmbed, self).__init__(init_cfg=init_cfg) + img_size = to_2tuple(img_size) + num_patches = (img_size[1] // patch_size) * (img_size[0] // patch_size) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + conv = partial( + ConvModule, + kernel_size=3, + stride=2, + padding=1, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + ) + + layer = [] + if patch_size == 16: + layer.append( + conv(in_channels=in_channels, out_channels=embed_dims // 8)) + layer.append( + conv( + in_channels=embed_dims // 8, out_channels=embed_dims // 4)) + elif patch_size == 8: + layer.append( + conv(in_channels=in_channels, out_channels=embed_dims // 4)) + else: + raise ValueError('For patch embedding, the patch size must be 16 ' + f'or 8, but get patch size {self.patch_size}.') + + layer.append( + conv(in_channels=embed_dims // 4, out_channels=embed_dims // 2)) + layer.append( + conv( + in_channels=embed_dims // 2, + out_channels=embed_dims, + act_cfg=None, + )) + + self.proj = Sequential(*layer) + + def forward(self, x: torch.Tensor): + x = self.proj(x) + Hp, Wp = x.shape[2], x.shape[3] + x = x.flatten(2).transpose(1, 2) # (B, N, C) + return x, (Hp, Wp) + + +class ClassAttentionBlock(BaseModule): + """Transformer block using Class Attention. + + Args: + dim (int): The feature dimension. + num_heads (int): Parallel attention heads. + mlp_ratio (float): The hidden dimension ratio for FFN. + Defaults to 4. + qkv_bias (bool): enable bias for qkv if True. Defaults to False. + drop (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path (float): Stochastic depth rate. Defaults to 0. + layer_scale_init_value (float): The initial value for layer scale. + Defaults to 1. + tokens_norm (bool): Whether to normalize all tokens or just the + cls_token in the CA. Defaults to False. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN', eps=1e-6)``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='GELU')``. + init_cfg (dict | list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + dim: int, + num_heads: int, + mlp_ratio: float = 4., + qkv_bias: bool = False, + drop=0., + attn_drop=0., + drop_path=0., + layer_scale_init_value=1., + tokens_norm=False, + norm_cfg=dict(type='LN', eps=1e-6), + act_cfg=dict(type='GELU'), + init_cfg=None): + + super(ClassAttentionBlock, self).__init__(init_cfg=init_cfg) + + self.norm1 = build_norm_layer(norm_cfg, dim) + + self.attn = ClassAttntion( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop, + ) + + self.drop_path = DropPath( + drop_path) if drop_path > 0. 
else nn.Identity() + + self.norm2 = build_norm_layer(norm_cfg, dim) + + self.ffn = FFN( + embed_dims=dim, + feedforward_channels=int(dim * mlp_ratio), + act_cfg=act_cfg, + ffn_drop=drop, + ) + + if layer_scale_init_value > 0: + self.gamma1 = nn.Parameter(layer_scale_init_value * + torch.ones(dim)) + self.gamma2 = nn.Parameter(layer_scale_init_value * + torch.ones(dim)) + else: + self.gamma1, self.gamma2 = 1.0, 1.0 + + # See https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 # noqa: E501 + self.tokens_norm = tokens_norm + + def forward(self, x): + x_norm1 = self.norm1(x) + x_attn = torch.cat([self.attn(x_norm1), x_norm1[:, 1:]], dim=1) + x = x + self.drop_path(self.gamma1 * x_attn) + if self.tokens_norm: + x = self.norm2(x) + else: + x = torch.cat([self.norm2(x[:, 0:1]), x[:, 1:]], dim=1) + x_res = x + cls_token = x[:, 0:1] + cls_token = self.gamma2 * self.ffn(cls_token, identity=0) + x = torch.cat([cls_token, x[:, 1:]], dim=1) + x = x_res + self.drop_path(x) + return x + + +class LPI(BaseModule): + """Local Patch Interaction module. + + A PyTorch implementation of Local Patch Interaction module + as in XCiT introduced by `XCiT: Cross-Covariance Image Transformers + `_ + + Local Patch Interaction module that allows explicit communication between + tokens in 3x3 windows to augment the implicit communication performed by + the block diagonal scatter attention. Implemented using 2 layers of + separable 3x3 convolutions with GeLU and BatchNorm2d + + Args: + in_features (int): The input channels. + out_features (int, optional): The output channels. Defaults to None. + kernel_size (int): The kernel_size in ConvModule. Defaults to 3. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN')``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='GELU')``. + init_cfg (dict | list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_features: int, + out_features: Optional[int] = None, + kernel_size: int = 3, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='GELU'), + init_cfg=None): + super(LPI, self).__init__(init_cfg=init_cfg) + + out_features = out_features or in_features + padding = kernel_size // 2 + + self.conv1 = ConvModule( + in_channels=in_features, + out_channels=in_features, + kernel_size=kernel_size, + padding=padding, + groups=in_features, + bias=True, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + order=('conv', 'act', 'norm')) + + self.conv2 = ConvModule( + in_channels=in_features, + out_channels=out_features, + kernel_size=kernel_size, + padding=padding, + groups=out_features, + norm_cfg=None, + act_cfg=None) + + def forward(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor: + B, N, C = x.shape + x = x.permute(0, 2, 1).reshape(B, C, H, W) + x = self.conv1(x) + x = self.conv2(x) + x = x.reshape(B, C, N).permute(0, 2, 1) + return x + + +class XCA(BaseModule): + r"""Cross-Covariance Attention module. + + A PyTorch implementation of Cross-Covariance Attention module + as in XCiT introduced by `XCiT: Cross-Covariance Image Transformers + `_ + + In Cross-Covariance Attention (XCA), the channels are updated using a + weighted sum. The weights are obtained from the (softmax normalized) + Cross-covariance matrix :math:`(Q^T \cdot K \in d_h \times d_h)` + + Args: + dim (int): The feature dimension. + num_heads (int): Parallel attention heads. Defaults to 8. + qkv_bias (bool): enable bias for qkv if True. Defaults to False. 
+ attn_drop (float): The drop out rate for attention output weights. + Defaults to 0. + proj_drop (float): The drop out rate for linear output weights. + Defaults to 0. + init_cfg (dict | list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = False, + attn_drop: float = 0., + proj_drop: float = 0., + init_cfg=None): + super(XCA, self).__init__(init_cfg=init_cfg) + self.num_heads = num_heads + self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1)) + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, N, C = x.shape + # (qkv, B, num_heads, channels per head, N) + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 4, 1) + q, k, v = qkv.unbind(0) + + # Paper section 3.2 l2-Normalization and temperature scaling + q = F.normalize(q, dim=-1) + k = F.normalize(k, dim=-1) + attn = (q @ k.transpose(-2, -1)) * self.temperature + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + # (B, num_heads, C', N) -> (B, N, num_heads, C') -> (B, N C) + x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class XCABlock(BaseModule): + """Transformer block using XCA. + + Args: + dim (int): The feature dimension. + num_heads (int): Parallel attention heads. + mlp_ratio (float): The hidden dimension ratio for FFNs. + Defaults to 4. + qkv_bias (bool): enable bias for qkv if True. Defaults to False. + drop (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path (float): Stochastic depth rate. Defaults to 0. + layer_scale_init_value (float): The initial value for layer scale. + Defaults to 1. + bn_norm_cfg (dict): Config dict for batchnorm in LPI and + ConvPatchEmbed. Defaults to ``dict(type='BN')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN', eps=1e-6)``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='GELU')``. + init_cfg (dict | list[dict], optional): Initialization config dict. + """ + + def __init__(self, + dim: int, + num_heads: int, + mlp_ratio: float = 4., + qkv_bias: bool = False, + drop: float = 0., + attn_drop: float = 0., + drop_path: float = 0., + layer_scale_init_value: float = 1., + bn_norm_cfg=dict(type='BN'), + norm_cfg=dict(type='LN', eps=1e-6), + act_cfg=dict(type='GELU'), + init_cfg=None): + super(XCABlock, self).__init__(init_cfg=init_cfg) + + self.norm1 = build_norm_layer(norm_cfg, dim) + self.attn = XCA( + dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop, + ) + self.drop_path = DropPath( + drop_path) if drop_path > 0. 
else nn.Identity() + + self.norm3 = build_norm_layer(norm_cfg, dim) + self.local_mp = LPI( + in_features=dim, + norm_cfg=bn_norm_cfg, + act_cfg=act_cfg, + ) + + self.norm2 = build_norm_layer(norm_cfg, dim) + self.ffn = FFN( + embed_dims=dim, + feedforward_channels=int(dim * mlp_ratio), + act_cfg=act_cfg, + ffn_drop=drop, + ) + + self.gamma1 = nn.Parameter(layer_scale_init_value * torch.ones(dim)) + self.gamma3 = nn.Parameter(layer_scale_init_value * torch.ones(dim)) + self.gamma2 = nn.Parameter(layer_scale_init_value * torch.ones(dim)) + + def forward(self, x, H: int, W: int): + x = x + self.drop_path(self.gamma1 * self.attn(self.norm1(x))) + # NOTE official code has 3 then 2, so keeping it the same to be + # consistent with loaded weights See + # https://github.com/rwightman/pytorch-image-models/pull/747#issuecomment-877795721 # noqa: E501 + x = x + self.drop_path( + self.gamma3 * self.local_mp(self.norm3(x), H, W)) + x = x + self.drop_path( + self.gamma2 * self.ffn(self.norm2(x), identity=0)) + return x + + +@MODELS.register_module() +class XCiT(BaseBackbone): + """XCiT backbone. + + A PyTorch implementation of XCiT backbone introduced by: + `XCiT: Cross-Covariance Image Transformers + `_ + + Args: + img_size (int, tuple): Input image size. Defaults to 224. + patch_size (int): Patch size. Defaults to 16. + in_channels (int): Number of input channels. Defaults to 3. + embed_dims (int): Embedding dimension. Defaults to 768. + depth (int): depth of vision transformer. Defaults to 12. + cls_attn_layers (int): Depth of Class attention layers. + Defaults to 2. + num_heads (int): Number of attention heads. Defaults to 12. + mlp_ratio (int): Ratio of mlp hidden dim to embedding dim. + Defaults to 4. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0. + attn_drop_rate (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + use_pos_embed (bool): Whether to use positional encoding. + Defaults to True. + layer_scale_init_value (float): The initial value for layer scale. + Defaults to 1. + tokens_norm (bool): Whether to normalize all tokens or just the + cls_token in the CA. Defaults to False. + out_indices (Sequence[int]): Output from which layers. + Defaults to (-1, ). + frozen_stages (int): Layers to be frozen (all param fixed), and 0 + means to freeze the stem stage. Defaults to -1, which means + not freeze any parameters. + bn_norm_cfg (dict): Config dict for the batch norm layers in LPI and + ConvPatchEmbed. Defaults to ``dict(type='BN')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN', eps=1e-6)``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='GELU')``. + init_cfg (dict | list[dict], optional): Initialization config dict. 
+ """ + + def __init__(self, + img_size: Union[int, tuple] = 224, + patch_size: int = 16, + in_channels: int = 3, + embed_dims: int = 768, + depth: int = 12, + cls_attn_layers: int = 2, + num_heads: int = 12, + mlp_ratio: float = 4., + qkv_bias: bool = True, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + use_pos_embed: bool = True, + layer_scale_init_value: float = 1., + tokens_norm: bool = False, + out_type: str = 'cls_token', + out_indices: Sequence[int] = (-1, ), + final_norm: bool = True, + frozen_stages: int = -1, + bn_norm_cfg=dict(type='BN'), + norm_cfg=dict(type='LN', eps=1e-6), + act_cfg=dict(type='GELU'), + init_cfg=dict(type='TruncNormal', layer='Linear')): + super(XCiT, self).__init__(init_cfg=init_cfg) + + img_size = to_2tuple(img_size) + if (img_size[0] % patch_size != 0) or (img_size[1] % patch_size != 0): + raise ValueError(f'`patch_size` ({patch_size}) should divide ' + f'the image shape ({img_size}) evenly.') + + self.embed_dims = embed_dims + + assert out_type in ('raw', 'featmap', 'avg_featmap', 'cls_token') + self.out_type = out_type + + self.patch_embed = ConvPatchEmbed( + img_size=img_size, + patch_size=patch_size, + in_channels=in_channels, + embed_dims=embed_dims, + norm_cfg=bn_norm_cfg, + act_cfg=act_cfg, + ) + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims)) + self.use_pos_embed = use_pos_embed + if use_pos_embed: + self.pos_embed = PositionalEncodingFourier(dim=embed_dims) + self.pos_drop = nn.Dropout(p=drop_rate) + + self.xca_layers = nn.ModuleList() + self.ca_layers = nn.ModuleList() + self.num_layers = depth + cls_attn_layers + + for _ in range(depth): + self.xca_layers.append( + XCABlock( + dim=embed_dims, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=drop_path_rate, + bn_norm_cfg=bn_norm_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + layer_scale_init_value=layer_scale_init_value, + )) + + for _ in range(cls_attn_layers): + self.ca_layers.append( + ClassAttentionBlock( + dim=embed_dims, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop=drop_rate, + attn_drop=attn_drop_rate, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + layer_scale_init_value=layer_scale_init_value, + tokens_norm=tokens_norm, + )) + + if final_norm: + self.norm = build_norm_layer(norm_cfg, embed_dims) + + # Transform out_indices + if isinstance(out_indices, int): + out_indices = [out_indices] + assert isinstance(out_indices, Sequence), \ + f'"out_indices" must by a sequence or int, ' \ + f'get {type(out_indices)} instead.' + out_indices = list(out_indices) + for i, index in enumerate(out_indices): + if index < 0: + out_indices[i] = self.num_layers + index + assert 0 <= out_indices[i] <= self.num_layers, \ + f'Invalid out_indices {index}.' 
+ self.out_indices = out_indices + + if frozen_stages > self.num_layers + 1: + raise ValueError('frozen_stages must be less than ' + f'{self.num_layers} but get {frozen_stages}') + self.frozen_stages = frozen_stages + + def init_weights(self): + super().init_weights() + + if self.init_cfg is not None and self.init_cfg['type'] == 'Pretrained': + return + + trunc_normal_(self.cls_token, std=.02) + + def _freeze_stages(self): + if self.frozen_stages < 0: + return + + # freeze position embedding + if self.use_pos_embed: + self.pos_embed.eval() + for param in self.pos_embed.parameters(): + param.requires_grad = False + # freeze patch embedding + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + # set dropout to eval model + self.pos_drop.eval() + # freeze cls_token, only use in self.Clslayers + if self.frozen_stages > len(self.xca_layers): + self.cls_token.requires_grad = False + # freeze layers + for i in range(1, self.frozen_stages): + if i <= len(self.xca_layers): + m = self.xca_layers[i - 1] + else: + m = self.ca_layers[i - len(self.xca_layers) - 1] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + # freeze the last layer norm if all_stages are frozen + if self.frozen_stages == len(self.xca_layers) + len(self.ca_layers): + self.norm.eval() + for param in self.norm.parameters(): + param.requires_grad = False + + def forward(self, x): + outs = [] + B = x.shape[0] + # x is (B, N, C). (Hp, Hw) is the patch resolution + x, (Hp, Wp) = self.patch_embed(x) + + if self.use_pos_embed: + # (B, C, Hp, Wp) -> (B, C, N) -> (B, N, C) + pos_encoding = self.pos_embed(B, Hp, Wp) + x = x + pos_encoding.reshape(B, -1, x.size(1)).permute(0, 2, 1) + x = self.pos_drop(x) + + for i, layer in enumerate(self.xca_layers): + x = layer(x, Hp, Wp) + if i in self.out_indices: + outs.append(self._format_output(x, (Hp, Wp), False)) + + x = torch.cat((self.cls_token.expand(B, -1, -1), x), dim=1) + + for i, layer in enumerate(self.ca_layers): + x = layer(x) + if i == len(self.ca_layers) - 1: + x = self.norm(x) + if i + len(self.xca_layers) in self.out_indices: + outs.append(self._format_output(x, (Hp, Wp), True)) + + return tuple(outs) + + def _format_output(self, x, hw, with_cls_token: bool): + if self.out_type == 'raw': + return x + if self.out_type == 'cls_token': + if not with_cls_token: + raise ValueError( + 'Cannot output cls_token since there is no cls_token.') + return x[:, 0] + + patch_token = x[:, 1:] if with_cls_token else x + if self.out_type == 'featmap': + B = x.size(0) + # (B, N, C) -> (B, H, W, C) -> (B, C, H, W) + return patch_token.reshape(B, *hw, -1).permute(0, 3, 1, 2) + if self.out_type == 'avg_featmap': + return patch_token.mean(dim=1) + + def train(self, mode=True): + super().train(mode) + self._freeze_stages() diff --git a/mmpretrain/models/builder.py b/mmpretrain/models/builder.py new file mode 100644 index 0000000..2ea4e25 --- /dev/null +++ b/mmpretrain/models/builder.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
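# Illustrative usage sketch: the XCiT backbone above is registered in the
# MODELS registry, so it can be created from a config dict, for example via
# the `build_backbone` helper defined just below. This assumes mmpretrain is
# installed so that `build_backbone` is importable from `mmpretrain.models`;
# the hyper-parameters here are arbitrary example values, not a recommended
# configuration.
import torch

from mmpretrain.models import build_backbone

xcit = build_backbone(
    dict(
        type='XCiT',
        img_size=224,
        patch_size=16,
        embed_dims=384,
        depth=12,
        num_heads=8,
        out_type='cls_token'))
xcit.eval()
with torch.no_grad():
    feats = xcit(torch.rand(1, 3, 224, 224))
# With the default out_indices=(-1, ) and out_type='cls_token', the backbone
# returns a tuple holding a single (1, 384) class-token feature.
print([tuple(f.shape) for f in feats])  # [(1, 384)]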
+from mmpretrain.registry import MODELS + +BACKBONES = MODELS +NECKS = MODELS +HEADS = MODELS +LOSSES = MODELS +CLASSIFIERS = MODELS +RETRIEVER = MODELS + + +def build_backbone(cfg): + """Build backbone.""" + return BACKBONES.build(cfg) + + +def build_neck(cfg): + """Build neck.""" + return NECKS.build(cfg) + + +def build_head(cfg): + """Build head.""" + return HEADS.build(cfg) + + +def build_loss(cfg): + """Build loss.""" + return LOSSES.build(cfg) + + +def build_classifier(cfg): + """Build classifier.""" + return CLASSIFIERS.build(cfg) + + +def build_retriever(cfg): + """Build retriever.""" + return RETRIEVER.build(cfg) diff --git a/mmpretrain/models/classifiers/__init__.py b/mmpretrain/models/classifiers/__init__.py new file mode 100644 index 0000000..5fa276f --- /dev/null +++ b/mmpretrain/models/classifiers/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base import BaseClassifier +from .hugging_face import HuggingFaceClassifier +from .image import ImageClassifier +from .timm import TimmClassifier + +__all__ = [ + 'BaseClassifier', 'ImageClassifier', 'TimmClassifier', + 'HuggingFaceClassifier' +] diff --git a/mmpretrain/models/classifiers/__pycache__/__init__.cpython-310.pyc b/mmpretrain/models/classifiers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0126f3d9238e6db86e1df245f18a05660de521fd GIT binary patch literal 381 zcmY*VO-chX6n>dWI#t`c>LCWf*-H^o5Z$<`t87Br%)~%4DamxtbYix0h1&@|JJMiPL+- zRK^^1W0#)jZ~~H`jPOt&1-yx4ax%=Ge)U-w2jqf4R8r1u0S{3!H;0Nbt~FzC;7*cQ zg0G9lSvxew>|8rIuKE$RqSe0H{_o=(##FBc#?E)oVY)I_>~(vYmiOteXXx(~KR5Pb Ai~s-t literal 0 HcmV?d00001 diff --git a/mmpretrain/models/classifiers/__pycache__/base.cpython-310.pyc b/mmpretrain/models/classifiers/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1416cff253ddc70911f80fc8de7fbc458c2c628 GIT binary patch literal 5047 zcma)AOK%&=5uO>2D3YRNS$=0Xn}#-tMMI`HmpurM5wGn84q|N*FR%bN#&9^@B*&Tu z-92MPf^u04@Lo1RZutR00AKPm@+0P&Q-Iv|7QlAC>UnS|$r}&MR!?_Nb@f+OUsXF^ zUF{fn{`&f#a zQ)f107)uOl}M;ZVno4t8MWsydi9{bPb2d|BmgK#@OUb zym`DjXo_X1-x^!I#oNdB3v!T zA>F@QhCxHjqcJv)W!}@Xs^94_Ja^?p?dTr8`H`PE-gv^cxbG#~EG|Ip?XZWK^4-8c zbO~qX#nITGFk05HJ|_|P#n?@QM6o1hyKyAuvmD4fGbv^=_JmSU=kzXLOrdTrn)IEo z_M-{3o2ALZW+bxD9**@++ssecz8?fE#i2J#WB<0c<0f;V*uhL}GbO}=s2z>c&M<^$ zOnm4alGpUlk}}j-hOrck!zcX!VA9dc=QIR*`fid)f0QQToq)e5%x3_(6OW$0pYGV> zZnU7T_#*;}kx;_>W?>!z91x@du-c#c-V~GCZbId2$1)BVG@q zJVLO3t2qb(VeR%TNkk@B(10kGR5}wKWjOQy~fi)aQdxPV=GXDGN zEg8qjtuO>Q0LqURy5uD4mR_C5)vXaiu0K1FtJvp5{1nk;r(v6&w})5Ao$gg3+Ap=p znvk8!$TP9Fe5g;yyocX+7ttif5yj6US^EZhkohri3~LPbHj9;p;Ao zgtynvZ2AL0vSp0Aa71|t0$-VO4S(eMs@Xse{Mr*Ir>@_rJD@IGQW+Ec+OxD!Qp|Y;_E#-!@ zK&Z^72=~aD%7^-dIw7v;g%iS-`{j~h5HqC_*_bor4tbzSz@?Tvxr`2w6dT3KlxbI? 
zAaWzl5c8nPDq2WZp1+uy#%aKd5P=1yz!daSJ8U!8esh~`!u5!0L0=HI8N^C$?##vA ztd{PuUl!_+FYHg_K)?Z#{aEhlPjfGWIs^;Dza#||EFM>?v2v2k8>a}~=zz^)mDG3v zkNNerz4sez?p$;n~`drkc3qRy1L6KNbZ zSr&d&v+Hi02-pymmrQ+CnwZBzMb{I=W5l&RfGnbQrhW;Uz zr88^>t?Kx2N5O^i<>|t_U!JVF^KH`JTR8?EE}8`>^H;q!T@-Q*%C^xAZaFLVXEDey<4` zF$8Pw`EXlo|J)5y(O(d9J&&d9HPG`fYe}Aj(*aAzNBx1qVJ0H37-A<^nzUUw_f4v6 z$^#WtFdS}|Qryx~h+_1MI4%YkjjDauOr$A_0sWTj$>N7Lm){RLW2jIYg} z=|0Bb11f&N17qPL0fPpptkHq>9^p{47}-~)qnkSa5}Sa=q#$;LnceXWnH?0d`!iVK?1%DuAERoxFv7v2z!m`~0 z0*8+NJD4jsZ|CrX6Xvb2%eo;h8a@g+Tnp5K#TyVoKdSuHPtl-sG82?es{Ly!q0Fz% zm&QvI*DH%Rj$23Oed7hCdfTO9=4_vEab?IamKBy&v{iv~+8}iu*;aWs{i@EtPpBq8 zExi&-mY}DI9nc>0T$C9oNEGp2#N2d3j-_A5LelMb9%A(AjT?KYQgGE{pC^=j_QU~- z0@NLHlG2wt{hch*>tNT}=}dwn3PGMP!>A(bdKk@hIy@TIrK=%BzSO5d0E5p?L5*vh z&Q z&=$1cN5;dhH&A!bDL=+$9Un(ji3{^eQn}a2H~ScUNp7Aq=1-`n_g-B9 z`3c4q{U1lSYnk?2)bt;IpAxu1p&;LtbcyL*p(|5_vd?j{j^iLx;%10`*KwYuZcxlD zI}VRM$B`(Qjm##}*Y}{TMTL%#$w6k1;y93WbCVxXLt&aN>2S!JIcrHOy(KA{HIR%Y zqgTRPA3C!M64|5a)#9S&i6FqPb5=P=s|f;q-J@He`V>veuuW?Nhkw&Ft+!2U?Tyu3 zH!YifRs-X&G5@w>S#PXH_x1XZ38;;?S_W9Hjhfxn1m9cD8t%xGWXt>nP5iF{+%H$1 zPOo*9KSM8Dt5PTx7=c`-!3)(O4wHI1`RFt2-D*eChtk$;-OgdZL&;2a(X5*n&6d?Q JyXMu#{{V=fh#CL@ literal 0 HcmV?d00001 diff --git a/mmpretrain/models/classifiers/__pycache__/hugging_face.cpython-310.pyc b/mmpretrain/models/classifiers/__pycache__/hugging_face.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..480dc4b5614fd30720e3b942db7c83d859cd4e1f GIT binary patch literal 8178 zcmd5>OKcoRdhXZs42Q#q_>d)ME%(Z6n8?~9#Le52oMU{T)aSlAPBN+fDD2FzT_U{o@*U>Z-q9|Kt1rzb2lWt7>@u^4I^^{`~yJ<_>a<|f~Ml2zb&b!Rtek4Ur?DAUeWtN0Up_XE_2w$mA87U2Utn+Ac3sbA z?+L~QXSV`Bts9y3VUVO*Zg@V1IP)*6j};pjN%*rQQRt+Y33q%7Q=c2ZH}X z$4vO1|B;vQo1vE^K|A2Wrv-{nE&oXV4Hd36b*429rZ-J)G2>XDG1(iu#4J`~_7?^Y zV&cVdeqLfT+~%c=bc8aS&FeMwzg_u>*4Eh^n}4=|!&TV#*djakOmEJxC0^Z|Z5wQv ztvs`uHTBI_`5d3WsI@J2p1p$h1$Kex3hfu!t7u=GDYG|6V{A1_j{6PvooA)yIrbX6 z^h|3m@n!ZpbDn8feFgRJvdgl*`V-C4n&-jlx3bj_`khV?b?$h6fkED%%xK!(uGis? 
zKjK>Av}55E=$&rNc-W{`9Xy}x28kkeJm2TNG;zEzbb4_TC~-Mn#GFU`U_Tbj@kFQJ zT=F}h#yDo zpfe=TCo2rOMVfDun$pVmMJE}rucnKD-fx6pd=ykh<7M%tqsAkrzzLfgP*H+-Sz@gmIEMYm5MQ)43^4icEBf0?HzA#3Y&6 z?&Nwc8o!nv^!SapZwk=fj#3f#4n9Qn?R7cY-RD7PH@$JKakbuRjn{{WgUI#U9kq#d zCmy1`5|-=-UO5R*Q~Ym85h}#-c0xX}5nz4DfJ1_BDs>=k(DRPt^*aiG*2t-RMjL*Z z>%)$h`ny8{i2#mbs+0>9W4v26+efj0HS*L8LO`H0&UV}fUIdY|36V0u!$1rzaN@lU0P0%`_TkfUjj-c`vtknf$QZ|Oyxk3aAWYm&Hv~^2p1$*gtDqQK8)q_9 zs84KBf&2|;dNXU-z$B&A$OM~`=WHNoU~AXbo$s$Z@4ow68HDwe2NM&X+%=%GAf4zGoG0dMjzCI2O!M@xh1S$#*f!R$$8P&<`W>CC`P z5kwyw$){=UWNu)d>PP0l#0>L;63T^vwznXBl;xAfQ)Dy)?c^N3mQHn9Uq*d}>P$nx zT-g?M?88e14L<`tvdYlEWb@LUxWn(dAsMMyDzg(Kt5N`}d~Npnw*;g0)vOBF$5f<{ zm6Cpsi>%_hQf#i6#qPv;Dqca6ElfqzSxG8qb5^!1EXt|@+yz3x$mf|6MaZgR)(^QL zA!j9NCAm&vFm;epG+*loj|GU~-Lanmin}}_Kb&231@FMMa^Yg?l)Gft?rt1El3m^S z2`_fQVB5jd?DBXNg;7%!YWR8AW79ntTc69Ugoo{{Dv#(=SeI4DKp;p@&r<5)07BBt zR)|_)p{=;|m7dMG3b|bOEA8v84H3ub##j#~s+3J68)KYHhB2=zgWleOcon<3gE!ek zQJJ;$nz3k94BN1D`}v}2>6UKj#&ZK-hF(Pr)oMn~sOq+E8@67h5&Ge!GeNR3REW$@ zc?4hVlcf$Hs?&QDZ}K6EfywkohNwdKwSj)B?;#IIn}N0!v>5|^085r_CfY8c%^bF+ z7PAKC&ooy05;^=I>veluT*aJ{-&q*~Aju!j-;JKY!7+)B&gDbna-&`n-vMD}hjAxJ z6D6Q))KdGnAEiN;-xeYkbxXV@$rPeyH3?dY@=BRS(iRO`P!=0RT88t6XJn7@k<6p0 z=rw)#E*gigoz1j-lz0y#d;`Vsp*p>iVHH$TfSWw5v3LupD)b#*;?;KHlVeD8`y_WB~yUV7AdVfWIc;?@?(`0vhVx-vPpI zdZFJBDH|p~!*NYO$*||BI2y@A?nNnqCea}drL!i5pm#D&qo_I`v!)#5CsK#=_*`~5 zO05z4rMr%FrXv?m!|7SuUUzP;JB>!8PJwubM_hQgu28+IEUJ>DQh{68qPj>LCInuD zpjssYa{qCAVh);#KjDa>M+Yk|@jnjIh5&Dvq zl60qZQ5x+ZfmEtsZD9q-b@=MpDo7KE-vRA}>Xp$z>L*pNjS2(UFyo?zP~lvvo#?o2 zvz{RsxP^NxIBzSeqSe4%+(c_?9&1obZO;NQS-4w6*l4B;+_(Qq-VK*0Fg@dpP&Gc~m;GkIIA6Noio8 z>L>P~Ou5A4R|kNGj&@{#o~QwC4;Mc-B&~5$PAkV+afBRmjmFgN!|;Bbr0?XnEb{gM z2_^7~P=A+ute}#2BR}jjx?7ob{tCoAWZXEtmbi)1!khWWdhc^LGvc`QfQLI~NV9~pnk#WASY z;}H|Y?s{bkG?a|h$fPL+uJ)W{v$SDXZ5T)-=PoHsn}QVbEdPKvArY$pAkoJU9lM`=}N zV{?=~%}ama;=0Ep?qu^3-!J4)nf@r#pJZif#a+>~&Afrt{t0hFzaiR4F3uy#fZc4o zgew9!hi7GRKDqSaBj>`0w4__8kQKe69hFl8VX){}2a{HhDw09SG=8RiNp}JjuxDnV z4JsEwH<*FbM*W8NxGM8-b6b3lxx{BE>K1M|;QlfTXG+A6sokaas%TN&qhbd|W+P;R zm&$700KWtxSET{jLOUZjfN}lTacKsDf>nV$M-K^@mDl$a9=DLsjUYPfkUHW~* zbpjVTe`7oLDCk89tE6>GCL{-BdKou-`LGNk~DW2LGahPuQM2kJ6pwa^1y z>id+%C6bBRVxdpo?n{Y44)_61;OA6a#*A)6lq}_u3LKQYBo1bk>xyEFg3aUt3QArV T0bDhG33-1FUp3t}&+GpMWT6T} literal 0 HcmV?d00001 diff --git a/mmpretrain/models/classifiers/__pycache__/image.cpython-310.pyc b/mmpretrain/models/classifiers/__pycache__/image.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f74b08b8868a78e34297d5407a231a37f6350cf0 GIT binary patch literal 10161 zcmeHNTaO#Zm1Z}aO%BBwNtP_zStlw_;>=LXp*)fzdk1T864}Wr8;^}hL4q;7%^r4< z)0$>CQ(cra1dUxp0`io+CO{BC0`lUYuzz450tCo&zXn(!`3FhXR=!i!&FJChHWK^Y_uhWzn|JWkeA|`o z9d|Si_-_nqZzOA9bz^=jaO2n?_+0dfO!0RveTn}E54^Hxc$GcVGxn-0X8zB6*{gEP zvv`eLFM;XwZsTs_y`H`|())_O57vHA85myEJM(yTuMTQ0@3Y=2Z`@n)*3hEK&%9JY z@3Y(<*xots{NvhQ%ln-7oOj_dO*?C;+Ibhbg;vjdmmXVtYb&OAd0wAWx~OTQ-3#7} zpmx@K3A9b`Gu}U-*SQ~5tjgYb2$LmeZ;#wzK`id)?Mg*`WBs|TVm62bizj0)?)x$K z=2d-{gwLcKA9kBfhTm7jFy2v5^tSKz5BH;xvrW(M%k~btd%*LLqsW^Cyvy!zKJP)j zc2Kd?8;!<-OX2!q7w)QMrN>}0Vq+2Yd5o!WS55@S zgNR0RIz?_3M<;SYWWfP@n-APcAY-Dvi#g^z9dOq>fhSd!^W+g*oMb~{bVEqi4f>P7 zm1T(p5it&avsqvbvU!}&Ee%+d7tM1wl762vdEiRs4{+y9@#V)Xj3kS>>@YVNMX|&! 
zU?VOMA}6ak>u+AwB=Gd=JW@{6V^=C`{9rTBXRF4Eq41f zWp_U)iv!RSKvu5cJ9L1RxgzO(=4fLMMqz>(Zh;!xA z)ZUE*XC9ZXAE<@Fs$h>!pg?|D63I!ssXLMA+D&;XY$(kuHp_m-=pcw(`QehqiT2%n zKk(%diw2C}=i&%^8xvVzARVJLtuDJgP`jqX`Ug=IbLt-kN6Xf!MDFBW=K=8@XiU1# z0ayxjj`7`(!ht_jh?cje<@Lkyq^wUwa?U9O#V}&=nD@zmcuXFRd58IuJ@Bzhut+6F z*KY+e8KL(J9IZ1KaDi^=o>d6D(_7NANBouOjS4HiOiEfW7-r$i6O@@YdOCY(6`rJs z0=50AlkW!pR4r0j*_KOuH)&*WoLB^jiA5NaH1j>1tju>-a{k!rB#n&IexqQ9>Dj_n z#TCPcsLU*{GBHF`!nwe+`9t%g`ph1$`jwAtY0PX{d1Q)LXZ91*t9sT`bWzW>S&g1$ z^@%mJpBf+8;aSzXcF%rf#y4c$vt?suKQew=`I-61_`V@mWOD{u_s+~f->d(SdK)uC z>K?ZhB?FXbVpt7mEEKXHB!>#9=SfGLrF8oL~$PCN-iu>q-0HP zNhbrJ>DP{}1d9)%N#GHLxU5I8)FWN*g<yF?X~TuS$rjRHlM;P$G0lJZQz@geUmlMnpJ-aC;4aa-oJ0O zZTG)09C?6~P$MX;A#4WumM)bKT;XL74pffqNAiHptuJ{;lE*T2GzssBmbR0b4r{9n zOcNYcm~p(r*0o&LJ8T^~K)L`N2C;SWs@4l;dTZWvhkY$&hcNTOK@@Nna`_+toECS4O$PS>=sOz0<$M3=ZX{s(wesh{ z3sy2-b6$vPzM;1zl zNd*@GgKN>urBE2zWh6nOKyBbgL*$Y0$bf=AiEHS?lozC+)ltcfG?IjXuq-aDV>xJ` zl}-C>N|H|Zu99==E2*kf^h)kndv{zmzC>bkx5I9AShw44lQIo?$b}o^BBnOQ9g@0b zxkQe;Jjwx9CbBR+qN6G0ZcsKwc3W=5ShMdkaB4-rRb%RGWA~MQ+yE+nOM{zsi!yiV;w< zLg3(#cchG!Kpv$c{*j2azBLJ@Khox|T@$ZR%?1^lRJ5s0jiRVne1Sfw;#K-uPvP=A zc!?=<(YQ?Zq5*3{*Ew_g!lIo>d-SVlo&5;@>AHn09;1M7W8AOAt#2T8Gcy6+RSn*A zlTbwq3sC>UtP0pC^Ke<22kqUMUJZ4xrgc@(m1IJY+JVn?4K(Xy2nn?cA|bc{lps6j@Da|%P$M+Cr`KeV8249dP4 zPDW0;y*rFv`Uijoq$fJ;ia#1UicouTXTuK}&NuFN6jtj`z)aW;1Re@H)0n=?Zt0+i zLjHn*qHRh)0CeVjuj`yjN&}P1Dl&W3O>u&Zj@pCACPir54<-u2N62XSv{4mzd9tLu zT~i*&?LRhtA?tE%=zO3hO#DehWB z=X8+^z^pR>WLe0hQg%THaP0Qn;B`nArD-qG6|(PY;gAAK)kN8A663siiAcTxO!rxo z(mM~ena`7fB)9D@`)ZaG)P&0><*m2gqPZZk18)MRzceOQswCJ5GaX{tvZl9GNPkjW zu&SKMG>w>bQlW?T>s3N{vHRjXqYTlTOpnziJ`m~f%?%w<$5)Y3go~Kow%>5xc({N4 z8se4hLpc8%Z9V!jqVv3F7ZO1S7$CS+Z?etTI&8bcu65Y;He;V>?`fP+`(!IdrVA>r zbD3=9j^ksb$yW$KXK|vNm`M9rYm7~Us!rXa{Yrf>0Ug-{JJ-R{wHvBv!=WWydvhZL zosHvku^mZ;u=VM-;B&w^Q@d#_kl5QqeOJS0i978B-tnh=^8+w&6N7*0CS4zvM_XvO zeeDf!sjkx-UL$JTpt^nI^oC#B##+3N|7cd?@LHGc0t?fvQm*^EM*oF0y0k*H3jZ$+ zCv^PtX!q$l2(3Q70krw_2GHU~{k`60?+d=A4Qi^zY%_~#b6c94$~?QF&J7ke!{4yN z<8TYnv;%7q#=xx+7gz85a%&$4hg;+Ao6cBn-MsN||K<$^>s!FV)1duN!rx`u`hSMw z+&C{$*+r}+(?Z_Gf$w=dM07uj39Pddq3f@`eyzmZ0?bl7;JpB%Kk2>ONK3;`K( zbqLZZE7vyM02%aK*^y$Fu~7nc7vrG6!30g35z5mtDxrfYuB5Q!Vd>EG?5;=%sEt zWdJ@<=aJe`|8NIRF?|F$xJXzc%Aza)Eqxy77u7{a^;fB0Me?ek za9*VZ5r93Ur?(#9uSF=4h&(iBpDg^Dz=q%mJcPZ=m0fGx0)P@opnJ69Ojly3dJ8~atZu8hX zIXmRiQ9T?rk?Ben>~yEAL@j$|(<>~qHt8QHu--!F9%)J#J?LU_m03~eSacqgT)oQ@d(@Vq50_+BNeJwr&32uCM*h?7-H3VxdJ7EzD~3lV-cIXXD*8n$OJV ztWUE#8+Dh?tHZ|j8V=Ns#yHo|6QDVYe?gHnH0*_;_zb;XD5RhD;iEXxr<>0gYVyRi x5=CN>Qz`01;OzX!QVUxRUy#AvOeBqe)-B!wjO(=TIUFM508t$y+SN&vP5eb)Fdv1|SHM5Giq`^^fh9y;(w`AZZgjF(b*@NV|^gHFsmll@xa6;c&VE zj5wHqdIqA10xzMe?8~~6ODZX6tH`PxvN`M@kwXso6Z)EqeRGv>a@NZCx&aV?D3_0c zYFhpK$LsgL_q}Giv{bS1`{ytJtM%`1Sl0hgVft4@;VWg#{qMwf*J(O7 zo(nPv02n*JNBBM?px}Xo8?j6LbvQ#w$-gP7j=yrSG!BiC7mzE%iWdciq3m+ zt$VI{4*3!<$E)47=9+DZrQ`D_*7q#Fz$?!zUJ+|Yg(JIpf!p`3`r@y!H*33YtHlo` z7gF%MQJB{q<$e%lxhj9y%cC?2Vs!9+_~G66KDduui-6QMY?4m*9(%W!>urZNb8curk(4CF>pJ zsSG<;BWk_cO}U6Owvl6ao#|4eQepT#=tP+r%z`i!y*y(<9J5}UMdsF6kZ`sq4)#;Y zSs>f}u1L@+ZLz!~SgW6e)KAZ~L(@0RT8&VOAQ!$av74Pd?`2!puIV1FAj=y;bZxrJ z?HymQ)X#*TE6Q1(GShOQZ2-u{Fk_VqqR1 zZ0eVIQ@)~v; zllkK`gF0}s(=B#~_TQ!}9cxh-(52$YS=L~8MJwpXxbxfy#;k&Mc4+)Ye$W%Q-o7Kz z_q`;SY46|zWZ&M@qy2pmwLAH(8;$GrovHP68AOR6w%TSBn=BoJ#1>RiFx?&{ZI+2V z$NvpoYcw?2jg5|sHv))=IrLVFEpsp9hN~>-w~fAT5Egw*8-9or(-ONu9(KkZ%z3*a z@1&RsnuohNm6nx&}ACs0%6q>=llQeIjnnX0!updETNT*qrc4vju z72Q-Gu>DRHqcu+VX(&(k6K%(*^6m-2XYQQ650jYM8wYUvIA%|#p~y1qMeET^SE#L` zZNO;Kn--0dUO%4^1Wnms_gY#C=5#FUiIBjbGg7ck7z)#;8w14njxXPdGx8-LP1Z=| 
zWftHPF|5Jv3&FOuT5s)~(Q}9PH?tQ=X>!Z6a+tlMD2eh{C@=xb==6kPDOp9V#Y}^u zAdUuF-)0r;)!K07%HF=ggAXya`~%T=ETG>92m0jiJ=Jb_3Vm+hzRjX;FO>vS@FEid zQG*8Ic}360{dfCO%>6Nrh*R0pP_p-1K-39pJeYB3zzpp!{3PfCQl!Wdk$>wts>fC( z_^k)BFX}TJGA`y8n_qeZ%gy%GnwMb<`cw@G*S2wEll{pid*_`$)yZuCw4bB1o`1uP zoo(+)xR(v|YGj?DH?=J@|NUqnHa^AzH?Cj5{?H6nF8L2to5#zxVKl$~F7YN(UA0K) zJHpTH_v=OFQiWQWK&L8=U_zaH$tG2Kte>)mndLc3Rw-FS5)#d#AJNMNd@jBfeA#~> z8M@r+ThfcHLwD#NgLHD|1XOeApx%}jd12^0vE;>}gP!h9E4PoVN6xT_5*QbVBRCgP z(nCwFFQbCl;67*Tm!E@8)muTSaY8CI^!V?UslHSGK2a^2AaF`^lf z&+0TUsjsREd=NK2P!+R&Pe@hveJx+Vxv1%YR*_7uV=t@pqCet*(c6v86Sxrf=ogzhM$YJk5Uc!>!#g}~(NqNDwt4_@+JD%g(-V5YWuGpUK zIi6jiHhb{eERZzD*Q2~#Bydq7DfswDdHU|)%f63fSm5@aBNw1*7AfR?s&P=Wh8j{% zsK2f&pzdp^bH;T!!i-_z7ZxvmhIr`j?Yg%u{}?lBsi_jKf$nLrd@p$d)aM#X*yVw9 zxlwoJ>*J1c6FF7=Q9pq)iT9*TC7S8*$}_N|s@lRc1JueT6_a%uuahg(wFFNFH^BoG zvSlP?NT+O9^;dHS>vJ+1b(alv4IL!oALS?Vq1GHm_{iq=kt4}GZ8|?m38!~*muik| z$e=KifxE40qsjdif`C4Nd{80Er7eRE{sz1Q*#36TU`^V~h^IicFlVFg;N4+h*fF|N zN6n~r=4ahuhOsq(j-^hJ1$i!wSl_|6)!KHN-=heVD4^h4uo^F5^(0+=^m#E>M|;s* z-$Spgf}{#LI?y!t#o#M*a-8ljuc2>PKr;S$$dE99fG@j@WUOSsRtRqB4xN?*ge(w3 z9u|j1pqd?g0Vui?#9=>%t!pAJ5Y|!j6O4E($oj~eQG7<)0p3Sv&8A4^Y)nCfU^HgK zIH(8OI7WO}H!))`pjvr{X{R=EwKSZ58{3=g&L(R#8g(MRZIK8W#IsyxT3eZ+-gmJ@ z6Yq}+c#wd^8se?@pSE6FOFR+atdoP84Vd%~CPm~mN%MfQQ=<8ifL8Y~Y=%Hs-bV5dlp+76ZxYZ{)Yb;>kV9CifZ$rWq*W3xepbC?)Pycm!-r85 zFNb?YDEa5(dFDuo#aA7BMq7x9OmO}p3PX6mBl{3E z41N$K@5CVjr~#>qn?r;}yJmoM{Ty&M9!=TU|5NyRDN;QJqKP?=k!A!;Y!-;lh}OeyZkMP_|X!5CZTsWaX0Vsmt;0pMF1Kn|t z3U{rCOP>@Di-+D}X;?fi4!slm*c+CJygz<@2+M9;hr}PKHxTJz?Gs1$b&gAU`N*0c zp~u{yF?DYcf0SnV)zR~V4yzG?0d5GMJ0jpy8h9@W<36WHhb(R7-QcN4Cva@GvqQ)B zwI$n{$RHZ#3dZ^e2OSa!vrP1WdgUwGfg;^-s%saSZD9M8YMByI!6H19Q0sJ+o0k4?}@A?3)b(TeL?h8f2PfhVp&P z`~|-3t4NlMt2Vf_?bypNRvowG{&v-N?HZW1TNu1?7H~|d*9?!I(Gh~k+=AbL&p5VE zEZpZB?i074Lqo_9fgw7d0ab>@lL9ot1NAB0v>uln*If`_aHJtmPg8_*}Ehl1sD%?@_p0b~)64l}@d)`Xk#A^SE zPg6Y)G;bZ$8J4m!FAj-Ew6h|ukUa1BM|^Q&J9kkayLrtzED?_;*&NxB&V|FWmJGP` zFRag;VQE-~To8i{%a;g=7vN=)zhynH=uo$?E&mR4$-hNXckvhix20ShCX-F7|Cs74 zlDxirNXbu-C=YA~n60Ya2!BgS+I0F%t@IP|6z^*w#ipVnDCqFJ86vt03CW06?$AWL zl+3BrH&FNA__8XJa-n9Q*MWxn<>0M(nVlV8Iwnx6+uQZmD4eDzAKzCMpZ;P1s)l^k z_aFCzc+^tzeV&HCFGoDVLr|b;u}hQ02Q+=^6eezeP2~FSK=;Se6{8j<~d4 zs48hw6q-Onk|szxoLU-1vZ07$ta^I>x2cPE_H@zox{}du%9iVRS1SbvmPP-(=U&12 z(s7;t^@`4KJ+Jn>@@B1av0T1@sP%=j=KjvLofj3S^1I3f=X}L>N~^Y{8SCr%Le*lJ z;;#pqduGNSJiH6x4$Xn+@aC{(L*|o2l6^PV#>K|?k2686DyMn*ee_amr%`#sJT1rr z^EPo|ss-B94|M>i1*mFMZTe|j-v;gge+H2YH1EpfoxZu#s(jP*xD9Gy ZWyC)ySgiq^s`e_vdJ4I!wpUoU{}1!C5 bool: + """Whether the classifier has a neck.""" + return hasattr(self, 'neck') and self.neck is not None + + @property + def with_head(self) -> bool: + """Whether the classifier has a head.""" + return hasattr(self, 'head') and self.head is not None + + @abstractmethod + def forward(self, + inputs: torch.Tensor, + data_samples: Optional[List[BaseDataElement]] = None, + mode: str = 'tensor'): + """The unified entry for a forward process in both training and test. + + The method should accept three modes: "tensor", "predict" and "loss": + + - "tensor": Forward the whole network and return tensor or tuple of + tensor without any post-processing, same as a common nn.Module. + - "predict": Forward and return the predictions, which are fully + processed to a list of :obj:`BaseDataElement`. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. 
+ + Args: + inputs (torch.Tensor): The input tensor with shape (N, C, ...) + in general. + data_samples (List[BaseDataElement], optional): The annotation + data of every samples. It's required if ``mode="loss"``. + Defaults to None. + mode (str): Return what kind of value. Defaults to 'tensor'. + + Returns: + The return type depends on ``mode``. + + - If ``mode="tensor"``, return a tensor or a tuple of tensor. + - If ``mode="predict"``, return a list of + :obj:`mmengine.BaseDataElement`. + - If ``mode="loss"``, return a dict of tensor. + """ + pass + + def extract_feat(self, inputs: torch.Tensor): + """Extract features from the input tensor with shape (N, C, ...). + + The sub-classes are recommended to implement this method to extract + features from backbone and neck. + + Args: + inputs (Tensor): A batch of inputs. The shape of it should be + ``(num_samples, num_channels, *img_shape)``. + """ + raise NotImplementedError + + def extract_feats(self, multi_inputs: Sequence[torch.Tensor], + **kwargs) -> list: + """Extract features from a sequence of input tensor. + + Args: + multi_inputs (Sequence[torch.Tensor]): A sequence of input + tensor. It can be used in augmented inference. + **kwargs: Other keyword arguments accepted by :meth:`extract_feat`. + + Returns: + list: Features of every input tensor. + """ + assert isinstance(multi_inputs, Sequence), \ + '`extract_feats` is used for a sequence of inputs tensor. If you '\ + 'want to extract on single inputs tensor, use `extract_feat`.' + return [self.extract_feat(inputs, **kwargs) for inputs in multi_inputs] diff --git a/mmpretrain/models/classifiers/hugging_face.py b/mmpretrain/models/classifiers/hugging_face.py new file mode 100644 index 0000000..26a8fda --- /dev/null +++ b/mmpretrain/models/classifiers/hugging_face.py @@ -0,0 +1,222 @@ +# Copyright (c) OpenMMLab. All right reserved. +import re +from collections import OrderedDict +from typing import List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from mmpretrain.utils import require +from .base import BaseClassifier + + +@MODELS.register_module() +class HuggingFaceClassifier(BaseClassifier): + """Image classifiers for HuggingFace model. + + This class accepts all positional and keyword arguments of the API + ``from_pretrained`` (when ``pretrained=True``) and ``from_config`` (when + ``pretrained=False``) of `transformers.AutoModelForImageClassification`_ + and use it to create a model from hugging-face. + + It can load checkpoints of hugging-face directly, and the saved checkpoints + also can be directly load by hugging-face. + + Please confirm that you have installed ``transfromers`` if you want to use it. + + .. _transformers.AutoModelForImageClassification: + https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.AutoModelForImageClassification + + Args: + model_name (str): The name of the model to use in hugging-face. + pretrained (bool): Whether to load pretrained checkpoint from + hugging-face. Defaults to False. + *args: Other positional arguments of the method + `from_pretrained` or `from_config`. + loss (dict): Config of classification loss. Defaults to + ``dict(type='CrossEntropyLoss', loss_weight=1.0)``. + train_cfg (dict, optional): The training setting. The acceptable + fields are: + + - augments (List[dict]): The batch augmentation methods to use. + More details can be found in :mod:`mmpretrain.model.utils.augment`. 
+ + Defaults to None. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + data_preprocessor (dict, optional): The config for preprocessing input + data. If None or no specified type, it will use + "ClsDataPreprocessor" as type. See :class:`ClsDataPreprocessor` for + more details. Defaults to None. + init_cfg (dict, optional): the config to control the initialization. + Defaults to None. + **kwargs: Other keyword arguments of the method + `from_pretrained` or `from_config`. + + Examples: + >>> import torch + >>> from mmpretrain.models import build_classifier + >>> cfg = dict(type='HuggingFaceClassifier', model_name='microsoft/resnet-50', pretrained=True) + >>> model = build_classifier(cfg) + >>> inputs = torch.rand(1, 3, 224, 224) + >>> out = model(inputs) + >>> print(out.shape) + torch.Size([1, 1000]) + """ # noqa: E501 + + @require('transformers') + def __init__(self, + model_name, + pretrained=False, + *model_args, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + train_cfg: Optional[dict] = None, + with_cp: bool = False, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None, + **kwargs): + if data_preprocessor is None: + data_preprocessor = {} + # The build process is in MMEngine, so we need to add scope here. + data_preprocessor.setdefault('type', 'mmpretrain.ClsDataPreprocessor') + + if train_cfg is not None and 'augments' in train_cfg: + # Set batch augmentations by `train_cfg` + data_preprocessor['batch_augments'] = train_cfg + + super().__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + from transformers import AutoConfig, AutoModelForImageClassification + if pretrained: + self.model = AutoModelForImageClassification.from_pretrained( + model_name, *model_args, **kwargs) + else: + config = AutoConfig.from_pretrained(model_name, *model_args, + **kwargs) + self.model = AutoModelForImageClassification.from_config(config) + + if not isinstance(loss, nn.Module): + loss = MODELS.build(loss) + self.loss_module = loss + + self.with_cp = with_cp + if self.with_cp: + self.model.gradient_checkpointing_enable() + + self._register_state_dict_hook(self._remove_state_dict_prefix) + self._register_load_state_dict_pre_hook(self._add_state_dict_prefix) + + def forward(self, inputs, data_samples=None, mode='tensor'): + if mode == 'tensor': + return self.model(inputs).logits + elif mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + return self.predict(inputs, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def extract_feat(self, inputs: torch.Tensor): + raise NotImplementedError( + "The HuggingFaceClassifier doesn't support extract feature yet.") + + def loss(self, inputs: torch.Tensor, data_samples: List[DataSample], + **kwargs): + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample]): The annotation data of + every samples. + **kwargs: Other keyword arguments of the loss module. 
+ + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + # The part can be traced by torch.fx + cls_score = self.model(inputs).logits + + # The part can not be traced by torch.fx + losses = self._get_loss(cls_score, data_samples, **kwargs) + return losses + + def _get_loss(self, cls_score: torch.Tensor, + data_samples: List[DataSample], **kwargs): + """Unpack data samples and compute loss.""" + # Unpack data samples and pack targets + if 'gt_score' in data_samples[0]: + # Batch augmentation may convert labels to one-hot format scores. + target = torch.stack([i.gt_score for i in data_samples]) + else: + target = torch.cat([i.gt_label for i in data_samples]) + + # compute loss + losses = dict() + loss = self.loss_module( + cls_score, target, avg_factor=cls_score.size(0), **kwargs) + losses['loss'] = loss + + return losses + + def predict(self, + inputs: torch.Tensor, + data_samples: Optional[List[DataSample]] = None): + """Predict results from a batch of inputs. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample], optional): The annotation + data of every samples. Defaults to None. + + Returns: + List[DataSample]: The prediction results. + """ + # The part can be traced by torch.fx + cls_score = self.model(inputs).logits + + # The part can not be traced by torch.fx + predictions = self._get_predictions(cls_score, data_samples) + return predictions + + def _get_predictions(self, cls_score, data_samples): + """Post-process the output of head. + + Including softmax and set ``pred_label`` of data samples. + """ + pred_scores = F.softmax(cls_score, dim=1) + pred_labels = pred_scores.argmax(dim=1, keepdim=True).detach() + + if data_samples is not None: + for data_sample, score, label in zip(data_samples, pred_scores, + pred_labels): + data_sample.set_pred_score(score).set_pred_label(label) + else: + data_samples = [] + for score, label in zip(pred_scores, pred_labels): + data_samples.append( + DataSample().set_pred_score(score).set_pred_label(label)) + + return data_samples + + @staticmethod + def _remove_state_dict_prefix(self, state_dict, prefix, local_metadata): + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + new_key = re.sub(f'^{prefix}model.', prefix, k) + new_state_dict[new_key] = v + return new_state_dict + + @staticmethod + def _add_state_dict_prefix(state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + new_prefix = prefix + 'model.' + for k in list(state_dict.keys()): + new_key = re.sub(f'^{prefix}', new_prefix, k) + state_dict[new_key] = state_dict[k] + del state_dict[k] diff --git a/mmpretrain/models/classifiers/image.py b/mmpretrain/models/classifiers/image.py new file mode 100644 index 0000000..6d0edd7 --- /dev/null +++ b/mmpretrain/models/classifiers/image.py @@ -0,0 +1,265 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional + +import torch +import torch.nn as nn + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .base import BaseClassifier + + +@MODELS.register_module() +class ImageClassifier(BaseClassifier): + """Image classifiers for supervised classification task. + + Args: + backbone (dict): The backbone module. See + :mod:`mmpretrain.models.backbones`. + neck (dict, optional): The neck module to process features from + backbone. See :mod:`mmpretrain.models.necks`. Defaults to None. 
+ head (dict, optional): The head module to do prediction and calculate + loss from processed features. See :mod:`mmpretrain.models.heads`. + Notice that if the head is not set, almost all methods cannot be + used except :meth:`extract_feat`. Defaults to None. + pretrained (str, optional): The pretrained checkpoint path, support + local path and remote path. Defaults to None. + train_cfg (dict, optional): The training setting. The acceptable + fields are: + + - augments (List[dict]): The batch augmentation methods to use. + More details can be found in + :mod:`mmpretrain.model.utils.augment`. + - probs (List[float], optional): The probability of every batch + augmentation methods. If None, choose evenly. Defaults to None. + + Defaults to None. + data_preprocessor (dict, optional): The config for preprocessing input + data. If None or no specified type, it will use + "ClsDataPreprocessor" as type. See :class:`ClsDataPreprocessor` for + more details. Defaults to None. + init_cfg (dict, optional): the config to control the initialization. + Defaults to None. + """ + + def __init__(self, + backbone: dict, + neck: Optional[dict] = None, + head: Optional[dict] = None, + pretrained: Optional[str] = None, + train_cfg: Optional[dict] = None, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + if pretrained is not None: + init_cfg = dict(type='Pretrained', checkpoint=pretrained) + + data_preprocessor = data_preprocessor or {} + + if isinstance(data_preprocessor, dict): + data_preprocessor.setdefault('type', 'ClsDataPreprocessor') + data_preprocessor.setdefault('batch_augments', train_cfg) + data_preprocessor = MODELS.build(data_preprocessor) + elif not isinstance(data_preprocessor, nn.Module): + raise TypeError('data_preprocessor should be a `dict` or ' + f'`nn.Module` instance, but got ' + f'{type(data_preprocessor)}') + + super(ImageClassifier, self).__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + if not isinstance(backbone, nn.Module): + backbone = MODELS.build(backbone) + if neck is not None and not isinstance(neck, nn.Module): + neck = MODELS.build(neck) + if head is not None and not isinstance(head, nn.Module): + head = MODELS.build(head) + + self.backbone = backbone + self.neck = neck + self.head = head + + # If the model needs to load pretrain weights from a third party, + # the key can be modified with this hook + if hasattr(self.backbone, '_checkpoint_filter'): + self._register_load_state_dict_pre_hook( + self.backbone._checkpoint_filter) + + def forward(self, + inputs: torch.Tensor, + data_samples: Optional[List[DataSample]] = None, + mode: str = 'tensor'): + """The unified entry for a forward process in both training and test. + + The method should accept three modes: "tensor", "predict" and "loss": + + - "tensor": Forward the whole network and return tensor(s) without any + post-processing, same as a common PyTorch Module. + - "predict": Forward and return the predictions, which are fully + processed to a list of :obj:`DataSample`. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample], optional): The annotation + data of every samples. It's required if ``mode="loss"``. + Defaults to None. + mode (str): Return what kind of value. Defaults to 'tensor'. + + Returns: + The return type depends on ``mode``. + + - If ``mode="tensor"``, return a tensor or a tuple of tensor. 
+ - If ``mode="predict"``, return a list of + :obj:`mmpretrain.structures.DataSample`. + - If ``mode="loss"``, return a dict of tensor. + """ + if mode == 'tensor': + feats = self.extract_feat(inputs) + return self.head(feats) if self.with_head else feats + elif mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + return self.predict(inputs, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def extract_feat(self, inputs, stage='neck'): + """Extract features from the input tensor with shape (N, C, ...). + + Args: + inputs (Tensor): A batch of inputs. The shape of it should be + ``(num_samples, num_channels, *img_shape)``. + stage (str): Which stage to output the feature. Choose from: + + - "backbone": The output of backbone network. Returns a tuple + including multiple stages features. + - "neck": The output of neck module. Returns a tuple including + multiple stages features. + - "pre_logits": The feature before the final classification + linear layer. Usually returns a tensor. + + Defaults to "neck". + + Returns: + tuple | Tensor: The output of specified stage. + The output depends on detailed implementation. In general, the + output of backbone and neck is a tuple and the output of + pre_logits is a tensor. + + Examples: + 1. Backbone output + + >>> import torch + >>> from mmengine import Config + >>> from mmpretrain.models import build_classifier + >>> + >>> cfg = Config.fromfile('configs/resnet/resnet18_8xb32_in1k.py').model + >>> cfg.backbone.out_indices = (0, 1, 2, 3) # Output multi-scale feature maps + >>> model = build_classifier(cfg) + >>> outs = model.extract_feat(torch.rand(1, 3, 224, 224), stage='backbone') + >>> for out in outs: + ... print(out.shape) + torch.Size([1, 64, 56, 56]) + torch.Size([1, 128, 28, 28]) + torch.Size([1, 256, 14, 14]) + torch.Size([1, 512, 7, 7]) + + 2. Neck output + + >>> import torch + >>> from mmengine import Config + >>> from mmpretrain.models import build_classifier + >>> + >>> cfg = Config.fromfile('configs/resnet/resnet18_8xb32_in1k.py').model + >>> cfg.backbone.out_indices = (0, 1, 2, 3) # Output multi-scale feature maps + >>> model = build_classifier(cfg) + >>> + >>> outs = model.extract_feat(torch.rand(1, 3, 224, 224), stage='neck') + >>> for out in outs: + ... print(out.shape) + torch.Size([1, 64]) + torch.Size([1, 128]) + torch.Size([1, 256]) + torch.Size([1, 512]) + + 3. Pre-logits output (without the final linear classifier head) + + >>> import torch + >>> from mmengine import Config + >>> from mmpretrain.models import build_classifier + >>> + >>> cfg = Config.fromfile('configs/vision_transformer/vit-base-p16_pt-64xb64_in1k-224.py').model + >>> model = build_classifier(cfg) + >>> + >>> out = model.extract_feat(torch.rand(1, 3, 224, 224), stage='pre_logits') + >>> print(out.shape) # The hidden dims in head is 3072 + torch.Size([1, 3072]) + """ # noqa: E501 + assert stage in ['backbone', 'neck', 'pre_logits'], \ + (f'Invalid output stage "{stage}", please choose from "backbone", ' + '"neck" and "pre_logits"') + + x = self.backbone(inputs) + + if stage == 'backbone': + return x + + if self.with_neck: + x = self.neck(x) + if stage == 'neck': + return x + + assert self.with_head and hasattr(self.head, 'pre_logits'), \ + "No head or the head doesn't implement `pre_logits` method." + return self.head.pre_logits(x) + + def loss(self, inputs: torch.Tensor, + data_samples: List[DataSample]) -> dict: + """Calculate losses from a batch of inputs and data samples. 
+ + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample]): The annotation data of + every samples. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + feats = self.extract_feat(inputs) + return self.head.loss(feats, data_samples) + + def predict(self, + inputs: torch.Tensor, + data_samples: Optional[List[DataSample]] = None, + **kwargs) -> List[DataSample]: + """Predict results from a batch of inputs. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample], optional): The annotation + data of every samples. Defaults to None. + **kwargs: Other keyword arguments accepted by the ``predict`` + method of :attr:`head`. + """ + feats = self.extract_feat(inputs) + return self.head.predict(feats, data_samples, **kwargs) + + def get_layer_depth(self, param_name: str): + """Get the layer-wise depth of a parameter. + + Args: + param_name (str): The name of the parameter. + + Returns: + Tuple[int, int]: The layer-wise depth and the max depth. + """ + if hasattr(self.backbone, 'get_layer_depth'): + return self.backbone.get_layer_depth(param_name, 'backbone.') + else: + raise NotImplementedError( + f"The backbone {type(self.backbone)} doesn't " + 'support `get_layer_depth` by now.') diff --git a/mmpretrain/models/classifiers/timm.py b/mmpretrain/models/classifiers/timm.py new file mode 100644 index 0000000..d777b2e --- /dev/null +++ b/mmpretrain/models/classifiers/timm.py @@ -0,0 +1,209 @@ +# Copyright (c) OpenMMLab. All right reserved. +import re +from collections import OrderedDict +from typing import List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from mmpretrain.utils import require +from .base import BaseClassifier + + +@MODELS.register_module() +class TimmClassifier(BaseClassifier): + """Image classifiers for pytorch-image-models (timm) model. + + This class accepts all positional and keyword arguments of the function + `timm.models.create_model `_ and use + it to create a model from pytorch-image-models. + + It can load checkpoints of timm directly, and the saved checkpoints also + can be directly load by timm. + + Please confirm that you have installed ``timm`` if you want to use it. + + Args: + *args: All positional arguments of the function + `timm.models.create_model`. + loss (dict): Config of classification loss. Defaults to + ``dict(type='CrossEntropyLoss', loss_weight=1.0)``. + train_cfg (dict, optional): The training setting. The acceptable + fields are: + + - augments (List[dict]): The batch augmentation methods to use. + More details can be found in :mod:`mmpretrain.model.utils.augment`. + + Defaults to None. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + data_preprocessor (dict, optional): The config for preprocessing input + data. If None or no specified type, it will use + "ClsDataPreprocessor" as type. See :class:`ClsDataPreprocessor` for + more details. Defaults to None. + init_cfg (dict, optional): the config to control the initialization. + Defaults to None. + **kwargs: Other keyword arguments of the function + `timm.models.create_model`. 
+ + Examples: + >>> import torch + >>> from mmpretrain.models import build_classifier + >>> cfg = dict(type='TimmClassifier', model_name='resnet50', pretrained=True) + >>> model = build_classifier(cfg) + >>> inputs = torch.rand(1, 3, 224, 224) + >>> out = model(inputs) + >>> print(out.shape) + torch.Size([1, 1000]) + """ # noqa: E501 + + @require('timm') + def __init__(self, + *args, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + train_cfg: Optional[dict] = None, + with_cp: bool = False, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None, + **kwargs): + if data_preprocessor is None: + data_preprocessor = {} + # The build process is in MMEngine, so we need to add scope here. + data_preprocessor.setdefault('type', 'mmpretrain.ClsDataPreprocessor') + + if train_cfg is not None and 'augments' in train_cfg: + # Set batch augmentations by `train_cfg` + data_preprocessor['batch_augments'] = train_cfg + + super().__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + from timm.models import create_model + self.model = create_model(*args, **kwargs) + + if not isinstance(loss, nn.Module): + loss = MODELS.build(loss) + self.loss_module = loss + + self.with_cp = with_cp + if self.with_cp: + self.model.set_grad_checkpointing() + + self._register_state_dict_hook(self._remove_state_dict_prefix) + self._register_load_state_dict_pre_hook(self._add_state_dict_prefix) + + def forward(self, inputs, data_samples=None, mode='tensor'): + if mode == 'tensor': + return self.model(inputs) + elif mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + return self.predict(inputs, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def extract_feat(self, inputs: torch.Tensor): + if hasattr(self.model, 'forward_features'): + return self.model.forward_features(inputs) + else: + raise NotImplementedError( + f"The model {type(self.model)} doesn't support extract " + "feature because it don't have `forward_features` method.") + + def loss(self, inputs: torch.Tensor, data_samples: List[DataSample], + **kwargs): + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample]): The annotation data of + every samples. + **kwargs: Other keyword arguments of the loss module. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + # The part can be traced by torch.fx + cls_score = self.model(inputs) + + # The part can not be traced by torch.fx + losses = self._get_loss(cls_score, data_samples, **kwargs) + return losses + + def _get_loss(self, cls_score: torch.Tensor, + data_samples: List[DataSample], **kwargs): + """Unpack data samples and compute loss.""" + # Unpack data samples and pack targets + if 'gt_score' in data_samples[0]: + # Batch augmentation may convert labels to one-hot format scores. + target = torch.stack([i.gt_score for i in data_samples]) + else: + target = torch.cat([i.gt_label for i in data_samples]) + + # compute loss + losses = dict() + loss = self.loss_module(cls_score, target, **kwargs) + losses['loss'] = loss + + return losses + + def predict(self, + inputs: torch.Tensor, + data_samples: Optional[List[DataSample]] = None): + """Predict results from a batch of inputs. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample], optional): The annotation + data of every samples. Defaults to None. 
+ + Returns: + List[DataSample]: The prediction results. + """ + # The part can be traced by torch.fx + cls_score = self(inputs) + + # The part can not be traced by torch.fx + predictions = self._get_predictions(cls_score, data_samples) + return predictions + + def _get_predictions(self, cls_score, data_samples=None): + """Post-process the output of head. + + Including softmax and set ``pred_label`` of data samples. + """ + pred_scores = F.softmax(cls_score, dim=1) + pred_labels = pred_scores.argmax(dim=1, keepdim=True).detach() + + if data_samples is not None: + for data_sample, score, label in zip(data_samples, pred_scores, + pred_labels): + data_sample.set_pred_score(score).set_pred_label(label) + else: + data_samples = [] + for score, label in zip(pred_scores, pred_labels): + data_samples.append( + DataSample().set_pred_score(score).set_pred_label(label)) + + return data_samples + + @staticmethod + def _remove_state_dict_prefix(self, state_dict, prefix, local_metadata): + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + new_key = re.sub(f'^{prefix}model.', prefix, k) + new_state_dict[new_key] = v + return new_state_dict + + @staticmethod + def _add_state_dict_prefix(state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + new_prefix = prefix + 'model.' + for k in list(state_dict.keys()): + new_key = re.sub(f'^{prefix}', new_prefix, k) + state_dict[new_key] = state_dict[k] + del state_dict[k] diff --git a/mmpretrain/models/heads/__init__.py b/mmpretrain/models/heads/__init__.py new file mode 100644 index 0000000..4364fb5 --- /dev/null +++ b/mmpretrain/models/heads/__init__.py @@ -0,0 +1,69 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .beitv1_head import BEiTV1Head +from .beitv2_head import BEiTV2Head +from .cae_head import CAEHead +from .cls_head import ClsHead +from .conformer_head import ConformerHead +from .contrastive_head import ContrastiveHead +from .deit_head import DeiTClsHead +from .efficientformer_head import EfficientFormerClsHead +from .grounding_head import GroundingHead +from .itc_head import ITCHead +from .itm_head import ITMHead +from .itpn_clip_head import iTPNClipHead +from .latent_heads import LatentCrossCorrelationHead, LatentPredictHead +from .levit_head import LeViTClsHead +from .linear_head import LinearClsHead +from .mae_head import MAEPretrainHead +from .margin_head import ArcFaceClsHead +from .mim_head import MIMHead +from .mixmim_head import MixMIMPretrainHead +from .mocov3_head import MoCoV3Head +from .multi_label_cls_head import MultiLabelClsHead +from .multi_label_csra_head import CSRAClsHead +from .multi_label_linear_head import MultiLabelLinearClsHead +from .multi_task_head import MultiTaskHead +from .seq_gen_head import SeqGenerationHead +from .simmim_head import SimMIMHead +from .spark_head import SparKPretrainHead +from .stacked_head import StackedLinearClsHead +from .swav_head import SwAVHead +from .vig_head import VigClsHead +from .vision_transformer_head import VisionTransformerClsHead +from .vqa_head import VQAGenerationHead + +__all__ = [ + 'ClsHead', + 'LinearClsHead', + 'StackedLinearClsHead', + 'MultiLabelClsHead', + 'MultiLabelLinearClsHead', + 'VisionTransformerClsHead', + 'DeiTClsHead', + 'ConformerHead', + 'EfficientFormerClsHead', + 'ArcFaceClsHead', + 'CSRAClsHead', + 'MultiTaskHead', + 'LeViTClsHead', + 'VigClsHead', + 'BEiTV1Head', + 'BEiTV2Head', + 'CAEHead', + 'ContrastiveHead', + 'LatentCrossCorrelationHead', + 'LatentPredictHead', + 'MAEPretrainHead', + 
'MixMIMPretrainHead', + 'SwAVHead', + 'MoCoV3Head', + 'MIMHead', + 'SimMIMHead', + 'SeqGenerationHead', + 'VQAGenerationHead', + 'ITCHead', + 'ITMHead', + 'GroundingHead', + 'iTPNClipHead', + 'SparKPretrainHead', +] diff --git a/mmpretrain/models/heads/__pycache__/__init__.cpython-310.pyc b/mmpretrain/models/heads/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7edfe83e3928817991bdd34d8ba50f25e9be7fa1 GIT binary patch literal 1990 zcmZ`)OLH4V5MId-yV zaOBL5Kf;gjl@ot~Dyo>C+0{A*q|#SkcTdms^kd|gN_k6te);?Ni{q?i{XxR*Pg)5l z>KFW+vMkqfQ)r14q*BDwm=+T-q4@+(iYb`Vd=jTc1~QsYVOHcIr};GIMF9$$XRs(r zP|`e$Wl@2O<~gj28q_q;qF90@EthdwtiX!q z6wx*F_VWn%8hcY{I7IGk8yI!ItK;_>s5|_cgEM$KnAz(0mTJ#Y1?g`8<9i z9>F8c8~CZ%fgQ~ka92Er$C@wVp4f+d&6n^%9KxaI%lMgi0#7tw!Oz80c&hm-ej%R0 zGtJlVOK}88ny=$m;uwxKZ{pYD1Wr=c&~i6^O+m}u>{{FR{!~@5wo^Cx(=*=d?|sW0 zcP#BkQl{NH8&eD-T59{=g)aq@k)om$p>%?fk65HCe8YIJJ$9KtySU&3&b;usc1$d# z7t+7>T<#6ygI@O93GGC*$a}9}wUJ+HIeoKq?u2T$w)BIb?Munf3Ayi)^`>TQuO)N& zAdELSXZ_m|mCm`x9GL`C>9o$2i@FEb+o)ueTXOK+8L(tgrt>n6Xr{y8E9s6)zT>z3 z{y}tTt#gebKX=|RO!g?WyFauNv5~B~8&XM|dQR{*-mlB9UNDcz@nxxGT`rQaYTZjm zes?FTdN*_iZ<%}dfNb|&s~_#1@AF|2)k2>KDxW>+c!8;PVypGPZ{3MvLr$q~k~M-g zf(F4N!3KePA1padkRzBSSRhy;s1h^@=*^H51d{|Af-*scV1{6hV4Yx|piZztuuQN? zP#`E0&OZJ$En4ddZx&;C($iasvV>>CZ84cURij7U)^5d$OAC80cs+utbZ zT2NKWcm#}^VuzfoPBSv40O~BB1Cvx^qMSf^7!QI`Qs|K7;lPM=ClZAb>2^@&;iYE} z)KnNHT@;FH64Yd9O^_rH^2kPUER=94pq>EZN|l4Gi0@6AhJtY9kb7nUl}s(*(fO2i zwSd1LYp50 zFh0~qPz>0WJ!Db9)DBPxxEM!BT>|+)%}3-#J)ykJY?zIxc_A0Pb4Es{MuY5#4~^I) zu`$xKVXJz_!(pt{>T2;-bP?*sn~ZHch_?NkCAXBU;jhD8>HFcX5SP(|^8xi;hQTg1 pXs~PB+*4JvcP@Xr$sP&cy+)>g^#59&{s#tB7DfO7 literal 0 HcmV?d00001 diff --git a/mmpretrain/models/heads/__pycache__/beitv1_head.cpython-310.pyc b/mmpretrain/models/heads/__pycache__/beitv1_head.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86c7d053bebb5cfb362e57605a1fe38862aa7c38 GIT binary patch literal 2018 zcmZWpOOM<{5bkcbJ)Y+#n#fP}o&YND<*)9Lty;+^7rHykB~oc zb9%V2`2?o<7KkL0W+bNp%~`;5CvYg9v5W`2{=2ysc=ef+wQ@i3>ow2Xc_-*lqB;lN zL$XDrE4{}=ddh#~JfcBQvfE_XdJY=M(6M|wHI?;mO{%mkqRhHq7qHkFQrr0~GU{e2 zr*!%fo4XGeQZaI2k-7K0ZZ5>ld%SUNsZ096`wtXBI z%h4646J@hI^3a zu=D01f5`UTxeI~jdw*@ILg@%9?NlKlqRti*p48@xOI4}kyBoVuMWqIWehyAewK5{n zWiCche1CL|Ka|4Jc>Id!ZXF0QURWT-XtrPt7iXnD3v&$FE6*%FkfOfE#-%fcvt9Isvot=%Ky{8PKl-8G0HMtlviR4iW*x zdQD$yAoV65*W@i+AHP%)U&3lSDLNTi?GxhPq}a$ Za_fKC6#Yk-=sNBYb=ITsG0(Y3{|C#Q7<~W$ literal 0 HcmV?d00001 diff --git a/mmpretrain/models/heads/__pycache__/beitv2_head.cpython-310.pyc b/mmpretrain/models/heads/__pycache__/beitv2_head.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f48f11deb8ab2fce36e4a74934382cc78065dc70 GIT binary patch literal 2157 zcmZV8K8qKaf+iB;q*t3-q zHe6b%r`|YlOpd+qGx!m{Ax`}XP)hke&u+3QUiq8vv;DmvTlIQv0_)e8zo$1^g#3xb z^74WB47T|mfFzRUq@WQkSi}l1@+h9MoJZV!{UV40cjo1-B8)1nF zZW8Is;4zVb3LkloXw;SL4jHtbg9bA2EZ@mYZNuA>I;+YgxBj;!AiD!<+g~I`-Kyj? 
z`nKPMG^`VKu^Je93=Q0Gs5RD~)8VnZ8X{P0>gH6hgmi}_BjP+2K{FI{WsV8jSrFik zgIf&|e5_GzXAY-4%?1`+?rP6q3*#2y*8Wq>G0tj1eo9C4Lz<4 zN{qTRc$1Jx4-=DgUA#LrnC3gvI6optn z;*LaLQt^kF@OlgnA|-t@gtbr1a`c@IQRJ{P^e}8{O?gm{`P44=i(>3>qXMW+5~7ij<<~qUqM#j|O?=3HRW-I+LQ{RtK!+(9}xP@hXET zgp2Mu2V+bnS|D81@4q;g62+PGfNd9zD#SEL=Si0gtgPlVV(n7z)-V(2y!tZy}9K?Fux==?mm(ecrZ7)Rj+N92L2HR|o5K zh(vCH=79h}veCHjRbC;lR-&6!3*pO*M<@J{87rJ~guXz;FZR=$N~oERAKGj=Xwj<*G9D=O-`44NERjQ#}($$H{RE5rnxRdcinMFw)}{z zKr98f1!(H6-O5@yzsmgv;0LTLIYl1un?5Teca=zfcZD&H z<=0?nrG3TSL{kZfa_**5n-Ra~Bz#1t=Zs0c&uC zW&&%YHy6cMZR>6VU6SkfKH( zfKi_k@EcCBc@X(t5iAU!E?4Grx#3py>^)EEQ4y5B+?m9E%KPFV8@gZuM<$>M7~-Gr zcGt`olV2VT`U;RF@&U$`}16?HH%e!0q>8sYjuMwmGG4i13}rxy~ke0IN{_o$qJ{ z(Iu#t-_UElY&Y!Z{1W2+<-|`Qgjf9aYm`&jI*}Y*U#7hYgyS53dl5CfA->doawfFv zmYF4pF?6>Lf-kmTO=t;BW4oFo_6B*P0$<9#EosBPQ`S7QHNU_!_kdk%v?tIE_LPZk zO?L=6iY~@YIkrkif)9ZrEvAi+yh?@dg@fH!I(NQ%;2+8b0O>H839qGiH(JiuR~jDqV|8mNTf9r2eRQle5CX6pqQBj&T-PZ->( zGTLVSqM%KAB5$egWFCmySF*8rgXp>Zh7U@<;MdD*3;pxKcgAOD8q-TYcjdZR9mI_5 zn=FbCFCY9|M#&XJbqUuyZ3uL{np_o)iG~(-Xto5g-xx6XL?s#M9O~t*DbV@39bQ$W zT~?ap3)TsS;TAQ5aq;Em_FN?iWvzz@b-#rWH%HI{aX4YT?E)U+Hv)+289qoeEb7Kp z=neTP2EeamG6>Wdk5>>N_Ufy#_oC0OaK7&e?OB*nI%vV2$Oe zY0bi62xxI3G0#x)f?V8E%?NwQj}wqdMQz!$6Jsmz>I;&in2&Et$?#4r9qYfirzts6?8z7k+o7N=i+n|XMv)O zKs^zZ)SNK#*~4(MXLds*LLL~Z)9W5vkpe2mKU=1iU|8KrXqG zm8;6)!8cT5+9$69j`6)^CjdCjW}%yij$2=2&?UE5-N)y*ZkL(@UJsy5r$sN(3i6tv zT=o_aap2@dwwyJONMJ2#84yB3J}8I-XNii`yVuLd1Wl7S1vCc+A>(^^ME}g)5|<*u z;y{L}kUs+9)EZqlWeN8W(x&tLLtAEdSZp#rN@sTacbcw0hDkWM%kvrQPP4&Eg`4S5a!;!z1eEtLU) zwwBQwD<->CuWnECw^%V*ag1T?$bz?_P``eyh_d-%*8lzuOBk<{vDRVz`vv9jY(gL_ z_R_xA^V^d*UY(4Pk6rHyU$=)ZNuAm`tz1>v=<591f5Rh8RN_xMWD(~V((g2#$R@5Q zM|>({3Rz=^z-dZ51nDK9lkNu4OL-b#Fs1Bof%G>ZF3!Fc5j1G-!U zB4ZYisY+=Q?H8X*!ltEoN(|l(s}~3`;-(h+Cah!n_0HC}&j@N*1m*8vk~)ESLK=3R zay5Lz-%iv=Z%l~ztqk8W8M+o`1uQnmc=Yxi{gRMW!t1Z*zT+V-s)Siea{))OJ!#rS zYZmvByxb8DvN@1F5Hrx_I#n{$s_5ntccENwPyO|?BQ&(d`;I|Ju}mI*DY4!$pmHzO zQDr^GRNEkJac?McG7vg1yvsDK#BdJEXgsG@%p!6EfnavmpwH=OUbl4uwL&u>-hA=8 z;7imILo1^l@yN^^tOG0>tLEH(pd`!&KcCe+JLUjYwC$E)Y@x2x0+}_U-VC@HIe^@ zdHnUumpso9s@6eF{e*V)%DEC5u&R=GEU+OT(cUx(YTBt5+iP;)Q!nR5xUpVeaZOsR zi42lVk=cQ(-4Npr$72q2RAQc?Z%_2X2c2)doVJne9v z!+^9fqH^J)W2G5^tpK-{;$cX7dA2|okD>U?R6g>aK^)7#y`p-qFHyv0zL1K)Pzcq< zl{Xe@H2b|_8n2}1@*+@-QlA9)M&eNkrRP%1$b#}sr z-42<_o_BP}L0GFckUO1ekwDDlGNmcPPOhT~Vdv92Tacl1n`c^PnfkuZAy z$A)Ec%ik>ch^r8R-+apt>KX9s6_8895UF2jod#^S;iT6Q(~$HSHT}&v$VNV$UU5XF z;YlJTpUwlr1WFjAihUDtse-)`$tYz}Z*rn@TdAKve4PsT`iM{yQl`}P_S&rFFZJ^p z1G^9aVXQR(Q;azqs^qCbvwDhD=7frMtjspyi@w$ClxZK8q*`sfVXFbWy4oFtS1B!S zS%CI02al2nMs1ua>Rb)b{rZ?amUXok_&&e4Q<^S&&aA(DMe1R}Q+bY26%xyJiVoJP zO~SJO_udb1)BbxdG7sKSxx-SjRwmxGX;0Zs1IO@S-~8ZyCNlv37tTHMWMDfZo12I$ zDwxTl#zt!ur5=0{GPl2*?(Y!EDA~egTcDy!&rdBN-O~^(6A6xq6u3RvFf?6?C6SVv zwP@wYOO;Vi0iC>5CMiY)wL8*71YAGlaBgbQJz08dL@NMB&CHGwo*1>@#touH60sUZ zHv&2}tNS40wnN#8-2xfin+Bnz<+ritUA!Kr8g$Pn=LZ@3B~T4Nm{P8Q{>Q=Re8&bF z;%Fa`pWgxEnkUgg~0 fuN=zpH~k;}LSu{XD8KFWX literal 0 HcmV?d00001 diff --git a/mmpretrain/models/multimodal/ram/data/ram_tag_list_chinese.pickle b/mmpretrain/models/multimodal/ram/data/ram_tag_list_chinese.pickle new file mode 100644 index 0000000000000000000000000000000000000000..4abe105e3b347ab63c1dd8ac25977853918635c5 GIT binary patch literal 50796 zcmZ_1$!;vo)}Cjegp%sGw)ma>|DW^&9(bx*!O$g0uy4SyM;;ik2ZCVO1Hof<#kSZN z`@Zisdv3B=>}I}_ipb27Zo$vHR%DUq$UfMIUu0XEkrAtT*Sl5>{nnwR7q zLrcZ_0Xy^Ijc_$jBV3*Mo??Bp_xi-1UUxb!H#t8}P8Wx>+596rpuHu@vUu*R+u_#x9a@uy>Dgh-{JjE7e2n){wk-11)irC+b`MMO5FOTqt6`O z)8!*ip>v|f?MPgm?%<8=;Wc~R^?b;kV0q 
z1>GlqUf|Pkt?qWY9*_B6x;CHWN%RxpZdr}baXYP=U!BCMW1X}5QY`Apd!PI2md_z} zhW1^i3+vtpw?oCYK6=qy%%Hn; z`P4~1!w*!8%e8Q)FPeQ!73UgH;c7MB%rFAlt%30I95&yx$y1JIQ~I=F>o9EVH?sNT zY;KVAPy01>tGCk-PlrNh!fzPg zc#CR2x{-IbY4B?K0TZ$o7S6)UW9W{tlW~^sYaHYo!}*5#Is4e4>uBh)7j&q2`Sgb@ zmWHyuQypsl3O9_BO4i~P>Y>}t+Gp9>bm-3YH^b(K4c}@Q?`E^~Ft5ClpAPE*-UjoL zk$jGasm9q>IMmU6`X%n@@w^f5c$l*N$XF@cdvr@`7flTVdOI9xbY|U?aIM)@3~uxX zPZBPhkNF{Dq85KQv)9jj@?LE|?&@4|LO;}au>AL&iKlAp?8j3+U;3K8&QC;7VI0< zz0LFI*ZaE3{$bqLx9i_FY~6;IW^#796Q>6Kp1tm5e$CD*Vlf|Ae`e>O%!sVbOI6vE zT_VUBqEDO2 zI?ia$^CB+A^2u)8y5T3hVG^On^kJuU=0Ms%a~md}!xG=Ip7tOt_IT6sgy&-fbhtih zw|@Ew+3rCxJ{RW}89e-Ae4NggH09&UxV==JHww?koNTu(AK+Ru)bCKN4VlcUWJA18 zEgRXTk-c>|q1WArkN4rG8@i@2rcSb7ylFk&9~aw;44C|xm#Sv3qxqpat=Jsvy)m2y zvi5GTdm4WaF<_E}otd*kwQxNynJ9HqXGZg!sIj2uskt(6;rpc+_@J8~+*zG4et`}M zb1U)Jhs*15bQ*8l;f`-w)u`)gcWz`2>m>V{ARSWZ-@U|SL7+Uqv zJkx-AKXA#Gw?=*N$LX0cv!WA@kMqxqc=zrnAi;_QW>YmgdJB8{(YP}dnv?qV_pN-8 zPN@0GVV0u<-bQ(6^J9Dkb)DU+*nDz@&24Qkr2VwREH_D;XySOs$SjXeJQagqn_v2@ ze7x**nl4rUTLbwS53l16WAbZHn0vB!{I&n$M5>146(7dM!I0X`4n3w%Jp0V1byOuDO7g^&Q=22T zxV=@pjO0@iP}%e%|7e8s$1sY((Ah) z1qP(1c0K=`Wlwzk2sbaLK3idaw!!~h)8-?sxUOMM!#}MWEv~HU95cbf?n~U`;%fMK zw`o?%W;FfsnMD%{sK$3G#rctP9M}7M7+X5?VA?)IjUVOaVM|g>CfhIR-T1H^5AC#@ zyW$w-SID%)L&o@``|Gp%vtCS-Vd$oJ`#nm5uoUV8|0%=QXTsFUYM zY@Gefr>^qlk9=Y$96$7pzWz?l!19FhXuZG3v;|g4+wNKSMk$wT@o-VUGATi+`)ia} zv&$EsZ!~O}R*RGMe3|J~_l9>)f3qCBH9s0POsiNDCYNhrVl&L$vh}V?Ibh9xmbE)l&goW5oR>Dyj_eh zgf8kF4cZBvA%;;ry9h7MMqFJD9qEm5{a6Y-HT;F+$+ADRKDpWO@p08=h77zgkEOta z?jw-0J#JdVaKji0;}80!kyAzhJG~Kim<`pqy;m&p32PECdOZz@)YgE|=ZQD%a5lm} z&M(d%@(BiREiA5t<$X7Nc%*FOTF=bD`<)|9C9V5o zC>-n6(&p5%jjD8^tLXGw6Hg`>YRt)AXWG=YUK#+EaVk$dy^$7}AenNA|56niGhb z2Upze$qyuNbV$}k-5W)7#`F|Mp!S7}FhoRVQqp#RG{QjET@O<;|I|=*wEXU#G#(_1ms0bkEnbFmV}l zA+M+NQ8VGr%^+QcoVVV@DzeR42|9iy8@dVma<8+wJ$tL3Io8}W$Lf3BHoeE~Xvgtx5%$)k5U{9|zh2vs+wc7jmv>8gR6LQbgr$%V)2M~!D@P40sL?1ohdxxp@)7b-Cs-qCes2pR zsQZ451+!ezY1WUaZIi2-wYS`7!i3M)HTJW+2XkerarMc(Z=Dm5WNWbL&O5@;^hC`Y zaq>_{m$8O4;KJKY+`QK_TuV1u1$uArdhG|%U-IPGUAGd_=Fo=O0DSSc@WPnE zM|nkk=oiOlnFVSrzVbCS`xXVg6t4^JYSWO1YPf=S&YC)YX%%L{u*XzV6@VeM5TZY@ zjX*dWVPepLjz8kf4T7POpNwUjE9M|!_zG+TQ!xEm&91?07pn(Et?O z)xk;Qw72XXAqie}VfJt>ZaiYC7MmA(>TQYEFtY)K#%8`hs7=rc+3^+oixVX^lkfk^ z_Zg3sFmDFP{0lcppuqh@ekW%jU)r{}AMi%1pcSXMjCMmy9SeC8mrjx2#vd~GY**(TXD09zlaL&TG}S(v3E4GdM_v% zo}4*-onRWik6^EA-o3HUJCJuTd+$qO>^}a!u$v5op<{&ZH|39E{FpVbi_ShniEE2? 
zGm|VFD2{KDG~sQoI6ytg3PZ&ftLk~7p>OK6{QZ`;by_`6UFO5*@nn-*>Ew&E;zA=g zyV=f$^i;*#AfH})Hpk4Psq|jd3H;6OX8On?n@O*)jHB#r7V@$u*_BKL9W_gnMvz50 zz`O*viqjt@oChFv#<r<{iF~;l(>j0%FC>hjx;6I9;y}bLY zk#9fd+fSL9wREvH_{k+1+p-TT*@0dUtF`zTMNIWQxBebC*!r7)b8;V1+jLu|j%4G` zXmqzi*epKmL8l)~L+jgzv1WdVwFP{oof@F!QJbPQ-?UkZAbJj0j0QmXo4B}O*k@I3 z`#kx_rFT6NJ|C4|6eeC=Vtd2YF|sl4Z0qRqm<{4uw*DTPU~e*^ir3L%36s9=4TN@R zX%LidSaLN@Xjgl03(-`3Ts<$2P|CGpP2)c7>5cM{PBFrmNb1<&kZQKKV)CM{hM{`) z7L{-ykyswZ>EFn@h!3!V+i)fsM57pylI^`c+8*X4GP#<)-I^7MA@NH{lDza)=SIU?bxw_BW`fh`!j zCTk`ygT*N@Y~r1L)5GOtX&f$_Djx{XryzwOPo)xJ)Sic#3G2N-!C58ehmW|5hev3p z7z&7>TX-W=*|h1VRx=^Nx9#}Arw;HZ3Mh|Zg{d-!Y?QAcoZIzzyR{op5rHPxRlj^GI8r> zcx+7;i-sZ`;FQff1d=~q^%O>`DcW(mYePWu$#gCc@XVLDVgAE+Hc)#O&I4MRNCRg0 zEMJ}T;T_NK(l~Gb&ffHe`Rk|kUe|d&!Y2AL!#(`5gr*sYi^-StRLWF=ou8P;UaTU=v%S9ZbGRR7e?t>H+_nW zF^kBdq?2Cv2Bxo>g@D=5!OgF&`+OduS1YXHlkrR0r=wNQabZr}AZb|(s!El<&-+`T z+TuOxvj&XS>ueXBGTO5BR<^33P@ZgB5GMdNeP#qp@?y4Ev#BSOGqt$ejoT~U_7hCq zqRZyludDbuk5eC=X2X2`AAI;Qw$~{3pA7r0h0V>lduLNm$X_PiQ;WmDh{z=0y@M&F z`5zwk!KBpayr&t-$`^jqpndu62Fw7vj%hB zU%)rZ*jPF$r(;iARJwfvn+tLB;?sf{Bol%?dGprBdo};v@(5zKH|+C1I~=kH8sMj~ z)x-NTX0RLNSfu&bTpxKZmO7?9t6_H*r6L_@&H{h43z=YQ+)FCb-~mEYE{{MY~P-(y3?_J{Wyo*=H79`fki z406r1Fh3QI0YTIpr&4>mW1$Jo#Gx5iH~G|yNqB=L=H@e}tAHXKI|JJ!KIrMBYGt1e zcw8AR#+-BrD&LyOHw>OxCq85yxb%!6$yFfPHoL&EFZfdI$!~^`$2zq10m|-;qHQDi z$nkje&UmptlQ1`w#*qnoM!XO|M*N|Jb=m!7_KUd${P6w+zgKMG=o54o9QvXROZjC5h2$ zw3mh|+?KO!81oj>rHLZa$=2{MC9qTzvL2fgVP%wljyv42+${M%#Z}Ydqg5iW#t}ga zy%&S?x@~vahQN<_I2d=28#o}@u>^@XvRjj~=4NZBon*JDA!fqDn=u#uC~&)`?iykF zgny2GV?h*6ENRdJHR9c*O`5dX7YJEvHor5A1Ep-aBm}cB&+cs8*Q4#pmM@3}>jiAn43;`vmtwt1gb@@$Uw&`ZG!aM>L#ZZ* zexzftCMeRrp-6S@#2)|{p2o$u{hth|8vGf6Kk!}JG9ZXxp5yQko1W~4YH;>k(`A>( zQN)7*--8X&A9ToD9@XH>TK;he)Ne8t&Al5obwfOgtaZ*mex3P|H{pJtcDXwFf_dA@cp4*j%=-34qTB#*zB5eV5;?W)>|8vuE@d zxR|zLwp+N>+=CWXevlmAbSZFKbSXx}`H5+n-KuNQaKL43+vi)6CU@^5{8}&8dQE z4-`U+3i@Wgzv_%7Z3;g~_u-yKnX2ZSNBI`UA0mA@yFE2mh=A{5npAUFSr@OZit*=K zNu8|F7l_;tV44oWQ*qCmbGFo_#k#{&5T2UxafqAqH;fP3g^-`L6P|}@x3pPCOCY^9 zUa7b^uo3-sD*2CvzF}NE#W63C--{gEy5U!PnZT2)81RM&F8cXcDU8<~a1Jg!d=zMJ=u`$|MzppV|)05P&Ulxk|5#afQNKfazHhQ+VbL z+b?_%3_G8Zprki?9Zr^Tq+!MjG|4-~`VCdePF@lECeAGEJpJhmjjVa(>W`pSxHn@X z!QC}IUgzD>GI!PWL(N|%zU#@DemN;#Cfon}zu}kXZe*VuAF??AD9#Z%)qHm$-(hr1 z+K1bZ6r;MGBZO~}=AZrjP*lT&4;E+Op1 zzU6JBtEMQvV9qk4f2ik-hxlfw>j9ikiS%z6p98C$lkNX9=0aU>I(NuldRN!J9=Vzb zW6w5P2l~EvUy;%G@SXV4IQ`ZiXizTk#?3C1L5CtH3c@+x(BEoya2pqQ_!U^lhK|+Z z(=ZLQ%jyuJX_v@RE!(}$PG{Wvke_ICF&;d4r-hp_ND@p|P5qTEh^8ecrRtmg{m3&X zG>7+9)1)v^h%gZWsw1{z^A^X)?FaauOq%pC-cHU0mg(akvJWtmQYO za0^qI`sl;RhoV!4X~`1C7m$xSsx=;FRvk3N3v<)2Y>2EVj-v6$=vd;5#)uQC3|Le5 z*&EA;WeZC-3*IH*qy`q8Nn#V#Y|30M+Xhn=?Q65wemaUFi*M~CJL=jeu7B;EK*pzg zhP%1&0%n0NnQk6WhQsG?cja(*7$;6`@wQW2ASHxDVG8m2IgD?cysT*!EIjy-D26Z% zmJQebGn@VeTqug*_6TE4y(DsM)jO7(6zyZ-^72E$d^hrIe4V;C+zH?u-?U#Z+eTdw zh36fXc_-Y_QGo3C*#(0Hw_^fEB8@&j&>KG5W9Qt(KO+RHn8t?lPM;l5`Py{fMGT&K zp<<=`Scg`CsN-{Zm`I4^y3{Fg*0#pBF_e->38DWe zzn$PrdE$5m>*ys-Sl_hiiQ%_Y3=Z+J6a_S^xRHvu+4%f^5=oMj&-5n%+CrrhNW zdx$b)nq+<_(>GD2BvVZ zib=iWhEPEv0iUA*yZkQ$knENJJI9 zC!c@kWHdyWljA4M;{pLz%f^@cz8?{G!;|IVhF4hw8Hh;t(zKVk3nybqV&OW_HgbwQ zh7-ql>iQvM(&RPLgxY~k!J7X5Cb5S@neq4$i9Je zxY8d40eN1<^W9s2ST2)M4w8VzWNnUyH@nt|%O-P;%Bh$G^a1edO~zhvFkvh<5kq>f zbhEgmG>9km+s>!RcSiDExqtk1Ukj%=>uOJ27>7$m!|Bf6FrccIx0mv#DGQ<32m=)3 zGd5ckZ1+G@m<2V#f`WYaiF;!rwN4Zj6ARTCUskvqu6@3)XqfO(`TlZE0lj-5yK-D> zz6t87pN5P0%7jE-C)GldhfG0n2!X?HO3R z*UBaE`_&cG$FX%}@~4_@jDwIcw~95q3B<;z#L4x3sgMTp!KdsDOJ%j#_(-vd=Y3#P zJ!Ge2{1+V0eUaehMW%3NQUkNoVS7$zmiV-M7G?E4xL!C}G 
zYABT56@PIW&Iu}|tL9cFDTN0r)&+fK(Ctm>;bPpbq3p9szC|5_hCa+D*ONg*m|LVv z3wc1{l!zaQt9uy$^a9xEs53`Ea?M<;%H~_SlW#iRR@IdUeuy@j6Q|+1>)hEbbfED)Q6RypZE2Kdu_DUVB~Ubv6Gu_dJsl$3Sbn zW{V^8mqlJj8aTHE6>dSt=SQPxRUOhqf72japiyquykS;nrFg+c69*kyr5wUzbw@U~ zi}gw)PAwOM0tMpqSZ=9gaS*>Mm&II9d*iMr8}H zv7W7eXv7gYH0L5tEfga|9v>pw;&BA%afGY0Rf9;uxP$;13-WDGAHi}%%2b`&Hy!x{ zKZvp4Py(Vs@#s1ONnmfaOzF`-w_vtzU`itmLN3G5iqv zrgRo2?dbw@#vbu&58r?kkpHi9s@ckie`oqM3Alq@6c~uVi5`6h_%o@$rd}K-P&8p! z-yeE3(fVtzoLJ zhJawF&twV0Rb-sNa zIpRtXt_vW*O#dwo8kRblB@I+3 znTnX2I8kVX(hRpmEHQ2C@rv{MBtvY8YlE<)fJ$_xChPzk-}t9+1%(J1;zz{POerMg zaCRy=Ada7Go(j^V-TDh+0d3U?D}4GI8`xep0*&dYfC{Of9==zY*UTWeJ$qO=)@eV~ zIj@fx*wX};basmQ0JeanN_C**x0i;=-U`7IHu-BGQb7bK9B_&0Mo~qw z3Fxzz{GQGP#+MBONOeV&!A%<^Q@Ovc;L>ilpSIn5TgUz2{9d<(X$*GGO>pYMg2n-^ zIz5q3XyS#X1-H|S<8Cru?${dRSzHg;bxIdr=!|_y)O-OCe;9< zVGnY#*plJHxH7TMGU59ebdBBIwX5d?3tP$>UE{N&^pHdGrRBo z(|I5w@`BTRI&SHeuy4EN|0`=N?|9tT0X>fT_Y!(HlcGI;V2fhzc> zag;I1VBS^?i-(!jJdE=f76<+jz*Wu0Fiz%=9*-iTm= zEe#s9a9lDoI1!HUDUOw@t#G5fOY?jOFfs+Ler3Na6D`Tk%^it>Hr{9*RSu1rndu%` zI1`Ai=e9;EVgU+|O%gMi)v))GT_wGJJ48eA7o`yQleg1oFk&+}Q48{f&j0wGZw%_C zMCdzHO0#`VL7eTge9LKcWU?$}Z|8liSphQm1{M&38RR-4?oTG0e|^JDUx59MQkG(_ zZ^$=#lp?{D08H?;I#Whn?#CT*YO z7y|jJb^CbNo(ffS0J+ZtA6_VeAsAktV4S!mk_}zSbJtvQz0`Id0&ZrQ)yxs0-K!gH zy%{cC$+plsHSNdWw6H!R(qs@+8wTh&!oM_yd2!FlujHfHZ*~3>l8}0`KneLSa0B=i z$QPa#BuDu9Lnet6byKAS4J2d55*}4U14cvw93nVR%q#ih^s0hr2C(2;;h5<8TeB`1 z%XD8L$S2Eu2F_Na#B~V$pRvdI`~;PBh!2h86$ge~0sik&Jlw8e_;YLgSpPABMYqT^ zlHJwlKLnd@Ctw7JTTLEskSJCh-|F)l5Br<3uRWZp0fAd30<$hbB=mVXzR8GpGE_Qx z1~&?eKbQ@oQ*nInsX9QQ_Pclpc$i4p0WwNx@Lot???w@NPU$qu>`w$8 zXadcIKu-O_XV#N1pDAK$Jb3%w7x}&9DE7SwG|J*pK?QdtK>hsz&|N4Weg5tdg=6%w za5ipaU3=AYWLM}iw1 zy<4A*ZY-nMUnR1!F!-cj#746OOYcTngxQA! zXylv$-vl&>YkLS8I@tFB6mS!_Y)Em0JE>3fIUZj<|00 zx@b616FY6^=oX7hel2@^1kuReWtx0HWJqw8v_`}gDaypgHiLB*(k!GrpCyGF%TZV{bxYEVf9!mK(L&}{SC-w3HiFiBT|1#Jkt@J7E|AQU>6$jcLG2%UG)uw?FF zL%9CUK3|EQaOeE=L=!13{#Z5Wd z%B&qAe^6Xb>qmmaCHT_&wwgVFN{Hx^b;b zOm%6G7#McQkVoygl-;CrW+wZcf(z_`rm~VA^>fmcsxnZan>gP)t3NmpjF!PyNpV*` z`8}^zA!g6(;DoA7lM3@dVru5Y+s+KkuG?rhZXT!B+!|%R5zbei+=4beeoN&d1IqRo zekUj0Hi7Ts>&Ca!kSQ;5HBn_5Nd{YsD8pMA0!YNiv2_twW!#WA@hAuiN1HuLJe1-b zP#gUX0rnYyk3ABY82C{IscNo4(rn*vVY}o{59SS`;8AdRMTTx$zDaqg?hR~w%LM-^ zbB#(Z=cDn&Qim*EJ)Kjgg!`sFQh9kgG|;~gwUzQZ?HRsH;cu7Myq$P+Ogu;07=$yL zbv$<7p`3vUL2xv)>TrgW$F|%I*L+Z1+@>Wz6N)FBP70;uWRNDOsv*+ICN9%v8u{hX z^tXh4S=LLF>h|~U;q36j_)pO`)!UO zxnBqqFziThWCzX){KiLYtvI^J#DTWLIm9^QgDkLO@xYC_Y4L;gax|i)~6>tIsA1!d2`mC zziimkgh33Z^IHMROu&RxZZGDWm|>M-OH?X|c;4uBj&Xc_4|k@)ubA>yG9eA_Y)Pqt z;Mughr9-70NSh^tBb{kepvont9m`7+=nItlqUX2iqhBYeW4R9)NFKo?^~H=6)zlH& zOnC|ET5dwkIFM0W?#7QkTVHxit#iXnfoU(ZU z62cxvfYtWE5LCl`UhE5rkFtcELm4_brJ$SG-MpgYS}Kur4juGKJ!L64epspwV&`UbcZ2?I1S8-X^KaQA8p8k91bTPD=;iG6r079+j+=vB8?5T zgZJXvzY7qcON9ftc*PgI?53h^FYnoqp%6!J&aq)YAdV?)pr^IA={B`F!5E8azXeO{ zeopCrX_M2yF|kV2SCl4f-+?FSb+(Cce<0?Jc~A+b>Ms7Kd@O5tb`eEf;oE7Wj#U0_J?b2PHg zwtX@oW(fghoU6JD;%ULdTx2sEEwmD5nPNmiuPjxy%K2_Dla56r!5b-MUFj;s!!0ll zM}C5VMEr>og0kk8w;JK#%pOQ6q32xi*D&ci4c#u4@6cvMR1jozDjr8w5pzJG6?pd1 zWr-Vn?cvGHd;6O`CfEOFj~T9ivnSaL<(;{YxfMj-G}NRI6PLOcmX77}ds@N5CDWd+ z5y7>~n5%n(o;7y8Flu8yV9hR*nrA@<1fJSZPdTb?y?yrSQ4Eg7iQPinMg5|xWyzYT zX2&b|xZVIf(QvUn-j;j;vAstC|CCEbV(U#8(s`c<GT*q% zw-^~f&y!(R;2LuSSB_1$_`sS7>A0{6)KfZ;L{O~LawJ#HvMDclsCY6#X2! 
z#YW>2j4%I1Qb=G2>L#heVBH&i=UBN(pTgpb)$1^5(m7GG4NO!*5qV}*{Zt=jSE943 zXRK6`t9lOMg4@ocJ`S553LVP!+B9qekTbrTctb*O;DHPij4~mG3y@M_Y@&WR(H-v~ znbnZGR4r9TC0f+WSn-MJI?d0rCR!@INOFRLw_r`v404Yt5s7k~)Of$L+7t?$(W?;) zO1%113P>3O)CBTK0A8WHV{#8W*!~q1BUhL58-Y`&;LH7!w=sNAPUN#c~SFs)#ymnc*wJ}f0nazw0P z?QuW!`yMaIDt_6FOd-j7FlF&?d!2D{WL9|9UyGB#06LdZm=dCdb%otDy z>x8*^5314bi_l_;VW{e2{y}tpN49Ahjnko&cr>?9n?sPmpr80v4khyNCN`>MM8+2V zK#5x`YiotaWCoZm8(unIu|vb514689JKB~e^Ancuf9Y9KAc2l@nrols=nxC%J6H<_ zfapyQDmO6(knRRO++1Nj^W0&koUIy04&J%7Rp?He@J>CQ3s$f)E=dr23J~dJA^hvG zJ&(iOi1R~ooyQ*t@hmWuIB+r*_udzNlUu61i;5)ygH7nL|ifJ zsUiHe=}a8oV+=lQMN3vFYh>^hEodlKRI&cs?_6>jtK!@#IS<-7zUv8}uwQ_*Sh^*3 zPcJq;Xy2(G)?S&{Sf>dfynis*qE2phAa<5=!O~_iw3>gKD6*p(lbiSO(QMiR6UqcK zNj%WsUoy0R_V;`fmi7KlOKd`_5cFU=RL~gu{6pi0*mCDz#rC4PST@w=7{Px5k8hN* zec|?^^MQz(Azl{AGd!QK+D|*WK2ph$RQ;CwHnB8PM;N@1CNdIc3IsqOx5@zZe!iI} zR8l5^IEDIgYuM=wY{E#?=Lcl7I(&rQX;{AT52O!F!bX_>;E8!Za5s&AiY2@}IC7Ou z4LZz++QV2z?dgQCnhaIqv2g?uUx)q@2Ui^1o?as%y)w$Y+8}5(-TfgWuGYonl||@58?234)x02HXD&cCYm5cr~xQnel~s4 z(%ez*J65?#CqmG&Y4WXS0(CS*l+@g(7WJF*CiS(*s9`SF!odzaIUhP!MNM)B*oVPr zb|l(n2?Ltv%%;f&oE7|Kv}A%KuJnq-B|f~aLP;K8hGUC^_D}nDXqJjm`^!{I!nX97 zi6g<^xY0(26NUhW=9-EpWLEL;{B?&fv{qqurW$r*w>D8U^WyV(d`9~X^xKMuD@gve zr*r|$Ei+!Juv*<;=jdU)zLiy~_dN!YLAqY#pFtKB34PK}n4cQETeOqlm5F2U5l5w0 zYkt(_p9e+!MUYS4ASH!Z8n;CiH;U#laE1<~BHmUD>h(Rp8R`c~R2|CplsDpq|B92< zKsD(<{NH9%t@w4AZyo1b0sy$0YBn^=cv%%Sl;ZXb6%y+|u;$?bQkEP0SbPo9SS?fp zi0^U{nhV2B-*;k>sDKR>#qG&vw}Gk%x4aQm&Q@U3qz1f3@D_Y`r4TbgoKx!odBX|F zxa(&7gUBPR-Hx9(A11&Rti&>5Y?Ed20jjwa3k0Oz+mRXvC3$A%*5r{*$(I$kWs8Hg zT<*=cDm2%VX4d^se0}|_!T;j{U%i&SZf9?3D{LNu%!`JYBl*6|NI3_op32{nmvMpf z8@{)gw)yB(JeuWl^T4Xb@L;Y?xH{5}sYo|@#!afSo@~NDeY4Lk>u1B~>o55j!j%*X zgj6jXJCmsRNzLW)gE zA5CB=!vJtcMil!25h9y~A^y@ZD*)*$PNxa*It5t79;6DO>UvkmtI(5K83Dt0>e=Q( zHqFWd;IQU^>};9jBl>zQAU1(qwY)W%&$=A}0R==%BeX2b#<~r^5gtZw_}OoNL-haj zo%zlTB(87!6WiZPVadUX>({Oa04Bp2phL|YmJ3%!z?jcV0QQ+t$>Wv6MiEpoBrPVR z8c5gpJLIf*gPavL%%Y{v)|)13f(V82t1OeH#^Y|IWY9dG&*7V<6RWX#z-kXXPd?Ok z9vb)0uW#Mr!IR3mznv>CY*^`GdfK3rJm45A5)J2rHnag;tyq6NIiry#%1%m6MZ0?L zq}Xqys#t|EtYdR62SV4P8GB#;5Of@ZGTmCb8`s#o}>j zA#ZTZQ&I!>3e6{T@GE)efCtN`@rVtJ&_V(b*IVY#42W+(`U&(Y+fD>LtoXD?r$MR_ zPlG|91~fZ~?T2}d1jm5gEx+*VWuO_YOMKA zn}_Ay7-qJlK{~%g+CRfqS?e*?8ek!fF^$ap`GQGA zL!lDOPiNG*Hwd2$+KIlFH7swij*3&HBr!VG^9BtARtkq<9fee7pnX!@)2o&L_@Dl7 z_1Ja4@WDX8jRE2?A7v=4&>u5`n4#`Kh);K;?2y7lbqP?2F|Xq$90w>5PQ@UNOygy8 zCj25ZZ3LtOsmE;tg_aJi^a#WLhAc79py4kyP<6@6ijA;34OIv%4GrzLq+QTM&+bvx zn^_Be4BeQ%1@^hII721dHGeU&pzGw?VUTJliyy`U8&))Q|1#{VhbfL?$#|SLL*%yP z;S2Y)vOfA!mW^)i$DH4te`D1g{z9Cffyrm#f&oHH%zT)xrGEI@Q|4SMvBW?k zBK=3<`kmqEWk=%Ypku4q_9fsZ&&S8e9+@-xs9y7t90>>7p(VvaA4MwcxE4TW_ zmX9qLdmCt}?hO^_Gt@GCI4iD_d-<@pWhyK3c(~q9BUK{oydIeSq|MNX52I!h{|JMa z4IsgAD^#gXs~PoP)IjW{j)%j$&8MLD$@N!;LL&AON#X+_krV=;;d!8vD02E_XDC0$ z!Wm)m+^XwVJkUdg+juj7q*TDeqXtYW*FG|x%0xC48GbJ}LhqoJX~f;Vcp-=08%c)S zPUO6Pl1O7q4WJ@(1qpne`=^eRL#DqfhRY13-%j(0#}(hvrF~j*hDmX<;%okV5i?)| zj)Ogj7U)T8YCu8S8@DV5Act2=Retv@mFlKlt1W~inG~Z;wMn%gkbXkvmu*YA^8FqR02(9PPPdYMD-6gD zT`M&t{*N}_mKaJ-}EzDAGl9;>bQFT^6DZ2XP#W1#r%x}sq zxtNPjC!sw~r6+a@0PAYn#;1Fq>6m7+;8F;c@4+ksYX(wpd*Mxb4JNX##pk!xzQ9;* z@X{kXgno7}vhM=qk5tFwtJg7u!%U}4SokUPMd6xvvUwN;Zt9nbuH1ja%1yJRl0I{> z+DW>0Saz;cwaT?$s=)n*@R=!IRy*pCs`$8xsjsg<5$ps3r`g8j;f;swf_SfZS*xQ$0z<*}@k z0omX4!X9x>T-Kzznb^Q8plMj)069wh%NOqW=OK%5e9-%tAmFI(vq3w@PP~ko8e2EcAjau(*WhnZWnQhUMoRgD;zfFrAsB8YPjLjC-leLbuiC@7bez^3 zW~wmSo~$ebk*JqcxYM(mJIs)}XEnP4r8|tzk`eH|5l>qNWYxn`S1EeQA)-DRB|AIj zb0Sxsz5l^@S!aUFDWKvFP*lf#;^tdEL(%9}j#3^bO`G*=I@2=4tHrMoS9@M>LC#vYcEn@7`W4PJ2Du~+5u`;v*WJmaTqS(hx!Lc7CTP|ElT){v0D#GkhR-e{vQf8_` 
z94Ts#Q7~$e_gBlP5h(1W@qFjO%6iBg2rt~N&OGGx1=eV3A)yBJXYz`{`OoAe>11DY zY3I@=4@8Pf<2>Q7T!ITw{9Qe8zi!ZyRnVE9EGwm*(rF$yW2on!7y)%}NbRWH6R{Mp zeW5Ci8CjBs{7Rh5CnvS(ndGAX!h8Mc`~UU}Y7&ugq*wbzhFuC=>eCQ)4_%J8-;gNV-84Y*Cwva+i&)n2PlASKtAwT;;m*UVNqJPX^B znAz^9C!)|m(&>G7KN)KIBT86pa9lUlaBzC>n43g0u0dE!8Bx>7gIr=sU0}3~5S@cD zwll4Wu?0Wb=^@_Q*bCm+m@ zrI7dn`Aj7OIsiC{HRS|Fd+s_(%6uNM1MgXa5DD$vnlRc9qH7`<(D!fgLAEdTa-FS1 zDk9D>kEBK65_|(A`8>KdUOzL=D%rWHRsb(G^pljve}{uzc(%QS@pSKsS#`9 zgnVPlzGXdMU1IW=_$;OpMST< z?DaqYZfAM7bU92g~jm4;JN)flDc>pIlJxO?ydJZ#$P5!#Qf zO9ge?pYFyeSJ!c8&B}sAccNJ+v$+QgcsCmOC1D5Ru*P42*=4%W2oT4%g zICFDv7(t@$Z35{wY6sFLrgB;=@}JV4_hll4IB=eGV6bJnw!|#bZB4&db5bTB4XK)f zIgPTdQgpyfxPKC&@p2Epi#Qpq{8XvOT|l$GK-YLkx5SFJrXOY5!>QS>z+JW`TQ+@_ zU5uK36YOUx1%E22u&v^Y^fQNKjc7T(KN-H+=u-;pe(xFE?|26%@GFv(9?7^Ne*%M? zf)h}jscb9FiGhTAQNzfiH1bjRbpIe(5Go-3)6g0mHkQfl_K&(Vo0)3Rem=r{as!X!V1a zM|I-Lt@j1(tV=Nsh8pn?^3-pWamMlpS_*i;vFJFx89thZ3BfoyP9)!y#D~?ZL=rBi zVJUlRSQisA*n$CJ7nVc`TRNZ*TorFvFTw<9WFK-X3Gd{m0s;DX7;PRyD^c4r(E=ze zGXRpcEdiaXC}6Ivj3xy-hwAnN5Zw{h2>C zRfoYsrup?1yb_Nby1~thun=TQy_uWuX^Cd^(vIHR3T>n|v_@YYzz$ie1S=M&(~6C@ zx&G-ws~5rY#yTp%Sx)TF{>hA}5_ZNK+_Z3mTnLd5uh$%b5hum!^9$?t#X4w6;?(_+ zmc=lPObD1(S*~f_<&@Qh^QMFb^^Y~ul=ij>IA^tcGPA1gEXY9ch6Sjtjz0Qd{J=ke zdWy9+kKG7}=eI$+Oz4j)PH5>DYR);Il8RN-4bY9O;oqL%ifdAopgd_q`1(7Y-p?`5DiR%GYg1_3h#UH!aT{O@0BY{*jM$XGLW!& z<+}LLR?eBd58vNYGmFk|<3k93%Vziq=GF?+h_~lL5v@JrV&a+o>WEZ;N?M8RWW{>z z1SwrlYdpGwg$)mk4@p6DcN4Vew@`Qx4=F9tOt z`mboyhAiI0^dI&ij30e)lOUH2GbkX!kx)>R0E=n0GQw%T#NEnUwyF^?nfNvqi~`6w zGd1#0y?162Uvo*_t)AU@J%XC<6I4IieDev<-fh z$81hiC4J#;ClMGX#@XYB91SLw#Ribe6r*>jL-qSzzwxMatMTlJFimT}OjD|wCNlrJbkuCJ{|Q6fyN@N@)7P&$VL0^e>w@Gi;|5`)|C!(R{96EXoW=eKlo;BeQ3 zh#_0!SMq4{id%V>makyIBYj)SAhq*HMgp0ETH>G&8&HVR)WeVX>y+y44SY?5nTbPC zE@aj%3Va&oD#;YA)}vc=D53I#dl!sWLiT}c7+-<)uv&{R4aXIEaP@%tvfm?y9~5H|oq=wOq12Q;lHipZ8vQ-a3Ej zI_9xsUHM(hf4TuNA7;(J?aAm6{{E&J4aN9rydCeCli5Z#j_SCRYd?g+*bckka zbsrjG=C_GeGn)50W{C(jwHHYx+4|A|CI^QonaqWB6yrm$F{y##BT}Pk5o}6Rk)e;f z?6H#Kh0F+GgTJ8ORZAgiaUM!Xz8IuMeXypbSg}H`8i}xpNBpa8*l61C7>l^J7U|JK ziwiOYdv8NVt>hAJXH6s zZ8@Gx*jUs8Iu9p&x&h7Wyv?~3zjGG)f z6-q24%iSe^=#+$QU%W<{+gibv(u$+Tafi-+jzlVHl>=$>E5KY+&fPpq#R_Zmwi<(z zaazW4#Lf40m#L>>(J4K)()qzS)i7a|+8&zp{a!f>lQAV2mk(B)uoQ&kf01az3|>ml z9-16d1sy*>?kt-+6@i+0V75^WgC6I=LjjOaQ)%99tWsPzId(~+sA2QQQj0j0&+S>+ zFN$z3_Fk5l3HEq68EKPqh!~jB0a1NW*-oPhM}U#((ERUgUi|}t?s7m-y3&T^nq-?a zSYlTVn*^!tl8+_X!9(Kj1-nr4A3xeO^-tUtu{{9M_#wlvxDPNW#SrX8uyKWxrkYo& zYNte*EEG>OYF(hqEZ|$xs=fXr3k}XoZAEMN#1_&zf95&4X}{$%<9qa@iCES8vV7L& zvjeZ9gE&Tp5?6)^nOfwG>-WM095TIstlL2i=P9jLT(Lhx~3@2IXI&>@=AC;dri5 zVj*==Da*Cpbytcf*vjVKX3Y8{tHf8i0xDz}PuS(n*#B*q=yLHZyo@KeN^1xIp&2td zoqjb1Q)kDJjg-KCO8pv7@$U)3DU?0Q&~NQZw`t4W_dAhAx&x zHZW9Q{Dbi7hmoXOfR!xkZTyj6cW)eOs7@ zO{1D03U|0Nq&IxVdpCH2-^Se3kpA{+FsvG~%j1j4?4QLO2q8v=x61`mHF&9_rA#hB za!7h`a;TQqRSgy0VUwb6rfpc4O;sR-VqzFlG>vxt_`9rZS8)f%Yv4&~STVeBNIrx; zhz$=sB)3JSP=A$1_CB1ckQ=LWK5KK9^4tcRwk`~!s$}=zWdrDDEzV>`8tsSbP*=2U zK}om-{my^mDUEu3o|>YZzzy@^6_0&*16$AxCE(AoaQYdhKB*4jHeCL=Yv>-y5Q&OF zu+w^5;m@?}42wO2Fj(+Kp15Io-e5`LcI#gvq8iAz21_^ia_=5;qN}`Nm$EvC#i7zd zEl|%!5Lhm%%m_hVS3EF+Zp%&tL+4~!EK600$fE3H34KG!q-Am9jLZ9{x6?eCn^Hpw z`DB=lfLW6VrVF7$D0`P!>WFb*2Aql|ahVp60vwvij_rdm{}sGys`Q)PiWK zv^#rBZj2IIvR6K)$_xV$Ia|o4x>fy}B?}3#s*aVEZO-GzCvS>cxh|%g1oHH1h!ra? 
zAo)jwu60w`H_aWwh`UXWpOH!B@sDj$QwsgpKn8eqDc|Of`B2N^sm$6X8a}oegmIR4 zs{^}{DOxIgJRt)vQ(<}qRaINsG2bPOs_KB$9Ws$wEhy;xB6V4ek0Y&;V!DE6hC^n`*DCOW>lu~v zr{JgIP6CPVCzH))#UFHM+~mz^-$@}rA9~sAYW5~oPB}KfFEzdv<2>{Xf->Bz*r(w) zR%r2lw*gDn@f?qf=$Ww6ruDU95#^Q~$w(>Fx3B|zQP@A^_Fv6`8L7~o*@h+y&PJ%> zY5oGy;jsqv4G<%q+;JmENZ4t=S=Uik&DlR@@!$SACJbbcak9;I3aaR(_-i>*;>7?8 z>-_bX;$heQ5so@VMNir}k?5B#ij_hJ|C!C1l*bFc$=&k7R8(4sp_f-}X`v-$uK7CSf?RDqr+7%gS>^^_cv^Y9T#1qAM;hTojuiW^ z$=Pd9<6k8$wbsmU7+Mq|P$O>pYOpv6Ajs_$J)EC4DFOxl!_Y~n0a{$qtijZ_fH8cQ zr#Pd>P!ge*4TPz648~1^%40740G%n;swvur$Ug2|81shz@I(A)BfDI&+BY>{xX&e_ zm*Vrb*GBd)n(dxD;1)J__+h=RHIl4m5k_S*i=Be0>MJ3NsiMM2+S$jXXhVeHS#{Kw zY$abjpEhNCG-O^pGyXY$N9R)tdMHf52UJ3+IbHn*7z?X*k9yxJtzrzLhmP>IB(N-Q zQdqRj>UIiD7UNS;jC9WOLY&jh!}Tg6Rxe@=|5QDUtvQ}q0h-t{^RA>16+_4USTBW9 zi*-Gi0Q)1R68RU8YRm4gXA{5VS(P~y7!ya0(0b%g9^0a=yHKPd^TYNX0SrYZ`S;HBY*($M*CHHrw zoa(-(WHw!#qie`*Ry9>27_g_4U#C-6P2azffyuid`VW7V6mMzC0_$8iF49siSY26y*GWNY}yrVC6^$@bs7hzGr=%`NW}2{mLT>$ zS&RhnK_1xGz-2vjIBx1RYxsmCcPB0IU=8eVcVnTOS@>tb8)0IsIPeN(EEp!$z~8WZ zWU_?G_W)zuz<%WUZwCdl*~@H!LC4cR=bu;LTy(7OL{{nC+lw=RWq9ycpr#l7w{|M`Y^$7D zKSO+-;LM&amEt`Q9R*I3W`Rgf6*5ot*yJPGD#86(;6!JQA7ecCS0M*kvSH6EXoAJc zb<|jsfO7LDj7b`?YTL?*!fG@wxN_8%H7vi1FPWd=6d#!cDcA4MFN|_=i=cYmBn!fN znBu1kUu-2v)Yl%^oHxz}#BNr9wxG%8 zycgz(%U-5wKsTAaWSI?W{QCrl?w6=_tXAXERvunaRk3JN7>Yw$;8rh~k|@U~{d4Gn z*B~un2|#BM6yQZsh0(F>;$X_jC3C49Fc>P7lpacW?CdP7M{q3{vM0HD5Gt+6K7kdZ zf}lJv4LGV~Z?ilGc)17qGHTTOGlK$6f9g;|eh0F%L3E3Z+f?g>(Sl$EezpaPlmTiT z3#Wr>`Ch|`ezr~j#ITrXSgz<_veTo6Vk9CA;*qp_AJcv66~sU%QDiHum4Ja#LpAWL z-oJ{pi3(ZCJO6GU1Z&dY{4~@@*`N%TfMwjlBM>Ddus%1&xC2@?(hDf)VkPK z-cpShl#$A9YGl~{@Wr686}0)x2HJ+n3p3_a`YG9Da~&3pcI!M$d)@DU=u^)ceJ+yT zVPcFW5_m%yT(qqOU0%XMlqckC1mj*%1)UcAL{yQK$YnhIA?kk?js^0jgkMonSUeaRC+ z2C2@AGemd|!(+9|vKqyQL>!HLL_aLL!2BzL1K;VT=X6>pokqRRbfIf{F0^%P^+V#b z*sVmaF~;sIYc75_aQNC@`-BB|PmPNEJ^RA072%_rovvAU5z9`w8TMfPb)85)!64U* zlujdJ^O=9v(X52DDS+Wj79ZpqcaQLUUDRN#<=1D)sUF?UPw`i4c*0cOHtm2+R%LE$ z5A@sE1}HDRU8>uSo1pH(mCL%YqyoHq5=-`o&y8`t=Upy^>M7xZVDhBv=jEyp=!KBeqM4YuGuHcEcW zG$Bm(Z zw)jLw*XSBWgRG9HhK?=Nf!$_hp$S1@QOk75f0Hk$irTtcD%yzZLV=;Wrk&$V!`Tls z;_(TGXYv+{Z%*#tW;Fy-;)EvV+HBT7B@yg#w*%m4(q&=R4C9A&VjpL+;x0hri51;2_ zOmqgpG9T4MKS|bfa#|+Db6Th)K%z4@dVfH!Zfg~qt_ z#lFPJI+A?O$lW&IP0q7`%v#)dks+m$eX2&?Q$EN!FfE(D?inoLaDv@NQT9_eVX>`K z;!ShL17PfNXHy=4*DYqLRTG?L(A`LMN5E0CFaSG!xjV8pSw~%aNRxf-c*D61V%(oQfCzC9;tGL!cy#a+suvR?`;Q*aG|_>k@T<0bkd9Y=l3yoXbNw62^kM{l{ETv z$h4mxL#py^%sR3>w7nu#Y==s(v46+}lpWG%w1&?ofj+~gd*IG!4PdwidwdyTz_LTc zuQARX2#gvfM0Re|NIUzB7NZy0R5(ci$~q~Z;P1i@RyA?uE7?cp$>X^MQpU1#TQc@& zIMS?!@{_7scuZzYCD}iehL#gi_Gl$RJ;0-+Ux>dbIu{xKE_g9`;`pLo zxe57x#e;V+`Qp>EC+p^qmw4XUg{d$?NLFoG{CMS@{vcTzLUkifz0>07@uQt$QLZWg zA6jox+&{bZ#!N9FUs+US>YEiM=AgffePeEN;d+q<{*~SK!W;vV)0gR^^a*9?>tv7H z=_-p_&@hoJV@V*45u?*hEK+?6H^DK+Djx~Y3{`CTy^a%+QV0=O=J04fBG^ARE$y};BK#2`wel0C&8*V zz7BroFCvT#5qOkg>;Vr73JOm}!Z0H*NiYz)P0bAg_ISvAX{s`^$=?(?zJv0LGf*p< z9c4ur`PTD4j5eqoR-?SDJ`4FMrJ$~s*@ktrl7_EU7*n5E7Gf%$agF1*uGqQLZ%QVG z%QV*g%b#sHCelBml7fi#DOWG3@Xd3oIIKzu4-ozrI&SOQIVMJI($H~W_!ox}KD-+LbECqKGP67dmsnZwETbbpWh~k9H2>{EEezc?EKGf6qFbYRz@{-m#jmr zh9UA;xn4@iys3G^U6SJb6a38eW_TM-*-M=n9DYbAdXnQeTlS>isQDg)SN`zqHt1G! 
zCE->EvLSMRWyCkK-PP=pT$+>wS8=>oNgZO8;oKZxb0?9ywridzPW6o`MVaQlCvKlP zJsg&CXkJn=)ZT7~TbC+R=$kf6&yF{;-x^4D_rDT0v?iHH(ECd<0A^Wf1T0TA+-u7h zI|N$wkk+t=toPg2V`)>9)RUY}oNG#Y4_yaMl@)tJa=X#ZZhYDg(l$RcA_ z_+i{&PnL3iU$V78LG6FB9K8)QhA2AtN1S+QXie3`kY|z27pMeIebUnJN@q(L^{jMErEDLu-6pjbwx;Pm4~A=&pmB8(e|0pylY-hA;oTaHtf zaH-U<0~OkDrLbsNQBVpTJMn??ZCmk5gNa5k%TJiTNYsB%ip*FdSj+_j{aN6~i2d*v zfws-K|IRl3p;$CrHqDhgsr#=~@$RY#RI1Gi%T-F@24*Z~<|FL{+)WSr{xbRl6>rkr z6lXE0Bc9K|m$~rUA{iQ$1a34nmA(JN5r{^hY`35dm{Jt~kU*f}5uh?B2QChY+o^oN z1o*d2ccIxsoKp|_dkB=%!kg(T=1bB(po-Qjx;!mtlg=r*W%xU*?idS?Hf-8|i)osB zL5}H3q#O?!r<*Rivv-!(VOrtMG55``dL2u(bEa61{L;&9@Xo1^7>2%9@Y|(DiL2zd zT8&;SfR&PE>Nu$Ja6^k!Xt5EHe~(@LWMLC3VTsxUpDAI=pgo%`YT-NSUl?h+1WRil zAzY*89il9GI+HKp&K&K%Z@S^AZ>h?)PjS{eG`07u({{yT5xf|uWPguhjq}$oJqO1_5#3tP@3kzpn zo{Yb7T@xLFL)-EX>ixDwPl#^*Mg&>dGTksm1lJ#vn&Zx@NU5dS%s;_Bzvg$H6|D!r z=@zk+{Ee)8>MsgNCQ~-fr zE+Z7*OCqGUok>@%2z5*AOQubMhqj=XY9zc$b%&eJw3L7~!u%<}DDattEh_@0@_=6R zb$-fe{>n%hr>=#3IO!R=30eSMd>}1w3nZJeCh0BX+VbmalvNOtiQ%5N3dY|tLCqt) z*);E?JV8(dL>0KhniGgZ)6?DkR>Vb!G(M7LS}ikHO_|`hs3%XGlE( z@!yKA5gs;u_sMjY?bDK_RGxd{-OLiUou0PC zQ59FSJ?iO-MYpRsy-tzhs+5w+s$eR1QD-p;j9Vya28O52-s^Ps3YPMZQG- zgZo2^Ze89qa3Ik-hIUjXc0K4$&ZCyOFIPdh_S%zGc#lx-ctmOlSRF>J2Q|Q?^*L{r z-7dlE^(;cbL69%KlC8&Q4RR3ZvKoMN96C34>OjBEDui$XI1RR}mxlG#XhrAs&v>u0 z47%KU&=)|pLa=G6?zpj%km;dConwhR4gz|mYP6km#(SMDCNjSj2e&E0#wg5}^lDaY zP$C>URWWsKc}}cT*4CL?d<~KBBpPrH7-D8XlFBmPval^>u*SofEJVk-IXNXKQ(60% z@H?X!TjAQRNcE?QZO40b@*_VDxX$3H!{p5NR)miTbE>hk3_;VjLs}w}2$*`9zh;wH zSksAXDe3`NO*B!P&r07PksH7{DTx=RK;oOL&Y8E}JHqY9pw!9K*;8*1$kn+5c^Sp3 zv&8^OzTL>51h9foCEW1=MhfYt#gG9ig66`EYoIYSsn!~DkN&nhC zQ`-=N)b@}?N!F{Wo&yDPEJ&0;cH$aJ3t(d}6N6Q;CmL>kpyI$332qeaW4?rM_WcIm zq+oK`1C;^W(8;uImEg4b1sCxOs)b2D$SC>A31Th#q4bgybI;zIGOlu+`DaVxrp++c zOs-}+>KUc!DEP4FlcS)#BE6>&^LXCCYc&f8bfKSG{D)-!`$U}7LP~T zgf|ycbApZlVx1$uU)qsLx z{*YaaSTQ5kCz#(+p3|#lweCK;;ij*mymP2}-8aEh$1AC1PuXy`q|Paclx2YtNVE!` z_gCx2Bk=_k(KpFQ~-H!CcAmk;lbGJs3elHD*3p^oghpa-C}z!a)W zDyRH8^u%>&;(X)0E@A%IsG?BL5gZBWFs<-j3##+Jy|_8@b_Vqisv1;}q|z=-$y1-E1eq9;&ta&UX^pUx95YqLVJAI2hc z%nzu8Maa?O4T`*22&dx2xMFS4!+D&M?++Hgc-Csa(vSEeUIBA`BcCClBBaowz<-fj zy7bg=294&SmgV@F?J_6l7-foi5Ft=#i(Gr#SynvA+rLuqkD^SLmFs&-WTZ-BlP!o& z6d^5ljaG#P!OsJfSxFVydAM=5o~Bp&JR@!|ht*^32{cjY1?6L8aHT_19zLk7Q^o0l zU(BZFJMO7;)ax8Gy4Sf4H_9ZF%C=VvURS&)H6W{zY92jV_O&{`$Rq9;lZ9213j4`fDg0ortfMK>S2h}n!v2n3M zor8-xf%44eF|1Xr$=ilck$idOo0?U}bMCvx(N6=L5?J7NQIBL67Im7E(Zy8ua8#L- zrM09nj!iZWc>BAdO!-aj={N7w!Lrj4%fLtKunzAfI^Ss8Uz??_^SV+pc=W~fj}c9| zQasv{XW>=t2iLj8naY$4XGJ7yS19_(GhiA%JmEyv>n=EGi9OwJ@_Fr)) zSSPy(rJ+J~_F$AdNHbr{EUMt8;o8}tO7(I%K^qfXKXsI(8S0X=IB7|z#Fwzj1Gao0 zC-f^d-#%jD=-0_O7VcEZw`8x2p=SlJNc!*)1fqd(tA1!sYbSH2xp!%by4PNo@MXa* zRro6@$93ONFU4)yrGB(rU$X8MXBf&xktk2Td5*ikIN7iDw0?CaRf4;B76Beehb)iS z3+MUKKKjchD{*TW9qY+eD&6F6F+O8yvtXG^v?1E*<5I96{=cAKmcS5nMyHv-SM!F| zQ?0NlhB{&X)O^tzcq$#9v=8fL$ZSqKUFon`hO2lr?(xTfUFM80N+nM6Xf(&l$w}kd z47Q)jS)h$^vnWu}O&Sp$%P_1Lck^$A2qs-ZopUy%2=FIbvKp4o-1eYUnxiu`QuS^2 zd<&Mc-v%U%5~#xVgxBwG5-T0jSOL)FjZjnhpZ~*u;D`R-p3ZHhVIYd44+TL8^iTAw z{E`~66j7tCctHe9z4gHhqA$g-87KJ**WPEwmvS;q(&=PoaxVLv&8PH7b6xgF9o|Cy zi-(TA27yC%x)5S%sD;k|1Ni|exTKCv7)2$9Gv+Xv)SL!&51Ix9yO1YbhZYn&8Z8V3 zw?*%DVf3*TU{(Jh3C~s*C}M$+2wE1PJ!d-ETysXCq%mco5%_l%0;jvhqr_SUFX$nK z+rZpYrc%@cy_u?S_EO@X2pgWg0y!Q5LdD>8`B}T9T3qu{Likt7<#EOapYcx0Kdr$> zczxHbFRnuHIwd7cE&O6FJ=ds(WT=AZEQfXXD2H^?@g01sauI2)NYN6*14ijfdcw8Q zkUu54QeufZR_S`9Jd={5TCgsK3ofa+lx{DE?~fr!z-TN6zlxa@PRI%R!DHd3q0trm zt?_Fur1?JIdZ%R=RcbdKYL0RM_~D@JWY9lodkg_yV-gFwPbE{ zZN7Soz)s%+ePQCkJ0tB;eGse_OK3*RE_!M(Kq+byz`+Ax&CERY35CUx7phLKBIyIy zQLIe8u}ocXG93fhu-3DP 
wTo3}U5qvXt0AG1B;nV<~PFv6Lk8$ z9IF$B!@W zzx&&3{g^{z4Oo-E`0{UmxclD!hr=fH6WU<+22OA$wNZ{w1E>XPRB)_vgx!Mb)#V|% zJTBM91}T7js?_C}`}pw(=bVEf&K|U+0`LYxKCHOr7kcCYgv|*l8@}`&I~<$4Ke@IK zzc@@S_a0t5iT-uGJJW|v-eiiatTL9WXp-*^YS=xyuP*-q#y&l4@nsWACnyk zwQkOtHkgQrYy{1fs42nfD|E>I0W{96dm%qPtZ2Ucdp9c!u z2>@y};Q8?${cd;BbYqlob@mUO0C4u+XjIg~C#XV-1zzAd6^@R{MBM6=)5A|VyMyP) zyQcf+4)@cy-|V|p8e`8B31sft;4}c|5x^N0Lli)24Y-7T;INc6hsfwY#iNB<29y9a z{XoW$2*4>v)=n!*eE&hXA6#obou|iVjX%D)GdhB6ZJ^rglwk(I`MU)kt9$ih=di>w znKqicKOY@OFdP9I10Z*VQ~+x#40#JoqyZEKjP?(zP;10#$e~t~q67dH5dbLzug4gW z?8C;#&Zq$9yczj0e^Vx$Va?MWH{!^f9i3ZAp#oUdq&9L~)R9oaUH;RZlJ5vSO6TawN@Yb zeU84YM0H94NNogu$Gg!Gd1%!sA^?00-~a;0sSvRt5diYFVPhW**8t-Xxl8yl7-WEb zNCn_pCWnW(LGF|z)EYhG^o)d0C;_Jdx8So@0+j*VwKfuv<4gp=Iw5eb$fqyS&<_C4 zUf`;qw@<>c#21pU8Iyw1G1b19ONNASV?LfyvFZgL43W+~``!Lkdm<>@!+A zx&)8USx^B?9!0`IX?(n;jS9|a?nN~M8#^4;f=p2WslC9>In!Y%5mLwj4cQz=FdTu# z#~dPn-HY0gS^#s1m_q~r84yx%0tj+aYg0FJsyjj|fJPN2y^x}4;560o0a)b-Us92B(@>+@#mNUjk zaWV##$wV4}bJ&?9AILOMWCUmo6%hcbHLy0B`?h_re%K1{I+nz)WpeY@EYu=pKpOya zh|sFvM#32Z3>(#mjA5gKV}K99H%F-807$KkOnd+s5&@86{Csx$t+qKt4DfMk6bU{d zH7a2;+fIoqps#!)hePD%KvN42XC)CDt0Ko`)Ab)VAqUuplmY!X6(Xp@63cQo#=$jt zEka|+Dau0(QUEwa0MFk!o^t2DjQ|HZz$qdIR;_WY3XyODYWlIWRpH?JFxiku@PPwx z8>B`6AHX;A!#8D~9eyEs|J`dp=ndYZ_M2~B_<9wpzuEq|f$U3JqiQ&zvAt4Wl8>DC z+KM$F#s+dCYy~L*DF7*eQ$#ibN&svaQYMcgI0Hs<$jAYt0HahOV;BH(0Axo<-2!N; z!vWBk=N3{Xhsy%zg;f|DnC!<+x9}Z$nx{iIMhajRsqYqSY#uqF*_;vi77Rl_O$rX+ z=6VjNRZ|qss8v)OK~t2c7bX)Kt(rFWye6mBAiue-aP{zn)nrF*VfqTUbpJX1=#Ifp zhk&r3NinpD1Q47<3PWEj8x`^ftQiKCm)~n&3~!o;6Z(PE0LD)+Bn7}ZJdZaS6)AvI zL;~RavQJQjo`Z^%4>u_T8uLs-$||0L;XujzOPIznQ5<~#_jKF9h zgYpTY1 zjpN@+(jfzT%^4Mj0H=uT4k!xPt3>ln&!$#r%&Cp{gEsU4XnKZYLs9{Rp|#P_a-P>Y zMTa2;2N0TvGXkUPjdNqTarBOE@v44KUlR)>+&=u(Hc|ku$C_OShy8;gDS$_UAFBpA zJ~szk{mmDt-|{zQMy*lJ03ZL>j#LBuXWtrC;Lxf$Ybkss(UEi2z7#1#f@d zN-Y4jnzw&eAzuTbXk!@q2`QYztQF+VN{ z)f2SZGjN7`ZuTH%6^yZ3EdpMAmk=ogJY>04y)f z35N6>I2bk@$PK`dHX2o!v616Ur~pB=aas82-r=8Qe{yaK+EfpXZIt6|pKwXzo)^+1h$B@f0kr8mKj*JWg+0Rk}`Rl)a{K3T? zuW3=)&=JUhuz_nSoEyNv20S@TTK68Fo3(5hrVEqzig>pW0Z_{kQX^oW=KbKt5|(qA zc$4MEhycRMiV{o(L;&~%@Fw|NzA3|Ypfhka-tm$Os^;Vi8*S(2fV;6Ka;Dbl&#gPP z7I7QF2eLs>-9n0fsMSQ8;g4Yh!svBBjUk-9Z(XfF{ z2{`uQTy{}Zfba$t&MknZRoZBt;b9m6h6ds86Ee<308Pr5b<2Z~$wUko5>7eN6h&pS zBai`b4B!O7D~P}esf}}8aBfm;av;O;*-Z+$Bcw*4nF^@o(d)Ak@~Dk9GPMl64=E8w z0zQxdqyVH0IQ2|+#PSdc@{n3H3@TE-zn~44#E?h8gAI8|!2z(;2hbs$09cPTDu!0m zVPhB?+nAy{yJcjEyisv7q5>f2$DiSg8<78f_jrGM?lG#JQLUvKIh+7Ob*oG!0@&;E RFoV`@G?8z<{PN*XzXGC--R1xQ literal 0 HcmV?d00001 diff --git a/mmpretrain/models/multimodal/ram/gradio_demo.py b/mmpretrain/models/multimodal/ram/gradio_demo.py new file mode 100644 index 0000000..206e6b4 --- /dev/null +++ b/mmpretrain/models/multimodal/ram/gradio_demo.py @@ -0,0 +1,109 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
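+# Gradio demo for RAM (Recognize Anything Model): builds the model from the
+# bundled ram_swin_large_14m config, loads the checkpoint passed as `ram_ckpt`,
+# and serves an image-tagging UI with 'normal' and 'openset' modes; the openset
+# mode additionally loads the CLIP vit-base-p16 checkpoint passed as `clip_ckpt`.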
+import argparse + +import gradio as gr +import torch + +from mmpretrain.registry import MODELS, TRANSFORMS +from .config.ram_swin_large_14m import get_ram_cfg, test_transforms_cfg +from .run.inference import inference + +parser = argparse.ArgumentParser( + description='RAM(Recognize Anything Model) demo') +parser.add_argument( + 'ram_ckpt', type=str, help='pretrained file for ram (absolute path)') +parser.add_argument( + 'clip_ckpt', + type=str, + help='clip vit-base-p16 pretrained file (absolute path)') +args = parser.parse_args() + +if torch.cuda.is_available(): + devices = [ + torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count()) + ] +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + devices = [torch.device('mps')] +else: + devices = [torch.device('cpu')] + + +def get_free_device(): + if hasattr(torch.cuda, 'mem_get_info'): + free = [torch.cuda.mem_get_info(gpu)[0] for gpu in devices] + select = max(zip(free, range(len(free))))[1] + else: + import random + select = random.randint(0, len(devices) - 1) + return devices[select] + + +device = get_free_device() + + +def ram_inference(image, tag_list, mode, threshold): + test_transforms = TRANSFORMS.get('Compose')(transforms=test_transforms_cfg) + model = MODELS.build(get_ram_cfg(mode=mode)) + model.load_state_dict(torch.load(args.ram_ckpt)) + model.device = device + + if mode == 'openset': + categories = tag_list + if categories != '': + categories = categories.strip().split() + else: + categories = None + model.set_openset( + categories=categories, + clip_ckpt=args.clip_ckpt, + threshold=threshold) + + sample = dict(img=image) + result = inference(sample, model, test_transforms, mode=mode) + tag, tag_chinese, logits = \ + result.get('tag_output')[0][0], result.get('tag_output')[1][0],\ + result.get('logits_output')[0] + + def wrap(tags, logits): + if tags is None: + return 'Openset mode has no tag_en' + tag_lst = tags.split('|') + rt_lst = [] + for i, tag in enumerate(tag_lst): + tag = tag.strip() + rt_lst.append(tag + f': {logits[i]:.2f}') + return ' | '.join(rt_lst) + + return [wrap(tag, logits), wrap(tag_chinese, logits)] + + +def build_gradio(): + inputs = [ + gr.components.Image(label='image'), + gr.components.Textbox( + lines=2, + label='tag_list', + placeholder= + 'please input the categories split by keyboard "blank": ', + value=''), + gr.components.Radio(['normal', 'openset'], + label='mode', + value='normal'), + gr.components.Slider( + minimum=0, maximum=1, value=0.68, step=0.01, label='threshold') + ] + return gr.Interface( + fn=ram_inference, + inputs=inputs, + outputs=[ + gr.components.Textbox(), + gr.components.Textbox(info="it's translated from the english tags") + ]) + + +def main(): + build_gradio().launch() + + +if __name__ == '__main__': + main() diff --git a/mmpretrain/models/multimodal/ram/openset_utils.py b/mmpretrain/models/multimodal/ram/openset_utils.py new file mode 100644 index 0000000..5fa0f52 --- /dev/null +++ b/mmpretrain/models/multimodal/ram/openset_utils.py @@ -0,0 +1,212 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
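+# Utilities for RAM's open-set tagging: CLIP prompt templates, a fallback list
+# of rare OpenImages categories, and build_openset_label_embedding(), which
+# encodes arbitrary category names with a CLIP text encoder so they can replace
+# the model's fixed tag-list label embeddings.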
+import torch + +from mmpretrain.registry import MODELS + + +def article(name): + return 'an' if name[0] in 'aeiou' else 'a' + + +def processed_name(name, rm_dot=False): + # _ for lvis + # / for obj365 + res = name.replace('_', ' ').replace('/', ' or ').lower() + if rm_dot: + res = res.rstrip('.') + return res + + +single_template = ['a photo of a {}.'] + +multiple_templates = [ + 'There is {article} {} in the scene.', + 'There is the {} in the scene.', + 'a photo of {article} {} in the scene.', + 'a photo of the {} in the scene.', + 'a photo of one {} in the scene.', + 'itap of {article} {}.', + 'itap of my {}.', # itap: I took a picture of + 'itap of the {}.', + 'a photo of {article} {}.', + 'a photo of my {}.', + 'a photo of the {}.', + 'a photo of one {}.', + 'a photo of many {}.', + 'a good photo of {article} {}.', + 'a good photo of the {}.', + 'a bad photo of {article} {}.', + 'a bad photo of the {}.', + 'a photo of a nice {}.', + 'a photo of the nice {}.', + 'a photo of a cool {}.', + 'a photo of the cool {}.', + 'a photo of a weird {}.', + 'a photo of the weird {}.', + 'a photo of a small {}.', + 'a photo of the small {}.', + 'a photo of a large {}.', + 'a photo of the large {}.', + 'a photo of a clean {}.', + 'a photo of the clean {}.', + 'a photo of a dirty {}.', + 'a photo of the dirty {}.', + 'a bright photo of {article} {}.', + 'a bright photo of the {}.', + 'a dark photo of {article} {}.', + 'a dark photo of the {}.', + 'a photo of a hard to see {}.', + 'a photo of the hard to see {}.', + 'a low resolution photo of {article} {}.', + 'a low resolution photo of the {}.', + 'a cropped photo of {article} {}.', + 'a cropped photo of the {}.', + 'a close-up photo of {article} {}.', + 'a close-up photo of the {}.', + 'a jpeg corrupted photo of {article} {}.', + 'a jpeg corrupted photo of the {}.', + 'a blurry photo of {article} {}.', + 'a blurry photo of the {}.', + 'a pixelated photo of {article} {}.', + 'a pixelated photo of the {}.', + 'a black and white photo of the {}.', + 'a black and white photo of {article} {}.', + 'a plastic {}.', + 'the plastic {}.', + 'a toy {}.', + 'the toy {}.', + 'a plushie {}.', + 'the plushie {}.', + 'a cartoon {}.', + 'the cartoon {}.', + 'an embroidered {}.', + 'the embroidered {}.', + 'a painting of the {}.', + 'a painting of a {}.', +] + +openimages_rare_unseen = [ + 'Aerial photography', 'Aircraft engine', 'Ale', 'Aloe', 'Amphibian', + 'Angling', 'Anole', 'Antique car', 'Arcade game', 'Arthropod', + 'Assault rifle', 'Athletic shoe', 'Auto racing', 'Backlighting', + 'Bagpipes', 'Ball game', 'Barbecue chicken', 'Barechested', 'Barquentine', + 'Beef tenderloin', 'Billiard room', 'Billiards', 'Bird of prey', + 'Black swan', 'Black-and-white', 'Blond', 'Boating', 'Bonbon', + 'Bottled water', 'Bouldering', 'Bovine', 'Bratwurst', 'Breadboard', + 'Briefs', 'Brisket', 'Brochette', 'Calabaza', 'Camera operator', 'Canola', + 'Childbirth', 'Chordophone', 'Church bell', 'Classical sculpture', + 'Close-up', 'Cobblestone', 'Coca-cola', 'Combat sport', 'Comics', + 'Compact car', 'Computer speaker', 'Cookies and crackers', + 'Coral reef fish', 'Corn on the cob', 'Cosmetics', 'Crocodilia', + 'Digital camera', 'Dishware', 'Divemaster', 'Dobermann', 'Dog walking', + 'Domestic rabbit', 'Domestic short-haired cat', 'Double-decker bus', + 'Drums', 'Electric guitar', 'Electric piano', 'Electronic instrument', + 'Equestrianism', 'Equitation', 'Erinaceidae', 'Extreme sport', 'Falafel', + 'Figure skating', 'Filling station', 'Fire apparatus', 'Firearm', + 'Flatbread', 
'Floristry', 'Forklift truck', 'Freight transport', + 'Fried food', 'Fried noodles', 'Frigate', 'Frozen yogurt', 'Frying', + 'Full moon', 'Galleon', 'Glacial landform', 'Gliding', 'Go-kart', 'Goats', + 'Grappling', 'Great white shark', 'Gumbo', 'Gun turret', 'Hair coloring', + 'Halter', 'Headphones', 'Heavy cruiser', 'Herding', 'High-speed rail', + 'Holding hands', 'Horse and buggy', 'Horse racing', 'Hound', + 'Hunting knife', 'Hurdling', 'Inflatable', 'Jackfruit', 'Jeans', 'Jiaozi', + 'Junk food', 'Khinkali', 'Kitesurfing', 'Lawn game', 'Leaf vegetable', + 'Lechon', 'Lifebuoy', 'Locust', 'Lumpia', 'Luxury vehicle', 'Machine tool', + 'Medical imaging', 'Melee weapon', 'Microcontroller', 'Middle ages', + 'Military person', 'Military vehicle', 'Milky way', 'Miniature Poodle', + 'Modern dance', 'Molluscs', 'Monoplane', 'Motorcycling', 'Musical theatre', + 'Narcissus', 'Nest box', 'Newsagent\'s shop', 'Nile crocodile', + 'Nordic skiing', 'Nuclear power plant', 'Orator', 'Outdoor shoe', + 'Parachuting', 'Pasta salad', 'Peafowl', 'Pelmeni', 'Perching bird', + 'Performance car', 'Personal water craft', 'Pit bull', 'Plant stem', + 'Pork chop', 'Portrait photography', 'Primate', 'Procyonidae', + 'Prosciutto', 'Public speaking', 'Racewalking', 'Ramen', + 'Rear-view mirror', 'Residential area', 'Ribs', 'Rice ball', + 'Road cycling', 'Roller skating', 'Roman temple', 'Rowing', 'Rural area', + 'Sailboat racing', 'Scaled reptile', 'Scuba diving', 'Senior citizen', + 'Shallot', 'Shinto shrine', 'Shooting range', 'Siberian husky', 'Sledding', + 'Soba', 'Solar energy', 'Sport climbing', 'Sport utility vehicle', + 'Steamed rice', 'Stemware', 'Sumo', 'Surfing Equipment', 'Team sport', + 'Touring car', 'Toy block', 'Trampolining', 'Underwater diving', + 'Vegetarian food', 'Wallaby', 'Water polo', 'Watercolor paint', 'Whiskers', + 'Wind wave', 'Woodwind instrument', 'Yakitori', 'Zeppelin' +] + + +def get_clip_model(): + model = dict( + type='CLIPZeroShot', + vision_backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_rate=0., + layer_cfgs=dict(act_cfg=dict(type='mmpretrain.QuickGELU')), + pre_norm=True, + ), + projection=dict( + type='CLIPProjection', in_channels=768, out_channels=512), + text_backbone=dict( + type='CLIPTransformer', + width=512, + layers=12, + heads=8, + attn_mask=True, + ), + tokenizer=dict( + type='AutoTokenizer', + name_or_path='openai/clip-vit-base-patch16', + use_fast=False), + vocab_size=49408, + transformer_width=512, + proj_dim=512, + context_length=77, + data_preprocessor=dict( + type='MultiModalDataPreprocessor', + mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255], + std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255], + to_rgb=False, + ), + ) + return MODELS.build(model) + + +def build_openset_label_embedding(categories=None, clip_ckpt_path=''): + if categories is None: + print('Categories is None, so using rare_unseen categories') + categories = openimages_rare_unseen + model = get_clip_model() + model.load_state_dict(torch.load(clip_ckpt_path)) + templates = multiple_templates + + run_on_gpu = torch.cuda.is_available() + + with torch.no_grad(): + openset_label_embedding = [] + for category in categories: + texts = [ + template.format( + processed_name(category, rm_dot=True), + article=article(category)) for template in templates + ] + texts = [ + 'This is ' + text + if text.startswith('a') or text.startswith('the') else text + for text in texts + ] + texts = model.tokenize(texts) # tokenize + if run_on_gpu: + texts = 
texts.cuda() + model = model.cuda() + text_embeddings = model.extract_text_feat(texts) + text_embeddings /= text_embeddings.norm(dim=-1, keepdim=True) + text_embedding = text_embeddings.mean(dim=0) + text_embedding /= text_embedding.norm() + openset_label_embedding.append(text_embedding) + openset_label_embedding = torch.stack(openset_label_embedding, dim=1) + if run_on_gpu: + openset_label_embedding = openset_label_embedding.cuda() + + openset_label_embedding = openset_label_embedding.t() + return openset_label_embedding, categories diff --git a/mmpretrain/models/multimodal/ram/ram.py b/mmpretrain/models/multimodal/ram/ram.py new file mode 100644 index 0000000..c5d22f0 --- /dev/null +++ b/mmpretrain/models/multimodal/ram/ram.py @@ -0,0 +1,332 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import pickle +from abc import abstractmethod +from typing import List, Optional + +import numpy as np +import torch +import torch.nn as nn +from mmengine.model import BaseModel + +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample +from .bert import BertConfig, BertLMHeadModel, BertModel +from .openset_utils import build_openset_label_embedding +from .utils import tie_encoder_decoder_weights + + +def get_path(path): + file_path = os.path.abspath(os.path.dirname(__file__)) + if not os.path.isabs(path): + return os.path.join(file_path, path) + + +class RAM(BaseModel): + """The implementation of `RAM `_.""" + + def __init__(self, + tokenizer: dict, + vision_backbone: dict, + tag_encoder: dict, + tagging_head: dict, + text_decoder: dict, + device: str = 'cpu', + vision_width: int = 1536, + prompt='a picture of ', + threshold=0.68, + delete_tag_index=[], + tag_list='./data/ram_tag_list.pickle', + tag_list_chinese='./data/ram_tag_list_chinese.pickle', + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + if data_preprocessor is None: + data_preprocessor = {} + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + + self.device = device + # build the visual encoder + self.visual_encoder = MODELS.build(vision_backbone) + + # build the tokenizer + self.tokenizer = TOKENIZER.build(tokenizer) + self.tokenizer.add_special_tokens({'bos_token': '[DEC]'}) + self.tokenizer.add_special_tokens( + {'additional_special_tokens': ['[ENC]']}) + self.tokenizer.enc_token_id = \ + self.tokenizer.additional_special_tokens_ids[0] + + # build the tag encoder + # encoder_config = BertConfig.from_json_file(med_config) + # encoder_config.encoder_width = 512 + encoder_config = BertConfig.from_dict(tag_encoder) + self.tag_encoder = BertModel( + config=encoder_config, add_pooling_layer=False) + + # build image-tag-text decoder + # decoder_config = BertConfig.from_json_file(med_config) + decoder_config = BertConfig.from_dict(text_decoder) + self.text_decoder = BertLMHeadModel(config=decoder_config) + + self.delete_tag_index = delete_tag_index + self.prompt = prompt + self.prompt_length = len(self.tokenizer(self.prompt).input_ids) - 1 + + # load tag list + self.tag_list = self.load_tag_list(get_path(tag_list)) + self.tag_list_chinese = self.load_tag_list(get_path(tag_list_chinese)) + + # create image-tag recognition decoder + self.threshold = threshold + self.num_class = len(self.tag_list) + # q2l_config = \ + # BertConfig.from_json_file(f'{CONFIG_PATH}/configs/q2l_config.json') + # 
q2l_config.encoder_width = 512 + q2l_config = BertConfig.from_dict(tagging_head) + self.tagging_head = BertModel( + config=q2l_config, add_pooling_layer=False) + self.tagging_head.resize_token_embeddings(len(self.tokenizer)) + self.label_embed = nn.Parameter( + torch.zeros(self.num_class, q2l_config.encoder_width)) + + if q2l_config.hidden_size != 512: + self.wordvec_proj = nn.Linear(512, q2l_config.hidden_size) + else: + self.wordvec_proj = nn.Identity() + + self.fc = nn.Linear(q2l_config.hidden_size, 1) + + self.del_selfattention() + + # share weights of the lowest 2-layer of + # "image-tag interaction encoder" with + # the "image-tag recogntion decoder" + tie_encoder_decoder_weights(self.tag_encoder, self.tagging_head, '', + ' ') + self.image_proj = nn.Linear(vision_width, 512) + # self.label_embed = nn.Parameter(torch.load( + # f'{CONFIG_PATH}/data/textual_label_embedding.pth', + # map_location='cpu').float()) + + # adjust thresholds for some tags + self.class_threshold = torch.ones(self.num_class) * self.threshold + ram_class_threshold_path = get_path( + './data/ram_tag_list_threshold.pickle') + with open(ram_class_threshold_path, 'rb') as f: + ram_class_threshold = pickle.load(f) + for key, value in enumerate(ram_class_threshold): + self.class_threshold[key] = value + + def load_tag_list(self, tag_list_file): + with open(tag_list_file, 'rb') as f: + tag_list = pickle.load(f) + tag_list = np.array(tag_list) + return tag_list + + # delete self-attention layer of image-tag recognition decoder + # to reduce computation, follower Query2Label + def del_selfattention(self): + del self.tagging_head.embeddings + for layer in self.tagging_head.encoder.layer: + del layer.attention + + def get_label_embed(self): + return torch.nn.functional.relu(self.wordvec_proj(self.label_embed)) + + def extract_visual_feature(self, images): + image_embeds = self.visual_encoder(images)[0] + image_embeds = image_embeds.flatten(2, 3) + attn_pool = nn.AdaptiveAvgPool1d(1) + cls_token = attn_pool(image_embeds).permute(0, 2, 1).contiguous() + image_embeds = image_embeds.permute(0, 2, 1).contiguous() + image_embeds = torch.cat([cls_token, image_embeds], dim=1) + image_embeds = self.image_proj(image_embeds) + image_atts = torch.ones( + image_embeds.size()[:-1], dtype=torch.long).to(images.device) + return image_embeds, image_atts + + def image2tag(self, label_embed, image_embeds, image_atts): + # recognized image tags using image-tag recogntiion decoder + # image_cls_embeds = image_embeds[:, 0, :] + image_spatial_embeds = image_embeds[:, 1:, :] + + bs = image_spatial_embeds.shape[0] + label_embed = label_embed.unsqueeze(0).repeat(bs, 1, 1) + tagging_embed = self.tagging_head( + encoder_embeds=label_embed, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=False, + mode='tagging', + ) + + logits = self.fc(tagging_embed[0]).squeeze(-1) + return logits + + def forward( + self, + images: torch.Tensor, + data_samples: Optional[list] = None, + mode: str = 'predict', + **kwargs, + ): + if mode == 'predict': + return self.predict(images, data_samples, **kwargs) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + @abstractmethod + def predict(self, + images: torch.Tensor, + data_samples: DataSample = None) -> DataSample: + raise NotImplementedError + + +@MODELS.register_module() +class RAMNormal(RAM): + + def __init__(self, + tokenizer: dict, + vision_backbone: dict, + tag_encoder: dict, + tagging_head: dict, + text_decoder: dict, + device: str = 'cpu', + vision_width: int = 1536, 
+ prompt='a picture of ', + threshold=0.68, + delete_tag_index=[], + tag_list='./data/ram_tag_list.pickle', + tag_list_chinese='./data/ram_tag_list_chinese.pickle', + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super().__init__( + tokenizer, + vision_backbone, + tag_encoder, + tagging_head, + text_decoder, + device, + vision_width, + prompt, + threshold, + delete_tag_index, + tag_list, + tag_list_chinese, + data_preprocessor, + init_cfg, + ) + + def tag_process(self, logits): + targets = torch.where( + torch.sigmoid(logits) > self.class_threshold.to(logits.device), + torch.tensor(1.0).to(logits.device), + torch.zeros(self.num_class).to(logits.device)) + + tag = targets.cpu().numpy() + tag[:, self.delete_tag_index] = 0 + tag_output = [] + tag_output_chinese = [] + logits_output = [] + + bs = logits.shape[0] + for b in range(bs): + index = np.argwhere(tag[b] == 1) + token = self.tag_list[index].squeeze(axis=1) + logits_output.append( + torch.sigmoid(logits)[b][index[:, 0]].cpu().numpy()) + tag_output.append(' | '.join(token)) + token_chinese = self.tag_list_chinese[index].squeeze(axis=1) + tag_output_chinese.append(' | '.join(token_chinese)) + + return [(tag_output, tag_output_chinese), logits_output] + + def predict(self, + images: torch.Tensor, + data_samples: DataSample = None) -> DataSample: + self.eval() + self.to(self.device) + images = images.to(self.device) + label_embed = self.get_label_embed() + image_embeds, image_atts = self.extract_visual_feature(images) + logits = self.image2tag(label_embed, image_embeds, image_atts) + tag_output, logits_output = self.tag_process(logits) + data_samples.set_field(logits_output, 'logits_output') + data_samples.set_field(tag_output, 'tag_output') + return data_samples + + +@MODELS.register_module() +class RAMOpenset(RAMNormal): + + def __init__(self, + tokenizer: dict, + vision_backbone: dict, + tag_encoder: dict, + tagging_head: dict, + text_decoder: dict, + device: str = 'cpu', + vision_width: int = 1536, + prompt='a picture of ', + threshold=0.68, + delete_tag_index=[], + tag_list='./data/ram_tag_list.pickle', + tag_list_chinese='./data/ram_tag_list_chinese.pickle', + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super().__init__( + tokenizer, + vision_backbone, + tag_encoder, + tagging_head, + text_decoder, + device, + vision_width, + prompt, + threshold, + delete_tag_index, + tag_list, + tag_list_chinese, + data_preprocessor, + init_cfg, + ) + + def set_openset(self, + categories: List[str] = None, + clip_ckpt: str = '', + threshold: float = 0.68): + openset_label_embedding, openset_categories = \ + build_openset_label_embedding( + categories, clip_ckpt + ) + self.tag_list = np.array(openset_categories) + self.label_embed = nn.Parameter(openset_label_embedding.float()) + self.num_class = len(openset_categories) + + # the threshold for unseen categories is often lower + self.class_threshold = torch.ones(self.num_class) * threshold + + def tag_process(self, logits): + targets = torch.where( + torch.sigmoid(logits) > self.class_threshold.to(logits.device), + torch.tensor(1.0).to(logits.device), + torch.zeros(self.num_class).to(logits.device)) + + tag = targets.cpu().numpy() + tag[:, self.delete_tag_index] = 0 + + bs = logits.shape[0] + tag_output = [] + logits_output = [] + for b in range(bs): + index = np.argwhere(tag[b] == 1) + token = self.tag_list[index].squeeze(axis=1) + logits_output.append( + torch.sigmoid(logits)[b][index[:, 0]].cpu().numpy()) + tag_output.append(' | 
'.join(token)) + + return [(tag_output, [None]), logits_output] diff --git a/mmpretrain/models/multimodal/ram/run/__init__.py b/mmpretrain/models/multimodal/ram/run/__init__.py new file mode 100644 index 0000000..ef101fe --- /dev/null +++ b/mmpretrain/models/multimodal/ram/run/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/mmpretrain/models/multimodal/ram/run/inference.py b/mmpretrain/models/multimodal/ram/run/inference.py new file mode 100644 index 0000000..da5afcf --- /dev/null +++ b/mmpretrain/models/multimodal/ram/run/inference.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def inference_ram(sample, model): + + with torch.no_grad(): + result = model.test_step(sample) + + return result + + +def inference_ram_openset(sample, model): + with torch.no_grad(): + result = model.test_step(sample) + + return result + + +def inference(sample, model, transforms, mode='normal'): + sample = transforms(sample) + if sample['inputs'].ndim == 3: + sample['inputs'] = sample['inputs'].unsqueeze(dim=0) + assert mode in ['normal', 'openset' + ], 'mode of inference must be "normal" or "openset"' + if mode == 'normal': + return inference_ram(sample, model) + else: + return inference_ram_openset(sample, model) diff --git a/mmpretrain/models/multimodal/ram/utils.py b/mmpretrain/models/multimodal/ram/utils.py new file mode 100644 index 0000000..32cb115 --- /dev/null +++ b/mmpretrain/models/multimodal/ram/utils.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List + +from torch import nn + + +def tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, + base_model_prefix: str, skip_key: str): + uninitialized_encoder_weights: List[str] = [] + if decoder.__class__ != encoder.__class__: + print(f'''{decoder.__class__} and {encoder.__class__} are not equal. 
+ In this case make sure that + all encoder weights are correctly initialized.''') + + def tie_encoder_to_decoder_recursively( + decoder_pointer: nn.Module, + encoder_pointer: nn.Module, + module_name: str, + uninitialized_encoder_weights: List[str], + skip_key: str, + depth=0, + ): + assert isinstance(decoder_pointer, nn.Module) and isinstance( + encoder_pointer, nn.Module + ), f'{decoder_pointer} and {encoder_pointer}' + \ + 'have to be of type torch.nn.Module' + if hasattr(decoder_pointer, 'weight') and skip_key not in module_name: + assert hasattr(encoder_pointer, 'weight') + encoder_pointer.weight = decoder_pointer.weight + if hasattr(decoder_pointer, 'bias'): + assert hasattr(encoder_pointer, 'bias') + encoder_pointer.bias = decoder_pointer.bias + print(module_name + ' is tied') + return + + encoder_modules = encoder_pointer._modules + decoder_modules = decoder_pointer._modules + if len(decoder_modules) > 0: + assert (len(encoder_modules) > + 0), f'''Encoder module {encoder_pointer} + does not match decoder module {decoder_pointer}''' + + all_encoder_weights = set([ + module_name + '/' + sub_name + for sub_name in encoder_modules.keys() + ]) + encoder_layer_pos = 0 + for name, module in decoder_modules.items(): + if name.isdigit(): + encoder_name = str(int(name) + encoder_layer_pos) + decoder_name = name + if not isinstance( + decoder_modules[decoder_name], + type(encoder_modules[encoder_name])) and len( + encoder_modules) != len(decoder_modules): + # this can happen if the name corresponds to + # the position in a list module list of layers + # in this case the decoder has added a + # cross-attention that the encoder doesn't have + # thus skip this step and + # subtract one layer pos from encoder + encoder_layer_pos -= 1 + continue + elif name not in encoder_modules: + continue + elif depth > 500: + raise ValueError( + '''Max depth of recursive function `tie_encoder_to_decoder` reached. + It seems that there is a circular dependency + between two or more `nn.Modules` of your model.''') + else: + decoder_name = encoder_name = name + tie_encoder_to_decoder_recursively( + decoder_modules[decoder_name], + encoder_modules[encoder_name], + module_name + '/' + name, + uninitialized_encoder_weights, + skip_key, + depth=depth + 1, + ) + all_encoder_weights.remove(module_name + '/' + encoder_name) + + uninitialized_encoder_weights += list(all_encoder_weights) + + # tie weights recursively + tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, + uninitialized_encoder_weights, skip_key) diff --git a/mmpretrain/models/necks/__init__.py b/mmpretrain/models/necks/__init__.py new file mode 100644 index 0000000..2952a69 --- /dev/null +++ b/mmpretrain/models/necks/__init__.py @@ -0,0 +1,37 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .beitv2_neck import BEiTV2Neck
+from .cae_neck import CAENeck
+from .densecl_neck import DenseCLNeck
+from .gap import GlobalAveragePooling
+from .gem import GeneralizedMeanPooling
+from .hr_fuse import HRFuseScales
+from .itpn_neck import iTPNPretrainDecoder
+from .linear_neck import LinearNeck
+from .mae_neck import ClsBatchNormNeck, MAEPretrainDecoder
+from .milan_neck import MILANPretrainDecoder
+from .mixmim_neck import MixMIMPretrainDecoder
+from .mocov2_neck import MoCoV2Neck
+from .nonlinear_neck import NonLinearNeck
+from .simmim_neck import SimMIMLinearDecoder
+from .spark_neck import SparKLightDecoder
+from .swav_neck import SwAVNeck
+
+__all__ = [
+    'GlobalAveragePooling',
+    'GeneralizedMeanPooling',
+    'HRFuseScales',
+    'LinearNeck',
+    'BEiTV2Neck',
+    'CAENeck',
+    'DenseCLNeck',
+    'MAEPretrainDecoder',
+    'ClsBatchNormNeck',
+    'MILANPretrainDecoder',
+    'MixMIMPretrainDecoder',
+    'MoCoV2Neck',
+    'NonLinearNeck',
+    'SimMIMLinearDecoder',
+    'SwAVNeck',
+    'iTPNPretrainDecoder',
+    'SparKLightDecoder',
+]
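The re-exports above are what make every neck added in this commit importable from mmpretrain.models.necks and buildable through the MODELS registry. A minimal usage sketch follows (not part of the committed files; the batch size, channel count, and 7x7 spatial size are illustrative assumptions), using the GlobalAveragePooling neck defined later in this patch on a tuple of backbone feature maps:

import torch
from mmpretrain.models.necks import GlobalAveragePooling

# Build the neck directly; dim=2 selects 2-D average pooling over (H, W).
neck = GlobalAveragePooling(dim=2)
# A tuple of backbone feature maps, each of shape (B, C, H, W).
feats = (torch.rand(2, 2048, 7, 7), )
# Returns a tuple with one flattened tensor of shape (2, 2048).
outs = neck(feats)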
ziZord@Q@%&4kF!muhuujLJ?|6J;gxy%vd7H3?2Fn?SY{Nk$xRo#_f&NM4eL8LNuT> zNWkaYIPN!C?aV75XKeOzeH>{KE&}u}*bC&J+p-~BoSwcm&(KdtKbNQ`%$g@EfKH!0 zWEH(d?p}Ck0W;|~fj2nivoD-t5#%sd!;(@h{6~%lWtq2}WhbD|MFSEE*Pi}`>ny~C|E`S2*d%FH(SorIxZz{@U7{bjGX7wLFXt%@g{wPYPUz%Yk@)*n|54k|JiidH*96~-xuHd)oQ(hzh-Q>IROI` zrxrfeo;eddDOEEtaI!2OcVp-x56o(oN!169?m{eNs%AXz2= literal 0 HcmV?d00001 diff --git a/mmpretrain/models/necks/beitv2_neck.py b/mmpretrain/models/necks/beitv2_neck.py new file mode 100644 index 0000000..745e387 --- /dev/null +++ b/mmpretrain/models/necks/beitv2_neck.py @@ -0,0 +1,153 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmengine.model import BaseModule + +from mmpretrain.models.backbones.beit import BEiTTransformerEncoderLayer +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class BEiTV2Neck(BaseModule): + """Neck for BEiTV2 Pre-training. + + This module construct the decoder for the final prediction. + + Args: + num_layers (int): Number of encoder layers of neck. Defaults to 2. + early_layers (int): The layer index of the early output from the + backbone. Defaults to 9. + backbone_arch (str): Vision Transformer architecture. Defaults to base. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + layer_scale_init_value (float): The initialization value for the + learnable scaling of attention and FFN. Defaults to 0.1. + use_rel_pos_bias (bool): Whether to use unique relative position bias, + if False, use shared relative position bias defined in backbone. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
+ """ + arch_zoo = { + **dict.fromkeys( + ['b', 'base'], { + 'embed_dims': 768, + 'depth': 12, + 'num_heads': 12, + 'feedforward_channels': 3072, + }), + **dict.fromkeys( + ['l', 'large'], { + 'embed_dims': 1024, + 'depth': 24, + 'num_heads': 16, + 'feedforward_channels': 4096, + }), + } + + def __init__( + self, + num_layers: int = 2, + early_layers: int = 9, + backbone_arch: str = 'base', + drop_rate: float = 0., + drop_path_rate: float = 0., + layer_scale_init_value: float = 0.1, + use_rel_pos_bias: bool = False, + norm_cfg: dict = dict(type='LN', eps=1e-6), + init_cfg: Optional[Union[dict, List[dict]]] = dict( + type='TruncNormal', layer='Linear', std=0.02, bias=0) + ) -> None: + super().__init__(init_cfg=init_cfg) + + if isinstance(backbone_arch, str): + backbone_arch = backbone_arch.lower() + assert backbone_arch in set(self.arch_zoo), \ + (f'Arch {backbone_arch} is not in default archs ' + f'{set(self.arch_zoo)}') + self.arch_settings = self.arch_zoo[backbone_arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'num_heads', 'feedforward_channels' + } + assert isinstance(backbone_arch, dict) and essential_keys <= set( + backbone_arch + ), f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = backbone_arch + + # stochastic depth decay rule + self.early_layers = early_layers + depth = self.arch_settings['depth'] + dpr = np.linspace(0, drop_path_rate, + max(depth, early_layers + num_layers)) + + self.patch_aggregation = nn.ModuleList() + for i in range(early_layers, early_layers + num_layers): + _layer_cfg = dict( + embed_dims=self.arch_settings['embed_dims'], + num_heads=self.arch_settings['num_heads'], + feedforward_channels=self. + arch_settings['feedforward_channels'], + drop_rate=drop_rate, + drop_path_rate=dpr[i], + norm_cfg=norm_cfg, + layer_scale_init_value=layer_scale_init_value, + window_size=None, + use_rel_pos_bias=use_rel_pos_bias) + self.patch_aggregation.append( + BEiTTransformerEncoderLayer(**_layer_cfg)) + + self.rescale_patch_aggregation_init_weight() + + embed_dims = self.arch_settings['embed_dims'] + _, norm = build_norm_layer(norm_cfg, embed_dims) + self.add_module('norm', norm) + + def rescale_patch_aggregation_init_weight(self): + """Rescale the initialized weights.""" + + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.patch_aggregation): + rescale(layer.attn.proj.weight.data, + self.early_layers + layer_id + 1) + rescale(layer.ffn.layers[1].weight.data, + self.early_layers + layer_id + 1) + + def forward(self, inputs: Tuple[torch.Tensor], rel_pos_bias: torch.Tensor, + **kwargs) -> Tuple[torch.Tensor, torch.Tensor]: + """Get the latent prediction and final prediction. + + Args: + x (Tuple[torch.Tensor]): Features of tokens. + rel_pos_bias (torch.Tensor): Shared relative position bias table. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: + - ``x``: The final layer features from backbone, which are normed + in ``BEiTV2Neck``. + - ``x_cls_pt``: The early state features from backbone, which are + consist of final layer cls_token and early state patch_tokens + from backbone and sent to PatchAggregation layers in the neck. 
+ """ + + early_states, x = inputs[0], inputs[1] + x_cls_pt = torch.cat([x[:, [0]], early_states[:, 1:]], dim=1) + for layer in self.patch_aggregation: + x_cls_pt = layer(x_cls_pt, rel_pos_bias=rel_pos_bias) + + # shared norm + x, x_cls_pt = self.norm(x), self.norm(x_cls_pt) + + # remove cls_token + x = x[:, 1:] + x_cls_pt = x_cls_pt[:, 1:] + return x, x_cls_pt diff --git a/mmpretrain/models/necks/cae_neck.py b/mmpretrain/models/necks/cae_neck.py new file mode 100644 index 0000000..81fc301 --- /dev/null +++ b/mmpretrain/models/necks/cae_neck.py @@ -0,0 +1,273 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple + +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks import DropPath +from mmcv.cnn.bricks.transformer import FFN +from mmengine.model import BaseModule +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.models.backbones.beit import BEiTTransformerEncoderLayer +from mmpretrain.registry import MODELS +from ..utils import CrossMultiheadAttention + + +class CAETransformerRegressorLayer(BaseModule): + """Transformer layer for the regressor of CAE. + + This module is different from conventional transformer encoder layer, for + its queries are the masked tokens, but its keys and values are the + concatenation of the masked and unmasked tokens. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): The number of heads in multi-head attention. + feedforward_channels (int): The hidden dimension of FFNs. + Defaults: 1024. + num_fcs (int, optional): The number of fully-connected layers in + FFNs. Default: 2. + qkv_bias (bool): If True, add a learnable bias to q, k, v. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + drop_rate (float): The dropout rate. Defaults to 0.0. + attn_drop_rate (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + layer_scale_init_value (float): The init value of gamma. + Defaults to 0.0. + act_cfg (dict): The activation config for FFNs. + Defaults to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. 
+ """ + + def __init__( + self, + embed_dims: int, + num_heads: int, + feedforward_channels: int, + num_fcs: int = 2, + qkv_bias: bool = False, + qk_scale: float = None, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + layer_scale_init_value: float = 0.0, + act_cfg: dict = dict(type='GELU'), + norm_cfg: dict = dict(type='LN', eps=1e-6) + ) -> None: + super().__init__() + + # NOTE: cross attention + _, self.norm1_q_cross = build_norm_layer( + norm_cfg, embed_dims, postfix=2) + _, self.norm1_k_cross = build_norm_layer( + norm_cfg, embed_dims, postfix=2) + _, self.norm1_v_cross = build_norm_layer( + norm_cfg, embed_dims, postfix=2) + _, self.norm2_cross = build_norm_layer(norm_cfg, embed_dims, postfix=2) + self.cross_attn = CrossMultiheadAttention( + embed_dims, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop_rate, + proj_drop=drop_rate) + + self.ffn = FFN( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + num_fcs=num_fcs, + ffn_drop=drop_rate, + dropout_layer=None, + act_cfg=act_cfg, + add_identity=False) + + self.drop_path = DropPath(drop_prob=drop_path_rate) + + if layer_scale_init_value > 0: + self.gamma_1_cross = nn.Parameter( + layer_scale_init_value * torch.ones((embed_dims)), + requires_grad=True) + self.gamma_2_cross = nn.Parameter( + layer_scale_init_value * torch.ones((embed_dims)), + requires_grad=True) + else: + self.gamma_1_cross = nn.Parameter( + torch.ones((embed_dims)), requires_grad=False) + self.gamma_2_cross = nn.Parameter( + torch.ones((embed_dims)), requires_grad=False) + + def forward(self, x_q: torch.Tensor, x_kv: torch.Tensor, + pos_q: torch.Tensor, pos_k: torch.Tensor) -> torch.Tensor: + """Forward function.""" + x = x_q + self.drop_path(self.gamma_1_cross * self.cross_attn( + self.norm1_q_cross(x_q + pos_q), + k=self.norm1_k_cross(x_kv + pos_k), + v=self.norm1_v_cross(x_kv))) + x = self.norm2_cross(x) + x = x + self.drop_path(self.gamma_2_cross * self.ffn(x)) + + return x + + +@MODELS.register_module() +class CAENeck(BaseModule): + """Neck for CAE Pre-training. + + This module construct the latent prediction regressor and the decoder + for the latent prediction and final prediction. + + Args: + num_classes (int): The number of classes for final prediction. Defaults + to 8192. + embed_dims (int): The embed dims of latent feature in regressor and + decoder. Defaults to 768. + regressor_depth (int): The number of regressor blocks. Defaults to 6. + decoder_depth (int): The number of decoder blocks. Defaults to 8. + num_heads (int): The number of head in multi-head attention. Defaults + to 12. + mlp_ratio (int): The expand ratio of latent features in MLP. defaults + to 4. + qkv_bias (bool): Whether or not to use qkv bias. Defaults to True. + qk_scale (float, optional): The scale applied to the results of qk. + Defaults to None. + drop_rate (float): The dropout rate. Defaults to 0. + attn_drop_rate (float): The dropout rate in attention block. Defaults + to 0. + norm_cfg (dict): The config of normalization layer. Defaults to + dict(type='LN', eps=1e-6). + layer_scale_init_value (float, optional): The init value of gamma. + Defaults to None. + mask_tokens_num (int): The number of mask tokens. Defaults to 75. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + num_classes: int = 8192, + embed_dims: int = 768, + regressor_depth: int = 6, + decoder_depth: int = 8, + num_heads: int = 12, + mlp_ratio: int = 4, + qkv_bias: bool = True, + qk_scale: float = None, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + norm_cfg: dict = dict(type='LN', eps=1e-6), + layer_scale_init_value: float = None, + mask_tokens_num: int = 75, + init_cfg: dict = None) -> None: + super().__init__(init_cfg=init_cfg) + + self.num_features = self.embed_dim = embed_dims + self.mask_token_num = mask_tokens_num + + # regressor + regressor_drop_path_rates = [ + x.item() + for x in torch.linspace(0, drop_path_rate, regressor_depth) + ] + self.regressors = nn.ModuleList([ + CAETransformerRegressorLayer( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=mlp_ratio * embed_dims, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=regressor_drop_path_rates[i], + norm_cfg=norm_cfg, + layer_scale_init_value=layer_scale_init_value) + for i in range(regressor_depth) + ]) + + # decoder + decoder_drop_path_rates = [ + x.item() for x in torch.linspace(0, drop_path_rate, decoder_depth) + ] + self.decoders = nn.ModuleList([ + BEiTTransformerEncoderLayer( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=mlp_ratio * embed_dims, + layer_scale_init_value=layer_scale_init_value, + window_size=None, + # setting `use_rel_pos_bias` to False ignores the `window_size` + use_rel_pos_bias=False, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=decoder_drop_path_rates[i], + norm_cfg=norm_cfg) for i in range(decoder_depth) + ]) + + _, self.norm_regressor = build_norm_layer( + norm_cfg, embed_dims, postfix=2) + _, self.norm_decoder = build_norm_layer( + norm_cfg, embed_dims, postfix=2) + + self.head = nn.Linear( + embed_dims, num_classes) if num_classes > 0 else nn.Identity() + self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dims)) + + def init_weights(self) -> None: + """Initialization.""" + super().init_weights() + self.apply(self._init_weights) + trunc_normal_(self.mask_token, std=0.02) + trunc_normal_(self.head.weight, std=0.02) + + def _init_weights(self, m: nn.Module) -> None: + """Initialization.""" + if isinstance(m, nn.Linear): + nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward( + self, x_unmasked: torch.Tensor, pos_embed_masked: torch.Tensor, + pos_embed_unmasked: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Get the latent prediction and final prediction. + + Args: + x_unmasked (torch.Tensor): Features of unmasked tokens. + pos_embed_masked (torch.Tensor): Position embedding of masked + tokens. + pos_embed_unmasked (torch.Tensor): Position embedding of unmasked + tokens. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: + - ``logits``: Final prediction. + - ``latent_pred``: Latent prediction. 
+ """ + x_masked = self.mask_token.expand(x_unmasked.shape[0], + self.mask_token_num, -1) + # regressor + for regressor in self.regressors: + x_masked = regressor( + x_masked, torch.cat([x_unmasked, x_masked], dim=1), + pos_embed_masked, + torch.cat([pos_embed_unmasked, pos_embed_masked], dim=1)) + x_masked = self.norm_regressor(x_masked) + latent_pred = x_masked + + # decoder + x_masked = x_masked + pos_embed_masked + for decoder in self.decoders: + x_masked = decoder(x_masked, rel_pos_bias=None) + x_masked = self.norm_decoder(x_masked) + + logits = self.head(x_masked) + + return logits, latent_pred diff --git a/mmpretrain/models/necks/densecl_neck.py b/mmpretrain/models/necks/densecl_neck.py new file mode 100644 index 0000000..bee9a23 --- /dev/null +++ b/mmpretrain/models/necks/densecl_neck.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class DenseCLNeck(BaseModule): + """The non-linear neck of DenseCL. + + Single and dense neck in parallel: fc-relu-fc, conv-relu-conv. + Borrowed from the authors' `code `_. + + Args: + in_channels (int): Number of input channels. + hid_channels (int): Number of hidden channels. + out_channels (int): Number of output channels. + num_grid (int): The grid size of dense features. Defaults to None. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + hid_channels: int, + out_channels: int, + num_grid: Optional[int] = None, + init_cfg: Optional[Union[dict, List[dict]]] = None) -> None: + super().__init__(init_cfg) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.mlp = nn.Sequential( + nn.Linear(in_channels, hid_channels), nn.ReLU(inplace=True), + nn.Linear(hid_channels, out_channels)) + + self.with_pool = True if num_grid is not None else False + if self.with_pool: + self.pool = nn.AdaptiveAvgPool2d((num_grid, num_grid)) + self.mlp2 = nn.Sequential( + nn.Conv2d(in_channels, hid_channels, 1), nn.ReLU(inplace=True), + nn.Conv2d(hid_channels, out_channels, 1)) + self.avgpool2 = nn.AdaptiveAvgPool2d((1, 1)) + + def forward(self, x: Tuple[torch.Tensor]) -> Tuple[torch.Tensor]: + """Forward function of neck. + + Args: + x (Tuple[torch.Tensor]): feature map of backbone. + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + - ``avgpooled_x``: Global feature vectors. + - ``x``: Dense feature vectors. + - ``avgpooled_x2``: Dense feature vectors for queue. + """ + assert len(x) == 1 + x = x[0] + + avgpooled_x = self.avgpool(x) + avgpooled_x = self.mlp(avgpooled_x.view(avgpooled_x.size(0), -1)) + + if self.with_pool: + x = self.pool(x) # sxs + x = self.mlp2(x) # sxs: bxdxsxs + avgpooled_x2 = self.avgpool2(x) # 1x1: bxdx1x1 + x = x.view(x.size(0), x.size(1), -1) # bxdxs^2 + avgpooled_x2 = avgpooled_x2.view(avgpooled_x2.size(0), -1) # bxd + return avgpooled_x, x, avgpooled_x2 diff --git a/mmpretrain/models/necks/gap.py b/mmpretrain/models/necks/gap.py new file mode 100644 index 0000000..0877743 --- /dev/null +++ b/mmpretrain/models/necks/gap.py @@ -0,0 +1,45 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class GlobalAveragePooling(nn.Module): + """Global Average Pooling neck. 
+ + Note that we use `view` to remove extra channel after pooling. We do not + use `squeeze` as it will also remove the batch dimension when the tensor + has a batch dimension of size 1, which can lead to unexpected errors. + + Args: + dim (int): Dimensions of each sample channel, can be one of {1, 2, 3}. + Default: 2 + """ + + def __init__(self, dim=2): + super(GlobalAveragePooling, self).__init__() + assert dim in [1, 2, 3], 'GlobalAveragePooling dim only support ' \ + f'{1, 2, 3}, get {dim} instead.' + if dim == 1: + self.gap = nn.AdaptiveAvgPool1d(1) + elif dim == 2: + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + else: + self.gap = nn.AdaptiveAvgPool3d((1, 1, 1)) + + def init_weights(self): + pass + + def forward(self, inputs): + if isinstance(inputs, tuple): + outs = tuple([self.gap(x) for x in inputs]) + outs = tuple( + [out.view(x.size(0), -1) for out, x in zip(outs, inputs)]) + elif isinstance(inputs, torch.Tensor): + outs = self.gap(inputs) + outs = outs.view(inputs.size(0), -1) + else: + raise TypeError('neck inputs should be tuple or torch.tensor') + return outs diff --git a/mmpretrain/models/necks/gem.py b/mmpretrain/models/necks/gem.py new file mode 100644 index 0000000..f5648be --- /dev/null +++ b/mmpretrain/models/necks/gem.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from torch import Tensor, nn +from torch.nn import functional as F +from torch.nn.parameter import Parameter + +from mmpretrain.registry import MODELS + + +def gem(x: Tensor, p: Parameter, eps: float = 1e-6, clamp=True) -> Tensor: + if clamp: + x = x.clamp(min=eps) + return F.avg_pool2d(x.pow(p), (x.size(-2), x.size(-1))).pow(1. / p) + + +@MODELS.register_module() +class GeneralizedMeanPooling(nn.Module): + """Generalized Mean Pooling neck. + + Note that we use `view` to remove extra channel after pooling. We do not + use `squeeze` as it will also remove the batch dimension when the tensor + has a batch dimension of size 1, which can lead to unexpected errors. + + Args: + p (float): Parameter value. Defaults to 3. + eps (float): epsilon. Defaults to 1e-6. + clamp (bool): Use clamp before pooling. Defaults to True + p_trainable (bool): Toggle whether Parameter p is trainable or not. + Defaults to True. + """ + + def __init__(self, p=3., eps=1e-6, clamp=True, p_trainable=True): + assert p >= 1, "'p' must be a value greater than 1" + super(GeneralizedMeanPooling, self).__init__() + self.p = Parameter(torch.ones(1) * p, requires_grad=p_trainable) + self.eps = eps + self.clamp = clamp + self.p_trainable = p_trainable + + def forward(self, inputs): + if isinstance(inputs, tuple): + outs = tuple([ + gem(x, p=self.p, eps=self.eps, clamp=self.clamp) + for x in inputs + ]) + outs = tuple( + [out.view(x.size(0), -1) for out, x in zip(outs, inputs)]) + elif isinstance(inputs, torch.Tensor): + outs = gem(inputs, p=self.p, eps=self.eps, clamp=self.clamp) + outs = outs.view(inputs.size(0), -1) + else: + raise TypeError('neck inputs should be tuple or torch.tensor') + return outs diff --git a/mmpretrain/models/necks/hr_fuse.py b/mmpretrain/models/necks/hr_fuse.py new file mode 100644 index 0000000..4a97f86 --- /dev/null +++ b/mmpretrain/models/necks/hr_fuse.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch.nn as nn +from mmcv.cnn.bricks import ConvModule +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS +from ..backbones.resnet import Bottleneck, ResLayer + + +@MODELS.register_module() +class HRFuseScales(BaseModule): + """Fuse feature map of multiple scales in HRNet. + + Args: + in_channels (list[int]): The input channels of all scales. + out_channels (int): The channels of fused feature map. + Defaults to 2048. + norm_cfg (dict): dictionary to construct norm layers. + Defaults to ``dict(type='BN', momentum=0.1)``. + init_cfg (dict | list[dict], optional): Initialization config dict. + Defaults to ``dict(type='Normal', layer='Linear', std=0.01))``. + """ + + def __init__(self, + in_channels, + out_channels=2048, + norm_cfg=dict(type='BN', momentum=0.1), + init_cfg=dict(type='Normal', layer='Linear', std=0.01)): + super(HRFuseScales, self).__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.norm_cfg = norm_cfg + + block_type = Bottleneck + out_channels = [128, 256, 512, 1024] + + # Increase the channels on each resolution + # from C, 2C, 4C, 8C to 128, 256, 512, 1024 + increase_layers = [] + for i in range(len(in_channels)): + increase_layers.append( + ResLayer( + block_type, + in_channels=in_channels[i], + out_channels=out_channels[i], + num_blocks=1, + stride=1, + )) + self.increase_layers = nn.ModuleList(increase_layers) + + # Downsample feature maps in each scale. + downsample_layers = [] + for i in range(len(in_channels) - 1): + downsample_layers.append( + ConvModule( + in_channels=out_channels[i], + out_channels=out_channels[i + 1], + kernel_size=3, + stride=2, + padding=1, + norm_cfg=self.norm_cfg, + bias=False, + )) + self.downsample_layers = nn.ModuleList(downsample_layers) + + # The final conv block before final classifier linear layer. + self.final_layer = ConvModule( + in_channels=out_channels[3], + out_channels=self.out_channels, + kernel_size=1, + norm_cfg=self.norm_cfg, + bias=False, + ) + + def forward(self, x): + assert isinstance(x, tuple) and len(x) == len(self.in_channels) + + feat = self.increase_layers[0](x[0]) + for i in range(len(self.downsample_layers)): + feat = self.downsample_layers[i](feat) + \ + self.increase_layers[i + 1](x[i + 1]) + + return (self.final_layer(feat), ) diff --git a/mmpretrain/models/necks/itpn_neck.py b/mmpretrain/models/necks/itpn_neck.py new file mode 100644 index 0000000..1a3626a --- /dev/null +++ b/mmpretrain/models/necks/itpn_neck.py @@ -0,0 +1,388 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import List, Optional, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_norm_layer +from mmengine.model import BaseModule + +from mmpretrain.models.backbones.hivit import BlockWithRPE +from mmpretrain.registry import MODELS +from ..backbones.vision_transformer import TransformerEncoderLayer +from ..utils import build_2d_sincos_position_embedding + + +class PatchSplit(nn.Module): + """The up-sample module used in neck (transformer pyramid network) + + Args: + dim (int): the input dimension (channel number). + fpn_dim (int): the fpn dimension (channel number). + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. 
+ """ + + def __init__(self, dim, fpn_dim, norm_cfg): + super().__init__() + _, self.norm = build_norm_layer(norm_cfg, dim) + self.reduction = nn.Linear(dim, fpn_dim * 4, bias=False) + self.fpn_dim = fpn_dim + + def forward(self, x): + B, N, H, W, C = x.shape + x = self.norm(x) + x = self.reduction(x) + x = x.reshape(B, N, H, W, 2, 2, + self.fpn_dim).permute(0, 1, 2, 4, 3, 5, + 6).reshape(B, N, 2 * H, 2 * W, + self.fpn_dim) + return x + + +@MODELS.register_module() +class iTPNPretrainDecoder(BaseModule): + """The neck module of iTPN (transformer pyramid network). + + Args: + num_patches (int): The number of total patches. Defaults to 196. + patch_size (int): Image patch size. Defaults to 16. + in_chans (int): The channel of input image. Defaults to 3. + embed_dim (int): Encoder's embedding dimension. Defaults to 512. + fpn_dim (int): The fpn dimension (channel number). + fpn_depth (int): The layer number of feature pyramid. + decoder_embed_dim (int): Decoder's embedding dimension. + Defaults to 512. + decoder_depth (int): The depth of decoder. Defaults to 8. + decoder_num_heads (int): Number of attention heads of decoder. + Defaults to 16. + mlp_ratio (int): Ratio of mlp hidden dim to decoder's embedding dim. + Defaults to 4. + norm_cfg (dict): Normalization layer. Defaults to LayerNorm. + reconstruction_type (str): The itpn supports 2 kinds of supervisions. + Defaults to 'pixel'. + num_outs (int): The output number of neck (transformer pyramid + network). Defaults to 3. + predict_feature_dim (int): The output dimension to supervision. + Defaults to None. + init_cfg (Union[List[dict], dict], optional): Initialization config + dict. Defaults to None. + """ + + def __init__(self, + num_patches: int = 196, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 512, + fpn_dim: int = 256, + fpn_depth: int = 2, + decoder_embed_dim: int = 512, + decoder_depth: int = 6, + decoder_num_heads: int = 16, + mlp_ratio: int = 4, + norm_cfg: dict = dict(type='LN', eps=1e-6), + reconstruction_type: str = 'pixel', + num_outs: int = 3, + qkv_bias: bool = True, + qk_scale: Optional[bool] = None, + drop_rate: float = 0.0, + attn_drop_rate: float = 0.0, + predict_feature_dim: Optional[float] = None, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.num_patches = num_patches + assert reconstruction_type in ['pixel', 'clip'], \ + 'iTPN method only support `pixel` and `clip`, ' \ + f'but got `{reconstruction_type}`.' 
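As a hedged aside at this point in the constructor: the `pixel` branch below builds a full transformer decoder (mask token, fixed sin-cos position embedding, and a linear head mapping back to patch_size**2 * in_chans values), while the `clip` branch only adds a final norm, since the CLIP-feature target is compared outside this module. A rough config sketch; the values are assumptions, not taken from any config in this patch:

>>> # hypothetical neck configs; only the supervision type differs
>>> pixel_neck_cfg = dict(
...     type='iTPNPretrainDecoder', reconstruction_type='pixel',
...     num_patches=196, patch_size=16, in_chans=3)
>>> clip_neck_cfg = dict(
...     type='iTPNPretrainDecoder', reconstruction_type='clip')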
+ self.reconstruction_type = reconstruction_type + self.num_outs = num_outs + + self.build_transformer_pyramid( + num_outs=num_outs, + embed_dim=embed_dim, + fpn_dim=fpn_dim, + fpn_depth=fpn_depth, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + rpe=False, + norm_cfg=norm_cfg, + ) + + # merge the output + self.decoder_embed = nn.ModuleList() + self.decoder_embed.append( + nn.Sequential( + nn.LayerNorm(fpn_dim), + nn.Linear(fpn_dim, decoder_embed_dim, bias=True), + )) + + if self.num_outs >= 2: + self.decoder_embed.append( + nn.Sequential( + nn.LayerNorm(fpn_dim), + nn.Linear(fpn_dim, decoder_embed_dim // 4, bias=True), + )) + if self.num_outs >= 3: + self.decoder_embed.append( + nn.Sequential( + nn.LayerNorm(fpn_dim), + nn.Linear(fpn_dim, decoder_embed_dim // 16, bias=True), + )) + + if reconstruction_type == 'pixel': + self.mask_token = nn.Parameter( + torch.zeros(1, 1, decoder_embed_dim)) + + # create new position embedding, different from that in encoder + # and is not learnable + self.decoder_pos_embed = nn.Parameter( + torch.zeros(1, self.num_patches, decoder_embed_dim), + requires_grad=False) + + self.decoder_blocks = nn.ModuleList([ + TransformerEncoderLayer( + decoder_embed_dim, + decoder_num_heads, + int(mlp_ratio * decoder_embed_dim), + qkv_bias=True, + norm_cfg=norm_cfg) for _ in range(decoder_depth) + ]) + + self.decoder_norm_name, decoder_norm = build_norm_layer( + norm_cfg, decoder_embed_dim, postfix=1) + self.add_module(self.decoder_norm_name, decoder_norm) + + # Used to map features to pixels + if predict_feature_dim is None: + predict_feature_dim = patch_size**2 * in_chans + self.decoder_pred = nn.Linear( + decoder_embed_dim, predict_feature_dim, bias=True) + else: + _, norm = build_norm_layer(norm_cfg, embed_dim) + self.add_module('norm', norm) + + def build_transformer_pyramid(self, + num_outs=3, + embed_dim=512, + fpn_dim=256, + fpn_depth=2, + mlp_ratio=4.0, + qkv_bias=True, + qk_scale=None, + drop_rate=0.0, + attn_drop_rate=0.0, + rpe=False, + norm_cfg=None): + Hp = None + mlvl_dims = {'4': embed_dim // 4, '8': embed_dim // 2, '16': embed_dim} + if num_outs > 1: + if embed_dim != fpn_dim: + self.align_dim_16tofpn = nn.Linear(embed_dim, fpn_dim) + else: + self.align_dim_16tofpn = None + self.fpn_modules = nn.ModuleList() + self.fpn_modules.append( + BlockWithRPE( + Hp, + fpn_dim, + 0, + mlp_ratio, + qkv_bias, + qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=0., + rpe=rpe, + norm_cfg=norm_cfg)) + self.fpn_modules.append( + BlockWithRPE( + Hp, + fpn_dim, + 0, + mlp_ratio, + qkv_bias, + qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=0., + rpe=False, + norm_cfg=norm_cfg, + )) + + self.align_dim_16to8 = nn.Linear( + mlvl_dims['8'], fpn_dim, bias=False) + self.split_16to8 = PatchSplit(mlvl_dims['16'], fpn_dim, norm_cfg) + self.block_16to8 = nn.Sequential(*[ + BlockWithRPE( + Hp, + fpn_dim, + 0, + mlp_ratio, + qkv_bias, + qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=0., + rpe=rpe, + norm_cfg=norm_cfg, + ) for _ in range(fpn_depth) + ]) + + if num_outs > 2: + self.align_dim_8to4 = nn.Linear( + mlvl_dims['4'], fpn_dim, bias=False) + self.split_8to4 = PatchSplit(fpn_dim, fpn_dim, norm_cfg) + self.block_8to4 = nn.Sequential(*[ + BlockWithRPE( + Hp, + fpn_dim, + 0, + mlp_ratio, + qkv_bias, + qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=0., + rpe=rpe, + norm_cfg=norm_cfg, + ) for _ in range(fpn_depth) + ]) + 
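To make the pyramid construction above concrete: split_16to8 and split_8to4 are PatchSplit modules, so each level turns every token into a 2x2 block of fpn_dim-channel sub-tokens, while the align_dim_* linears bring the skip features to the same width. A minimal sketch of that reshaping, assuming PatchSplit is imported from mmpretrain.models.necks.itpn_neck and using the default embed_dim=512, fpn_dim=256 with an assumed 14x14 token grid:

>>> import torch
>>> from mmpretrain.models.necks.itpn_neck import PatchSplit
>>> split = PatchSplit(dim=512, fpn_dim=256, norm_cfg=dict(type='LN'))
>>> x = torch.rand(2, 196, 1, 1, 512)  # (B, N, H, W, C), as used in forward()
>>> tuple(split(x).shape)
(2, 196, 2, 2, 256)

Applying a second split (with dim=256) doubles the grid again to 4x4, which is how the stride-8 and stride-4 levels are recovered from the stride-16 tokens.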
self.fpn_modules.append( + BlockWithRPE( + Hp, + fpn_dim, + 0, + mlp_ratio, + qkv_bias, + qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=0., + rpe=rpe, + norm_cfg=norm_cfg)) + + def init_weights(self) -> None: + """Initialize position embedding and mask token of MAE decoder.""" + super().init_weights() + + if self.reconstruction_type == 'pixel': + decoder_pos_embed = build_2d_sincos_position_embedding( + int(self.num_patches**.5), + self.decoder_pos_embed.shape[-1], + cls_token=False) + self.decoder_pos_embed.data.copy_(decoder_pos_embed.float()) + + torch.nn.init.normal_(self.mask_token, std=.02) + else: + self.rescale_init_weight() + + def rescale_init_weight(self) -> None: + """Rescale the initialized weights.""" + + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.fpn_modules): + if isinstance(layer, BlockWithRPE): + if layer.attn is not None: + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + @property + def decoder_norm(self): + """The normalization layer of decoder.""" + return getattr(self, self.decoder_norm_name) + + def forward(self, + x: torch.Tensor, + ids_restore: torch.Tensor = None) -> torch.Tensor: + """The forward function. + + The process computes the visible patches' features vectors and the mask + tokens to output feature vectors, which will be used for + reconstruction. + + Args: + x (torch.Tensor): hidden features, which is of shape + B x (L * mask_ratio) x C. + ids_restore (torch.Tensor): ids to restore original image. + + Returns: + torch.Tensor: The reconstructed feature vectors, which is of + shape B x (num_patches) x C. + """ + + features = x[:2] + x = x[-1] + B, L, _ = x.shape + x = x[..., None, None, :] + Hp = Wp = math.sqrt(L) + + outs = [x] if self.align_dim_16tofpn is None else [ + self.align_dim_16tofpn(x) + ] + if self.num_outs >= 2: + x = self.block_16to8( + self.split_16to8(x) + self.align_dim_16to8(features[1])) + outs.append(x) + if self.num_outs >= 3: + x = self.block_8to4( + self.split_8to4(x) + self.align_dim_8to4(features[0])) + outs.append(x) + if self.num_outs > 3: + outs = [ + out.reshape(B, Hp, Wp, *out.shape[-3:]).permute( + 0, 5, 1, 3, 2, 4).reshape(B, -1, Hp * out.shape[-3], + Wp * out.shape[-2]).contiguous() + for out in outs + ] + if self.num_outs >= 4: + outs.insert(0, F.avg_pool2d(outs[0], kernel_size=2, stride=2)) + if self.num_outs >= 5: + outs.insert(0, F.avg_pool2d(outs[0], kernel_size=2, stride=2)) + + for i, out in enumerate(outs): + out = self.fpn_modules[i](out) + outs[i] = out + + if self.reconstruction_type == 'pixel': + feats = [] + for feat, layer in zip(outs, self.decoder_embed): + x = layer(feat).reshape(B, L, -1) + # append mask tokens to sequence + mask_tokens = self.mask_token.repeat( + x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1) + x = torch.cat([x, mask_tokens], dim=1) + x = torch.gather( + x, + dim=1, + index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2])) + feats.append(x) + x = feats.pop(0) + # add pos embed + x = x + self.decoder_pos_embed + + for i, feat in enumerate(feats): + x = x + feats[i] + # apply Transformer blocks + for i, blk in enumerate(self.decoder_blocks): + x = blk(x) + x = self.decoder_norm(x) + x = self.decoder_pred(x) + return x + else: + feats = [] + for feat, layer in zip(outs, self.decoder_embed): + x = layer(feat).reshape(B, L, -1) + feats.append(x) + x = feats.pop(0) + for i, feat in enumerate(feats): + x = x + feats[i] + + x = self.norm(x) 
+ + return x diff --git a/mmpretrain/models/necks/linear_neck.py b/mmpretrain/models/necks/linear_neck.py new file mode 100644 index 0000000..bcdbee2 --- /dev/null +++ b/mmpretrain/models/necks/linear_neck.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from typing import Optional, Tuple, Union + +import torch +import torch.nn as nn +from mmcv.cnn import build_activation_layer, build_norm_layer +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class LinearNeck(BaseModule): + """Linear neck with Dimension projection. + + Args: + in_channels (int): Number of channels in the input. + out_channels (int): Number of channels in the output. + gap_dim (int): Dimensions of each sample channel, can be one of + {0, 1, 2, 3}. Defaults to 0. + norm_cfg (dict, optional): dictionary to construct and + config norm layer. Defaults to dict(type='BN1d'). + act_cfg (dict, optional): dictionary to construct and + config activate layer. Defaults to None. + init_cfg (dict, optional): dictionary to initialize weights. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + gap_dim: int = 0, + norm_cfg: Optional[dict] = dict(type='BN1d'), + act_cfg: Optional[dict] = None, + init_cfg: Optional[dict] = None): + super().__init__(init_cfg=init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.norm_cfg = copy.deepcopy(norm_cfg) + self.act_cfg = copy.deepcopy(act_cfg) + + assert gap_dim in [0, 1, 2, 3], 'GlobalAveragePooling dim only ' \ + f'support {0, 1, 2, 3}, get {gap_dim} instead.' + if gap_dim == 0: + self.gap = nn.Identity() + elif gap_dim == 1: + self.gap = nn.AdaptiveAvgPool1d(1) + elif gap_dim == 2: + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + elif gap_dim == 3: + self.gap = nn.AdaptiveAvgPool3d((1, 1, 1)) + + self.fc = nn.Linear(in_features=in_channels, out_features=out_channels) + + if norm_cfg: + self.norm = build_norm_layer(norm_cfg, out_channels)[1] + else: + self.norm = nn.Identity() + + if act_cfg: + self.act = build_activation_layer(act_cfg) + else: + self.act = nn.Identity() + + def forward(self, inputs: Union[Tuple, + torch.Tensor]) -> Tuple[torch.Tensor]: + """forward function. + + Args: + inputs (Union[Tuple, torch.Tensor]): The features extracted from + the backbone. Multiple stage inputs are acceptable but only + the last stage will be used. + + Returns: + Tuple[torch.Tensor]: A tuple of output features. + """ + assert isinstance(inputs, (tuple, torch.Tensor)), ( + 'The inputs of `LinearNeck` must be tuple or `torch.Tensor`, ' + f'but get {type(inputs)}.') + if isinstance(inputs, tuple): + inputs = inputs[-1] + + x = self.gap(inputs) + x = x.view(x.size(0), -1) + out = self.act(self.norm(self.fc(x))) + return (out, ) diff --git a/mmpretrain/models/necks/mae_neck.py b/mmpretrain/models/necks/mae_neck.py new file mode 100644 index 0000000..773692d --- /dev/null +++ b/mmpretrain/models/necks/mae_neck.py @@ -0,0 +1,188 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS +from ..backbones.vision_transformer import TransformerEncoderLayer +from ..utils import build_2d_sincos_position_embedding + + +@MODELS.register_module() +class MAEPretrainDecoder(BaseModule): + """Decoder for MAE Pre-training. 
+ + Some of the code is borrowed from `https://github.com/facebookresearch/mae`. # noqa + + Args: + num_patches (int): The number of total patches. Defaults to 196. + patch_size (int): Image patch size. Defaults to 16. + in_chans (int): The channel of input image. Defaults to 3. + embed_dim (int): Encoder's embedding dimension. Defaults to 1024. + decoder_embed_dim (int): Decoder's embedding dimension. + Defaults to 512. + decoder_depth (int): The depth of decoder. Defaults to 8. + decoder_num_heads (int): Number of attention heads of decoder. + Defaults to 16. + mlp_ratio (int): Ratio of mlp hidden dim to decoder's embedding dim. + Defaults to 4. + norm_cfg (dict): Normalization layer. Defaults to LayerNorm. + init_cfg (Union[List[dict], dict], optional): Initialization config + dict. Defaults to None. + + Example: + >>> from mmpretrain.models import MAEPretrainDecoder + >>> import torch + >>> self = MAEPretrainDecoder() + >>> self.eval() + >>> inputs = torch.rand(1, 50, 1024) + >>> ids_restore = torch.arange(0, 196).unsqueeze(0) + >>> level_outputs = self.forward(inputs, ids_restore) + >>> print(tuple(level_outputs.shape)) + (1, 196, 768) + """ + + def __init__(self, + num_patches: int = 196, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 1024, + decoder_embed_dim: int = 512, + decoder_depth: int = 8, + decoder_num_heads: int = 16, + mlp_ratio: int = 4, + norm_cfg: dict = dict(type='LN', eps=1e-6), + predict_feature_dim: Optional[float] = None, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.num_patches = num_patches + + # used to convert the dim of features from encoder to the dim + # compatible with that of decoder + self.decoder_embed = nn.Linear(embed_dim, decoder_embed_dim, bias=True) + + self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim)) + + # create new position embedding, different from that in encoder + # and is not learnable + self.decoder_pos_embed = nn.Parameter( + torch.zeros(1, self.num_patches + 1, decoder_embed_dim), + requires_grad=False) + + self.decoder_blocks = nn.ModuleList([ + TransformerEncoderLayer( + decoder_embed_dim, + decoder_num_heads, + int(mlp_ratio * decoder_embed_dim), + qkv_bias=True, + norm_cfg=norm_cfg) for _ in range(decoder_depth) + ]) + + self.decoder_norm_name, decoder_norm = build_norm_layer( + norm_cfg, decoder_embed_dim, postfix=1) + self.add_module(self.decoder_norm_name, decoder_norm) + + # Used to map features to pixels + if predict_feature_dim is None: + predict_feature_dim = patch_size**2 * in_chans + self.decoder_pred = nn.Linear( + decoder_embed_dim, predict_feature_dim, bias=True) + + def init_weights(self) -> None: + """Initialize position embedding and mask token of MAE decoder.""" + super().init_weights() + + decoder_pos_embed = build_2d_sincos_position_embedding( + int(self.num_patches**.5), + self.decoder_pos_embed.shape[-1], + cls_token=True) + self.decoder_pos_embed.data.copy_(decoder_pos_embed.float()) + + torch.nn.init.normal_(self.mask_token, std=.02) + + @property + def decoder_norm(self): + """The normalization layer of decoder.""" + return getattr(self, self.decoder_norm_name) + + def forward(self, x: torch.Tensor, + ids_restore: torch.Tensor) -> torch.Tensor: + """The forward function. + + The process computes the visible patches' features vectors and the mask + tokens to output feature vectors, which will be used for + reconstruction. + + Args: + x (torch.Tensor): hidden features, which is of shape + B x (L * mask_ratio) x C. 
+ ids_restore (torch.Tensor): ids to restore original image. + + Returns: + torch.Tensor: The reconstructed feature vectors, which is of + shape B x (num_patches) x C. + """ + # embed tokens + x = self.decoder_embed(x) + + # append mask tokens to sequence + mask_tokens = self.mask_token.repeat( + x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1) + x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1) + x_ = torch.gather( + x_, + dim=1, + index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2])) + x = torch.cat([x[:, :1, :], x_], dim=1) + + # add pos embed + x = x + self.decoder_pos_embed + + # apply Transformer blocks + for blk in self.decoder_blocks: + x = blk(x) + x = self.decoder_norm(x) + + # predictor projection + x = self.decoder_pred(x) + + # remove cls token + x = x[:, 1:, :] + + return x + + +@MODELS.register_module() +class ClsBatchNormNeck(BaseModule): + """Normalize cls token across batch before head. + + This module is proposed by MAE, when running linear probing. + + Args: + input_features (int): The dimension of features. + affine (bool): a boolean value that when set to ``True``, this module + has learnable affine parameters. Defaults to False. + eps (float): a value added to the denominator for numerical stability. + Defaults to 1e-6. + init_cfg (Dict or List[Dict], optional): Config dict for weight + initialization. Defaults to None. + """ + + def __init__(self, + input_features: int, + affine: bool = False, + eps: float = 1e-6, + init_cfg: Optional[Union[dict, List[dict]]] = None) -> None: + super().__init__(init_cfg) + self.bn = nn.BatchNorm1d(input_features, affine=affine, eps=eps) + + def forward( + self, + inputs: Tuple[List[torch.Tensor]]) -> Tuple[List[torch.Tensor]]: + """The forward function.""" + # Only apply batch norm to cls_token + inputs = [self.bn(input_) for input_ in inputs] + return tuple(inputs) diff --git a/mmpretrain/models/necks/milan_neck.py b/mmpretrain/models/necks/milan_neck.py new file mode 100644 index 0000000..b48b767 --- /dev/null +++ b/mmpretrain/models/necks/milan_neck.py @@ -0,0 +1,222 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import torch +from torch import nn + +from mmpretrain.registry import MODELS +from ..backbones.vision_transformer import TransformerEncoderLayer +from ..utils import PromptMultiheadAttention +from .mae_neck import MAEPretrainDecoder + + +class PromptTransformerEncoderLayer(TransformerEncoderLayer): + """Prompt Transformer Encoder Layer for MILAN. + + This module is specific for the prompt encoder in MILAN. It will not update + the visible tokens from the encoder. + + Args: + embed_dims (int): The feature dimension. + num_heads (int): Parallel attention heads. + feedforward_channels (int): The hidden dimension for FFNs. + drop_rate (float): Probability of an element to be zeroed + after the feed forward layer. Defaults to 0.0. + attn_drop_rate (float): The drop out rate for attention layer. + Defaults to 0.0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.0. + num_fcs (int): The number of fully-connected layers for FFNs. + Defaults to 2. + qkv_bias (bool): Enable bias for qkv if True. Defaults to True. + act_cfg (dict): The activation config for FFNs. + Defaults to ``dict(type='GELU')``. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + batch_first (bool): Key, Query and Value are shape of + (batch, n, embed_dim) + or (n, batch, embed_dim). Defaults to False. 
+ init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims: int, + num_heads: int, + feedforward_channels=int, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + num_fcs: int = 2, + qkv_bias: bool = True, + act_cfg: dict = dict(type='GELU'), + norm_cfg: dict = dict(type='LN'), + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + embed_dims=embed_dims, + num_heads=num_heads, + feedforward_channels=feedforward_channels, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + num_fcs=num_fcs, + qkv_bias=qkv_bias, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + init_cfg=init_cfg) + self.attn = PromptMultiheadAttention( + embed_dims=embed_dims, + num_heads=num_heads, + attn_drop=attn_drop_rate, + proj_drop=drop_rate, + dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate), + qkv_bias=qkv_bias) + + def forward(self, x: torch.Tensor, visible_tokens: torch.Tensor, + ids_restore: torch.Tensor) -> torch.Tensor: + """Forward function for `PromptMultiheadAttention`. + + Args: + x (torch.Tensor): Mask token features with shape N x L_m x C. + visible_tokens (torch.Tensor): The visible tokens features from + encoder with shape N x L_v x C. + ids_restore (torch.Tensor): The ids of all tokens in the original + image with shape N x L. + + Returns: + torch Tensor: Output features with shape N x L x C. + """ + x = x + self.attn(self.norm1(x), visible_tokens, ids_restore) + x = self.ffn(self.norm2(x), identity=x) + return x + + +@MODELS.register_module() +class MILANPretrainDecoder(MAEPretrainDecoder): + """Prompt decoder for MILAN. + + This decoder is used in MILAN pretraining, which will not update these + visible tokens from the encoder. + + Args: + num_patches (int): The number of total patches. Defaults to 196. + patch_size (int): Image patch size. Defaults to 16. + in_chans (int): The channel of input image. Defaults to 3. + embed_dim (int): Encoder's embedding dimension. Defaults to 1024. + decoder_embed_dim (int): Decoder's embedding dimension. + Defaults to 512. + decoder_depth (int): The depth of decoder. Defaults to 8. + decoder_num_heads (int): Number of attention heads of decoder. + Defaults to 16. + predict_feature_dim (int): The dimension of the feature to be + predicted. Defaults to 512. + mlp_ratio (int): Ratio of mlp hidden dim to decoder's embedding dim. + Defaults to 4. + norm_cfg (dict): Normalization layer. Defaults to LayerNorm. + init_cfg (Union[List[dict], dict], optional): Initialization config + dict. Defaults to None. 
+ """ + + def __init__(self, + num_patches: int = 196, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 1024, + decoder_embed_dim: int = 512, + decoder_depth: int = 8, + decoder_num_heads: int = 16, + predict_feature_dim: int = 512, + mlp_ratio: int = 4, + norm_cfg: dict = dict(type='LN', eps=1e-6), + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + num_patches=num_patches, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + decoder_embed_dim=decoder_embed_dim, + decoder_depth=decoder_depth, + decoder_num_heads=decoder_num_heads, + mlp_ratio=mlp_ratio, + norm_cfg=norm_cfg, + init_cfg=init_cfg) + + # map the dim of features from decoder to the dim compatible with + # that of CLIP + self.decoder_pred = nn.Linear( + decoder_embed_dim, predict_feature_dim, bias=True) + + # use prompt transformer encoder layer, instead of the conventional + # transformer encoder layer + self.decoder_blocks = nn.ModuleList([ + PromptTransformerEncoderLayer( + decoder_embed_dim, + decoder_num_heads, + int(mlp_ratio * decoder_embed_dim), + qkv_bias=True, + norm_cfg=norm_cfg) for _ in range(decoder_depth) + ]) + + def forward(self, x: torch.Tensor, ids_restore: torch.Tensor, + ids_keep: torch.Tensor, + ids_dump: torch.Tensor) -> torch.Tensor: + """Forward function. + + Args: + x (torch.Tensor): The input features, which is of shape (N, L, C). + ids_restore (torch.Tensor): The indices to restore these tokens + to the original image. + ids_keep (torch.Tensor): The indices of tokens to be kept. + ids_dump (torch.Tensor): The indices of tokens to be masked. + + Returns: + torch.Tensor: The reconstructed features, which is of shape + (N, L, C). + """ + # embed tokens + x = self.decoder_embed(x) + + # append mask tokens to sequence + mask_tokens = self.mask_token.repeat( + x.shape[0], ids_restore.shape[1] + 1 - x.shape[1], 1) + x_ = torch.cat([x[:, 1:, :], mask_tokens], dim=1) + x_ = torch.gather( + x_, + dim=1, + index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[2])) + x = torch.cat([x[:, :1, :], x_], dim=1) + + # add pos embed + x = x + self.decoder_pos_embed + + # split mask tokens and visible tokens + visible_tokens = torch.cat([ + x[:, :1, :], + torch.gather( + x[:, 1:, :], + dim=1, + index=ids_keep.unsqueeze(-1).repeat(1, 1, x.shape[-1])) + ], + dim=1) + x = torch.gather( + x[:, 1:, :], + dim=1, + index=ids_dump.unsqueeze(-1).repeat(1, 1, x.shape[-1])) + + for blk in self.decoder_blocks: + x = blk(x, visible_tokens, ids_restore) + + # full sequence recovery + x_ = torch.cat([visible_tokens[:, 1:, :], x], dim=1) + x_ = torch.gather( + x_, + dim=1, + index=ids_restore.unsqueeze(-1).repeat(1, 1, + x.shape[-1])) # unshuffle + x = torch.cat([visible_tokens[:, :1, :], x_], dim=1) + + x = self.decoder_norm(x) + + # predictor projection + x = self.decoder_pred(x) + + return x diff --git a/mmpretrain/models/necks/mixmim_neck.py b/mmpretrain/models/necks/mixmim_neck.py new file mode 100644 index 0000000..8d67ee2 --- /dev/null +++ b/mmpretrain/models/necks/mixmim_neck.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import torch +import torch.nn as nn + +from mmpretrain.registry import MODELS +from ..utils import build_2d_sincos_position_embedding +from .mae_neck import MAEPretrainDecoder + + +@MODELS.register_module() +class MixMIMPretrainDecoder(MAEPretrainDecoder): + """Decoder for MixMIM Pretraining. + + Some of the code is borrowed from `https://github.com/Sense-X/MixMIM`. 
# noqa + + Args: + num_patches (int): The number of total patches. Defaults to 196. + patch_size (int): Image patch size. Defaults to 16. + in_chans (int): The channel of input image. Defaults to 3. + embed_dim (int): Encoder's embedding dimension. Defaults to 1024. + encoder_stride (int): The output stride of MixMIM backbone. Defaults + to 32. + decoder_embed_dim (int): Decoder's embedding dimension. + Defaults to 512. + decoder_depth (int): The depth of decoder. Defaults to 8. + decoder_num_heads (int): Number of attention heads of decoder. + Defaults to 16. + mlp_ratio (int): Ratio of mlp hidden dim to decoder's embedding dim. + Defaults to 4. + norm_cfg (dict): Normalization layer. Defaults to LayerNorm. + init_cfg (Union[List[dict], dict], optional): Initialization config + dict. Defaults to None. + """ + + def __init__(self, + num_patches: int = 196, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 1024, + encoder_stride: int = 32, + decoder_embed_dim: int = 512, + decoder_depth: int = 8, + decoder_num_heads: int = 16, + mlp_ratio: int = 4, + norm_cfg: dict = dict(type='LN', eps=1e-6), + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + + super().__init__( + num_patches=num_patches, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + decoder_embed_dim=decoder_embed_dim, + decoder_depth=decoder_depth, + decoder_num_heads=decoder_num_heads, + mlp_ratio=mlp_ratio, + norm_cfg=norm_cfg, + init_cfg=init_cfg) + + self.decoder_pos_embed = nn.Parameter( + torch.zeros(1, num_patches, decoder_embed_dim), + requires_grad=False) + self.decoder_pred = nn.Linear(decoder_embed_dim, encoder_stride**2 * 3) + + def init_weights(self) -> None: + """Initialize position embedding and mask token of MixMIM decoder.""" + super(MAEPretrainDecoder, self).init_weights() + + decoder_pos_embed = build_2d_sincos_position_embedding( + int(self.num_patches**.5), + self.decoder_pos_embed.shape[-1], + cls_token=False) + self.decoder_pos_embed.data.copy_(decoder_pos_embed.float()) + + torch.nn.init.normal_(self.mask_token, std=.02) + + def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor: + """Forward function. + + Args: + x (torch.Tensor): The input features, which is of shape (N, L, C). + mask (torch.Tensor): The tensor to indicate which tokens a + re masked. + + Returns: + torch.Tensor: The reconstructed features, which is of shape + (N, L, C). + """ + + x = self.decoder_embed(x) + B, L, C = x.shape + + mask_tokens = self.mask_token.expand(B, L, -1) + x1 = x * (1 - mask) + mask_tokens * mask + x2 = x * mask + mask_tokens * (1 - mask) + x = torch.cat([x1, x2], dim=0) + + # add pos embed + x = x + self.decoder_pos_embed + + # apply Transformer blocks + for idx, blk in enumerate(self.decoder_blocks): + x = blk(x) + x = self.decoder_norm(x) + + # predictor projection + x = self.decoder_pred(x) + + return x diff --git a/mmpretrain/models/necks/mocov2_neck.py b/mmpretrain/models/necks/mocov2_neck.py new file mode 100644 index 0000000..9ad9107 --- /dev/null +++ b/mmpretrain/models/necks/mocov2_neck.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class MoCoV2Neck(BaseModule): + """The non-linear neck of MoCo v2: fc-relu-fc. + + Args: + in_channels (int): Number of input channels. + hid_channels (int): Number of hidden channels. 
+ out_channels (int): Number of output channels. + with_avg_pool (bool): Whether to apply the global + average pooling after backbone. Defaults to True. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + hid_channels: int, + out_channels: int, + with_avg_pool: bool = True, + init_cfg: Optional[Union[dict, List[dict]]] = None) -> None: + super().__init__(init_cfg) + self.with_avg_pool = with_avg_pool + if with_avg_pool: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.mlp = nn.Sequential( + nn.Linear(in_channels, hid_channels), nn.ReLU(inplace=True), + nn.Linear(hid_channels, out_channels)) + + def forward(self, x: Tuple[torch.Tensor]) -> Tuple[torch.Tensor]: + """Forward function. + + Args: + x (Tuple[torch.Tensor]): The feature map of backbone. + + Returns: + Tuple[torch.Tensor]: The output features. + """ + assert len(x) == 1 + x = x[0] + if self.with_avg_pool: + x = self.avgpool(x) + return (self.mlp(x.view(x.size(0), -1)), ) diff --git a/mmpretrain/models/necks/nonlinear_neck.py b/mmpretrain/models/necks/nonlinear_neck.py new file mode 100644 index 0000000..ef684d3 --- /dev/null +++ b/mmpretrain/models/necks/nonlinear_neck.py @@ -0,0 +1,115 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class NonLinearNeck(BaseModule): + """The non-linear neck. + + Structure: fc-bn-[relu-fc-bn] where the substructure in [] can be repeated. + For the default setting, the repeated time is 1. + The neck can be used in many algorithms, e.g., SimCLR, BYOL, SimSiam. + + Args: + in_channels (int): Number of input channels. + hid_channels (int): Number of hidden channels. + out_channels (int): Number of output channels. + num_layers (int): Number of fc layers. Defaults to 2. + with_bias (bool): Whether to use bias in fc layers (except for the + last). Defaults to False. + with_last_bn (bool): Whether to add the last BN layer. + Defaults to True. + with_last_bn_affine (bool): Whether to have learnable affine parameters + in the last BN layer (set False for SimSiam). Defaults to True. + with_last_bias (bool): Whether to use bias in the last fc layer. + Defaults to False. + with_avg_pool (bool): Whether to apply the global average pooling + after backbone. Defaults to True. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to dict(type='SyncBN'). + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + def __init__( + self, + in_channels: int, + hid_channels: int, + out_channels: int, + num_layers: int = 2, + with_bias: bool = False, + with_last_bn: bool = True, + with_last_bn_affine: bool = True, + with_last_bias: bool = False, + with_avg_pool: bool = True, + norm_cfg: dict = dict(type='SyncBN'), + init_cfg: Optional[Union[dict, List[dict]]] = [ + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ] + ) -> None: + super(NonLinearNeck, self).__init__(init_cfg) + self.with_avg_pool = with_avg_pool + if with_avg_pool: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.relu = nn.ReLU(inplace=True) + self.fc0 = nn.Linear(in_channels, hid_channels, bias=with_bias) + self.bn0 = build_norm_layer(norm_cfg, hid_channels)[1] + + self.fc_names = [] + self.bn_names = [] + for i in range(1, num_layers): + this_channels = out_channels if i == num_layers - 1 \ + else hid_channels + if i != num_layers - 1: + self.add_module( + f'fc{i}', + nn.Linear(hid_channels, this_channels, bias=with_bias)) + self.add_module(f'bn{i}', + build_norm_layer(norm_cfg, this_channels)[1]) + self.bn_names.append(f'bn{i}') + else: + self.add_module( + f'fc{i}', + nn.Linear( + hid_channels, this_channels, bias=with_last_bias)) + if with_last_bn: + self.add_module( + f'bn{i}', + build_norm_layer( + dict(**norm_cfg, affine=with_last_bn_affine), + this_channels)[1]) + self.bn_names.append(f'bn{i}') + else: + self.bn_names.append(None) + self.fc_names.append(f'fc{i}') + + def forward(self, x: Tuple[torch.Tensor]) -> Tuple[torch.Tensor]: + """Forward function. + + Args: + x (Tuple[torch.Tensor]): The feature map of backbone. + + Returns: + Tuple[torch.Tensor]: The output features. + """ + assert len(x) == 1 + x = x[0] + if self.with_avg_pool: + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc0(x) + x = self.bn0(x) + for fc_name, bn_name in zip(self.fc_names, self.bn_names): + fc = getattr(self, fc_name) + x = self.relu(x) + x = fc(x) + if bn_name is not None: + bn = getattr(self, bn_name) + x = bn(x) + return (x, ) diff --git a/mmpretrain/models/necks/simmim_neck.py b/mmpretrain/models/necks/simmim_neck.py new file mode 100644 index 0000000..cb1e29b --- /dev/null +++ b/mmpretrain/models/necks/simmim_neck.py @@ -0,0 +1,33 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class SimMIMLinearDecoder(BaseModule): + """Linear Decoder For SimMIM pretraining. + + This neck reconstructs the original image from the shrunk feature map. + + Args: + in_channels (int): Channel dimension of the feature map. + encoder_stride (int): The total stride of the encoder. + """ + + def __init__(self, in_channels: int, encoder_stride: int) -> None: + super().__init__() + self.decoder = nn.Sequential( + nn.Conv2d( + in_channels=in_channels, + out_channels=encoder_stride**2 * 3, + kernel_size=1), + nn.PixelShuffle(encoder_stride), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + x = self.decoder(x) + return x diff --git a/mmpretrain/models/necks/spark_neck.py b/mmpretrain/models/necks/spark_neck.py new file mode 100644 index 0000000..ac129da --- /dev/null +++ b/mmpretrain/models/necks/spark_neck.py @@ -0,0 +1,169 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
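Since SimMIMLinearDecoder above is just a 1x1 convolution followed by a PixelShuffle, its effect is easiest to see as a shape check. A minimal sketch, assuming the class is exported from mmpretrain.models.necks; the channel count and 6x6 map are assumptions chosen so the 32x upsampling lands on a 192x192 image:

>>> import torch
>>> from mmpretrain.models.necks import SimMIMLinearDecoder
>>> neck = SimMIMLinearDecoder(in_channels=768, encoder_stride=32)
>>> tuple(neck(torch.rand(2, 768, 6, 6)).shape)
(2, 3, 192, 192)

The 1x1 convolution expands each location to encoder_stride**2 * 3 values and PixelShuffle rearranges them into an encoder_stride-times larger RGB map, so the reconstruction loss can be computed directly against the masked input image.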
+import math +from typing import Optional + +import torch +import torch.nn as nn +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS +from ..utils import build_norm_layer + + +def is_pow2n(x): + return x > 0 and (x & (x - 1) == 0) + + +class ConvBlock2x(BaseModule): + """The definition of convolution block.""" + + def __init__(self, + in_channels: int, + out_channels: int, + mid_channels: int, + norm_cfg: dict, + act_cfg: dict, + last_act: bool, + init_cfg: Optional[dict] = None) -> None: + super().__init__(init_cfg=init_cfg) + + self.conv1 = nn.Conv2d(in_channels, mid_channels, 3, 1, 1, bias=False) + self.norm1 = build_norm_layer(norm_cfg, mid_channels) + self.activate1 = MODELS.build(act_cfg) + + self.conv2 = nn.Conv2d(mid_channels, out_channels, 3, 1, 1, bias=False) + self.norm2 = build_norm_layer(norm_cfg, out_channels) + self.activate2 = MODELS.build(act_cfg) if last_act else nn.Identity() + + def forward(self, x: torch.Tensor): + out = self.conv1(x) + out = self.norm1(out) + out = self.activate1(out) + + out = self.conv2(out) + out = self.norm2(out) + out = self.activate2(out) + return out + + +class DecoderConvModule(BaseModule): + """The convolution module of decoder with upsampling.""" + + def __init__(self, + in_channels: int, + out_channels: int, + mid_channels: int, + kernel_size: int = 4, + scale_factor: int = 2, + num_conv_blocks: int = 1, + norm_cfg: dict = dict(type='SyncBN'), + act_cfg: dict = dict(type='ReLU6'), + last_act: bool = True, + init_cfg: Optional[dict] = None): + super().__init__(init_cfg=init_cfg) + + assert (kernel_size - scale_factor >= 0) and\ + (kernel_size - scale_factor) % 2 == 0,\ + f'kernel_size should be greater than or equal to scale_factor '\ + f'and (kernel_size - scale_factor) should be even numbers, '\ + f'while the kernel size is {kernel_size} and scale_factor is '\ + f'{scale_factor}.' + + padding = (kernel_size - scale_factor) // 2 + self.upsample = nn.ConvTranspose2d( + in_channels, + in_channels, + kernel_size=kernel_size, + stride=scale_factor, + padding=padding, + bias=True) + + conv_blocks_list = [ + ConvBlock2x( + in_channels=in_channels, + out_channels=out_channels, + mid_channels=mid_channels, + norm_cfg=norm_cfg, + last_act=last_act, + act_cfg=act_cfg) for _ in range(num_conv_blocks) + ] + self.conv_blocks = nn.Sequential(*conv_blocks_list) + + def forward(self, x): + x = self.upsample(x) + return self.conv_blocks(x) + + +@MODELS.register_module() +class SparKLightDecoder(BaseModule): + """The decoder for SparK, which upsamples the feature maps. + + Args: + feature_dim (int): The dimension of feature map. + upsample_ratio (int): The ratio of upsample, equal to downsample_raito + of the algorithm. + mid_channels (int): The middle channel of `DecoderConvModule`. Defaults + to 0. + kernel_size (int): The kernel size of `ConvTranspose2d` in + `DecoderConvModule`. Defaults to 4. + scale_factor (int): The scale_factor of `ConvTranspose2d` in + `DecoderConvModule`. Defaults to 2. + num_conv_blocks (int): The number of convolution blocks in + `DecoderConvModule`. Defaults to 1. + norm_cfg (dict): Normalization config. Defaults to dict(type='SyncBN'). + act_cfg (dict): Activation config. Defaults to dict(type='ReLU6'). + last_act (bool): Whether apply the last activation in + `DecoderConvModule`. Defaults to False. + init_cfg (dict or list[dict], optional): Initialization config dict. 
+ """ + + def __init__( + self, + feature_dim: int, + upsample_ratio: int, + mid_channels: int = 0, + kernel_size: int = 4, + scale_factor: int = 2, + num_conv_blocks: int = 1, + norm_cfg: dict = dict(type='SyncBN'), + act_cfg: dict = dict(type='ReLU6'), + last_act: bool = False, + init_cfg: Optional[dict] = [ + dict(type='Kaiming', layer=['Conv2d', 'ConvTranspose2d']), + dict(type='TruncNormal', std=0.02, layer=['Linear']), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'LayerNorm', 'SyncBatchNorm']) + ], + ): + super().__init__(init_cfg=init_cfg) + self.feature_dim = feature_dim + + assert is_pow2n(upsample_ratio) + n = round(math.log2(upsample_ratio)) + channels = [feature_dim // 2**i for i in range(n + 1)] + + self.decoder = nn.ModuleList([ + DecoderConvModule( + in_channels=c_in, + out_channels=c_out, + mid_channels=c_in if mid_channels == 0 else mid_channels, + kernel_size=kernel_size, + scale_factor=scale_factor, + num_conv_blocks=num_conv_blocks, + norm_cfg=norm_cfg, + act_cfg=act_cfg, + last_act=last_act) + for (c_in, c_out) in zip(channels[:-1], channels[1:]) + ]) + self.proj = nn.Conv2d( + channels[-1], 3, kernel_size=1, stride=1, bias=True) + + def forward(self, to_dec): + x = 0 + for i, d in enumerate(self.decoder): + if i < len(to_dec) and to_dec[i] is not None: + x = x + to_dec[i] + x = self.decoder[i](x) + return self.proj(x) diff --git a/mmpretrain/models/necks/swav_neck.py b/mmpretrain/models/necks/swav_neck.py new file mode 100644 index 0000000..807ae8b --- /dev/null +++ b/mmpretrain/models/necks/swav_neck.py @@ -0,0 +1,93 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import torch +import torch.nn as nn +from mmcv.cnn import build_norm_layer +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class SwAVNeck(BaseModule): + """The non-linear neck of SwAV: fc-bn-relu-fc-normalization. + + Args: + in_channels (int): Number of input channels. + hid_channels (int): Number of hidden channels. + out_channels (int): Number of output channels. + with_avg_pool (bool): Whether to apply the global average pooling after + backbone. Defaults to True. + with_l2norm (bool): whether to normalize the output after projection. + Defaults to True. + norm_cfg (dict): Dictionary to construct and config norm layer. + Defaults to dict(type='SyncBN'). + init_cfg (dict or list[dict], optional): Initialization config dict. + """ + + def __init__( + self, + in_channels: int, + hid_channels: int, + out_channels: int, + with_avg_pool: bool = True, + with_l2norm: bool = True, + norm_cfg: dict = dict(type='SyncBN'), + init_cfg: Optional[Union[dict, List[dict]]] = [ + dict(type='Constant', val=1, layer=['_BatchNorm', 'GroupNorm']) + ] + ) -> None: + super().__init__(init_cfg) + self.with_avg_pool = with_avg_pool + self.with_l2norm = with_l2norm + if with_avg_pool: + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + + if out_channels == 0: + self.projection_neck = nn.Identity() + elif hid_channels == 0: + self.projection_neck = nn.Linear(in_channels, out_channels) + else: + self.norm = build_norm_layer(norm_cfg, hid_channels)[1] + self.projection_neck = nn.Sequential( + nn.Linear(in_channels, hid_channels), + self.norm, + nn.ReLU(inplace=True), + nn.Linear(hid_channels, out_channels), + ) + + def forward_projection(self, x: torch.Tensor) -> torch.Tensor: + """Compute projection. + + Args: + x (torch.Tensor): The feature vectors after pooling. 
+
+        Returns:
+            torch.Tensor: The output features with projection or L2-norm.
+        """
+        x = self.projection_neck(x)
+        if self.with_l2norm:
+            x = nn.functional.normalize(x, dim=1, p=2)
+        return x
+
+    def forward(self, x: List[torch.Tensor]) -> torch.Tensor:
+        """Forward function.
+
+        Args:
+            x (List[torch.Tensor]): list of feature maps, len(x) according to
+                len(num_crops).
+
+        Returns:
+            torch.Tensor: The projection vectors.
+        """
+        avg_out = []
+        for _x in x:
+            _x = _x[0]
+            if self.with_avg_pool:
+                _out = self.avgpool(_x)
+                avg_out.append(_out)
+        feat_vec = torch.cat(avg_out)  # [sum(num_crops) * N, C]
+        feat_vec = feat_vec.view(feat_vec.size(0), -1)
+        output = self.forward_projection(feat_vec)
+        return output
diff --git a/mmpretrain/models/peft/__init__.py b/mmpretrain/models/peft/__init__.py
new file mode 100644
index 0000000..9f43e14
--- /dev/null
+++ b/mmpretrain/models/peft/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .lora import LoRAModel
+
+__all__ = [
+    'LoRAModel',
+]
diff --git a/mmpretrain/models/peft/lora.py b/mmpretrain/models/peft/lora.py
new file mode 100644
index 0000000..ae1bae7
--- /dev/null
+++ b/mmpretrain/models/peft/lora.py
@@ -0,0 +1,205 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import math
+import re
+from typing import Any, List
+
+import torch
+from mmengine.logging import print_log
+from mmengine.model import BaseModule
+from torch import nn
+
+from mmpretrain.registry import MODELS
+
+
+class LoRALinear(nn.Module):
+    r"""Implements LoRA in a linear layer.
+
+    Args:
+        original_layer (nn.Linear): The linear layer to be finetuned.
+        alpha (int): The scale factor of LoRA. Defaults to 1.
+        rank (int): The rank of LoRA. Defaults to 0.
+        drop_rate (float): The drop out rate for LoRA. Defaults to 0.
+
+    Note:
+        The forward process of LoRA linear layer is:
+
+        .. math::
+            `y = W_0 x + BAx * (\alpha / r)`
+
+        Where :math:`x` is the input, :math:`y` is the output,
+        :math:`W_0` is the parameter of the original layer,
+        :math:`A` and :math:`B` are the low-rank decomposition matrixs,
+        :math: `\alpha` is the scale factor and :math: `r` is the rank.
+    """
+
+    def __init__(self,
+                 original_layer: nn.Linear,
+                 alpha: int = 1,
+                 rank: int = 0,
+                 drop_rate: float = 0.):
+        super(LoRALinear, self).__init__()
+        in_features = original_layer.in_features
+        out_features = original_layer.out_features
+
+        self.lora_dropout = nn.Dropout(drop_rate)
+        self.lora_down = nn.Linear(in_features, rank, bias=False)
+        self.lora_up = nn.Linear(rank, out_features, bias=False)
+        self.scaling = alpha / rank
+
+        nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))
+        nn.init.zeros_(self.lora_up.weight)
+
+        self.original_layer = original_layer
+
+    def forward(self, x: torch.Tensor):
+        out = self.original_layer(x)
+
+        lora_x = self.lora_dropout(x)
+        lora_out = self.lora_up(self.lora_down(lora_x)) * self.scaling
+
+        return out + lora_out
+
+
+@MODELS.register_module()
+class LoRAModel(BaseModule):
+    """Implements LoRA in a module.
+ + An PyTorch implement of : `LoRA: Low-Rank Adaptation + of Large Language Models `_ + + Args: + module (dict): The config of the module to be finetuned. See + :mod:`mmpretrain.models` + alpha (int): The scale factor of LoRA. Defaults to 1. + rank (int): The rank of LoRA. Defaults to 0. + drop_rate (float): The drop out rate for LoRA. Defaults to 0. + targets (List[dict]): The target layers to be applied with the LoRA. + Defaults to a empty list. Specify by regular expression or suffix. + + Examples: + >>> model = LoRAModel( + ... module=dict(type='VisionTransformer', arch='b'), + ... alpha=4, + ... rank=4, + ... drop_rate=0.1, + ... targets=[ + ... dict(type='.*qkv'), # regular expression + ... dict(type='proj', alpha=8, rank=8), # suffix + ... ]) + """ + + def __init__(self, + module: dict, + alpha: int = 1, + rank: int = 0, + drop_rate: float = 0., + targets: List[dict] = list()): + + super().__init__() + + module = MODELS.build(module) + module.init_weights() + + self.module = module + self.alpha = alpha + self.rank = rank + self.drop_rate = drop_rate + + assert len(targets) != 0, \ + 'The length of target layers should not be 0.' + + self.targets = targets + + self.applied = False + self.apply_lora() + + if not self.applied: + raise ValueError( + 'No lora layer is replaced. Please check targets.') + + self._set_lora_trainable() + self._register_state_dict_hooks() + + def apply_lora(self): + """Apply LoRA to target layers.""" + module_names = [k for k, _ in self.module.named_modules()] + for module_name in module_names: + for target in self.targets: + target_name = target['type'] + target_alpha = target.get('alpha', self.alpha) + target_rank = target.get('rank', self.rank) + target_drop_rate = target.get('drop_rate', self.drop_rate) + + if re.fullmatch(target_name, module_name) or \ + module_name.endswith(target_name): + current_module = self.module.get_submodule(module_name) + if isinstance(current_module, nn.Linear): + print_log( + f'Set LoRA for {module_name} ' + f'with alpha: {target_alpha}, ' + f'rank: {target_rank}, ' + f'drop rate: {target_drop_rate}', + logger='current') + + self._replace_module(module_name, current_module, + target_alpha, target_rank, + target_drop_rate) + self.applied = True + + def _replace_module(self, module_name: str, current_module: nn.Module, + alpha: int, rank: int, drop_rate: float): + """Replace target layer with LoRA linear layer in place.""" + parent_module_name = '.'.join(module_name.split('.')[:-1]) + parent_module = self.module.get_submodule(parent_module_name) + + target_name = module_name.split('.')[-1] + target_module = LoRALinear(current_module, alpha, rank, drop_rate) + setattr(parent_module, target_name, target_module) + + def _set_lora_trainable(self): + """Set only the lora parameters trainable.""" + for name, param in self.named_parameters(): + if '.lora_' in name: + param.requires_grad = True + else: + param.requires_grad = False + + def _register_state_dict_hooks(self): + """Register state dict hooks. + + Register state dict saving hooks to save only the lora parameters to + the state dict. And register state dict loading hooks to handle the + incompatible keys while loading the state dict. 
+ """ + + def _state_dict_hook(module, state_dict, prefix, local_metadata): + """Save only the lora parameters to the state dict.""" + keys = [k for k, _ in state_dict.items()] + for key in keys: + if '.lora_' not in key: + state_dict.pop(key) + + self._register_state_dict_hook(_state_dict_hook) + + def _load_state_dict_post_hook(module, incompatible_keys): + """Handle the incompatible keys while loading the state dict.""" + missing_keys = incompatible_keys.missing_keys.copy() + for key in missing_keys: + if '.lora_' not in key: + incompatible_keys.missing_keys.remove(key) + + unexpected_keys = incompatible_keys.unexpected_keys.copy() + for key in unexpected_keys: + if '.lora_' not in key: + incompatible_keys.unexpected_keys.remove(key) + + self.register_load_state_dict_post_hook(_load_state_dict_post_hook) + + def forward(self, *args, **kwargs): + return self.module(*args, **kwargs) + + def __getattr__(self, name: str) -> Any: + try: + return super(LoRAModel, self).__getattr__(name) + except AttributeError: + return self.module.__getattribute__(name) diff --git a/mmpretrain/models/retrievers/__init__.py b/mmpretrain/models/retrievers/__init__.py new file mode 100644 index 0000000..593b637 --- /dev/null +++ b/mmpretrain/models/retrievers/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .base import BaseRetriever +from .image2image import ImageToImageRetriever + +__all__ = ['BaseRetriever', 'ImageToImageRetriever'] diff --git a/mmpretrain/models/retrievers/__pycache__/__init__.cpython-310.pyc b/mmpretrain/models/retrievers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1def0dc67c83b9e413f4421f81f0773bed8605a2 GIT binary patch literal 278 zcmd1j<>g`kf*F60rELY$k3k${zy#zt0CBMjkVs)jVa#F3WsG8E1hJWNm~xq;n1O8O z6qaV^i+}>6p1FzXsUi7b3NBFuvZ9C? 
diff --git a/mmpretrain/models/retrievers/base.py b/mmpretrain/models/retrievers/base.py
new file mode 100644
index 0000000..1581679
--- /dev/null
+++ b/mmpretrain/models/retrievers/base.py
@@ -0,0 +1,151 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from abc import ABCMeta, abstractmethod
+from typing import List, Optional, Union
+
+import torch
+from mmengine.model import BaseModel
+from mmengine.structures import BaseDataElement
+from torch.utils.data import DataLoader
+
+
+class BaseRetriever(BaseModel, metaclass=ABCMeta):
+    """Base class for retriever.
+
+    Args:
+        init_cfg (dict, optional): Initialization config dict.
+            Defaults to None.
+        data_preprocessor (dict, optional): The config for preprocessing input
+            data.
If None, it will use "BaseDataPreprocessor" as type, see + :class:`mmengine.model.BaseDataPreprocessor` for more details. + Defaults to None. + prototype (Union[DataLoader, dict, str, torch.Tensor]): Database to be + retrieved. The following four types are supported. + + - DataLoader: The original dataloader serves as the prototype. + - dict: The configuration to construct Dataloader. + - str: The path of the saved vector. + - torch.Tensor: The saved tensor whose dimension should be dim. + + Attributes: + prototype (Union[DataLoader, dict, str, torch.Tensor]): Database to be + retrieved. The following four types are supported. + + - DataLoader: The original dataloader serves as the prototype. + - dict: The configuration to construct Dataloader. + - str: The path of the saved vector. + - torch.Tensor: The saved tensor whose dimension should be dim. + + data_preprocessor (:obj:`mmengine.model.BaseDataPreprocessor`): An + extra data pre-processing module, which processes data from + dataloader to the format accepted by :meth:`forward`. + """ + + def __init__( + self, + prototype: Union[DataLoader, dict, str, torch.Tensor] = None, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None, + ): + super(BaseRetriever, self).__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + self.prototype = prototype + self.prototype_inited = False + + @abstractmethod + def forward(self, + inputs: torch.Tensor, + data_samples: Optional[List[BaseDataElement]] = None, + mode: str = 'loss'): + """The unified entry for a forward process in both training and test. + + The method should accept three modes: "tensor", "predict" and "loss": + + - "tensor": Forward the whole network and return tensor without any + post-processing, same as a common nn.Module. + - "predict": Forward and return the predictions, which are fully + processed to a list of :obj:`DataSample`. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. + + Args: + inputs (torch.Tensor, tuple): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample], optional): The annotation + data of every samples. It's required if ``mode="loss"``. + Defaults to None. + mode (str): Return what kind of value. Defaults to 'tensor'. + + Returns: + The return type depends on ``mode``. + + - If ``mode="tensor"``, return a tensor. + - If ``mode="predict"``, return a list of + :obj:`mmpretrain.structures.DataSample`. + - If ``mode="loss"``, return a dict of tensor. + """ + pass + + def extract_feat(self, inputs: torch.Tensor): + """Extract features from the input tensor with shape (N, C, ...). + + The sub-classes are recommended to implement this method to extract + features from backbone and neck. + + Args: + inputs (Tensor): A batch of inputs. The shape of it should be + ``(num_samples, num_channels, *img_shape)``. + """ + raise NotImplementedError + + def loss(self, inputs: torch.Tensor, + data_samples: List[BaseDataElement]) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample]): The annotation data of + every samples. 
+ + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + raise NotImplementedError + + def predict(self, + inputs: tuple, + data_samples: Optional[List[BaseDataElement]] = None, + **kwargs) -> List[BaseDataElement]: + """Predict results from the extracted features. + + Args: + inputs (tuple): The features extracted from the backbone. + data_samples (List[BaseDataElement], optional): The annotation + data of every samples. Defaults to None. + **kwargs: Other keyword arguments accepted by the ``predict`` + method of :attr:`head`. + """ + raise NotImplementedError + + def matching(self, inputs: torch.Tensor): + """Compare the prototype and calculate the similarity. + + Args: + inputs (torch.Tensor): The input tensor with shape (N, C). + """ + raise NotImplementedError + + def prepare_prototype(self): + """Preprocessing the prototype before predict.""" + raise NotImplementedError + + def dump_prototype(self, path): + """Save the features extracted from the prototype to the specific path. + + Args: + path (str): Path to save feature. + """ + raise NotImplementedError diff --git a/mmpretrain/models/retrievers/image2image.py b/mmpretrain/models/retrievers/image2image.py new file mode 100644 index 0000000..a00c1dc --- /dev/null +++ b/mmpretrain/models/retrievers/image2image.py @@ -0,0 +1,314 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Callable, List, Optional, Union + +import mmengine.dist as dist +import torch +import torch.nn as nn +from mmengine.runner import Runner +from torch.utils.data import DataLoader + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from mmpretrain.utils import track_on_main_process +from .base import BaseRetriever + + +@MODELS.register_module() +class ImageToImageRetriever(BaseRetriever): + """Image To Image Retriever for supervised retrieval task. + + Args: + image_encoder (Union[dict, List[dict]]): Encoder for extracting + features. + prototype (Union[DataLoader, dict, str, torch.Tensor]): Database to be + retrieved. The following four types are supported. + + - DataLoader: The original dataloader serves as the prototype. + - dict: The configuration to construct Dataloader. + - str: The path of the saved vector. + - torch.Tensor: The saved tensor whose dimension should be dim. + + head (dict, optional): The head module to calculate loss from + processed features. See :mod:`mmpretrain.models.heads`. Notice + that if the head is not set, `loss` method cannot be used. + Defaults to None. + similarity_fn (Union[str, Callable]): The way that the similarity + is calculated. If `similarity` is callable, it is used directly + as the measure function. If it is a string, the appropriate + method will be used. The larger the calculated value, the + greater the similarity. Defaults to "cosine_similarity". + train_cfg (dict, optional): The training setting. The acceptable + fields are: + + - augments (List[dict]): The batch augmentation methods to use. + More details can be found in + :mod:`mmpretrain.model.utils.augment`. + + Defaults to None. + data_preprocessor (dict, optional): The config for preprocessing input + data. If None or no specified type, it will use + "ClsDataPreprocessor" as type. See :class:`ClsDataPreprocessor` for + more details. Defaults to None. + topk (int): Return the topk of the retrieval result. `-1` means + return all. Defaults to -1. + init_cfg (dict, optional): the config to control the initialization. + Defaults to None. 
+ """ + + def __init__(self, + image_encoder: Union[dict, List[dict]], + prototype: Union[DataLoader, dict, str, torch.Tensor], + head: Optional[dict] = None, + pretrained: Optional[str] = None, + similarity_fn: Union[str, Callable] = 'cosine_similarity', + train_cfg: Optional[dict] = None, + data_preprocessor: Optional[dict] = None, + topk: int = -1, + init_cfg: Optional[dict] = None): + + if data_preprocessor is None: + data_preprocessor = {} + # The build process is in MMEngine, so we need to add scope here. + data_preprocessor.setdefault('type', 'mmpretrain.ClsDataPreprocessor') + + if train_cfg is not None and 'augments' in train_cfg: + # Set batch augmentations by `train_cfg` + data_preprocessor['batch_augments'] = train_cfg + + super(ImageToImageRetriever, self).__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + if not isinstance(image_encoder, nn.Module): + image_encoder = MODELS.build(image_encoder) + if head is not None and not isinstance(head, nn.Module): + head = MODELS.build(head) + + self.image_encoder = image_encoder + self.head = head + + self.similarity = similarity_fn + + assert isinstance(prototype, (str, torch.Tensor, dict, DataLoader)), ( + 'The `prototype` in `ImageToImageRetriever` must be a path, ' + 'a torch.Tensor, a dataloader or a dataloader dict format config.') + self.prototype = prototype + self.prototype_inited = False + self.topk = topk + + @property + def similarity_fn(self): + """Returns a function that calculates the similarity.""" + # If self.similarity_way is callable, return it directly + if isinstance(self.similarity, Callable): + return self.similarity + + if self.similarity == 'cosine_similarity': + # a is a tensor with shape (N, C) + # b is a tensor with shape (M, C) + # "cosine_similarity" will get the matrix of similarity + # with shape (N, M). + # The higher the score is, the more similar is + return lambda a, b: torch.cosine_similarity( + a.unsqueeze(1), b.unsqueeze(0), dim=-1) + else: + raise RuntimeError(f'Invalid function "{self.similarity_fn}".') + + def forward(self, + inputs: torch.Tensor, + data_samples: Optional[List[DataSample]] = None, + mode: str = 'tensor'): + """The unified entry for a forward process in both training and test. + + The method should accept three modes: "tensor", "predict" and "loss": + + - "tensor": Forward the whole network and return tensor without any + post-processing, same as a common nn.Module. + - "predict": Forward and return the predictions, which are fully + processed to a list of :obj:`DataSample`. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. + + Args: + inputs (torch.Tensor, tuple): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample], optional): The annotation + data of every samples. It's required if ``mode="loss"``. + Defaults to None. + mode (str): Return what kind of value. Defaults to 'tensor'. + + Returns: + The return type depends on ``mode``. + + - If ``mode="tensor"``, return a tensor. + - If ``mode="predict"``, return a list of + :obj:`mmpretrain.structures.DataSample`. + - If ``mode="loss"``, return a dict of tensor. 
+ """ + if mode == 'tensor': + return self.extract_feat(inputs) + elif mode == 'loss': + return self.loss(inputs, data_samples) + elif mode == 'predict': + return self.predict(inputs, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def extract_feat(self, inputs): + """Extract features from the input tensor with shape (N, C, ...). + + Args: + inputs (Tensor): A batch of inputs. The shape of it should be + ``(num_samples, num_channels, *img_shape)``. + Returns: + Tensor: The output of encoder. + """ + + feat = self.image_encoder(inputs) + return feat + + def loss(self, inputs: torch.Tensor, + data_samples: List[DataSample]) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample]): The annotation data of + every samples. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + feats = self.extract_feat(inputs) + return self.head.loss(feats, data_samples) + + def matching(self, inputs: torch.Tensor): + """Compare the prototype and calculate the similarity. + + Args: + inputs (torch.Tensor): The input tensor with shape (N, C). + Returns: + dict: a dictionary of score and prediction label based on fn. + """ + sim = self.similarity_fn(inputs, self.prototype_vecs) + sorted_sim, indices = torch.sort(sim, descending=True, dim=-1) + predictions = dict( + score=sim, pred_label=indices, pred_score=sorted_sim) + return predictions + + def predict(self, + inputs: tuple, + data_samples: Optional[List[DataSample]] = None, + **kwargs) -> List[DataSample]: + """Predict results from the extracted features. + + Args: + inputs (tuple): The features extracted from the backbone. + data_samples (List[DataSample], optional): The annotation + data of every samples. Defaults to None. + **kwargs: Other keyword arguments accepted by the ``predict`` + method of :attr:`head`. 
+ Returns: + List[DataSample]: the raw data_samples with + the predicted results + """ + if not self.prototype_inited: + self.prepare_prototype() + + feats = self.extract_feat(inputs) + if isinstance(feats, tuple): + feats = feats[-1] + + # Matching of similarity + result = self.matching(feats) + return self._get_predictions(result, data_samples) + + def _get_predictions(self, result, data_samples): + """Post-process the output of retriever.""" + pred_scores = result['score'] + pred_labels = result['pred_label'] + if self.topk != -1: + topk = min(self.topk, pred_scores.size()[-1]) + pred_labels = pred_labels[:, :topk] + + if data_samples is not None: + for data_sample, score, label in zip(data_samples, pred_scores, + pred_labels): + data_sample.set_pred_score(score).set_pred_label(label) + else: + data_samples = [] + for score, label in zip(pred_scores, pred_labels): + data_samples.append( + DataSample().set_pred_score(score).set_pred_label(label)) + return data_samples + + def _get_prototype_vecs_from_dataloader(self, data_loader): + """get prototype_vecs from dataloader.""" + self.eval() + num = len(data_loader.dataset) + + prototype_vecs = None + for data_batch in track_on_main_process(data_loader, + 'Prepare prototype'): + data = self.data_preprocessor(data_batch, False) + feat = self(**data) + if isinstance(feat, tuple): + feat = feat[-1] + + if prototype_vecs is None: + dim = feat.shape[-1] + prototype_vecs = torch.zeros(num, dim) + for i, data_sample in enumerate(data_batch['data_samples']): + sample_idx = data_sample.get('sample_idx') + prototype_vecs[sample_idx] = feat[i] + + assert prototype_vecs is not None + dist.all_reduce(prototype_vecs) + return prototype_vecs + + def _get_prototype_vecs_from_path(self, proto_path): + """get prototype_vecs from prototype path.""" + data = [None] + if dist.is_main_process(): + data[0] = torch.load(proto_path) + dist.broadcast_object_list(data, src=0) + prototype_vecs = data[0] + assert prototype_vecs is not None + return prototype_vecs + + @torch.no_grad() + def prepare_prototype(self): + """Used in meta testing. This function will be called before the meta + testing. Obtain the vector based on the prototype. + + - torch.Tensor: The prototype vector is the prototype + - str: The path of the extracted feature path, parse data structure, + and generate the prototype feature vector set + - Dataloader or config: Extract and save the feature vectors according + to the dataloader + """ + device = next(self.image_encoder.parameters()).device + if isinstance(self.prototype, torch.Tensor): + prototype_vecs = self.prototype + elif isinstance(self.prototype, str): + prototype_vecs = self._get_prototype_vecs_from_path(self.prototype) + elif isinstance(self.prototype, (dict, DataLoader)): + loader = Runner.build_dataloader(self.prototype) + prototype_vecs = self._get_prototype_vecs_from_dataloader(loader) + + self.register_buffer( + 'prototype_vecs', prototype_vecs.to(device), persistent=False) + self.prototype_inited = True + + def dump_prototype(self, path): + """Save the features extracted from the prototype to specific path. + + Args: + path (str): Path to save feature. + """ + if not self.prototype_inited: + self.prepare_prototype() + torch.save(self.prototype_vecs, path) diff --git a/mmpretrain/models/selfsup/__init__.py b/mmpretrain/models/selfsup/__init__.py new file mode 100644 index 0000000..08c1ed5 --- /dev/null +++ b/mmpretrain/models/selfsup/__init__.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .barlowtwins import BarlowTwins
+from .base import BaseSelfSupervisor
+from .beit import VQKD, BEiT, BEiTPretrainViT
+from .byol import BYOL
+from .cae import CAE, CAEPretrainViT, DALLEEncoder
+from .densecl import DenseCL
+from .eva import EVA
+from .itpn import iTPN, iTPNHiViT
+from .mae import MAE, MAEHiViT, MAEViT
+from .maskfeat import HOGGenerator, MaskFeat, MaskFeatViT
+from .mff import MFF, MFFViT
+from .milan import MILAN, CLIPGenerator, MILANViT
+from .mixmim import MixMIM, MixMIMPretrainTransformer
+from .moco import MoCo
+from .mocov3 import MoCoV3, MoCoV3ViT
+from .simclr import SimCLR
+from .simmim import SimMIM, SimMIMSwinTransformer
+from .simsiam import SimSiam
+from .spark import SparK
+from .swav import SwAV
+
+__all__ = [
+    'BaseSelfSupervisor',
+    'BEiTPretrainViT',
+    'VQKD',
+    'CAEPretrainViT',
+    'DALLEEncoder',
+    'MAEViT',
+    'MAEHiViT',
+    'iTPNHiViT',
+    'iTPN',
+    'HOGGenerator',
+    'MaskFeatViT',
+    'CLIPGenerator',
+    'MILANViT',
+    'MixMIMPretrainTransformer',
+    'MoCoV3ViT',
+    'SimMIMSwinTransformer',
+    'MoCo',
+    'MoCoV3',
+    'BYOL',
+    'SimCLR',
+    'SimSiam',
+    'BEiT',
+    'CAE',
+    'MAE',
+    'MaskFeat',
+    'MILAN',
+    'MixMIM',
+    'SimMIM',
+    'EVA',
+    'DenseCL',
+    'BarlowTwins',
+    'SwAV',
+    'SparK',
+    'MFF',
+    'MFFViT',
+]
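Stepping back to the `ImageToImageRetriever` added above: its matching step reduces to a pairwise cosine-similarity matrix followed by a descending sort and an optional top-k cut, mirroring `similarity_fn`, `matching` and `_get_predictions`. A dependency-free sketch with made-up shapes (the feature dimension, batch sizes and topk value are placeholders):

import torch

queries = torch.randn(4, 512)            # N query embeddings (N, C)
prototype_vecs = torch.randn(100, 512)   # M gallery embeddings (M, C)

# (N, 1, C) against (1, M, C) broadcasts to an (N, M) similarity matrix;
# the higher the value, the more similar the pair.
sim = torch.cosine_similarity(
    queries.unsqueeze(1), prototype_vecs.unsqueeze(0), dim=-1)

sorted_sim, indices = torch.sort(sim, descending=True, dim=-1)

topk = 5
pred_label = indices[:, :topk]     # gallery indices of the best matches
pred_score = sorted_sim[:, :topk]  # their similarity scores
print(pred_label.shape, pred_score.shape)  # torch.Size([4, 5]) twice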
zT4%m7k9v`pf`!gvV-fW-p9z*aXBuZrL(CnWJvQDpc!kg2HTbNUy_36RHXa+pt9%ad zIdMkAALr&xqh7lY%8iYBURhWDW~{91eiW-tUVeASD+n_dTa?lVjV&@s3?sJ}Mn2(lMQ2-Z?ss?Yn@QJ&H@!&Q6hZ4|zbE8vKMG}&NY4JAO+O+7gM`sA zxzS+UY~(oOId0*}?-<$I$*bo^yDf0Q^b3pThGkF(WLD|28l_^QqA?`FFT;ceYW{ z^{4qWcZ-c#{w)9QU87MgRrvS#b7F3{rpJF<%v1fG(X#pTeEDv^u}~`Tm9)osU@jBp zNiU7F&MyG};-`jXG?pOJi|Sk=koTmBrRR4ug7GdD4I^oFw!P-9?XW8x$jnJ$Jt;2g zF_6G&x$NNghV$ORCYauKd=g7ZBWEx6-#xpTvdZlU^a$J-Hl^o@{< z0Lb(tZ?#%}(DF}TV7cXJh>y5^&*$seN^0!#qr`~xZl}<(l``)G(;Se zeoA>J_Ir>n_MhMF^<(T;>_SMw>p|Rail#^M-b?l+iJ5*;zt;<8?C^uG*YTU_NIho7 z+3iP=q==mu0)hPddh=sV<26jJntF9Y3~7d)4tVnjDzb}OU(YP7=f%zTsSDGsj$StS zlYE`BC9*SIdO5?luG?&T-L43tEaM?_+9nC9qYkT6%)dNAvgdbsxR;U{z*3})Zwr5? z9qYrf=kvI&cOJNr^q-9X_||QA+xI}hb{Ga2{6SlQYm)Yf$GM~1NFAXgXnNE>OT?Cx z*|l!UzL-puu(R3Pan7eY;H`Ze1xTjb@^`de&>HD$v(oG04EEv!J#;cp7v|2*PIKkN z1k}L`{KG`S>614ZO{>VRVQY(sJs%(R#2+tT-&n41ZB06J8R~cnBEyo)A(vJW%I;B?Fz@C}ky zWf3RtTX3`P|C)*%in?{bikCbyD()9h+f?lO20T2AHu1jzpnuoW+OJ>VP!<_zRT`^M zRUD~wRZfmA={YsyPZXA#$tWIG9BC0%O8Vpcsp9A)s?vD>sp|BuR@LeKsjAa^O_e7S zQMHpY0-Y&;l?}(LFT2h8(faVFdJPY;Ci*^#*tlctngesd2DyPX$Pes6VNe{D2Iaxb zpfZ>p)VO)CY8r#;z;M}M4rPuT@ZZ=*y(*ss2Kf{f-=gB%RD6dDhl;1Ec!mmcedTwl zc#ewasX(M+$O|aymdeAx38{*%tEqS8d1|av z0e;tThDyyKa^vup=x)?!4*}ry?(Oa_}k_zJFcGFkhF7BCHC3t z^|C7ah+L!C>o$cfpiSA={Vu{6m5(V_QWh;x&Fy=)eIea`*QdbPRd&KzC{%bs5HQ?( z;D}vSx=v2{20D$HZgvtrPnb>TA;c3cps1KNcGg@niw`QShPUA$$e}R>Pb;eJvVQy#*^Fte|3ySOsV_1L+OKQbIftyRlphzWS!_u$}2-6Uh zfXoalfaEj;B_OlIDk;DvV>lPHT?)&s!Q4&`b6Grpj}2=B7Uz#_PIDBF*<;3#pN>0PTW=UZ6+>_juEs=m8z_11T@LCmkIxAOvd}#0B53p>aJ5&A7lvHTeVdQq^hcNK$|*Pf=4OZ&2Na6Uf7kDx?ZrJ5I_X zs45^KfTI-4m#JqlYEn`}x~hQrw3#)pkrNA8FANYgsWP;E?C^fL zz-zY{w0z0q@Oj2sz9l@$PewXDL4u=n7U}A2j%~B;qeGg5cXm<>5hdD{@~|{8a-y&U zzmqacz?X18IM7*|9TAT;7TJ}y&$;M2QmD1yvG=MYP+Z>?GZ+EQ-+U<&5HM5?Vn;i;5i-jZ{~?s7x0@4U|`24?h`q z?4s}A;nCWuyjU@t`rz zMp{_7t}453lIlji;<_K>_9AU5xGoQy09%y2lQyVQIhcF-Dr(v|NO*AuTz;fHC51LR zxYePPB$qxhJ9grkNm76b!)c8O$dXXHDT2UtGx}NwVmbz+(2Jhvmne#c#n{DBuxuk1 zv%rVZck;GALyYzS!P=KB$G%_|`yZBP|IKXnIV-UL0-OGmmDulDnf(WwVgJr5?00OI z{Tr*Y-?BOOuXc@n#%k5iD$f-wtYUp>rMQZ${3UxS`L=-@cH&D(bdi{+eq^xe7yUJg z>~&oU^Co|Mf#-0^X5m7PF4OYq+s50*cL8r)<%!~Mi7x8^DJDHy$u%3id0dI%YER7U zk|$3$q86^*^f#{gs$vdf=0pwGeY%Ifa;x!q^qSWfX$#QIi^@*6%i-T9?^JKRJGuu; zoMpJeTS@zAH(}1BZ^88W|LU8tRp;wmgC$3NLls9?dN^!Ipel-t;6EA|RdGhsL z0D3ZPiU=A*=WLo3{*8WAC&!9*ZS(?)LGFlBBQVBb8oLLRiO8EFyM^FwlHKCDd-{SK zjI%c6`xu9#M0u;aE&m)Z`76AJOSa2->dj58z4Q*515FHXA8AQ~NJ?5p`+I zF7KkAIiqwuRYlOvLo;^Cxc&~1h*BI?^KdbxB-^ekT0oJ$Esx~)&~wT~qWlkO2yz^C zmJ!Yn2-9v4f^~eElxsX@2-}Tac9cWNZsI#PEFhhQp#7dXEDnma1n*@;`V^9l?C02@ zz{QJB)cv8LGGW|9CW&xnuD(uOG-lW5oTvXb`0(oZ{{uckVERT7jIJ@@w1|)UK9c(n znM&TLYg>I2aFYH%WZkbqO@9alJsl2ygqii_4sJC)sqa&QFpB6duZKiFjhmEU)zXi6G}2A~wtiT$J7eox{i z_qbX?-UqI@WggC?+b1DpgMRfz8KaZzQ*oP$Ju396(uRi|dTpW7oE7vEZf_HNbcNdf zjEYaFpwlmBsG#ktsgMJz>3ozfCMIZzhJV7N)rB@e#nGX&K1b~V83Y@~#)h$}!PwY# z_4C?%EmtuY9!MHpKPNZwQa0`lAlPJ5O@k&}TSYRh+E^yk7n9h&+m$*DY@oIA;DsoG!In1ANxU)YnAgv#vF~9OAx^4 xNqhrs?(}jpTpH)^RPoiM+iR3ej7T*W&8N&Fa;Er8O4RJ!f@%EB{?S?Ue*q+C#I*nb literal 0 HcmV?d00001 diff --git a/mmpretrain/models/selfsup/__pycache__/moco.cpython-310.pyc b/mmpretrain/models/selfsup/__pycache__/moco.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e98b6f48e97663036fe53cd07a4bf26f9f7b449 GIT binary patch literal 4511 zcma)9TW=f372cV>aF-&f8!g+ZTWr${q}HJ<2T5DkaO1{7kTh0;C`nPe!Fshbq*mGs zJ+qW!VFeVGgFG31=|dl>$NYi*g#Lg&&TAhEzZ57?AZ?n|J!h5_Nu>p{#0=+pX3ja^ zxlG<{)(!an@a3O+e_J(-zhGzfv!U|}6#09on88S5q@+ty+NG)4HB+l=5gez9-L>_< zlh(R5J!U3u>UBNcwvu|f&|T1NJ87iNZd13Nq?Imq7olBaZnBgvcbC(Z?h5ohR!>&b 
zcDGFo-W**xHC{K^0&6@rScA7u%oEaGV|2&ZXg-CT85@?e?!-}{thZuWDEI9_5$9Q$ zDEs{k8rvI0)x#tS`eCuh1@tbx_R%2Ecvi$=ayLJWv;OTvF2X*CIp^-%cV2sI2lw0w zi*P4Q2MK>l@ffO>cf%su3*_Fg*GqW7*g&mTL&I!l@F~INM6LWhlzfLLy`ABJi^Evv zBEk#Lerw~7{3A5D(Iw32QbxL_M?KTCJR5)PGIO}aYEWD#9=BN?$^v&lAT{n@Hdqt> zE%AkO>`VmS;GP2gQV5L})Sc#DT317-O) z_lja5Z)|Rc;-mPmlZ*alxGOieezLXIx%SeJu3x`(Kj>%*?}VbjgF*3rlr!$f(%;R6 z$RENElsB5-{HM*m=v0emtT76 z#h0h|L}mgOr;~H;!~iEb2G==`zse=DK1+P-`j4li(+M)ydr2N56K7bBa^CC3Q4B!T z>OwG0mcK9@u(054juDjTMdfpEK4zP84b5!OI=l+c-38lPo|Nrc~KQ=JIeZ~QL$qa1I<6&Mj zaTXUr)a(1}I*Q!G*m4g|_QR{%a^Y`AW#>)$A&>ie^BO{e5OCrn?GFE+Rv@Ag^EU`iiah6ub{;;3C+CM1TurgFB7(lr6yYfXKu}LJHz# zxkypzTznI^+R(_QvXVUOZ`73~Av>6!RPlyqh>KWV!fKvXV9yHz4f-JX)cE}RrpWVR zGfgMjo@;^|O5`RU0xauz1#wDz2gr7y$ZtXA)G3+%oX@GZM4V@i)h2Cng&Z|?K%YoT zS?I?jOot|)1q}zvt5D=2R0ZIqpd~pWj4=9y9s_FjO-3#o$F$H$IU&a;qNSv3kX0T~ zdF4&xx3FqdgB>Pd$k=z8#jKAJYi3C=8Xwahy=hc!+_`1|1*Bm!a?@ZIpu|3EzOQ|; zU&Rp(27w%E{cmr0VjViljKV_Tg#iY8;eaa>9INngomQ>~ZaI^KTvi&}ump-qyofvM zlNg!1WylX60ww(Y3JRRrVs`12ZXeDVf%Tt~ zV^5FPVAR-O8ZDn1qm@$w=a1_!vN~#)URj48+-`KCboI^F$_3^=A;*of0a)q4vf1HyvAuNhJpEn4M!>s`0Lv3YxC~B2f0QrJ{$+=D%!;^@Cw7&SemI12z|(ZYo{O2AzpH)sOty(z@ilZ$ zXBBfdhm&>af`lm)k1#z;aw&DHsGmbKd{owhDAOjiedK1*)hv49R`&E6d`BL1qXXz3 zfMW%3CgsLORgYzy$s)`m4rAaUD~GLJ_ji%h53u?nR@xM6SswI7$i(+yNG(^XR4@od zm~!y0QhX1WE@0Kb6%GgTLvZcz+5*$!XHGQ&cgmH65kA_hoaimwbpv-LJOY11WAHyzp{*0YAyf?mVC@{hIU=V%LjL7iIU$kTp6i_j5Euv08!wF1@l_Vz{_ z<5UI`2?A9Qg6gdR?M4thfQQm}rWOP&kAgtFj(cB*O4&M*73iOp3AzyYo)_pubO6vQ z5VvvGsgSM3B|Kb!4!&ZwISw`vhZl4(!|^6AL9Ztkp~52XK;;@1rQe$U2;iERGWwrPD0LdDOs@?K@lyIf^2amc{frrve4|Es@d(? ze#}<&EVY|ifG`CZa6l)=9DJCCeTXkXEYA#rzWVBW{VJ)|${L=3{rXp(C0EmaLm#tG0UvLn$p1mbG^U4Ii{3`4M@GxgscnX4 zWVI~Sw!%VWx9q6cDn?GriAt@K>es_^tE}c%qH3$E`U+t!s<-N@ZHEidVrx;gi{VnV z+**##w9cq+CtQhETdS&F3eQI8TIbL%vr4!Yoo}5-yUJ?eh3I1IqOS4P;R~nQI~uFA zh0is%z!x8xkM!1ylm11vg#IOdQT1P9`j*yMeuCX;n+-j)1!w&>N6U=*VZ+L-+d(_c ztoH($X3qP)G)Q7U%nE-Oqk&On@4J55-rGz>gs-KK0*U@@;m5LrE-o5Ic6p~Cgv`6a zJQ>98M0&kM21Li>(GF)Uh zTg>JKR>a%kHZO9Am$-9Y>lo*?^BOBLt5g25*3n7ypVx4ltLh8F|Ge6&uvH~;t5s!Z zAzn+3x73Jbb_U~XKh`X*Rfo9GWu<$`+sQ|-d=zZ^|72=fE;e^YZZ5><=7Zdx;MY>& z2QehoESFt8x7^-Qihb_60SPDKaq1?Wo9_KvvHNc1?{et|()~CQ>7Kjs+GiWDQJcw| zvr9-?Ls3&R+#m3EvKyo;D1^S(FY=>uO76K=Z4-FCt_aS$ae=mea(9g##g)nfN2BzJ>!ufNl5C((MxZ}Xib z*%w@LAKO}wl6G?K@RjwQFxgp;u=n-DAeGJD(R96AVprxQNg^^$dfu+^nb%I@!wvTe z$c2rY?t?uJhJ>3YYKY>Fh6TCE6iYOp9-5hiebP-g^p7wrh~4`ikb>VKRlVPIZ}X1d z4^v6>?)stR%?Sl;@tDV+OqKYr1aXSxxAHHH-r*z!jBu|IuP!m+`Z04QPhEcKha@cC zn9?JTu5GvxhcUTvl43gN$It=HI&yoy@FSjbA?KO8wlSp;!vsA=dMu5PXQY8;X+XYH zwV??YSKS!I!=tN{%@H$deeQ~WOqvLB`jEpF;!o}4&=-xSyP2f?rhBK;;caL`>?-$w zUJ=dFqK)G~T}%r67>y(jkLGvsjCi5`sn_o8x>p!Hk_+<);gde0k58|<$;dN7*jEG!bs*j9DY^nf688 zrl>J~Y&0mwR8Zsq)j;cNLp?QyCexX*YcP{p_!XG_Gwaki)paek9%Fi|@L3w3Fri|?=!D3`mHZq=(z+I6os(1r`A8buGdp33I2 zL$-DJ$~Adh9k(Js41y)>%{e$Wr#Nt8RttewD@ zS>4OEcVwOoJF_H0hFJL{E7<$k&aV6=f{o|3LtjeI6E9)lm#?miBuUq!2rgN99%f+e%4pUQ|sy3QW<^pvy7^apK-iA2@uV( zG};vJ#t&&&2?fDJlW*XkRC@%iqH|869bFSTWQb*IF&)9n*f-?Q(gMD1lp^WeWAj8u z+v%2u<$+Fm$LN2@tEAOojoE3P71M=uFH*eN4Gq zZA+C#{rJO)*Wm6rKXE7d$yK;pDD!N`!T#acbCNG>hjQZI(qx~<&CE)VdORzCFj5uH zwi{-)tO!9jp`W8rBQD?!vl1wHYUbTteB)Vr>j|B%SVWbXSWl1w>^}b8e-3~DtN(Zt zxsWMSw%PcW_%j+&RW_5`0PGcsSPTyxx4AMKahXVwAIcVU1A2}DY&*}$V3$a{vO+uQ z9eJWbbF9cu_h2#yBFze&F!57C`It})l4nzJyVsfJ(t`d#n zi0{lq49zT}nCX#d;y-1y;ySgygX$d=`6jBeSvBgqVOqKcJG8&9TaIp-YxuHZjcewz zzGzl;^cjYJNmmwX)@jDWv%2-0Vn6%Zlp<8R-kL<4r99Jn%qT-C3-GKr{voO zi%M~tN;~i7+FGE4+SqJVbLUMekXe*nP*fOCR+^=bQv3}KC;&C=^32YojJQaB1F^3wT%{}oyxigZzxjj~?HfjWkxuizjZI9W$u zG?q~t53T<_UYj22e4yBd>cg+MN!!N{DLtJnz2)~%<-Rxfq40D0+3JFPjVZo`9!s6^ 
z0{mrRV5ByD-lc&tERqf!oT0cpE)9LN;pFyQPyt5JK2-F}iX$@-uTz&p)%&O#rV@wv zm^zdso>Be3#~%+-ElR&L6G^Z@GNFkgRmulZ$A8ecD4##ct6{)hUD<(J(R zL9^N@a}RaWBY%qOIYeldOt#D{ptu|rT0n(JzX%l)62#lWyNowsM6|gzLWvb-b`~eN zkWgYBBaoC~`~q4vyeqs8bcsHi)3F&ra;vqV@Zlm%@C+cZ!iRnhdPYqbnM98K-W}t; zJi7(nBFuLu>IEVIVgumlvm=6G+atv25;VRx#>RkRIoMRu3S-7VU4A3f+qnK}^ZJeJ z-~Q`wf?hj*dw&NBbe@?b1Alv(aw{C0D=slL9y*DYF>mkx3G*1|J-5SsWY>VUKvMvX zJs&V&)Hk8$;yFY@m;e_|Iw@EE6Bbd!I{jFslQYwIaK8YBCy?Eb+$$>lVVC#DJ?(^d zIzd|@N`mbKfXBnH<*6>4@InmonDDWfhdbAs*Jq&HIKjku0WOL;fU^zyGyk{uAj4h) zQzMVP2PfU{^?>~6mJAb84*FmDY{4VoH1$2t#S{n;qm&1DSPEHon@e1*s$L=KXJ&d4Hszg1ZdSB?xDZok;v~j7a~e$gkk2x(fS* zF384U^m$DBtVQH$+T7-d;1srg)2aOSA6NeUonIe+{H{6gggGb->_L&~k8Oe)sW}7y?J8h# zij#7rgU&RBwwyWee-KRn;H^4`#FjvjA=V#Xhv`QOQW=?SD%h!}5Buzs6K8pNrRH-)5Q~i-4i{I0J1Yr^`JF)Oa5IBJVhY;M* z99XBoV(LF2wNYPT(pOBaVc}H!=gTU4w1>si84~WN4AIDM4=~FnDvE}qzA#^Xl|II8 zwT0y65inZpY-sYm2fB_dMA!ZS1vJXT3XZpgqs-gIfpcn{(@x51bx;Oo)K4md3PM!D zC(ucrXoUXVvE!sWGnGe7Z!5`O=5On$CzqgmMOluz%xrG+7*6I>I9H;aJB&bGXU<#p ztk6dU$S34C#WejEya&(kMWs8s519AC!~yFIy2v7b{p>DA(UwO2K;7xgTs2bXZQNXZ zq8#%a=U7?YG~qtN7b*;giIgf@!QJ7WYq}4WFROH!?%gdK*Qlx74j*tPxy?}Ck`>e% z1b@_(hS;ahr985DGYK`>i|A6NOf9Asgc2O6BJ3&f3R)i%jKf+g;A`3v9eOtm!Arv z(0EA@fXqrt+mYf*g@ZFE!Nrx2V^WAXvk{D-Svg;d@o5sQxOZ}`iR0YIT^N5hM>&3T zhp-pH9+l{~E>5qUXbA3^Fe1;oq@a7C*16|L;36CRmGQ5r5YuO7h6uh(I(8(;O=YVy z|Ddt+qZ#o};8^Cokq>(lUPF^~$b`YF W`Wi4DujfDO##7F#VHQjP0fzgEqQFs zjM6=FV%O+`!+}F?95_+(aX$h@apfyeGzSix{1vET&F}R{M9wZdu5NYrd;PlK>-T%_ zIbT@`EPQ|X&tC_BTCuEu(Pa6nVR9Euy+p??&PG$tJlKV+U~WP_0ZxzZ@jd4L)7+G_tuV`-USiN z)=!8M@Fplt(LQ#LS??mZx2rh1PbTA$*y`Fkh)1Jn z7#F)jYJVt-NXFTImuc^pzk2Y~N81<$58@);j#F$nWh9U3^`FE_Y>UxgdomXCAW^yO z(^1QxkI7v$wS`VtJ;tq`%~{V8F1JOEJHp$mb65DHan%}lyvDtkF0LNT>Ge4sT%eNT zL~Zf_iZylfp}F#-_)y5Bd(|W?M24HFa3J$EgkIrT=6#{m+o76_$GI$$Y#8pu{r%^$ z4Yk%%*l> zm-aE+wmxNBT}NL~@qvg2xr`PEN)Ov~tsu*H9SIk*WSf?}d{W@tPc7NO^xyAp$~-SN z({zk`$T-Q)4RbC=YE!}X&}ma8X@4X)#)rC&qe!h)h){G=VEfExM;Dh2wK3<>wV{l; z>eFj3J{JRO57E>ObfvXteN1Yx85BEVQ>SEwb8KC;N(bI+{noyX(Y7VmzziQ-A%%G~ ze*@?VpV-yGk&wBHM#;XAb;DRG{uaiS_CDPuf(eZFRd)$g?Jo>RGez#?+W}i=E<5Th zE45(T>bA5WMOmDRDAGX`r8%FBXxxgTXOnm|r_`f}=Y3jM1zx|OiefkCl3rV0rGvdE z#*ZWcT9QrVW$M049lg*}M^}?Abkr1ezU4C8zRv7g$PDFkz@R8{m)HmBz7!bVUIQR7 zZQz^-39pML(XJvUVAR5|D^^6C)&{oWZSdfw+gs&LiUiu*PSX329v}U_`rQbE5Z})T z)v3tH5P25ngYc7kAvmxaekftd=egVu;Wpv@JcBc;A~_J@BN5Atumt{JCkm!D!X68F ztXc?D;qBd`7^_>Go3VV69BkxrxEb%L&3A6zd}rh4d+*=;;hj&S4YN}RJ8{w9jZ|_Z zw8zCk(if6E`5Xr6J;<{S2LFfwXN9%LN`%yog^|sjSN05Udcv+)a9nHVRiHGdm=pL=qsZW$BN zjK0@~3U4G?(Y+Nufnc*5lKCowR=iici_KJg?&dH3SFDPvL9%>$h8+E9}b0fM|n2XjY+1S zO@tsXm0}Dfw0n?<=eoXFcJoZAZcTn0$H?!X(=I8k>v-fS;V-n03r4i`MikNQs4pQ< zL_3qgK$yMiIs{x_Teq(u=-FrMXzpj1U6;9M9>x?7k1oyee15m~Oe#~l#gV+Dxh?vX z!4@CEi-PJe(9~Vnpg=w=*s-nZKev9v3WFrV!xZ@qNMf@ZP{O#=0UyTB39Z+-JO8bn zumQ&4wq_n-a0;Z&>Lu1V0@Acr)(U&iFR>GNLs|36<^EgNS3tpfqX=fr5;C~W8?S8Q zUYvIs_};hGW4hJr)oa$&!yQ{CajsMd#$D?;vt4>+eQ))|{@9uN)5bIa-&$EOea`j? 
z=Wbb_%MQ$rdWkSPy8vC;v^nPuzL2+tq_{r2ST@TRcCHkia)mcvv1z+(Q;>~s!;A#S zd7cdilgxN`m}H?z?*073QTN2-wL!@^UX9CDD5U}AYl z0dJHm74{Jq0Tx{5wH^kd29NT)aLZHj*Hg;CPf$^@v37Wp>iWJAV+@9W{>$I~{?GCs zKk71h8$6P#7C;+NRjPYQ2Z>5Dr1-2)@P$Dugx9dt2OyEEcH=QfCh3@BV+Z%qZYDr> zvs|XA3gf~gab1hY(qtR zeFL3#5E_jkjn-9-^i84~OPFe@K~s7FMd2**hsw)}<}U;toI#MOijvnx-t7lFWo$8(s2ofM~M&9!y>M21U1il1O?ondV->IHU&vNZ2 z_oB~K*X7iyQ>V`PPHoa?*czTc|II&iKYL!&{+T|CPZ=NI#hd;L#MgWlY7uL*NN?*= zsa=YUwh@)v<;ZNCjN0|E(ypjqE2_4us;(5;QLSB5Wh1Odjdnwo%i%(_*j`j+GhB+6 z+si0dd@EduR@<93i{()z(53zFCirM1cL4|&|-vV3MF;Z+e-x=|$l2n9+<(uDt7|e3OUW&EbHH-5^aw zhlD9UUmbs>|AGS7+RWG5y3g7rU-wJC;g@}rRDi##Z*jx7{n{hOjU$a-)YiL;euI~} z$&IdlLGu?-u5gQ6^p#V8kz4)}ulmb)SGeu3@|wSf_Z;5yc%K2^XEClm=5Ybv&!PS+ zyul6dd>*qnKhwI*f5Ct8VWYj^UsPW&`Od?7d(r=@|MEkvz2v{bm$z5^S1}5|G!7h9 zscWkKU-Mr>|J9#qhSpxg)~w5On?ZEv)}2#&_5QA+X;kMFl1TQ1B@mVs(>uy_tjQP2z!hEwIlE!v(c*8SDuG<8*6U_mI6S(?eP5}Q zcuk(-;$J_-bKqtDQ;avW1hS=B&~bb|$oguA*)ZZd&++Q45dyhEjP2oA_(Yety?ri} z>hEy}J)q5u`>i=H>tDa*G-rH>AWAN-0AdrXJ?R*rNE5i zBx^YzrcigGVQ$X7APk+<+l4755qBaUC1T&%>jxof!USid=lIE9tc05!1t%SFXlIU5 zQQxQJ?26=o$8MUzI;Y0AnSa69q4B&29i3$0^n?es#lDmB%;CFU=tPOn3z{cgUA_ug z;UH;=7|!_tN$SP>(4g=l4qHp-x9@Vn2FTUAiuGjA{-n>LenE$irlk%}I5mo?6qlSB z%)|Xlb8A=ga|d3W9EF30;ux=!d}+i_PgEwV^I%#$qSYZRYvjdjNBBI;UjeUZ+20KpUx; z%+JT8LAI~NpP86C7c|#&m1pxmeSYTVD%$ATllX25RfTTk&5qUdI1gZ#bVgMdr^`L^ z;SP0$)ldUY3(m^il;of<(RsqD4pOKiim>ETsR?=Sy? zo?pM+WMUaL?>~NmN9wl;Vi{z<8`7ZYE3HXHm9;_?l+|ejlMAXi4Lq_|2pV$vDbFt} zV^=I2bG}Yi)!8KPA+5=8h3rXQCwuPt0J3Z11tKpJd5MTagkYw4naC?d>L5)^ zmQwRMU8#JkO5&#)B0ld*5p9Kq4A*C z&(fAy!@Sb|;aDr&-}vmuYk%{>KOOwp+fZRC&3r*(Hg#!G)Dz#NkDB-zjjF3HA9Ip+ z05pOFfJ8`Hh%_j=vxw*}(ffyy;ItGHt{%wyi_&uVtkX+ctdW z>q@_~mX}dq_De%1UPWK?xT3y|6YK(Mdh(8WS>Sc!bie60cCR?DX^J->hIo_6H$XO& z`aeSn&%eIitcq`;ATEMP^L7w(PY9)5GuN9m?gd^d4O+6SB1Y4WWUj0#9J~R%(A1S; zC5v*k&UE`7Fx|nMu7Hqc&$BgVJ*n$U_%*&fc;#&E&TX4y4`3-c*&VX#$+Lh060+RV z4H(o23F=T2c5d+_cEZT2k98`2tRJ#N(tYXU(xGu!J|xSATO;S^>nEkd%1A#peK@z{ z3clH~HL9GjLu(WLz%~x+aJ=k@9X3Xd?ImCDVazYt zsJ6ZQbM_%=%-J<-swdb~Dj&{@3y{p{SRdW7{htKKab;0HYBDvcTk z?NlK*b~U!+M)=&wRK7Lnv9>$q7n%}dH#TRknRA%r+`K?@us{8%!h^2w`XW4aNM@oCvoG!DR(D3Ic5iI=3n?iyJ6OgMf+h9||@-CU4g~pZo60 zSdX%n;}jK`5zYG^!nyLwCVwyhc9oTG=pkiBR3FAEF1q*umlXnCu9xQC{o6EI4y@Ew z0Qq-W<+yNAhAqupQUx+(rN@DTGa-%LfbYpV;jhVUgsfol2sS2dT6@qNCc{*k2oK0; z043D*$U+_$2pWkGXlARw=PJ1+dsnjClDYW;*q$`NJ(VoL!YC<~wt{LZPm(3HosKSl z1b#!jDTO&}xy}|!bwn2OgQjjXTQ~F-w!mz*#4J{41~Z>ngbAJ)O#jj@=|8pfgBQ;B zl4E_t`x2JeXr3d?L@6gQlkK`VJHwF5b=SQ=^ulpV#dZCpZ*WCuAAOe zfLNNL>&>S@kOfsh1^6>Qx1o*|UH?)q>A!)#{$DTa|Az?ug$d)1 zdQJaNyRQF--q8PDU(o-}T+~0;7Z*NXc*R<>Y^W8gJ^w_|+#S4g4@JE1;#B~7{>qaZ z+vTph_d&s#4Wmyon~+3 zVYl*jI;be2)p?&TCiW7sqkK0Vd$`G4u-i&R?+QTC zl{ctomHlI#3yQYiU|1$MAHRza~{)_vShyQ#XMPL&yVib1}Lp=5i1Onh#G zWB{`SpB^II-_(vM>>7EH9|0^KmQgMtq?ayeXqg0g69L$uAP+zbaQlb?yM?utz3adY zcM-;(3Q$P5C-+ys*R$sub=K+r{-dIg|74uI7VcULew93PaphwMv&(uK{tki^&+R02 z1pXlq2tI!aqu30Q$@k6-vjxe|j14ydra!{bN02T=r%%WT31{PC!Nc_)tTRkfbsI52 zOvf$LEMU#$9<rccU1Ol({Gxqct!lpD)^v;qC=t=DC(poPpSf!AoO1BB5H>!vw)Ry@A4{Z=PHri>D6FO75b_S$B2>IUK|=+h8gYvtLMkEbA@VnVJqD>sZdR@p)Oxd=|HhYX>m za9JzQ4-{C>Glu0^GMslw1s!3zTBsMihpiI(g8ru`&lOrwJ5>MbNY!Eyd+?ts(V%XN y`6w>NQBC=9wV;CHl_HcAvaaXB?*=8BDcwXk%+l+Kul1ZVv$VprpP1**v)=+ZxV~=y literal 0 HcmV?d00001 diff --git a/mmpretrain/models/selfsup/__pycache__/simsiam.cpython-310.pyc b/mmpretrain/models/selfsup/__pycache__/simsiam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..134b7b2e83811e181dbccf7c0fbdaef605d2969a GIT binary patch literal 1596 zcmZux&1)n@6tC*(uj%xT5=SMVPI}r=&ZAUoK9(OO)ZMFm35{a*gKh>J5$H-?99!*sRw5#bu&NjPP>$F=PI}+ 
zUy{^I{nsS*dH1q&Nv9jBH6erU8_-S$mbUjrQfvD_sQT%EYWLgk_MaR~;1uk~bv%jl zD&ub`%2K`gMXdOQXR}FDad{$CDHBw)K71e^!Bo4ja5AMSnOZ3Ymo08{=cT&^uEWWL z-R9sF@@Cc+nYrlvYkL}`9ysW0ZzA%Ei1YK`SBGH`Fc@E${9O+a$04j z6vdo@DBQvp=Uj32j8_u&8`%RM%L2rVp1nm~S88`Oish*|8J2QBijUN2`~G-59N&Mi zv-9vU8nWjL&Ps^(*4Rv(qM>Z1iop#x`oIw|mO?s9$%JB}V28Yzi-M09WqGVdO5_Us zijW#sXNN=cOuM3}np){Fg@i}SWL#-i^16{l(t$MIj14#-JidXcPGPA@OKW;*Der6Y z6K!cqx5$MJJLL1iX~~rZv(wtw^m=*c-twGVp4XC>_LbARt#@r*_$$h5U7)(Pn|4~i z?S4r5P_LiqMbHLN9VgxcOki$irTj6LDVsG#V(I~5*D@BE=2gM4XpiMw?V5Oz!m*5+ zeTv9?0Vzus!{@vJNc}L_HCZ$p%(H?+IaK++6{1<1R9nr yiDN8BGkXqgd$i~;g068dUK%a?%cddfhbu?M z4TINtn(N07?sTFSZ>^>Sr>5M{Y?KN{yznuM{kf0=;T?3${X z%SqA;l2neuH~_=>@S|(jKez?TyB6l*t#CAs#Unwotc3l`r6J3zsjVUYhZjF>~!pqi^yRzWT7~*J_Nv z%-4iBtn*W%fx0Q2KEn#9F~b|S&KP}%pW$a8+WrE60h~5JCl#{m_LP-Xq10M3RT3^iJCE=)QH&>)`qC-Y>3+EE*(HGU!~r z{_*WjKb4(#(quP{C*`emHbs6fl{?uLy;3GRVZ!xdHzOfK+1rkKVchvJ%ytCte3)_( z(_*h}=lM9hvau1$`_b-tDhC_kR6Pbm z2T{H~*;?Cy$lx&Qp$R!aOl(4ft-qTBLKKe86_fZGM z;Ie+cc~h;@Tv*6n(z_bwz3old-z5%fvhuyLhz!>-l)Dx7cDB+)C@T@Y9c68ckSlM# zi{NUJ!$Jcv#xm`R3`VBBC>c-kAd4Od)#mBFB+~{QK(0Jh-U!M84TY+ckqK#9ys~4;PdQ6WZcW>H zZFs8iie}Nq{%vlb)xqY#nJ(&)Oy<=l0_J+;rbpJ1 zIbF)n6-#hfI4EB4z{ak|u!T8GS^|&T=Zq;<)pfIIaQo-b$=&PRfYa1ByN)E~$`vYB zsaT_e;)6U#1zm%r$nuDkFXYD-dGM)8EOh*Bp0v13EzO&--l6E_smxUK7oiMCA{SE6 zsB3%8(;bmyT0N>hmxc`1mSHjwssb6b230+U?MDfZ;0==SOF7|qEFh;tq2v8k~nBYdfOk?2A7^AUd=AHVqUzw^F1id)SvLN`%czk(7rfI%08qK}`Ig&zMwn6gZ z8jquoJdX|Nt}-8m(_-ecpYzzMI;Lwnro*UozjG>{Nk5mZpv_F{y9R4B>v#_FoX72| zYc|X^wuI8fOwY6)J!qAIeQrB2rtNSnl*7d?E<)7?B0+p69a|7&{?PT1s^7tzJwQ>I zpIK9DYEPXhV#m;!y16-Ih%9d5t{6oH(WNpZaI74d(^^pjKw7_H+`?GY1WAW>!QhWI z%yZ`RKvxUK?E`BF5XCG4)XHJ4uqgf%?vVu_bi*Fpi;$x;6Bdv)0|i2>x8)$aqN6#r z=+Q^)oY#i4ehbmId!=)GTg>bn_?|LK`EP6SN;S;H0+f;mKNGoY%6DE>kuq*pt`M zb-P=UH&H7aek|@QdpC?HqU%X=Dfue(SR}uyhPaee=JH3xW02uB-MN*fv2wFsm?Me= z%G!;@JxqU{reC9icF|HO^Qto6QRYo$exl62szm(0yh^>gAj3qgg#4d?%E;jxEyp~E zpJle-AaDrNdfH&@=>xBH0UE3veeIcQXml>|H|tU4o4h%Sg zM|=R}>z}cyH?0GrD-%S zjw*WW(utY1!)2O-9o?bLP5i6_rYSh)0h_vaFG9+dqK0*wpjH9AW?^BaR=x(nwc*MO z#R6Q=315XG$c2#j-3uj>u$ei+jXFB70;>I>PnLS`oG0j*`BZ6>b9edyf22fec2%deEE5 zAjHzULM&02KAx&lfSVzli)1pQcrN4yt$T@9tBLz#IAsuK zrHk74wuKaOlLov;#YHN9O2u)OqbxdEw_UpCr_@J4rJNmH{~+^EmBJzKK+ZaceIz0N z(#eigMuRBvb!IDg5RC?^abj@hd(Y14FQ4oQ@|5yl`37xzoL682`6f6%4&0;VbT$RL5d?Q9VLa=p1_4idL7+b}C7~>(cM=gT z$(7xY(=gZBz$|_eUP=0Z*RfF5j?qC|igHThlRDX?I3*bsS`W=xR?-u3jQyUe<1HFO zAyc+cEa1(^>|DcQ>^Svsbvj{v%b*=rVNaOF{_WW8Uyj2bvnu;1bJ;&wjs2a~+25Q7 z`<69Ze`Rl3PoPtzBU+N_-8IaNU T!D%F54Wwa>vION+*3AC^QghQ9 literal 0 HcmV?d00001 diff --git a/mmpretrain/models/selfsup/__pycache__/swav.cpython-310.pyc b/mmpretrain/models/selfsup/__pycache__/swav.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93af9d073e150ad8a895bc9a91ca9da9f3477371 GIT binary patch literal 1863 zcmZuyO>Z1E7`DBhkDY8vDNUgVq){cLu7q}@9#Dy*(9lo>lp?XELR&?X$=JJ@WIi^w zCvB?P3n|AQkT`Hmj{FgRge6X#@)wXwdEeO$-J)8u=k?d?$Itt`o^p9PAW(jL`N!yn zOUPf?Su758?m*Q)LL-Qv8OiC8=4{9)_F2{%w&2^!c(hV7K;y^J5co+G?EOdAVVf7KxUFlXWzXJ z(82QH%O!xqu@SpYMlOPX&WC~U0HAOA;K}BrXMbYb3xW{JR|qDTg^5jC7GXIGcYY}J z9E%j;eHp7FEyg(WDAiS*h4*A^DkXKedmP>^3!`FfU`6BZZdPd{Rk*2jIxf(!4%hdL zndr5CKUPoEqh6`T{diaRZ>_F=(p&xX_U&66JJB4?!#x=uR|3ht{kGhteUJ3Dzk zQ4$VHi@q$zX(9W2WqF|c`bm5gq1l@p@9gv%2-Z!DNoBO{2w*$X4c*$h;Gj}P(gF@I z$`Tkz?l(~Nuh1A$Q$tUf_P!=RQNvDKhV66sI)V!J9A>x32|c5tCHM;_93b31V^c2N zsWT>1x8_@9du8g?-UQKJK_I`#?oWZWCU%RJ%?VnLfYH#Ws-hg%O+JzanCi}tZ zQe(GGr)~@H1)TFe!7m-7H$7QW%)-Wvh7W2P50qFxK4gyhnUgy{1p_)Qb+H_6A7u!daKrH 
znEIWl%dqqBTYXiQroW)jMY9w#1DQg8Lryhv1*O_xJ%cDR2SosieEYCz7C1nM-Q}EUiHGfDr?&~hwAk}H1Oa@=g4hBNCXRj#Yt zUa5){atwSFS|*iAkEC@?BL(a4#>v4haA#XdY^)nkCbAHg7cx1pd{4$gU4=6(N7lPb zmgCJ-8)4uJY~2IU=vWWisdySCs+?%s1}w@%Fks!XGN4`SfzM`uM=&z;=puZn8%}4< zyXX~9TY;)?LUXCZ8K+k$qx>Jn7*zg}b54Ew0S%}NwZj6+>9h8KL_PqJeiRjPE~Cf> zQIwaW%Fyma(ILEM^BFISM43d9!skJ~4~=za>7nrFp)f+W1qy5sEucgVs0J!NB>13c zWWpz$UG+Qk(kq4I-40NEJb@f-JT08^-fOk=lpI66)UiTs+LeVF7_E}#(QV*Zz6*a1 o<>9h-X1k=CU#&UsZ2+ymKD+z|Dn?&|<}&qJhjnO&x~*&UUy20&sQ>@~ literal 0 HcmV?d00001 diff --git a/mmpretrain/models/selfsup/barlowtwins.py b/mmpretrain/models/selfsup/barlowtwins.py new file mode 100644 index 0000000..4c75cd0 --- /dev/null +++ b/mmpretrain/models/selfsup/barlowtwins.py @@ -0,0 +1,42 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List + +import torch + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class BarlowTwins(BaseSelfSupervisor): + """BarlowTwins. + + Implementation of `Barlow Twins: Self-Supervised Learning via Redundancy + Reduction `_. + Part of the code is borrowed from: + ``_. + """ + + def loss(self, inputs: List[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + assert isinstance(inputs, list) + img_v1 = inputs[0] + img_v2 = inputs[1] + + z1 = self.neck(self.backbone(img_v1))[0] # NxC + z2 = self.neck(self.backbone(img_v2))[0] # NxC + + loss = self.head.loss(z1, z2) + losses = dict(loss=loss) + return losses diff --git a/mmpretrain/models/selfsup/base.py b/mmpretrain/models/selfsup/base.py new file mode 100644 index 0000000..9d53a72 --- /dev/null +++ b/mmpretrain/models/selfsup/base.py @@ -0,0 +1,179 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import ABCMeta, abstractmethod +from typing import List, Optional, Union + +import torch +from mmengine.model import BaseModel +from torch import nn + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample + + +class BaseSelfSupervisor(BaseModel, metaclass=ABCMeta): + """BaseModel for Self-Supervised Learning. + + All self-supervised algorithms should inherit this module. + + Args: + backbone (dict): The backbone module. See + :mod:`mmpretrain.models.backbones`. + neck (dict, optional): The neck module to process features from + backbone. See :mod:`mmpretrain.models.necks`. Defaults to None. + head (dict, optional): The head module to do prediction and calculate + loss from processed features. See :mod:`mmpretrain.models.heads`. + Notice that if the head is not set, almost all methods cannot be + used except :meth:`extract_feat`. Defaults to None. + target_generator: (dict, optional): The target_generator module to + generate targets for self-supervised learning optimization, such as + HOG, extracted features from other modules(DALL-E, CLIP), etc. + pretrained (str, optional): The pretrained checkpoint path, support + local path and remote path. Defaults to None. + data_preprocessor (Union[dict, nn.Module], optional): The config for + preprocessing input data. If None or no specified type, it will use + "SelfSupDataPreprocessor" as type. + See :class:`SelfSupDataPreprocessor` for more details. + Defaults to None. 
+ init_cfg (dict, optional): the config to control the initialization. + Defaults to None. + """ + + def __init__(self, + backbone: dict, + neck: Optional[dict] = None, + head: Optional[dict] = None, + target_generator: Optional[dict] = None, + pretrained: Optional[str] = None, + data_preprocessor: Optional[Union[dict, nn.Module]] = None, + init_cfg: Optional[dict] = None): + if pretrained is not None: + init_cfg = dict(type='Pretrained', checkpoint=pretrained) + + data_preprocessor = data_preprocessor or {} + if isinstance(data_preprocessor, dict): + data_preprocessor.setdefault('type', 'SelfSupDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + elif not isinstance(data_preprocessor, nn.Module): + raise TypeError('data_preprocessor should be a `dict` or ' + f'`nn.Module` instance, but got ' + f'{type(data_preprocessor)}') + + super().__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + if not isinstance(backbone, nn.Module): + backbone = MODELS.build(backbone) + if neck is not None and not isinstance(neck, nn.Module): + neck = MODELS.build(neck) + if head is not None and not isinstance(head, nn.Module): + head = MODELS.build(head) + if target_generator is not None and not isinstance( + target_generator, nn.Module): + target_generator = MODELS.build(target_generator) + + self.backbone = backbone + self.neck = neck + self.head = head + self.target_generator = target_generator + + @property + def with_neck(self) -> bool: + """Check if the model has a neck module.""" + return hasattr(self, 'neck') and self.neck is not None + + @property + def with_head(self) -> bool: + """Check if the model has a head module.""" + return hasattr(self, 'head') and self.head is not None + + @property + def with_target_generator(self) -> bool: + """Check if the model has a target_generator module.""" + return hasattr( + self, 'target_generator') and self.target_generator is not None + + def forward(self, + inputs: Union[torch.Tensor, List[torch.Tensor]], + data_samples: Optional[List[DataSample]] = None, + mode: str = 'tensor'): + """The unified entry for a forward process in both training and test. + + The method currently accepts two modes: "tensor" and "loss": + + - "tensor": Forward the backbone network and return the feature + tensor(s) tensor without any post-processing, same as a common + PyTorch Module. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Args: + inputs (torch.Tensor or List[torch.Tensor]): The input tensor with + shape (N, C, ...) in general. + data_samples (List[DataSample], optional): The other data of + every samples. It's required for some algorithms + if ``mode="loss"``. Defaults to None. + mode (str): Return what kind of value. Defaults to 'tensor'. + + Returns: + The return type depends on ``mode``. + + - If ``mode="tensor"``, return a tensor or a tuple of tensor. + - If ``mode="loss"``, return a dict of tensor. + """ + if mode == 'tensor': + feats = self.extract_feat(inputs) + return feats + elif mode == 'loss': + return self.loss(inputs, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def extract_feat(self, inputs: torch.Tensor): + """Extract features from the input tensor with shape (N, C, ...). + + The default behavior is extracting features from backbone. + + Args: + inputs (Tensor): A batch of inputs. The shape of it should be + ``(num_samples, num_channels, *img_shape)``. + + Returns: + tuple | Tensor: The output feature tensor(s). 
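+
+        Note:
+            In this default implementation only the backbone is applied;
+            the neck and head, if configured, play no part in feature
+            extraction.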
+ """ + x = self.backbone(inputs) + return x + + @abstractmethod + def loss(self, inputs: torch.Tensor, + data_samples: List[DataSample]) -> dict: + """Calculate losses from a batch of inputs and data samples. + + This is a abstract method, and subclass should overwrite this methods + if needed. + + Args: + inputs (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample]): The annotation data of + every samples. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + raise NotImplementedError + + def get_layer_depth(self, param_name: str): + """Get the layer-wise depth of a parameter. + + Args: + param_name (str): The name of the parameter. + + Returns: + Tuple[int, int]: The layer-wise depth and the max depth. + """ + if hasattr(self.backbone, 'get_layer_depth'): + return self.backbone.get_layer_depth(param_name, 'backbone.') + else: + raise NotImplementedError( + f"The backbone {type(self.backbone)} doesn't " + 'support `get_layer_depth` by now.') diff --git a/mmpretrain/models/selfsup/beit.py b/mmpretrain/models/selfsup/beit.py new file mode 100644 index 0000000..c301f7d --- /dev/null +++ b/mmpretrain/models/selfsup/beit.py @@ -0,0 +1,357 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Dict, List, Optional, Tuple, Union + +import torch +from einops import rearrange +from mmengine.model import BaseModule +from mmengine.model.weight_init import trunc_normal_ +from torch import nn + +from mmpretrain.models.backbones import BEiTViT +from mmpretrain.models.utils import NormEMAVectorQuantizer, resize_pos_embed +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class VQKD(BaseModule): + """Vector-Quantized Knowledge Distillation. + + The module only contains encoder and VectorQuantizer part + Modified from https://github.com/microsoft/unilm/blob/master/beit2/modeling_vqkd.py + + Args: + encoder_config (dict): The config of encoder. + decoder_config (dict, optional): The config of decoder. Currently, + VQKD only support to build encoder. Defaults to None. + num_embed (int): Number of embedding vectors in the codebook. Defaults + to 8192. + embed_dims (int) : The dimension of embedding vectors in the codebook. + Defaults to 32. + decay (float): The decay parameter of EMA. Defaults to 0.99. + beta (float): The mutiplier for VectorQuantizer loss. Defaults to 1. + quantize_kmeans_init (bool): Whether to use k-means to initialize the + VectorQuantizer. Defaults to True. + init_cfg (dict or List[dict], optional): Initialization config dict. + Defaults to None. 
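+
+    Example:
+        A minimal sketch with assumed placeholder settings, not a
+        configuration shipped with this patch. ``out_type='featmap'`` is
+        used because :meth:`encode` expects a ``(B, C, H, W)`` feature map
+        from the encoder.
+
+        >>> import torch
+        >>> tokenizer = VQKD(encoder_config=dict(
+        ...     arch='base', img_size=224, patch_size=16,
+        ...     out_type='featmap'))
+        >>> imgs = torch.rand(2, 3, 224, 224)
+        >>> tokens = tokenizer.get_tokens(imgs)['token']  # (2, 196) code ids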
+ """ # noqa: E501 + + def __init__(self, + encoder_config: dict, + decoder_config: Optional[dict] = None, + num_embed: int = 8192, + embed_dims: int = 32, + decay: float = 0.99, + beta: float = 1.0, + quantize_kmeans_init: bool = True, + init_cfg: Optional[dict] = None) -> None: + super().__init__(init_cfg=init_cfg) + + self.encoder = BEiTViT(**encoder_config) + if decoder_config is not None: + self.decoder = BEiTViT(**decoder_config) + + self.quantize = NormEMAVectorQuantizer( + num_embed=num_embed, + embed_dims=embed_dims, + beta=beta, + decay=decay, + kmeans_init=quantize_kmeans_init, + ) + + # task layer + self.encode_task_layer = nn.Sequential( + nn.Linear(self.encoder.arch_settings['embed_dims'], + self.encoder.arch_settings['embed_dims']), nn.Tanh(), + nn.Linear(self.encoder.arch_settings['embed_dims'], embed_dims)) + + def get_tokens(self, x: torch.Tensor) -> dict: + """Get tokens for beit pre-training.""" + _, embed_ind, _ = self.encode(x) + output = {} + output['token'] = embed_ind.view(x.shape[0], -1) + output['input_img'] = x + + return output + + def encode( + self, x: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Encode the input images and get corresponding results.""" + encoder_features = self.encoder(x)[0] + B, C, N1, N2 = encoder_features.shape + encoder_features = encoder_features.permute(0, 2, 3, + 1).reshape(B, N1 * N2, C) + + with torch.cuda.amp.autocast(enabled=False): + to_quantizer_features = self.encode_task_layer( + encoder_features.type_as(self.encode_task_layer[-1].weight)) + + N = to_quantizer_features.shape[1] + h, w = int(math.sqrt(N)), int(math.sqrt(N)) + + to_quantizer_features = rearrange( + to_quantizer_features, 'b (h w) c -> b c h w', h=h, + w=w) # reshape for quantizer + quantize, loss, embed_ind = self.quantize(to_quantizer_features) + + return quantize, embed_ind, loss + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """The forward function. + + Currently, only support to get tokens. + """ + return self.get_tokens(x)['token'] + + +@MODELS.register_module() +class BEiTPretrainViT(BEiTViT): + """Vision Transformer for BEiT pre-training. + + Args: + arch (str | dict): Vision Transformer architecture. If use string, + choose from 'small', 'base' and 'large'. If use dict, it should + have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of transformer encoder layers. + - **num_heads** (int): The number of heads in attention modules. + - **feedforward_channels** (int): The hidden dimensions in + feedforward modules. + + Defaults to 'base'. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to the most + common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + in_channels (int): The num of input channels. Defaults to 3. + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + out_type (str): The type of output features. 
Please choose from + + - ``"cls_token"``: The class token tensor with shape (B, C). + - ``"featmap"``: The feature map tensor from the patch tokens + with shape (B, C, H, W). + - ``"avg_featmap"``: The global averaged feature map tensor + with shape (B, C). + - ``"raw"``: The raw feature tensor includes patch tokens and + class tokens with shape (B, L, C). + + It only works without input mask. Defaults to ``"avg_featmap"``. + with_cls_token (bool): Whether concatenating class token into image + tokens as transformer input. Defaults to True. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + use_abs_pos_emb (bool): Whether or not use absolute position embedding. + Defaults to False. + use_rel_pos_bias (bool): Whether or not use relative position bias. + Defaults to False. + use_shared_rel_pos_bias (bool): Whether or not use shared relative + position bias. Defaults to True. + layer_scale_init_value (float): The initialization value for + the learnable scaling of attention and FFN. Defaults to 0.1. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + arch: str = 'base', + img_size: int = 224, + patch_size: int = 16, + in_channels: int = 3, + out_indices: int = -1, + drop_rate: float = 0, + drop_path_rate: float = 0, + norm_cfg: dict = dict(type='LN', eps=1e-6), + final_norm: bool = True, + out_type: str = 'raw', + frozen_stages: int = -1, + use_abs_pos_emb: bool = False, + use_rel_pos_bias: bool = False, + use_shared_rel_pos_bias: bool = True, + layer_scale_init_value: int = 0.1, + interpolate_mode: str = 'bicubic', + patch_cfg: dict = dict(padding=0), + layer_cfgs: dict = dict(), + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + arch=arch, + img_size=img_size, + patch_size=patch_size, + in_channels=in_channels, + out_indices=out_indices, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + norm_cfg=norm_cfg, + final_norm=final_norm, + out_type=out_type, + with_cls_token=True, + frozen_stages=frozen_stages, + use_abs_pos_emb=use_abs_pos_emb, + use_shared_rel_pos_bias=use_shared_rel_pos_bias, + use_rel_pos_bias=use_rel_pos_bias, + layer_scale_init_value=layer_scale_init_value, + interpolate_mode=interpolate_mode, + patch_cfg=patch_cfg, + layer_cfgs=layer_cfgs, + init_cfg=init_cfg) + + self.mask_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + + def init_weights(self) -> None: + """Initialize position embedding, patch embedding and cls token.""" + super().init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. 
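+            # Returning here skips the truncated-normal init of the cls and
+            # mask tokens and the layer-wise rescaling below; those weights
+            # are expected to come from the pretrained checkpoint instead.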
+ return + + trunc_normal_(self.cls_token, std=0.02) + trunc_normal_(self.mask_token, std=0.02) + self.rescale_init_weight() + + def rescale_init_weight(self) -> None: + """Rescale the initialized weights.""" + + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.layers): + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.ffn.layers[1].weight.data, layer_id + 1) + + def forward(self, x: torch.Tensor, + mask: Optional[torch.Tensor]) -> Tuple[torch.Tensor]: + """The BEiT style forward function. + + The function supports two kind of forward behaviors. If the ``mask`` is + not ``None``, the forward function will be executed as masked image + modeling pre-training; if the ``mask`` is ``None``, the forward + function will call ``super().forward()``, which extract features from + images without mask. + + Args: + x (torch.Tensor): Input images, which is of shape (B x C x H x W). + mask (torch.Tensor, optional): Mask for input, which is of shape + (B x patch_resolution[0] x patch_resolution[1]). + + Returns: + Tuple[torch.Tensor]: Hidden features. + """ + if mask is None: + return super().forward(x) + + else: + x, patch_resolution = self.patch_embed(x) + + # replace the masked visual tokens by mask_token + B, L, _ = x.shape + mask_token = self.mask_token.expand(B, L, -1) + w = mask.flatten(1).unsqueeze(-1).type_as(mask_token) + x = x * (1. - w) + mask_token * w + + # stole cls_tokens impl from Phil Wang, thanks + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + if self.pos_embed is not None: + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + x = self.drop_after_pos(x) + + self.shared_rel_pos_bias = self.rel_pos_bias().to( + mask.device) if self.rel_pos_bias is not None else None + + outs = [] + for i, layer in enumerate(self.layers): + x = layer(x, rel_pos_bias=self.shared_rel_pos_bias) + + if i == len(self.layers) - 1 and self.final_norm: + x = self.norm1(x) + + if i in self.out_indices: + outs.append(x) + + return tuple(outs) + + +@MODELS.register_module() +class BEiT(BaseSelfSupervisor): + """BEiT v1/v2. + + Implementation of `BEiT: BERT Pre-Training of Image Transformers + `_ and `BEiT v2: Masked Image Modeling + with Vector-Quantized Visual Tokenizers + `_. + """ + + def extract_feat(self, inputs: torch.Tensor): + return self.backbone(inputs, mask=None) + + def loss(self, inputs: List[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. 
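+
+        Note:
+            ``inputs`` is expected to contain two views of each image:
+            ``inputs[0]`` is fed to the backbone together with the mask,
+            while ``inputs[1]`` is passed to the target generator (e.g. the
+            ``VQKD`` tokenizer above) to produce the prediction targets.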
+ """ + mask = torch.stack([data_sample.mask for data_sample in data_samples]) + + img_latent = self.backbone(inputs[0], mask) + + # inputs[1] is the target image + with torch.no_grad(): + target = self.target_generator(inputs[1]) + target = target.detach() + + if self.with_neck: + # BEiT v2 + feats, feats_cls_pt = self.neck( + img_latent, rel_pos_bias=self.backbone.shared_rel_pos_bias) + loss = self.head.loss(feats, feats_cls_pt, target, mask) + else: + # BEiT v1 + loss = self.head.loss(img_latent[0], target, mask) + + if isinstance(loss, torch.Tensor): + losses = dict(loss=loss) + return losses + elif isinstance(loss, Tuple): + # the loss_1 and loss_2 are general reconstruction loss (patch + # feature vectors from last layer of backbone) and early state + # reconstruction loss (patch feature vectors from intermediate + # layer of backbone) + loss_1, loss_2 = loss[0], loss[1] + losses = dict() + # the key with prefix 'loss', like loss_1 and loss_2, will be used + # as the final criterion + losses['loss_1'] = loss_1 + losses['loss_2'] = loss_2 + return losses diff --git a/mmpretrain/models/selfsup/byol.py b/mmpretrain/models/selfsup/byol.py new file mode 100644 index 0000000..803e400 --- /dev/null +++ b/mmpretrain/models/selfsup/byol.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from ..utils import CosineEMA +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class BYOL(BaseSelfSupervisor): + """BYOL. + + Implementation of `Bootstrap Your Own Latent: A New Approach to + Self-Supervised Learning `_. + + Args: + backbone (dict): Config dict for module of backbone. + neck (dict): Config dict for module of deep features + to compact feature vectors. + head (dict): Config dict for module of head functions. + base_momentum (float): The base momentum coefficient for the target + network. Defaults to 0.004. + pretrained (str, optional): The pretrained checkpoint path, support + local path and remote path. Defaults to None. + data_preprocessor (dict, optional): The config for preprocessing + input data. If None or no specified type, it will use + "SelfSupDataPreprocessor" as type. + See :class:`SelfSupDataPreprocessor` for more details. + Defaults to None. + init_cfg (Union[List[dict], dict], optional): Config dict for weight + initialization. Defaults to None. + """ + + def __init__(self, + backbone: dict, + neck: dict, + head: dict, + base_momentum: float = 0.004, + pretrained: Optional[str] = None, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + backbone=backbone, + neck=neck, + head=head, + pretrained=pretrained, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + # create momentum model + self.target_net = CosineEMA( + nn.Sequential(self.backbone, self.neck), momentum=base_momentum) + + def loss(self, inputs: List[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. 
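+
+        Note:
+            ``inputs`` should be a list of two augmented views of the same
+            batch. Each view is projected by the online network and by the
+            momentum (target) network, and the head loss is computed
+            symmetrically over the two pairings before being summed.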
+ """ + assert isinstance(inputs, list) + img_v1 = inputs[0] + img_v2 = inputs[1] + # compute online features + proj_online_v1 = self.neck(self.backbone(img_v1))[0] + proj_online_v2 = self.neck(self.backbone(img_v2))[0] + # compute target features + with torch.no_grad(): + # update the target net + self.target_net.update_parameters( + nn.Sequential(self.backbone, self.neck)) + + proj_target_v1 = self.target_net(img_v1)[0] + proj_target_v2 = self.target_net(img_v2)[0] + + loss_1 = self.head.loss(proj_online_v1, proj_target_v2) + loss_2 = self.head.loss(proj_online_v2, proj_target_v1) + + losses = dict(loss=2. * (loss_1 + loss_2)) + return losses diff --git a/mmpretrain/models/selfsup/cae.py b/mmpretrain/models/selfsup/cae.py new file mode 100644 index 0000000..67ac091 --- /dev/null +++ b/mmpretrain/models/selfsup/cae.py @@ -0,0 +1,472 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Part of code is modified from BEiT +# https://github.com/microsoft/unilm/blob/master/beit/dall_e/encoder.py +import math +from collections import OrderedDict +from functools import partial +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine.model import BaseModule +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.models.backbones import BEiTViT +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from ..utils import build_2d_sincos_position_embedding +from .base import BaseSelfSupervisor + + +class Conv2d(nn.Module): + """Rewrite Conv2d module according to DALL-E code.""" + + def __init__(self, + n_in: int, + n_out: int, + kw: int, + use_float16: bool = True, + device: torch.device = torch.device('cpu'), + requires_grad: bool = False) -> None: + super().__init__() + + w = torch.empty((n_out, n_in, kw, kw), + dtype=torch.float32, + device=device, + requires_grad=requires_grad) + w.normal_(std=1 / math.sqrt(n_in * kw**2)) + + b = torch.zeros((n_out, ), + dtype=torch.float32, + device=device, + requires_grad=requires_grad) + self.kw = kw + self.w, self.b = nn.Parameter(w), nn.Parameter(b) + self.use_float16 = use_float16 + + def forward(self, x: torch.Tensor) -> torch.Tensor: + if self.use_float16 and 'cuda' in self.w.device.type: + if x.dtype != torch.float16: + x = x.half() + + w, b = self.w.half(), self.b.half() + else: + if x.dtype != torch.float32: + x = x.float() + + w, b = self.w, self.b + + return F.conv2d(x, w, b, padding=(self.kw - 1) // 2) + + +class EncoderBlock(nn.Module): + """Rewrite EncoderBlock module according to DALL-E code.""" + + def __init__(self, + n_in: int, + n_out: int, + n_layers: int, + device: torch.device = None, + requires_grad: bool = False) -> None: + super().__init__() + self.n_hid = n_out // 4 + self.post_gain = 1 / (n_layers**2) + + make_conv = partial(Conv2d, device=device, requires_grad=requires_grad) + self.id_path = make_conv(n_in, n_out, + 1) if n_in != n_out else nn.Identity() + self.res_path = nn.Sequential( + OrderedDict([ + ('relu_1', nn.ReLU()), + ('conv_1', make_conv(n_in, self.n_hid, 3)), + ('relu_2', nn.ReLU()), + ('conv_2', make_conv(self.n_hid, self.n_hid, 3)), + ('relu_3', nn.ReLU()), + ('conv_3', make_conv(self.n_hid, self.n_hid, 3)), + ('relu_4', nn.ReLU()), + ('conv_4', make_conv(self.n_hid, n_out, 1)), + ])) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.id_path(x) + self.post_gain * self.res_path(x) + + +@MODELS.register_module(name='DALL-E') +class DALLEEncoder(BaseModule): + """DALL-E 
Encoder for feature extraction. + + Args: + group_count (int): Number of groups in DALL-E encoder. Defaults to 4. + n_hid (int): Dimension of hidden layers. Defaults to 256. + n_blk_per_group (int): Number of blocks per group. Defaults to 2. + input_channels: (int): The channels of input images. Defaults to 3. + vocab_size (int): Vocabulary size, indicating the number of classes. + Defaults to 8192. + device (torch.device): Device of parameters. Defaults to + ``torch.device('cpu')``. + requires_grad (bool): Require gradient or not. Defaults to False. + init_cfg (Union[List[dict], dict], optional): Config dict for weight + initialization. Defaults to None. + """ + + def __init__(self, + group_count: int = 4, + n_hid: int = 256, + n_blk_per_group: int = 2, + input_channels: int = 3, + vocab_size: int = 8192, + device: torch.device = torch.device('cpu'), + requires_grad: bool = False, + init_cfg: Union[dict, List[dict], None] = None): + super().__init__(init_cfg=init_cfg) + self.input_channels = input_channels + + blk_range = range(n_blk_per_group) + n_layers = group_count * n_blk_per_group + make_conv = partial(Conv2d, device=device, requires_grad=requires_grad) + make_blk = partial( + EncoderBlock, + n_layers=n_layers, + device=device, + requires_grad=requires_grad) + + self.blocks = nn.Sequential( + OrderedDict([ + ('input', make_conv(input_channels, 1 * n_hid, 7)), + ('group_1', + nn.Sequential( + OrderedDict([ + *[(f'block_{i + 1}', make_blk(1 * n_hid, 1 * n_hid)) + for i in blk_range], + ('pool', nn.MaxPool2d(kernel_size=2)), + ]))), + ('group_2', + nn.Sequential( + OrderedDict([ + *[(f'block_{i + 1}', + make_blk(1 * n_hid if i == 0 else 2 * n_hid, + 2 * n_hid)) for i in blk_range], + ('pool', nn.MaxPool2d(kernel_size=2)), + ]))), + ('group_3', + nn.Sequential( + OrderedDict([ + *[(f'block_{i + 1}', + make_blk(2 * n_hid if i == 0 else 4 * n_hid, + 4 * n_hid)) for i in blk_range], + ('pool', nn.MaxPool2d(kernel_size=2)), + ]))), + ('group_4', + nn.Sequential( + OrderedDict([ + *[(f'block_{i + 1}', + make_blk(4 * n_hid if i == 0 else 8 * n_hid, + 8 * n_hid)) for i in blk_range], + ]))), + ('output', + nn.Sequential( + OrderedDict([ + ('relu', nn.ReLU()), + ('conv', + make_conv( + 8 * n_hid, vocab_size, 1, use_float16=False)), + ]))), + ])) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function of DALL-E encoder. + + Args: + x (torch.Tensor): The input images with shape (B, C, H, W). + + Returns: + torch.Tensor: The output with shape (B, vocab_size, h, w). + """ + x = x.float() + if len(x.shape) != 4: + raise ValueError(f'input shape {x.shape} is not 4d') + if x.shape[1] != self.input_channels: + raise ValueError(f'input has {x.shape[1]} channels but model \ + built for {self.input_channels}') + if x.dtype != torch.float32: + raise ValueError('input must have dtype torch.float32') + + return self.blocks(x) + + +@MODELS.register_module() +class CAEPretrainViT(BEiTViT): + """Vision Transformer for CAE pre-training and the implementation is based + on BEiTViT. + + Args: + arch (str | dict): Vision Transformer architecture. Default: 'b' + img_size (int | tuple): Input image size + patch_size (int | tuple): The patch size + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + bias (bool | str): The option to add leanable bias for q, k, v. If bias + is True, it will add leanable bias. 
If bias is 'qv_bias', it will + only add leanable bias for q, v. If bias is False, it will not add + bias for q, k, v. Default to 'qv_bias'. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + out_type (str): The type of output features. Please choose from + + - ``"cls_token"``: The class token tensor with shape (B, C). + - ``"featmap"``: The feature map tensor from the patch tokens + with shape (B, C, H, W). + - ``"avg_featmap"``: The global averaged feature map tensor + with shape (B, C). + - ``"raw"``: The raw feature tensor includes patch tokens and + class tokens with shape (B, L, C). + + It only works without input mask. Defaults to ``"avg_featmap"``. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + layer_scale_init_value (float, optional): The init value of gamma in + BEiTTransformerEncoderLayer. + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + + def __init__( + self, + arch: str = 'b', + img_size: int = 224, + patch_size: int = 16, + in_channels: int = 3, + out_indices: int = -1, + drop_rate: float = 0, + drop_path_rate: float = 0, + bias: bool = 'qv_bias', + norm_cfg: dict = dict(type='LN', eps=1e-6), + final_norm: bool = True, + out_type: str = 'raw', + frozen_stages: int = -1, + use_abs_pos_emb: bool = True, + use_rel_pos_bias: bool = False, + use_shared_rel_pos_bias: bool = False, + layer_scale_init_value: float = None, + interpolate_mode: str = 'bicubic', + patch_cfg: dict = dict(), + layer_cfgs: dict = dict(), + init_cfg: dict = [ + dict(type='Constant', val=1, layer=['LayerNorm']), + dict(type='TruncNormal', std=0.02, layer=['Conv2d']), + dict(type='Xavier', distribution='uniform', layer=['Linear']) + ] + ) -> None: + super().__init__( + arch=arch, + img_size=img_size, + patch_size=patch_size, + in_channels=in_channels, + out_indices=out_indices, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + bias=bias, + norm_cfg=norm_cfg, + final_norm=final_norm, + out_type=out_type, + with_cls_token=True, + frozen_stages=frozen_stages, + use_abs_pos_emb=use_abs_pos_emb, + use_rel_pos_bias=use_rel_pos_bias, + use_shared_rel_pos_bias=use_shared_rel_pos_bias, + layer_scale_init_value=layer_scale_init_value, + interpolate_mode=interpolate_mode, + patch_cfg=patch_cfg, + layer_cfgs=layer_cfgs, + init_cfg=init_cfg) + self.pos_embed.requires_grad = False + self.num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + def init_weights(self) -> None: + """Initialize position embedding, patch embedding and cls token.""" + super().init_weights() + if not (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # initialize position embedding in backbone + pos_embed = build_2d_sincos_position_embedding( + int(self.num_patches**.5), + self.pos_embed.shape[-1], + cls_token=True) + self.pos_embed.data.copy_(pos_embed.float()) + + trunc_normal_(self.cls_token, std=.02) + + def forward(self, x: torch.Tensor, + mask: Optional[torch.Tensor]) -> torch.Tensor: + """Generate features for masked images. + + This function generates mask images and get the hidden features for + visible patches. 
+
+        The function supports two kinds of forward behaviors. If the ``mask`` is
+        not ``None``, the forward function will be executed as masked image
+        modeling pre-training; if the ``mask`` is ``None``, the forward
+        function will call ``super().forward()``, which extracts features from
+        images without mask.
+
+        Args:
+            x (torch.Tensor): Input images, which is of shape B x C x H x W.
+            mask (torch.Tensor, optional): Mask for input, which is of shape
+                B x L.
+
+        Returns:
+            torch.Tensor: hidden features.
+        """
+        if mask is None:
+            return super().forward(x)
+
+        else:
+            x, _ = self.patch_embed(x)
+            batch_size, _, dim = x.size()
+
+            cls_tokens = self.cls_token.expand(batch_size, -1, -1)
+
+            # NOTE: unmasked embeddings
+            x_unmasked = x[~mask].reshape(batch_size, -1, dim)
+            x_unmasked = torch.cat((cls_tokens, x_unmasked), dim=1)
+
+            pos_embed = self.pos_embed.expand(batch_size, self.num_patches + 1,
+                                              dim)
+            pos_embed_unmasked = pos_embed[:, 1:][~mask].reshape(
+                batch_size, -1, dim)
+            pos_embed_unmasked = torch.cat(
+                (pos_embed[:, :1], pos_embed_unmasked), dim=1)
+            x_unmasked = x_unmasked + pos_embed_unmasked
+
+            x_unmasked = self.drop_after_pos(x_unmasked)
+
+            for i, layer in enumerate(self.layers):
+                x_unmasked = layer(x=x_unmasked, rel_pos_bias=None)
+
+                if i == len(self.layers) - 1 and self.final_norm:
+                    x_unmasked = self.norm1(x_unmasked)
+
+            return x_unmasked
+
+
+@MODELS.register_module()
+class CAE(BaseSelfSupervisor):
+    """CAE.
+
+    Implementation of `Context Autoencoder for Self-Supervised Representation
+    Learning `_.
+
+    Args:
+        backbone (dict): Config dict for module of backbone.
+        neck (dict): Config dict for module of neck.
+        head (dict): Config dict for module of head functions.
+        target_generator (dict, optional): The target_generator module to
+            generate targets for self-supervised learning optimization, such as
+            HOG, extracted features from other modules (DALL-E, CLIP), etc.
+        base_momentum (float): The base momentum coefficient for the target
+            network. Defaults to 0.0.
+        data_preprocessor (dict, optional): The config for preprocessing
+            input data. If None or no specified type, it will use
+            "SelfSupDataPreprocessor" as type.
+            See :class:`SelfSupDataPreprocessor` for more details.
+            Defaults to None.
+        init_cfg (Union[List[dict], dict], optional): Config dict for weight
+            initialization. Defaults to None.
+    """
+
+    def __init__(self,
+                 backbone: dict,
+                 neck: dict,
+                 head: dict,
+                 target_generator: Optional[dict] = None,
+                 base_momentum: float = 0.0,
+                 data_preprocessor: Optional[dict] = None,
+                 init_cfg: Optional[Union[List[dict], dict]] = None) -> None:
+        super().__init__(
+            backbone=backbone,
+            neck=neck,
+            head=head,
+            target_generator=target_generator,
+            data_preprocessor=data_preprocessor,
+            init_cfg=init_cfg)
+
+        self.momentum = base_momentum
+        self.teacher = MODELS.build(backbone)
+
+    def init_weights(self) -> None:
+        """Initialize weights."""
+        super().init_weights()
+
+        # init the weights of teacher with those of backbone
+        for param_backbone, param_teacher in zip(self.backbone.parameters(),
+                                                 self.teacher.parameters()):
+            param_teacher.detach()
+            param_teacher.data.copy_(param_backbone.data)
+            param_teacher.requires_grad = False
+
+    def momentum_update(self) -> None:
+        """Momentum update of the teacher network."""
+        for param_backbone, param_teacher in zip(self.backbone.parameters(),
+                                                 self.teacher.parameters()):
+            param_teacher.data = param_teacher.data * self.momentum + \
+                param_backbone.data * (1.
- self.momentum) + + def extract_feat(self, inputs: torch.Tensor): + return self.backbone(inputs, mask=None) + + def loss(self, inputs: List[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + mask = torch.stack([data_sample.mask for data_sample in data_samples]) + mask = mask.flatten(1).to(torch.bool) + + unmasked = self.backbone(inputs[0], mask) + + # get the latent prediction for the masked patches + with torch.no_grad(): + # inputs[0] is the prediction image + latent_target = self.teacher(inputs[0], ~mask) + latent_target = latent_target[:, 1:, :] + self.momentum_update() + + pos_embed = self.backbone.pos_embed.expand(inputs[0].shape[0], -1, -1) + pos_embed_masked = pos_embed[:, + 1:][mask].reshape(inputs[0].shape[0], -1, + pos_embed.shape[-1]) + pos_embed_unmasked = pos_embed[:, 1:][~mask].reshape( + inputs[0].shape[0], -1, pos_embed.shape[-1]) + + # input the unmasked tokens and masked tokens to the decoder + logits, latent_pred = self.neck(unmasked[:, 1:], pos_embed_masked, + pos_embed_unmasked) + + logits = logits.view(-1, logits.shape[-1]) + # inputs[1] is the target image + logits_target = self.target_generator(inputs[1]) + loss_main, loss_align = self.head.loss(logits, logits_target, + latent_pred, latent_target, + mask) + losses = dict() + + losses['loss'] = loss_main + loss_align + losses['main'] = loss_main + losses['align'] = loss_align + return losses diff --git a/mmpretrain/models/selfsup/densecl.py b/mmpretrain/models/selfsup/densecl.py new file mode 100644 index 0000000..c969af1 --- /dev/null +++ b/mmpretrain/models/selfsup/densecl.py @@ -0,0 +1,203 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn +from mmengine.dist import all_gather +from mmengine.model import ExponentialMovingAverage + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from ..utils import batch_shuffle_ddp, batch_unshuffle_ddp +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class DenseCL(BaseSelfSupervisor): + """DenseCL. + + Implementation of `Dense Contrastive Learning for Self-Supervised Visual + Pre-Training `_. + Borrowed from the authors' code: ``_. + The loss_lambda warmup is in `engine/hooks/densecl_hook.py`. + + Args: + backbone (dict): Config dict for module of backbone. + neck (dict): Config dict for module of deep features to compact + feature vectors. + head (dict): Config dict for module of head functions. + queue_len (int): Number of negative keys maintained in the queue. + Defaults to 65536. + feat_dim (int): Dimension of compact feature vectors. Defaults to 128. + momentum (float): Momentum coefficient for the momentum-updated + encoder. Defaults to 0.999. + loss_lambda (float): Loss weight for the single and dense contrastive + loss. Defaults to 0.5. + pretrained (str, optional): The pretrained checkpoint path, support + local path and remote path. Defaults to None. + data_preprocessor (dict, optional): The config for preprocessing + input data. If None or no specified type, it will use + "SelfSupDataPreprocessor" as type. + See :class:`SelfSupDataPreprocessor` for more details. + Defaults to None. 
+ init_cfg (Union[List[dict], dict], optional): Config dict for weight + initialization. Defaults to None. + """ + + def __init__(self, + backbone: dict, + neck: dict, + head: dict, + queue_len: int = 65536, + feat_dim: int = 128, + momentum: float = 0.001, + loss_lambda: float = 0.5, + pretrained: Optional[str] = None, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + backbone=backbone, + neck=neck, + head=head, + pretrained=pretrained, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + # create momentum model + self.encoder_k = ExponentialMovingAverage( + nn.Sequential(self.backbone, self.neck), momentum) + + self.queue_len = queue_len + self.loss_lambda = loss_lambda + + # create the queue + self.register_buffer('queue', torch.randn(feat_dim, queue_len)) + self.queue = nn.functional.normalize(self.queue, dim=0) + self.register_buffer('queue_ptr', torch.zeros(1, dtype=torch.long)) + + # create the second queue for dense output + self.register_buffer('queue2', torch.randn(feat_dim, queue_len)) + self.queue2 = nn.functional.normalize(self.queue2, dim=0) + self.register_buffer('queue2_ptr', torch.zeros(1, dtype=torch.long)) + + @torch.no_grad() + def _dequeue_and_enqueue(self, keys: torch.Tensor) -> None: + """Update queue.""" + # gather keys before updating queue + keys = torch.cat(all_gather(keys), dim=0) + + batch_size = keys.shape[0] + + ptr = int(self.queue_ptr) + assert self.queue_len % batch_size == 0 # for simplicity + + # replace the keys at ptr (dequeue and enqueue) + self.queue[:, ptr:ptr + batch_size] = keys.transpose(0, 1) + ptr = (ptr + batch_size) % self.queue_len # move pointer + + self.queue_ptr[0] = ptr + + @torch.no_grad() + def _dequeue_and_enqueue2(self, keys: torch.Tensor) -> None: + """Update queue2.""" + # gather keys before updating queue + keys = torch.cat(all_gather(keys), dim=0) + + batch_size = keys.shape[0] + + ptr = int(self.queue2_ptr) + assert self.queue_len % batch_size == 0 # for simplicity + + # replace the keys at ptr (dequeue and enqueue) + self.queue2[:, ptr:ptr + batch_size] = keys.transpose(0, 1) + ptr = (ptr + batch_size) % self.queue_len # move pointer + + self.queue2_ptr[0] = ptr + + def loss(self, inputs: List[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. 
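+
+        Example (illustrative sketch, not part of the original implementation;
+        assumes ``model`` is a fully built ``DenseCL`` with the default queue
+        settings and that ``DataSample`` is imported as in this file)::
+
+            >>> # two augmented views of the same batch of images
+            >>> inputs = [torch.randn(8, 3, 224, 224),
+            ...           torch.randn(8, 3, 224, 224)]
+            >>> data_samples = [DataSample() for _ in range(8)]
+            >>> losses = model.loss(inputs, data_samples)
+            >>> sorted(losses.keys())
+            ['loss_dense', 'loss_single']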
+ """ + assert isinstance(inputs, list) + im_q = inputs[0] + im_k = inputs[1] + # compute query features + q_b = self.backbone(im_q) # backbone features + q, q_grid, q2 = self.neck(q_b) # queries: NxC; NxCxS^2 + q_b = q_b[0] + q_b = q_b.view(q_b.size(0), q_b.size(1), -1) + + q = nn.functional.normalize(q, dim=1) + q2 = nn.functional.normalize(q2, dim=1) + q_grid = nn.functional.normalize(q_grid, dim=1) + q_b = nn.functional.normalize(q_b, dim=1) + + # compute key features + with torch.no_grad(): # no gradient to keys + # update the key encoder + self.encoder_k.update_parameters( + nn.Sequential(self.backbone, self.neck)) + + # shuffle for making use of BN + im_k, idx_unshuffle = batch_shuffle_ddp(im_k) + + k_b = self.encoder_k.module[0](im_k) # backbone features + k, k_grid, k2 = self.encoder_k.module[1](k_b) # keys: NxC; NxCxS^2 + k_b = k_b[0] + k_b = k_b.view(k_b.size(0), k_b.size(1), -1) + + k = nn.functional.normalize(k, dim=1) + k2 = nn.functional.normalize(k2, dim=1) + k_grid = nn.functional.normalize(k_grid, dim=1) + k_b = nn.functional.normalize(k_b, dim=1) + + # undo shuffle + k = batch_unshuffle_ddp(k, idx_unshuffle) + k2 = batch_unshuffle_ddp(k2, idx_unshuffle) + k_grid = batch_unshuffle_ddp(k_grid, idx_unshuffle) + k_b = batch_unshuffle_ddp(k_b, idx_unshuffle) + + # compute logits + # Einstein sum is more intuitive + # positive logits: Nx1 + l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1) + # negative logits: NxK + l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()]) + + # feat point set sim + backbone_sim_matrix = torch.matmul(q_b.permute(0, 2, 1), k_b) + densecl_sim_ind = backbone_sim_matrix.max(dim=2)[1] # NxS^2 + + indexed_k_grid = torch.gather(k_grid, 2, + densecl_sim_ind.unsqueeze(1).expand( + -1, k_grid.size(1), -1)) # NxCxS^2 + densecl_sim_q = (q_grid * indexed_k_grid).sum(1) # NxS^2 + + # dense positive logits: NS^2X1 + l_pos_dense = densecl_sim_q.view(-1).unsqueeze(-1) + + q_grid = q_grid.permute(0, 2, 1) + q_grid = q_grid.reshape(-1, q_grid.size(2)) + # dense negative logits: NS^2xK + l_neg_dense = torch.einsum( + 'nc,ck->nk', [q_grid, self.queue2.clone().detach()]) + + loss_single = self.head.loss(l_pos, l_neg) + loss_dense = self.head.loss(l_pos_dense, l_neg_dense) + + losses = dict() + losses['loss_single'] = loss_single * (1 - self.loss_lambda) + losses['loss_dense'] = loss_dense * self.loss_lambda + + self._dequeue_and_enqueue(k) + self._dequeue_and_enqueue2(k2) + + return losses diff --git a/mmpretrain/models/selfsup/eva.py b/mmpretrain/models/selfsup/eva.py new file mode 100644 index 0000000..30779be --- /dev/null +++ b/mmpretrain/models/selfsup/eva.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List + +import torch + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class EVA(BaseSelfSupervisor): + """EVA. + + Implementation of `EVA: Exploring the Limits of Masked Visual + Representation Learning at Scale `_. + """ + + def extract_feat(self, inputs: torch.Tensor): + return self.backbone(inputs, mask=None) + + def loss(self, inputs: torch.Tensor, data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (torch.Tensor): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. 
+ """ + + clip_feature, _ = self.target_generator(inputs) + + latent, mask, ids_restore = self.backbone(inputs) + pred = self.neck(latent, ids_restore) + + clip_feature = clip_feature[:, 1:, :] + loss = self.head.loss(pred, clip_feature, mask) + losses = dict(loss=loss) + return losses diff --git a/mmpretrain/models/selfsup/itpn.py b/mmpretrain/models/selfsup/itpn.py new file mode 100644 index 0000000..488a996 --- /dev/null +++ b/mmpretrain/models/selfsup/itpn.py @@ -0,0 +1,359 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Dict, List, Optional, Tuple + +import torch +import torch.nn as nn +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.models.backbones.hivit import BlockWithRPE, HiViT, PatchMerge +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from ..utils import build_2d_sincos_position_embedding +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class iTPNHiViT(HiViT): + """HiViT for iTPN pre-training. + + Args: + img_size (int | tuple): Input image size. Defaults to 224. + patch_size (int | tuple): The patch size. Defaults to 16. + inner_patches (int): Inner patch. Defaults to 4. + stem_mlp_ratio (int): Ratio of MLP hidden dim to embedding dim + in the first two stages. Defaults to 3. + mlp_ratio (int): Ratio of MLP hidden dim to embedding dim in + the last stage. Defaults to 4. + qkv_bias (bool): Enable bias for qkv projections if True. + qk_scale (float): The number of divider after q@k. Default to None. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + attn_drop_rate (float): The drop out rate for attention output weights. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + ape (bool): If True, add absolute position embedding to + the patch embedding. + rpe (bool): If True, add relative position embedding to + the patch embedding. + layer_scale_init_value (float): Layer-scale init values. Defaults to 0. + mask_ratio (bool): The ratio of total number of patches to be masked. + Defaults to 0.75. + reconstruction_type (str): The reconstruction of self-supervised + learning. Defaults to 'pixel'. + """ + + def __init__( + self, + arch='base', + img_size: int = 224, + patch_size: int = 16, + inner_patches: int = 4, + stem_mlp_ratio: int = 3., + mlp_ratio: int = 4., + qkv_bias: bool = True, + qk_scale: Optional[bool] = None, + drop_rate: float = 0.0, + attn_drop_rate: float = 0.0, + drop_path_rate: float = 0.0, + norm_cfg: dict = dict(type='LN', eps=1e-6), + ape: bool = True, + rpe: bool = False, + layer_scale_init_value: float = 0.0, + mask_ratio: float = 0.75, + reconstruction_type: str = 'pixel', + **kwargs, + ): + super().__init__( + arch=arch, + img_size=img_size, + patch_size=patch_size, + inner_patches=inner_patches, + stem_mlp_ratio=stem_mlp_ratio, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=drop_path_rate, + norm_cfg=norm_cfg, + ape=ape, + rpe=rpe, + layer_scale_init_value=layer_scale_init_value, + **kwargs, + ) + + self.pos_embed.requires_grad = False + self.mask_ratio = mask_ratio + + assert reconstruction_type in ['pixel', 'clip'], \ + 'iTPN method only support `pixel` and `clip`, ' \ + f'but got `{reconstruction_type}`.' 
+ self.reconstruction_type = reconstruction_type + self.num_patches = self.patch_embed.num_patches + + if reconstruction_type == 'clip': + self.mask_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + + def init_weights(self) -> None: + """Initialize position embedding, patch embedding and cls token.""" + super().apply(self._init_weights) + + if self.reconstruction_type == 'clip': + trunc_normal_(self.mask_token, std=0.02) + self.rescale_init_weight() + else: + pos_embed = build_2d_sincos_position_embedding( + int(self.num_patches**.5), + self.pos_embed.shape[-1], + cls_token=False) + self.pos_embed.data.copy_(pos_embed.float()) + + w = self.patch_embed.proj.weight.data + torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1])) + + def rescale_init_weight(self) -> None: + """Rescale the initialized weights.""" + + def rescale(param, layer_id): + param.div_(math.sqrt(2.0 * layer_id)) + + for layer_id, layer in enumerate(self.blocks): + if isinstance(layer, BlockWithRPE): + if layer.attn is not None: + rescale(layer.attn.proj.weight.data, layer_id + 1) + rescale(layer.mlp.fc2.weight.data, layer_id + 1) + + def masking_id(self, batch_size, mask_ratio): + N, L = batch_size, self.pos_embed.size(1) + len_keep = int(L * (1 - mask_ratio)) + + noise = torch.rand( + N, L, device=self.pos_embed.device) # noise in [0, 1] + + # sort noise for each sample + ids_shuffle = torch.argsort( + noise, dim=1) # ascend: small is keep, large is remove + ids_restore = torch.argsort(ids_shuffle, dim=1) + + # keep the first subset + ids_keep = ids_shuffle[:, :len_keep] + # generate the binary mask: 0 is keep, 1 is remove + mask = torch.ones([N, L], device=self.pos_embed.device) + mask[:, :ids_keep.size(1)] = 0 + # unshuffle to get the binary mask + mask = torch.gather(mask, dim=1, index=ids_restore) + + return ids_keep, ids_restore, mask + + def forward_pixel( + self, + x: torch.Tensor, + mask: Optional[bool] = True + ) -> Tuple[Tuple, torch.Tensor, torch.Tensor]: + """Generate features for masked images. + + The function supports two kind of forward behaviors. If the ``mask`` is + ``True``, the function will generate mask to masking some patches + randomly and get the hidden features for visible patches, which means + the function will be executed as masked imagemodeling pre-training; + if the ``mask`` is ``None`` or ``False``, the forward function will + call ``super().forward()``, which extract features from images without + mask. + + + Args: + x (torch.Tensor): Input images, which is of shape B x C x H x W. + mask (bool, optional): To indicate whether the forward function + generating ``mask`` or not. + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Hidden features, + mask and the ids to restore original image. + + - ``x`` (torch.Tensor): hidden features, which is of shape + B x (L * mask_ratio) x C. + - ``mask`` (torch.Tensor): mask used to mask image. + - ``ids_restore`` (torch.Tensor): ids to restore original image. 
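+
+        Example (illustrative sketch, not part of the original implementation;
+        assumes the default configuration builds as expected)::
+
+            >>> model = iTPNHiViT()  # reconstruction_type='pixel' by default
+            >>> imgs = torch.randn(2, 3, 224, 224)
+            >>> outs, mask, ids_restore = model.forward_pixel(imgs, mask=True)
+            >>> # `outs` holds the multi-scale features; `mask` and
+            >>> # `ids_restore` are both of shape (B, num_patches)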
+ """ + if mask is None or False: + return super().forward(x) + + else: + B, C, H, W = x.shape + ids_keep, ids_restore, mask = self.masking_id(B, self.mask_ratio) + + x = self.patch_embed(x) + + x = torch.gather( + x, + dim=1, + index=ids_keep[:, :, None, None, + None].expand(-1, -1, *x.shape[2:])) + + outs = [] + for blk in self.blocks[:-self.num_main_blocks]: + if isinstance(blk, PatchMerge): + outs.append(x) + x = blk(x) + + x = x[..., 0, 0, :] + if self.ape: + pos_embed = self.interpolate_pos_encoding(x, H, W) + pos_embed = torch.gather( + pos_embed.expand(B, -1, -1), + dim=1, + index=ids_keep[:, :, None].expand(-1, -1, + pos_embed.shape[2]), + ) + x = x + pos_embed + x = self.pos_drop(x) + + for blk in self.blocks[-self.num_main_blocks:]: + x = blk(x) + + outs.append(x) + + return (tuple(outs), mask, ids_restore) + + def forward_clip(self, + x: torch.Tensor, + mask: Optional[bool] = True) -> Tuple: + """Generate features for masked images. + + The function supports two kind of forward behaviors. If the ``mask`` is + ``True``, the function will generate mask to masking some patches + randomly and get the hidden features for visible patches, which means + the function will be executed as masked imagemodeling pre-training; + if the ``mask`` is ``None`` or ``False``, the forward function will + call ``super().forward()``, which extract features from images without + mask. + + + Args: + x (torch.Tensor): Input images, which is of shape B x C x H x W. + mask (bool, optional): To indicate whether the forward function + generating ``mask`` or not. + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Hidden features, + mask and the ids to restore original image. + + - ``x`` (torch.Tensor): hidden features, which is of shape + B x (L * mask_ratio) x C. + - ``mask`` (torch.Tensor): mask used to mask image. + - ``ids_restore`` (torch.Tensor): ids to restore original image. + """ + if mask is None or False: + return super().forward(x) + + else: + B, C, H, W = x.shape + x = self.patch_embed(x) + + outs = [] + for blk in self.blocks[:-self.num_main_blocks]: + if isinstance(blk, PatchMerge): + outs.append(x) + x = blk(x) + + x = x[..., 0, 0, :] + B, L, _ = x.shape + mask_token = self.mask_token.expand(B, L, -1) + w = mask.flatten(1).unsqueeze(-1).type_as(mask_token) + x = x * (1. - w) + mask_token * w + + if self.ape: + pos_embed = self.interpolate_pos_encoding(x, H, W) + x = x + pos_embed + x = self.pos_drop(x) + + rpe_index = True if self.rpe else None + + for blk in self.blocks[-self.num_main_blocks:]: + x = blk(x, rpe_index) + + outs.append(x) + + return tuple(outs) + + def forward(self, x: torch.Tensor, mask: Optional[bool] = True) -> Tuple: + """Generate features for masked images. + + The function supports two kind of forward behaviors. If the ``mask`` is + ``True``, the function will generate mask to masking some patches + randomly and get the hidden features for visible patches, which means + the function will be executed as masked imagemodeling pre-training; + if the ``mask`` is ``None`` or ``False``, the forward function will + call ``super().forward()``, which extract features from images without + mask. + + + Args: + x (torch.Tensor): Input images, which is of shape B x C x H x W. + mask (bool, optional): To indicate whether the forward function + generating ``mask`` or not. + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Hidden features, + mask and the ids to restore original image. 
+ + - ``x`` (torch.Tensor): hidden features, which is of shape + B x (L * mask_ratio) x C. + - ``mask`` (torch.Tensor): mask used to mask image. + - ``ids_restore`` (torch.Tensor): ids to restore original image. + """ + + if self.reconstruction_type == 'pixel': + return self.forward_pixel(x, mask) + return self.forward_clip(x, mask) + + +@MODELS.register_module() +class iTPN(BaseSelfSupervisor): + """iTPN. + + Implementation of `iTPN: Integrally Pre-Trained Transformer Pyramid + Networks `_. + """ + + def extract_feat(self, inputs: torch.Tensor): + return self.backbone(inputs, mask=None) + + def loss(self, inputs: torch.Tensor, data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (torch.Tensor): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + + if self.backbone.reconstruction_type == 'pixel': + latent, mask, ids_restore = self.backbone(inputs) + pred = self.neck(latent, ids_restore) + + loss = self.head.loss(pred, inputs, mask) + else: + mask = torch.stack( + [data_sample.mask for data_sample in data_samples]) + + img_latent = self.backbone(inputs[0], mask) + + # inputs[1] is the target image + with torch.no_grad(): + target = self.target_generator(inputs[1])[0] + target = target.detach() + + # iTPN contains a neck module + feats = self.neck(img_latent) + loss = self.head.loss(feats, target[:, 1:, :], mask) + + losses = dict(loss=loss) + return losses diff --git a/mmpretrain/models/selfsup/mae.py b/mmpretrain/models/selfsup/mae.py new file mode 100644 index 0000000..01bc5bc --- /dev/null +++ b/mmpretrain/models/selfsup/mae.py @@ -0,0 +1,416 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import torch + +from mmpretrain.models import HiViT, VisionTransformer +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from ..utils import build_2d_sincos_position_embedding +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class MAEViT(VisionTransformer): + """Vision Transformer for MAE pre-training. + + A PyTorch implement of: `An Image is Worth 16x16 Words: Transformers + for Image Recognition at Scale `_. + This module implements the patch masking in MAE and initialize the + position embedding with sine-cosine position embedding. + + Args: + arch (str | dict): Vision Transformer architecture + Default: 'b' + img_size (int | tuple): Input image size + patch_size (int | tuple): The patch size + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + out_type (str): The type of output features. Please choose from + + - ``"cls_token"``: The class token tensor with shape (B, C). + - ``"featmap"``: The feature map tensor from the patch tokens + with shape (B, C, H, W). + - ``"avg_featmap"``: The global averaged feature map tensor + with shape (B, C). + - ``"raw"``: The raw feature tensor includes patch tokens and + class tokens with shape (B, L, C). + + It only works without input mask. 
Defaults to ``"avg_featmap"``. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + mask_ratio (bool): The ratio of total number of patches to be masked. + Defaults to 0.75. + init_cfg (Union[List[dict], dict], optional): Initialization config + dict. Defaults to None. + """ + + def __init__(self, + arch: Union[str, dict] = 'b', + img_size: int = 224, + patch_size: int = 16, + out_indices: Union[Sequence, int] = -1, + drop_rate: float = 0, + drop_path_rate: float = 0, + norm_cfg: dict = dict(type='LN', eps=1e-6), + final_norm: bool = True, + out_type: str = 'raw', + interpolate_mode: str = 'bicubic', + patch_cfg: dict = dict(), + layer_cfgs: dict = dict(), + mask_ratio: float = 0.75, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + arch=arch, + img_size=img_size, + patch_size=patch_size, + out_indices=out_indices, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + norm_cfg=norm_cfg, + final_norm=final_norm, + out_type=out_type, + with_cls_token=True, + interpolate_mode=interpolate_mode, + patch_cfg=patch_cfg, + layer_cfgs=layer_cfgs, + init_cfg=init_cfg) + + # position embedding is not learnable during pretraining + self.pos_embed.requires_grad = False + self.mask_ratio = mask_ratio + self.num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + def init_weights(self) -> None: + """Initialize position embedding, patch embedding and cls token.""" + super().init_weights() + pos_embed = build_2d_sincos_position_embedding( + int(self.num_patches**.5), + self.pos_embed.shape[-1], + cls_token=True) + self.pos_embed.data.copy_(pos_embed.float()) + + w = self.patch_embed.projection.weight.data + torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1])) + + torch.nn.init.normal_(self.cls_token, std=.02) + + def random_masking( + self, + x: torch.Tensor, + mask_ratio: float = 0.75 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Generate the mask for MAE Pre-training. + + Args: + x (torch.Tensor): Image with data augmentation applied, which is + of shape B x L x C. + mask_ratio (float): The mask ratio of total patches. + Defaults to 0.75. + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: masked image, mask + and the ids to restore original image. + + - ``x_masked`` (torch.Tensor): masked image. + - ``mask`` (torch.Tensor): mask used to mask image. + - ``ids_restore`` (torch.Tensor): ids to restore original image. 
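+
+        Example (illustrative sketch, not part of the original implementation;
+        196 tokens correspond to a 224x224 image with 16x16 patches)::
+
+            >>> model = MAEViT()  # default 'b' arch
+            >>> x = torch.randn(2, 196, 768)
+            >>> x_masked, mask, ids_restore = model.random_masking(x, 0.75)
+            >>> x_masked.shape, mask.shape, ids_restore.shape
+            (torch.Size([2, 49, 768]), torch.Size([2, 196]), torch.Size([2, 196]))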
+ """ + N, L, D = x.shape # batch, length, dim + len_keep = int(L * (1 - mask_ratio)) + + noise = torch.rand(N, L, device=x.device) # noise in [0, 1] + + # sort noise for each sample + ids_shuffle = torch.argsort( + noise, dim=1) # ascend: small is keep, large is remove + ids_restore = torch.argsort(ids_shuffle, dim=1) + + # keep the first subset + ids_keep = ids_shuffle[:, :len_keep] + x_masked = torch.gather( + x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D)) + + # generate the binary mask: 0 is keep, 1 is remove + mask = torch.ones([N, L], device=x.device) + mask[:, :len_keep] = 0 + # unshuffle to get the binary mask + mask = torch.gather(mask, dim=1, index=ids_restore) + + return x_masked, mask, ids_restore + + def forward( + self, + x: torch.Tensor, + mask: Optional[bool] = True + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Generate features for masked images. + + The function supports two kind of forward behaviors. If the ``mask`` is + ``True``, the function will generate mask to masking some patches + randomly and get the hidden features for visible patches, which means + the function will be executed as masked imagemodeling pre-training; + if the ``mask`` is ``None`` or ``False``, the forward function will + call ``super().forward()``, which extract features from images without + mask. + + + Args: + x (torch.Tensor): Input images, which is of shape B x C x H x W. + mask (bool, optional): To indicate whether the forward function + generating ``mask`` or not. + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Hidden features, + mask and the ids to restore original image. + + - ``x`` (torch.Tensor): hidden features, which is of shape + B x (L * mask_ratio) x C. + - ``mask`` (torch.Tensor): mask used to mask image. + - ``ids_restore`` (torch.Tensor): ids to restore original image. + """ + if mask is None or False: + return super().forward(x) + + else: + B = x.shape[0] + x = self.patch_embed(x)[0] + # add pos embed w/o cls token + x = x + self.pos_embed[:, 1:, :] + + # masking: length -> length * mask_ratio + x, mask, ids_restore = self.random_masking(x, self.mask_ratio) + + # append cls token + cls_token = self.cls_token + self.pos_embed[:, :1, :] + cls_tokens = cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + for _, layer in enumerate(self.layers): + x = layer(x) + # Use final norm + x = self.norm1(x) + + return (x, mask, ids_restore) + + +@MODELS.register_module() +class MAE(BaseSelfSupervisor): + """MAE. + + Implementation of `Masked Autoencoders Are Scalable Vision Learners + `_. + """ + + def extract_feat(self, inputs: torch.Tensor): + return self.backbone(inputs, mask=None) + + def loss(self, inputs: torch.Tensor, data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (torch.Tensor): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + # ids_restore: the same as that in original repo, which is used + # to recover the original order of tokens in decoder. + latent, mask, ids_restore = self.backbone(inputs) + pred = self.neck(latent, ids_restore) + loss = self.head.loss(pred, inputs, mask) + losses = dict(loss=loss) + return losses + + +@MODELS.register_module() +class MAEHiViT(HiViT): + """HiViT for MAE pre-training. 
+ + A PyTorch implement of: `HiViT: A Simple and More Efficient Design + of Hierarchical Vision Transformer `_. + This module implements the patch masking in MAE and initialize the + position embedding with sine-cosine position embedding. + + Args: + arch (str | dict): Vision Transformer architecture + Default: 'b' + img_size (int | tuple): Input image size + patch_size (int | tuple): The patch size + Defaults to 4, to downsample 4x at the first stage + inner_patches (int): The inner patches within a token + Defaults to 4 + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + ape (bool): the absolute position embedding + rpe (bool): the relative position embedding + Defaults to False + layer_scale_init_value (float): the layer scale init value + mask_ratio (bool): The ratio of total number of patches to be masked. + Defaults to 0.75. + init_cfg (Union[List[dict], dict], optional): Initialization config + dict. Defaults to None. + """ + + def __init__(self, + arch: Union[str, dict] = 'b', + img_size: int = 224, + patch_size: int = 16, + inner_patches: int = 4, + out_indices: Union[list, int] = [23], + drop_rate: float = 0.0, + drop_path_rate: float = 0.0, + norm_cfg: dict = dict(type='LN', eps=1e-6), + ape: bool = True, + rpe: bool = False, + layer_scale_init_value: float = 0.0, + mask_ratio: float = 0.75, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + arch=arch, + img_size=img_size, + patch_size=patch_size, + inner_patches=inner_patches, + out_indices=out_indices, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + norm_cfg=norm_cfg, + ape=ape, + rpe=rpe, + layer_scale_init_value=layer_scale_init_value, + init_cfg=init_cfg) + + self.pos_embed.requires_grad = False + self.mask_ratio = mask_ratio + self.num_patches = self.patch_embed.num_patches + + def init_weights(self) -> None: + """Initialize position embedding, patch embedding.""" + super().apply(self._init_weights) + pos_embed = build_2d_sincos_position_embedding( + int(self.num_patches**.5), + self.pos_embed.shape[-1], + cls_token=False) + self.pos_embed.data.copy_(pos_embed.float()) + + w = self.patch_embed.proj.weight.data + torch.nn.init.xavier_uniform_(w.view([w.shape[0], -1])) + + def masking_id( + self, batch_size, + mask_ratio) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Generate the mask for MAE Pre-training. + + Args: + batch_size: The batch size of input data + mask_ratio: The mask ratio of total patches. + Defaults to 0.75. 
+ + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: the ids + for the tokens retained, the ids to restore original image, + and the mask + """ + N, L = batch_size, self.pos_embed.size(1) + len_keep = int(L * (1 - mask_ratio)) + + noise = torch.rand( + N, L, device=self.pos_embed.device) # noise in [0, 1] + + # sort noise for each sample + ids_shuffle = torch.argsort( + noise, dim=1) # ascend: small is keep, large is remove + ids_restore = torch.argsort(ids_shuffle, dim=1) + + # keep the first subset + ids_keep = ids_shuffle[:, :len_keep] + # generate the binary mask: 0 is keep, 1 is remove + mask = torch.ones([N, L], device=self.pos_embed.device) + mask[:, :ids_keep.size(1)] = 0 + # unshuffle to get the binary mask + mask = torch.gather(mask, dim=1, index=ids_restore) + + return ids_keep, ids_restore, mask + + def forward( + self, + x: torch.Tensor, + mask: Optional[bool] = True + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Generate features for masked images. + + The function supports two kind of forward behaviors. If the ``mask`` is + ``True``, the function will generate mask to masking some patches + randomly and get the hidden features for visible patches, which means + the function will be executed as masked imagemodeling pre-training; + if the ``mask`` is ``None`` or ``False``, the forward function will + call ``super().forward()``, which extract features from images without + mask. + + + Args: + x (torch.Tensor): Input images, which is of shape B x C x H x W. + mask (bool, optional): To indicate whether the forward function + generating ``mask`` or not. + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Hidden features, + mask and the ids to restore original image. + + - ``x`` (torch.Tensor): hidden features, which is of shape + B x (L * mask_ratio) x C. + - ``mask`` (torch.Tensor): mask used to mask image. + - ``ids_restore`` (torch.Tensor): ids to restore original image. + """ + if mask is None or False: + return super().forward(x) + + else: + B, C, H, W = x.shape + ids_keep, ids_restore, mask = self.masking_id(B, self.mask_ratio) + + x = self.patch_embed(x) + + x = torch.gather( + x, + dim=1, + index=ids_keep[:, :, None, None, + None].expand(-1, -1, *x.shape[2:])) + + for blk in self.blocks[:-self.num_main_blocks]: + x = blk(x) + + x = x[..., 0, 0, :] + if self.ape: + pos_embed = self.interpolate_pos_encoding(x, H, W) + pos_embed = torch.gather( + pos_embed.expand(B, -1, -1), + dim=1, + index=ids_keep[:, :, None].expand(-1, -1, + pos_embed.shape[2]), + ) + x = x + pos_embed + x = self.pos_drop(x) + + for blk in self.blocks[-self.num_main_blocks:]: + x = blk(x) + + return (x, mask, ids_restore) diff --git a/mmpretrain/models/selfsup/maskfeat.py b/mmpretrain/models/selfsup/maskfeat.py new file mode 100644 index 0000000..fd9f0b2 --- /dev/null +++ b/mmpretrain/models/selfsup/maskfeat.py @@ -0,0 +1,336 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import Dict, List, Optional, Sequence, Union + +import cv2 +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine.model import BaseModule + +from mmpretrain.models import VisionTransformer +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class HOGGenerator(BaseModule): + """Generate HOG feature for images. + + This module is used in MaskFeat to generate HOG feature. 
The code is + modified from file `slowfast/models/operators.py + `_. + Here is the link of `HOG wikipedia + `_. + + Args: + nbins (int): Number of bin. Defaults to 9. + pool (float): Number of cell. Defaults to 8. + gaussian_window (int): Size of gaussian kernel. Defaults to 16. + """ + + def __init__(self, + nbins: int = 9, + pool: int = 8, + gaussian_window: int = 16) -> None: + super().__init__() + self.nbins = nbins + self.pool = pool + self.pi = math.pi + weight_x = torch.FloatTensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]) + weight_x = weight_x.view(1, 1, 3, 3).repeat(3, 1, 1, 1).contiguous() + weight_y = weight_x.transpose(2, 3).contiguous() + self.register_buffer('weight_x', weight_x) + self.register_buffer('weight_y', weight_y) + + self.gaussian_window = gaussian_window + if gaussian_window: + gaussian_kernel = self.get_gaussian_kernel(gaussian_window, + gaussian_window // 2) + self.register_buffer('gaussian_kernel', gaussian_kernel) + + def get_gaussian_kernel(self, kernlen: int, std: int) -> torch.Tensor: + """Returns a 2D Gaussian kernel array.""" + + def _gaussian_fn(kernlen: int, std: int) -> torch.Tensor: + n = torch.arange(0, kernlen).float() + n -= n.mean() + n /= std + w = torch.exp(-0.5 * n**2) + return w + + kernel_1d = _gaussian_fn(kernlen, std) + kernel_2d = kernel_1d[:, None] * kernel_1d[None, :] + return kernel_2d / kernel_2d.sum() + + def _reshape(self, hog_feat: torch.Tensor) -> torch.Tensor: + """Reshape HOG Features for output.""" + hog_feat = hog_feat.flatten(1, 2) + self.unfold_size = hog_feat.shape[-1] // 14 + hog_feat = hog_feat.permute(0, 2, 3, 1) + hog_feat = hog_feat.unfold(1, self.unfold_size, + self.unfold_size).unfold( + 2, self.unfold_size, self.unfold_size) + hog_feat = hog_feat.flatten(1, 2).flatten(2) + return hog_feat + + @torch.no_grad() + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Generate hog feature for each batch images. + + Args: + x (torch.Tensor): Input images of shape (N, 3, H, W). + + Returns: + torch.Tensor: Hog features. 
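+
+        Example (illustrative sketch, not part of the original implementation;
+        the output shape assumes the default arguments and a 224x224 input)::
+
+            >>> hog_generator = HOGGenerator()
+            >>> feat = hog_generator(torch.randn(2, 3, 224, 224))
+            >>> feat.shape
+            torch.Size([2, 196, 108])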
+ """ + # input is RGB image with shape [B 3 H W] + self.h, self.w = x.size(-2), x.size(-1) + x = F.pad(x, pad=(1, 1, 1, 1), mode='reflect') + gx_rgb = F.conv2d( + x, self.weight_x, bias=None, stride=1, padding=0, groups=3) + gy_rgb = F.conv2d( + x, self.weight_y, bias=None, stride=1, padding=0, groups=3) + norm_rgb = torch.stack([gx_rgb, gy_rgb], dim=-1).norm(dim=-1) + phase = torch.atan2(gx_rgb, gy_rgb) + phase = phase / self.pi * self.nbins # [-9, 9] + + b, c, h, w = norm_rgb.shape + out = torch.zeros((b, c, self.nbins, h, w), + dtype=torch.float, + device=x.device) + phase = phase.view(b, c, 1, h, w) + norm_rgb = norm_rgb.view(b, c, 1, h, w) + if self.gaussian_window: + if h != self.gaussian_window: + assert h % self.gaussian_window == 0, 'h {} gw {}'.format( + h, self.gaussian_window) + repeat_rate = h // self.gaussian_window + temp_gaussian_kernel = self.gaussian_kernel.repeat( + [repeat_rate, repeat_rate]) + else: + temp_gaussian_kernel = self.gaussian_kernel + norm_rgb *= temp_gaussian_kernel + + out.scatter_add_(2, phase.floor().long() % self.nbins, norm_rgb) + + out = out.unfold(3, self.pool, self.pool) + out = out.unfold(4, self.pool, self.pool) + out = out.sum(dim=[-1, -2]) + + self.out = F.normalize(out, p=2, dim=2) + + return self._reshape(self.out) + + def generate_hog_image(self, hog_out: torch.Tensor) -> np.ndarray: + """Generate HOG image according to HOG features.""" + assert hog_out.size(0) == 1 and hog_out.size(1) == 3, \ + 'Check the input batch size and the channcel number, only support'\ + '"batch_size = 1".' + hog_image = np.zeros([self.h, self.w]) + cell_gradient = np.array(hog_out.mean(dim=1).squeeze().detach().cpu()) + cell_width = self.pool / 2 + max_mag = np.array(cell_gradient).max() + angle_gap = 360 / self.nbins + + for x in range(cell_gradient.shape[1]): + for y in range(cell_gradient.shape[2]): + cell_grad = cell_gradient[:, x, y] + cell_grad /= max_mag + angle = 0 + for magnitude in cell_grad: + angle_radian = math.radians(angle) + x1 = int(x * self.pool + + magnitude * cell_width * math.cos(angle_radian)) + y1 = int(y * self.pool + + magnitude * cell_width * math.sin(angle_radian)) + x2 = int(x * self.pool - + magnitude * cell_width * math.cos(angle_radian)) + y2 = int(y * self.pool - + magnitude * cell_width * math.sin(angle_radian)) + magnitude = 0 if magnitude < 0 else magnitude + cv2.line(hog_image, (y1, x1), (y2, x2), + int(255 * math.sqrt(magnitude))) + angle += angle_gap + return hog_image + + +@MODELS.register_module() +class MaskFeatViT(VisionTransformer): + """Vision Transformer for MaskFeat pre-training. + + A PyTorch implement of: `Masked Feature Prediction for Self-Supervised + Visual Pre-Training `_. + + Args: + arch (str | dict): Vision Transformer architecture + Default: 'b' + img_size (int | tuple): Input image size + patch_size (int | tuple): The patch size + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + out_type (str): The type of output features. Please choose from + + - ``"cls_token"``: The class token tensor with shape (B, C). + - ``"featmap"``: The feature map tensor from the patch tokens + with shape (B, C, H, W). 
+ - ``"avg_featmap"``: The global averaged feature map tensor + with shape (B, C). + - ``"raw"``: The raw feature tensor includes patch tokens and + class tokens with shape (B, L, C). + + It only works without input mask. Defaults to ``"avg_featmap"``. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + arch: Union[str, dict] = 'b', + img_size: int = 224, + patch_size: int = 16, + out_indices: Union[Sequence, int] = -1, + drop_rate: float = 0, + drop_path_rate: float = 0, + norm_cfg: dict = dict(type='LN', eps=1e-6), + final_norm: bool = True, + out_type: str = 'raw', + interpolate_mode: str = 'bicubic', + patch_cfg: dict = dict(), + layer_cfgs: dict = dict(), + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + arch=arch, + img_size=img_size, + patch_size=patch_size, + out_indices=out_indices, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + norm_cfg=norm_cfg, + final_norm=final_norm, + out_type=out_type, + with_cls_token=True, + interpolate_mode=interpolate_mode, + patch_cfg=patch_cfg, + layer_cfgs=layer_cfgs, + init_cfg=init_cfg) + + self.mask_token = nn.parameter.Parameter( + torch.zeros(1, 1, self.embed_dims), requires_grad=True) + self.num_patches = self.patch_resolution[0] * self.patch_resolution[1] + + def init_weights(self) -> None: + """Initialize position embedding, mask token and cls token.""" + super().init_weights() + if not (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + + nn.init.trunc_normal_(self.cls_token, std=.02) + nn.init.trunc_normal_(self.mask_token, std=.02) + nn.init.trunc_normal_(self.pos_embed, std=.02) + + self.apply(self._init_weights) + + def _init_weights(self, m: torch.nn.Module) -> None: + if isinstance(m, (nn.Linear, nn.Conv2d, nn.Conv3d)): + nn.init.trunc_normal_(m.weight, std=0.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward(self, x: torch.Tensor, + mask: Optional[torch.Tensor]) -> torch.Tensor: + """Generate features for masked images. + + The function supports two kind of forward behaviors. If the ``mask`` is + not ``None``, the forward function will be executed as masked image + modeling pre-training; if the ``mask`` is ``None``, the forward + function will call ``super().forward()``, which extract features from + images without mask. + + Args: + x (torch.Tensor): Input images. + mask (torch.Tensor, optional): Input masks. + + Returns: + torch.Tensor: Features with cls_tokens. 
+ """ + if mask is None: + return super().forward(x) + + else: + B = x.shape[0] + x = self.patch_embed(x)[0] + + # masking: length -> length * mask_ratio + B, L, _ = x.shape + mask_tokens = self.mask_token.expand(B, L, -1) + mask = mask.unsqueeze(-1) + x = x * (1 - mask.int()) + mask_tokens * mask + + # append cls token + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + x = x + self.pos_embed + x = self.drop_after_pos(x) + + for i, layer in enumerate(self.layers): + x = layer(x) + + if i == len(self.layers) - 1 and self.final_norm: + x = self.norm1(x) + + return x + + +@MODELS.register_module() +class MaskFeat(BaseSelfSupervisor): + """MaskFeat. + + Implementation of `Masked Feature Prediction for Self-Supervised Visual + Pre-Training `_. + """ + + def extract_feat(self, inputs: torch.Tensor): + return self.backbone(inputs, mask=None) + + def loss(self, inputs: torch.Tensor, data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (torch.Tensor): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + mask = torch.stack([data_sample.mask for data_sample in data_samples]) + mask = mask.flatten(1).bool() + + latent = self.backbone(inputs, mask) + B, L, C = latent.shape + pred = self.neck((latent.view(B * L, C), )) + pred = pred[0].view(B, L, -1) + hog = self.target_generator(inputs) + + # remove cls_token before compute loss + loss = self.head.loss(pred[:, 1:], hog, mask) + losses = dict(loss=loss) + return losses diff --git a/mmpretrain/models/selfsup/mff.py b/mmpretrain/models/selfsup/mff.py new file mode 100644 index 0000000..2685058 --- /dev/null +++ b/mmpretrain/models/selfsup/mff.py @@ -0,0 +1,194 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn.functional as F + +from mmpretrain.models.selfsup.mae import MAE, MAEViT +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample + + +@MODELS.register_module() +class MFFViT(MAEViT): + """Vision Transformer for MFF Pretraining. + + This class inherits all these functionalities from ``MAEViT``, and + add multi-level feature fusion to it. For more details, you can + refer to `Improving Pixel-based MIM by Reducing Wasted Modeling + Capability`. + + Args: + arch (str | dict): Vision Transformer architecture + Default: 'b' + img_size (int | tuple): Input image size + patch_size (int | tuple): The patch size + out_indices (Sequence | int): Output from which stages. + Defaults to -1, means the last stage. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + final_norm (bool): Whether to add a additional layer to normalize + final feature map. Defaults to True. + out_type (str): The type of output features. Please choose from + + - ``"cls_token"``: The class token tensor with shape (B, C). + - ``"featmap"``: The feature map tensor from the patch tokens + with shape (B, C, H, W). + - ``"avg_featmap"``: The global averaged feature map tensor + with shape (B, C). + - ``"raw"``: The raw feature tensor includes patch tokens and + class tokens with shape (B, L, C). + + It only works without input mask. 
Defaults to ``"avg_featmap"``. + interpolate_mode (str): Select the interpolate mode for position + embeding vector resize. Defaults to "bicubic". + patch_cfg (dict): Configs of patch embeding. Defaults to an empty dict. + layer_cfgs (Sequence | dict): Configs of each transformer layer in + encoder. Defaults to an empty dict. + mask_ratio (bool): The ratio of total number of patches to be masked. + Defaults to 0.75. + init_cfg (Union[List[dict], dict], optional): Initialization config + dict. Defaults to None. + """ + + def __init__(self, + arch: Union[str, dict] = 'b', + img_size: int = 224, + patch_size: int = 16, + out_indices: Union[Sequence, int] = -1, + drop_rate: float = 0, + drop_path_rate: float = 0, + norm_cfg: dict = dict(type='LN', eps=1e-6), + final_norm: bool = True, + out_type: str = 'raw', + interpolate_mode: str = 'bicubic', + patch_cfg: dict = dict(), + layer_cfgs: dict = dict(), + mask_ratio: float = 0.75, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + arch=arch, + img_size=img_size, + patch_size=patch_size, + out_indices=out_indices, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + norm_cfg=norm_cfg, + final_norm=final_norm, + out_type=out_type, + interpolate_mode=interpolate_mode, + patch_cfg=patch_cfg, + layer_cfgs=layer_cfgs, + mask_ratio=mask_ratio, + init_cfg=init_cfg) + proj_layers = [ + torch.nn.Linear(self.embed_dims, self.embed_dims) + for _ in range(len(self.out_indices) - 1) + ] + self.proj_layers = torch.nn.ModuleList(proj_layers) + self.proj_weights = torch.nn.Parameter( + torch.ones(len(self.out_indices)).view(-1, 1, 1, 1)) + if len(self.out_indices) == 1: + self.proj_weights.requires_grad = False + + def forward( + self, + x: torch.Tensor, + mask: Optional[bool] = True + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Generate features for masked images. + + The function supports two kind of forward behaviors. If the ``mask`` is + ``True``, the function will generate mask to masking some patches + randomly and get the hidden features for visible patches, which means + the function will be executed as masked imagemodeling pre-training; + if the ``mask`` is ``None`` or ``False``, the forward function will + call ``super().forward()``, which extract features from images without + mask. + + + Args: + x (torch.Tensor): Input images, which is of shape B x C x H x W. + mask (bool, optional): To indicate whether the forward function + generating ``mask`` or not. + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: Hidden features, + mask and the ids to restore original image. + + - ``x`` (torch.Tensor): hidden features, which is of shape + B x (L * mask_ratio) x C. + - ``mask`` (torch.Tensor): mask used to mask image. + - ``ids_restore`` (torch.Tensor): ids to restore original image. 
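+
+        Example (illustrative sketch, not part of the original implementation;
+        the ``out_indices`` below are a hypothetical choice. Note that, in
+        addition to the three tensors documented above, the softmax-normalized
+        fusion weights are also returned)::
+
+            >>> model = MFFViT(out_indices=[0, 2, 4, 6, 8, 10, 11])
+            >>> imgs = torch.randn(2, 3, 224, 224)
+            >>> latent, mask, ids_restore, weights = model(imgs, mask=True)
+            >>> # `weights` holds one fusion weight per entry in `out_indices`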
+ """ + if mask is None or False: + return super().forward(x) + + else: + B = x.shape[0] + x = self.patch_embed(x)[0] + # add pos embed w/o cls token + x = x + self.pos_embed[:, 1:, :] + + # masking: length -> length * mask_ratio + x, mask, ids_restore = self.random_masking(x, self.mask_ratio) + + # append cls token + cls_token = self.cls_token + self.pos_embed[:, :1, :] + cls_tokens = cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + res = [] + for i, layer in enumerate(self.layers): + x = layer(x) + if i in self.out_indices: + if i != self.out_indices[-1]: + proj_x = self.proj_layers[self.out_indices.index(i)](x) + else: + proj_x = x + res.append(proj_x) + res = torch.stack(res) + proj_weights = F.softmax(self.proj_weights, dim=0) + res = res * proj_weights + res = res.sum(dim=0) + + # Use final norm + x = self.norm1(res) + return (x, mask, ids_restore, proj_weights.view(-1)) + + +@MODELS.register_module() +class MFF(MAE): + """MFF. + + Implementation of `Improving Pixel-based MIM by Reducing Wasted Modeling + Capability`. + """ + + def loss(self, inputs: torch.Tensor, data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (torch.Tensor): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + # ids_restore: the same as that in original repo, which is used + # to recover the original order of tokens in decoder. + latent, mask, ids_restore, weights = self.backbone(inputs) + pred = self.neck(latent, ids_restore) + loss = self.head.loss(pred, inputs, mask) + weight_params = { + f'weight_{i}': weights[i] + for i in range(weights.size(0)) + } + losses = dict(loss=loss) + losses.update(weight_params) + return losses diff --git a/mmpretrain/models/selfsup/milan.py b/mmpretrain/models/selfsup/milan.py new file mode 100644 index 0000000..fdf8673 --- /dev/null +++ b/mmpretrain/models/selfsup/milan.py @@ -0,0 +1,202 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple + +import torch +import torch.nn as nn +from mmengine.runner.checkpoint import _load_checkpoint + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from ..utils import build_clip_model +from .base import BaseSelfSupervisor +from .mae import MAEViT + + +@MODELS.register_module() +class CLIPGenerator(nn.Module): + """Get the features and attention from the last layer of CLIP. + + This module is used to generate target features in masked image modeling. + + Args: + tokenizer_path (str): The path of the checkpoint of CLIP. + """ + + def __init__(self, tokenizer_path: str) -> None: + super().__init__() + self.tokenizer_path = tokenizer_path + self.tokenizer = build_clip_model( + _load_checkpoint(self.tokenizer_path), False) + + @torch.no_grad() + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Get the features and attention from the last layer of CLIP. + + Args: + x (torch.Tensor): The input image, which is of shape (N, 3, H, W). + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The features and attention from + the last layer of CLIP, which are of shape (N, L, C) and (N, L, L), + respectively. + """ + # use the visual branch of CLIP to get the features + assert self.tokenizer is not None, 'Please check whether the ' \ + '`self.tokenizer` is initialized correctly.' 
+ + clip_features = self.tokenizer.encode_image(x) + return clip_features + + +@MODELS.register_module() +class MILANViT(MAEViT): + """Vision Transformer for MILAN pre-training. + + Implementation of the encoder for `MILAN: Masked Image Pretraining on + Language Assisted Representation `_. + + This module inherits from MAEViT and only overrides the forward function + and replace random masking with attention masking. + """ + + def attention_masking( + self, x: torch.Tensor, mask_ratio: float, importance: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Generate attention mask for MILAN. + + This is what is different from MAEViT, which uses random masking. + Attention masking generates attention mask for MILAN, according to + importance. The higher the importance, the more likely the patch is + kept. + + Args: + x (torch.Tensor): Input images, which is of shape B x L x C. + mask_ratio (float): The ratio of patches to be masked. + importance (torch.Tensor): Importance of each patch, which is of + shape B x L. + + Returns: + Tuple[torch.Tensor, ...]: + + - ``x_masked``: masked image + - ``ids_restore``: the ids to restore original image + - ``ids_keep``: ids of the kept patches + - ``ids_dump``: ids of the removed patches + """ + N, L, D = x.shape # batch, length, dim + len_keep = int(L * (1 - mask_ratio)) + + noise = importance.to(x.device) # large is keep, small is remove + + # sort noise for each sample + ids_shuffle = torch.multinomial(noise, L, replacement=False) + ids_restore = torch.argsort(ids_shuffle, dim=1) + + # keep the first subset + ids_keep = ids_shuffle[:, :len_keep] + ids_dump = ids_shuffle[:, len_keep:] + x_masked = torch.gather( + x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D)) + + # generate the binary mask: 0 is keep, 1 is remove + mask = torch.ones([N, L], device=x.device) + mask[:, :len_keep] = 0 + # unshuffle to get the binary mask + mask = torch.gather(mask, dim=1, index=ids_restore) + + return x_masked, ids_restore, ids_keep, ids_dump + + def forward( + self, + x: torch.Tensor, + importance: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Generate features for masked images. + + The function supports two kind of forward behaviors. If the + ``importance`` is ``None``, the function generates mask and masks some + patches randomly and get the hidden features for visible patches. The + mask is generated by importance. The higher the importance, the more + likely the patch is kept. The importance is calculated by CLIP. + The higher the CLIP score, the more likely the patch is kept. The CLIP + score is calculated by cross attention between the class token and all + other tokens from the last layer. + If the ``importance`` is ``torch.Tensor``, the forward function will + call ``super().forward()``, which extract features from images without + mask. + + Args: + x (torch.Tensor): Input images, which is of shape B x C x H x W. + importance (torch.Tensor, optional): Importance of each patch, + which is of shape B x L. + + Returns: + Tuple[torch.Tensor, ...]: masked image, the ids to restore original + image, ids of the kept patches, ids of the removed patches. + + - ``x`` (torch.Tensor): hidden features, which is of shape + B x (L * mask_ratio) x C. + - ``ids_restore`` (torch.Tensor): ids to restore original image. + - ``ids_keep`` (torch.Tensor): ids of the kept patches. + - ``ids_dump`` (torch.Tensor): ids of the removed patches. 
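# A small, self-contained sketch (not part of the patched files) of the
# importance-driven masking described in ``attention_masking`` above: tokens
# are sampled without replacement in proportion to an importance score (a
# CLIP attention map in MILAN), and the first ``len_keep`` samples are kept.
# The sizes below are illustrative assumptions.
import torch

B, L, D, mask_ratio = 2, 16, 8, 0.75
x = torch.randn(B, L, D)
importance = torch.rand(B, L).softmax(dim=1)  # stand-in for the CLIP scores

len_keep = int(L * (1 - mask_ratio))
ids_shuffle = torch.multinomial(importance, L, replacement=False)
ids_restore = torch.argsort(ids_shuffle, dim=1)
ids_keep = ids_shuffle[:, :len_keep]

x_masked = torch.gather(x, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, D))
print(x_masked.shape)  # torch.Size([2, 4, 8])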
+ """ + if importance is None: + return super(MAEViT, self).forward(x) + + else: + B = x.shape[0] + x = self.patch_embed(x)[0] + # add pos embed w/o cls token + x = x + self.pos_embed[:, 1:, :] + + # masking: length -> length * mask_ratio + x, ids_restore, ids_keep, ids_dump = self.attention_masking( + x, self.mask_ratio, importance) + + # append cls token + cls_token = self.cls_token + self.pos_embed[:, :1, :] + cls_tokens = cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + for _, layer in enumerate(self.layers): + x = layer(x) + # Use final norm + x = self.norm1(x) + + return x, ids_restore, ids_keep, ids_dump + + +@MODELS.register_module() +class MILAN(BaseSelfSupervisor): + """MILAN. + + Implementation of `MILAN: Masked Image Pretraining on Language Assisted + Representation `_. + """ + + def extract_feat(self, inputs: torch.Tensor): + return self.backbone(inputs, importance=None) + + def loss(self, inputs: torch.Tensor, data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (torch.Tensor): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + # ids_restore: the same as that in original repo, which is used + # to recover the original order of tokens in decoder. + clip_feature, importance = self.target_generator(inputs) + importance = importance[:, 0, 1:] + latent, ids_restore, ids_keep, ids_dump = self.backbone( + inputs, importance) + pred = self.neck(latent, ids_restore, ids_keep, ids_dump) + + loss = self.head.loss(pred, clip_feature) + losses = dict(loss=loss) + return losses diff --git a/mmpretrain/models/selfsup/mixmim.py b/mmpretrain/models/selfsup/mixmim.py new file mode 100644 index 0000000..b202f83 --- /dev/null +++ b/mmpretrain/models/selfsup/mixmim.py @@ -0,0 +1,263 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random +from typing import Dict, List, Optional, Tuple, Union + +import torch +from torch import nn +from torch.nn import functional as F + +from mmpretrain.models.backbones import MixMIMTransformer +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from ..utils import build_2d_sincos_position_embedding +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class MixMIMPretrainTransformer(MixMIMTransformer): + """MixMIM backbone for MixMIM pre-training. + + A PyTorch implement of : ` MixMIM: Mixed and Masked Image + Modeling for Efficient Visual Representation Learning + `_ + + Args: + arch (str | dict): MixMIM architecture. If use string, + choose from 'base','large' and 'huge'. + If use dict, it should have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **depths** (int): The number of transformer encoder layers. + - **num_heads** (int): The number of heads in attention modules. + + Defaults to 'base'. + mlp_ratio (int): The mlp ratio in FFN. Defaults to 4. + img_size (int | tuple): The expected input image shape. Because we + support dynamic input shape, just set the argument to mlp_ratio + the most common input image shape. Defaults to 224. + patch_size (int | tuple): The patch size in patch embedding. + Defaults to 16. + in_channels (int): The num of input channels. Defaults to 3. + window_size (list): The height and width of the window. + qkv_bias (bool): Whether to add bias for qkv in attention modules. + Defaults to True. 
+ patch_cfg (dict): Extra config dict for patch embedding. + Defaults to an empty dict. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + drop_rate (float): Probability of an element to be zeroed. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. Defaults to 0. + attn_drop_rate (float): Attention drop rate. Defaults to 0. + use_checkpoint (bool): Whether use the checkpoint to reduce GPU memory + cost. Defaults to False. + mask_ratio (bool): The base ratio of total number of patches to be + masked. Defaults to 0.5. + range_mask_ratio (float): The range of mask ratio. + Defaults to 0. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + arch: Union[str, dict] = 'base', + mlp_ratio: float = 4, + img_size: int = 224, + patch_size: int = 4, + in_channels: int = 3, + window_size: List = [14, 14, 14, 7], + qkv_bias: bool = True, + patch_cfg: dict = dict(), + norm_cfg: dict = dict(type='LN'), + drop_rate: float = 0.0, + drop_path_rate: float = 0.0, + attn_drop_rate: float = 0.0, + use_checkpoint: bool = False, + mask_ratio: float = 0.5, + range_mask_ratio: float = 0.0, + init_cfg: Optional[dict] = None) -> None: + + super().__init__( + arch=arch, + mlp_ratio=mlp_ratio, + img_size=img_size, + patch_size=patch_size, + in_channels=in_channels, + window_size=window_size, + qkv_bias=qkv_bias, + patch_cfg=patch_cfg, + norm_cfg=norm_cfg, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + attn_drop_rate=attn_drop_rate, + use_checkpoint=use_checkpoint, + init_cfg=init_cfg) + + self.mask_ratio = mask_ratio + self.range_mask_ratio = range_mask_ratio + + def init_weights(self): + """Initialize position embedding, patch embedding.""" + super(MixMIMTransformer, self).init_weights() + + pos_embed = build_2d_sincos_position_embedding( + int(self.num_patches**.5), + self.absolute_pos_embed.shape[-1], + cls_token=False) + self.absolute_pos_embed.data.copy_(pos_embed.float()) + + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + # we use xavier_uniform following official JAX ViT: + torch.nn.init.xavier_uniform_(m.weight) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def random_masking(self, + x: torch.Tensor, + mask_ratio: float = 0.5) -> Tuple[torch.Tensor]: + """Generate the mask for MixMIM Pretraining. + + Args: + x (torch.Tensor): Image with data augmentation applied, which is + of shape B x L x C. + mask_ratio (float): The mask ratio of total patches. + Defaults to 0.5. + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + - mask_s1 (torch.Tensor): mask with stride of + self.encoder_stride // 8. + - mask_s2 (torch.Tensor): mask with stride of + self.encoder_stride // 4. + - mask_s3 (torch.Tensor): mask with stride of + self.encoder_stride // 2. + - mask (torch.Tensor): mask with stride of + self.encoder_stride. 
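# An illustrative sketch (not part of the patched files) of the multi-scale
# mask construction documented above: one shared coarse mask is drawn at the
# resolution of the last stage and nearest-neighbour upsampled for the earlier
# stages. The concrete sizes below are assumptions for demonstration only.
import torch
import torch.nn.functional as F

out_H = out_W = 7            # feature-map size of the last stage
seq_l = out_H * out_W
mask_ratio = 0.5

noise = torch.rand(1, 1, seq_l)
mask_idx = torch.argsort(noise, dim=2)[:, :, :int(seq_l * mask_ratio)]
mask = torch.zeros(1, 1, seq_l).scatter_(2, mask_idx, 1)
mask = mask.reshape(1, 1, out_H, out_W)

# the same mask, dilated to the strides of the earlier stages
mask_s3 = F.interpolate(mask, size=(out_H * 2, out_W * 2), mode='nearest')
mask_s1 = F.interpolate(mask, size=(out_H * 8, out_W * 8), mode='nearest')
print(mask.mean().item(), mask_s1.mean().item())  # identical masked fraction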
+ """ + + B, C, H, W = x.shape + out_H = H // self.encoder_stride + out_W = W // self.encoder_stride + s3_H, s3_W = out_H * 2, out_W * 2 + s2_H, s2_W = out_H * 4, out_W * 4 + s1_H, s1_W = out_H * 8, out_W * 8 + + seq_l = out_H * out_W + # use a shared mask for a batch images + mask = torch.zeros([1, 1, seq_l], device=x.device) + + mask_ratio = mask_ratio + random.uniform(0.0, self.range_mask_ratio) + noise = torch.rand(1, 1, seq_l, device=x.device) # noise in [0, 1] + # ascend: small is keep, large is removed + mask_idx = torch.argsort(noise, dim=2)[:, :, :int(seq_l * mask_ratio)] + mask.scatter_(2, mask_idx, 1) + mask = mask.reshape(1, 1, out_H, out_W) + mask_s1 = F.interpolate(mask, size=(s1_H, s1_W), mode='nearest') + mask_s2 = F.interpolate(mask, size=(s2_H, s2_W), mode='nearest') + mask_s3 = F.interpolate(mask, size=(s3_H, s3_W), mode='nearest') + + mask = mask.reshape(1, out_H * out_W, 1).contiguous() + mask_s1 = mask_s1.reshape(1, s1_H * s1_W, 1).contiguous() + mask_s2 = mask_s2.reshape(1, s2_H * s2_W, 1).contiguous() + mask_s3 = mask_s3.reshape(1, s3_H * s3_W, 1).contiguous() + + return mask_s1, mask_s2, mask_s3, mask + + def forward(self, + x: torch.Tensor, + mask: Optional[bool] = True) -> Tuple[torch.Tensor]: + """Generate features for masked images. + + This function generates mask and masks some patches randomly and get + the hidden features for visible patches. + + Args: + x (torch.Tensor): Input images, which is of shape B x C x H x W. + mask (bool, optional): To indicate whether the forward containing + ``mask`` or not. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: + - x (torch.Tensor): hidden features, which is of shape + B x L x C. + - mask_s4 (torch.Tensor): the mask tensor for the last layer. + """ + if mask is None or False: + return super().forward(x) + + else: + mask_s1, mask_s2, mask_s3, mask_s4 = self.random_masking( + x, self.mask_ratio) + + x, _ = self.patch_embed(x) + + x = x * (1. - mask_s1) + x.flip(0) * mask_s1 + x = x + self.absolute_pos_embed + x = self.drop_after_pos(x) + + for idx, layer in enumerate(self.layers): + if idx == 0: + x = layer(x, attn_mask=mask_s1) + elif idx == 1: + x = layer(x, attn_mask=mask_s2) + elif idx == 2: + x = layer(x, attn_mask=mask_s3) + elif idx == 3: + x = layer(x, attn_mask=mask_s4) + + x = self.norm(x) + + return x, mask_s4 + + +@MODELS.register_module() +class MixMIM(BaseSelfSupervisor): + """MixMIM. + + Implementation of `MixMIM: Mixed and Masked Image Modeling for Efficient + Visual Representation Learning. `_. + """ + + def __init__(self, + backbone: dict, + neck: Optional[dict] = None, + head: Optional[dict] = None, + pretrained: Optional[str] = None, + data_preprocessor: Optional[Union[dict, nn.Module]] = None, + init_cfg: Optional[dict] = None): + + head.update(dict(patch_size=neck['encoder_stride'])) + super().__init__( + backbone=backbone, + neck=neck, + head=head, + pretrained=pretrained, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + def extract_feat(self, inputs: torch.Tensor): + return self.backbone(inputs, mask=None) + + def loss(self, inputs: torch.Tensor, data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (torch.Tensor): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. 
+ """ + latent, mask = self.backbone(inputs) + x_rec = self.neck(latent, mask) + loss = self.head.loss(x_rec, inputs, mask) + losses = dict(loss=loss) + return losses diff --git a/mmpretrain/models/selfsup/moco.py b/mmpretrain/models/selfsup/moco.py new file mode 100644 index 0000000..7ff4cf8 --- /dev/null +++ b/mmpretrain/models/selfsup/moco.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn +from mmengine.dist import all_gather +from mmengine.model import ExponentialMovingAverage + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from ..utils import batch_shuffle_ddp, batch_unshuffle_ddp +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class MoCo(BaseSelfSupervisor): + """MoCo. + + Implementation of `Momentum Contrast for Unsupervised Visual + Representation Learning `_. + Part of the code is borrowed from: + ``_. + + Args: + backbone (dict): Config dict for module of backbone. + neck (dict): Config dict for module of deep features to compact feature + vectors. + head (dict): Config dict for module of head functions. + queue_len (int): Number of negative keys maintained in the + queue. Defaults to 65536. + feat_dim (int): Dimension of compact feature vectors. + Defaults to 128. + momentum (float): Momentum coefficient for the momentum-updated + encoder. Defaults to 0.001. + pretrained (str, optional): The pretrained checkpoint path, support + local path and remote path. Defaults to None. + data_preprocessor (dict, optional): The config for preprocessing + input data. If None or no specified type, it will use + "SelfSupDataPreprocessor" as type. + See :class:`SelfSupDataPreprocessor` for more details. + Defaults to None. + init_cfg (Union[List[dict], dict], optional): Config dict for weight + initialization. Defaults to None. + """ + + def __init__(self, + backbone: dict, + neck: dict, + head: dict, + queue_len: int = 65536, + feat_dim: int = 128, + momentum: float = 0.001, + pretrained: Optional[str] = None, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + backbone=backbone, + neck=neck, + head=head, + pretrained=pretrained, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + # create momentum model + self.encoder_k = ExponentialMovingAverage( + nn.Sequential(self.backbone, self.neck), momentum) + + # create the queue + self.queue_len = queue_len + self.register_buffer('queue', torch.randn(feat_dim, queue_len)) + self.queue = nn.functional.normalize(self.queue, dim=0) + self.register_buffer('queue_ptr', torch.zeros(1, dtype=torch.long)) + + @torch.no_grad() + def _dequeue_and_enqueue(self, keys: torch.Tensor) -> None: + """Update queue.""" + # gather keys before updating queue + keys = torch.cat(all_gather(keys), dim=0) + + batch_size = keys.shape[0] + + ptr = int(self.queue_ptr) + assert self.queue_len % batch_size == 0 # for simplicity + + # replace the keys at ptr (dequeue and enqueue) + self.queue[:, ptr:ptr + batch_size] = keys.transpose(0, 1) + ptr = (ptr + batch_size) % self.queue_len # move pointer + + self.queue_ptr[0] = ptr + + def loss(self, inputs: List[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. 
+ + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + assert isinstance(inputs, list) + im_q = inputs[0] + im_k = inputs[1] + # compute query features from encoder_q + q = self.neck(self.backbone(im_q))[0] # queries: NxC + q = nn.functional.normalize(q, dim=1) + + # compute key features + with torch.no_grad(): # no gradient to keys + # update the key encoder + self.encoder_k.update_parameters( + nn.Sequential(self.backbone, self.neck)) + + # shuffle for making use of BN + im_k, idx_unshuffle = batch_shuffle_ddp(im_k) + + k = self.encoder_k(im_k)[0] # keys: NxC + k = nn.functional.normalize(k, dim=1) + + # undo shuffle + k = batch_unshuffle_ddp(k, idx_unshuffle) + + # compute logits + # Einstein sum is more intuitive + # positive logits: Nx1 + l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1) + # negative logits: NxK + l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()]) + + loss = self.head.loss(l_pos, l_neg) + # update the queue + self._dequeue_and_enqueue(k) + + losses = dict(loss=loss) + return losses diff --git a/mmpretrain/models/selfsup/mocov3.py b/mmpretrain/models/selfsup/mocov3.py new file mode 100644 index 0000000..61b8033 --- /dev/null +++ b/mmpretrain/models/selfsup/mocov3.py @@ -0,0 +1,215 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from functools import reduce +from operator import mul +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import VisionTransformer +from mmpretrain.models.utils import (build_2d_sincos_position_embedding, + to_2tuple) +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from ..utils import CosineEMA +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class MoCoV3ViT(VisionTransformer): + """Vision Transformer for MoCoV3 pre-training. + + A pytorch implement of: `An Images is Worth 16x16 Words: Transformers for + Image Recognition at Scale `_. + + Part of the code is modified from: + ``_. + + Args: + stop_grad_conv1 (bool): whether to stop the gradient of + convolution layer in `PatchEmbed`. Defaults to False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
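# A self-contained sketch (not part of the patched files) of the two MoCo
# pieces implemented above: the einsum that builds the positive/negative
# logits in ``loss`` and the ring-buffer update in ``_dequeue_and_enqueue``.
# Batch size, feature dimension and queue length are illustrative assumptions.
import torch
import torch.nn.functional as F

N, C, K = 8, 128, 64                         # queue length K, with K % N == 0
q = F.normalize(torch.randn(N, C), dim=1)    # queries from the online encoder
k = F.normalize(torch.randn(N, C), dim=1)    # keys from the momentum encoder
queue = F.normalize(torch.randn(C, K), dim=0)
queue_ptr = 0

l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)  # (N, 1)
l_neg = torch.einsum('nc,ck->nk', [q, queue])           # (N, K)
logits = torch.cat([l_pos, l_neg], dim=1)               # positives are class 0

# ring-buffer update: overwrite the oldest keys and advance the pointer
queue[:, queue_ptr:queue_ptr + N] = k.t()
queue_ptr = (queue_ptr + N) % K
print(logits.shape, queue_ptr)  # torch.Size([8, 65]) 8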
+ """ + + def __init__(self, + stop_grad_conv1: bool = False, + frozen_stages: int = -1, + norm_eval: bool = False, + init_cfg: Optional[Union[dict, List[dict]]] = None, + **kwargs) -> None: + + # add MoCoV3 ViT-small arch + self.arch_zoo.update( + dict.fromkeys( + ['mocov3-s', 'mocov3-small'], { + 'embed_dims': 384, + 'num_layers': 12, + 'num_heads': 12, + 'feedforward_channels': 1536, + })) + + super().__init__(init_cfg=init_cfg, **kwargs) + self.patch_size = kwargs['patch_size'] + self.frozen_stages = frozen_stages + self.norm_eval = norm_eval + self.init_cfg = init_cfg + + if stop_grad_conv1: + self.patch_embed.projection.weight.requires_grad = False + self.patch_embed.projection.bias.requires_grad = False + + self._freeze_stages() + + def init_weights(self) -> None: + """Initialize position embedding, patch embedding, qkv layers and cls + token.""" + super().init_weights() + + if not (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + + # Use fixed 2D sin-cos position embedding + pos_emb = build_2d_sincos_position_embedding( + patches_resolution=self.patch_resolution, + embed_dims=self.embed_dims, + cls_token=True) + self.pos_embed.data.copy_(pos_emb) + self.pos_embed.requires_grad = False + + # xavier_uniform initialization for PatchEmbed + val = math.sqrt( + 6. / float(3 * reduce(mul, to_2tuple(self.patch_size), 1) + + self.embed_dims)) + nn.init.uniform_(self.patch_embed.projection.weight, -val, val) + nn.init.zeros_(self.patch_embed.projection.bias) + + # initialization for linear layers + for name, m in self.named_modules(): + if isinstance(m, nn.Linear): + if 'qkv' in name: + # treat the weights of Q, K, V separately + val = math.sqrt( + 6. / + float(m.weight.shape[0] // 3 + m.weight.shape[1])) + nn.init.uniform_(m.weight, -val, val) + else: + nn.init.xavier_uniform_(m.weight) + nn.init.zeros_(m.bias) + nn.init.normal_(self.cls_token, std=1e-6) + + def _freeze_stages(self) -> None: + """Freeze patch_embed layer, some parameters and stages.""" + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + self.cls_token.requires_grad = False + self.pos_embed.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = self.layers[i - 1] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + if i == (self.num_layers) and self.final_norm: + for param in getattr(self, 'norm1').parameters(): + param.requires_grad = False + + def train(self, mode: bool = True) -> None: + super().train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +@MODELS.register_module() +class MoCoV3(BaseSelfSupervisor): + """MoCo v3. + + Implementation of `An Empirical Study of Training Self-Supervised Vision + Transformers `_. + + Args: + backbone (dict): Config dict for module of backbone + neck (dict): Config dict for module of deep features to compact feature + vectors. + head (dict): Config dict for module of head functions. + base_momentum (float): Momentum coefficient for the momentum-updated + encoder. Defaults to 0.01. + pretrained (str, optional): The pretrained checkpoint path, support + local path and remote path. Defaults to None. + data_preprocessor (dict, optional): The config for preprocessing + input data. If None or no specified type, it will use + "SelfSupDataPreprocessor" as type. 
+ See :class:`SelfSupDataPreprocessor` for more details. + Defaults to None. + init_cfg (Union[List[dict], dict], optional): Config dict for weight + initialization. Defaults to None. + """ + + def __init__(self, + backbone: dict, + neck: dict, + head: dict, + base_momentum: float = 0.01, + pretrained: Optional[str] = None, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + backbone=backbone, + neck=neck, + head=head, + pretrained=pretrained, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + # create momentum model + self.momentum_encoder = CosineEMA( + nn.Sequential(self.backbone, self.neck), momentum=base_momentum) + + def loss(self, inputs: List[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + assert isinstance(inputs, list) + view_1 = inputs[0] + view_2 = inputs[1] + + # compute query features, [N, C] each + q1 = self.neck(self.backbone(view_1))[0] + q2 = self.neck(self.backbone(view_2))[0] + + # compute key features, [N, C] each, no gradient + with torch.no_grad(): + # update momentum encoder + self.momentum_encoder.update_parameters( + nn.Sequential(self.backbone, self.neck)) + + k1 = self.momentum_encoder(view_1)[0] + k2 = self.momentum_encoder(view_2)[0] + + loss = self.head.loss(q1, k2) + self.head.loss(q2, k1) + + losses = dict(loss=loss) + return losses diff --git a/mmpretrain/models/selfsup/simclr.py b/mmpretrain/models/selfsup/simclr.py new file mode 100644 index 0000000..4b19ab4 --- /dev/null +++ b/mmpretrain/models/selfsup/simclr.py @@ -0,0 +1,98 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Any, Dict, List, Tuple + +import torch +from mmengine.dist import all_gather, get_rank + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .base import BaseSelfSupervisor + + +class GatherLayer(torch.autograd.Function): + """Gather tensors from all process, supporting backward propagation.""" + + @staticmethod + def forward(ctx: Any, input: torch.Tensor) -> Tuple[List]: + ctx.save_for_backward(input) + output = all_gather(input) + return tuple(output) + + @staticmethod + def backward(ctx: Any, *grads: torch.Tensor) -> torch.Tensor: + input, = ctx.saved_tensors + grad_out = torch.zeros_like(input) + grad_out[:] = grads[get_rank()] + return grad_out + + +@MODELS.register_module() +class SimCLR(BaseSelfSupervisor): + """SimCLR. + + Implementation of `A Simple Framework for Contrastive Learning of Visual + Representations `_. + """ + + @staticmethod + def _create_buffer( + batch_size: int, device: torch.device + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Compute the mask and the index of positive samples. + + Args: + batch_size (int): The batch size. + device (torch.device): The device of backend. + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + - The mask for feature selection. + - The index of positive samples. + - The mask of negative samples. 
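# An illustrative sketch (not part of the patched files) of what the buffers
# built by ``_create_buffer`` achieve: for 2N L2-normalised embeddings ordered
# as (view 0, view 1) pairs, each row's positive is its partner view, and all
# remaining off-diagonal similarities are negatives. Sizes are assumptions.
import torch
import torch.nn.functional as F

N, d = 4, 16
z = F.normalize(torch.randn(2 * N, d), dim=1)  # rows 2k and 2k + 1 are a pair
s = z @ z.t()                                  # (2N, 2N) cosine similarities

partner = torch.arange(2 * N) ^ 1              # 0<->1, 2<->3, ...
positive = s[torch.arange(2 * N), partner].unsqueeze(1)  # (2N, 1)

neg_mask = ~torch.eye(2 * N, dtype=torch.bool)
neg_mask[torch.arange(2 * N), partner] = False  # drop the positives as well
negative = s[neg_mask].reshape(2 * N, 2 * N - 2)  # (2N, 2N - 2)
print(positive.shape, negative.shape)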
+ """ + mask = 1 - torch.eye(batch_size * 2, dtype=torch.uint8).to(device) + pos_idx = ( + torch.arange(batch_size * 2).to(device), + 2 * torch.arange(batch_size, dtype=torch.long).unsqueeze(1).repeat( + 1, 2).view(-1, 1).squeeze().to(device)) + neg_mask = torch.ones((batch_size * 2, batch_size * 2 - 1), + dtype=torch.uint8).to(device) + neg_mask[pos_idx] = 0 + return mask, pos_idx, neg_mask + + def loss(self, inputs: List[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + assert isinstance(inputs, list) + inputs = torch.stack(inputs, 1) + inputs = inputs.reshape((inputs.size(0) * 2, inputs.size(2), + inputs.size(3), inputs.size(4))) + x = self.backbone(inputs) + z = self.neck(x)[0] # (2n)xd + + z = z / (torch.norm(z, p=2, dim=1, keepdim=True) + 1e-10) + z = torch.cat(GatherLayer.apply(z), dim=0) # (2N)xd + assert z.size(0) % 2 == 0 + N = z.size(0) // 2 + s = torch.matmul(z, z.permute(1, 0)) # (2N)x(2N) + mask, pos_idx, neg_mask = self._create_buffer(N, s.device) + + # remove diagonal, (2N)x(2N-1) + s = torch.masked_select(s, mask == 1).reshape(s.size(0), -1) + positive = s[pos_idx].unsqueeze(1) # (2N)x1 + + # select negative, (2N)x(2N-2) + negative = torch.masked_select(s, neg_mask == 1).reshape(s.size(0), -1) + + loss = self.head.loss(positive, negative) + losses = dict(loss=loss) + return losses diff --git a/mmpretrain/models/selfsup/simmim.py b/mmpretrain/models/selfsup/simmim.py new file mode 100644 index 0000000..635a329 --- /dev/null +++ b/mmpretrain/models/selfsup/simmim.py @@ -0,0 +1,194 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.models import SwinTransformer +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class SimMIMSwinTransformer(SwinTransformer): + """Swin Transformer for SimMIM pre-training. + + Args: + Args: + arch (str | dict): Swin Transformer architecture + Defaults to 'T'. + img_size (int | tuple): The size of input image. + Defaults to 224. + in_channels (int): The num of input channels. + Defaults to 3. + drop_rate (float): Dropout rate after embedding. + Defaults to 0. + drop_path_rate (float): Stochastic depth rate. + Defaults to 0.1. + out_indices (tuple): Layers to be outputted. Defaults to (3, ). + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults to False. + with_cp (bool): Use checkpoint or not. Using checkpoint + will save some memory while slowing down the training speed. + Defaults to False. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. Defaults to -1. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. Defaults to False. + norm_cfg (dict): Config dict for normalization layer at end + of backbone. Defaults to dict(type='LN') + stage_cfgs (Sequence | dict): Extra config dict for each + stage. Defaults to empty dict. 
+ patch_cfg (dict): Extra config dict for patch embedding. + Defaults to empty dict. + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + arch: Union[str, dict] = 'T', + img_size: Union[Tuple[int, int], int] = 224, + in_channels: int = 3, + drop_rate: float = 0., + drop_path_rate: float = 0.1, + out_indices: tuple = (3, ), + use_abs_pos_embed: bool = False, + with_cp: bool = False, + frozen_stages: bool = -1, + norm_eval: bool = False, + norm_cfg: dict = dict(type='LN'), + stage_cfgs: Union[Sequence, dict] = dict(), + patch_cfg: dict = dict(), + pad_small_map: bool = False, + init_cfg: Optional[dict] = None) -> None: + super().__init__( + arch=arch, + img_size=img_size, + in_channels=in_channels, + drop_rate=drop_rate, + drop_path_rate=drop_path_rate, + out_indices=out_indices, + use_abs_pos_embed=use_abs_pos_embed, + with_cp=with_cp, + frozen_stages=frozen_stages, + norm_eval=norm_eval, + norm_cfg=norm_cfg, + stage_cfgs=stage_cfgs, + patch_cfg=patch_cfg, + pad_small_map=pad_small_map, + init_cfg=init_cfg) + + self.mask_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + + def init_weights(self) -> None: + """Initialize weights.""" + super().init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + + if self.use_abs_pos_embed: + trunc_normal_(self.absolute_pos_embed, std=0.02) + + trunc_normal_(self.mask_token, mean=0, std=.02) + + self.apply(self._init_weights) + + def _init_weights(self, m): + """Initialize weights.""" + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def forward(self, x: torch.Tensor, + mask: Optional[torch.Tensor]) -> Sequence[torch.Tensor]: + """Generate features for masked images. + + The function supports two kind of forward behaviors. If the ``mask`` is + not ``None``, the forward function will be executed as masked image + modeling pre-training; if the ``mask`` is ``None``, the forward + function will call ``super().forward()``, which extract features from + images without mask. + + Args: + x (torch.Tensor): Input images. + mask (torch.Tensor, optional): Masks for images. + + Returns: + tuple: A tuple containing features from multi-stages. + """ + if mask is None: + return super().forward(x) + + else: + x, hw_shape = self.patch_embed(x) + B, L, _ = x.shape + + mask_token = self.mask_token.expand(B, L, -1) + w = mask.flatten(1).unsqueeze(-1).type_as(mask_token) + x = x * (1. - w) + mask_token * w + + if self.use_abs_pos_embed: + x = x + self.absolute_pos_embed + + x = self.drop_after_pos(x) + + outs = [] + for i, stage in enumerate(self.stages): + x, hw_shape = stage(x, hw_shape) + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + out = norm_layer(x) + out = out.view(-1, *hw_shape, + stage.out_channels).permute(0, 3, 1, + 2).contiguous() + outs.append(out) + + return tuple(outs) + + +@MODELS.register_module() +class SimMIM(BaseSelfSupervisor): + """SimMIM. 
+ + Implementation of `SimMIM: A Simple Framework for Masked Image Modeling + `_. + """ + + def extract_feat(self, inputs: torch.Tensor): + return self.backbone(inputs, mask=None) + + def loss(self, inputs: torch.Tensor, data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + mask = torch.stack([data_sample.mask for data_sample in data_samples]) + + img_latent = self.backbone(inputs, mask) + img_rec = self.neck(img_latent[0]) + loss = self.head.loss(img_rec, inputs, mask) + losses = dict(loss=loss) + + return losses diff --git a/mmpretrain/models/selfsup/simsiam.py b/mmpretrain/models/selfsup/simsiam.py new file mode 100644 index 0000000..a502cd7 --- /dev/null +++ b/mmpretrain/models/selfsup/simsiam.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List + +import torch + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class SimSiam(BaseSelfSupervisor): + """SimSiam. + + Implementation of `Exploring Simple Siamese Representation Learning + `_. The operation of fixing learning rate + of predictor is in `engine/hooks/simsiam_hook.py`. + """ + + def loss(self, inputs: List[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + assert isinstance(inputs, list) + img_v1 = inputs[0] + img_v2 = inputs[1] + + z1 = self.neck(self.backbone(img_v1))[0] # NxC + z2 = self.neck(self.backbone(img_v2))[0] # NxC + + loss_1 = self.head.loss(z1, z2) + loss_2 = self.head.loss(z2, z1) + + losses = dict(loss=0.5 * (loss_1 + loss_2)) + return losses diff --git a/mmpretrain/models/selfsup/spark.py b/mmpretrain/models/selfsup/spark.py new file mode 100644 index 0000000..d5570a5 --- /dev/null +++ b/mmpretrain/models/selfsup/spark.py @@ -0,0 +1,163 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from ..utils.norm import build_norm_layer +from ..utils.sparse_modules import SparseHelper +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class SparK(BaseSelfSupervisor): + """Implementation of SparK. + + Implementation of `Designing BERT for Convolutional Networks: Sparse and + Hierarchical Masked Modeling `_. 
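# A sketch (not part of the patched files) of the criterion that the head used
# by ``SimSiam.loss`` above is expected to compute: negative cosine similarity
# with a stop-gradient on the target branch, symmetrised over the two views.
# The stand-in predictor ``h`` and the shapes are illustrative assumptions.
import torch
import torch.nn as nn
import torch.nn.functional as F


def neg_cosine(p: torch.Tensor, z: torch.Tensor) -> torch.Tensor:
    # stop gradient on the target projection, as in the SimSiam paper
    return -F.cosine_similarity(p, z.detach(), dim=1).mean()


C = 32
h = nn.Linear(C, C)  # stand-in for the prediction MLP inside the head
z1, z2 = torch.randn(4, C), torch.randn(4, C)
loss = 0.5 * (neg_cosine(h(z1), z2) + neg_cosine(h(z2), z1))
print(loss.item())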
+ + Modified from + https://github.com/keyu-tian/SparK/blob/main/pretrain/spark.py + """ + + def __init__( + self, + backbone: dict, + neck: dict, + head: dict, + pretrained: Optional[str] = None, + data_preprocessor: Optional[dict] = None, + input_size: int = 224, + downsample_raito: int = 32, + mask_ratio: float = 0.6, + enc_dec_norm_cfg=dict(type='SparseSyncBatchNorm2d'), + enc_dec_norm_dim: int = 2048, + init_cfg: Optional[dict] = None, + ) -> None: + super().__init__( + backbone=backbone, + neck=neck, + head=head, + pretrained=pretrained, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + self.input_size = input_size + self.downsample_raito = downsample_raito + feature_map_size = input_size // downsample_raito + self.feature_map_size = feature_map_size + + self.mask_ratio = mask_ratio + self.len_keep = round(feature_map_size * feature_map_size * + (1 - mask_ratio)) + + self.enc_dec_norm_cfg = enc_dec_norm_cfg + self.enc_dec_norms = nn.ModuleList() + self.enc_dec_projectors = nn.ModuleList() + self.mask_tokens = nn.ParameterList() + + proj_out_dim = self.neck.feature_dim + for i in range(len(self.backbone.out_indices)): + enc_dec_norm = build_norm_layer(self.enc_dec_norm_cfg, + enc_dec_norm_dim) + self.enc_dec_norms.append(enc_dec_norm) + + kernel_size = 1 if i <= 0 else 3 + proj_layer = nn.Conv2d( + enc_dec_norm_dim, + proj_out_dim, + kernel_size=kernel_size, + stride=1, + padding=kernel_size // 2, + bias=True) + if i == 0 and enc_dec_norm_dim == proj_out_dim: + proj_layer = nn.Identity() + self.enc_dec_projectors.append(proj_layer) + + mask_token = nn.Parameter(torch.zeros(1, enc_dec_norm_dim, 1, 1)) + trunc_normal_(mask_token, mean=0, std=.02, a=-.02, b=.02) + self.mask_tokens.append(mask_token) + + enc_dec_norm_dim //= 2 + proj_out_dim //= 2 + feature_map_size *= 2 + + def mask(self, + shape: torch.Size, + device: Union[torch.device, str], + generator: Optional[torch.Generator] = None): + """Mask generation. + + Args: + shape (torch.Size): The shape of the input images. + device (Union[torch.device, str]): The device of the tensor. + generator (torch.Generator, optional): Generator for random + functions. Defaults to None + Returns: + torch.Tensor: The generated mask. + """ + B, C, H, W = shape + f = self.feature_map_size + idx = torch.rand(B, f * f, generator=generator).argsort(dim=1) + idx = idx[:, :self.len_keep].to(device) # (B, len_keep) + return torch.zeros( + B, f * f, dtype=torch.bool, device=device).scatter_( + dim=1, index=idx, value=True).view(B, 1, f, f) + + def loss(self, inputs: torch.Tensor, data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. 
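# An illustrative sketch (not part of the patched files) of SparK's mask
# generation in ``mask()`` above and of how the boolean feature-map mask is
# dilated back to pixel resolution in ``loss()``. Sizes are assumptions.
import torch

B, f, downsample_ratio, mask_ratio = 2, 7, 32, 0.6
len_keep = round(f * f * (1 - mask_ratio))

idx = torch.rand(B, f * f).argsort(dim=1)[:, :len_keep]
active = torch.zeros(B, f * f, dtype=torch.bool).scatter_(
    dim=1, index=idx, value=True).view(B, 1, f, f)  # True = visible patch

# upsample to pixel space so the input can be sparsified before encoding
active_px = active.repeat_interleave(downsample_ratio, 2) \
                  .repeat_interleave(downsample_ratio, 3)
print(active.float().mean().item(), active_px.shape)  # ~0.4, (2, 1, 224, 224)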
+ """ + + # active mask of feature map, (B, 1, f, f) + active_mask_feature_map = self.mask(inputs.shape, inputs.device) + SparseHelper._cur_active = active_mask_feature_map + + # active mask of original input, (B, 1, H, W) + active_mask_origin = active_mask_feature_map.repeat_interleave( + self.downsample_raito, + 2).repeat_interleave(self.downsample_raito, 3) + masked_img = inputs * active_mask_origin + + # get hierarchical encoded sparse features in a list + # containing four feature maps + feature_maps = self.backbone(masked_img) + + # from the smallest feature map to the largest + feature_maps = list(feature_maps) + feature_maps.reverse() + + cur_active = active_mask_feature_map + feature_maps_to_dec = [] + for i, feature_map in enumerate(feature_maps): + if feature_map is not None: + # fill in empty positions with [mask] embeddings + feature_map = self.enc_dec_norms[i](feature_map) + mask_token = self.mask_tokens[i].expand_as(feature_map) + feature_map = torch.where( + cur_active.expand_as(feature_map), feature_map, + mask_token.to(feature_map.dtype)) + feature_map = self.enc_dec_projectors[i](feature_map) + feature_maps_to_dec.append(feature_map) + + # dilate the mask map + cur_active = cur_active.repeat_interleave( + 2, dim=2).repeat_interleave( + 2, dim=3) + + # decode and reconstruct + rec_img = self.neck(feature_maps_to_dec) + + # compute loss + loss = self.head(rec_img, inputs, active_mask_feature_map) + losses = dict(loss=loss) + return losses diff --git a/mmpretrain/models/selfsup/swav.py b/mmpretrain/models/selfsup/swav.py new file mode 100644 index 0000000..efe0eab --- /dev/null +++ b/mmpretrain/models/selfsup/swav.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List + +import torch + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .base import BaseSelfSupervisor + + +@MODELS.register_module() +class SwAV(BaseSelfSupervisor): + """SwAV. + + Implementation of `Unsupervised Learning of Visual Features by Contrasting + Cluster Assignments `_. + + The queue is built in ``mmpretrain/engine/hooks/swav_hook.py``. + """ + + def loss(self, inputs: List[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """Forward computation during training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + assert isinstance(inputs, list) + # multi-res forward passes + idx_crops = torch.cumsum( + torch.unique_consecutive( + torch.tensor([input.shape[-1] for input in inputs]), + return_counts=True)[1], 0) + start_idx = 0 + output = [] + for end_idx in idx_crops: + _out = self.backbone(torch.cat(inputs[start_idx:end_idx])) + output.append(_out) + start_idx = end_idx + output = self.neck(output) + + loss = self.head.loss(output) + losses = dict(loss=loss) + return losses diff --git a/mmpretrain/models/tta/__init__.py b/mmpretrain/models/tta/__init__.py new file mode 100644 index 0000000..568e64f --- /dev/null +++ b/mmpretrain/models/tta/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from .score_tta import AverageClsScoreTTA
+
+__all__ = ['AverageClsScoreTTA']
diff --git a/mmpretrain/models/tta/score_tta.py b/mmpretrain/models/tta/score_tta.py
new file mode 100644
index 0000000..5b8a078
--- /dev/null
+++ b/mmpretrain/models/tta/score_tta.py
@@ -0,0 +1,36 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import List
+
+from mmengine.model import BaseTTAModel
+
+from mmpretrain.registry import MODELS
+from mmpretrain.structures import DataSample
+
+
+@MODELS.register_module()
+class AverageClsScoreTTA(BaseTTAModel):
+
+    def merge_preds(
+        self,
+        data_samples_list: List[List[DataSample]],
+    ) -> List[DataSample]:
+        """Merge predictions of enhanced data to one prediction.
+
+        Args:
+            data_samples_list (List[List[DataSample]]): List of predictions
+                of all enhanced data.
+
+        Returns:
+            List[DataSample]: Merged prediction.
+        """
+        merged_data_samples = []
+        for data_samples in data_samples_list:
+            merged_data_samples.append(self._merge_single_sample(data_samples))
+        return merged_data_samples
+
+    def _merge_single_sample(self, data_samples):
+        merged_data_sample: DataSample = data_samples[0].new()
+        merged_score = sum(data_sample.pred_score
+                           for data_sample in data_samples) / len(data_samples)
+        merged_data_sample.set_pred_score(merged_score)
+        return merged_data_sample
diff --git a/mmpretrain/models/utils/__init__.py b/mmpretrain/models/utils/__init__.py
new file mode 100644
index 0000000..e59d71d
--- /dev/null
+++ b/mmpretrain/models/utils/__init__.py
@@ -0,0 +1,102 @@
+# Copyright (c) OpenMMLab. All rights reserved.
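# A tiny usage sketch (not part of the patched files) of the averaging done by
# ``AverageClsScoreTTA._merge_single_sample`` above: the prediction scores of
# the augmented views of one image are simply averaged. Numbers are made up.
import torch

view_scores = [torch.tensor([0.7, 0.2, 0.1]),
               torch.tensor([0.5, 0.4, 0.1]),
               torch.tensor([0.6, 0.3, 0.1])]
merged = sum(view_scores) / len(view_scores)
print(merged)  # tensor([0.6000, 0.3000, 0.1000])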
+from mmpretrain.utils.dependency import WITH_MULTIMODAL +from .attention import (BEiTAttention, ChannelMultiheadAttention, + CrossMultiheadAttention, LeAttention, + MultiheadAttention, PromptMultiheadAttention, + ShiftWindowMSA, WindowMSA, WindowMSAV2) +from .batch_augments import CutMix, Mixup, RandomBatchAugment, ResizeMix +from .batch_shuffle import batch_shuffle_ddp, batch_unshuffle_ddp +from .channel_shuffle import channel_shuffle +from .clip_generator_helper import QuickGELU, build_clip_model +from .data_preprocessor import (ClsDataPreprocessor, + MultiModalDataPreprocessor, + SelfSupDataPreprocessor, + TwoNormDataPreprocessor, VideoDataPreprocessor) +from .ema import CosineEMA +from .embed import (HybridEmbed, PatchEmbed, PatchMerging, resize_pos_embed, + resize_relative_position_bias_table) +from .helpers import is_tracing, to_2tuple, to_3tuple, to_4tuple, to_ntuple +from .inverted_residual import InvertedResidual +from .layer_scale import LayerScale +from .make_divisible import make_divisible +from .norm import GRN, LayerNorm2d, build_norm_layer +from .position_encoding import (ConditionalPositionEncoding, + PositionEncodingFourier, RotaryEmbeddingFast, + build_2d_sincos_position_embedding) +from .res_layer_extra_norm import ResLayerExtraNorm +from .se_layer import SELayer +from .sparse_modules import (SparseAvgPooling, SparseBatchNorm2d, SparseConv2d, + SparseHelper, SparseLayerNorm2D, SparseMaxPooling, + SparseSyncBatchNorm2d) +from .swiglu_ffn import SwiGLUFFN, SwiGLUFFNFused +from .vector_quantizer import NormEMAVectorQuantizer + +__all__ = [ + 'channel_shuffle', + 'make_divisible', + 'InvertedResidual', + 'SELayer', + 'to_ntuple', + 'to_2tuple', + 'to_3tuple', + 'to_4tuple', + 'PatchEmbed', + 'PatchMerging', + 'HybridEmbed', + 'RandomBatchAugment', + 'ShiftWindowMSA', + 'is_tracing', + 'MultiheadAttention', + 'ConditionalPositionEncoding', + 'resize_pos_embed', + 'resize_relative_position_bias_table', + 'ClsDataPreprocessor', + 'Mixup', + 'CutMix', + 'ResizeMix', + 'BEiTAttention', + 'LayerScale', + 'WindowMSA', + 'WindowMSAV2', + 'ChannelMultiheadAttention', + 'PositionEncodingFourier', + 'LeAttention', + 'GRN', + 'LayerNorm2d', + 'build_norm_layer', + 'CrossMultiheadAttention', + 'build_2d_sincos_position_embedding', + 'PromptMultiheadAttention', + 'NormEMAVectorQuantizer', + 'build_clip_model', + 'batch_shuffle_ddp', + 'batch_unshuffle_ddp', + 'SelfSupDataPreprocessor', + 'TwoNormDataPreprocessor', + 'VideoDataPreprocessor', + 'CosineEMA', + 'ResLayerExtraNorm', + 'MultiModalDataPreprocessor', + 'QuickGELU', + 'SwiGLUFFN', + 'SwiGLUFFNFused', + 'RotaryEmbeddingFast', + 'SparseAvgPooling', + 'SparseConv2d', + 'SparseHelper', + 'SparseMaxPooling', + 'SparseBatchNorm2d', + 'SparseLayerNorm2D', + 'SparseSyncBatchNorm2d', +] + +if WITH_MULTIMODAL: + from .huggingface import (no_load_hf_pretrained_model, register_hf_model, + register_hf_tokenizer) + from .tokenizer import (Blip2Tokenizer, BlipTokenizer, FullTokenizer, + OFATokenizer) + + __all__.extend([ + 'BlipTokenizer', 'OFATokenizer', 'Blip2Tokenizer', 'register_hf_model', + 'register_hf_tokenizer', 'no_load_hf_pretrained_model', 'FullTokenizer' + ]) diff --git a/mmpretrain/models/utils/__pycache__/__init__.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4131d1c62960e910812c9a1f580a8b6af6ead3a GIT binary patch literal 2783 zcmZve$x<6h5Qf!AAcTaF*f(#07kL2;*v7^SLP&s(*aSEtFCC4#BsyH#t?31fpJa|R 
zzRiow1I)3mbC~ceOvFS?WwnGbHWB(FE33LXEAy{xoZjA^i2VEYzdvio=}6>n8eIL0 z$-%1pd4FkEn;?ickm8ZVHNX`$2C~P0u*o^*0BghwPui)a051Q6E@X& zh;QM0c&|n35p(#f1|OK_{v&HJ!nd&uW!!-s+=X4Wc9ieoKJ4QG9AE`1cnF7Dgnjt! zNIpCERr{T)H(pV{+{8Dkc1{z0AGZ!S_43j7;Z}KfJ-_`;);p3*h~BlLb(r^kX8V?7 zi?PC~VcU$C1MXX=%rM(iBL&y-yq`=Yw^@5t|4$Z-?zs*&{GZIEE2mb?|7h8!b6&3G zMWSscFU{kHTugKo0>5m17M&77L-ZdQ@)=w+{OV~wsG|&DBo3HoePMDzu0v!`XiE1^ zgIbL<-836wAan)x6(?n;8Xmc|Rz95A53K5^jpFu^$eaWgH}xvF8ag^APO_VPfdmGMRJTEu&2o9M7^@ zv7DEIBsVWkT+1xt2{T2{9-TO}Q_3zgw{F>Wk#SXG^oHZ)-#Fn+?%!ic+cm1RNy2yZ1wUwTrp(2*xzsjoWy&x!TlN`qeP&Wk%)sE%*RyS0 zFt<`QV zU3E+fa^$+Fl{S$+^se$`Y-UfoA0qR8Ymw zG8Yt?bjB-1<;%rIrqVE6kLAzmdyc~?ifrgn;%uHI6}n{XXYyRl(I(>!8C!FC`{3)X zJ<-B5?>->hBRnPCB}@^f2{VKngqws(LLY%XDefIYH{mhi7U4Ewm>|D35x19+A<$>W zy-!FIQiM*z6GEJjBRnKLB6Jbv2noUpVTh0=^bqLKq~xB)lTb5*7$c zglB{oghj%0!m@;v;e_qz+%Zi3w5B&)CTlM5n;Bkyk#$+!@_gpftm+41=&H+iJ~3OR zI406-vS}TY7Zy{MRcV0A z3ccZ)W$a=QBjUPlaIWi+JPqpfS*RIR7K-UoTh!T``VCdL zDqo|trj#hUGTsik6*+GIycj#Ap7HmaqfDQ!l}NOpI`R-~t+KN|b@fB3{F A?f?J) literal 0 HcmV?d00001 diff --git a/mmpretrain/models/utils/__pycache__/attention.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9d6a6e56cc872ae4a392454710dda0675f3228a GIT binary patch literal 32899 zcmeHwdypm9S>Nr)ec!or=drKZmDF0v+8ME$efWiiELg3)mh9EoXcw)0Epg}W?Vg!C zb04$aJ-e&Poibqsrl6L9odkk`%up^T3Mo>lKnSTypeS&7I7LCgkWNUEKoxBAM@-^i zqga~!e&6Z7-F;`)l5m_*hMu}-&N+Sh^y$;*eCPXq-}jyFkByZwIDYum&n$iVpJpqjMC?i}mhQ@=;cIES3l>buA3*v6Nq+;<587`*`Yp~0wBw|$UCPXzxWhNNFsB9i z^``5$o1Hl$FfX<}KPY@)-Ea4M%}$X0wjLrVGj?^O-LV_C+h6Z*_{b0?t^QQy7y^3OmcF%ThwtLHe%*OJ#(8{Zbgaw2geCP4=ej33byIvS% zwlM^EG+Up_Y-I;I_xQlDGaH(F+}BohIcw}_0}Z9{A9;pt-ZoZEJ8S1|YgYy? zZwaRjyWPD!R}M_Cz3Bv5zwfqI0<+(9yuiHL?{@;zciSCz8g&NcZqr+H?8Z{NgSWcW z={J461Gm}p*883lRtv414Tj znI(;*0+4(ec}ltT;{E@|@1nBg3b&X0JSgE9R`OOf&07+h4IYOOST zJ#_y3@R}Ra)&@tFH7j%UBe?VhTzh%Nu~y`*TFst~2H9v4YVwhygv*Z(m%r+Dmwk{(S1*3z(Jhz481#A*K7>{7FS(Ef=r3&(4i|OHHV_ zp3x^f9iPHfS$*TT07=JCNH;lAS-S^P-db9=X6<&%PjwYXgmV+~*=qHBOYLQAsqbQ7 zVN^Cd?M*&HFK*Omd*fcYyx`U_?^?ygoU9^F`i}XM@xFw>&)mvvYlFg$rh)PfG7WtI zYTf|7Mrb0(n8|Dvu>u?(6m4xPgWRm2+s^xi?V^pcws9mgow3csU`UWMqttX}VC?8v z5i*U^AhT*buWyyM%2+4LC|6cj3`({+DDRkCl>z=%D~B>$V}r5nG27hHwyQf#Wwyud z>|kt1*D`)?wTAX+b`Eu$cK$Y7IDxiI4svpzDa5734xeCJ&gA9H0bF~~F4)D}Y|kNE ztq`D?_!e+5tU&~5drrC&pst* zqro1G%iKPn5^^fqLA@gXC-u!qwPs*Fn2+9Ei;O{ZFwQkh zW{ZW0pYbt*Zt2_l4wz>jBM=-8c%s`FpBz;$8w<;SWBRAw`=2-e_BYRgnecpjF6+J> zSGw~Ef{Jv6A~~MRiRiu?!ONQaKE}V50T*`lSl@=&I({D8P}R`jgK>kCHMQ8l=omwU zHJ``NTg4*`wAIY>#+I>Vf^p+y2OGDoV}hgUoSFmVu40<+WVcNl6B-W$wrwZ7l^f(Z zy%AzsBh-&%3_}N#wN(H~$m6Q)G}*KKps<~@b9SCA+ZLxg&yhf2`W?w^7m+GSP5dt; zVs8%08Xyqmunp#pv0VlUC=W`=tB|P#c_@Lz6u@+Te4wv_(L9m?=_sLoV^EpNfQayE z%z?R+9q71*WF)>4$o(#RRgICTT8)VT+ z^xF0~dr@K_IT9oPUwq}i4c9WoGYpfY(>Jm7J3X)OV(nFo!jjYUH(bY4e2BNwTz9xU z&5}wevf(}a#7S%6q;-BSRTcTgsJ4?!8!?Eg4G9ZCmn417` z7sb4(XEdLFoE2gLDy783IE zUdBjc=Eh_S1=`KP6kb|?q7i5dfp%WqSObAv3$*KjNk$jM!X^HU|SG*DEK*D2#UgFGUw8b zb`M-WWLgZ&E@YhN&RTsJI0TRk$W810dFxpRj0L3up(!iAzwSMH>J*TQm5rY+pftZ#I`#`Uw1Sfyri!QH+~3Bqqerv18; z&AtW_*{D>6TYPO~q5Ud;9SNwleznoO^cZ|V6p~W92>5uo*O$^$jlrZrwU1yR zgHP)%r|&*{8tW*Q+8ZnF){512FxahL-?soD*{NFMmJzOSI&qsLhWcwLJqiiFlYwmX zvaS{gF7gX4L9IiY$910Kr7*~w2t#qQxW72*XSZ{Hb`{Va;5#fow+*BUh?%!F=7W3I&H*S~0w*|~ArMLU52dPpd3Bt8on1VVvGa#BWh{v*g)=p~ zblccMEn%5_sti)YGG*Lj!k;9+xvbwJk35C@X@nKGr*|^j2N2W1Z$gYj9mO@%{nVfm zEwO5$m9V&EW;0lDRXMch9(2!R;7<1`n;(`9+oj6(=Jzcf)tq?NH&8cAr)o#t<#&N| z#`7K=Q~=hNw-4cWm~k6IIQ!Q>vVF)d?`VKu!Bub1*kd?LA-oLEH@rW3V{Dkl9gya` 
zU>}YRs_a9oYry)q#*uq;Fpl0FX9}^9F$vid${2|}VN#HHII~q7)Y5N(IjO6QONR7z zqEFtb`a4|?<7QYc?2UcPh4m@Pi2Vkr0`Kr$VV&E@>>8ePLi%K4Fd==ykxO7=5@oaY zlmHX=49p+U+)rY}a|GW@89|_k`}X8my_`*0 z5HsT~b`8DDat{fhF})zH_+@Au%2-1q$s(wL3=dQ8+b~-pndTvWbeb*a!W~X$;oKoy zkGamZjW$^H#NNp&pD?~zJA&}6)a=AXqxq?W@%qvOZ zt%%K?P~x!gX=xC_eH7<|(j~NkP<6B8QiKi6XPt}R7UaX#!u=*zCoqZ8=`XuSS)_~@ zU>AWC-Bsq~1%Q0(S@NmP)>=>%AivRt^sx*S()|c)p;+O*z~VsrmwRAl`^#qoqj}YX z9I);_5mqEvA!PZDu80!uW6aN86HqnCDR9+&59>;cB<|D9qpUJFspR{S=Y9Y18cHzr zseKm2`oVY&NUl!qGrVGA^hK=f&AQ=!CyN#Z!NQweiK91yvS2OX<$TAhPp4~Bxgv{= z0uy|Bzkr|jQwS<$g}0an^aBuXN;>#u`7>Y5ec3F{XoX$Vq^ca&rItZF_z;pC@U%;Z51W;@Zut;VV(idQe zdwhMH7?9@ct0w+uWl_VTw3_8IwUq4h6Sc%mv8G5ho*$_?J9(clE5%!w27p;=1*)- z+G8N?AjR8LfK)1j@}L6XhG^_GN*usachDwmQ-%f(2rOcUm?PlLL8J~#OZXpcp~N|w zgR;g}bx`F}jb|ue$vlEPRqb)~(9EF9B?afpC~=ga5TKkgpd9w$?+TQIC&j|R1<4qU z1Il5`%R?yVC~DLaSE8J1uaq$5Nsa+Y*0#z5<%DrO-#sc9cP;_SsiJRlfO4e&hft0X zXMu9=OF%g__X^(s!UgxE`1i6AWCgGZatgc?I=UbJ;eH6kV{BNEkdShI_*)1GLLqpE zBW%ucKg^;ZXQ1d=3;M&7V84w{d(Ck_fm4BYA;@(EvsKjRhghBxxd5+1$N(yfju^Tn z0a1c1AX2CVzlWv%7K4u=2n^@uy8GQsk|MazGZ5k|FfNH9RF>`KZXfiKRB>OJrKTv^ zxAT01!6JhugR2aLrV;9KTMTRl4ud5I(jtL)*eC7^1ChNY5ZRkzJIo3)-gh8UDVPF~ zm;_ufqd{3-Gk2$jnj`_3<5>VL09B;KZb3ImgGg0&^{)uafGT}uU}h>wEkbB!E_YdY zV->u}XV^rr;9f&Gmv>#}cno|7#Eldz!AEm%Fo>V|TbYyxzlqqV@$;q;2)BIte6Ov`AJnI=MUkUVE zm#BL^7pa~tW!mt6O>Q5mQueB0Wz7~XF57W8J*-mRzP@M|%UvEHJ(Le|^&YjIoVN&+ zQCfAHkne8DdINZbAmWI~N?raVSNp!-@6J;j%gGOIY|FQxuZ8MnqtkT53Lfct&9oSq zP$p7yLpHBNn~%H5gy}LHM0~C~7}5Zom8LQ!dM1^;R8Y>+YU14|2p)W}>93qPX|X<9 z4_tls>G`?1ht{FH^(N-s(;O~O|WUR-)USpXD7DIeR&Z7qQ zO%2yp2Q}7;>rLVuK#$mp&q^yMaG&C^1(Q8}FtG*mi$P`0@IHrUA6qlrKN(C$2+bfz z_0u5FP*5G{ez(UV7)%Z(qjv&vg6*2booj>>(*P&IZqacx&ab|8xk%)zXe8jc#yyr@Dmb6-rJ{K3|xTIc%Wlg z8%O1L6n2YSp#Lx+2<@q0VFhywkR$_!1vzMHv)eSf5MEu(D015-0B2PmdRk0Gl)d49X?ws8BAX6CK2bJ0YF3Z`^;yfIT;7?%Owc_4sjb z#62XE?a%Z2kzx7M$op+}4(;lyVdosy2~=&~w}1BxCtzwcv2%wD9!!g%!?!h%-GR?B zr7wfn{{;U3p!O^mH=_*>0y=+`8NhD(mhS8PKhVQC{@ci1f)&6UD3b%Gg{ASy%nOIG zEavckQp|fo`vER45&4E11Bx5kU|)Ou+M~X)4PzT%Ca~>;8BP|~d}t4pGb#;(A;8!r z7|;OD2AEIdp8TMU=Q4=GB+@!CAxU3CyO}@wDoMf=sKT*M>;T7ye?i!}xq{F>1gPV*yZvZoXCnsZlZ0paC@v(g0^S;W{6b^H_BWn|OB^f?fA4AAtP@7(&;N zqM7nn{Ze5@e=kzt<8&CoNH@1oEOl`1@$jBPkOm|$hYf%dge3%&z^o=*2qxLqmy}I} z0w=N|IFS?BA&=ou2_}cI!np`gXnqV-ShAI=_wYm-X}G+CC0~g{v4sP2VjJ;TcmPM> zTd?L_@4Jvc>^6-iA&Mw2Lj(@dQ&U(JD@N+4y?N^#(rrlX9j_l>-J}_n>$hMKzU211 zuxu9NszoplqWYEU&@@Q0OB)^9I4fR7?tpyigvnWWuzrE42L>o{%Qt42P8LcovMK4J zVx>!uCA~cu3T;t49jB8pksg6iq>-_@eh&sRd7Qa1vy<)0wkfB{@b8AtIpJQOOnglQE|Q~FONX<0~&QuFSv z?H$3SP`^SMBTo-Hm7vvER$yYsnbv0$!pSEJ+4O)OsZJ)Ts!Ey8t0z_dBNVYjZ##cE zvYLGw=Jlb8DrT!(ooe|$cTCLN*MYUpe>L6~%GpBcfj@7qPrGWcV^d%W#S*5} zGklh&wTrTah9x%y5hxYKCSeTBnzEPSeSj#0`LNG`?5%7LXuXjWP-<#Wc0c4#!|nw( z2WVZ{`*v8461eei;ONab!$kwDzpUy{r8q*1%4OHp#XFtvJ9`#~#Y*9CVBOkETTqN*awpIL5-O z#_5Jf;;7b$&<)Auf~$~&J{=X}K`EYbF?JV} z;wc^!xsuAt=~6lp-?`wJ+#cEcIpc{?U5 zZO#BOFy_)9AjmB@yWQrQpzt(I?fka?;#_qU2K*J)CG|_KKgs0#8C+xq${~bA(fw7X z$46e8`)^oOUbU#SL{JjZoqcl@kL&TZPgYA1*7;Y03SWVPrPp%iVdPFiNl)>+q*Y8& z_rbEdYHS|eI}&2hklh#^L~ZlKgG322|rhk*dZN+_bh^> zc$4sH9Z&kS(jMZa4E$N?&6=r0q=62W{;Y+19{N?d!^6k5F!XDk2*#dxPy6yvlxco9 z?s!G%5WgnAtt7!*7-4V~OGkW&0}u$x%Ru#z-;yIH)sd!s*v|M@>cMDz?Fdq!yz~$!Fj{p<|7mE}YUzn$10fsj21Qy5~ z-7IZ72y?RuAl(DgJw?T8FYm)v+%MeHy?2VDRqVxuzolOvJO^Q`T!E$wi>M7RMR2J( z;eo4u1s2T8Ex3lblo0#c2{{8pGqeL5EG)hG1@|*}65(Cle~;LOgdOWttci`eJ!KX) zi=uEdlGhRT7??vaoUldhwYaMU_vjE1GN_Hxq2Q3ZFrrOE^k$S56b3Nn2f1X2PH>6# z2+;Zae2BL(5LT|59?11-dhjVG?jN)G0)zK6*oUXP&b6G6S}iMgTPR_m@FlVdb1iUj zRpIoYsu73Vnznf`wXnrZ9J!k4V4UMfm^iN3bZiUFEWsGb+sW>e2OH{3naBd`QCt@r 
z3JWr<`sCw0@d*acF^Cxi;SB=h{Rp3mYy31glFg$dEl$4Dl&3JU`y77K?0;l!|%2!*= z?Xe>X`oAEdn)F3wMxRixaY#amdj$Vv-xsBb*ktkyPffWp!8p}|1LSI=B_EQWn(=Dt zeiN^lVelA(cQJT3gH;CK%i#MM`~ZWY&Q9I(2buQ-1G!~U32z@@#s?7u1?4o~SQ*A{ z1UZ!#RKs&ifH~oQ%sl}JcwTox^L=FmIx-jj6Kef&&IQgTNNd`?kjzApnuRB;UeO=c z_1#J1xPDxF2=g6A*1I#tL8xU>21Xx+-JHRzHs7+}4BA86Qgey@%^7Im=RJf#S>KSP z^X4IG^H2@}=q3hc1IA2G}i*kRNDdxGXDG$p~F8=2KfIN@M6!gynfu05EuMbNEb z4CiQAG9I=lbeKX;1$hT(yA5f!0*xl`^LLSp8IN=YdvfGpvO=e-%=RI=A2D}OhNbp6 zo(fb@J+l~ypl{%!L|Vw$kuXLHxw`q=Ll!8#Q4aypug>Q_X0BLQG;W1g9(Co9X4MFz zpK`??wv)B-5J!Iza>U7hAz?@vavU^2cmECpfdM8MJI#R8L{TZh0w$4MP(C;_O27R! z77%*#NycQ^`w_-u6{O{e`(q5AWbi`_>I{S~3HjmTt}p^2Q^HG-{3xaBImTXKa6f|) zP9$L`9#oS3<_^IFWk!-)6Y=YH49AC*&RyosMu-)mg#|e}ztm97Z6QeL;8P2&C967J{vf7KtG|S+4!n30`-?0RrviZJ? zCbp$+@uoDgGL?p^Mo*NC!YeC{tej3$gIM1Y*^5&g8uHhFtsPY)NffOFiD6^?S4BIz zK_z{f*8e5bj*jTf{vWC+qy3y3w3)VMV^m71O(d6^^Kd)2yZ7RW;r?3^qQ4!TtB1lrlNhrZb5V3fF8*kld z!89LGoUGK)Kf*UaflJ`!v8!KNf0>4aV%^SA5q|D3wfZ&<5vBU@&0|AipC5OK`!}%! zjjBN}Tn>uE_ue8(6djC{c@{SRw-5=9?7;?#E|fOU#Ecjkf*fJ9IK;EaQM%9^(Iqh0 zbAEms+EBRlQBcI@H-ZwBd)%^)NYdND!ovAd5T>ll@AV@ISyn`nbHVr$3a%>Bl6qp>C z2{dUsfhPS5uXrZ|Va0xxG122Z%2+H#9%WK^wDXJ!8@CTAB>F-3vna9`9+F!L=k^q< z_(=xuXYiW{V6Yo*GTQZ)^+|sa7nrQmOb2mpmNjz+c zj7S`&`E)&i(*SmfkiT7Ud;zHHv-aAx!EcYl>3>pcHhrw?wz zdmNFd1_h)XLKV`6*HdANvAg@@EN(H7p)azBOqTaDkHb9HUb?@75=TDw+l1p$6mw@PW2@|LgXK`@89z2r3~M zG*$%$rvtfHK64*?(D7<0K{LlUBI% zO}Hk5mw^&a>B1JJ)<><`_ub`F*xTXM*)yl#Hh=oenYX^<@x{h|+Dw>9MiN4- zlT@NcD5)eIN(uOh0KfN=__3wHkdb)Q`o@^8ft?{OLZYjmj7&7?Kbo{bOe$i?sH5^8 zl||UkHr#~xSxMlsH*{a@^3m^LZ)o&!tV!j5#81l(#_{PyvuDq|CM;IGsU&bTG7AlrtwN9fcSiWa{;})nGoVDOhXX|hWn;xo! zuo0wf$ikl!76QA2bB8=rP+y>KMW~G6t04wYr9=v4$PkD^i7Y`G^dIGbTNV5e%)SHugCR#hH{0wG7A5=6kTk(h+P^1LkY{KA4`2U-tV5}O^oofior@*f!cj|c(-eqD;m zAuSK`;amu^y_LqA?fz?KYIdO8D?uq9?}DZW#f$WHUckl&WTxD1!bplM0XB2Nt+9VP zsDcj@MGrgjtDVk+tlHJ^`&la2Twiy3%6m$&R>bCmTKbV3Pi+QMy4de6D_catz~(}f zi)fAeXAFd6dxce$;u)m)r7!aQFY;7wy$^#0Pj*eHPsF$CFD*fef!!8+Fwn8rqP~7M z$f@l8RB+F8N#Yt-&CfvDM*Nz)vQNtY3gG&hrt336w6P-s7AC6ZW`z$!{+Dy`m4c@f zzK z+!lpfQPOc1XXf;UKyO~1%gNY)R+O8i1^Jbxhc0p566U%KT}{UQpSThDl=tBuHuuCv zXrp~mMyc;&UldS;&t1@F^v#Ln^GM~XXCXUD7U?{G-hV}~A3G^vGvom$7{)lwJ#&-i#f*$Q)RHw5ma;?=v|4tb zLm2KIh8Vm(k7VF?2{<3b2yi}BW^f%#u$)EQF$?It7PfUFWH%=hkU8WM0g}(+u>~_$ z(^+OWfkefRO(`7t%ghi?>3POD#Idc?dZ+EHG5;mz3Sew8#;NT7HiO~IQ}6PA<_X07 z=Zp!z_TL%XhuON#xA`zm!X(fr0h>L6TY%y8czuw7Hh_Z=a& zX1vC@e~S9>ZH~*ZWpjC7kAcr%gMkpI8N`BoudjB%;=q!PMDGsSejXPru(i{O2*XxW z3|sY;+#Fh(cUhu^U+NeSQ{Flv`+|Td+$!y7;sRr4DQs z%xx>wmc(zeIsZS<;IC^x3A~}yCl}JTP+!*$IYLz~gkYD5pR)1`c5)FjIyR&Za`-gM zhTBUqwOpqts9~7M=sJO{G67`~u6!th+p8)HrKD+?74QmcWcv)#^@s!(>li!n5 z7#Qw(-`pEQGr`6CZ2wP#B`NaUV2Sy1IkT#v*ajKgW7u3$}))Ky>MM+of$}ndv z;7}X8Db2d@Lc;vu^@NJ{4>2~$;1Ghj1Bw^=I5Ws2xSwPoL-a=(6HR)Hu^(bE915d4 zbs|!VMY0&XpTWq;4m~pjuZrvWS(a=v_#A?|vCvp!a9zz6nI_U2X~PoB?Za064u?D| zu4F|e4%8|~+zNvpF$e<;r*-aXFWaXfgp&NVs%)<)2-Lpm5?C@(pW}L{&-Kmw?lxf} zwk(?>pf$7z(!c`Tv-e}nC&PRhRX#ms1Cu$3wQn zO!9N2)uGojKZQoIS}EW$&zDNz>fq~)AGkUPBE4O4F!u4Qg6jh}2VM@Jzd}B^IeQYn zDg369S9D4aKmiAaZbz`&FnB+GX+}UiehqJ|UbYWAuz$t(?-p%keQ7XT`1~)+EO3nPRk~g@W++|+D z4|n~wCp6H*;W6a0|H|n4#UVe&U)d9X$#nf`7vV?=x1Zko4f{I$1wX2W>AITqj`?d( z=y>2Lwm$xD;?M#B#t6#cMwLhlv)t>`l_G>9gv0&Y zf)Yghu1Or@){PG95&MxCx0jEReuSSW{Rp=JcmW$bs(*>^1d?{fZX-gNBLc#T5}&0@ zO{Lg0Al$b|_;@b%?xzr3zL4PFe}%ci2$8!_Xre`$`c3AEBK)@)6F1WdF2kb+1!T6d z)MvcQ2trK(L@8R%!rg>hz;UZIZ1E3=5bZWTh6|Vy*7w-GnJRrO&7#tWMnUD+)B{Ah z;$ncW<$z0xCB8S`Zi+{0W3YTeMtYLDY4Iq~*RH<_mHk~i>RVa0FoU9DB>VJ$i7q;h zNNy}*g+~A9zF<}-4Zx`P-;Fm&OFuz1T&EjqoyI2gwg*#q4<7tZN+9ApE8=$^Kd*zg z$UTm@-;u3}r9VT7i|>NA43iGAo5o&4u>B{1Y^eXx^jOsN;Cfm{Q}}?LBQzNF=!FQ4 
zQecsGxY>++At`Yeh|sCKz?UL<7YJYw&bZSzd@>+hVBaE$zTw^J^9#Xh$J~ZsCF~rx z%EPC{D3*j}!-U=@5HN)xK4xOX|e_?*D_&$5I_ zxI&Hp9g>i6MXvo3p87Da!kz(KTGSU3fMTtquN7`nXBk0tevL7~{_%-v1F*PFlUwp(l7JU28_+gen+*s%&98*}EV_v^R>(t??E=Dc#v5w}VCre+Xd zB_F?bx!MF%@mI3v)mKs77u^!iX0JqCZm;PVU!CCFg^ejx*YFhN2ZdtSo%D#xLM!Sc%FLAYY-HKW$ed^GpSq?Y+;?F7Dx Q^U>N&?L)N>O+WvC05d+UfdBvi literal 0 HcmV?d00001 diff --git a/mmpretrain/models/utils/__pycache__/batch_shuffle.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/batch_shuffle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93178a1f024d1111eec85a4fe58e08d291c15598 GIT binary patch literal 1588 zcmbW1JC7ST5XVXGQ+p@JuRP94LAdfFj;_uj2y7=n5Fl`Z*luJ5!dj%HJ?{%7*OrY{ zb#C3Kyes`h(&Y=VP35l;Ah^s>dxGRk6#|^)V@Uov^W)aTVM3t%{?8w?K}5)3*x4>H zM(#n?OE7{6T9A^CDaAG`*qFiBE4 zX{L>h=h946Rz0_LX+tGVtEwr*)qNSj*N6HYRNa8-$T3{&fUKyY$IN&gIiZ3I_Li+! z$ND+1^E!Hqtb9XH{mwh1tH1zt@iB8BW>Vng_^M!xLx(IfTJJ!Vq zqzh+wnIPjJ=s{+t3$7RKY*xs-d{!%7X3z6#&RZ>cJ>$;dVXewhl5i;Z)m$IB7WS6> zj;YmjF?uX3U90qnf1jC*=M|S3?EAwnPx@uQx)5xwbP;^Iz4$KQ?)`See{N0F8Whp% z<8>odX7aikZ4{u2ulTah!P8tUCuWgrSpH~zDZvFu`fvgIwN=0G5&y9ga_Mfbqy`xf zfb|N6Nqa}FC-M>!?`buCe-0%b*g!9`M%uusw%#-|$^&N`L*4}lEfpr%Mj1q`Rz?Nb z^IzohWjatHc0PKB(Vyu2wRECwd~pwZWorr$vvJjylX=tX@g7R=h1=oIJ!}LX6Bx2F zj$<6&5p@7Z{r&Z!s%vvtmJPtDvb@@9)V zQ^ef^phEyUg69BW1I+&GKc7^h=1%GbF8`ZYWD3hpa4(VmpUgNeFTuVI_BPiW0zV|% zVW%)=Y7a~rs(ow^(Of};q&cG0HE5-=1Ik5RUB{^#XfQ?UV>F+l`3w!xur{}#WpAvL zyAguv{SqcLo^p|1Ol-{57ci9W+0eXd;C(2}u2W=(Whtw9UdfTj_4*Y!_SeWqRXv%j xOxSS!lUM_Sn{A14vYev$o3a*dA-~0!r;($5IQ%9Jz1uXr9aEpObDv&4{})Q(ny&x= literal 0 HcmV?d00001 diff --git a/mmpretrain/models/utils/__pycache__/box_utils.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/box_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7891c8ef5e5bb7d2258936c830eed55e7dc80f69 GIT binary patch literal 1987 zcmeHIPjA~c6emSVaf7s7-EK)SVBn#L2uKs#XfHw9wc9Q$79aNKlC7{6k+#*!qCwKJ zG{~1>J@(#Xa?DrQZI^wA=`>)ce1!qS_DCs?llCz5JOboP^19c;8NIf7XGV^no>MK|ZbPQ2W@DJ`B&z(7$y9H)WLGGXpR5-x7maAp! z3)wmvx^qwC7f#_Z%*bm8BD&qPe_#y3FN{AvJ;fzq@%WUK{xm&|Q&xIf$Y@l0N{7+Q zz%OxDlB^e%B!>mhN<1p@;VF_g!Tzs@J5mU}GagSQ*D{RLORQi#Q9Dx|!+I#Pp5?YD zdD%3(D2wvhsHa6Q%d_01tjyBHTlfZ+w}5@CE*=PNUA_S3U3DqQI#m8+s6pY@94Ro= z$t|3Xch^Ij2d(RYo=5^aY2pOFyauFnvR3KjFtaw$w!C2!O!C$8fzh$~m@G^-RbFc# zuk*QQ^7#n%kqLD^4DZ6U`50}Dj&Xqsd}7`Shra?SzUF!=QyQn5OT{A{i}b!Y;`079 z1(t3;3!^BM3=|m80(!{xjPsQ0nV^bmMa5v1!hmXXwY_Tj&x9VGA65r6l$`Qx0{%Ip zT7VmHNc;WGti45Zm{|vCC+PR-K*({Z=`7YG>#g5!(Y;4BYtuc@0);STU}=T95#}JQ zBR`cxwQH>wcWTqkZ+WVO{Jy(IA3P6s=^&O$oBP4ttLt61=qxPuYYV80fS{L5tW8zC zrH~h)_Mh)kJ>s;R?Ni7{#u)`C8t@yZLwhm_V>yczr(cTi=uwzVxiZNwF2ms!a>^4v z=BYO2vNdko{4LwDs)18_c3Z9h>BVAt>Xd$^Thq=JFv$(TLKCJk2wA&n_irI*@g*dn zR6u`V_!aa5vksiQ7ZT_SmE@&&j82f+c!qw$&_Ax~pG&Ulw~W5V$Ib$oB>epXnM0m( z87A?MfSmV0sjX3s)c;XxJ_}0U3#H5N0Ek_{^a8o`KO(k!W?&bPl7vYj*MTo@FM03P zFqU`W~PXUsg3rm{D{N3d*` z{0JZ6Evx(j5`sGuw+-b=b2W44+;fhvZFzXuM-bnB|HxJ$LO9H;ySGHV_^Fj=;2%7 z`+`4XS9^k*9$;}D)VS`|Q5~=22ASYGtPu@qv<|PMHEJ9mU})?SkvoHvUh)ZpW}XPa z3UZd)EGrn16;p}fQjGh30&$*mO)@J|S0hX$vw~@oDyj9exstXbTx3#}zK3LmToO2v z(iVnSz~?dAQ>9WYWMMr~k*t`~oyKc5*QXxh6feoiITKo{(J81i;IFJ9#+Syos4Xu! 
z;lgw{Vapj)L}r~PF_KWe;FJ+_kLH%e>2cxj({2!ymmqaFfcMN;C3gI6&27C&YwpaE zBr{;(hDiTEQ75Nh=;-PxxSe)LO>{18LCK7H!t*V yQ9pzUdcvi$uyVM>9TAGvCUKxJcBu?@WATTiV6bK0N79dY?p7I-ldUBA5U=hF%v6Cp4yp!*oYO=|h zS$hoxL3*Z(-F55gQ>V`Nol`xoRD27^U;O+Zd)?1i*1yor@=?UivpC})BaxQ0BWq~4 zY@4n{B!;;E!t`|4t1M*CGR-O&d$#da(d7jF8p4^ld-p#i*W$udAeDDt5 z*=pLl^qP{P3grufZqm$Y@3p;TFdlUxoqv6@7lk_i#t8S@sOi1>>g932A1Yjn(Fm#g z>T55&c=<|~x>`P*^d{cIMQF8bX|;s3TRAC0XE!f%l=htOSv^}iGXHM5<$lj{tX2U7 zb9M1@=Vqw3$7(ozp)QWqpx@?t zIgIi>TNm4HezkV{J?m$e9#`XW^7wGLheFjEj8-nkGK}KKC&>V5HyZ4<`{5{59XzkS z6Gj-y*4|B3$79y$C0!)0*sy(jy0#G4Ryx+Er@eN2)ES2Dw)WfY;aE;0x-Pfd*C(AQ zEh)6ya@=jVRf7iaz8;R^vEoPQN;>Xt7)7YCi3%n;M^_gpp@mh&$wToh$Nre$yi*nB zS*rN!NbapDtRzm$l`a&dpo?Fc47%4|e(~}f)6GjkuM;Ps3T}id#w-QnUO**VEOu2w zzbooMszK`TJkFS2Ze}Oefi3Nsy`467KJxiew5BkILK)xr` z2FlF?ve)M~r%8AaXH3s&i0Rs5sPhTfZWh!7)SRlRk5KXuCG-gOFeTK-l34i(-1$dQ za31Getm1-Dly^d`^wxC5-$Al0)}kopWL-KTRvk_^%!lr7VH0L5^rR<6&*wEs#;MXO z$`X~rcp>dMvMej_IxT;Z%cWtNO1XyEw0d+pvMOt+S>ZKZg+SNz*)NCjKtjJRB}q7< z<@8)M?q2Ksp>0|7qF_-LaA9!q%a@*OZux$I-4!<2UIz0Ps4m6 z>~$a(GN!@ycodq_?U!^ReF5#7vFYOCak_47YJ1VKSyQxz<_BGqk;~zX-$9aCFs}BY zWm}1WNwr~A#ms6uNq%PEvT@}izmRy+#%~ejwiH->q9DaaRnp4MZ)>O9?se~w)z?lxj3(Ocw)fO{w^>s)%7pbBj0OoTSAlLD zCly6I^!9A9v=fcmPiX&2czr@@+ldq{Z0%kijKYrM)u(gAXpbv?T743Q>LQY6-q?s= zqYN@`S&?EpnXT%_Q2IE|n3lGmuL|FG>Wt=oQ)0eH|AwYZn}6VyhZm?Lz9i0Ki><@@+993uv{ej-x(`6f?ztxaW4c+xaV0 zdP=@@Hd*%5^~Km0%W-QP){>w=yE42n=!Q+9%cC%q?YF~0eHe(q7rY4eE~WFy+3E|lN>Fq0_(srrDNO zTWWdl`d2Jf#^az^ERx^BwW9Y`Pf`MUYc-t=m$Z1Nt+>vr#28%OK;@WF#&t-~_yk{Y z?CC@Ikj0!li0zlKj!uCuorCF9WB@WC)6UiLI8vnFW+-GQ6baBkTtHXZ@G|;9ERAq2 zegp~82T>Qc^C4#LNC!p>W60=Vguzz=SAbLqIO-JE5`y0C9+qs5cC>M z^fWYFAfYFVKtc~hosh5)7UyWkh?iruBiasJWV9>62vv3Ib=4Wg5ZVwd?W0icrTY{> zo*M&$ZVX~TVgME!B;h+ra23tEJ7G+y184&ZfeX$%Iu&`|9!T_af4CD7t&W!~XSid^ zGPq#$Ilby)UHorC)V9v^t7zAZMCW+~#yT*GTLv6jfF4fLxA-cO3~*>cwZ)9!2F(5- ze~1MNxGBtpw9i|&>=&%J%KLdNTKB+92zqie=T`2}wyo=DS92x6=(a;83@y&~9kNod zH*I6!eTEaG-A|)HJwpj&ivKE;>dS*T(M1);SBdJxI*+;RhuZDz?S&)B+6tS;R5I(} zS?YygMBDAtkYW!-F|k2E@7h2HyUwY5>PPV(~V_yyEj10rO2jigf9E!7m+~& zNJCDKz>MYg9Rk^1mfkfB&;TQL%Z5>7U_Wpqpj+k+jRXs(6zy!t`1Bg~3?)5sK^>;q ziY&Jx(cU3wZRT|`gkF++6Dk^-5$ub&(gnCW!$}g(#YUK??J10pe5aMW8eO}m$@>;+ zvdMFthFyiA&e~;hmRzLuWns$#Ov89a+Tb(7B1V8`5Fv6eK7jLG$p42j=fIO-Dy+aa!t-XR`dUpH5}s|LH?JAj4ZtQX)Zwz zbdn+$!9{{hQ2DR!gm7&XG4(|m`8w=pWM;YpuNMMqCmq!flhazKHP}%ywM^@mc-`sx z@yXDBCT_KAmb7nPfL=@2Rv#U+SFqhB&wqZ<>Lh!}R{9@sal%Tw#!4e1M0OZfn{6(v zbK9Neq_b}A7h%H-vtr`GO@NId7agVkQZ6jf1xx1FE$N=KP=_1|s#8kf9^zzk!#!;7w!xqOl($k zDapu%_Kcd1LS3LbV?dWfZj<^VM$+^RO!4@(X?&}iVaEIoDxm6!k99jycp7~D0LAg6 zNc zaBX-!(%wW4eqredytpI>D08HsI|>Oh87QEZcS!f33Z|VTp9zr1J$ShGW&U;^s%Q5} zecjstKO!N!B`7YpW=?M%low-w$3@TL=UN99%6}72H9o_^YH2lXW$o5v0lr(reb?&7 zuYg|+%j_8-B@AVqx^slXsMv2J4}sbomh5)3u6`f4x&YI=)8Q~C{is){9Y?rw-A7?$;@4Q_`V?=-0C6VIcVTs3_z1i`du?k#FW74hKm6QB^pY z@e8(l1Rc059QVUU-nXlw4wUxc3j4Mvrkf|{KSk>IgfV2z(!JrD9gr-%b{|N68KeB; z`A8Y5H!#-s=MD&AH9H`A@d4EIXTlTj3$ObA#g8d!c>=G=U!o)adn70D`Xfttjc{Cu z;2iz+;8i0f4LyP3!0|H93eIX+0D?OR>aF!IFs?V`I=d_n$qjji5FEP&{%#2|m9z33 z?#p3?bE?QWFE`(Hu=`Mz7pM;MUHUck*|~W@K8Q9oW&w5R+QZtRFj(ieUfQ$#N4NIU!G?BQ4FEJM*{29S}K`{@d1 zpWMQB6ps3g%w&$$2TzjuENdi8%zy^4!f0v1NJcS-$qWktbtd09pET6&l1n69qs ziuN;2&}+-nV!Z5?EcUc@W4V?QtojD*6(f4e?kc#S9|R7>@Q4t_e}`m=-4U-7Gx&xV zto=$t%q|c9m74+TW~Esf0Y~Cg6b|-g{;UXqb`Jr>w<(ZYV15SQ{J_VR$ARM#?#fB! 
zph})7xO?>vj~14i$Y z-vI88@t;qkeg6}${3-q)eYSaiiPBw138R8`F>8qS81A)kbux%#7F6fZzCl$*Etvlm zUtDmyS~9*Cj+W?Li)t}w=fmM%a#Jxj6YptxAaKxXY~j|}d=QNbc?(lJi2BFayxr5e zXx!H&BnX8wThnJKERF~_{H*rgoN30*s-b=nJ#R<}MGDlI5{gAAenB2{Ot%Ro6H2~8 z3DLpUTzgGO=algGzoAzA6cWD%)`vjG@jvfG)2NYPo0uDxOx+%gLq-c7dz*Roc=dA{^sR>p8okW#7W~sruXkY zqrQqsVamGQTr=A5&{K3_geYn!YO5(KXx9wMC_8h|Y$4Lz@=k%S!Z#zOPxlzKNA4Lr zGaM^#QbqDX)C@_wQ`7Re1;*6!fKQ_~UAet3{pvhN@Q(A@Z=>~t(6abrxY%$T;H%Kf zqcXT_8A0{4_#^MdB_@0198E8;T4gKBN?|Y}sjRFtPTHWx)RMYlBh5G~3DlN0 z#Gs`1K^|@kUty|q)6M6|w(vw{NXxr{Q|Gb4sAv36*97XniT-&F>wFLM+{O=mzM%7P zTPJO7eN%i=JJkI6`_oR}ll1-+imf|%ORUQ2Ccour=56IAX?4=cFImO~UXIt_E`hgG*py(m9baykEpPVvg6-#j_p3_- z|1t-jmNv7gSN`E{!-DPwi!bU9=Rw|VTB z=2O_HOxIWt%cW`;?V+EvIY2h!luaoUt);0oyH12KCR+P#x}vEzUtEwTYcc@vh5@Cd zKkAPt2`I_Nm~ByAL%~1cj0xb_LwpDx;!kLMioC@?hXwK9>>(D#e+ggwCy?wv z09QX174Z{Mt^T|JME#s7Hf-Tg)VFFkjuiEt2I%}aK_DEyR5!^G)oj7%P-}kByAV6$ z<#ZN;qwU-q7JbvCJ~hMVY@4+Ci=YJn$$WD~S}qQ)7qBOScs}4Qt>dDO3(f|&bKXXJ z=kdYXI%@70X6_6yw-5d%eJ=M<9}G^G=C${I+?5x1CEQgOcV)1@DzicaRm{h1vvdsT zy$VKH1Scf6=zHZcELCi_+Cfw zWi2Tjc!$!^hUVf`9EE9fkk)|MBT1yzo zHQ>>9lT793p|sQDu?u(Omqv5pJe>>%pBB#Lai3VmqtTYxhB8Za`9*1kPfNz;Y!U2D z*V7vgG;XCJ3qe^cHhXR;ub_ZGrRzh9-l{Snfl43R3b8TzmdadxUtFi?l$aLM-lrQ| z1g%@E@Qg14NXbKQy(hHT`jobhhIxK0#5y_Gdrv>h{jU@nQ%!lsu{qmL8EkDg(|z~p0k_81{g zg*L6t$@g4}H_-Bg6basmF2}T&|80_D?KCOUlKcu_@;c&;kfbv`v*e_0Wjl-ZR!W_O z`aU{D{BV?UyI_qU*6#6&!2c7o1I#AbIoKrj8}}eykp9*kl`p_u{wHBLDu*H t^=U}ZinoxI>7^0+FVmlB2`+wVb$5oUc(@Xr9!ppO`yrQZoj^d~- zr9yYDrM9(-rqZ+C(%VMGXs0TvwplUT=}Nktsbn}!ZDlK2{w~+fYl^D03zaEOF{D|_4fD*M{|EBo6ADhH63b~3Gl?L(DA zs^ZRVAKp=(Q=F`myP-HacX~_TQY#M(;`2@c@dbB=xy9Rh4#u=WZv`b`jzUk?blbS zwu@glcmAbIs4YiXFWGG>c zFDUqK2lZFI^;Xk=7h|oUqS7mN$LY4uP`#&nD{Z&q*Qv?zUnzX?`gm}ais~p8%~30Q zR>Pm3HL@xE;n~^an6BZb4qyPod&V`Ltee8HrJX#kf{PfWPvM?DpfqN_t~6Aq=uF?3 zs^px#7?!Lvi+m+F+c2FuXZ}X2lK;A5D3t<+V_z_Lw&jg>p8XG|qP)_rZ(ZwlKIL0Z*Xndd+io>CWNpocRjajKyHl%KO%I8_<*mDQ zkmEXwOv4lWxzlwiHU24v#j1B(Xo0tG*Ihh!uDZgbHrNf{MKnzqBA&-zmUfa$^Lp#+ zUEzB&#j@qFGDV)%YF=@zcJs~Ny2VxWe7uAmf`*{;t~MP|L%rc(>czX1YO_6bYpd10 z=E?RSM>n=ww(oLr?e5j^Gr!A4__kPa(KGJVX5E!U9=Be$9jn<{?}5ZHTibrK+3vM1 zuesq`-G)`~34y5?wt$DnaT^#AhkMOJx31ex^{UM$_cml~a)omo| z8Kc75y=Iy+@!__PH9LOTwrJ+jEOIeDc_h5mZm;EFA|PwHe5j}ME$4o|b|*mAeq0_oRQUCc6`baIEld}PvjI-Ys%6`GE( zJa=YT01Z%_ybE^Ai;d2JgAA&8by&rU+i?Z0bqwfna2eWR+qQ45_{W+5csx}-*H82p zDBzvMV=m?;n>JE?r`N9X9*ixBMrUjeA_Lek?-8`KyCcOVr0;Rc8Too0Q!M9ws_ zOQH!r7K^u#Xh^eDZ}oVOIU1FDlv{ih+vc)&bkVwi{kq*1*mzvuZnnGy%X5>HXUJi| zt9VgL9^);JbK;sDwIwkN#w&lT>@kQ7!?=(na6UQ@OBo z>(b?}<#uf15VTt-d2q8s+%Lovm<|gTP#7Rx$i%N(%U$3G$>4b&)G#cy#mizIefI8c z{CMOph&_1c@dM(@T)g;FcV)#D8{apiQfF zJioHboJ_`0zaDak?2!h$F zB(ECvnHx+CcLmtk71iZlqk&>lepl32F&7zm2FBHmAIDt8q$=s{F*>*>ctx9dOF zZbyqS@kp4@d#vYU0Y64NS9LfGx?*wtx_Ag>{4y@@GYE27Q;q&I+Kg)IraG@0w@qzc zHE)}`L2+t9&8Qk;bB1v%m)STx0mm*zogNreYmkX}c3@(w=!s697G(6JF-hY(i_7~< zO`!#(U;(NJlpS?b_tn1cYis(JCR{8KLmckw+bIW2%h9iB{ASYIrjGdZj)J?gmJyHA z0$%hLozn+Xjcs3^!2p@E!nrV1In*!o5p6!pV}_cRoggWUyuWXtei>)CM zo5nU_>{m83*eFuCXZ`8znLaJ*EUrw$aIihB&F&bRdgqB1Wi!{;&nd5eVl$6eLu~Z( zpxq2SwxUzu-2doI#d4Q?ZTmpKgbgmrH{E#@UmQd|*udxudg`M- zY@q*!4YWubC~_QH|BwC@Xr4ydbNx9^{{ZQz`)jIyxL@qg_UE?rVYz*x{RqB%fb#P5 z^x#R^egGpqy`zC{eREpYhdfXENBRZ-K`L!a+tM%wIpmn%+|!@mw)%UhFTbO{@n}CP z=~pR#e>#?PFLFNA-y7z9wv*?cJjy-!D$mgZT2Mgk1!wk;G;G7AfBp)1+sm#XG?Cnb zvExcqzd+FX2|_+`K?pJu@0|lK9hY^GY8VgSsF z;TOkZXhkutuI;!Y39X_vI|)8Tj3$KQmfvi<3E&YU=oMb^F^tYM0l%EG&XHIK-o)aC zjUqx62GNY7SY(w_$&j?bl)?)$q<3i_dG`bGrzPe}K?a>l+pHfHs9n_%sRfx8UuqBP z1U{l4_8Nu;mvH|H?dB`kEnZJ;s(p1$`919oj4AK3WmS9@sW-JiTYNk)WucrRgX2gH 
za!<9uGU^b;pZQ}dj|wj-8;?!eAJNWh5C#^XYIVW$yl3zbeaobJbwICONfn<%(Gg+K z&@*k}BlL);5cg*k|1^S)&z^;B1DqRYoiW@Sg5Kqjk9WFO$V-_;ZoD?(>o{y4`yA>4 z2+Oq?PJ`Y6^OpxD&V*`$`=YG4NY;wY6vkY&hFfdfcx1=_Uz)xfB`%~qgZ2z0DQ1_Oa&yzyv?DP$<`6Q8Ef z7Abg)0&04ot_JEga7k7%R96R2*Mbz`;8l6MR+$MQlj{IYkk%nw3{~#DN?uOPYSVYy zL0-aQ9+~Kc38`cw0=>!d&@90>bw?xQP46P#TWE>wLnj5Mk@ky z5b7ubjg-`!L?0Ss5DqoerQN1lN=A-r)qqnpyccd4^&Gw_s@huxe3{cW4&7lrmNQFn z5#_?t(oOY3d5S<}2isG%8sw@~>`J{BJr}CgH+ps}Oi5R(PPYzNMp|e=i$BmI_5}vX zx5A)zgQPZwT;ePAJ{4lQy4z}ncuschqf`WKDq;=+ZA>05`HW&{+9%Pj*q`wpwcXUT z4>VnSUo*6yX({bLHB-A~rnR4%nc{oaF)pQ!0unzemVVx!BiL0L3WaE-40TO6?PeMP zP4p)fqa$${7p#962W4p%%F-;9rCBISjd4XON+(rm9;(tTRHX%X$}O%TUV&g}ourGBnq(L02yh83J z)vDmUyK7Y^>QlkIpdFL*gcwx7UiC1LEg4Ondg&6Af7TVI*)8| zL!lB5~eG?Z+$QsL?+EO_^k(wq-5^EG(K~UC(Mb9K{1as9;&sH4*pmJ93i^;uv z*f5bSPw)cat%z3%mHT(NJo?_u#@7dSf}fVM8`drpW+17=w&cfANJ2oN6-^76`{=Lm z;6ps|)h*4-4)p?#4j`>=YdZ>qY4sgt8_;}*;IzJJ;0xV1Ai_NL4rF|B4B`jnhiCxc ztwVbddqWOWe4fn-CgL+uuFeDe?TZjL(oXtqZAEc1KTw@)Lq9;`O(^{E*I(}IxM%h& z2(v8wU@V;+gVelz0=rWPQYR_vC*acvfDYj4{~aJ|y^vJ168iLd%aLjhYJMSLCkWRp zbdX)CrnqjA<{i5RV|k}ZY5*wm2R%UL&~FH4u(Y7$CavO16G}gPXALkk?JvITkt%M8 z=7Dj?iesHAZ2`4f^fAGES(jx^1k-G+do8|aELf3I?={*|UVr^=SbIplMBz7oZ9d6Mijlh;3HA++GA+q_1Tok{8u$&X8aT5&$L5cvZC!VEegXXVjQ5to-Y|nsA z+)Rz&>7XECuVk~8l6+&SBNPQ?KWtK2%sU8){R=LSwtb@nesPGLowo^M{ZuAX@8>Jtl=>(}oATzWvaz>JH#Os*Q~(US=C zTC>#}D4e7P0E~&`4YIRKLmKlSX~mWi(mb2sY)MoMrvivQP=iN~21(`8h_RJraKf<= zd+ug(WdagRXJC8lYDugp5g{`&9LTyyQ5^@Xs9^bS`qej+0}4fXi(vZhR!8H z+Q4f9wUc?%gK)t^2N!{%)do%#3DaiM7?`v>-%GdO-TEwN#Jvx~c!`N-qNl@w8T2c6 z%E(}lm1)9QB#Me?;&I_V$4(B*)tEtjw=qeS#_e3R5(6}JWx3m+B#MgLo<|1Ag=phc z?`wU)Ri!SP?52PoNmRt1E|L2U&)U!_q~7yjID;Ats$80nw78=g7^5yx*hEV2XGYzf zP#RV2QCI*!H#$iT7I#hf-*kA0FSy8`6x<9{wR{_9SjQ;qS|&{$tKhdnf@BR0Gcr`K0(1J5sa&mxD@(+Qa4XhBb`=(kDtT`Ni~vrs|2iU+&-v7 z2!KEke=?28NS?X9VeK*)_pE3lSbr9mw}HTcM9FF;$aPY!gxLo2BZ72E4vOFmnJS6R_Z7KeDls-aGecqtb-Bc>qsQNdNEzuCH zq+vs@4+^0rzN>PkA2oz6+fs+bs*;TOE&A?R3Wkc2flfs3oV@3Km9h-GY@qL`p=gi1 zph|N^HKyZ8l{8r8KFFmu9=uE6kzONdD`UNe*h?NFRUGncu~H1)u++4uQ>FS?CpV!?@6# zm4h#g)+ok&{_|`on{nX_!p1HbvfSx47_nf=njM(3O3ob40hHL3H3w7H-eAv{pndL+ zX%&8exz8!2U>-RMa9_48-aChu7V93=4>s?-i>LWEO4$VE!-q6)F@?#k{0@@Sm>;e5 zHRdlMbw<+SfMw85gZNO`29#ktKDpCC)`Ihe1vr207cFg%QWH`6;VAomD-^{XwdKtH zu)g5f@!|hbl}8wkL7@z}?Sl099y!)P0Agrvp~({yq@kDk)Y94KU;R{Ml8FmFfk8>T z0{fs4Cql51k+i(gk7ONsC!Xg?Bja6Nm8FyNmqRDz2ZqK-7-->Syaux(JS1Uk+)Q_- z`8|dA#~l;zX&*+!zWF`n`z$R1S7sp9WY^%*j4AokI*TL zdlqiTDaXIQ?n+nW^3+f&7)AKsq$-b6ZMiSe>6-IGbj66&5Tsd{Kly|dA2N5bLQ|2I znMg+p?_efvk{Gc~td7Pwmri)p!av1?ABw3K;i>^62y6<*`-UN(GVg=)9+}7wCWsGp znkmsQg@?iHc^s9GOAatd$0P@mfPM0*U4l)=5T_8z+42cUvm|qb(nMzur_j84!hxS{ zEG6TcPMT7qIRr1%SUOVD;v#d%EaHb|?DI%5#0@T&=BEo;BnM-59`>_>Q+QjGVZ$J^ zcIQFPNA>q7=L>T~zCrFWnqU^skWVmE^IbNJ!><-^(kGbYohg=gioNr@$~!-P>#Nvn z`MgBvC^G86`LTgKVMd^RGx~jG4CF6CcuZKyVWhZMGkJo6Q_Jq;qMCsIv`X(+hWQZ| zu#{m=Ox_LdAi?mCo1lP%58^H)MR~^%uswH1+ODzg*lz7;Sa1EMx8&%9zHjU2WTD@s@UMM7E@|>bMfWeE8-q<6*LF>6Gqta=ejaen`yctnFi_*aSO^3pgXz!H*9(yC2O1&T+wW zSCO+$OgZ||GjT}5BOYna?sybdN|sNDlI6jKWQh}TuzKl!dOO3%U&;QJJpKx~fK%!C z>qsigiG0wO<;2WVC?~S|av&v!(p_$M>5jSL!)V5+3CO&Q496OL3&V7SB=eJOwXM@FE5K5R}t6^3h%qmnh;g z1z)7#OBB3H!LLz}M29#<5}M(w`1D1ZiG+}c2V(AigvQV|K6i(?IVLN_D@+VCD(4x> z!%PiloNjJnAo@7{KI(&8mTs)#yU2+Xa({rE#H0h%+dzp@j(PHa2pk@UV}%fOKDyIv zZyXK%so=iN{}v|ndO^2&(hbX7l!wSP9E)P#D*2if&k|`bQE-}qM<_T$0j){#1quji z2L@@D%40sI235gjar@A2ZtJJ%yMayR^E7lcM{+m;g}?ncZKNLIb4~>fo}x$Z-UH=S z5>r=ETDdS0PBY;&Uc#ZkrGU0F=^uJ$H_+clz6??ONqV3&MF%68Y}}%gL}ub7QSp{_ zK+rekqdfD2wEWK%o;>7aqR-%kAl*?7B7tcmb|Yxd;NGsxN$>PdXE8GD;{Q!)olcM& 
zzVm7dii4x{Y#$WFvjjp@TB{qJfttK3&Q2Wo>&URiC!HC&0SBP=9{ALixiM~ zh|lWDQ}reKa(LPwRpM+HpO<3KR&j%}lPthJAg4Doj{ye>GEd2`o}vAOB)1r;Nii%-Khe z?mzhKv5x}Rzn!w905WZ)Q*C5Webi_ftwzgiSuLBsN1Mz%u|PSSwxG0GlWsAGwNBa} zXmp!x(GIny-M?y6op$NY^Ui3S?!s(`zN6}EupU=9^j*65+#JD5Qyc97!0UPIULt~+ zJvch_KD0Hh?vX%{AWB0P0Uk!lG~pymsV4yyi4q*mB9eOCi^oS?OgoVl3u-G^lkjRts zVcA9LfBeY3khkwH$o2Fr_2arNCkIUpYw_|DexeLhCVAv{0h z92F6grTZ29PwQ!!NH0{AqI3?}I8G!38yET!@?;LOhMtssIIgTU2CvOB1ra7`Fr9*R z5id_70>NL$K>{?1f-e}a#rdHp1OvB&k(Gu?dWk8O=t|052<=>uR7qGm)VN z#3Let28pw10x@3;f-&W5vMCW9kq`k>uvKw~Dck^QjRgbpm@(poBB{j<*)yONq?loP zK2%m4j%C9A`5=x5pLo)r9|KK3rsIlMd?r?2ixSF0(u1W(3C%**A1YwQFeo&E(+V5W zL49Q9UecR}i6_fFDhz9fRA#!*rd}3Gh3?Ma&YhpE#6U2+|6LT!ToI&FNF)=)Uq>Wi`No7CsBYuvQ7R1;YxE2Jr-ei~(L!r;t6~)Z`ervQnmq4tJ6v zodoGkk_1%~r&LLxEjM*ZAa9`#b%tK?t~v#{hAR}B z4dpf@J%rFKHT~+FSO5CQ(IoxlSJe&nT8Gy0!`xCra;NfrzP%m^xl?&8w+mzB-A%-~ z&6&)2>^CdP@QZCM4Jd*@CACZKL|^C&V_`0=g-!J@EnQnQ7Mg2N4X!iexNq@oSjpin zp>e!h`7T!PK-F*LmdH}ZbH{aqIFPQ(yExv=O~JzH+D06{vip^uw_I0|(RE*GZw?N4 zlE{N7N?S*zymF5j?Vee@-v}JRlzi_CrfRUH_NefKhq~@3S__Dy@w)Yo^ zfO}agkzR?y4DQmI8eF?pkv>)I0ExIK+3dW`Za8PM;(>A2{gvBD^ zW!|bL*P?&LHQc>!$t!z{ssFtbtzIRbCqSR#E>J*&2yVA4aOa>4WCj**kP3Au7yvoA zp81{Pz*;KDWd*PzhyPz(c`NdsxdEg`!MDKquMMc+brY*wxMd40fbU^< zo=y0NFf!ULyzJH)zM$p}+@h)vaG&Fy&fIAh`}d(Iu0iE=+xjit(M{cWYv}gdTgEy^ zs^viU7D~c@A{6N3hy7jTcMQ)`*Uj55Tt|2bLVMeF{|wi7In#7qn)onm#4$Y1)gu0V z5R*3`S#hLMZmGLLWsuy&l&nac+l5Q`4{%MVObR{=Lr5qqT-Q-01jmtwP71?47>Zr*(uPY1+KkSasZu)FWEIW+44LEN-353 z0FKC=>aoCYV|O=#=h_Tju!As}!SAdHgl;{ok~fFfzm~tdN@xSlW&v>VoMRHscZx3K jrA3uGYk1e3`Ln|2zrf2uIdfOv)*WLTYWOqk#&!KaA>8+r literal 0 HcmV?d00001 diff --git a/mmpretrain/models/utils/__pycache__/embed.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/embed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01edc464841349f483bbc4dc85a98c5ed86300c3 GIT binary patch literal 12290 zcmc&)+mGDlbtgF-4u>;4JK7y-bxX^tPIkxMtRuyVUB!`OD{HU%-$x^@EGB~H;Rr=LdwLjOIxixhBj?+|<`sUE}>~<&cZ?`>b z*AaP+ABcXMgBJ5o+twg*Z#yqqw(a^oWEsy`q4RuT4?X9ejE<32j)L}uXgKhkq_pXF zhIl(g8Z>+6@J~B_8E>egQ7qf?DJfPcys3xb4_JbjjIPnT-mN1%2d8xjg7e4MZb3Ku7b0fxBgL)q?eq~ z-E+)A5VoEErem8sj@#Rc!ZpJ%@jNYh;YE&7YEo+@3PfjX?YiTKfoNVdUkpMwas%Jw zPUyJuj?5i5+A_l}Yv7Fgp}t-|ziM7xH7~!qHqH)3r_B}36+Th)dwt6>g%bwekb4ih zrXB3~q1DHM+h!o#p6i>Duv{NQjyt0M-M-axR!tOlK~+{{u3T9)Z!{;P*dfkvJ(miWSIy;3cRJ2DVA-^M+gy%BUNl)XZOvDmt~KAp=r2bmCCW%vqk>^T7;-?}#u_iB{f`lY;H` zn{$ck<3B0#P)P}dK=&vq4II%QMouC>n^bT(ju-@2OrDaf;Y5W~Ytf1F-&p#@HAbPal9|(tU@k|su;f0Ul<|AajvNqUF>evMA zTYEffnqIhuz+6)01-UBg2z6PJ)%)qI-B<57)P{0aRtr^GyWdc%@`55BMw1SS+x%HV zM90gb;tr?@Y*+E5zC2PPAPctohBPYLMKFI70;O0x308ko6`zM7C@q2EpOHp-Tm;M4 z!PLvArTqegL>U4GanySx)<-2DY<-0KdF!yzgAlNFFm@$tC4W_Vbsp^7I4Hzm-m(-| z&|}rsm!w7MacNY!^<{{R${SLo#>PHs@5`gPc+Qr4@{$xAOVX$|lbXj2N~fh!Jt`h( zalInNb-RlB&K(v;$BxKbz>F3#n*#Oy>gO;Mhzz^-rb6Q5lyp#vAs``W^h4>Od?<|? 
zvAS)m5F~&&Ivy_`6r##OH9j66ix>8lL-}A1wM&2qI$G9ZWqbZmrnTLAJ*sTi z<3$1wca_nJ_yk1LqV-d3|4XT85)P;sFo3RcDq}WXaGJ>4^qeE5)R>5;85gMLmn=jk zO!60x^~psDFBft#7r3clJaT8i#)wblR|=epc#1dS$aIyr9Lvu4XMN1nnO(4?rScpE zd<;(g;fF<{fZ23Y7AJAjI>e~+hvIY{ui*`;6xTtH?vj=;H9#a$Rd$_PNzW=6_VpUC+LHMqXYnM^``6F_wM5n$57Hd2I!b);9PGCARES-$#c9gb zQe2rBP!S@xHw=cM_!Jk|&TY5jh+n0Vm&C8(Pn@HIBA-)B&qKjzvaU4b29+o# z^3+Vmk)%`Z>$3Jr>1P{nNnM!~ z!8giKFLk>NwXgUV?qo~DrK=>yi-du`Mp}aP0?76`LfC{MIp5?0LjD<3zTptUH7%3$ zJd^c2s_YVUCnS_M<5owq@fS|y+xOK{Eg2g4|Z`fE9?y=Y#z@YonEyMR_( zmhS^evXy1HbF`Ov-0nfT{u!?m^^1o8n!YFmB?X8F;^Fu0ux{k3f6e zK6(UNE{bPHnH&jtJoTDCNB0ixDoT6b==VFf*V0LU6?5L0Squ%8mhoDsnLA$0M-6oE zpFo^~boG105Z5~d<@1NKDNUR*gcU@Jq)7g@w67c}u|_x|DjcXbBZ{aP%aOLPh@}Xs zIxYXnfLA&XsY9T%d8{Z3WlC-Zdm|#nF@9uI=y*e)!f9j?gf#V zXrQOt7wmh4^4}>YDr+v4n+~)kk5#KggI?|0q!gugqC~e{!bO4q0XeIolSQJS-$uZ4 zc6o%8-G))<6z}Y|ysc2YAuVB_kt$G%V=C8?35o#s# z(Q_`A7{DLfP#iHY(ZcZJWeR?Wf^Q-~yU$Vl>jA4V1|cS!+KT9AeWamyCB0!~f5LK(({O(rTavqT#XY!IBF8!KL=fP_zL zVL(E}2_ZUjK%P`cYG?U^WEIq2&|uRLZED8@K+PR9a}|Q-?s!%hw%g)&segh`!b0#C-tY+o^^zeQDqhIqDs3}KWN49^ zs>#@D29*>KhV#8#(3a(UOW1<=tVNy)XKz)d(txykfEjKlSGlCz&cvwQHf0MrZPAdn{HyS_d#pzJL?HxaDqrCkvf%yw_-L5m2h7BH14%{3$=p2m7X}1%j-Nw{mNJhNcZr>VOURF|S zx9y+<;vkX`y9koGjAA+t5Mg_2q7hYL`k`xd8*fOf*Cka}jscftzuLR?&+7d{f1Rj%xUf!mCwtiJ@kU0z=|pLHx4*c|hDYw!VV_BAr^5#q?|^ihY+jcB-P zW@e+*;+U~5f^K}r*|rP#b0lWlv^qC8$#%H{nesgEFJT2(=tg|XjhU^^hU zkpk>Td4&Bg0@ESSmVx~qp>o1iR42JKCU`gps%xHufNTCIsL-N65tkX(w0H3J%?H6Rp*UwAgTRX_5-#794gmq!B8x^7k zIDEw}q4xudZ67~4aR_Naz4;_3D%(rw7l;*3FF2M6>tJOkV{P!G=oC<`7M+6bhh2ii zfaMaXmfTUNGn9P>wn!r`?ibMJEaB0nvHk$k6J*H{k0H4CKCA>uHNT^-59WNG3JHES zKP5IO{u%|$YHh?|C-4(3 zy-B$|q463C;EF!wZ&46XFra|2MIr}DCEpFOP;rY&Sca@nYz#zxid0AvrLL0!;AkMC z6AUSl89BaLQ?56&EZJ| zpl47y;e-fCAyT3O({3HQ6VWK{#R0G}Fh$1Kx=UZ{z}8k!$~5f1GKxgBmlBo~6RCATn)S!3BcTz%JUFqe4m4?~-w)(ACGv zU>V-~)Vr>+nWcmcn@g&UW}bHhT$?@XM?j1ehdq})P*Wl-lfbwvZr3GOBKZqRKVN0# zEZTBOWh0CO_gto^h45Xu-Cgu-?rb^H7JPXS?9c^S1j6(geU@N#*7bGjBDLJ~L;yGC z3}D#YHK+8jbWqs+Jl6?L>V#*J`X;$|?uea^7}w+uY1UH^WMzeYqR?Tl?I5^LUMn0W zw9zKKYcC81_cOL2r8W{Y3C)X)=r3-hK16_z-vYDo>5{KD+C36FeLP5UJ%Az(#k)K7yj}E;2{|@T0|L#rBH$yUwpw7&Q=_E2U+swcO_i|e4JD@|Nq)m+A zMDTOZbh5EAwGSH`lNo0QiJ7gz)M=8qf0;);uQc{S&-6Wk+=TYl%o|&dZ@MsfhNhKu zgl`<+o=&HTEdUwLt`VDIWna>Ohp?HF{PoNb%yaniz&XbxaxQg2pTphVnXG|mY5IGC zyw;>fZf%)a=FG{AYYgB3fHwv-`LT!qUks!)ZJL|p`diry0&ivnYzvN-c^Oui*1`}e z^T{|EZlNDUldc|I*|Tu)z@eCrlv@{$K-Vxy%~(^Z>e$TrAvRm4p^XhPkF1a#>&wem z*O!~q6a!vH-iD~n{FB!SGMQQ(Dv%#EVg@@I6KkKE`bVs60_dZMKi#rW{qM7fIZz{Cv*$qtzk@`o@!WL%2ze}+gT?ZXBV z8@7B`CVUN~slbvdwSkKfs0sFQqm7YmpWgBSOKCMS2Xo z34HXxCwGB>+hrRca``~=6i59c>K7wp8|a>D!TPFlJ^Hu`)?tJ|&p^p?1HO0En*#<` z;*zc1B@|qXb+)I3z}6~0?3srh7ucTKptMdHne!^Mc|1CoHcPZG1QMI-#2%QM2iZ^v zurn@Vqao*#(J-|#!dghzqIoO@)QqJGf+p2zzMjm@5Lb$;8FClEkVKIh)N@7wYaBBQ zu}|4rDom47`rVaCw5$B}QBsD`8Kx#pf{dwwCj-X}g8!6q$2QI>yXT)!fltAoQ9!Ft zjI?|5wHyVCzeJ$#8C0xQG}z&6c`CYNrDdRwA5oxBz_-@rkaOeH}vdR%*~^fhKBX zJ_LNvK@?Vkhd@10d$8yIMQVS8O-@{zw#RKl3$=@+4QBmG_;MqytKsuPXYG1m!9K&xV8mwZmw_l=&7^TiC=H-5?FORr z4sfktw>5p<_TV>!)E_-FZnF&DQT_+MoS;k|ls$hDbWvb*!lyQU=ExuA5nOWt?PjJj z)hBiF*$-JaYYTS!iutgaMR^~<3!3A7O#2}Az#f_ObIW`mu6^hp%=BTJEBSD%dh7s=c$RFE>{QprD&OA^=XJ8GbN z^)$-GRMMPJ4bZG$k}@m4M8OFPzDL0iC}0-K%yhz?*hbCl=xwUP*6PYc5rr#+jN-gw z;x^UzGzFibV26S`2$CXg>Q?%icI459a79z83&bYshE|r;x7fHDg4!uIXuF?VGyTlE(1CgO<(xC6{9 z_&ms{Abx{_(^RJCIvWd{)?%Jq>kfUEEimL5lekzyqn6HoXm~DDs|-n>D-gf}yl zZ?!(W_E4*xHA2arApQQB_4Z(REyVXVs> literal 0 HcmV?d00001 diff --git a/mmpretrain/models/utils/__pycache__/helpers.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/helpers.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..369489414db7461ef6abb010825b716ad6a326eb GIT binary patch literal 1568 zcmY*ZL5tf)6xK+xEz9;M&8DSIq0F&{McXu`hY&(TLLiq=NK3(o+GA;KYa@-!%qZ?I zo0AvXpHS#!_n1G@AE9eczVrta0{z}NUT;R|&C|?#Z@xG0d#VP5*u(hq{ok{HTb}n1 zecT)YK7PP!6h!7_tnfz6&Hl*8>=!{9jzYHGYmHi13yOBx8FjE0%4p_it*m_!j=EVV zi!QuTEaR-ZW}_bF7;_(U5Ay(XAM=5XN4GNe${P+gIMaJMWKCPkr4$z0Aj{{u9lw#< zOBW%`*t=S$5*DnFahcd)?Ca@Eu%+SKeFCw$44P~ z@_fcmtC|bMtBp&CP$tiZeNBvLS`K8>8}BMH9ybB9Y2y9yr02?5p;Fm|)2b>OfBY0a zH%y|H&q{`cTmSCu_3%O!HtB3B=&R0(rj+Z=Ks zizP^sEKhX=Phz>b3vnliSsa8c_Cw!iz8}61LuZi^Z+63nLW$CAZbSU;%~-}V{~~(J z&WW6XIE2F!j!KSgy)5K}&uW!gu#(TEl3L)3lGv5|h2^_ah-<4OPy|)Ic3R~{0W;#c zBTufDHMEKg?&5<*E?6Pe+%7;NHJMPIh=W^OL{pXJLh%=%hL{$TSJM-j+9O^OU01o0 zM<0-on+vyf&2kMp&|(YtiJqH>ZkyPu_}96z!-qsNuIh3M-ht9|uoZd^0&OuR&f}wd z-1Xui-6Myv9{LU4coLxJx~}I!>cKy}H98dc%3coJx(myumsV&fccZ2W#58T9U0&cH zUAd+*fhO#?cM!@^MBgFb)>3FAhfqx$;(&$hfSr9dR?aVZR8;5!^Eh#4&Tr`JFJP(f zksz+=&q=swoezFrVa3p|rQZu%`b#JqN^nCkHbj*TZ4PfYw{AdBfLv4ldH~UMbJWJ7 z8H_`(@zeF1gR(?b@n=aKd0mfbqO4=4JE`e{X!jj_r+&LvAMDld_Ubm(Z7MDwCtN)BP+UH1C{f#>rt z|2p|#n~=ZZVEu7n@N@XoT>zYLnvtB2DXnpqJB(10yW^Inu*}Qb_}%+EXHj>X+9 z$h+e%;4SWDy}Uo}Q}S!V+uVOZxG%bA&UnD-9Wv}Z0X<}QNKNp|vN-yn3m8iln1=~pfhgq9`8ROWA9Vql5rcX9GLA}#gUL&@Y_PAd>&`#{ABP)}`Q4oai{6g-lH!X&vw1|>vTofWx;Z9oU;hW({Qvu}JT!+h0j` zVzlHg=lVi+p#NX|dCD*3zev3ZxtQtcajHa^z^0VhT&HERGUiZ7a3fObsaSWVCWn@6 z{rI1PFfQ+jNj%TA3UwL2vC^TmO!O`3?3$$q%Q(?nkj%NhC;XRrm3 zbdz*HM0z+WWe6^3m{_9?NxNkst}XWhtv;Tn$u!JGT&S=pS8U5g7`TtR ztF+j+oAWwfd0CV)|3;m-AkNa$82w*pU0VF~X6)@@RCe^qOuX~TjonvYs8l}^u&ejm8H+SvTmAS_$ z2uNn(&fT)aZT}ckdn%H{SqY^W!p0Y+9s${gbv1FEW?87>BN3`H7aO6M<3_69f_3R?_aJ|o z%&lcTaiHbN7JyyDZeUAwaqohAzF(X=9G?3`7-N0gGP z#pxj{w<=n(LspT<0SY@J>gsRFpJ>$uidzNTIdtVqx>W^dv~aX{-hM=2#;-h1<{&TX zR8Bs0N^ub?qM^nqspGY-6N)vBO< zzd!)5=e>s=nBTH8CwiN=Q&M?JG*@Lt;E z#W&rxW^H=wz1MWtnxFCNo@Cmym~+@|<2Pk*{H7R9dr>R02O5SQi7}T01X~E;6ejW# z0ORe;az0b?GE$yFa0S3)GJM9g)O;qSY~wONifqb6=8{coDO9AUrPRq>oAx5pCGMzU zMPd9~TogLhC#JKkrNuHO%(h)G?# zS!E!uo27+_rKJx0##JJltQ;Qq){mi;`(sbp{`{2ZX0SBa7QcMKy5H2xV`@54lw`3| zQ6!&*xqrQLU6!T3p64?uYz1zhC5N;3I-F$y)5UQfLGZ9 zk9yQ+>}y8duRZtz7Eq53;A8aZ*Z+cb8k)D9_5bMw+sjci&uH(9)90JpLvwi;H5;2? z#G40iWcb+<{V5>!rb0P;;NNGTkpsw*iXKpTMYD6KVikSF?nANMA&8&R8Xr2RpWiIy zaV&W_nHSKU@jPGlv(1|d9t8Ko9bL*~I{HWys+7<&SCRBKRSKA0c=i!FK=*nW6WL1BO4Q@@1rAn#msn zFaxNqY4YiQMV1V2GwjT|9nEp0D!{Q)Qnn@}(4UG7oW^}hnsp8Id6 z4Sj4iMVyN$GC>s8S5m-xQS=GifEP3EDB@)T75IHrh$ST70ATuy+Dt^2E$Xv2Y=}$H zFx$@X^if#YK17baSXYQo{YA|T{;~ZtyKcB;{5qwILcR#=&9n8;*7Il;6VtL3`8sNN z4ZzsX^WrQ9 YT=3tngYzz?vclfpqu4#{%dR;82AzSVAOHXW literal 0 HcmV?d00001 diff --git a/mmpretrain/models/utils/__pycache__/layer_scale.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/layer_scale.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90efee682e157055442c1b2cdc554c204b8d4315 GIT binary patch literal 1757 zcmZt`+in~+)b?EVlHC-V5D8R}#RJJm+l_btq^PP2t-?hiLP`N?Uo3a*$xfJ^S!{1Y zqJ1Kf$|vxcJoW?l1wO*BMMB|?H-uWkv3I){sg``^oH_T4Pt|TW5u9IN{+|66A@tl4 zizfu)0bKnt0D%N9&;a)^b~q_WkN9se2zwz$pCb{7@B)cYhDXtP(2E5*KuL5Bx+uXm z`nJrgazAzC;%Q6?y$6UyJuDCy#=W3H8bKozZ4t>(#4?hxtR2-e@-`AR*${Qv%<4~3 zhDAd(FWS8ou_9U*sMmUmLey)6>s8y_~5=Vbdz3c{v;znn`X(O6u-R-dkeYIbnI38+Oc#k)&H$QE@{nMNOsB ze)ln0suURg5@hH^2CRDRnJwqvTk6tBGUKDdXi%pg%DW%U9YP=lPr<{Jsw%+Xl-{8! 
zhd_r0;^jqgN<~FYUoP3MhEj1S07`egm1Re}ywGy)NpQniR;d9bvZalS{&8PUE8&=Q z!tBtr8Wn;*k#sZd^RkqMW(C(~bIH3a#+;w!3IsY-$syEJmL@L`>DFf*`mjU4=+N&I z%1g1V@3{LQkWBqj9|LF)Woq(cS)4AXGk4~LnI!h--~H#m{Pxcu_jhf3DOejo>+Qzt z`)O;_GTW7fs-*C2X2;>~25|KQ00y0-OMHbe8ixWOBi;TA9?m)b8IOZ8Vq_cw3`}?# znfS6c##eYu))7F&>H@(82m1+8&ZJ$PMe56Tc5ibbip^>Hs}i~nJ($V5tX{>oM1Fj2Sd~)S_(bN1eUn5s)UwD_?6hhQ z%=DjO+h8mO*P5|s=-*GaRaKeo!2r5!6wk{gv=T5X+ar?$Tqt~Zc&b`pb_1?n1<;I| znBedxCNX{opRMi99(MOt3MoAB*>Sjw>mgkI9e^=9f(7;iQSSnV)!G@nk{S&}|hPz-#v%PZZJ{J_m-#ISh|qv*pV z5)p~`6a7F0c}E7F$qfSeNh0M_&o!5nbKTt|ew;ir-bb*>6E%_K{`o;hXou-9V{ z@8_y$bIcmAMZw%$ve}$#EiIPZZHNdk{PQ`l;+tr{$cw?gT^+#S*1W;>a%Pasuig0dKqh& zTXbQ;xvtGX#VHxTKL1^lngrdco0A)%QU!e5PBc`3eew&Q92rDaob!)WjE(J7o#1qJqp7vp#Tx5eScaOa zT`e!_>=$D3ANFTFvc|Yko-eF)mMgurG(u*@$U6md4v+jez~dtyE|yh)JB6x>J(#+; o5_+HRMO~Uui-|!ru@rynZD8J*Zo*O3eFCNXOg_bn1!DE-KSJm&3IG5A literal 0 HcmV?d00001 diff --git a/mmpretrain/models/utils/__pycache__/norm.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec20576e04822b77da73f6d6e73dbf546c07fad4 GIT binary patch literal 4563 zcmdT|&5zs073Yu?B~jY-X1$G*CJm-W+pLPU)^XAz*cwLsk+>g;8rX|A5RlSb%}C;| zM9RaF?OkYls12ltrso1d0SU;(w;q~*A^$*o;;l%5!iOGu>K6+4_lEjf+s(ZcW;mRA z^XBXKd+&{sl@;H@^PB&C-hThIWqm;(%a4bTw~*2Y$b=A$ z-Dbhm*gdabY!;a%i{sM7x^4+axDPDhN_)S2-?`74KDCI#grUt7MYI*sRxxc=QIad7 zELVk3@@-F4MD>B&JSEsotG4n8GqD;qmU-{}_}bg=-fYqL^5afF(l=0$R+9;46kR)8(N~WZX)GR4KpM)q1eAy&bB1(VcptI@{r1x_#mG z^RLyh%@e`+d~~8kAbcF;7}3io<~ZFn?Cc{I(|Mossfbt z`J`8-2N<*?mFkS$JF#)JX*8v)r04g@MxelftzOmEJ{vJ%6AeV+WB1q@;kR#thOQ`n zjIG*t&R8#5$1X}PHrd^~^QrZvxE2LC~OP7cyQ&#hZ$PaMYCTer3hH_~o6kVLj83C7cIGVBSyC!bQbd1Z@V z-QwTh;yJ-&{g=#8GN0!;0d*JImwNA|@gA=o|BF>Kx^EG^=G=9lnXD{IA zdZm9;=@nLD?qeItwT~)9t7(rKQ{+A#vV6v6G7>Q?0gQwn1bt)fQLYBT?P1uPwiJUv zB&{GY#LbuF<@i(Ir0J|o%|T1{dXQR7b`7nHW~^vW%1CJmS;=zk)yh|krnc$6+qfWF zs6O%hZzpFY`(L=wd~2hjP>0pl|k5UN3rCadr8tWevS%&a=De2;+?M`n@PjW%>k}X?FQav+DBUWFCCbP{oZF z(RlU{_y|uGr1Tre$jOqIXtM~G{!{jA26t=0N3a+14jki3?S>*FP;(Xe8iW08jhLLT zBrvv5)k(z3`Ag-@I{-5~sWRA~Q`U5U3+*XQWP`1-qm4NlFNnFxgWGc8w!W_Iu|x3#92^`Ql802B6z0y&h14}@ zLX1lBsVGg{WA1|#AjN0hO5GQUTh59plK zd5C3R4lIN;0Fp7kSrKsU54FtvRub!|Gfakva~3vOohAJXGiEFpS-NXTDGk2nyR7uM zWUn)qY;3vo&ozWFRNio1c654aCuWvceGC0+CG{#^YLl`WW#2}YIf!3#<4U8bEy@;w)(#RWE#Hk7K)f-nQIFSzv$Hh@$R18*b?~Odz{pZ5B!P_gt2;s+!uK8`oK;xMP z9-GHEMbw(#A^;z5#5o|+jyhW~k5Q{Ti!=x~h;(=IiwU1T=eRyZI5ds33p@lh+)wfXe&;Ie}OPPJWVN7o3-8_t2R~p$ykPc)kY99td41&&J7+`;lZ7>X~t!93d z=DXUGnQK;1-o?b;Rw}_A=|}P~FS(e*Vde~yfg<0LIUT8CS7>IL54R}tTU}O!=L@w~ z>M}|B3pc0e`UZl34=;6*8eQxU;wpoqfHouZX@>(-VZ^h7DQ5+vW{LAA%v;+d5F1;T z_xTW7Q?eNTY6%e@{e0V}9JZ(Qxaw3H*`#%Q9ko>(F`%t(K)kk=M{TA2?hwa{OhNPX z?6ck;#x1(=hrP^Sl+_!M%1$qCcJf1<+jRYT!BxRZZJ!Zpr@TAQBwkd5@V&M0pT zjgi?lM^@V!*=<|Zv0-%5ZITs(p%Sq9H1S z<{=Xe?i{i`OMX}As5#ADUVW;!*CT^U?e*_zyvFOFYrHOMherDh$_>7T@|r4dxToAE zzsOGuTbvb^ID0|s>K8P=F3$2Z!lW|a!1ElQXEEaWZk3&Er1>$x5d@WKfrE*^zZuS3%=U*@ck z{5CQ|YcsC3bjivX;A?=O#Kn78Q@(6q7z12_CadPY0wLm@!A8y66cP-dWu5I7A zzK#95d2{>r6@Rvp@5)|sOAUr)g^}Os2T>%3iMJU>Y3r7^Ga2m)NlOZ&@g()i_giHj zf%n|!;b`84KCUZ*bRva^dJ)kAx4d^nH<%35Ti)-yd2`-7NoB}IIoU@=36pi==%F_Z z9t*kD@kY_@nPzC(Gi<%K)3KILner}fyw#F7k)&G6GVM?w=$XmcQ^vH^7?>H$bgpLx zH@JzM^|X>{zLgnBZLZo4gD_gj#vI3aZIXsa$1@4` zp>;}xx5kg&N;LP9?K4JF6Ubu(>%liSnfl}tOb zGWVEe?s;v>BC9M>xeCHi+DO7>fP83Q0@D z-XxwR#RgwO`@GT}25BmyR#jf6GJBLe{e3@CW3YE}wv)3vIeRZ>AGfO{>wX&ViKy*# zqR6MR{3^N?32iL~jOBhH`4$SvI+8kbu+x|IDr>T;PP=~awJ+P_S^t(H8#FS>;|@Lx zQWXUw0ja9`egQGa*M0x9NidwfsrWvRJHD?DT3$oXyfG7>ju;N{VzJn-(BQOUCA1_E 
z32mUGnQX1rtgG4%Dpsyi36;{VpsP+wCd`4wtu9a%NDHF{q_w%jT_9~mA+4j3_WX-- zbuXUCP{`m6aGx-xd_M<4Z`Q)wynpxJ2?&c?LfC-E<5Y03E8~%Om-obu_(A$ojNHEd zCNT1wH@~GY5}@dfl2{TVC4Y@%$rf14wt(v!8ms`s^eI^Yi|eU9sGv^s9asv@cQYMnH8YXc(t6q$ ztR1qn&MojPOIHT;1 zyCY9wg$k*ZVz-zih&ZrrY~2L8v9PbP1ixv(u7He0J<@amD_|98v?0$^|8G)4U@k9E za*+~+X67hJ`?)?2VURk(P{`k)SFciXi;@bG+)`(l*Zh)h5ss*tVcZ|WNK2@pmK!MK zb^sLZfwsZC`)SKjAh{SduPq5 z4qMZk>@qu;gFtC~W)QHjLA1*9B9tUXplr}Tf-z(a7Dy)>lxj!d0kAjbv2mmyYezW! z0f1^?A2Zk|>y$RNIm5|88gO>nz~Kf?(oto|!mR9OuwNWp{zqUEDpmQeaY4%}=e0{X z<$=4Qp`V+%vvy0Zf2|!^yeigGtfTrupQ5MMH8Fbik~Xc)=bpVORxp2GPa%O>ZDrn~ zC72F3e`HY0qx!LSVjF#_P2(7z0MX;8`q3JP9iG~ zvkA>|xz5dFbJ~FIH?s!T(8wBOI(B+b|MhQ<{^RFAyKE z7>Msko<_cPTHeMZw+@7i6EL?p9_Bi9KxvG_RD4E;0_Poke||J~eBaP}{FuW5hr_usjfuaASY(-(;^MG_A| zXXB`Vuy@fScRRzR;9$885+q?9V|zxqcBUuX<9dM#>y0tjj@t zGxrRDVd|zjh^w91M-?cb+XDffR%dmUrMenMz$iy!eV@7En|%kxA51E-5*xv9~)3 zhunkIDL1%aFJzCD9VeSKq*n263Lk__xuPxe=3xX0f+?E0xOfl8H|Pm(cCsgVq~*J% zQ};Giu;7RAPp^7q1c6GQemY|q3ke~=RRo>>3V%}5ElA4A9Bl5m(h#&%)l7ZtL(u_F zECyIIc8bMa^}sjrk%)E_jAyw4;vnGb>5^YdJG-*J7f9J$vb{|Lsg=<`?hC9<(yoJ6 z0e3`%+f%L~D&dkWmbpaO)M4HWh6$*Yn)*YfGK)X}`Vs|v0N*{9#YfDK@#Kw=3Ut2* z9aDIFaOEz=1s49lH<~1=w<}Qd5D|;rq40Jedv9zN8m|4@w+||#tNa_cN4&uB_b}2< ztDaZUI0DR4l*qs#E1Z$<&^$jtlCP6|y(G=Rqt#~AQ0Undo-!C4sx7dw#eSSnfQ2(nm zs7eTs^VgKp_}d&Y132A@Uv6Z9>m*T6&@Q=&qZt>FhO{KC;U^g7$X&(s~z zAo}Ugb=&+!(=q?kte#_V9d+45=ML_bs|3JbbIV-aHF#CIV+KJblAyIO*kXVxDtu2q3?X7wIN;wPnvByu)W$L|6 zJvZwz=sZcMPh99q@TX5yaXXwo;nV5V`(#D)R9DPOUGezRuBzjzYp68ie-DMJJt0)` z`Xb;91y#7T0(6jfFY{EW^F(MXAy$1i5(TGM;Ze~P*cxy1y{q2Gt;H1R07q_9S?Sq` zeRy?pj!J56rG#Sp=)@P@X`Y!UrXb`6q*`_NR)FgL(#b2}f5``Y`REH*PyPu6do|cs zK@ADGfjfDCWXZvs1^Ws+3^&V25$HQ4XKohe1A&Cig}t|?Hu9kMnRSeiyvm!IO?l;- zSxc@N+!rZ6OMJ*wnT3qB+`{?lV+o4&AW#$xyCemYT#vl@;<3WhqEpB(cH$8ts^SDd z$WKYp3lmrge|OUD3fZa@E|x;AAx$-+guE&lQZk_A1b^!9P@fR{S+}Z)HQB5VIBBwj z%O_~{!iNGunqL%dn#b|pz#DidpiW0RC;t|HE&L}F1O|EXYNrgF3I7&RIBj5Ii{Wii z>kJXPOjrJp0e{F;bq4Cti~6F@LX@`fPSn2+I6jofqoMyGni|NGNl`)^89c3-r-L^y zZ3o}2Y&RPRbm*v}ygtl$Y``m{e_Cc^shM7E;ie1}3Emq#5&TI2LJ0bGK0+{HPad;lX6}t_~cn>?pHX`1qeo63>EZa(4>F<@ta+ltbeSnA4Z|muPyIShF z-(Sk_=XSqve60PDBWB=~&6cEW)xOCH!~O&Agk;lXuPCEc)yZUS>YMB}{WX}VbNV?L zwu7ycr0(Tvuo6$EAUY}i9h&QJk$}QOsi%-E$Pe2y43TFBNl{dvVnfFkMU~0t$NvQkV^`t-x`S#~tHBe8 zY*4+fV$ri+nFCK1(Q2%%MiC!8>l@nn;{S!E-Bc1l(W+7m2sG4ztDTiG6@Q&zH=2xmGC@gRqwI`^%lByx zTm9XW47Y+g8G^@`y-uxew^Vj_V literal 0 HcmV?d00001 diff --git a/mmpretrain/models/utils/__pycache__/res_layer_extra_norm.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/res_layer_extra_norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a64755ca46f65f055da76b8799850fb8a0f8897 GIT binary patch literal 1310 zcmZWoOK%%D5FT>hiqzPN;Z!-Px9Cv$kRFS`Me7CyQn#oNMfXClmz1>jJ`|EdY+-$p zf!uNq5EMv&Uh+r!BY5q}f1yPmLrRJrbQT=WgWR3@=G&&%>j28@zkeV9nuz~VDbxmVK4OFaT;`=wt6Y2fUj467)O2#Dxy{0^Q14|#M2JQBgu zTaq;S8F+jIK8Vi6V*>W!ahm+%qAy^WToe4Yr${SV(9JUhzmq1%F;IZ=;r6Oa-PDdi;fUc#8zKCa@_=ROavAPrt(U1@j( z!_aJR-hFVcWp;>FEFOKu`9N5AV5=JFX3)rDQq);FI6mH-KRzDiwuO!OXjmxk;!;1N z(x6ETOL|IMQb0?8Bd-ut?Vb897S@M8LfVH*uZ6b(_jK68X=Dq;J-!b+(0ZrI4&Hx9 z5jwEN9-4rQnVw49tnqDe*ZXK*Q&hu_3DkThq>OAfW~`_S&6u=>nO-LIDN{o0qMj(z z$v9_K!{?=t_Ogi`{`q)(=f)Q$k7*@f(3Id-WnRJ-IJ z^$8^*xku=~@jr`)8~8trYqV)Jw~cHhK*O%Daj7p*X=st!h%J)g0la}<>5^Vj7xmEc zTAb37T$0vvyWjeVxWCx`wvpH`AB^X9t`Uuq2^>qMoxMy@4XKGHLT6e_xo!PG-p9?5 zbCY~%(w#J3GuN_jFFN(m9*f4SFYKbD?4PHFBF1==BMB^`a)8S8*5k@WSz_X*ng{lXEy)qccBU+!WAJI0ZY3^yIK1OJ zaimFitBMOfLLT8*M<-c+cGA>BImV@*f0UF>!KNbPYNV!_6nyR6;A4lCezLkF#iUSL zUdRxG=I+WBazj|h?>paBvqtIzDI0mKi5pzwFIW72Vh5#mP<08VU1Fb*eoDGK9i-HM Dvb0YY literal 0 
HcmV?d00001 diff --git a/mmpretrain/models/utils/__pycache__/se_layer.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/se_layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89a2306690dd872aa12729738eea3b69d2412500 GIT binary patch literal 2908 zcmb7GUvC>l5Z~Q9`_7KzrVvyDLM)X~Vw5_xLI@&2EhR;*N~kah$p~G}w`=>z{c(5q zk|w&oG?n_)cO-<=K9&!H_zd^TQ$7JI5-PLnvmK`q5@+4s?C$Jue=|Ef<8*Pcj^O$E zpWpkxH4yq!cTPSwbgsfH??A&4!x4&c7h^4xh{RUcGO`ufaiv=^vK>`pr|W=RVb#cu z-L8w#mxwvceTbOLD@Rth#&8FdLg}e&6mc)@ zA7fYm8qL_>5pJ_8a~@;(y87EE ztj0*+eT4d$)!D+sTDQU)Z1ExLRv#f7bsb>bES%2PZU2A^|J@2gox2(5hy0SCuuEI_ zf>8M?OcOe$-mcdvJlDiPZW>AVf}x)zJd$)ROjK)=emmqeOh%caYXCyiOo7~*sil3Y zK&7)qBfV>T3M9u{do#f8G~qNA05=0xT*RD(zTy)|nq+a03!3()P?}d~Ym867;C(-f z)F#z%>on$mA`OK-Kgs~VPj_}s@4d4#hZRcDnLnVlDb0-yDzul@sX4t=w03r8R+*`Z zZ%Xfu^}#G4XnFOKI6uw(o5wTwGEXy0#jONp28VaKAJHJ;EH)ce?ZqJk;1}2 zhYH-iemDRgfuel~Ag!22nF-}nQ87nm93lI|Fc_9`lF2X`nDtlKY-7PyCK7L-hl8P7 z>!oP~n|?p!5ZFR{VLWdfFxENY${T(p`3%Yr)Yt?=7Pxje+=C?GKj?uU&oBm7gnQ#_ zdFlq~u)o=-6G%8l-}nH+Yd_V)bL%A4u|*-Cz&+ru3__K&Ie+vHzy^TPRN5~=%aWihO;f{~o?BA~ zBmU{?H~jW@t1W$-r$e1^5T_wqZMDqu?c*g#!pAN&h3#i5Eo{9Avbe1wAft};Zv>_m zm6==1I*VKI%0Hk{NZ}kEktZatJ;fL)D|eKAM8x^r$(=sA@2E=d9ASociCnpb9$-}+ zYe;H(&J&lBr-0$ra>7uC#AQ{_ac;eVa=^Jy0FkH#K-`3RYuA?7Zy{#qe#yH=mAD8A3GbN}>3Gi*?$Af-&HV9@bz?D(|W^0!qxX(->SkX??P*-sK{)@g>Zmg;d)+}gv#@ZI+R=3 z&W8(E*!si^E8>aJ{}Kw4B*pn_%pYl{*Y*Z?;GSP$MRO3PJwNjNy}<}ZL{;lrU`Vkz zmluTgNm0>UE{{J~TJ^$~Jn93(vTAmhPCKfon37RAWmXlh0*qMJ&1<@OTcf&$K3Mn+ z7d6idB40|+dxrl0U_+#-+KA&3a1nl(%!w(3LcIZb5L(H-arzh?hzo#u5ng!}nx%Rj zyLbsVaRa_hyo?Ea$qV~M)gmCzepT!~C+@6Ia1}S6A1;+utv&H_pgl2pbolG{I=u3I zXma!f<@hPSPe6hoEG06LezL@JPCB5IlVu%lc)Mkbw_v59x_L)8+AA$fXd?=IudpCg zL`zSY<~=>^Kqc7s1-l6y34aYy9qV5e+t?+C=O&~}t(L2!B=KXeqr~%K^H&b!hUeYQ z{HUC9JddS;=ZO`)nf9Z&1WmD69!kKY2xezgR<(-eV5pRNa;l^>W*AAd-f~**fT{so7`GoSzf literal 0 HcmV?d00001 diff --git a/mmpretrain/models/utils/__pycache__/sparse_modules.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/sparse_modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..543ea799e29724cb875b63aa73a2d94ccf077e09 GIT binary patch literal 4303 zcmbVPUvC@75x?C#9*-0yQL>WQhTC49q%{jcu2rXPT&HnjCvgf}Eg%&whyte*cPWuR z-Z6VeTOuf+hyvuv?*)pY(xbmYKS4jlqHlfDQy=mcpjLmgcOogvZqgpKquZI?*_%H* z^J6kQTea}~>EC~E-+axo{z;SRr-aEJjP!3H!V)a9n)!d$<4w-!okw=hX*$gM#1gh} zzOaOIz?&t^O2Wm=mAva7*v&FHW#NJ2f#ZQw0jDCW;8ek>f-@tX!`cyPnh`bdYv9iw z*axgRH+nmfNTM>1tVJ&Y~5-s9^>n+wFcAf!(ZR~=)ro6 z=F^W?OwvE%MOsZJtS0|0>}}eDHyv5(y0R?n+m`eyj+hrES%F-6`rVkQNmZw zNDsDvS4f;Mtu`~Z)F7?BR7&=cboNWjv6^%E-ny=>_d}J+`!ecFHSBHdNPow?`&r_* zl3st1Nk27f`~*wFtdqq4lTNnd_xG|ywRV>MbkN%I!?ajd_3^Zl_;Hj(16qv*`Tngi z!@@XGy=7l!t(7%B6SM{@2wPd_vE0z?zFtx?8>qMwZx`I47xuNAb6b4MlMdc#+{H-m zfQ+on>as2u?2>ia+UGPov`1XncgI>%CqduEV1E%xfZKlwL=0K=arH zy+jNmnXdHr^u=IXW>fAD5`~Y0nRRGRUk9nXE^`<^@i1zfgF4JT8NNN8ODnI^qS@}5 zK$5%2IC4z-W01_s*vLAd!4LU}9kG2jVwv4_MiefDS8!oJgssiO3&-lcZ4HrQeAgi^L?e*dAVox5V3s|mZK^j~;sf^lc36p0` zSAw7wg=rcD#@T8SY(1a$1IqfK&~XLKlxp6R?b=o5oj81W<&1+~*l@-v6lfa8DG^>M zn_<4m?%e!+xaJULP%z5iBL}&SY`qT4)um_77m?U*;n}UV})vNDav$3)~Xp0_^2evqmAkduGD3CyN8;h zD38<|L@t8plBp%SO4*(UQRk6V-vu8R<`S(T&o^sDnww z5X50m27#^y#ofVsEeJjvgi#?W2Z2ahND{j3$_LTzhK$oh>G>cC<2cDoHANojT1xM& zp3HU

    5Ox?QA7Uq)ZZ0E~&Rk!EBLwEg40p>1Rwd#!@00hf^d{+Euk2HfM7@-if)V zy!M>WTw%QO7BeebSajy0NZ!HtiUM@npr}1br(0^ef+Iid)3$LJ3BM}7ZtjGwN4Nas zu~e!fq@SUSoV$LeFRV-HZ@!TDn|`>3t+d1MB})22W+6(G*|7GktSDCMheWOsX%I1G z)cA$cCl9l6#5myU1_bY8d_@<@gCnZ8;l0viMb!t!)$`~-40nHn5TF;SRS4G9Pe5OK z(mN1*Ov0N)X!dQ-_~7yOw>;yg(E4zE2FkJht=AZT|~nwjFL%J{sNO~jW~WIy8SX2!dKNL?lzbXfvM1~QH8ysaEq zhiDw8ow(idh@qW$rk&soGoV)L$I#_LS2WgDnbvg(91bi#1Mp?q%`0@{ob$TPUhEW= zBj>PsRP_IB=5yAlg?~lm%H!xqXLTP8mVgyUYIlW&d}L6`VG$9RMc#FUrs9%1xde=96;L zj3c1@o@|GMC`;*Q!wVKn+;mKxaJVtfainw-M|=1I(eEb;n~i(YJVv>}*VZN`Ec;vV z{dRKd|LJwQcIq}V%%G40ntTUms`rQx{+l3`MortI(=!Nf3Z)oaVtR^C%fC;Akcj#o zh^_+SWu5IoGD!7>wImzgV<|pTDpAvTMbAv7gf1D>qTVD`SBad3C#F2^fS*zinX7RC zZ0E!|@c>5%pm?PkCwSs4djHBfb?!MiZ;3g!b27YCRIy2&St*)ugBb@jyqC68pT6ct zM2y+j>21HS0>zbXSFGqqG@oNR0ZhLGvmsCLzu3iXV3HlQ{Z)F toO0$nr_39}1?}C=e*^uJ5Ku~Hdv=Z0_?*4?&SH77?3R}p4X=EW{Rh=WCTjoy literal 0 HcmV?d00001 diff --git a/mmpretrain/models/utils/__pycache__/swiglu_ffn.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/swiglu_ffn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6765c4c410691e3d81f19b0c6a04b5760c94844f GIT binary patch literal 2754 zcmcgu&2QX96rUN7y|(uwX}WDG6bRoehh35u1QkL^l~NF85o)=tkSxdJO`P~+8QV=8 zIj2-qLh7Akd+e1zfkF``bwo zWvL$zq0?S3qgZ%CW|OQe4rndnUh{8<@_OLMA;>J&GHE6gFUE-gcbYG!r=9#~U_vsY zg22n@$Z!~ZhQoEsMOT<17gh-LJluNP+My+!umQIR>(2)(43nrLzIOYd*+#qK&xp#^ba7Av~v6nx99$QGEj za=N541-oZ}q*XNpDXDzFXjF~L*=I0jr)I(LI}Zr*dYuC+jpH?L#f7oW<&^@@;DX=7 zRjs*It;&+aqFq`0H04ESYqr}$WrO6RAYfc`c);N1Ju2JKNDW=43{b2DPb9rl%FN42 zD3$Hiy7MHeKsJFWmZr+O7NuZNW$di1N()Chs!-Wq%~Z#BE=p9BvNrrA@mD44OPNIm zD3BfC59ppz8Cz6P!Gv*-{lND4 zo>{Q_M#Ue{%IuOo3nY1Efh2OK<{H2Od|(VYfKSbBfr+=AZ6jZW*3BI9U>T z8Gy(tRyEEc0QgWY!9@;^pa2T+^}VKi2^wVvn`OGK=o(liic5JO2IVVAE+EnNKpfO2 z(?+!AYdG*0lDB~<>YBpbf|X?V8dq$9)l&n2IP6U19HHBW(~*?5b+^HG(`(Cgsr2$1Ni8u&auAN$~+Xi zKh^z{6tKJlVSagB=25!wKc<7);xCRDr$7|}MxvBWiy zTdd;t1VE#Gul6xpr-go+%CjIcIz8+5$7LGSe_YBv;pKZErMgKH-0TNw+FzGZu$}iY zr`KtwIhXu(7OJBdLP1&Z(*kipdz8M>-1Clgt?bY0@bNi?H95x07PVQ6F3}ckVQX|h F{})t>f7k#3 literal 0 HcmV?d00001 diff --git a/mmpretrain/models/utils/__pycache__/vector_quantizer.cpython-310.pyc b/mmpretrain/models/utils/__pycache__/vector_quantizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4749627fb72a80dda40ced2d0bdc8d2f3731eacb GIT binary patch literal 7113 zcmdT}&5s<%b?>VFoSu&z&Muc+lA>CIlW-DgWywfj34&xL5(UHdTDCS;Jh0R5ovPWL z*_rNPbr07Z^yESYJ`q7~0fGRtmjKMc$jN^~jyd|!H^Ya3r}!#|6!Ux4GqbZxGC*=m zFZxaOtM6B@-uu0`NxkkHc>dw(zxDq4HN*HVeXKuqe0&RU>M+9)hG0Wu#5!!mJDkxs zADSIg|5_uvV=s8l$mRw!MsCN`9CKJ1`5k{$?Ns%*HLQ*5ojS_4GzSggXzFU}3EuNW zMfeY_&c;jYtD=Vbrl{*a4S7j;RO&fmLo`v-s$8nH#EnJ$ChE8FUY46^$El@m&3BBR zFSf;%2X<%cQ$t)8I}Z%8BkjTE`(|ewoNMAbIM)}PD}sM+v|o8l5^D3p`~EoVCvh|^ z?ET4jDEGjtC>bdg#fP$RlpM<_YqP?ShQm-vG3m;}A0>DD@nIO~1o?7Z&VxQK3rg$$N1bptwlPLvmVoJtg|}k%A23-6$It6r9sr~CQ3jz zgDeTMBN-g_@5(s9@(ys8ic|=?>GQB9mM%gM{Ebcmbr*u>;?(uErSzBLuT>TLkkI8`D zE^0J#cQ{EisZyK|zWTb@n4~i7CTTyGVcH*c=@ga^9WAN4mh=-8)+LQHH%)HNteHJ? 
zG80l|_xa4tteoFx9~(dI&Yav-uYvE*?Ng&^%)EFrb8_6s`y9WnFz@4j&MJ9DSewSc z6%2aHs%V#c1F!rVRAzqW530G3`|LdAvua)y?lGU$^6I>n*MxVOF%p1bZHgaG1xh1L(vAR=}8F?eGi`tHH-9Q^tjkD(9Qr^J*V4_a_GJoE}m}rr^ zdE?Zaxp%fvs`kjPHKyA?oW#NLn11RqHtqIyJ{PvnbzY#1aE#2#KEn0Ag1+h z`H2sd)MYez#MBOcpd^)1KOV~XFgwy}qi-76w#e>{<I(JSRB%Ct z+9eVYq4O>5(weeN(a{xVIvEw_DEhhjLu&Fm@w|h6+)XBNR#Zn(dMriQ>ko&}0(lbR ziY94MOS@49eF(|aDY{{bR7%~T?)J%%RIK7RWW*=QVDz z2J^VbtYAYpb=KW_LBrk`uzLmjeaw1+*f3V{Ewa|7pP0FtnoO zP-lae#|9VI&w^Xn-Vm$&;z+`NDzc-nKC2srPiofJj)T^EZs;Fr@A-q3^9(+gy=XGb zQW|DoP2|~}%T?%E!*K++b}P*kW~N=O=7NxD5ojC&+wvP#TIoL5fc2MgrYja}`3k!~)n>FK*ow8FPqWDHu z6?Sg@qw!C$*USK3s9}6ZxUjNL?#%0_#taBz*;jbWo>^nCL6%nd4{23R;elQf@Tl$T zp0;$_B5I>mz=8E)S;X|!VG;=~{FRJ@6WKpJg4|Uy3JSA3p1|VOM$tGNCa_01o&DS3 z)Rnw5=_{Frhbj`dKXy8S4=uc~Jm64~gHj(8(e{o^qKMZmSZU=plstrGf|)y*^m-V< z*@u27s<6ij=U-T~M8KFKPPpK4TvR@bR5X(4Q8?vV3umGcWZNyQR1SM-ODMz-x7U?RV;0!*LbgWR}+x&Q(}W%S_#1c7TVz@qh&5=I#VfXKN+ zfEtR3Ui6i@b5B@W8Nj7c+Rn87?X#Qvobe*_v6RN{-t=pqNrkHhyMGvbXW>mrOU~Ut zMMi$V&9s+kSJbEYR6ig>2E7zJNvs5+*Qi9YQ?$RfrT%JJRoHqt>OEpyz@b%r8FhV_ zpk5;=$Fv~(Z8(-|YaOmrF<#UmtI+*1gOO}9Ul*$d2j z+tlh>RP?aq8nhO*dAXCHVyKI&cOk=*NQrx>(_uq{Pq)skchR8D_xIXOx-K!4ISdOw z3`dEW45?fTDIggxYTPgsNf&HW-v_eSiuMFpktwaEeek$+h5rOid^NP=5~cZM8=P5BIOESa=U;J?KXt6e6K7|e-K^-= z&v|f)E9gmo26EAZSImkfHWwb;f(N$|*TP~+2OirIRq0A^$AHJSdQI)s;j`h@E815Z z9Ulmw^}y&2M+_sACJ!mZ4^#wN+eFgDQl@tML7w$S(#dwG`NzgBK-}<=!FW!6Q zlFm6vxWaF~{q8$wuCUZn_@f0x_}_T;vpaikj&)!7jkT!`WEKUtda$8{M99;Q5ciIU zeRz8EtQRD=b_s?_nw~8g?sl~~^6hV*Rq0f~`8i6~cxk7})ZiR;&yDc)-FM$T8zjx( zTGFiFMc5-`I7yKKco{TVL`;Z`rjpj8B*lQ|#IYLg{G8_z$r=`T*;LE80<(~v`Tv80 z{^OUQ{${%M%m4fka7YK1TIUM?O!a{Lp&L4DQf#f{wI+Ub<~z0bq9n`mq4EkEk(F74 zMg%{!=K?fBh!&$NT6PBt2Z>L;RyDs`2q|Pc+mjH+=KHt zgA=QD0o;|rvsQ(6O??L|DGj2=9l8qz6MC20WU0TVPx343`$S$RvIo-kN~1m`#)!yS zN8NlitI{S@8&7RF2`2nKUY+dnJ%S1J$KkSIL-FT5aU2WI%X&6_^~DBj1s0z3TPuk0 zb<9A{UTs5A$f9L@PQtWG+=voE0tk*oy%HSXQ)BdNyD!>tDN(8~c$O@kT7d71POSx) zC$nXyLJRv}&@9zj0QA#h)>qT^OSblW-*cO!WG#)Oo%62|b!7n8f|3?2D-(u*&%y$L zhRdD<3+YV*2`#o?OH>HBQ~jyAWgyvMBAHP` zvI!0eFdKRa5D&r5k-iu-P8py)n{SX751OZZehJ(bxTMdU=?o)oof?0)11#nMgZZd& zv&*@w5gZO{a0ST=VmvsAO2dM)@`&1ei>g)D;Hp1+1!5VC9;Ncu3`Vs_Oa-W7x= zI6+%9aDq1Y2w!@X=G~ZHx?Mt}pf`!T6i4h%TL(cmI0{ZEwQ`^f-3y_L&cf_NIVq)4 zxW`hC;WyPA_?lJ@#2c}A^Miv}Cv5f(!Nl{s4;5V&8k~mimV@hg+OO+;%vuoQLOpdt zP#X+w9LSs%<~TV~-#|$tz7k(K5}6Ssa-2vV0w)*N-M&20>7T+ya1tIRnZ8qnMH!Gm7xKww!jSkj?B=s=t7-Zsf~uP@F)K?LZK-2Sybmy4 zS3g5ZMMMsW5Ij`^q_8yrQj!>ehKA}9A;YZ{5rW97Pehw_P0oPv02P0OH+=_01LJKn z>saiU>)@DM{0X=De_^M;cU=A@cN<@NJ55JXe`xS6{M>F|EgWbEg%WxpCYYsmc{&-r z8^?;$=bg>v=kD^l;9oZWS-Ff%NF|eTT57e8enowR^w1#^$Gep8E0ygtnuOB%r6Q3? zbeggh2!(Ei^F0lTOCL!KUXz3t2f4b!d%r}^AHYr<>H8o}i_Wlxv#hZOJRY68|EvLj YSC+21NC7hc>AIjArvJ3{`u3mxH)PfW?*IS* literal 0 HcmV?d00001 diff --git a/mmpretrain/models/utils/attention.py b/mmpretrain/models/utils/attention.py new file mode 100644 index 0000000..e92f605 --- /dev/null +++ b/mmpretrain/models/utils/attention.py @@ -0,0 +1,1129 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import itertools +import warnings +from functools import partial +from typing import List, Optional, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn.bricks.drop import build_dropout +from mmengine.model import BaseModule +from mmengine.model.weight_init import trunc_normal_ +from mmengine.utils import digit_version + +from mmpretrain.registry import MODELS +from .helpers import to_2tuple +from .layer_scale import LayerScale + +# After pytorch v1.10.0, use torch.meshgrid without indexing +# will raise extra warning. 
For more details, +# refers to https://github.com/pytorch/pytorch/issues/50276 +if digit_version(torch.__version__) >= digit_version('1.10.0'): + torch_meshgrid = partial(torch.meshgrid, indexing='ij') +else: + torch_meshgrid = torch.meshgrid + + +def scaled_dot_product_attention_pyimpl(query, + key, + value, + attn_mask=None, + dropout_p=0., + scale=None, + is_causal=False): + scale = scale or query.size(-1)**0.5 + if is_causal and attn_mask is not None: + attn_mask = torch.ones( + query.size(-2), key.size(-2), dtype=torch.bool).tril(diagonal=0) + if attn_mask is not None and attn_mask.dtype == torch.bool: + attn_mask = attn_mask.masked_fill(not attn_mask, -float('inf')) + + attn_weight = query @ key.transpose(-2, -1) / scale + if attn_mask is not None: + attn_weight += attn_mask + attn_weight = torch.softmax(attn_weight, dim=-1) + attn_weight = torch.dropout(attn_weight, dropout_p, True) + return attn_weight @ value + + +if digit_version(torch.__version__) >= digit_version('2.0.0'): + scaled_dot_product_attention = F.scaled_dot_product_attention +else: + scaled_dot_product_attention = scaled_dot_product_attention_pyimpl + + +class WindowMSA(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Args: + embed_dims (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + attn_drop (float, optional): Dropout ratio of attention weight. + Defaults to 0. + proj_drop (float, optional): Dropout ratio of output. Defaults to 0. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + window_size, + num_heads, + qkv_bias=True, + qk_scale=None, + attn_drop=0., + proj_drop=0., + init_cfg=None): + + super().__init__(init_cfg) + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.scale = qk_scale or head_embed_dims**-0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), + num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # About 2x faster than original impl + Wh, Ww = self.window_size + rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww) + rel_position_index = rel_index_coords + rel_index_coords.T + rel_position_index = rel_position_index.flip(1).contiguous() + self.register_buffer('relative_position_index', rel_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop) + + self.softmax = nn.Softmax(dim=-1) + + def init_weights(self): + super(WindowMSA, self).init_weights() + + trunc_normal_(self.relative_position_bias_table, std=0.02) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (num_windows*B, N, C) + mask (tensor, Optional): mask with shape of (num_windows, Wh*Ww, + Wh*Ww), value should be between (-inf, 0]. 
+ """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + @staticmethod + def double_step_seq(step1, len1, step2, len2): + seq1 = torch.arange(0, step1 * len1, step1) + seq2 = torch.arange(0, step2 * len2, step2) + return (seq1[:, None] + seq2[None, :]).reshape(1, -1) + + +class WindowMSAV2(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + Based on implementation on Swin Transformer V2 original repo. Refers to + https://github.com/microsoft/Swin-Transformer/blob/main/models/swin_transformer_v2.py + for more details. + + Args: + embed_dims (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool): If True, add a learnable bias to q, k, v. + Defaults to True. + attn_drop (float): Dropout ratio of attention weight. + Defaults to 0. + proj_drop (float): Dropout ratio of output. Defaults to 0. + cpb_mlp_hidden_dims (int): The hidden dimensions of the continuous + relative position bias network. Defaults to 512. + pretrained_window_size (tuple(int)): The height and width of the window + in pre-training. Defaults to (0, 0), which means not load + pretrained model. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
+ """ + + def __init__(self, + embed_dims, + window_size, + num_heads, + qkv_bias=True, + attn_drop=0., + proj_drop=0., + cpb_mlp_hidden_dims=512, + pretrained_window_size=(0, 0), + init_cfg=None): + + super().__init__(init_cfg) + self.embed_dims = embed_dims + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + + # Use small network for continuous relative position bias + self.cpb_mlp = nn.Sequential( + nn.Linear( + in_features=2, out_features=cpb_mlp_hidden_dims, bias=True), + nn.ReLU(inplace=True), + nn.Linear( + in_features=cpb_mlp_hidden_dims, + out_features=num_heads, + bias=False)) + + # Add learnable scalar for cosine attention + self.logit_scale = nn.Parameter( + torch.log(10 * torch.ones((num_heads, 1, 1))), requires_grad=True) + + # get relative_coords_table + relative_coords_h = torch.arange( + -(self.window_size[0] - 1), + self.window_size[0], + dtype=torch.float32) + relative_coords_w = torch.arange( + -(self.window_size[1] - 1), + self.window_size[1], + dtype=torch.float32) + relative_coords_table = torch.stack( + torch_meshgrid([relative_coords_h, relative_coords_w])).permute( + 1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 + if pretrained_window_size[0] > 0: + relative_coords_table[:, :, :, 0] /= ( + pretrained_window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= ( + pretrained_window_size[1] - 1) + else: + relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1) + relative_coords_table *= 8 # normalize to -8, 8 + relative_coords_table = torch.sign(relative_coords_table) * torch.log2( + torch.abs(relative_coords_table) + 1.0) / np.log2(8) + self.register_buffer('relative_coords_table', relative_coords_table) + + # get pair-wise relative position index + # for each token inside the window + indexes_h = torch.arange(self.window_size[0]) + indexes_w = torch.arange(self.window_size[1]) + coordinates = torch.stack( + torch_meshgrid([indexes_h, indexes_w]), dim=0) # 2, Wh, Ww + coordinates = torch.flatten(coordinates, start_dim=1) # 2, Wh*Ww + # 2, Wh*Ww, Wh*Ww + relative_coordinates = coordinates[:, :, None] - coordinates[:, + None, :] + relative_coordinates = relative_coordinates.permute( + 1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + + relative_coordinates[:, :, 0] += self.window_size[ + 0] - 1 # shift to start from 0 + relative_coordinates[:, :, 1] += self.window_size[1] - 1 + relative_coordinates[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coordinates.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer('relative_position_index', + relative_position_index) + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(embed_dims)) + self.v_bias = nn.Parameter(torch.zeros(embed_dims)) + else: + self.q_bias = None + self.v_bias = None + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop) + + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ + Args: + + x (tensor): input features with shape of (num_windows*B, N, C) + mask (tensor, Optional): mask with shape of (num_windows, Wh*Ww, + Wh*Ww), value should be between (-inf, 0]. 
+ """ + B_, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat( + (self.q_bias, + torch.zeros_like(self.v_bias, + requires_grad=False), self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B_, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + + # cosine attention + attn = ( + F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) + logit_scale = torch.clamp( + self.logit_scale, max=np.log(1. / 0.01)).exp() + attn = attn * logit_scale + + relative_position_bias_table = self.cpb_mlp( + self.relative_coords_table).view(-1, self.num_heads) + relative_position_bias = relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], + self.window_size[0] * self.window_size[1], + -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + relative_position_bias = 16 * torch.sigmoid(relative_position_bias) + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, + N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +@MODELS.register_module() +class ShiftWindowMSA(BaseModule): + """Shift Window Multihead Self-Attention Module. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): The height and width of the window. + shift_size (int, optional): The shift step of each window towards + right-bottom. If zero, act as regular window-msa. Defaults to 0. + dropout_layer (dict, optional): The dropout_layer used before output. + Defaults to dict(type='DropPath', drop_prob=0.). + pad_small_map (bool): If True, pad the small feature map to the window + size, which is common used in detection and segmentation. If False, + avoid shifting window and shrink the window size to the size of + feature map, which is common used in classification. + Defaults to False. + window_msa (Callable): To build a window multi-head attention module. + Defaults to :class:`WindowMSA`. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. + **kwargs: Other keyword arguments to build the window multi-head + attention module. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + shift_size=0, + dropout_layer=dict(type='DropPath', drop_prob=0.), + pad_small_map=False, + window_msa=WindowMSA, + init_cfg=None, + **kwargs): + super().__init__(init_cfg) + + self.shift_size = shift_size + self.window_size = window_size + assert 0 <= self.shift_size < self.window_size + + self.w_msa = window_msa( + embed_dims=embed_dims, + num_heads=num_heads, + window_size=to_2tuple(self.window_size), + **kwargs, + ) + + self.drop = build_dropout(dropout_layer) + self.pad_small_map = pad_small_map + + def forward(self, query, hw_shape): + B, L, C = query.shape + H, W = hw_shape + assert L == H * W, f"The query length {L} doesn't match the input "\ + f'shape ({H}, {W}).' 
+ query = query.view(B, H, W, C) + + window_size = self.window_size + shift_size = self.shift_size + + if min(H, W) == window_size: + # If not pad small feature map, avoid shifting when the window size + # is equal to the size of feature map. It's to align with the + # behavior of the original implementation. + shift_size = shift_size if self.pad_small_map else 0 + elif min(H, W) < window_size: + # In the original implementation, the window size will be shrunk + # to the size of feature map. The behavior is different with + # swin-transformer for downstream tasks. To support dynamic input + # shape, we don't allow this feature. + assert self.pad_small_map, \ + f'The input shape ({H}, {W}) is smaller than the window ' \ + f'size ({window_size}). Please set `pad_small_map=True`, or ' \ + 'decrease the `window_size`.' + + pad_r = (window_size - W % window_size) % window_size + pad_b = (window_size - H % window_size) % window_size + query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b)) + + H_pad, W_pad = query.shape[1], query.shape[2] + + # cyclic shift + if shift_size > 0: + query = torch.roll( + query, shifts=(-shift_size, -shift_size), dims=(1, 2)) + + attn_mask = self.get_attn_mask((H_pad, W_pad), + window_size=window_size, + shift_size=shift_size, + device=query.device) + + # nW*B, window_size, window_size, C + query_windows = self.window_partition(query, window_size) + # nW*B, window_size*window_size, C + query_windows = query_windows.view(-1, window_size**2, C) + + # W-MSA/SW-MSA (nW*B, window_size*window_size, C) + attn_windows = self.w_msa(query_windows, mask=attn_mask) + + # merge windows + attn_windows = attn_windows.view(-1, window_size, window_size, C) + + # B H' W' C + shifted_x = self.window_reverse(attn_windows, H_pad, W_pad, + window_size) + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll( + shifted_x, shifts=(shift_size, shift_size), dims=(1, 2)) + else: + x = shifted_x + + if H != H_pad or W != W_pad: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + x = self.drop(x) + + return x + + @staticmethod + def window_reverse(windows, H, W, window_size): + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, + window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + @staticmethod + def window_partition(x, window_size): + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, + window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous() + windows = windows.view(-1, window_size, window_size, C) + return windows + + @staticmethod + def get_attn_mask(hw_shape, window_size, shift_size, device=None): + if shift_size > 0: + img_mask = torch.zeros(1, *hw_shape, 1, device=device) + h_slices = (slice(0, -window_size), slice(-window_size, + -shift_size), + slice(-shift_size, None)) + w_slices = (slice(0, -window_size), slice(-window_size, + -shift_size), + slice(-shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + # nW, window_size, window_size, 1 + mask_windows = ShiftWindowMSA.window_partition( + img_mask, window_size) + mask_windows = mask_windows.view(-1, window_size * window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0) + attn_mask = attn_mask.masked_fill(attn_mask == 0, 0.0) + else: + attn_mask = None + return attn_mask + + +class 
MultiheadAttention(BaseModule): + """Multi-head Attention Module. + + This module implements multi-head attention that supports different input + dims and embed dims. And it also supports a shortcut from ``value``, which + is useful if input dims is not the same with embed dims. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + input_dims (int, optional): The input dimension, and if None, + use ``embed_dims``. Defaults to None. + attn_drop (float): Dropout rate of the dropout layer after the + attention calculation of query and key. Defaults to 0. + proj_drop (float): Dropout rate of the dropout layer after the + output projection. Defaults to 0. + dropout_layer (dict): The dropout config before adding the shortcut. + Defaults to ``dict(type='Dropout', drop_prob=0.)``. + qkv_bias (bool): If True, add a learnable bias to q, k, v. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + proj_bias (bool) If True, add a learnable bias to output projection. + Defaults to True. + v_shortcut (bool): Add a shortcut from value to output. It's usually + used if ``input_dims`` is different from ``embed_dims``. + Defaults to False. + use_layer_scale (bool): Whether to use layer scale. Defaults to False. + layer_scale_init_value (float or torch.Tensor): Init value of layer + scale. Defaults to 0. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embed_dims, + num_heads, + input_dims=None, + attn_drop=0., + proj_drop=0., + dropout_layer=dict(type='Dropout', drop_prob=0.), + qkv_bias=True, + qk_scale=None, + proj_bias=True, + v_shortcut=False, + use_layer_scale=False, + layer_scale_init_value=0., + init_cfg=None): + super(MultiheadAttention, self).__init__(init_cfg=init_cfg) + + self.input_dims = input_dims or embed_dims + self.embed_dims = embed_dims + self.num_heads = num_heads + self.v_shortcut = v_shortcut + + self.head_dims = embed_dims // num_heads + if qk_scale is not None: + self.scaled_dot_product_attention = partial( + scaled_dot_product_attention_pyimpl, + scale=self.head_dims**-0.5) + else: + self.scaled_dot_product_attention = scaled_dot_product_attention + + self.qkv = nn.Linear(self.input_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = attn_drop + self.proj = nn.Linear(embed_dims, embed_dims, bias=proj_bias) + self.proj_drop = nn.Dropout(proj_drop) + + self.out_drop = build_dropout(dropout_layer) + + if use_layer_scale: + warnings.warn('The `use_layer_scale` in `MultiheadAttention` will ' + 'be deprecated. Please use `layer_scale_init_value` ' + 'to control whether using layer scale or not.') + + if use_layer_scale or (layer_scale_init_value > 0): + layer_scale_init_value = layer_scale_init_value or 1e-5 + self.gamma1 = LayerScale( + embed_dims, layer_scale_init_value=layer_scale_init_value) + else: + self.gamma1 = nn.Identity() + + def forward(self, x): + B, N, _ = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + self.head_dims).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + attn_drop = self.attn_drop if self.training else 0. 
+ x = self.scaled_dot_product_attention(q, k, v, dropout_p=attn_drop) + x = x.transpose(1, 2).reshape(B, N, self.embed_dims) + + x = self.proj(x) + x = self.out_drop(self.gamma1(self.proj_drop(x))) + + if self.v_shortcut: + x = v.squeeze(1) + x + return x + + +class BEiTAttention(BaseModule): + """Window based multi-head self-attention (W-MSA) module with relative + position bias. + + The initial implementation is in MMSegmentation. + + Args: + embed_dims (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (tuple[int, int]): The height and width of the window. + use_rel_pos_bias (bool): Whether to use unique relative position bias, + if False, use shared relative position bias defined in backbone. + bias (str): The option to add leanable bias for q, k, v. If bias is + True, it will add leanable bias. If bias is 'qv_bias', it will only + add leanable bias for q, v. If bias is False, it will not add bias + for q, k, v. Default to 'qv_bias'. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop_rate (float): Dropout ratio of attention weight. + Default: 0.0 + proj_drop_rate (float): Dropout ratio of output. Default: 0. + init_cfg (dict | None, optional): The Config for initialization. + Default: None. + """ + + def __init__(self, + embed_dims, + num_heads, + window_size, + use_rel_pos_bias, + bias='qv_bias', + qk_scale=None, + attn_drop_rate=0., + proj_drop_rate=0., + init_cfg=None, + **kwargs): + super().__init__(init_cfg=init_cfg) + self.embed_dims = embed_dims + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + self.bias = bias + self.scale = qk_scale or head_embed_dims**-0.5 + + qkv_bias = bias + if bias == 'qv_bias': + self._init_qv_bias() + qkv_bias = False + + if window_size is None: + assert not use_rel_pos_bias + else: + assert isinstance(window_size, tuple) + self.window_size = window_size + self.use_rel_pos_bias = use_rel_pos_bias + self._init_rel_pos_embedding() + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop_rate) + + def _init_qv_bias(self): + self.q_bias = nn.Parameter(torch.zeros(self.embed_dims)) + self.v_bias = nn.Parameter(torch.zeros(self.embed_dims)) + + def _init_rel_pos_embedding(self): + if self.use_rel_pos_bias: + Wh, Ww = self.window_size + # cls to token & token 2 cls & cls to cls + self.num_relative_distance = (2 * Wh - 1) * (2 * Ww - 1) + 3 + # relative_position_bias_table shape is (2*Wh-1 * 2*Ww-1 + 3, nH) + self.relative_position_bias_table = nn.Parameter( + torch.zeros(self.num_relative_distance, self.num_heads)) + + # get pair-wise relative position index for + # each token inside the window + coords_h = torch.arange(Wh) + coords_w = torch.arange(Ww) + # coords shape is (2, Wh, Ww) + coords = torch.stack(torch_meshgrid([coords_h, coords_w])) + # coords_flatten shape is (2, Wh*Ww) + coords_flatten = torch.flatten(coords, 1) + relative_coords = ( + coords_flatten[:, :, None] - coords_flatten[:, None, :]) + # relative_coords shape is (Wh*Ww, Wh*Ww, 2) + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + # shift to start from 0 + relative_coords[:, :, 0] += Wh - 1 + relative_coords[:, :, 1] += Ww - 1 + relative_coords[:, :, 0] *= 2 * Ww - 1 + relative_position_index = torch.zeros( + size=(Wh * Ww + 1, ) * 2, dtype=relative_coords.dtype) + # relative_position_index shape 
is (Wh*Ww, Wh*Ww) + relative_position_index[1:, 1:] = relative_coords.sum(-1) + relative_position_index[0, 0:] = self.num_relative_distance - 3 + relative_position_index[0:, 0] = self.num_relative_distance - 2 + relative_position_index[0, 0] = self.num_relative_distance - 1 + + self.register_buffer('relative_position_index', + relative_position_index) + else: + self.window_size = None + self.relative_position_bias_table = None + self.relative_position_index = None + + def init_weights(self): + super().init_weights() + if self.use_rel_pos_bias: + trunc_normal_(self.relative_position_bias_table, std=0.02) + + def forward(self, x, rel_pos_bias=None): + """ + Args: + x (tensor): input features with shape of (num_windows*B, N, C). + rel_pos_bias (tensor): input relative position bias with shape of + (num_heads, N, N). + """ + B, N, C = x.shape + + if self.bias == 'qv_bias': + k_bias = torch.zeros_like(self.v_bias, requires_grad=False) + qkv_bias = torch.cat((self.q_bias, k_bias, self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + else: + qkv = self.qkv(x) + + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + if self.relative_position_bias_table is not None: + Wh = self.window_size[0] + Ww = self.window_size[1] + relative_position_bias = self.relative_position_bias_table[ + self.relative_position_index.view(-1)].view( + Wh * Ww + 1, Wh * Ww + 1, -1) + relative_position_bias = relative_position_bias.permute( + 2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if rel_pos_bias is not None: + # use shared relative position bias + attn = attn + rel_pos_bias + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class ChannelMultiheadAttention(BaseModule): + """Channel Multihead Self-attention Module. + + This module implements channel multi-head attention that supports different + input dims and embed dims. + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + input_dims (int, optional): The input dimension, and if None, + use ``embed_dims``. Defaults to None. + attn_drop (float): Dropout rate of the dropout layer after the + attention calculation of query and key. Defaults to 0. + proj_drop (float): Dropout rate of the dropout layer after the + output projection. Defaults to 0. + dropout_layer (dict): The dropout config before adding the shoutcut. + Defaults to ``dict(type='Dropout', drop_prob=0.)``. + qkv_bias (bool): If True, add a learnable bias to q, k, v. + Defaults to False. + proj_bias (bool) If True, add a learnable bias to output projection. + Defaults to True. + qk_scale_type (str): The scale type of qk scale. + Defaults to 'learnable'. It can be 'learnable', 'fixed' or 'none'. + qk_scale (float, optional): If set qk_scale_type to 'none', this + should be specified with valid float number. Defaults to None. + v_shortcut (bool): Add a shortcut from value to output. It's usually + used if ``input_dims`` is different from ``embed_dims``. + Defaults to False. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. 
+ """ + + def __init__(self, + embed_dims, + num_heads=8, + input_dims=None, + attn_drop=0., + proj_drop=0., + dropout_layer=dict(type='Dropout', drop_prob=0.), + qkv_bias=False, + proj_bias=True, + qk_scale_type='learnable', + qk_scale=None, + v_shortcut=False, + init_cfg=None): + super().__init__(init_cfg) + + self.input_dims = input_dims or embed_dims + self.embed_dims = embed_dims + self.num_heads = num_heads + self.v_shortcut = v_shortcut + + self.head_dims = embed_dims // num_heads + if qk_scale_type == 'learnable': + self.scale = nn.Parameter(torch.ones(num_heads, 1, 1)) + elif qk_scale_type == 'fixed': + self.scale = self.head_dims**-0.5 + elif qk_scale_type == 'none': + assert qk_scale is not None + self.scale = qk_scale + + self.qkv = nn.Linear(self.input_dims, embed_dims * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(embed_dims, embed_dims, bias=proj_bias) + self.proj_drop = nn.Dropout(proj_drop) + + self.out_drop = build_dropout(dropout_layer) + + def forward(self, x): + B, N, _ = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + self.head_dims).permute(2, 0, 3, 1, 4) + + q, k, v = [item.transpose(-2, -1) for item in [qkv[0], qkv[1], qkv[2]]] + + q, k = F.normalize(q, dim=-1), F.normalize(k, dim=-1) + + attn = (q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + + x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, self.embed_dims) + x = self.proj(x) + x = self.out_drop(self.proj_drop(x)) + + if self.v_shortcut: + x = qkv[2].squeeze(1) + x + return x + + +class LeAttention(BaseModule): + """LeViT Attention. Multi-head attention with attention bias, which is + proposed in `LeViT: a Vision Transformer in ConvNet’s Clothing for Faster + Inference`_ + + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. Default: 8. + key_dim (int): Dimension of key. Default: None. + attn_ratio (int): Ratio of attention heads. Default: 8. + resolution (tuple[int]): Input resolution. Default: (16, 16). + init_cfg (dict, optional): The Config for initialization. 
+ """ + + def __init__(self, + dim, + key_dim, + num_heads=8, + attn_ratio=4, + resolution=(14, 14), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + # (h, w) + assert isinstance(resolution, tuple) and len(resolution) == 2 + self.num_heads = num_heads + self.scale = key_dim**-0.5 + self.key_dim = key_dim + self.nh_kd = nh_kd = key_dim * num_heads + self.d = int(attn_ratio * key_dim) + self.dh = int(attn_ratio * key_dim) * num_heads + self.attn_ratio = attn_ratio + h = self.dh + nh_kd * 2 + + self.norm = nn.LayerNorm(dim) + self.qkv = nn.Linear(dim, h) + self.proj = nn.Linear(self.dh, dim) + + points = list( + itertools.product(range(resolution[0]), range(resolution[1]))) + N = len(points) + attention_offsets = {} + idxs = [] + for p1 in points: + for p2 in points: + offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1])) + if offset not in attention_offsets: + attention_offsets[offset] = len(attention_offsets) + idxs.append(attention_offsets[offset]) + self.attention_biases = torch.nn.Parameter( + torch.zeros(num_heads, len(attention_offsets))) + self.register_buffer( + 'attention_bias_idxs', + torch.LongTensor(idxs).view(N, N), + persistent=False) + + @torch.no_grad() + def train(self, mode=True): + super().train(mode) + if mode and hasattr(self, 'ab'): + del self.ab + else: + self.ab = self.attention_biases[:, self.attention_bias_idxs] + + def forward(self, x): # x (B,N,C) + B, N, _ = x.shape + + # Normalization + x = self.norm(x) + + qkv = self.qkv(x) + # (B, N, num_heads, d) + q, k, v = qkv.view(B, N, self.num_heads, + -1).split([self.key_dim, self.key_dim, self.d], + dim=3) + # (B, num_heads, N, d) + q = q.permute(0, 2, 1, 3) + k = k.permute(0, 2, 1, 3) + v = v.permute(0, 2, 1, 3) + + attn = ((q @ k.transpose(-2, -1)) * self.scale + + (self.attention_biases[:, self.attention_bias_idxs] + if self.training else self.ab)) + attn = attn.softmax(dim=-1) + x = (attn @ v).transpose(1, 2).reshape(B, N, self.dh) + x = self.proj(x) + return x + + +class CrossMultiheadAttention(BaseModule): + """Cross attention between queries and the union of keys and values. + + This module is different from ``MultiheadAttention``, for the attention + is computed between queries and the union of keys and values. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + qkv_bias (bool): If True, add a learnable bias to q, k, v. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + attn_drop (float): Dropout rate of the dropout layer after the + attention calculation of query and key. Defaults to 0. + proj_drop (float): Dropout rate of the dropout layer after the + output projection. Defaults to 0. + """ + + def __init__(self, + embed_dims: int, + num_heads: int = 8, + qkv_bias: bool = False, + qk_scale: float = None, + attn_drop: float = 0., + proj_drop: float = 0.) 
-> None: + super().__init__() + self.num_heads = num_heads + head_dim = embed_dims // num_heads + self.scale = qk_scale or head_dim**-0.5 + + self.q = nn.Linear(embed_dims, embed_dims, bias=False) + self.k = nn.Linear(embed_dims, embed_dims, bias=False) + self.v = nn.Linear(embed_dims, embed_dims, bias=False) + + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(embed_dims)) + self.v_bias = nn.Parameter(torch.zeros(embed_dims)) + else: + self.q_bias = None + self.k_bias = None + self.v_bias = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, + x: torch.Tensor, + k: torch.Tensor = None, + v: torch.Tensor = None) -> None: + """Forward function.""" + B, N, _ = x.shape + + N_k = k.shape[1] + N_v = v.shape[1] + + q_bias, k_bias, v_bias = None, None, None + if self.q_bias is not None: + q_bias = self.q_bias + k_bias = torch.zeros_like(self.v_bias, requires_grad=False) + v_bias = self.v_bias + + q = F.linear( + input=x, weight=self.q.weight, bias=q_bias) # (B, N_q, dim) + k = F.linear( + input=k, weight=self.k.weight, bias=k_bias) # (B, N_k, dim) + v = F.linear(input=v, weight=self.v.weight, bias=v_bias) + + q = q.reshape(B, N, 1, self.num_heads, + -1).permute(2, 0, 3, 1, + 4).squeeze(0) # (B, num_heads, N_q, dim) + k = k.reshape(B, N_k, 1, self.num_heads, + -1).permute(2, 0, 3, 1, + 4).squeeze(0) # (B, num_heads, N_k, dim) + v = v.reshape(B, N_v, 1, self.num_heads, + -1).permute(2, 0, 3, 1, + 4).squeeze(0) # (B, num_heads, N_v, dim) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) # (B, N_head, N_q, N_k) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class PromptMultiheadAttention(MultiheadAttention): + """Prompt Multihead Attention for MILAN. + + This module is specific for the prompt encoder in MILAN. It will not update + the visible tokens from the encoder. + + Args: + embed_dims (int): The embedding dimension. + num_heads (int): Parallel attention heads. + input_dims (int, optional): The input dimension, and if None, + use ``embed_dims``. Defaults to None. + attn_drop (float): Dropout rate of the dropout layer after the + attention calculation of query and key. Defaults to 0. + proj_drop (float): Dropout rate of the dropout layer after the + output projection. Defaults to 0. + dropout_layer (dict): The dropout config before adding the shortcut. + Defaults to ``dict(type='Dropout', drop_prob=0.)``. + qkv_bias (bool): If True, add a learnable bias to q, k, v. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + proj_bias (bool) If True, add a learnable bias to output projection. + Defaults to True. + v_shortcut (bool): Add a shortcut from value to output. It's usually + used if ``input_dims`` is different from ``embed_dims``. + Defaults to False. + return_attention (bool): If True, return the attention map, computed by + the cross attention between the class token and all other tokens. + Defaults to False. + init_cfg (Union[List[dict], dict], optional): The Config for + initialization. Defaults to None. 
+ """ + + def __init__(self, + embed_dims: int, + num_heads: int, + input_dims: Optional[int] = None, + attn_drop: float = 0, + proj_drop: float = 0, + dropout_layer: dict = dict(type='Dropout', drop_prob=0.), + qkv_bias: bool = True, + qk_scale: Optional[float] = None, + proj_bias: bool = True, + v_shortcut: bool = False, + use_layer_scale: bool = False, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + embed_dims=embed_dims, + num_heads=num_heads, + input_dims=input_dims, + attn_drop=attn_drop, + proj_drop=proj_drop, + dropout_layer=dropout_layer, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + proj_bias=proj_bias, + v_shortcut=v_shortcut, + use_layer_scale=use_layer_scale, + init_cfg=init_cfg) + # no longer need qkv + del self.qkv + + # to project the mask tokens + self.q = nn.Linear(embed_dims, embed_dims, bias=qkv_bias) + # to project al the tokens + self.kv = nn.Linear(embed_dims, embed_dims * 2, bias=qkv_bias) + + def forward(self, x: torch.Tensor, visible_tokens: torch.Tensor, + ids_restore: torch.Tensor) -> torch.Tensor: + """Forward function for `PromptMultiheadAttention`. + + Args: + x (torch.Tensor): Mask token features with shape N x L_m x C. + visible_tokens (torch.Tensor): The visible tokens features from + encoder with shape N x L_v x C. + ids_restore (torch.Tensor): The ids of all tokens in the original + image with shape N x L. + + Returns: + torch Tensor: Output features with shape N x L x C. + """ + x_ = torch.cat([visible_tokens[:, 1:, :], x], dim=1) + assert x_.shape[1] == ids_restore.shape[1] + x_ = torch.gather( + x_, + dim=1, + index=ids_restore.unsqueeze(-1).repeat(1, 1, x.shape[-1])) + x_ = torch.cat([visible_tokens[:, :1, :], x_], dim=1) + + # full sequence shape + B, _, _ = x_.shape + q = self.q(x).reshape(B, x.shape[1], self.num_heads, + self.head_dims).permute(0, 2, 1, 3) + kv = self.kv(x_).reshape(B, x_.shape[1], 2, self.num_heads, + self.head_dims).permute(2, 0, 3, 1, 4) + k, v = kv[0], kv[1] + + attn_drop = self.attn_drop if self.training else 0. + attn = self.scaled_dot_product_attention(q, k, v, dropout_p=attn_drop) + x = attn.transpose(1, 2).reshape(B, x.shape[1], self.embed_dims) + + x = self.proj(x) + x = self.out_drop(self.gamma1(self.proj_drop(x))) + return x diff --git a/mmpretrain/models/utils/batch_augments/__init__.py b/mmpretrain/models/utils/batch_augments/__init__.py new file mode 100644 index 0000000..2fbc4e1 --- /dev/null +++ b/mmpretrain/models/utils/batch_augments/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .cutmix import CutMix +from .mixup import Mixup +from .resizemix import ResizeMix +from .wrapper import RandomBatchAugment + +__all__ = ('RandomBatchAugment', 'CutMix', 'Mixup', 'ResizeMix') diff --git a/mmpretrain/models/utils/batch_augments/__pycache__/__init__.cpython-310.pyc b/mmpretrain/models/utils/batch_augments/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18e07c827c4fd4274675cc1fc666898a5569e4bf GIT binary patch literal 362 zcmY+Ay-oxn5XYC@Wp~ebjc>7y+gcc-UaiI1(Gc!HC4q&o5Ko`PSMWuAh;6N`>`hFZ zVH+L7e||6@8F1_MQ33Y)^M3yY{sRwxt%SoR?CA_d7Lu645=IDeX-ZqMDsyFOOBu~v zo5r@R1*WW%&0W#Ap9IQ#GB=wPZ}b6(f=eD?wN;aTQOoVgR*cfwYZ3R4SLwm3G4^$! 
+ + With this method, patches are cut and pasted among training images where + the ground truth labels are also mixed proportionally to the area of the + patches. + + Args: + alpha (float): Parameters for Beta distribution to generate the + mixing ratio. It should be a positive number. More details + can be found in :class:`Mixup`. + cutmix_minmax (List[float], optional): The min/max area ratio of the + patches. If not None, the bounding-box of patches is uniform + sampled within this ratio range, and the ``alpha`` will be ignored. + Otherwise, the bounding-box is generated according to the + ``alpha``. Defaults to None. + correct_lam (bool): Whether to apply lambda correction when cutmix bbox + clipped by image borders. Defaults to True. + + ..
note :: + If the ``cutmix_minmax`` is None, how to generate the bounding-box of + patches according to the ``alpha``? + + First, generate a :math:`\lambda`, details can be found in + :class:`Mixup`. And then, the area ratio of the bounding-box + is calculated by: + + .. math:: + \text{ratio} = \sqrt{1-\lambda} + """ + + def __init__(self, + alpha: float, + cutmix_minmax: Optional[List[float]] = None, + correct_lam: bool = True): + super().__init__(alpha=alpha) + + self.cutmix_minmax = cutmix_minmax + self.correct_lam = correct_lam + + def rand_bbox_minmax( + self, + img_shape: Tuple[int, int], + count: Optional[int] = None) -> Tuple[int, int, int, int]: + """Min-Max CutMix bounding-box Inspired by Darknet cutmix + implementation. It generates a random rectangular bbox based on min/max + percent values applied to each dimension of the input image. + + Typical defaults for minmax are usually in the .2-.3 for min and + .8-.9 range for max. + + Args: + img_shape (tuple): Image shape as tuple + count (int, optional): Number of bbox to generate. Defaults to None + """ + assert len(self.cutmix_minmax) == 2 + img_h, img_w = img_shape + cut_h = np.random.randint( + int(img_h * self.cutmix_minmax[0]), + int(img_h * self.cutmix_minmax[1]), + size=count) + cut_w = np.random.randint( + int(img_w * self.cutmix_minmax[0]), + int(img_w * self.cutmix_minmax[1]), + size=count) + yl = np.random.randint(0, img_h - cut_h, size=count) + xl = np.random.randint(0, img_w - cut_w, size=count) + yu = yl + cut_h + xu = xl + cut_w + return yl, yu, xl, xu + + def rand_bbox(self, + img_shape: Tuple[int, int], + lam: float, + margin: float = 0., + count: Optional[int] = None) -> Tuple[int, int, int, int]: + """Standard CutMix bounding-box that generates a random square bbox + based on lambda value. This implementation includes support for + enforcing a border margin as percent of bbox dimensions. + + Args: + img_shape (tuple): Image shape as tuple + lam (float): Cutmix lambda value + margin (float): Percentage of bbox dimension to enforce as margin + (reduce amount of box outside image). Defaults to 0. + count (int, optional): Number of bbox to generate. Defaults to None + """ + ratio = np.sqrt(1 - lam) + img_h, img_w = img_shape + cut_h, cut_w = int(img_h * ratio), int(img_w * ratio) + margin_y, margin_x = int(margin * cut_h), int(margin * cut_w) + cy = np.random.randint(0 + margin_y, img_h - margin_y, size=count) + cx = np.random.randint(0 + margin_x, img_w - margin_x, size=count) + yl = np.clip(cy - cut_h // 2, 0, img_h) + yh = np.clip(cy + cut_h // 2, 0, img_h) + xl = np.clip(cx - cut_w // 2, 0, img_w) + xh = np.clip(cx + cut_w // 2, 0, img_w) + return yl, yh, xl, xh + + def cutmix_bbox_and_lam(self, + img_shape: Tuple[int, int], + lam: float, + count: Optional[int] = None) -> tuple: + """Generate bbox and apply lambda correction. + + Args: + img_shape (tuple): Image shape as tuple + lam (float): Cutmix lambda value + count (int, optional): Number of bbox to generate. Defaults to None + """ + if self.cutmix_minmax is not None: + yl, yu, xl, xu = self.rand_bbox_minmax(img_shape, count=count) + else: + yl, yu, xl, xu = self.rand_bbox(img_shape, lam, count=count) + if self.correct_lam or self.cutmix_minmax is not None: + bbox_area = (yu - yl) * (xu - xl) + lam = 1. - bbox_area / float(img_shape[0] * img_shape[1]) + return (yl, yu, xl, xu), lam + + def mix(self, batch_inputs: torch.Tensor, + batch_scores: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Mix the batch inputs and batch one-hot format ground truth. 
+ + Args: + batch_inputs (Tensor): A batch of images tensor in the shape of + ``(N, C, H, W)``. + batch_scores (Tensor): A batch of one-hot format labels in the + shape of ``(N, num_classes)``. + + Returns: + Tuple[Tensor, Tensor): The mixed inputs and labels. + """ + lam = np.random.beta(self.alpha, self.alpha) + batch_size = batch_inputs.size(0) + img_shape = batch_inputs.shape[-2:] + index = torch.randperm(batch_size) + + (y1, y2, x1, x2), lam = self.cutmix_bbox_and_lam(img_shape, lam) + batch_inputs[:, :, y1:y2, x1:x2] = batch_inputs[index, :, y1:y2, x1:x2] + mixed_scores = lam * batch_scores + (1 - lam) * batch_scores[index, :] + + return batch_inputs, mixed_scores diff --git a/mmpretrain/models/utils/batch_augments/mixup.py b/mmpretrain/models/utils/batch_augments/mixup.py new file mode 100644 index 0000000..bedb2c3 --- /dev/null +++ b/mmpretrain/models/utils/batch_augments/mixup.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple + +import numpy as np +import torch + +from mmpretrain.registry import BATCH_AUGMENTS + + +@BATCH_AUGMENTS.register_module() +class Mixup: + r"""Mixup batch augmentation. + + Mixup is a method to reduces the memorization of corrupt labels and + increases the robustness to adversarial examples. It's proposed in + `mixup: Beyond Empirical Risk Minimization + `_ + + Args: + alpha (float): Parameters for Beta distribution to generate the + mixing ratio. It should be a positive number. More details + are in the note. + + Note: + The :math:`\alpha` (``alpha``) determines a random distribution + :math:`Beta(\alpha, \alpha)`. For each batch of data, we sample + a mixing ratio (marked as :math:`\lambda`, ``lam``) from the random + distribution. + """ + + def __init__(self, alpha: float): + assert isinstance(alpha, float) and alpha > 0 + + self.alpha = alpha + + def mix(self, batch_inputs: torch.Tensor, + batch_scores: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Mix the batch inputs and batch one-hot format ground truth. + + Args: + batch_inputs (Tensor): A batch of images tensor in the shape of + ``(N, C, H, W)``. + batch_scores (Tensor): A batch of one-hot format labels in the + shape of ``(N, num_classes)``. + + Returns: + Tuple[Tensor, Tensor): The mixed inputs and labels. + """ + lam = np.random.beta(self.alpha, self.alpha) + batch_size = batch_inputs.size(0) + index = torch.randperm(batch_size) + + mixed_inputs = lam * batch_inputs + (1 - lam) * batch_inputs[index, :] + mixed_scores = lam * batch_scores + (1 - lam) * batch_scores[index, :] + + return mixed_inputs, mixed_scores + + def __call__(self, batch_inputs: torch.Tensor, batch_score: torch.Tensor): + """Mix the batch inputs and batch data samples.""" + assert batch_score.ndim == 2, \ + 'The input `batch_score` should be a one-hot format tensor, '\ + 'which shape should be ``(N, num_classes)``.' + + mixed_inputs, mixed_score = self.mix(batch_inputs, batch_score.float()) + return mixed_inputs, mixed_score diff --git a/mmpretrain/models/utils/batch_augments/resizemix.py b/mmpretrain/models/utils/batch_augments/resizemix.py new file mode 100644 index 0000000..c70f81b --- /dev/null +++ b/mmpretrain/models/utils/batch_augments/resizemix.py @@ -0,0 +1,95 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
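# Usage sketch for the Mixup and CutMix batch augments defined above. This is
# a minimal, illustrative example only: shapes and alpha values are arbitrary,
# and the import path assumes the package layout introduced in this patch.
#
#   import torch
#   import torch.nn.functional as F
#   from mmpretrain.models.utils.batch_augments import CutMix, Mixup
#
#   imgs = torch.rand(8, 3, 224, 224)
#   scores = F.one_hot(torch.randint(0, 10, (8, )), num_classes=10)
#   mixed_imgs, mixed_scores = Mixup(alpha=1.0)(imgs, scores)
#   mixed_imgs, mixed_scores = CutMix(alpha=1.0)(imgs, scores)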
+from typing import List, Optional, Tuple + +import numpy as np +import torch +import torch.nn.functional as F + +from mmpretrain.registry import BATCH_AUGMENTS +from .cutmix import CutMix + + +@BATCH_AUGMENTS.register_module() +class ResizeMix(CutMix): + r"""ResizeMix Random Paste layer for a batch of data. + + The ResizeMix will resize an image to a small patch and paste it on another + image. It's proposed in `ResizeMix: Mixing Data with Preserved Object + Information and True Labels `_ + + Args: + alpha (float): Parameters for Beta distribution to generate the + mixing ratio. It should be a positive number. More details + can be found in :class:`Mixup`. + lam_min(float): The minimum value of lam. Defaults to 0.1. + lam_max(float): The maximum value of lam. Defaults to 0.8. + interpolation (str): algorithm used for upsampling: + 'nearest' | 'linear' | 'bilinear' | 'bicubic' | 'trilinear' | + 'area'. Defaults to 'bilinear'. + prob (float): The probability to execute resizemix. It should be in + range [0, 1]. Defaults to 1.0. + cutmix_minmax (List[float], optional): The min/max area ratio of the + patches. If not None, the bounding-box of patches is uniform + sampled within this ratio range, and the ``alpha`` will be ignored. + Otherwise, the bounding-box is generated according to the + ``alpha``. Defaults to None. + correct_lam (bool): Whether to apply lambda correction when cutmix bbox + clipped by image borders. Defaults to True + **kwargs: Any other parameters accpeted by :class:`CutMix`. + + Note: + The :math:`\lambda` (``lam``) is the mixing ratio. It's a random + variable which follows :math:`Beta(\alpha, \alpha)` and is mapped + to the range [``lam_min``, ``lam_max``]. + + .. math:: + \lambda = \frac{Beta(\alpha, \alpha)} + {\lambda_{max} - \lambda_{min}} + \lambda_{min} + + And the resize ratio of source images is calculated by :math:`\lambda`: + + .. math:: + \text{ratio} = \sqrt{1-\lambda} + """ + + def __init__(self, + alpha: float, + lam_min: float = 0.1, + lam_max: float = 0.8, + interpolation: str = 'bilinear', + cutmix_minmax: Optional[List[float]] = None, + correct_lam: bool = True): + super().__init__( + alpha=alpha, cutmix_minmax=cutmix_minmax, correct_lam=correct_lam) + self.lam_min = lam_min + self.lam_max = lam_max + self.interpolation = interpolation + + def mix(self, batch_inputs: torch.Tensor, + batch_scores: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Mix the batch inputs and batch one-hot format ground truth. + + Args: + batch_inputs (Tensor): A batch of images tensor in the shape of + ``(N, C, H, W)``. + batch_scores (Tensor): A batch of one-hot format labels in the + shape of ``(N, num_classes)``. + + Returns: + Tuple[Tensor, Tensor): The mixed inputs and labels. 
+ """ + lam = np.random.beta(self.alpha, self.alpha) + lam = lam * (self.lam_max - self.lam_min) + self.lam_min + img_shape = batch_inputs.shape[-2:] + batch_size = batch_inputs.size(0) + index = torch.randperm(batch_size) + + (y1, y2, x1, x2), lam = self.cutmix_bbox_and_lam(img_shape, lam) + batch_inputs[:, :, y1:y2, x1:x2] = F.interpolate( + batch_inputs[index], + size=(int(y2 - y1), int(x2 - x1)), + mode=self.interpolation, + align_corners=False) + mixed_scores = lam * batch_scores + (1 - lam) * batch_scores[index, :] + + return batch_inputs, mixed_scores diff --git a/mmpretrain/models/utils/batch_augments/wrapper.py b/mmpretrain/models/utils/batch_augments/wrapper.py new file mode 100644 index 0000000..10e5304 --- /dev/null +++ b/mmpretrain/models/utils/batch_augments/wrapper.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Callable, Union + +import numpy as np +import torch + +from mmpretrain.registry import BATCH_AUGMENTS + + +class RandomBatchAugment: + """Randomly choose one batch augmentation to apply. + + Args: + augments (Callable | dict | list): configs of batch + augmentations. + probs (float | List[float] | None): The probabilities of each batch + augmentations. If None, choose evenly. Defaults to None. + + Example: + >>> import torch + >>> import torch.nn.functional as F + >>> from mmpretrain.models import RandomBatchAugment + >>> augments_cfg = [ + ... dict(type='CutMix', alpha=1.), + ... dict(type='Mixup', alpha=1.) + ... ] + >>> batch_augment = RandomBatchAugment(augments_cfg, probs=[0.5, 0.3]) + >>> imgs = torch.rand(16, 3, 32, 32) + >>> label = F.one_hot(torch.randint(0, 10, (16, )), num_classes=10) + >>> imgs, label = batch_augment(imgs, label) + + .. note :: + + To decide which batch augmentation will be used, it picks one of + ``augments`` based on the probabilities. In the example above, the + probability to use CutMix is 0.5, to use Mixup is 0.3, and to do + nothing is 0.2. + """ + + def __init__(self, augments: Union[Callable, dict, list], probs=None): + if not isinstance(augments, (tuple, list)): + augments = [augments] + + self.augments = [] + for aug in augments: + if isinstance(aug, dict): + self.augments.append(BATCH_AUGMENTS.build(aug)) + else: + self.augments.append(aug) + + if isinstance(probs, float): + probs = [probs] + + if probs is not None: + assert len(augments) == len(probs), \ + '``augments`` and ``probs`` must have same lengths. ' \ + f'Got {len(augments)} vs {len(probs)}.' + assert sum(probs) <= 1, \ + 'The total probability of batch augments exceeds 1.' + self.augments.append(None) + probs.append(1 - sum(probs)) + + self.probs = probs + + def __call__(self, batch_input: torch.Tensor, batch_score: torch.Tensor): + """Randomly apply batch augmentations to the batch inputs and batch + data samples.""" + aug_index = np.random.choice(len(self.augments), p=self.probs) + aug = self.augments[aug_index] + + if aug is not None: + return aug(batch_input, batch_score) + else: + return batch_input, batch_score.float() diff --git a/mmpretrain/models/utils/batch_shuffle.py b/mmpretrain/models/utils/batch_shuffle.py new file mode 100644 index 0000000..a0b03c5 --- /dev/null +++ b/mmpretrain/models/utils/batch_shuffle.py @@ -0,0 +1,66 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Tuple + +import torch +from mmengine.dist import all_gather, broadcast, get_rank + + +@torch.no_grad() +def batch_shuffle_ddp(x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Batch shuffle, for making use of BatchNorm. + + Args: + x (torch.Tensor): Data in each GPU. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Output of shuffle operation. + - x_gather[idx_this]: Shuffled data. + - idx_unshuffle: Index for restoring. + """ + # gather from all gpus + batch_size_this = x.shape[0] + x_gather = torch.cat(all_gather(x), dim=0) + batch_size_all = x_gather.shape[0] + + num_gpus = batch_size_all // batch_size_this + + # random shuffle index + idx_shuffle = torch.randperm(batch_size_all) + + # broadcast to all gpus + broadcast(idx_shuffle, src=0) + + # index for restoring + idx_unshuffle = torch.argsort(idx_shuffle) + + # shuffled index for this gpu + gpu_idx = get_rank() + idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx] + + return x_gather[idx_this], idx_unshuffle + + +@torch.no_grad() +def batch_unshuffle_ddp(x: torch.Tensor, + idx_unshuffle: torch.Tensor) -> torch.Tensor: + """Undo batch shuffle. + + Args: + x (torch.Tensor): Data in each GPU. + idx_unshuffle (torch.Tensor): Index for restoring. + + Returns: + torch.Tensor: Output of unshuffle operation. + """ + # gather from all gpus + batch_size_this = x.shape[0] + x_gather = torch.cat(all_gather(x), dim=0) + batch_size_all = x_gather.shape[0] + + num_gpus = batch_size_all // batch_size_this + + # restored index for this gpu + gpu_idx = get_rank() + idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx] + + return x_gather[idx_this] diff --git a/mmpretrain/models/utils/box_utils.py b/mmpretrain/models/utils/box_utils.py new file mode 100644 index 0000000..79db516 --- /dev/null +++ b/mmpretrain/models/utils/box_utils.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torchvision.ops.boxes as boxes + + +def box_cxcywh_to_xyxy(x): + x_c, y_c, w, h = x.unbind(-1) + b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] + return torch.stack(b, dim=-1) + + +def box_xyxy_to_cxcywh(x): + x0, y0, x1, y1 = x.unbind(-1) + b = [(x0 + x1) / 2.0, (y0 + y1) / 2.0, (x1 - x0), (y1 - y0)] + return torch.stack(b, dim=-1) + + +def box_iou(boxes1, boxes2): + """Return intersection-over-union (Jaccard index) between two sets of + boxes. + + Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with + ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + + Args: + boxes1 (Tensor[N, 4]): first set of boxes + boxes2 (Tensor[M, 4]): second set of boxes + + Returns: + Tensor[N, M]: the NxM matrix containing the pairwise IoU values for + every element in boxes1 and boxes2 + """ + return boxes.box_iou(boxes1, boxes2) + + +def generalized_box_iou(boxes1, boxes2): + """Return generalized intersection-over-union (Jaccard index) between two + sets of boxes. + + Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with + ``0 <= x1 < x2`` and ``0 <= y1 < y2``. 
+ + Args: + boxes1 (Tensor[N, 4]): first set of boxes + boxes2 (Tensor[M, 4]): second set of boxes + + Returns: + Tensor[N, M]: the NxM matrix containing the pairwise generalized IoU + values for every element in boxes1 and boxes2 + """ + # degenerate boxes gives inf / nan results + # so do an early check + assert (boxes1[:, 2:] >= boxes1[:, :2]).all() + assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + + return boxes.generalized_box_iou(boxes1, boxes2) diff --git a/mmpretrain/models/utils/channel_shuffle.py b/mmpretrain/models/utils/channel_shuffle.py new file mode 100644 index 0000000..27006a8 --- /dev/null +++ b/mmpretrain/models/utils/channel_shuffle.py @@ -0,0 +1,29 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + + +def channel_shuffle(x, groups): + """Channel Shuffle operation. + + This function enables cross-group information flow for multiple groups + convolution layers. + + Args: + x (Tensor): The input tensor. + groups (int): The number of groups to divide the input tensor + in the channel dimension. + + Returns: + Tensor: The output tensor after channel shuffle operation. + """ + + batch_size, num_channels, height, width = x.size() + assert (num_channels % groups == 0), ('num_channels should be ' + 'divisible by groups') + channels_per_group = num_channels // groups + + x = x.view(batch_size, groups, channels_per_group, height, width) + x = torch.transpose(x, 1, 2).contiguous() + x = x.view(batch_size, -1, height, width) + + return x diff --git a/mmpretrain/models/utils/clip_generator_helper.py b/mmpretrain/models/utils/clip_generator_helper.py new file mode 100644 index 0000000..4f67f0e --- /dev/null +++ b/mmpretrain/models/utils/clip_generator_helper.py @@ -0,0 +1,394 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from https://github.com/zejiangh/MILAN +from collections import OrderedDict +from typing import Optional, Tuple, Union + +import numpy as np +import torch +from mmengine.logging import MMLogger +from torch import nn + +from mmpretrain.registry import MODELS + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +@MODELS.register_module() +class QuickGELU(nn.Module): + """A faster version of GELU.""" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + return x * torch.sigmoid(1.702 * x) + + +class ResidualAttentionBlock(nn.Module): + """Residual Attention Block (RAB). + + This module implements the same function as the MultiheadAttention, + but with a different interface, which is mainly used + in CLIP. + + Args: + d_model (int): The feature dimension. + n_head (int): The number of attention heads. + attn_mask (torch.Tensor, optional): The attention mask. + Defaults to None. 
+ """ + + def __init__(self, + d_model: int, + n_head: int, + attn_mask: Optional[torch.Tensor] = None, + return_attention: bool = False) -> None: + super().__init__() + + self.attn = nn.MultiheadAttention(d_model, n_head) + self.ln_1 = LayerNorm(d_model) + self.mlp = nn.Sequential( + OrderedDict([('c_fc', nn.Linear(d_model, d_model * 4)), + ('gelu', QuickGELU()), + ('c_proj', nn.Linear(d_model * 4, d_model))])) + self.ln_2 = LayerNorm(d_model) + self.attn_mask = attn_mask + + self.return_attention = return_attention + + def attention(self, x: torch.Tensor) -> torch.Tensor: + """Attention function.""" + self.attn_mask = self.attn_mask.to( + dtype=x.dtype, + device=x.device) if self.attn_mask is not None else None + if self.return_attention: + return self.attn( + x, + x, + x, + need_weights=self.return_attention, + attn_mask=self.attn_mask) + else: + return self.attn( + x, + x, + x, + need_weights=self.return_attention, + attn_mask=self.attn_mask)[0] + + def forward( + self, x: torch.Tensor + ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + """Forward function.""" + if self.return_attention: + x_, attention = self.attention(self.ln_1(x)) + x = x + x_ + x = x + self.mlp(self.ln_2(x)) + return x, attention + else: + x = x + self.attention(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + +class Transformer(nn.Module): + """Transformer. + + Both visual and text branches use this transformer. + + Args: + width (int): The feature dimension. + layers (int): The number of layers. + heads (int): The number of attention heads. + attn_mask (torch.Tensor, optional): The attention mask. + """ + + def __init__(self, + width: int, + layers: int, + heads: int, + attn_mask: Optional[torch.Tensor] = None) -> None: + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.ModuleList() + for _ in range(layers - 1): + self.resblocks.append( + ResidualAttentionBlock(width, heads, attn_mask)) + self.resblocks.append( + ResidualAttentionBlock( + width, heads, attn_mask, return_attention=True)) + + def forward( + self, x: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Forward function.""" + z = [] + for idx, blk in enumerate(self.resblocks): + if idx < self.layers - 1: + x = blk(x) + z.append(x.permute(1, 0, 2)) + else: + x, attention = blk(x) + z.append(x.permute(1, 0, 2)) + return x, attention, z + + +class VisionTransformer(nn.Module): + """Vision Transformer for CLIP. + + Args: + input_resolution (int): The image size. + patch_size (int): The patch size. + width (int): The feature dimension. + layers (int): The number of layers. + heads (int): The number of attention heads. + out_dim (int): The output dimension. + fineturn (bool): Whether to fineturn the model. + average_target (bool): Whether to average the target. 
+ """ + + def __init__(self, + input_resolution: int, + patch_size: int, + width: int, + layers: int, + heads: int, + output_dim: int, + finetune=False, + average_targets: int = 1) -> None: + super().__init__() + self.input_resolution = input_resolution + self.output_dim = output_dim + self.conv1 = nn.Conv2d( + in_channels=3, + out_channels=width, + kernel_size=patch_size, + stride=patch_size, + bias=False) + + scale = width**-0.5 + self.class_embedding = nn.Parameter(scale * torch.randn(width)) + self.positional_embedding = nn.Parameter(scale * torch.randn( + (input_resolution // patch_size)**2 + 1, width)) + self.ln_pre = LayerNorm(width) + + self.transformer = Transformer(width, layers, heads) + + self.finetune = finetune + if finetune is False: + self.ln_post = LayerNorm(width) + self.proj = nn.Parameter(scale * torch.randn(width, output_dim)) + + self.average_targets = average_targets + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Forward function.""" + x = self.conv1(x) # shape = [*, width, grid, grid] + x = x.reshape(x.shape[0], x.shape[1], + -1) # shape = [*, width, grid ** 2] + x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] + x = torch.cat([ + self.class_embedding.to(x.dtype) + torch.zeros( + x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x + ], + dim=1) # shape = [*, grid ** 2 + 1, width] + x = x + self.positional_embedding.to(x.dtype) + x = self.ln_pre(x) + + x = x.permute(1, 0, 2) # NLD -> LND + x, attention, z = self.transformer(x) + x = x.permute(1, 0, 2) # LND -> NLD + + x = self.ln_post(x) + if self.proj is not None: + x = x @ self.proj + + return x, attention + + +class CLIP(nn.Module): + """CLIP. + + Args: + embed_dim (int): The embedding dimension. + image_resolution (int): The image size. + vision_layers (int): The number of layers in the vision transformer. + vision_width (int): The feature dimension in the vision transformer. + vision_patch_size (int): The patch size in the vision transformer. + context_length (int): The context length. + vocab_size (int): The vocabulary size. + transformer_width (int): The feature dimension in the text transformer. + transformer_heads (int): The number of attention heads in the + text transformer. + transformer_layers (int): The number of layers in the text transformer. + fineturn (bool): Whether to fineturn the model. + average_target (bool): Whether to average the target. 
+ """ + + def __init__( + self, + embed_dim: int, + image_resolution: int, + vision_layers: Union[Tuple[int, int, int, int], int], + vision_width: int, + vision_patch_size: int, + context_length: int, + vocab_size: int, + transformer_width: int, + transformer_heads: int, + transformer_layers: int, + finetune: bool = False, + average_targets: int = 1, + ) -> None: + super().__init__() + + self.context_length = context_length + + vision_heads = vision_width // 64 + self.visual = VisionTransformer( + input_resolution=image_resolution, + patch_size=vision_patch_size, + width=vision_width, + layers=vision_layers, + heads=vision_heads, + output_dim=embed_dim, + finetune=finetune, + average_targets=average_targets, + ) + + self.transformer = Transformer( + width=transformer_width, + layers=transformer_layers, + heads=transformer_heads, + attn_mask=self.build_attention_mask()) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter( + torch.empty(self.context_length, transformer_width)) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = nn.Parameter( + torch.empty(transformer_width, embed_dim)) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.initialize_parameters() + + def initialize_parameters(self) -> None: + """Initialize the parameters. + + The pretrained weight will override the initialized parameters by this + function. + """ + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + + proj_std = (self.transformer.width**-0.5) * ( + (2 * self.transformer.layers)**-0.5) + attn_std = self.transformer.width**-0.5 + fc_std = (2 * self.transformer.width)**-0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_( + self.text_projection, std=self.transformer.width**-0.5) + + def build_attention_mask(self) -> torch.Tensor: + """Build the attention mask.""" + # lazily create causal attention mask, with full attention between the + # vision tokens pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float('-inf')) + mask.triu_(1) # zero out the lower diagonal + return mask + + @property + def dtype(self) -> torch.dtype: + """Get the dtype.""" + return self.visual.conv1.weight.dtype + + def encode_image(self, + image: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Encode the image. + + Get the feature and attention mask from the last layer of the visual + branch of CLIP. + + Args: + image (torch.Tensor): The image tensor with shape NCHW. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The feature and attention mask. + """ + return self.visual(image.type(self.dtype)) + + +def build_clip_model(state_dict: dict, + finetune: bool = False, + average_targets: int = 1) -> nn.Module: + """Build the CLIP model. + + Args: + state_dict (dict): The pretrained state dict. + finetune (bool): Whether to fineturn the model. + average_targets (bool): Whether to average the target. + + Returns: + nn.Module: The CLIP model. 
+ """ + vit = 'visual.proj' in state_dict + + if vit: + vision_width = state_dict['visual.conv1.weight'].shape[0] + vision_layers = len([ + k for k in state_dict.keys() + if k.startswith('visual.') and k.endswith('.attn.in_proj_weight') + ]) + vision_patch_size = state_dict['visual.conv1.weight'].shape[-1] + grid_size = round( + (state_dict['visual.positional_embedding'].shape[0] - 1)**0.5) + image_resolution = vision_patch_size * grid_size + + embed_dim = state_dict['text_projection'].shape[1] + context_length = state_dict['positional_embedding'].shape[0] + vocab_size = state_dict['token_embedding.weight'].shape[0] + transformer_width = state_dict['ln_final.weight'].shape[0] + transformer_heads = transformer_width // 64 + transformer_layers = len( + set( + k.split('.')[2] for k in state_dict + if k.startswith('transformer.resblocks'))) + + model = CLIP( + embed_dim, + image_resolution, + vision_layers, + vision_width, + vision_patch_size, + context_length, + vocab_size, + transformer_width, + transformer_heads, + transformer_layers, + finetune, + average_targets, + ) + + for key in ['input_resolution', 'context_length', 'vocab_size']: + if key in state_dict: + del state_dict[key] + + msg = model.load_state_dict(state_dict, strict=False) + MMLogger.get_current_instance().info(f'Load CLIP model: {msg}') + return model.eval() diff --git a/mmpretrain/models/utils/data_preprocessor.py b/mmpretrain/models/utils/data_preprocessor.py new file mode 100644 index 0000000..c407bd4 --- /dev/null +++ b/mmpretrain/models/utils/data_preprocessor.py @@ -0,0 +1,620 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from numbers import Number +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn.functional as F +from mmengine.model import (BaseDataPreprocessor, ImgDataPreprocessor, + stack_batch) + +from mmpretrain.registry import MODELS +from mmpretrain.structures import (DataSample, MultiTaskDataSample, + batch_label_to_onehot, cat_batch_labels, + tensor_split) +from .batch_augments import RandomBatchAugment + + +@MODELS.register_module() +class ClsDataPreprocessor(BaseDataPreprocessor): + """Image pre-processor for classification tasks. + + Comparing with the :class:`mmengine.model.ImgDataPreprocessor`, + + 1. It won't do normalization if ``mean`` is not specified. + 2. It does normalization and color space conversion after stacking batch. + 3. It supports batch augmentations like mixup and cutmix. + + It provides the data pre-processing as follows + + - Collate and move data to the target device. + - Pad inputs to the maximum size of current batch with defined + ``pad_value``. The padding size can be divisible by a defined + ``pad_size_divisor`` + - Stack inputs to batch_inputs. + - Convert inputs from bgr to rgb if the shape of input is (3, H, W). + - Normalize image with defined std and mean. + - Do batch augmentations like Mixup and Cutmix during training. + + Args: + mean (Sequence[Number], optional): The pixel mean of R, G, B channels. + Defaults to None. + std (Sequence[Number], optional): The pixel standard deviation of + R, G, B channels. Defaults to None. + pad_size_divisor (int): The size of padded image should be + divisible by ``pad_size_divisor``. Defaults to 1. + pad_value (Number): The padded pixel value. Defaults to 0. + to_rgb (bool): whether to convert image from BGR to RGB. + Defaults to False. + to_onehot (bool): Whether to generate one-hot format gt-labels and set + to data samples. Defaults to False. 
+ num_classes (int, optional): The number of classes. Defaults to None. + batch_augments (dict, optional): The batch augmentations settings, + including "augments" and "probs". For more details, see + :class:`mmpretrain.models.RandomBatchAugment`. + """ + + def __init__(self, + mean: Sequence[Number] = None, + std: Sequence[Number] = None, + pad_size_divisor: int = 1, + pad_value: Number = 0, + to_rgb: bool = False, + to_onehot: bool = False, + num_classes: Optional[int] = None, + batch_augments: Optional[dict] = None): + super().__init__() + self.pad_size_divisor = pad_size_divisor + self.pad_value = pad_value + self.to_rgb = to_rgb + self.to_onehot = to_onehot + self.num_classes = num_classes + + if mean is not None: + assert std is not None, 'To enable the normalization in ' \ + 'preprocessing, please specify both `mean` and `std`.' + # Enable the normalization in preprocessing. + self._enable_normalize = True + self.register_buffer('mean', + torch.tensor(mean).view(-1, 1, 1), False) + self.register_buffer('std', + torch.tensor(std).view(-1, 1, 1), False) + else: + self._enable_normalize = False + + if batch_augments: + self.batch_augments = RandomBatchAugment(**batch_augments) + if not self.to_onehot: + from mmengine.logging import MMLogger + MMLogger.get_current_instance().info( + 'Because batch augmentations are enabled, the data ' + 'preprocessor automatically enables the `to_onehot` ' + 'option to generate one-hot format labels.') + self.to_onehot = True + else: + self.batch_augments = None + + def forward(self, data: dict, training: bool = False) -> dict: + """Perform normalization, padding, bgr2rgb conversion and batch + augmentation based on ``BaseDataPreprocessor``. + + Args: + data (dict): data sampled from dataloader. + training (bool): Whether to enable training time augmentation. + + Returns: + dict: Data in the same format as the model input. + """ + inputs = self.cast_data(data['inputs']) + + if isinstance(inputs, torch.Tensor): + # The branch if use `default_collate` as the collate_fn in the + # dataloader. + + # ------ To RGB ------ + if self.to_rgb and inputs.size(1) == 3: + inputs = inputs.flip(1) + + # -- Normalization --- + inputs = inputs.float() + if self._enable_normalize: + inputs = (inputs - self.mean) / self.std + + # ------ Padding ----- + if self.pad_size_divisor > 1: + h, w = inputs.shape[-2:] + + target_h = math.ceil( + h / self.pad_size_divisor) * self.pad_size_divisor + target_w = math.ceil( + w / self.pad_size_divisor) * self.pad_size_divisor + pad_h = target_h - h + pad_w = target_w - w + inputs = F.pad(inputs, (0, pad_w, 0, pad_h), 'constant', + self.pad_value) + else: + # The branch if use `pseudo_collate` as the collate_fn in the + # dataloader. 
+ + processed_inputs = [] + for input_ in inputs: + # ------ To RGB ------ + if self.to_rgb and input_.size(0) == 3: + input_ = input_.flip(0) + + # -- Normalization --- + input_ = input_.float() + if self._enable_normalize: + input_ = (input_ - self.mean) / self.std + + processed_inputs.append(input_) + # Combine padding and stack + inputs = stack_batch(processed_inputs, self.pad_size_divisor, + self.pad_value) + + data_samples = data.get('data_samples', None) + sample_item = data_samples[0] if data_samples is not None else None + + if isinstance(sample_item, DataSample): + batch_label = None + batch_score = None + + if 'gt_label' in sample_item: + gt_labels = [sample.gt_label for sample in data_samples] + batch_label, label_indices = cat_batch_labels(gt_labels) + batch_label = batch_label.to(self.device) + if 'gt_score' in sample_item: + gt_scores = [sample.gt_score for sample in data_samples] + batch_score = torch.stack(gt_scores).to(self.device) + elif self.to_onehot and 'gt_label' in sample_item: + assert batch_label is not None, \ + 'Cannot generate onehot format labels because no labels.' + num_classes = self.num_classes or sample_item.get( + 'num_classes') + assert num_classes is not None, \ + 'Cannot generate one-hot format labels because not set ' \ + '`num_classes` in `data_preprocessor`.' + batch_score = batch_label_to_onehot( + batch_label, label_indices, num_classes).to(self.device) + + # ----- Batch Augmentations ---- + if (training and self.batch_augments is not None + and batch_score is not None): + inputs, batch_score = self.batch_augments(inputs, batch_score) + + # ----- scatter labels and scores to data samples --- + if batch_label is not None: + for sample, label in zip( + data_samples, tensor_split(batch_label, + label_indices)): + sample.set_gt_label(label) + if batch_score is not None: + for sample, score in zip(data_samples, batch_score): + sample.set_gt_score(score) + elif isinstance(sample_item, MultiTaskDataSample): + data_samples = self.cast_data(data_samples) + + return {'inputs': inputs, 'data_samples': data_samples} + + +@MODELS.register_module() +class SelfSupDataPreprocessor(ImgDataPreprocessor): + """Image pre-processor for operations, like normalization and bgr to rgb. + + Compared with the :class:`mmengine.ImgDataPreprocessor`, this module + supports ``inputs`` as torch.Tensor or a list of torch.Tensor. + """ + + def __init__(self, + mean: Optional[Sequence[Union[float, int]]] = None, + std: Optional[Sequence[Union[float, int]]] = None, + pad_size_divisor: int = 1, + pad_value: Union[float, int] = 0, + to_rgb: bool = False, + bgr_to_rgb: bool = False, + rgb_to_bgr: bool = False, + non_blocking: Optional[bool] = False): + super().__init__( + mean=mean, + std=std, + pad_size_divisor=pad_size_divisor, + pad_value=pad_value, + bgr_to_rgb=bgr_to_rgb, + rgb_to_bgr=rgb_to_bgr, + non_blocking=non_blocking) + + self._channel_conversion = to_rgb or bgr_to_rgb or rgb_to_bgr + + def forward( + self, + data: dict, + training: bool = False + ) -> Tuple[List[torch.Tensor], Optional[list]]: + """Performs normalization and bgr2rgb conversion based on + ``BaseDataPreprocessor``. + + Args: + data (dict): data sampled from dataloader. + training (bool): Whether to enable training time augmentation. If + subclasses override this method, they can perform different + preprocessing strategies for training and testing based on the + value of ``training``. + Returns: + Tuple[torch.Tensor, Optional[list]]: Data in the same format as the + model input. 
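+
+        Example:
+            A rough sketch of the expected multi-view input, where
+            ``inputs`` holds two augmented views of the same batch (shapes
+            and statistics below are illustrative only)::
+
+                >>> import torch
+                >>> preprocessor = SelfSupDataPreprocessor(
+                ...     mean=(123.675, 116.28, 103.53),
+                ...     std=(58.395, 57.12, 57.375),
+                ...     to_rgb=True)
+                >>> views = [torch.rand(2, 3, 224, 224),
+                ...          torch.rand(2, 3, 224, 224)]
+                >>> out = preprocessor({'inputs': views, 'data_samples': None})
+                >>> len(out['inputs'])  # one tensor per view
+                2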
+ """ + assert isinstance(data, + dict), 'Please use default_collate in dataloader, \ + instead of pseudo_collate.' + + data = [val for _, val in data.items()] + batch_inputs, batch_data_samples = self.cast_data(data) + + # Here is what is different from :class:`mmengine.ImgDataPreprocessor` + # Since there are multiple views for an image for some algorithms, + # e.g. SimCLR, each item in inputs is a list, containing multi-views + # for an image. + if isinstance(batch_inputs, list): + # channel transform + if self._channel_conversion: + batch_inputs = [ + _input[:, [2, 1, 0], ...] for _input in batch_inputs + ] + + # convert to float after channel conversion to ensure efficiency + batch_inputs = [_input.float() for _input in batch_inputs] + + # normalization. + if self._enable_normalize: + batch_inputs = [(_input - self.mean) / self.std + for _input in batch_inputs] + else: + # channel transform + if self._channel_conversion: + batch_inputs = batch_inputs[:, [2, 1, 0], ...] + + # convert to float after channel conversion to ensure efficiency + batch_inputs = batch_inputs.float() + + # normalization. + if self._enable_normalize: + batch_inputs = (batch_inputs - self.mean) / self.std + + return {'inputs': batch_inputs, 'data_samples': batch_data_samples} + + +@MODELS.register_module() +class TwoNormDataPreprocessor(SelfSupDataPreprocessor): + """Image pre-processor for CAE, BEiT v1/v2, etc. + + Compared with the :class:`mmselfsup.SelfSupDataPreprocessor`, this module + will normalize the prediction image and target image with different + normalization parameters. + + Args: + mean (Sequence[float or int], optional): The pixel mean of image + channels. If ``to_rgb=True`` it means the mean value of R, G, B + channels. If the length of `mean` is 1, it means all channels have + the same mean value, or the input is a gray image. If it is not + specified, images will not be normalized. Defaults to None. + std (Sequence[float or int], optional): The pixel standard deviation of + image channels. If ``to_rgb=True`` it means the standard deviation + of R, G, B channels. If the length of `std` is 1, it means all + channels have the same standard deviation, or the input is a gray + image. If it is not specified, images will not be normalized. + Defaults to None. + second_mean (Sequence[float or int], optional): The description is + like ``mean``, it can be customized for targe image. Defaults to + None. + second_std (Sequence[float or int], optional): The description is + like ``std``, it can be customized for targe image. Defaults to + None. + pad_size_divisor (int): The size of padded image should be + divisible by ``pad_size_divisor``. Defaults to 1. + pad_value (float or int): The padded pixel value. Defaults to 0. + to_rgb (bool): whether to convert image from BGR to RGB. + Defaults to False. + non_blocking (bool): Whether block current process when transferring + data to device. Defaults to False. 
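+
+    Example:
+        A config-style sketch; the normalization statistics below are
+        illustrative placeholders, not recommended values::
+
+            >>> data_preprocessor = dict(
+            ...     type='TwoNormDataPreprocessor',
+            ...     mean=(123.675, 116.28, 103.53),
+            ...     std=(58.395, 57.12, 57.375),
+            ...     second_mean=(127.5, 127.5, 127.5),
+            ...     second_std=(127.5, 127.5, 127.5),
+            ...     to_rgb=True)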
+ """ + + def __init__(self, + mean: Optional[Sequence[Union[float, int]]] = None, + std: Optional[Sequence[Union[float, int]]] = None, + second_mean: Sequence[Union[float, int]] = None, + second_std: Sequence[Union[float, int]] = None, + pad_size_divisor: int = 1, + pad_value: Union[float, int] = 0, + to_rgb: bool = False, + non_blocking: Optional[bool] = False): + super().__init__( + mean=mean, + std=std, + pad_size_divisor=pad_size_divisor, + pad_value=pad_value, + to_rgb=to_rgb, + non_blocking=non_blocking) + assert (second_mean is not None) and (second_std is not None), ( + 'mean and std should not be None while using ' + '`TwoNormDataPreprocessor`') + assert len(second_mean) == 3 or len(second_mean) == 1, ( + '`mean` should have 1 or 3 values, to be compatible with ' + f'RGB or gray image, but got {len(second_mean)} values') + assert len(second_std) == 3 or len(second_std) == 1, ( + '`std` should have 1 or 3 values, to be compatible with RGB ' + f'or gray image, but got {len(std)} values') + + self.register_buffer('second_mean', + torch.tensor(second_mean).view(-1, 1, 1), False) + self.register_buffer('second_std', + torch.tensor(second_std).view(-1, 1, 1), False) + + def forward( + self, + data: dict, + training: bool = False + ) -> Tuple[List[torch.Tensor], Optional[list]]: + """Performs normalization and bgr2rgb conversion based on + ``BaseDataPreprocessor``. The ``batch_inputs`` in forward function is a + list. + + Args: + data (dict): data sampled from dataloader. + training (bool): Whether to enable training time augmentation. If + subclasses override this method, they can perform different + preprocessing strategies for training and testing based on the + value of ``training``. + Returns: + Tuple[torch.Tensor, Optional[list]]: Data in the same format as the + model input. + """ + data = [val for _, val in data.items()] + batch_inputs, batch_data_samples = self.cast_data(data) + # channel transform + if self._channel_conversion: + batch_inputs = [ + _input[:, [2, 1, 0], ...] for _input in batch_inputs + ] + + # convert to float after channel conversion to ensure efficiency + batch_inputs = [_input.float() for _input in batch_inputs] + + # Normalization. Here is what is different from + # :class:`mmselfsup.SelfSupDataPreprocessor`. Normalize the target + # image and prediction image with different normalization params + if self._enable_normalize: + batch_inputs = [ + (batch_inputs[0] - self.mean) / self.std, + (batch_inputs[1] - self.second_mean) / self.second_std + ] + + return {'inputs': batch_inputs, 'data_samples': batch_data_samples} + + +@MODELS.register_module() +class VideoDataPreprocessor(BaseDataPreprocessor): + """Video pre-processor for operations, like normalization and bgr to rgb + conversion . + + Compared with the :class:`mmaction.ActionDataPreprocessor`, this module + supports ``inputs`` as torch.Tensor or a list of torch.Tensor. + + Args: + mean (Sequence[float or int, optional): The pixel mean of channels + of images or stacked optical flow. Defaults to None. + std (Sequence[float or int], optional): The pixel standard deviation + of channels of images or stacked optical flow. Defaults to None. + pad_size_divisor (int): The size of padded image should be + divisible by ``pad_size_divisor``. Defaults to 1. + pad_value (float or int): The padded pixel value. Defaults to 0. + to_rgb (bool): Whether to convert image from BGR to RGB. + Defaults to False. + format_shape (str): Format shape of input data. + Defaults to ``'NCHW'``. 
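+
+    Example:
+        A minimal sketch for the ``'NCTHW'`` layout (shapes and statistics
+        are illustrative only)::
+
+            >>> import torch
+            >>> preprocessor = VideoDataPreprocessor(
+            ...     mean=(123.675, 116.28, 103.53),
+            ...     std=(58.395, 57.12, 57.375),
+            ...     format_shape='NCTHW')
+            >>> clips = torch.rand(2, 3, 8, 224, 224)  # N, C, T, H, W
+            >>> out = preprocessor({'inputs': clips, 'data_samples': None})
+            >>> out['inputs'].shape
+            torch.Size([2, 3, 8, 224, 224])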
+ """ + + def __init__(self, + mean: Optional[Sequence[Union[float, int]]] = None, + std: Optional[Sequence[Union[float, int]]] = None, + pad_size_divisor: int = 1, + pad_value: Union[float, int] = 0, + to_rgb: bool = False, + format_shape: str = 'NCHW') -> None: + super().__init__() + self.pad_size_divisor = pad_size_divisor + self.pad_value = pad_value + self.to_rgb = to_rgb + self.format_shape = format_shape + + if mean is not None: + assert std is not None, 'To enable the normalization in ' \ + 'preprocessing, please specify both ' \ + '`mean` and `std`.' + # Enable the normalization in preprocessing. + self._enable_normalize = True + if self.format_shape == 'NCHW': + normalizer_shape = (-1, 1, 1) + elif self.format_shape == 'NCTHW': + normalizer_shape = (-1, 1, 1, 1) + else: + raise ValueError(f'Invalid format shape: {format_shape}') + + self.register_buffer( + 'mean', + torch.tensor(mean, dtype=torch.float32).view(normalizer_shape), + False) + self.register_buffer( + 'std', + torch.tensor(std, dtype=torch.float32).view(normalizer_shape), + False) + else: + self._enable_normalize = False + + def forward( + self, + data: dict, + training: bool = False + ) -> Tuple[List[torch.Tensor], Optional[list]]: + """Performs normalization、padding and bgr2rgb conversion based on + ``BaseDataPreprocessor``. + + Args: + data (dict): data sampled from dataloader. + training (bool): Whether to enable training time augmentation. If + subclasses override this method, they can perform different + preprocessing strategies for training and testing based on the + value of ``training``. + Returns: + Tuple[List[torch.Tensor], Optional[list]]: Data in the same format + as the model input. + """ + + data = [val for _, val in data.items()] + batch_inputs, batch_data_samples = self.cast_data(data) + + if isinstance(batch_inputs, list): + # channel transform + if self.to_rgb: + if self.format_shape == 'NCHW': + batch_inputs = [ + _input[..., [2, 1, 0], :, :] for _input in batch_inputs + ] + elif self.format_shape == 'NCTHW': + batch_inputs = [ + _input[..., [2, 1, 0], :, :, :] + for _input in batch_inputs + ] + else: + raise ValueError( + f'Invalid format shape: {self.format_shape}') + + # convert to float after channel conversion to ensure efficiency + batch_inputs = [_input.float() for _input in batch_inputs] + + # normalization + if self._enable_normalize: + batch_inputs = [(_input - self.mean) / self.std + for _input in batch_inputs] + + else: + # channel transform + if self.to_rgb: + if self.format_shape == 'NCHW': + batch_inputs = batch_inputs[..., [2, 1, 0], :, :] + elif self.format_shape == 'NCTHW': + batch_inputs = batch_inputs[..., [2, 1, 0], :, :, :] + else: + raise ValueError( + f'Invalid format shape: {self.format_shape}') + + # convert to float after channel conversion to ensure efficiency + batch_inputs = batch_inputs.float() + + # normalization + if self._enable_normalize: + batch_inputs = (batch_inputs - self.mean) / self.std + + return {'inputs': batch_inputs, 'data_samples': batch_data_samples} + + +@MODELS.register_module() +class MultiModalDataPreprocessor(BaseDataPreprocessor): + """Data pre-processor for image-text multimodality tasks. + + It provides the data pre-processing as follows + + - Collate and move data to the target device. + - Pad inputs to the maximum size of current batch with defined + ``pad_value``. The padding size can be divisible by a defined + ``pad_size_divisor`` + - Stack inputs to batch_inputs. + - Convert inputs from bgr to rgb if the shape of input is (3, H, W). 
+ - Normalize image with defined std and mean. + + Args: + mean (Sequence[Number], optional): The pixel mean of R, G, B channels. + Defaults to None. + std (Sequence[Number], optional): The pixel standard deviation of + R, G, B channels. Defaults to None. + pad_size_divisor (int): The size of padded image should be + divisible by ``pad_size_divisor``. Defaults to 1. + pad_value (Number): The padded pixel value. Defaults to 0. + to_rgb (bool): whether to convert image from BGR to RGB. + Defaults to False. + """ + + def __init__( + self, + mean: Sequence[Number] = None, + std: Sequence[Number] = None, + pad_size_divisor: int = 1, + pad_value: Number = 0, + to_rgb: bool = False, + ): + super().__init__() + self.pad_size_divisor = pad_size_divisor + self.pad_value = pad_value + self.to_rgb = to_rgb + + if mean is not None: + assert std is not None, 'To enable the normalization in ' \ + 'preprocessing, please specify both `mean` and `std`.' + # Enable the normalization in preprocessing. + self._enable_normalize = True + self.register_buffer('mean', + torch.tensor(mean).view(-1, 1, 1), False) + self.register_buffer('std', + torch.tensor(std).view(-1, 1, 1), False) + else: + self._enable_normalize = False + + def forward(self, data: dict, training: bool = False) -> dict: + """Perform normalization, padding, bgr2rgb conversion and batch + augmentation based on ``BaseDataPreprocessor``. + + Args: + data (dict): data sampled from dataloader. + training (bool): Whether to enable training time augmentation. + + Returns: + dict: Data in the same format as the model input. + """ + data = self.cast_data(data) + + imgs = data.get('inputs', None) + + def _process_img(img): + # ------ To RGB ------ + if self.to_rgb and img.size(1) == 3: + img = img.flip(1) + + # -- Normalization --- + img = img.float() + if self._enable_normalize: + img = (img - self.mean) / self.std + + # ------ Padding ----- + if self.pad_size_divisor > 1: + h, w = img.shape[-2:] + + target_h = math.ceil( + h / self.pad_size_divisor) * self.pad_size_divisor + target_w = math.ceil( + w / self.pad_size_divisor) * self.pad_size_divisor + pad_h = target_h - h + pad_w = target_w - w + img = F.pad(img, (0, pad_w, 0, pad_h), 'constant', + self.pad_value) + return img + + if isinstance(imgs, torch.Tensor): + imgs = _process_img(imgs) + elif isinstance(imgs, Sequence): + # B, T, C, H, W + imgs = torch.stack([_process_img(img) for img in imgs], dim=1) + elif imgs is not None: + raise ValueError(f'{type(imgs)} is not supported for imgs inputs.') + + data_samples = data.get('data_samples', None) + + return {'images': imgs, 'data_samples': data_samples} diff --git a/mmpretrain/models/utils/ema.py b/mmpretrain/models/utils/ema.py new file mode 100644 index 0000000..63c5006 --- /dev/null +++ b/mmpretrain/models/utils/ema.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from math import cos, pi +from typing import Optional + +import torch +import torch.nn as nn +from mmengine.logging import MessageHub +from mmengine.model import ExponentialMovingAverage + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class CosineEMA(ExponentialMovingAverage): + r"""CosineEMA is implemented for updating momentum parameter, used in BYOL, + MoCoV3, etc. + + All parameters are updated by the formula as below: + + .. math:: + + X'_{t+1} = (1 - m) * X'_t + m * X_t + + Where :math:`m` the the momentum parameter. And it's updated with cosine + annealing, including momentum adjustment following: + + .. 
math:: + m = m_{end} + (m_{end} - m_{start}) * (\cos\frac{k\pi}{K} + 1) / 2 + + where :math:`k` is the current step, :math:`K` is the total steps. + + .. note:: + This :attr:`momentum` argument is different from one used in optimizer + classes and the conventional notion of momentum. Mathematically, + :math:`X'_{t}` is the moving average and :math:`X_t` is the new + observed value. The value of momentum is usually a small number, + allowing observed values to slowly update the ema parameters. See also + :external:py:class:`torch.nn.BatchNorm2d`. + + Args: + model (nn.Module): The model to be averaged. + momentum (float): The start momentum value. Defaults to 0.004. + end_momentum (float): The end momentum value for cosine annealing. + Defaults to 0. + interval (int): Interval between two updates. Defaults to 1. + device (torch.device, optional): If provided, the averaged model will + be stored on the :attr:`device`. Defaults to None. + update_buffers (bool): if True, it will compute running averages for + both the parameters and the buffers of the model. Defaults to + False. + """ + + def __init__(self, + model: nn.Module, + momentum: float = 0.004, + end_momentum: float = 0., + interval: int = 1, + device: Optional[torch.device] = None, + update_buffers: bool = False) -> None: + super().__init__( + model=model, + momentum=momentum, + interval=interval, + device=device, + update_buffers=update_buffers) + self.end_momentum = end_momentum + + def avg_func(self, averaged_param: torch.Tensor, + source_param: torch.Tensor, steps: int) -> None: + """Compute the moving average of the parameters using the cosine + momentum strategy. + + Args: + averaged_param (Tensor): The averaged parameters. + source_param (Tensor): The source parameters. + steps (int): The number of times the parameters have been + updated. + + Returns: + Tensor: The averaged parameters. + """ + message_hub = MessageHub.get_current_instance() + max_iters = message_hub.get_info('max_iters') + cosine_annealing = (cos(pi * steps / float(max_iters)) + 1) / 2 + momentum = self.end_momentum - (self.end_momentum - + self.momentum) * cosine_annealing + averaged_param.mul_(1 - momentum).add_(source_param, alpha=momentum) diff --git a/mmpretrain/models/utils/embed.py b/mmpretrain/models/utils/embed.py new file mode 100644 index 0000000..8299f9a --- /dev/null +++ b/mmpretrain/models/utils/embed.py @@ -0,0 +1,423 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import Sequence + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmcv.cnn.bricks.transformer import AdaptivePadding +from mmengine.model import BaseModule + +from .helpers import to_2tuple + + +def resize_pos_embed(pos_embed, + src_shape, + dst_shape, + mode='bicubic', + num_extra_tokens=1): + """Resize pos_embed weights. + + Args: + pos_embed (torch.Tensor): Position embedding weights with shape + [1, L, C]. + src_shape (tuple): The resolution of downsampled origin training + image, in format (H, W). + dst_shape (tuple): The resolution of downsampled new training + image, in format (H, W). + mode (str): Algorithm used for upsampling. Choose one from 'nearest', + 'linear', 'bilinear', 'bicubic' and 'trilinear'. + Defaults to 'bicubic'. + num_extra_tokens (int): The number of extra tokens, such as cls_token. + Defaults to 1. 
+ + Returns: + torch.Tensor: The resized pos_embed of shape [1, L_new, C] + """ + if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1]: + return pos_embed + assert pos_embed.ndim == 3, 'shape of pos_embed must be [1, L, C]' + _, L, C = pos_embed.shape + src_h, src_w = src_shape + assert L == src_h * src_w + num_extra_tokens, \ + f"The length of `pos_embed` ({L}) doesn't match the expected " \ + f'shape ({src_h}*{src_w}+{num_extra_tokens}). Please check the' \ + '`img_size` argument.' + extra_tokens = pos_embed[:, :num_extra_tokens] + + src_weight = pos_embed[:, num_extra_tokens:] + src_weight = src_weight.reshape(1, src_h, src_w, C).permute(0, 3, 1, 2) + + # The cubic interpolate algorithm only accepts float32 + dst_weight = F.interpolate( + src_weight.float(), size=dst_shape, align_corners=False, mode=mode) + dst_weight = torch.flatten(dst_weight, 2).transpose(1, 2) + dst_weight = dst_weight.to(src_weight.dtype) + + return torch.cat((extra_tokens, dst_weight), dim=1) + + +def resize_relative_position_bias_table(src_shape, dst_shape, table, num_head): + """Resize relative position bias table. + + Args: + src_shape (int): The resolution of downsampled origin training + image, in format (H, W). + dst_shape (int): The resolution of downsampled new training + image, in format (H, W). + table (tensor): The relative position bias of the pretrained model. + num_head (int): Number of attention heads. + + Returns: + torch.Tensor: The resized relative position bias table. + """ + from scipy import interpolate + + def geometric_progression(a, r, n): + return a * (1.0 - r**n) / (1.0 - r) + + left, right = 1.01, 1.5 + while right - left > 1e-6: + q = (left + right) / 2.0 + gp = geometric_progression(1, q, src_shape // 2) + if gp > dst_shape // 2: + right = q + else: + left = q + + dis = [] + cur = 1 + for i in range(src_shape // 2): + dis.append(cur) + cur += q**(i + 1) + + r_ids = [-_ for _ in reversed(dis)] + + x = r_ids + [0] + dis + y = r_ids + [0] + dis + + t = dst_shape // 2.0 + dx = np.arange(-t, t + 0.1, 1.0) + dy = np.arange(-t, t + 0.1, 1.0) + + all_rel_pos_bias = [] + + for i in range(num_head): + z = table[:, i].view(src_shape, src_shape).float().numpy() + f_cubic = interpolate.interp2d(x, y, z, kind='cubic') + all_rel_pos_bias.append( + torch.Tensor(f_cubic(dx, + dy)).contiguous().view(-1, + 1).to(table.device)) + new_rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) + return new_rel_pos_bias + + +class PatchEmbed(BaseModule): + """Image to Patch Embedding. + + We use a conv layer to implement PatchEmbed. + + Args: + img_size (int | tuple): The size of input image. Default: 224 + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None + conv_cfg (dict, optional): The config dict for conv layers. + Default: None + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None + """ + + def __init__(self, + img_size=224, + in_channels=3, + embed_dims=768, + norm_cfg=None, + conv_cfg=None, + init_cfg=None): + super(PatchEmbed, self).__init__(init_cfg) + warnings.warn('The `PatchEmbed` in mmpretrain will be deprecated. ' + 'Please use `mmcv.cnn.bricks.transformer.PatchEmbed`. 
' + "It's more general and supports dynamic input shape") + + if isinstance(img_size, int): + img_size = to_2tuple(img_size) + elif isinstance(img_size, tuple): + if len(img_size) == 1: + img_size = to_2tuple(img_size[0]) + assert len(img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(img_size)}' + + self.img_size = img_size + self.embed_dims = embed_dims + + # Use conv layer to embed + conv_cfg = conv_cfg or dict() + _conv_cfg = dict( + type='Conv2d', kernel_size=16, stride=16, padding=0, dilation=1) + _conv_cfg.update(conv_cfg) + self.projection = build_conv_layer(_conv_cfg, in_channels, embed_dims) + + # Calculate how many patches a input image is splited to. + h_out, w_out = [(self.img_size[i] + 2 * self.projection.padding[i] - + self.projection.dilation[i] * + (self.projection.kernel_size[i] - 1) - 1) // + self.projection.stride[i] + 1 for i in range(2)] + + self.patches_resolution = (h_out, w_out) + self.num_patches = h_out * w_out + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = None + + def forward(self, x): + B, C, H, W = x.shape + assert H == self.img_size[0] and W == self.img_size[1], \ + f"Input image size ({H}*{W}) doesn't " \ + f'match model ({self.img_size[0]}*{self.img_size[1]}).' + # The output size is (B, N, D), where N=H*W/P/P, D is embid_dim + x = self.projection(x).flatten(2).transpose(1, 2) + + if self.norm is not None: + x = self.norm(x) + + return x + + +# Modified from pytorch-image-models +class HybridEmbed(BaseModule): + """CNN Feature Map Embedding. + + Extract feature map from CNN, flatten, + project to embedding dim. + + Args: + backbone (nn.Module): CNN backbone + img_size (int | tuple): The size of input image. Default: 224 + feature_size (int | tuple, optional): Size of feature map extracted by + CNN backbone. Default: None + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_cfg (dict, optional): The config dict for conv layers. + Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. + """ + + def __init__(self, + backbone, + img_size=224, + feature_size=None, + in_channels=3, + embed_dims=768, + conv_cfg=None, + init_cfg=None): + super(HybridEmbed, self).__init__(init_cfg) + assert isinstance(backbone, nn.Module) + if isinstance(img_size, int): + img_size = to_2tuple(img_size) + elif isinstance(img_size, tuple): + if len(img_size) == 1: + img_size = to_2tuple(img_size[0]) + assert len(img_size) == 2, \ + f'The size of image should have length 1 or 2, ' \ + f'but got {len(img_size)}' + + self.img_size = img_size + self.backbone = backbone + if feature_size is None: + with torch.no_grad(): + # FIXME this is hacky, but most reliable way of + # determining the exact dim of the output feature + # map for all networks, the feature metadata has + # reliable channel and stride info, but using + # stride to calc feature dim requires info about padding of + # each stage that isn't captured. 
+ training = backbone.training + if training: + backbone.eval() + o = self.backbone( + torch.zeros(1, in_channels, img_size[0], img_size[1])) + if isinstance(o, (list, tuple)): + # last feature if backbone outputs list/tuple of features + o = o[-1] + feature_size = o.shape[-2:] + feature_dim = o.shape[1] + backbone.train(training) + else: + feature_size = to_2tuple(feature_size) + if hasattr(self.backbone, 'feature_info'): + feature_dim = self.backbone.feature_info.channels()[-1] + else: + feature_dim = self.backbone.num_features + self.num_patches = feature_size[0] * feature_size[1] + + # Use conv layer to embed + conv_cfg = conv_cfg or dict() + _conv_cfg = dict( + type='Conv2d', kernel_size=1, stride=1, padding=0, dilation=1) + _conv_cfg.update(conv_cfg) + self.projection = build_conv_layer(_conv_cfg, feature_dim, embed_dims) + + def forward(self, x): + x = self.backbone(x) + if isinstance(x, (list, tuple)): + # last feature if backbone outputs list/tuple of features + x = x[-1] + x = self.projection(x).flatten(2).transpose(1, 2) + return x + + +class PatchMerging(BaseModule): + """Merge patch feature map. + + Modified from mmcv, and this module supports specifying whether to use + post-norm. + + This layer groups feature map by kernel_size, and applies norm and linear + layers to the grouped feature map ((used in Swin Transformer)). Our + implementation uses :class:`torch.nn.Unfold` to merge patches, which is + about 25% faster than the original implementation. However, we need to + modify pretrained models for compatibility. + + Args: + in_channels (int): The num of input channels. To gets fully covered + by filter and stride you specified. + out_channels (int): The num of output channels. + kernel_size (int | tuple, optional): the kernel size in the unfold + layer. Defaults to 2. + stride (int | tuple, optional): the stride of the sliding blocks in the + unfold layer. Defaults to None, which means to be set as + ``kernel_size``. + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Defaults to "corner". + dilation (int | tuple, optional): dilation parameter in the unfold + layer. Defaults to 1. + bias (bool, optional): Whether to add bias in linear layer or not. + Defaults to False. + norm_cfg (dict, optional): Config dict for normalization layer. + Defaults to ``dict(type='LN')``. + use_post_norm (bool): Whether to use post normalization here. + Defaults to False. + init_cfg (dict, optional): The extra config for initialization. + Defaults to None. 
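+
+    Example:
+        A minimal sketch of merging a 14x14 token map (all sizes are
+        illustrative)::
+
+            >>> import torch
+            >>> merge = PatchMerging(in_channels=96, out_channels=192)
+            >>> x = torch.rand(2, 14 * 14, 96)
+            >>> out, out_size = merge(x, input_size=(14, 14))
+            >>> out.shape, out_size
+            (torch.Size([2, 49, 192]), (7, 7))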
+ """ + + def __init__(self, + in_channels, + out_channels, + kernel_size=2, + stride=None, + padding='corner', + dilation=1, + bias=False, + norm_cfg=dict(type='LN'), + use_post_norm=False, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.use_post_norm = use_post_norm + + if stride: + stride = stride + else: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adaptive_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of unfold + padding = 0 + else: + self.adaptive_padding = None + + padding = to_2tuple(padding) + self.sampler = nn.Unfold( + kernel_size=kernel_size, + dilation=dilation, + padding=padding, + stride=stride) + + sample_dim = kernel_size[0] * kernel_size[1] * in_channels + + self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) + + if norm_cfg is not None: + # build pre or post norm layer based on different channels + if self.use_post_norm: + self.norm = build_norm_layer(norm_cfg, out_channels)[1] + else: + self.norm = build_norm_layer(norm_cfg, sample_dim)[1] + else: + self.norm = None + + def forward(self, x, input_size): + """ + Args: + x (Tensor): Has shape (B, H*W, C_in). + input_size (tuple[int]): The spatial shape of x, arrange as (H, W). + Default: None. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) + - out_size (tuple[int]): Spatial shape of x, arrange as + (Merged_H, Merged_W). + """ + B, L, C = x.shape + assert isinstance(input_size, Sequence), f'Expect ' \ + f'input_size is ' \ + f'`Sequence` ' \ + f'but get {input_size}' + + H, W = input_size + assert L == H * W, 'input feature has wrong size' + + x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W + + if self.adaptive_padding: + x = self.adaptive_padding(x) + H, W = x.shape[-2:] + + # Use nn.Unfold to merge patch. About 25% faster than original method, + # but need to modify pretrained model for compatibility + # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) + x = self.sampler(x) + + out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * + (self.sampler.kernel_size[0] - 1) - + 1) // self.sampler.stride[0] + 1 + out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * + (self.sampler.kernel_size[1] - 1) - + 1) // self.sampler.stride[1] + 1 + + output_size = (out_h, out_w) + x = x.transpose(1, 2) # B, H/2*W/2, 4*C + + if self.use_post_norm: + # use post-norm here + x = self.reduction(x) + x = self.norm(x) if self.norm else x + else: + x = self.norm(x) if self.norm else x + x = self.reduction(x) + + return x, output_size diff --git a/mmpretrain/models/utils/helpers.py b/mmpretrain/models/utils/helpers.py new file mode 100644 index 0000000..971f450 --- /dev/null +++ b/mmpretrain/models/utils/helpers.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import collections.abc +import warnings +from itertools import repeat + +import torch +from mmengine.utils import digit_version + + +def is_tracing() -> bool: + """Determine whether the model is called during the tracing of code with + ``torch.jit.trace``.""" + if digit_version(torch.__version__) >= digit_version('1.6.0'): + on_trace = torch.jit.is_tracing() + # In PyTorch 1.6, torch.jit.is_tracing has a bug. 
+ # Refers to https://github.com/pytorch/pytorch/issues/42448 + if isinstance(on_trace, bool): + return on_trace + else: + return torch._C._is_tracing() + else: + warnings.warn( + 'torch.jit.is_tracing is only supported after v1.6.0. ' + 'Therefore is_tracing returns False automatically. Please ' + 'set on_trace manually if you are using trace.', UserWarning) + return False + + +# From PyTorch internals +def _ntuple(n): + """A `to_tuple` function generator. + + It returns a function, this function will repeat the input to a tuple of + length ``n`` if the input is not an Iterable object, otherwise, return the + input directly. + + Args: + n (int): The number of the target length. + """ + + def parse(x): + if isinstance(x, collections.abc.Iterable): + return x + return tuple(repeat(x, n)) + + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple diff --git a/mmpretrain/models/utils/huggingface.py b/mmpretrain/models/utils/huggingface.py new file mode 100644 index 0000000..a44d6da --- /dev/null +++ b/mmpretrain/models/utils/huggingface.py @@ -0,0 +1,100 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import contextlib +from typing import Optional + +import transformers +from mmengine.registry import Registry +from transformers import AutoConfig, PreTrainedModel +from transformers.models.auto.auto_factory import _BaseAutoModelClass + +from mmpretrain.registry import MODELS, TOKENIZER + + +def register_hf_tokenizer( + cls: Optional[type] = None, + registry: Registry = TOKENIZER, +): + """Register HuggingFace-style PreTrainedTokenizerBase class.""" + if cls is None: + + # use it as a decorator: @register_hf_tokenizer() + def _register(cls): + register_hf_tokenizer(cls=cls) + return cls + + return _register + + def from_pretrained(**kwargs): + if ('pretrained_model_name_or_path' not in kwargs + and 'name_or_path' not in kwargs): + raise TypeError( + f'{cls.__name__}.from_pretrained() missing required ' + "argument 'pretrained_model_name_or_path' or 'name_or_path'.") + # `pretrained_model_name_or_path` is too long for config, + # add an alias name `name_or_path` here. 
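+        # Whichever alias is present is popped here so that it is not
+        # forwarded a second time through the remaining ``**kwargs``.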
+ name_or_path = kwargs.pop('pretrained_model_name_or_path', + kwargs.pop('name_or_path')) + return cls.from_pretrained(name_or_path, **kwargs) + + registry._register_module(module=from_pretrained, module_name=cls.__name__) + return cls + + +_load_hf_pretrained_model = True + + +@contextlib.contextmanager +def no_load_hf_pretrained_model(): + global _load_hf_pretrained_model + _load_hf_pretrained_model = False + yield + _load_hf_pretrained_model = True + + +def register_hf_model( + cls: Optional[type] = None, + registry: Registry = MODELS, +): + """Register HuggingFace-style PreTrainedModel class.""" + if cls is None: + + # use it as a decorator: @register_hf_tokenizer() + def _register(cls): + register_hf_model(cls=cls) + return cls + + return _register + + if issubclass(cls, _BaseAutoModelClass): + get_config = AutoConfig.from_pretrained + from_config = cls.from_config + elif issubclass(cls, PreTrainedModel): + get_config = cls.config_class.from_pretrained + from_config = cls + else: + raise TypeError('Not auto model nor pretrained model of huggingface.') + + def build(**kwargs): + if ('pretrained_model_name_or_path' not in kwargs + and 'name_or_path' not in kwargs): + raise TypeError( + f'{cls.__name__} missing required argument ' + '`pretrained_model_name_or_path` or `name_or_path`.') + # `pretrained_model_name_or_path` is too long for config, + # add an alias name `name_or_path` here. + name_or_path = kwargs.pop('pretrained_model_name_or_path', + kwargs.pop('name_or_path')) + + if kwargs.pop('load_pretrained', True) and _load_hf_pretrained_model: + model = cls.from_pretrained(name_or_path, **kwargs) + setattr(model, 'is_init', True) + return model + else: + cfg = get_config(name_or_path, **kwargs) + return from_config(cfg) + + registry._register_module(module=build, module_name=cls.__name__) + return cls + + +register_hf_model(transformers.AutoModelForCausalLM) diff --git a/mmpretrain/models/utils/inverted_residual.py b/mmpretrain/models/utils/inverted_residual.py new file mode 100644 index 0000000..8387b21 --- /dev/null +++ b/mmpretrain/models/utils/inverted_residual.py @@ -0,0 +1,125 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn import ConvModule +from mmcv.cnn.bricks import DropPath +from mmengine.model import BaseModule + +from .se_layer import SELayer + + +class InvertedResidual(BaseModule): + """Inverted Residual Block. + + Args: + in_channels (int): The input channels of this module. + out_channels (int): The output channels of this module. + mid_channels (int): The input channels of the depthwise convolution. + kernel_size (int): The kernel size of the depthwise convolution. + Defaults to 3. + stride (int): The stride of the depthwise convolution. Defaults to 1. + se_cfg (dict, optional): Config dict for se layer. Defaults to None, + which means no se layer. + conv_cfg (dict): Config dict for convolution layer. Defaults to None, + which means using conv2d. + norm_cfg (dict): Config dict for normalization layer. + Defaults to ``dict(type='BN')``. + act_cfg (dict): Config dict for activation layer. + Defaults to ``dict(type='ReLU')``. + drop_path_rate (float): stochastic depth rate. Defaults to 0. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. Defaults to False. + init_cfg (dict | list[dict], optional): Initialization config dict. 
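+
+    Example:
+        A minimal sketch of a MobileNetV2-style block; the channel numbers
+        are illustrative::
+
+            >>> import torch
+            >>> block = InvertedResidual(
+            ...     in_channels=32, out_channels=32, mid_channels=192)
+            >>> x = torch.rand(2, 32, 56, 56)
+            >>> block(x).shape  # stride 1, equal channels -> residual add
+            torch.Size([2, 32, 56, 56])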
+ """ + + def __init__(self, + in_channels, + out_channels, + mid_channels, + kernel_size=3, + stride=1, + se_cfg=None, + conv_cfg=None, + norm_cfg=dict(type='BN'), + act_cfg=dict(type='ReLU'), + drop_path_rate=0., + with_cp=False, + init_cfg=None): + super(InvertedResidual, self).__init__(init_cfg) + self.with_res_shortcut = (stride == 1 and in_channels == out_channels) + assert stride in [1, 2] + self.with_cp = with_cp + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0 else nn.Identity() + self.with_se = se_cfg is not None + self.with_expand_conv = (mid_channels != in_channels) + + if self.with_se: + assert isinstance(se_cfg, dict) + + if self.with_expand_conv: + self.expand_conv = ConvModule( + in_channels=in_channels, + out_channels=mid_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.depthwise_conv = ConvModule( + in_channels=mid_channels, + out_channels=mid_channels, + kernel_size=kernel_size, + stride=stride, + padding=kernel_size // 2, + groups=mid_channels, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + if self.with_se: + self.se = SELayer(**se_cfg) + self.linear_conv = ConvModule( + in_channels=mid_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + padding=0, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + act_cfg=None) + + def forward(self, x): + """Forward function. + + Args: + x (torch.Tensor): The input tensor. + + Returns: + torch.Tensor: The output tensor. + """ + + def _inner_forward(x): + out = x + + if self.with_expand_conv: + out = self.expand_conv(out) + + out = self.depthwise_conv(out) + + if self.with_se: + out = self.se(out) + + out = self.linear_conv(out) + + if self.with_res_shortcut: + return x + self.drop_path(out) + else: + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + return out diff --git a/mmpretrain/models/utils/layer_scale.py b/mmpretrain/models/utils/layer_scale.py new file mode 100644 index 0000000..bb480a1 --- /dev/null +++ b/mmpretrain/models/utils/layer_scale.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Union + +import torch +import torch.nn as nn + + +class LayerScale(nn.Module): + """LayerScale layer. + + Args: + dim (int): Dimension of input features. + layer_scale_init_value (float or torch.Tensor): Init value of layer + scale. Defaults to 1e-5. + inplace (bool): inplace: can optionally do the + operation in-place. Defaults to False. + data_format (str): The input data format, could be 'channels_last' + or 'channels_first', representing (B, C, H, W) and + (B, N, C) format data respectively. Defaults to 'channels_last'. + """ + + def __init__(self, + dim: int, + layer_scale_init_value: Union[float, torch.Tensor] = 1e-5, + inplace: bool = False, + data_format: str = 'channels_last'): + super().__init__() + assert data_format in ('channels_last', 'channels_first'), \ + "'data_format' could only be channels_last or channels_first." 
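+        # One learnable scale per channel, initialized to
+        # ``layer_scale_init_value`` so the scaled branch starts near zero.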
+ self.inplace = inplace + self.data_format = data_format + self.weight = nn.Parameter(torch.ones(dim) * layer_scale_init_value) + + def forward(self, x): + if self.data_format == 'channels_first': + if self.inplace: + return x.mul_(self.weight.view(-1, 1, 1)) + else: + return x * self.weight.view(-1, 1, 1) + return x.mul_(self.weight) if self.inplace else x * self.weight diff --git a/mmpretrain/models/utils/make_divisible.py b/mmpretrain/models/utils/make_divisible.py new file mode 100644 index 0000000..1ec7468 --- /dev/null +++ b/mmpretrain/models/utils/make_divisible.py @@ -0,0 +1,25 @@ +# Copyright (c) OpenMMLab. All rights reserved. +def make_divisible(value, divisor, min_value=None, min_ratio=0.9): + """Make divisible function. + + This function rounds the channel number down to the nearest value that can + be divisible by the divisor. + + Args: + value (int): The original channel number. + divisor (int): The divisor to fully divide the channel number. + min_value (int, optional): The minimum value of the output channel. + Default: None, means that the minimum value equal to the divisor. + min_ratio (float): The minimum ratio of the rounded channel + number to the original channel number. Default: 0.9. + Returns: + int: The modified output channel number + """ + + if min_value is None: + min_value = divisor + new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than (1-min_ratio). + if new_value < min_ratio * value: + new_value += divisor + return new_value diff --git a/mmpretrain/models/utils/norm.py b/mmpretrain/models/utils/norm.py new file mode 100644 index 0000000..8b890a0 --- /dev/null +++ b/mmpretrain/models/utils/norm.py @@ -0,0 +1,133 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class GRN(nn.Module): + """Global Response Normalization Module. + + Come from `ConvNeXt V2: Co-designing and Scaling ConvNets with Masked + Autoencoders `_ + + Args: + in_channels (int): The number of channels of the input tensor. + eps (float): a value added to the denominator for numerical stability. + Defaults to 1e-6. + """ + + def __init__(self, in_channels, eps=1e-6): + super().__init__() + self.in_channels = in_channels + self.gamma = nn.Parameter(torch.zeros(in_channels)) + self.beta = nn.Parameter(torch.zeros(in_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor, data_format='channel_first'): + """Forward method. + + Args: + x (torch.Tensor): The input tensor. + data_format (str): The format of the input tensor. If + ``"channel_first"``, the shape of the input tensor should be + (B, C, H, W). If ``"channel_last"``, the shape of the input + tensor should be (B, H, W, C). Defaults to "channel_first". + """ + if data_format == 'channel_last': + gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True) + nx = gx / (gx.mean(dim=-1, keepdim=True) + self.eps) + x = self.gamma * (x * nx) + self.beta + x + elif data_format == 'channel_first': + gx = torch.norm(x, p=2, dim=(2, 3), keepdim=True) + nx = gx / (gx.mean(dim=1, keepdim=True) + self.eps) + x = self.gamma.view(1, -1, 1, 1) * (x * nx) + self.beta.view( + 1, -1, 1, 1) + x + return x + + +@MODELS.register_module('LN2d') +class LayerNorm2d(nn.LayerNorm): + """LayerNorm on channels for 2d images. + + Args: + num_channels (int): The number of channels of the input tensor. 
+ eps (float): a value added to the denominator for numerical stability. + Defaults to 1e-5. + elementwise_affine (bool): a boolean value that when set to ``True``, + this module has learnable per-element affine parameters initialized + to ones (for weights) and zeros (for biases). Defaults to True. + """ + + def __init__(self, num_channels: int, **kwargs) -> None: + super().__init__(num_channels, **kwargs) + self.num_channels = self.normalized_shape[0] + + def forward(self, x, data_format='channel_first'): + """Forward method. + + Args: + x (torch.Tensor): The input tensor. + data_format (str): The format of the input tensor. If + ``"channel_first"``, the shape of the input tensor should be + (B, C, H, W). If ``"channel_last"``, the shape of the input + tensor should be (B, H, W, C). Defaults to "channel_first". + """ + assert x.dim() == 4, 'LayerNorm2d only supports inputs with shape ' \ + f'(N, C, H, W), but got tensor with shape {x.shape}' + if data_format == 'channel_last': + x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, + self.eps) + elif data_format == 'channel_first': + x = x.permute(0, 2, 3, 1) + x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, + self.eps) + # If the output is discontiguous, it may cause some unexpected + # problem in the downstream tasks + x = x.permute(0, 3, 1, 2).contiguous() + return x + + +def build_norm_layer(cfg: dict, num_features: int) -> nn.Module: + """Build normalization layer. + + Args: + cfg (dict): The norm layer config, which should contain: + + - type (str): Layer type. + - layer args: Args needed to instantiate a norm layer. + + num_features (int): Number of input channels. + + Returns: + nn.Module: The created norm layer. + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + norm_layer = MODELS.get(layer_type) + if norm_layer is None: + raise KeyError(f'Cannot find {layer_type} in registry under scope ' + f'name {MODELS.scope}') + + requires_grad = cfg_.pop('requires_grad', True) + cfg_.setdefault('eps', 1e-5) + + if layer_type != 'GN': + layer = norm_layer(num_features, **cfg_) + else: + layer = norm_layer(num_channels=num_features, **cfg_) + + if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'): + layer._specify_ddp_gpu_num(1) + + for param in layer.parameters(): + param.requires_grad = requires_grad + + return layer diff --git a/mmpretrain/models/utils/position_encoding.py b/mmpretrain/models/utils/position_encoding.py new file mode 100644 index 0000000..07a3c48 --- /dev/null +++ b/mmpretrain/models/utils/position_encoding.py @@ -0,0 +1,247 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from functools import partial +from typing import Optional, Sequence, Union + +import torch +import torch.nn as nn +from mmengine.model import BaseModule +from mmengine.utils import digit_version + +from ..utils import to_2tuple + +# After pytorch v1.10.0, use torch.meshgrid without indexing +# will raise extra warning. For more details, +# refers to https://github.com/pytorch/pytorch/issues/50276 +if digit_version(torch.__version__) >= digit_version('1.10.0'): + torch_meshgrid = partial(torch.meshgrid, indexing='ij') +else: + torch_meshgrid = torch.meshgrid + + +class ConditionalPositionEncoding(BaseModule): + """The Conditional Position Encoding (CPE) module. 
+ + The CPE is the implementation of 'Conditional Positional Encodings + for Vision Transformers '_. + + Args: + in_channels (int): Number of input channels. + embed_dims (int): The feature dimension. Default: 768. + stride (int): Stride of conv layer. Default: 1. + """ + + def __init__(self, in_channels, embed_dims=768, stride=1, init_cfg=None): + super(ConditionalPositionEncoding, self).__init__(init_cfg=init_cfg) + self.proj = nn.Conv2d( + in_channels, + embed_dims, + kernel_size=3, + stride=stride, + padding=1, + bias=True, + groups=embed_dims) + self.stride = stride + + def forward(self, x, hw_shape): + B, N, C = x.shape + H, W = hw_shape + feat_token = x + # convert (B, N, C) to (B, C, H, W) + cnn_feat = feat_token.transpose(1, 2).view(B, C, H, W).contiguous() + if self.stride == 1: + x = self.proj(cnn_feat) + cnn_feat + else: + x = self.proj(cnn_feat) + x = x.flatten(2).transpose(1, 2) + return x + + +class PositionEncodingFourier(BaseModule): + """The Position Encoding Fourier (PEF) module. + + The PEF is adopted from EdgeNeXt '_. + Args: + in_channels (int): Number of input channels. + Default: 32 + embed_dims (int): The feature dimension. + Default: 768. + temperature (int): Temperature. + Default: 10000. + dtype (torch.dtype): The data type. + Default: torch.float32. + init_cfg (dict): The config dict for initializing the module. + Default: None. + """ + + def __init__(self, + in_channels=32, + embed_dims=768, + temperature=10000, + dtype=torch.float32, + init_cfg=None): + super(PositionEncodingFourier, self).__init__(init_cfg=init_cfg) + self.proj = nn.Conv2d(in_channels * 2, embed_dims, kernel_size=1) + self.scale = 2 * math.pi + self.in_channels = in_channels + self.embed_dims = embed_dims + self.dtype = dtype + + if digit_version(torch.__version__) < digit_version('1.8.0'): + floor_div = torch.floor_divide + else: + floor_div = partial(torch.div, rounding_mode='floor') + dim_t = torch.arange(in_channels, dtype=self.dtype) + self.dim_t = temperature**(2 * floor_div(dim_t, 2) / in_channels) + + def forward(self, bhw_shape): + B, H, W = bhw_shape + mask = torch.zeros(B, H, W).bool().to(self.proj.weight.device) + not_mask = ~mask + eps = 1e-6 + y_embed = not_mask.cumsum(1, dtype=self.dtype) + x_embed = not_mask.cumsum(2, dtype=self.dtype) + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = self.dim_t.to(mask.device) + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), + dim=4).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), + dim=4).flatten(3) + + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + pos = self.proj(pos) + + return pos + + +def build_2d_sincos_position_embedding( + patches_resolution: Union[int, Sequence[int]], + embed_dims: int, + temperature: Optional[int] = 10000., + cls_token: Optional[bool] = False) -> torch.Tensor: + """The function is to build position embedding for model to obtain the + position information of the image patches. + + Args: + patches_resolution (Union[int, Sequence[int]]): The resolution of each + patch. + embed_dims (int): The dimension of the embedding vector. + temperature (int, optional): The temperature parameter. Defaults to + 10000. + cls_token (bool, optional): Whether to concatenate class token. + Defaults to False. + + Returns: + torch.Tensor: The position embedding vector. 
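+
+    Example:
+        A minimal sketch for a 14x14 patch grid (sizes are illustrative)::
+
+            >>> pos_embed = build_2d_sincos_position_embedding(
+            ...     patches_resolution=14, embed_dims=768, cls_token=True)
+            >>> pos_embed.shape
+            torch.Size([1, 197, 768])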
+ """ + + if isinstance(patches_resolution, int): + patches_resolution = (patches_resolution, patches_resolution) + + h, w = patches_resolution + grid_w = torch.arange(w, dtype=torch.float32) + grid_h = torch.arange(h, dtype=torch.float32) + grid_w, grid_h = torch_meshgrid(grid_w, grid_h) + assert embed_dims % 4 == 0, \ + 'Embed dimension must be divisible by 4.' + pos_dim = embed_dims // 4 + + omega = torch.arange(pos_dim, dtype=torch.float32) / pos_dim + omega = 1. / (temperature**omega) + out_w = torch.einsum('m,d->md', [grid_w.flatten(), omega]) + out_h = torch.einsum('m,d->md', [grid_h.flatten(), omega]) + + pos_emb = torch.cat( + [ + torch.sin(out_w), + torch.cos(out_w), + torch.sin(out_h), + torch.cos(out_h) + ], + dim=1, + )[None, :, :] + + if cls_token: + cls_token_pe = torch.zeros([1, 1, embed_dims], dtype=torch.float32) + pos_emb = torch.cat([cls_token_pe, pos_emb], dim=1) + + return pos_emb + + +class RotaryEmbeddingFast(BaseModule): + """Implements 2D rotary embedding (RoPE) for image tokens. Position + encoding is implemented with sin and cos functions, + + .. math:: + Pos_{cos} = cos(\frac{t}{\theta^{\frac{2i}{d}}} \\ + Pos_{sin} = sin(\frac{t}{\theta^{\frac{2i}{d}}} + Args: + embed_dims (int): The feature dimension for each head. + patch_resolution (int | tuple): The resolution of the + image, in format (H, W). + theta (float): The hyperparameter for position coding. + Defaults to 10000. + init_cfg (dict, optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + embed_dims, + patch_resolution, + theta=10000., + init_cfg=None): + super(RotaryEmbeddingFast, self).__init__(init_cfg=init_cfg) + + self.half_dim = embed_dims // 2 + self.patch_resolution = to_2tuple(patch_resolution) + self.theta = theta + + freqs_cos, freqs_sin = self.compute_position_embedding() + self.register_buffer('freqs_cos', freqs_cos) + self.register_buffer('freqs_sin', freqs_sin) + + def compute_position_embedding(self): + frequency = self.theta**( + torch.arange(0, self.half_dim, 2).float() / self.half_dim) + frequency = 1. 
/ frequency + + h, w = self.patch_resolution + th = torch.arange(h) / h * self.half_dim + tw = torch.arange(w) / w * self.half_dim + + position_h = (th[:, None] @ frequency[None, :]).repeat(1, 2) + position_w = (tw[:, None] @ frequency[None, :]).repeat(1, 2) + + height = position_h[:, None, :].expand(h, w, self.half_dim) + width = position_w[None, :, :].expand(h, w, self.half_dim) + position = torch.cat((height, width), dim=-1) + + freqs_cos = position.cos().view(-1, position.shape[-1]) + freqs_sin = position.sin().view(-1, position.shape[-1]) + + return freqs_cos, freqs_sin + + def forward(self, x, patch_resolution): + # Check whether the patch resolution is the predefined size + patch_resolution = to_2tuple(patch_resolution) + if patch_resolution != self.patch_resolution: + self.patch_resolution = patch_resolution + freqs_cos, freqs_sin = self.compute_position_embedding() + self.register_buffer('freqs_cos', freqs_cos.to(x.device)) + self.register_buffer('freqs_sin', freqs_sin.to(x.device)) + + batch, num_heads, num_patches, dim = x.shape + + inputs = x + x = x.reshape(batch, num_heads, num_patches, -1, 2) + x1, x2 = x.unbind(dim=-1) + x = torch.stack((-x2, x1), dim=-1) + x = x.reshape(batch, num_heads, num_patches, dim) + + return inputs * self.freqs_cos + x * self.freqs_sin diff --git a/mmpretrain/models/utils/res_layer_extra_norm.py b/mmpretrain/models/utils/res_layer_extra_norm.py new file mode 100644 index 0000000..37e387b --- /dev/null +++ b/mmpretrain/models/utils/res_layer_extra_norm.py @@ -0,0 +1,31 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .norm import build_norm_layer + +try: + from mmdet.models.backbones import ResNet + from mmdet.models.roi_heads.shared_heads.res_layer import ResLayer + from mmdet.registry import MODELS + + @MODELS.register_module() + class ResLayerExtraNorm(ResLayer): + """Add extra norm to original ``ResLayer``.""" + + def __init__(self, *args, **kwargs): + super(ResLayerExtraNorm, self).__init__(*args, **kwargs) + + block = ResNet.arch_settings[kwargs['depth']][0] + self.add_module( + 'norm', + build_norm_layer(self.norm_cfg, + 64 * 2**self.stage * block.expansion)) + + def forward(self, x): + """Forward function.""" + res_layer = getattr(self, f'layer{self.stage + 1}') + norm = getattr(self, 'norm') + x = res_layer(x) + out = norm(x) + return out + +except ImportError: + ResLayerExtraNorm = None diff --git a/mmpretrain/models/utils/se_layer.py b/mmpretrain/models/utils/se_layer.py new file mode 100644 index 0000000..2029017 --- /dev/null +++ b/mmpretrain/models/utils/se_layer.py @@ -0,0 +1,80 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from mmengine.utils import is_tuple_of + +from .make_divisible import make_divisible + + +class SELayer(BaseModule): + """Squeeze-and-Excitation Module. + + Args: + channels (int): The input (and output) channels of the SE layer. + squeeze_channels (None or int): The intermediate channel number of + SElayer. Default: None, means the value of ``squeeze_channels`` + is ``make_divisible(channels // ratio, divisor)``. + ratio (int): Squeeze ratio in SELayer, the intermediate channel will + be ``make_divisible(channels // ratio, divisor)``. Only used when + ``squeeze_channels`` is None. Default: 16. + divisor(int): The divisor to true divide the channel number. Only + used when ``squeeze_channels`` is None. Default: 8. + conv_cfg (None or dict): Config dict for convolution layer. 
Default: + None, which means using conv2d. + return_weight(bool): Whether to return the weight. Default: False. + act_cfg (dict or Sequence[dict]): Config dict for activation layer. + If act_cfg is a dict, two activation layers will be configurated + by this dict. If act_cfg is a sequence of dicts, the first + activation layer will be configurated by the first dict and the + second activation layer will be configurated by the second dict. + Default: (dict(type='ReLU'), dict(type='Sigmoid')) + """ + + def __init__(self, + channels, + squeeze_channels=None, + ratio=16, + divisor=8, + bias='auto', + conv_cfg=None, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')), + return_weight=False, + init_cfg=None): + super(SELayer, self).__init__(init_cfg) + if isinstance(act_cfg, dict): + act_cfg = (act_cfg, act_cfg) + assert len(act_cfg) == 2 + assert is_tuple_of(act_cfg, dict) + self.global_avgpool = nn.AdaptiveAvgPool2d(1) + if squeeze_channels is None: + squeeze_channels = make_divisible(channels // ratio, divisor) + assert isinstance(squeeze_channels, int) and squeeze_channels > 0, \ + '"squeeze_channels" should be a positive integer, but get ' + \ + f'{squeeze_channels} instead.' + self.return_weight = return_weight + self.conv1 = ConvModule( + in_channels=channels, + out_channels=squeeze_channels, + kernel_size=1, + stride=1, + bias=bias, + conv_cfg=conv_cfg, + act_cfg=act_cfg[0]) + self.conv2 = ConvModule( + in_channels=squeeze_channels, + out_channels=channels, + kernel_size=1, + stride=1, + bias=bias, + conv_cfg=conv_cfg, + act_cfg=act_cfg[1]) + + def forward(self, x): + out = self.global_avgpool(x) + out = self.conv1(out) + out = self.conv2(out) + if self.return_weight: + return out + else: + return x * out diff --git a/mmpretrain/models/utils/sparse_modules.py b/mmpretrain/models/utils/sparse_modules.py new file mode 100644 index 0000000..dd6bf34 --- /dev/null +++ b/mmpretrain/models/utils/sparse_modules.py @@ -0,0 +1,149 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Copyright (c) ByteDance, Inc. and its affiliates. All rights reserved. 
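(Reviewer sketch, not applied by this patch: a minimal usage example for the SELayer defined in se_layer.py above. It assumes the class is exported from mmpretrain.models.utils as this commit wires up, and that torch/mmcv are installed.)

import torch
from mmpretrain.models.utils import SELayer

# Channel attention over a (B, C, H, W) feature map.
se = SELayer(channels=64, ratio=16)   # squeeze width = make_divisible(64 // 16, 8)
x = torch.rand(2, 64, 32, 32)
out = se(x)                           # same shape; channels re-weighted by the SE gate
assert out.shape == x.shape

# With return_weight=True the layer returns the per-channel gate itself
# (shape (2, 64, 1, 1)) instead of the re-weighted feature map.
gate = SELayer(channels=64, return_weight=True)(x)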
+# Modified from https://github.com/keyu-tian/SparK/blob/main/encoder.py +import torch +import torch.nn as nn + +from mmpretrain.registry import MODELS + + +class SparseHelper: + """The helper to compute sparse operation with pytorch, such as sparse + convlolution, sparse batch norm, etc.""" + + _cur_active: torch.Tensor = None + + @staticmethod + def _get_active_map_or_index(H: int, + returning_active_map: bool = True + ) -> torch.Tensor: + """Get current active map with (B, 1, f, f) shape or index format.""" + # _cur_active with shape (B, 1, f, f) + downsample_raito = H // SparseHelper._cur_active.shape[-1] + active_ex = SparseHelper._cur_active.repeat_interleave( + downsample_raito, 2).repeat_interleave(downsample_raito, 3) + return active_ex if returning_active_map else active_ex.squeeze( + 1).nonzero(as_tuple=True) + + @staticmethod + def sp_conv_forward(self, x: torch.Tensor) -> torch.Tensor: + """Sparse convolution forward function.""" + x = super(type(self), self).forward(x) + + # (b, c, h, w) *= (b, 1, h, w), mask the output of conv + x *= SparseHelper._get_active_map_or_index( + H=x.shape[2], returning_active_map=True) + return x + + @staticmethod + def sp_bn_forward(self, x: torch.Tensor) -> torch.Tensor: + """Sparse batch norm forward function.""" + active_index = SparseHelper._get_active_map_or_index( + H=x.shape[2], returning_active_map=False) + + # (b, c, h, w) -> (b, h, w, c) + x_permuted = x.permute(0, 2, 3, 1) + + # select the features on non-masked positions to form flatten features + # with shape (n, c) + x_flattened = x_permuted[active_index] + + # use BN1d to normalize this flatten feature (n, c) + x_flattened = super(type(self), self).forward(x_flattened) + + # generate output + output = torch.zeros_like(x_permuted, dtype=x_flattened.dtype) + output[active_index] = x_flattened + + # (b, h, w, c) -> (b, c, h, w) + output = output.permute(0, 3, 1, 2) + return output + + +class SparseConv2d(nn.Conv2d): + """hack: override the forward function. + See `sp_conv_forward` above for more details + """ + forward = SparseHelper.sp_conv_forward + + +class SparseMaxPooling(nn.MaxPool2d): + """hack: override the forward function. + See `sp_conv_forward` above for more details + """ + forward = SparseHelper.sp_conv_forward + + +class SparseAvgPooling(nn.AvgPool2d): + """hack: override the forward function. + See `sp_conv_forward` above for more details + """ + forward = SparseHelper.sp_conv_forward + + +@MODELS.register_module() +class SparseBatchNorm2d(nn.BatchNorm1d): + """hack: override the forward function. + See `sp_bn_forward` above for more details + """ + forward = SparseHelper.sp_bn_forward + + +@MODELS.register_module() +class SparseSyncBatchNorm2d(nn.SyncBatchNorm): + """hack: override the forward function. + See `sp_bn_forward` above for more details + """ + forward = SparseHelper.sp_bn_forward + + +@MODELS.register_module('SparseLN2d') +class SparseLayerNorm2D(nn.LayerNorm): + """Implementation of sparse LayerNorm on channels for 2d images.""" + + def forward(self, + x: torch.Tensor, + data_format='channel_first') -> torch.Tensor: + """Sparse layer norm forward function with 2D data. + + Args: + x (torch.Tensor): The input tensor. + data_format (str): The format of the input tensor. If + ``"channel_first"``, the shape of the input tensor should be + (B, C, H, W). If ``"channel_last"``, the shape of the input + tensor should be (B, H, W, C). Defaults to "channel_first". 
+ """ + assert x.dim() == 4, ( + f'LayerNorm2d only supports inputs with shape ' + f'(N, C, H, W), but got tensor with shape {x.shape}') + if data_format == 'channel_last': + index = SparseHelper._get_active_map_or_index( + H=x.shape[1], returning_active_map=False) + + # select the features on non-masked positions to form flatten + # features with shape (n, c) + x_flattened = x[index] + # use LayerNorm to normalize this flatten feature (n, c) + x_flattened = super().forward(x_flattened) + + # generate output + x = torch.zeros_like(x, dtype=x_flattened.dtype) + x[index] = x_flattened + elif data_format == 'channel_first': + index = SparseHelper._get_active_map_or_index( + H=x.shape[2], returning_active_map=False) + x_permuted = x.permute(0, 2, 3, 1) + + # select the features on non-masked positions to form flatten + # features with shape (n, c) + x_flattened = x_permuted[index] + # use LayerNorm to normalize this flatten feature (n, c) + x_flattened = super().forward(x_flattened) + + # generate output + x = torch.zeros_like(x_permuted, dtype=x_flattened.dtype) + x[index] = x_flattened + x = x.permute(0, 3, 1, 2).contiguous() + else: + raise NotImplementedError + return x diff --git a/mmpretrain/models/utils/swiglu_ffn.py b/mmpretrain/models/utils/swiglu_ffn.py new file mode 100644 index 0000000..20b4591 --- /dev/null +++ b/mmpretrain/models/utils/swiglu_ffn.py @@ -0,0 +1,98 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn.bricks.drop import build_dropout + +from .layer_scale import LayerScale +from .norm import build_norm_layer + + +class SwiGLUFFN(nn.Module): + """SwiGLU FFN layer. + + Modified from https://github.com/facebookresearch/dinov2/blob/main/dinov2/layers/swiglu_ffn.py + """ # noqa + + def __init__( + self, + embed_dims: int, + feedforward_channels: Optional[int] = None, + out_dims: Optional[int] = None, + layer_scale_init_value: float = 0., + bias: bool = True, + dropout_layer: Optional[dict] = None, + norm_cfg: Optional[dict] = None, + add_identity: bool = True, + ) -> None: + super().__init__() + self.embed_dims = embed_dims + self.out_dims = out_dims or embed_dims + hidden_dims = feedforward_channels or embed_dims + + self.w12 = nn.Linear(self.embed_dims, 2 * hidden_dims, bias=bias) + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, hidden_dims) + else: + self.norm = nn.Identity() + + self.w3 = nn.Linear(hidden_dims, self.out_dims, bias=bias) + + if layer_scale_init_value > 0: + self.gamma2 = LayerScale( + dim=embed_dims, layer_scale_init_value=layer_scale_init_value) + else: + self.gamma2 = nn.Identity() + + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else torch.nn.Identity() + self.add_identity = add_identity + + def forward(self, + x: torch.Tensor, + identity: Optional[torch.Tensor] = None) -> torch.Tensor: + x12 = self.w12(x) + x1, x2 = x12.chunk(2, dim=-1) + hidden = F.silu(x1) * x2 + hidden = self.norm(hidden) + out = self.w3(hidden) + out = self.gamma2(out) + out = self.dropout_layer(out) + + if self.out_dims != self.embed_dims or not self.add_identity: + # due to the dimension inconsistence or user setting + # not to apply residual operation + return out + + if identity is None: + identity = x + return identity + out + + +class SwiGLUFFNFused(SwiGLUFFN): + """SwiGLU FFN layer with fusing. 
+ + Modified from https://github.com/facebookresearch/dinov2/blob/main/dinov2/layers/swiglu_ffn.py + """ # noqa + + def __init__( + self, + embed_dims: int, + feedforward_channels: Optional[int] = None, + out_dims: Optional[int] = None, + layer_scale_init_value: float = 0., + bias: bool = True, + ) -> None: + out_dims = out_dims or embed_dims + feedforward_channels = feedforward_channels or embed_dims + feedforward_channels = (int(feedforward_channels * 2 / 3) + 7) // 8 * 8 + super().__init__( + embed_dims=embed_dims, + feedforward_channels=feedforward_channels, + out_dims=out_dims, + layer_scale_init_value=layer_scale_init_value, + bias=bias, + ) diff --git a/mmpretrain/models/utils/tokenizer.py b/mmpretrain/models/utils/tokenizer.py new file mode 100644 index 0000000..fddda43 --- /dev/null +++ b/mmpretrain/models/utils/tokenizer.py @@ -0,0 +1,188 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import collections +import os + +from mmengine.fileio import list_from_file +from transformers import (AutoTokenizer, BartTokenizer, BasicTokenizer, + BertTokenizer, BertTokenizerFast, LlamaTokenizer, + WordpieceTokenizer) + +from mmpretrain.registry import TOKENIZER +from .huggingface import register_hf_tokenizer + +register_hf_tokenizer(AutoTokenizer) +register_hf_tokenizer(LlamaTokenizer) +register_hf_tokenizer(BertTokenizer) + + +@register_hf_tokenizer() +class BlipTokenizer(BertTokenizerFast): + """"BlipTokenizer inherit BertTokenizerFast (fast, Rust-based).""" + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path, + *init_inputs, + **kwargs, + ): + os.environ['TOKENIZERS_PARALLELISM'] = 'true' + + tokenizer = super().from_pretrained( + pretrained_model_name_or_path, + *init_inputs, + **kwargs, + ) + tokenizer.add_special_tokens({'bos_token': '[DEC]'}) + tokenizer.add_special_tokens({'additional_special_tokens': ['[ENC]']}) + return tokenizer + + +@register_hf_tokenizer() +class Blip2Tokenizer(BertTokenizer): + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path, + *init_inputs, + **kwargs, + ): + tokenizer = super().from_pretrained( + pretrained_model_name_or_path, + *init_inputs, + **kwargs, + ) + tokenizer.add_special_tokens({'bos_token': '[DEC]'}) + return tokenizer + + +@register_hf_tokenizer() +class OFATokenizer(BartTokenizer): + + vocab_files_names = { + 'vocab_file': 'vocab.json', + 'merges_file': 'merges.txt' + } + + pretrained_vocab_files_map = { + 'vocab_file': { + 'OFA-Sys/OFA-tiny': + 'https://huggingface.co/OFA-Sys/OFA-tiny/blob/main/vocab.json', + 'OFA-Sys/OFA-medium': + 'https://huggingface.co/OFA-Sys/OFA-medium/blob/main/vocab.json', + 'OFA-Sys/OFA-base': + 'https://huggingface.co/OFA-Sys/OFA-base/blob/main/vocab.json', + 'OFA-Sys/OFA-large': + 'https://huggingface.co/OFA-Sys/OFA-large/blob/main/vocab.json', + }, + 'merges_file': { + 'OFA-Sys/OFA-tiny': + 'https://huggingface.co/OFA-Sys/OFA-tiny/blob/main/merges.txt', + 'OFA-Sys/OFA-medium': + 'https://huggingface.co/OFA-Sys/OFA-medium/blob/main/merges.txt', + 'OFA-Sys/OFA-base': + 'https://huggingface.co/OFA-Sys/OFA-base/blob/main/merges.txt', + 'OFA-Sys/OFA-large': + 'https://huggingface.co/OFA-Sys/OFA-large/blob/main/merges.txt', + }, + } + + max_model_input_sizes = { + 'OFA-Sys/OFA-tiny': 1024, + 'OFA-Sys/OFA-medium': 1024, + 'OFA-Sys/OFA-base': 1024, + 'OFA-Sys/OFA-large': 1024, + } + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path, + *init_inputs, + **kwargs, + ): + num_bins = kwargs.pop('num_bins', 1000) + tokenizer = 
super().from_pretrained( + pretrained_model_name_or_path, + *init_inputs, + **kwargs, + ) + length = len(tokenizer) + tokenizer.add_tokens([''.format(i) for i in range(8192)]) + tokenizer.code_offset = length + tokenizer.add_tokens([''.format(i) for i in range(num_bins)]) + tokenizer.bin_offset = length + 8192 + tokenizer.num_bins = num_bins + return tokenizer + + +@TOKENIZER.register_module() +class FullTokenizer(BertTokenizer): + """Runs end-to-end tokenziation.""" + + def __init__(self, vocab_file, do_lower_case=True): + self.vocab = self.load_vocab(vocab_file) + self.inv_vocab = {v: k for k, v in self.vocab.items()} + self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) + self.wordpiece_tokenizer = WordpieceTokenizer( + vocab=self.vocab, unk_token='[UNK]', max_input_chars_per_word=200) + + def load_vocab(self, vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + index = 0 + vocab_list = list_from_file(vocab_file) + for token in vocab_list: + if not token: + break + token = token.strip() + vocab[token] = index + index += 1 + return vocab + + def tokenize(self, text): + split_tokens = [] + for token in self.basic_tokenizer.tokenize(text): + for sub_token in self.wordpiece_tokenizer.tokenize(token): + split_tokens.append(sub_token) + + return split_tokens + + def convert_by_vocab(self, vocab, items): + """Converts a sequence of [tokens|ids] using the vocab.""" + output = [] + for item in items: + output.append(vocab[item]) + return output + + def convert_tokens_to_ids(self, tokens): + return self.convert_by_vocab(self.vocab, tokens) + + def convert_ids_to_tokens(self, ids): + return self.convert_by_vocab(self.inv_vocab, ids) + + @staticmethod + def convert_tokens_to_string(tokens, clean_up_tokenization_spaces=True): + """Converts a sequence of tokens (string) in a single string.""" + + def clean_up_tokenization(out_string): + """Clean up a list of simple English tokenization artifacts like + spaces before punctuations and abbreviated forms.""" + out_string = ( + out_string.replace(' .', '.').replace(' ?', '?').replace( + ' !', '!').replace(' ,', ',').replace(" ' ", "'").replace( + " n't", "n't").replace(" 'm", "'m").replace( + " 's", "'s").replace(" 've", + "'ve").replace(" 're", "'re")) + return out_string + + text = ' '.join(tokens).replace(' ##', '').strip() + if clean_up_tokenization_spaces: + clean_text = clean_up_tokenization(text) + return clean_text + else: + return text + + def vocab_size(self): + return len(self.vocab) diff --git a/mmpretrain/models/utils/vector_quantizer.py b/mmpretrain/models/utils/vector_quantizer.py new file mode 100644 index 0000000..7c2ea89 --- /dev/null +++ b/mmpretrain/models/utils/vector_quantizer.py @@ -0,0 +1,232 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
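(Reviewer sketch, not part of the patch: how the FullTokenizer above behaves end to end. The temporary vocab file and its contents are made up for illustration, and transformers must be installed for the module import to succeed.)

import tempfile
from mmpretrain.models.utils.tokenizer import FullTokenizer

# Write a toy WordPiece vocab, one token per line.
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
    f.write('\n'.join(['[UNK]', '[CLS]', '[SEP]', 'hello', 'world', '##s']))
    vocab_path = f.name

tokenizer = FullTokenizer(vocab_path, do_lower_case=True)
tokens = tokenizer.tokenize('Hello worlds')            # ['hello', 'world', '##s']
ids = tokenizer.convert_tokens_to_ids(tokens)          # [3, 4, 5]
text = FullTokenizer.convert_tokens_to_string(tokens)  # 'hello worlds'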
+# Copyright (c) 2022 Microsoft +# Modified from +# https://github.com/microsoft/unilm/blob/master/beit2/norm_ema_quantizer.py +from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange, repeat +from mmengine.dist import all_reduce + + +def ema_inplace(moving_avg: torch.Tensor, new: torch.Tensor, + decay: torch.Tensor) -> None: + """Update moving average.""" + moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay)) + + +def norm_ema_inplace(moving_avg: torch.Tensor, new: torch.Tensor, + decay: torch.Tensor) -> None: + """Update moving average with norm data.""" + moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay)) + moving_avg.data.copy_(F.normalize(moving_avg.data, p=2, dim=-1)) + + +def sample_vectors(samples: torch.Tensor, num: int) -> torch.Tensor: + """Sample vectors according to the given number.""" + num_samples, device = samples.shape[0], samples.device + + if num_samples >= num: + indices = torch.randperm(num_samples, device=device)[:num] + else: + indices = torch.randint(0, num_samples, (num, ), device=device) + + return samples[indices] + + +def kmeans(samples: torch.Tensor, + num_clusters: int, + num_iters: int = 10, + use_cosine_sim: bool = False) -> Tuple[torch.Tensor, torch.Tensor]: + """Run k-means algorithm.""" + dim, dtype, _ = samples.shape[-1], samples.dtype, samples.device + + means = sample_vectors(samples, num_clusters) + + for _ in range(num_iters): + if use_cosine_sim: + dists = samples @ means.t() + else: + diffs = rearrange(samples, 'n d -> n () d') \ + - rearrange(means, 'c d -> () c d') + dists = -(diffs**2).sum(dim=-1) + + buckets = dists.max(dim=-1).indices + bins = torch.bincount(buckets, minlength=num_clusters) + zero_mask = bins == 0 + bins_min_clamped = bins.masked_fill(zero_mask, 1) + + new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype) + new_means.scatter_add_(0, repeat(buckets, 'n -> n d', d=dim), samples) + new_means = new_means / bins_min_clamped[..., None] + + if use_cosine_sim: + new_means = F.normalize(new_means, p=2, dim=-1) + + means = torch.where(zero_mask[..., None], means, new_means) + + return means, bins + + +class EmbeddingEMA(nn.Module): + """The codebook of embedding vectors. + + Args: + num_tokens (int): Number of embedding vectors in the codebook. + codebook_dim (int) : The dimension of embedding vectors in the + codebook. + kmeans_init (bool): Whether to use k-means to initialize the + VectorQuantizer. Defaults to True. + codebook_init_path (str): The initialization checkpoint for codebook. + Defaults to None. 
+ """ + + def __init__(self, + num_tokens: int, + codebook_dim: int, + kmeans_init: bool = True, + codebook_init_path: Optional[str] = None): + super().__init__() + self.num_tokens = num_tokens + self.codebook_dim = codebook_dim + if codebook_init_path is None: + if not kmeans_init: + weight = torch.randn(num_tokens, codebook_dim) + weight = F.normalize(weight, p=2, dim=-1) + else: + weight = torch.zeros(num_tokens, codebook_dim) + self.register_buffer('initted', torch.Tensor([not kmeans_init])) + else: + print(f'load init codebook weight from {codebook_init_path}') + codebook_ckpt_weight = torch.load( + codebook_init_path, map_location='cpu') + weight = codebook_ckpt_weight.clone() + self.register_buffer('initted', torch.Tensor([True])) + + self.weight = nn.Parameter(weight, requires_grad=False) + self.update = True + + @torch.jit.ignore + def init_embed_(self, data: torch.Tensor) -> None: + """Initialize embedding vectors of codebook.""" + if self.initted: + return + print('Performing K-means init for codebook') + embed, _ = kmeans(data, self.num_tokens, 10, use_cosine_sim=True) + self.weight.data.copy_(embed) + self.initted.data.copy_(torch.Tensor([True])) + + def forward(self, embed_id: torch.Tensor) -> torch.Tensor: + """Get embedding vectors.""" + return F.embedding(embed_id, self.weight) + + +class NormEMAVectorQuantizer(nn.Module): + """Normed EMA vector quantizer module. + + Args: + num_embed (int): Number of embedding vectors in the codebook. Defaults + to 8192. + embed_dims (int) : The dimension of embedding vectors in the codebook. + Defaults to 32. + beta (float): The mutiplier for VectorQuantizer embedding loss. + Defaults to 1. + decay (float): The decay parameter of EMA. Defaults to 0.99. + statistic_code_usage (bool): Whether to use cluster_size to record + statistic. Defaults to True. + kmeans_init (bool): Whether to use k-means to initialize the + VectorQuantizer. Defaults to True. + codebook_init_path (str): The initialization checkpoint for codebook. + Defaults to None. 
+ """ + + def __init__(self, + num_embed: int, + embed_dims: int, + beta: float, + decay: float = 0.99, + statistic_code_usage: bool = True, + kmeans_init: bool = True, + codebook_init_path: Optional[str] = None) -> None: + super().__init__() + self.codebook_dim = embed_dims + self.num_tokens = num_embed + self.beta = beta + self.decay = decay + + # learnable = True if orthogonal_reg_weight > 0 else False + self.embedding = EmbeddingEMA( + num_tokens=self.num_tokens, + codebook_dim=self.codebook_dim, + kmeans_init=kmeans_init, + codebook_init_path=codebook_init_path) + + self.statistic_code_usage = statistic_code_usage + if statistic_code_usage: + self.register_buffer('cluster_size', torch.zeros(num_embed)) + + def reset_cluster_size(self, device): + + if self.statistic_code_usage: + self.register_buffer('cluster_size', torch.zeros(self.num_tokens)) + self.cluster_size = self.cluster_size.to(device) + + def forward(self, z): + """Forward function.""" + # reshape z -> (batch, height, width, channel) + z = rearrange(z, 'b c h w -> b h w c') + z = F.normalize(z, p=2, dim=-1) + z_flattened = z.reshape(-1, self.codebook_dim) + + self.embedding.init_embed_(z_flattened) + + # 'n d -> d n' + d = z_flattened.pow(2).sum(dim=1, keepdim=True) + \ + self.embedding.weight.pow(2).sum(dim=1) - 2 * \ + torch.einsum('bd,nd->bn', z_flattened, self.embedding.weight) + + encoding_indices = torch.argmin(d, dim=1) + + z_q = self.embedding(encoding_indices).view(z.shape) + + encodings = F.one_hot(encoding_indices, self.num_tokens).type(z.dtype) + + if not self.training: + with torch.no_grad(): + cluster_size = encodings.sum(0) + all_reduce(cluster_size) + ema_inplace(self.cluster_size, cluster_size, self.decay) + + if self.training and self.embedding.update: + # update cluster size with EMA + bins = encodings.sum(0) + all_reduce(bins) + ema_inplace(self.cluster_size, bins, self.decay) + + zero_mask = (bins == 0) + bins = bins.masked_fill(zero_mask, 1.) + + embed_sum = z_flattened.t() @ encodings + all_reduce(embed_sum) + + embed_normalized = (embed_sum / bins.unsqueeze(0)).t() + embed_normalized = F.normalize(embed_normalized, p=2, dim=-1) + embed_normalized = torch.where(zero_mask[..., None], + self.embedding.weight, + embed_normalized) + + # Update embedding vectors with EMA + norm_ema_inplace(self.embedding.weight, embed_normalized, + self.decay) + + # compute loss for embedding + loss = self.beta * F.mse_loss(z_q.detach(), z) + + # preserve gradients + z_q = z + (z_q - z).detach() + + # reshape back to match original input shape + z_q = rearrange(z_q, 'b h w c -> b c h w') + return z_q, loss, encoding_indices diff --git a/mmpretrain/registry.py b/mmpretrain/registry.py new file mode 100644 index 0000000..cac2bda --- /dev/null +++ b/mmpretrain/registry.py @@ -0,0 +1,195 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""MMPretrain provides 21 registry nodes to support using modules across +projects. Each node is a child of the root registry in MMEngine. + +More details can be found at +https://mmengine.readthedocs.io/en/latest/tutorials/registry.html. 
+""" + +from mmengine.registry import DATA_SAMPLERS as MMENGINE_DATA_SAMPLERS +from mmengine.registry import DATASETS as MMENGINE_DATASETS +from mmengine.registry import EVALUATOR as MMENGINE_EVALUATOR +from mmengine.registry import HOOKS as MMENGINE_HOOKS +from mmengine.registry import LOG_PROCESSORS as MMENGINE_LOG_PROCESSORS +from mmengine.registry import LOOPS as MMENGINE_LOOPS +from mmengine.registry import METRICS as MMENGINE_METRICS +from mmengine.registry import MODEL_WRAPPERS as MMENGINE_MODEL_WRAPPERS +from mmengine.registry import MODELS as MMENGINE_MODELS +from mmengine.registry import \ + OPTIM_WRAPPER_CONSTRUCTORS as MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS +from mmengine.registry import OPTIM_WRAPPERS as MMENGINE_OPTIM_WRAPPERS +from mmengine.registry import OPTIMIZERS as MMENGINE_OPTIMIZERS +from mmengine.registry import PARAM_SCHEDULERS as MMENGINE_PARAM_SCHEDULERS +from mmengine.registry import \ + RUNNER_CONSTRUCTORS as MMENGINE_RUNNER_CONSTRUCTORS +from mmengine.registry import RUNNERS as MMENGINE_RUNNERS +from mmengine.registry import TASK_UTILS as MMENGINE_TASK_UTILS +from mmengine.registry import TRANSFORMS as MMENGINE_TRANSFORMS +from mmengine.registry import VISBACKENDS as MMENGINE_VISBACKENDS +from mmengine.registry import VISUALIZERS as MMENGINE_VISUALIZERS +from mmengine.registry import \ + WEIGHT_INITIALIZERS as MMENGINE_WEIGHT_INITIALIZERS +from mmengine.registry import Registry + +__all__ = [ + 'RUNNERS', 'RUNNER_CONSTRUCTORS', 'LOOPS', 'HOOKS', 'LOG_PROCESSORS', + 'OPTIMIZERS', 'OPTIM_WRAPPERS', 'OPTIM_WRAPPER_CONSTRUCTORS', + 'PARAM_SCHEDULERS', 'DATASETS', 'DATA_SAMPLERS', 'TRANSFORMS', 'MODELS', + 'MODEL_WRAPPERS', 'WEIGHT_INITIALIZERS', 'BATCH_AUGMENTS', 'TASK_UTILS', + 'METRICS', 'EVALUATORS', 'VISUALIZERS', 'VISBACKENDS' +] + +####################################################################### +# mmpretrain.engine # +####################################################################### + +# Runners like `EpochBasedRunner` and `IterBasedRunner` +RUNNERS = Registry( + 'runner', + parent=MMENGINE_RUNNERS, + locations=['mmpretrain.engine'], +) +# Runner constructors that define how to initialize runners +RUNNER_CONSTRUCTORS = Registry( + 'runner constructor', + parent=MMENGINE_RUNNER_CONSTRUCTORS, + locations=['mmpretrain.engine'], +) +# Loops which define the training or test process, like `EpochBasedTrainLoop` +LOOPS = Registry( + 'loop', + parent=MMENGINE_LOOPS, + locations=['mmpretrain.engine'], +) +# Hooks to add additional functions during running, like `CheckpointHook` +HOOKS = Registry( + 'hook', + parent=MMENGINE_HOOKS, + locations=['mmpretrain.engine'], +) +# Log processors to process the scalar log data. +LOG_PROCESSORS = Registry( + 'log processor', + parent=MMENGINE_LOG_PROCESSORS, + locations=['mmpretrain.engine'], +) +# Optimizers to optimize the model weights, like `SGD` and `Adam`. +OPTIMIZERS = Registry( + 'optimizer', + parent=MMENGINE_OPTIMIZERS, + locations=['mmpretrain.engine'], +) +# Optimizer wrappers to enhance the optimization process. +OPTIM_WRAPPERS = Registry( + 'optimizer_wrapper', + parent=MMENGINE_OPTIM_WRAPPERS, + locations=['mmpretrain.engine'], +) +# Optimizer constructors to customize the hyperparameters of optimizers. +OPTIM_WRAPPER_CONSTRUCTORS = Registry( + 'optimizer wrapper constructor', + parent=MMENGINE_OPTIM_WRAPPER_CONSTRUCTORS, + locations=['mmpretrain.engine'], +) +# Parameter schedulers to dynamically adjust optimization parameters. 
+PARAM_SCHEDULERS = Registry( + 'parameter scheduler', + parent=MMENGINE_PARAM_SCHEDULERS, + locations=['mmpretrain.engine'], +) + +####################################################################### +# mmpretrain.datasets # +####################################################################### + +# Datasets like `ImageNet` and `CIFAR10`. +DATASETS = Registry( + 'dataset', + parent=MMENGINE_DATASETS, + locations=['mmpretrain.datasets'], +) +# Samplers to sample the dataset. +DATA_SAMPLERS = Registry( + 'data sampler', + parent=MMENGINE_DATA_SAMPLERS, + locations=['mmpretrain.datasets'], +) +# Transforms to process the samples from the dataset. +TRANSFORMS = Registry( + 'transform', + parent=MMENGINE_TRANSFORMS, + locations=['mmpretrain.datasets'], +) + +####################################################################### +# mmpretrain.models # +####################################################################### + +# Neural network modules inheriting `nn.Module`. +MODELS = Registry( + 'model', + parent=MMENGINE_MODELS, + locations=['mmpretrain.models'], +) +# Model wrappers like 'MMDistributedDataParallel' +MODEL_WRAPPERS = Registry( + 'model_wrapper', + parent=MMENGINE_MODEL_WRAPPERS, + locations=['mmpretrain.models'], +) +# Weight initialization methods like uniform, xavier. +WEIGHT_INITIALIZERS = Registry( + 'weight initializer', + parent=MMENGINE_WEIGHT_INITIALIZERS, + locations=['mmpretrain.models'], +) +# Batch augmentations like `Mixup` and `CutMix`. +BATCH_AUGMENTS = Registry( + 'batch augment', + locations=['mmpretrain.models'], +) +# Task-specific modules like anchor generators and box coders +TASK_UTILS = Registry( + 'task util', + parent=MMENGINE_TASK_UTILS, + locations=['mmpretrain.models'], +) +# Tokenizer to encode sequence +TOKENIZER = Registry( + 'tokenizer', + locations=['mmpretrain.models'], +) + +####################################################################### +# mmpretrain.evaluation # +####################################################################### + +# Metrics to evaluate the model prediction results. +METRICS = Registry( + 'metric', + parent=MMENGINE_METRICS, + locations=['mmpretrain.evaluation'], +) +# Evaluators to define the evaluation process. +EVALUATORS = Registry( + 'evaluator', + parent=MMENGINE_EVALUATOR, + locations=['mmpretrain.evaluation'], +) + +####################################################################### +# mmpretrain.visualization # +####################################################################### + +# Visualizers to display task-specific results. +VISUALIZERS = Registry( + 'visualizer', + parent=MMENGINE_VISUALIZERS, + locations=['mmpretrain.visualization'], +) +# Backends to save the visualization results, like TensorBoard, WandB. +VISBACKENDS = Registry( + 'vis_backend', + parent=MMENGINE_VISBACKENDS, + locations=['mmpretrain.visualization'], +) diff --git a/mmpretrain/structures/__init__.py b/mmpretrain/structures/__init__.py new file mode 100644 index 0000000..e7de863 --- /dev/null +++ b/mmpretrain/structures/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
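(Reviewer sketch, not part of the patch: typical downstream use of the registries declared above. The config values and the custom class name are illustrative only.)

import torch.nn as nn
from mmpretrain.registry import MODELS

# Build a registered module from a config dict. 'type' is looked up in the
# MODELS registry, which lazily imports mmpretrain.models via `locations`.
backbone = MODELS.build(dict(type='ResNet', depth=18))

# Registering a custom module makes it buildable from configs as well.
@MODELS.register_module()
class IdentityNeck(nn.Module):
    """A trivial neck used only to illustrate registration."""

    def forward(self, x):
        return x

neck = MODELS.build(dict(type='IdentityNeck'))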
+from .data_sample import DataSample +from .multi_task_data_sample import MultiTaskDataSample +from .utils import (batch_label_to_onehot, cat_batch_labels, format_label, + format_score, label_to_onehot, tensor_split) + +__all__ = [ + 'DataSample', 'batch_label_to_onehot', 'cat_batch_labels', 'tensor_split', + 'MultiTaskDataSample', 'label_to_onehot', 'format_label', 'format_score' +] diff --git a/mmpretrain/structures/__pycache__/__init__.cpython-310.pyc b/mmpretrain/structures/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62a0e530fa7e5bad64f522becee982a170945682 GIT binary patch literal 489 zcmZ9Iy-ve05XbGN`DmI7Bm^^S7Ai3##LyKMn7o|aB&88{9NCwFH(_SuIgoe=uS~oG z5)!^3RS_ror~B~T{cW>aL3CC>$YF2Z_G#8wcKT|UuiD>jA zHP^9-bs`d-id1JJ(+jcCxyU(tWcB=&i)9_(voiS#dsxYRzL6;JrS4S|qLbS}Vf!H6 z(w=S32lmiCoFfGym9}n0nH<^7H_BWeW`4m7Z z1@Ok~igppcFW1%>Tx;Fi2CZznsq4@V6%Mv>Yk;-W!V+CS(e|FiJ&EBr@&G+g7luXXf>u2bLejgHwcRp0cjMge1{Uu+cd zwER-1+$dwNz>9vRV>j&1L}Q{;ZB#pxjY(a5U*jcSzOV7JDDRt%DPH0BeXTLgCwLXl z8KEDX5L06M&;So}O{-0Q0|~U6o=m+L%55)b-}IW>zK|Fxd=z+Lu!`RFJ8mSdxUsw9 zi;f85Z**E?CHA$;@2p&N)<3zqlGtnS-uQ4O?N?i&?6|SxyBoq!hHp_bl%h%7<$q(Q z5L!d$TEpOa!{mmrxXHDa#VuaAPlhl=aWGQkC5)7EG-Y1FNSSC_2DkYH-YR&r#l%)s zOm0oJj8htBs(cbN)7%s@c$?zWcss#oV2TroO**c*ovtt3f0i}vGHZ)KNY`f^6D*2l zuNn8GU|tXl*>anrZrcpMtMTcG^&-KU8?ksZN5D2jd{>Boaj(@962@cAu+t3#^rFG) zbu4Iwz8~I&{aMQsK987-ZJJ^58R*~z7PuV|VPAIr`b{Zf>3YFDa|6!q3U=4?eHMwB zg;Gh=Mi)Xv;F|KnGNk~P1rhlsnrEGm3m;G7(E&rR3HB`(2*CxfvrobvYr28r=lW5o z_`AU_2<{Hqf)m>fdr~bL=yF}aQ0cZyYB8C|>$II{)9ng2x88$sYfFrDPKUylvKYW2 zMFhieGJ+R$dok9zZJ3x?&@YtU+V0c4F;^D$4O*a;8h$n`gCVV|&o%SG5*jy}vD3k+f z+A{3HH^5xH$y6j|yJLxprl&1Y1lf|V6g&;cV~c3QYsSFtu}##9$8D3XIJQT}jkb?* zS3@c?U`cwxKFX~r0O9Kaa8`DdPofMp%gf6k?S>L)4rOz5gaevr$*{vZ9f+c0p+1UD zpg3JawoYR?zcAkiW!nMvU3LlK*^K8#IK4FY_JxIc_U0Q4wRx6v^`-N*F$M$+XMmJT zEZcF8xOzb=yfob6iRzCKB z-dG(?Q26BZSYqeLrbdb{u}t$3#g)t8`rg%*Ygg7rW5jSM^%6^gJ9lgGb|zqrxiMSV zd9&5JaISf_d6q9OERUAMh50^vmErhdot__iuPYd;h#MoxSpojaX^wV{ z6NLctLrwYpm40-;O!e~{=iXirXBVET`WmI~K)k*g#yAJ)w^1V4cqKRF;ay(l19Q2 zAfAk;Cy~`VyN==m{M~|y--##_V`B?-+E*x6`x+`JbMFLU z`OckTX1H^w-Y5@W9-^e=@orZB!e8+-Vfg;)BAz{zuTP zkwtZGrW91ZkL+2=*;8eNp*_`$oPspTL@uo#Jws)cFM>8o2Gk&mj2*p($|&sl9JMGD zpY~8G#i6Qda!XmF*OT(_*m`It7FC?e0g0^~6N7TGR&5x;J)VxZ~PpNsLfQna}NNQ*vFW}Q{ z7G1+a!g#E28NhNiHV>@6eyHEmgK0HxZWZ>8=%u&_?3XAOzSbT9?S$p6a$o003<_?J zP;9O4&0Y(%n@U0{~I&IY4W7 zH^p0J9V}L)3}s>ww-k?$^Ie&IM?1w(E2^TY0$Npl7JoNa! zqi1f4oy72XZIFw9Nt6X;$m1CVi&6vA2dRhx$u=!>N-yir=xO^JotXJ>8Ln8Z&C2(Q zv+X$esL=Ci>#F0>K{F7fm7Yr-@EO@;Lb=pf{@dwQFK zS3l4X5fitc1wB}f zQumr7cS)(&%H$ZjWx@>PGFhi4J3JXAKzNqT*x0o4A zBxJB*DHxH*Vr^d;491$cXU6&gilV-GN;^dMBNKg!VyAC7rpi`8&p@RyyI9IOsUgmS zr=%LKW=SdtkyBAhPmy3(D# zrb&wQq==IP!zt{3gOL<*=ptapt=G6naC0Gi}fk(_C+sA3zkrg@?` z@Fj8ox-p@+k^Uap=F-+iFDT6X8pi17;Jy#=MD7TT(izAY3I$VQS!km?%~-vQd%uU90*}6)fkM2LOe9m{JcqPSJm2ul|$ZEX$N+&hPT5F+I3UH*e-2rvy8`m k3+KKqN79;JnA)`VtcbnZ&wWFb~LgoX+8a4c2UB{5sub8;P3%>LqbUQ<9(6)5dF#v`3jMa@T>0qGwVx4BoGoKPj63mb#+yB)vv0Z z&COLTJb(H2o9<_emi2e~$Ug;qe2Q2615IEBEVdHXVu{_d6Q|`c8na`!RnWi1R#E?! 
zlCo_vE179kG=&q-Ce>DzS>aq*4BS(je-svv=3Db=B`BbGDJX^uT{kEN?n}2dA38J^ z7LG25c6g=hu2`+b{8-n)NG&Lbi@^+0Vr3sqh~9yQt{xEd^BW+kw9torgRXlvCSC}-WI($lBy7;y}neXC=H@cC^|Or89rpnBHl-M#iwWvtP@+pqz7!k zmaPFBSZ6F?=lEW>PVHYo^@oKHVK}T8x zOR^(dI!A8ThAY$t)=?o~PtlV_*r0T7ow5@{2O3a(AlX@Y0L{;4u(EQF*>p|wnZ@iq zSsh@7xCg4a3sg5WRS{H=Fgri64q#ubUKmMnYKt9tDX`@N@%XD1IDd0a*&lHhkjZ^& z3;$PEP@pf&6-RTheaYW~YLBw?dB~;r+~-l&7hdmJ9%iX0k9(o;Wah>Gei(n;&3NKV z&v;6+Qt|NY^n1OG%P<%c`#jw6Haf8{#KvCQPkP7AH1Ik1kM|nmahdVXVe_%}xV?tv zGg>lo(HwP@%Dv%k5BY)E(DbyoJ}qxG&&?ZnQ=8&@#1UO^s&9DPq>NcSWc!sq?;Pii zZ2xFuFVA>y!+U%fdN6^XNK#juWNZr(M?3NB!ebcCd+m9V@X}0re(L$p{Rrf7I9ddX z<}323*B=toAWDRnb%*+h!>k_%-hSxaZg~5B=^cQJV$J$xRf$BD!uxdyYw1+cSW($& zPnCw2RxZLeQ4Z8qZY0A*Rk}P&+S)S8mYH(nEIr^3B;XXzs%XwsRkkrc;3Dy!Nxb_Ib|Ve_ntUyLyKjqC7L5WG9M z{(#wuTl0D8oVhw23g;GF{)gjGxG76IU~YoKgq5K%LE#+DASl*kC17%P7!>x%@sFy3 zgP?eTpaAcqDuSXgnnO?&{X6dw6rz*y@P8W=|AjC>0PK+9#@oby5CK~M=?FmTwRnx1 ztJEw}vy7%b$H_JL_oyNN;49R4XjGvaXTF>`Bfmvc*QxmtH9w~2`_z0$%{$yr3v&+y zg0`>DEx`2_rrqu%OsH}ghe?=9G39_11e9hpx_^ldavE~j0Xsp)LIztraR%%Rxr?$E z@)+I#Ic(sVOt);ZSxMSwpaU|y17y3v32e%AUCME;zhqph0|fQGeP4DCDdzJqLP(pX z;n<`a=%yiWB)zvv8U2X~+g+UL!wW3md9aWBy*PZ5hqi{Nc>yhUcZVn!aUnfI4ni>r zi0Wi11RufP7=g|H4S*c*A7ml~VKB8)7ZgBVT}Y6|Au`ReC;S9*G&Ls;oUh*C(QZ?B z`7DlrQ$?r!L^;?Sg}_fRpo-dJLKQV*p-MxOs99}OgF_d+WC0A?xp_fnEauU;l_Dzx zqORjzVEhh-I)sa(hdg7P_sN4KsvA@>_Bm>rfCctTOWLSu7*#sR_pXL3_nhI(9e)R` zM!D6$V8r1u4-fqiAa9WLtudaHeLCUkqn|#V5o5x9FX_iJf~ChtMw!YOA1*>4^k`eQ zM~7s5i~IrHDM7u*84?%dm$uIQC>mBxuBFu&-VZq){c!SKn3H!(voWG!VwkB|8S>cr zz4oJA>fYSk^rD1n5?B^x##l$1FeVvSiOorAI9ii4bX#U^Bfu1oTi!z<`bmf=zF>FF zef_)ErJZjLrN6!5J^7^J-QH~s2kw}GI}PurqeBWJkQowGmN&j^8;xnwZq;PmRY zvYVf%InndE2#3df5j86JV}NWYOM06FVorC3qGj9VH^nzE>N&eitkt|9XB|Hlo9K+z z)(eXLQkCFJZ8)>OdKg%vi%jP!fbBLIw;&6yUZWsXv$<$$U73=zO{`n9#y9oHMai$e zRk#i*L8w~22GFyM|E#$D=lE0wjj4PC{i&-S0pS?@et-|Ez4WDq>8l(A2---8074CT zw!eham;l}|x#_Hyi(}&+Z^b#YVs-oBXItCt$G`n@3kbBGuh&POle5nME!JpdC%!Wk zuD86MC_RY7k2TTM)P-fMh@PA^-WG@BAktwtiBkhipHqr>k9`Pa02=FnseiNIII5{L zj9C+Xt}gyL6>9uv)U2baJN!QN$^ZCWYAzIcmoBJ$3K~II_;Rg^yv-k=TfdH5%yAD! z})NXM3bh?Za?iv{3pbZae+#4O&Wh|5!HNxTJa!C!>0bv zfw?S|4t?QE$xS3vp^EzyBGMGL6ehSwjU8B3b{_rut1a!!MtQ0tbfKYxPYa(@SgWpY pR^|Ii7WCtAlWvNF_|_1$WfZnh8oxECR>J+tsokutloyx0e*x1$6;uEK literal 0 HcmV?d00001 diff --git a/mmpretrain/structures/data_sample.py b/mmpretrain/structures/data_sample.py new file mode 100644 index 0000000..ce588b8 --- /dev/null +++ b/mmpretrain/structures/data_sample.py @@ -0,0 +1,167 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from multiprocessing.reduction import ForkingPickler +from typing import Union + +import numpy as np +import torch +from mmengine.structures import BaseDataElement + +from .utils import LABEL_TYPE, SCORE_TYPE, format_label, format_score + + +class DataSample(BaseDataElement): + """A general data structure interface. + + It's used as the interface between different components. + + The following fields are convention names in MMPretrain, and we will set or + get these fields in data transforms, models, and metrics if needed. You can + also set any new fields for your need. + + Meta fields: + img_shape (Tuple): The shape of the corresponding input image. + ori_shape (Tuple): The original shape of the corresponding image. + sample_idx (int): The index of the sample in the dataset. + num_classes (int): The number of all categories. + + Data fields: + gt_label (tensor): The ground truth label. + gt_score (tensor): The ground truth score. + pred_label (tensor): The predicted label. + pred_score (tensor): The predicted score. 
+ mask (tensor): The mask used in masked image modeling. + + Examples: + >>> import torch + >>> from mmpretrain.structures import DataSample + >>> + >>> img_meta = dict(img_shape=(960, 720), num_classes=5) + >>> data_sample = DataSample(metainfo=img_meta) + >>> data_sample.set_gt_label(3) + >>> print(data_sample) + + >>> + >>> # For multi-label data + >>> data_sample = DataSample().set_gt_label([0, 1, 4]) + >>> print(data_sample) + + >>> + >>> # Set one-hot format score + >>> data_sample = DataSample().set_pred_score([0.1, 0.1, 0.6, 0.1]) + >>> print(data_sample) + + >>> + >>> # Set custom field + >>> data_sample = DataSample() + >>> data_sample.my_field = [1, 2, 3] + >>> print(data_sample) + + >>> print(data_sample.my_field) + [1, 2, 3] + """ + + def set_gt_label(self, value: LABEL_TYPE) -> 'DataSample': + """Set ``gt_label``.""" + self.set_field(format_label(value), 'gt_label', dtype=torch.Tensor) + return self + + def set_gt_score(self, value: SCORE_TYPE) -> 'DataSample': + """Set ``gt_score``.""" + score = format_score(value) + self.set_field(score, 'gt_score', dtype=torch.Tensor) + if hasattr(self, 'num_classes'): + assert len(score) == self.num_classes, \ + f'The length of score {len(score)} should be '\ + f'equal to the num_classes {self.num_classes}.' + else: + self.set_field( + name='num_classes', value=len(score), field_type='metainfo') + return self + + def set_pred_label(self, value: LABEL_TYPE) -> 'DataSample': + """Set ``pred_label``.""" + self.set_field(format_label(value), 'pred_label', dtype=torch.Tensor) + return self + + def set_pred_score(self, value: SCORE_TYPE): + """Set ``pred_label``.""" + score = format_score(value) + self.set_field(score, 'pred_score', dtype=torch.Tensor) + if hasattr(self, 'num_classes'): + assert len(score) == self.num_classes, \ + f'The length of score {len(score)} should be '\ + f'equal to the num_classes {self.num_classes}.' + else: + self.set_field( + name='num_classes', value=len(score), field_type='metainfo') + return self + + def set_mask(self, value: Union[torch.Tensor, np.ndarray]): + if isinstance(value, np.ndarray): + value = torch.from_numpy(value) + elif not isinstance(value, torch.Tensor): + raise TypeError(f'Invalid mask type {type(value)}') + self.set_field(value, 'mask', dtype=torch.Tensor) + return self + + def __repr__(self) -> str: + """Represent the object.""" + + def dump_items(items, prefix=''): + return '\n'.join(f'{prefix}{k}: {v}' for k, v in items) + + repr_ = '' + if len(self._metainfo_fields) > 0: + repr_ += '\n\nMETA INFORMATION\n' + repr_ += dump_items(self.metainfo_items(), prefix=' ' * 4) + if len(self._data_fields) > 0: + repr_ += '\n\nDATA FIELDS\n' + repr_ += dump_items(self.items(), prefix=' ' * 4) + + repr_ = f'<{self.__class__.__name__}({repr_}\n\n) at {hex(id(self))}>' + return repr_ + + +def _reduce_datasample(data_sample): + """reduce DataSample.""" + attr_dict = data_sample.__dict__ + convert_keys = [] + for k, v in attr_dict.items(): + if isinstance(v, torch.Tensor): + attr_dict[k] = v.numpy() + convert_keys.append(k) + return _rebuild_datasample, (attr_dict, convert_keys) + + +def _rebuild_datasample(attr_dict, convert_keys): + """rebuild DataSample.""" + data_sample = DataSample() + for k in convert_keys: + attr_dict[k] = torch.from_numpy(attr_dict[k]) + data_sample.__dict__ = attr_dict + return data_sample + + +# Due to the multi-processing strategy of PyTorch, DataSample may consume many +# file descriptors because it contains multiple tensors. 
Here we overwrite the +# reduce function of DataSample in ForkingPickler and convert these tensors to +# np.ndarray during pickling. It may slightly influence the performance of +# dataloader. +ForkingPickler.register(DataSample, _reduce_datasample) diff --git a/mmpretrain/structures/multi_task_data_sample.py b/mmpretrain/structures/multi_task_data_sample.py new file mode 100644 index 0000000..f009938 --- /dev/null +++ b/mmpretrain/structures/multi_task_data_sample.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from mmengine.structures import BaseDataElement + + +class MultiTaskDataSample(BaseDataElement): + + @property + def tasks(self): + return self._data_fields diff --git a/mmpretrain/structures/utils.py b/mmpretrain/structures/utils.py new file mode 100644 index 0000000..a4f9e95 --- /dev/null +++ b/mmpretrain/structures/utils.py @@ -0,0 +1,153 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Sequence, Union + +import numpy as np +import torch +import torch.nn.functional as F +from mmengine.utils import is_str + +if hasattr(torch, 'tensor_split'): + tensor_split = torch.tensor_split +else: + # A simple implementation of `tensor_split`. + def tensor_split(input: torch.Tensor, indices: list): + outs = [] + for start, end in zip([0] + indices, indices + [input.size(0)]): + outs.append(input[start:end]) + return outs + + +LABEL_TYPE = Union[torch.Tensor, np.ndarray, Sequence, int] +SCORE_TYPE = Union[torch.Tensor, np.ndarray, Sequence] + + +def format_label(value: LABEL_TYPE) -> torch.Tensor: + """Convert various python types to label-format tensor. + + Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, + :class:`Sequence`, :class:`int`. + + Args: + value (torch.Tensor | numpy.ndarray | Sequence | int): Label value. + + Returns: + :obj:`torch.Tensor`: The foramtted label tensor. + """ + + # Handle single number + if isinstance(value, (torch.Tensor, np.ndarray)) and value.ndim == 0: + value = int(value.item()) + + if isinstance(value, np.ndarray): + value = torch.from_numpy(value).to(torch.long) + elif isinstance(value, Sequence) and not is_str(value): + value = torch.tensor(value).to(torch.long) + elif isinstance(value, int): + value = torch.LongTensor([value]) + elif not isinstance(value, torch.Tensor): + raise TypeError(f'Type {type(value)} is not an available label type.') + assert value.ndim == 1, \ + f'The dims of value should be 1, but got {value.ndim}.' + + return value + + +def format_score(value: SCORE_TYPE) -> torch.Tensor: + """Convert various python types to score-format tensor. + + Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, + :class:`Sequence`. + + Args: + value (torch.Tensor | numpy.ndarray | Sequence): Score values. + + Returns: + :obj:`torch.Tensor`: The foramtted score tensor. + """ + + if isinstance(value, np.ndarray): + value = torch.from_numpy(value).float() + elif isinstance(value, Sequence) and not is_str(value): + value = torch.tensor(value).float() + elif not isinstance(value, torch.Tensor): + raise TypeError(f'Type {type(value)} is not an available label type.') + assert value.ndim == 1, \ + f'The dims of value should be 1, but got {value.ndim}.' + + return value + + +def cat_batch_labels(elements: List[torch.Tensor]): + """Concat a batch of label tensor to one tensor. + + Args: + elements (List[tensor]): A batch of labels. 
+ + Returns: + Tuple[torch.Tensor, List[int]]: The first item is the concated label + tensor, and the second item is the split indices of every sample. + """ + labels = [] + splits = [0] + for element in elements: + labels.append(element) + splits.append(splits[-1] + element.size(0)) + batch_label = torch.cat(labels) + return batch_label, splits[1:-1] + + +def batch_label_to_onehot(batch_label, split_indices, num_classes): + """Convert a concated label tensor to onehot format. + + Args: + batch_label (torch.Tensor): A concated label tensor from multiple + samples. + split_indices (List[int]): The split indices of every sample. + num_classes (int): The number of classes. + + Returns: + torch.Tensor: The onehot format label tensor. + + Examples: + >>> import torch + >>> from mmpretrain.structures import batch_label_to_onehot + >>> # Assume a concated label from 3 samples. + >>> # label 1: [0, 1], label 2: [0, 2, 4], label 3: [3, 1] + >>> batch_label = torch.tensor([0, 1, 0, 2, 4, 3, 1]) + >>> split_indices = [2, 5] + >>> batch_label_to_onehot(batch_label, split_indices, num_classes=5) + tensor([[1, 1, 0, 0, 0], + [1, 0, 1, 0, 1], + [0, 1, 0, 1, 0]]) + """ + sparse_onehot_list = F.one_hot(batch_label, num_classes) + onehot_list = [ + sparse_onehot.sum(0) + for sparse_onehot in tensor_split(sparse_onehot_list, split_indices) + ] + return torch.stack(onehot_list) + + +def label_to_onehot(label: LABEL_TYPE, num_classes: int): + """Convert a label to onehot format tensor. + + Args: + label (LABEL_TYPE): Label value. + num_classes (int): The number of classes. + + Returns: + torch.Tensor: The onehot format label tensor. + + Examples: + >>> import torch + >>> from mmpretrain.structures import label_to_onehot + >>> # Single-label + >>> label_to_onehot(1, num_classes=5) + tensor([0, 1, 0, 0, 0]) + >>> # Multi-label + >>> label_to_onehot([0, 2, 3], num_classes=5) + tensor([1, 0, 1, 1, 0]) + """ + label = format_label(label) + sparse_onehot = F.one_hot(label, num_classes) + return sparse_onehot.sum(0) diff --git a/mmpretrain/utils/__init__.py b/mmpretrain/utils/__init__.py new file mode 100644 index 0000000..991e321 --- /dev/null +++ b/mmpretrain/utils/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
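(Reviewer sketch, not part of the patch: how cat_batch_labels and batch_label_to_onehot above compose over a batch of multi-label samples; it mirrors the docstring example.)

import torch
from mmpretrain.structures import batch_label_to_onehot, cat_batch_labels

# Three samples with a different number of labels each.
labels = [torch.tensor([0, 1]), torch.tensor([0, 2, 4]), torch.tensor([3, 1])]

batch_label, splits = cat_batch_labels(labels)
# batch_label -> tensor([0, 1, 0, 2, 4, 3, 1]), splits -> [2, 5]

onehot = batch_label_to_onehot(batch_label, splits, num_classes=5)
# tensor([[1, 1, 0, 0, 0],
#         [1, 0, 1, 0, 1],
#         [0, 1, 0, 1, 0]])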
+from .analyze import load_json_log +from .collect_env import collect_env +from .dependency import require +from .misc import get_ori_model +from .progress import track, track_on_main_process +from .setup_env import register_all_modules + +__all__ = [ + 'collect_env', 'register_all_modules', 'track_on_main_process', + 'load_json_log', 'get_ori_model', 'track', 'require' +] diff --git a/mmpretrain/utils/__pycache__/__init__.cpython-310.pyc b/mmpretrain/utils/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..541fb6b93c0d3a57dc8511cf28e602780a92db11 GIT binary patch literal 481 zcmYjN!A`^=5QT2LZFjfEgMLI0x<6oyU*N@y;X;~%4LXzx)MWh<&wBDl{0Od|`~?#e z8G6|vymo0B&0vvN?*OUpX| zteQc(wzR7#Z7YNl0VlQv!WtouaN9Pa?8XibuX4G^8nzK~lB1Age@JN|*UI&&HdIk} zuu*vFf}KO{q@O38B*}%6r#kVkPt;xw5;7^VY6PmPD|+K zRIZ3C$M(pJ@DRRo;t@a~nAs$4L{-Y6UfG_R&z||l>#kd2VyG-)N6%g`S{&om5%bm0^9gEOLr=HOx9 z`FLaG$#=fKU~19nIb>4*t8XFEh+6D*;^8+kd+y=8LX@?)fx{}vTDySY_Z&LfYI8xyB_vjX;6WmIsFEG24S$uXq!X)d#*N}IcSIDL%&$~9 zv~}Ih+J%JLbSb}?8C`4hr#;Ej!1TD<<_2WM(FNa*Vma~fSek)OW}Kk=@m4LfL2Ni0 zkc|nV1W4>E9vRus>bz<+n5qTa>P`?>>wH`3BQ_YZSP>=NA}VGD%OVjNiG~S-M{k3E z8q4D}))OwrHm(LH9hmkwSxnLQfr^l~UdAeX-EQ-AXnHC+#&ONw5e3w+trR>~+Wf@{ z2;seItgCI_^j9&ds4okp*#b?bD(r!ZNM>Y;VzW7_pM#&)Ir&*T)sOG{CwOk1u&ls8 z`Nz=&VK~r1IBXb|u0r~2t>T!bhjHpr{JrmX)5imy>}&n^zB^5S?d*MLr#dPUuQ}E$ zE`xO;ZA-OvVy*Gf&`^A2qgJvEFT7%*P>O!Nf^@uS|*N|{E#pK4i9nH2{ninB(L1o7~l^wXhTz}TH> zAb9aeb_Is{>+`fs7wM9-M4g|N-)wH_JDby&0IE3}pRz(OBF+pK0kGw}KxYukd-wuo K42CkDcmDvC*F|*z literal 0 HcmV?d00001 diff --git a/mmpretrain/utils/__pycache__/collect_env.cpython-310.pyc b/mmpretrain/utils/__pycache__/collect_env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4f988f90a7a18ada231ed2ec56927e978eccf6b GIT binary patch literal 649 zcmY*X&1%~~5Z+nIvZ~l=Z*HImUq!)gq4X3&!H}CnprHp{7FD~pHtdgBtsN5FQ`&dv zu|4)h`VhMIlvfB8I;%9)9WkSs-EU^T*td~|ZFj)f zR=5`XGS)?nZMsbB#t9$tT$nni#q%raYvw~oh8_EcEKu%Jtkioyh#MjJ{?*)B>*xP! z?7JI}4CjwbFm={kyKtjVP}`87&qOMrcI5#Wl@CGqPG%RB2W8 zyBXS1wN(Y$5|ty;s)RaTH9@5slm=_DMXFLSQ;72NLYU9^}NK`+R? zFQm%gScf9h@l}D@hUX9PWZ%Oes9+CC4=nnQKBCVN!Ji`sG^cmikPeA3h4qzjz(hsZ zM`XxQ>jO4qqWU~JFmhwa4yiDAxq1uMOhNw6o+qEP1AbtlouZD`iN$ZtA&31u=3=S* zI`~b_hX(MnhFp+7$v)1_+!|VUDu;B)4;j!V`bTYlLd2PE0^{;F;Uu^BJxcKYRIl76 z?%C#uUKr7#YmC_D#)xffjOgZwx<8`X;6OX_?`3Hi``4`(`+vG8xH_^2z_?|T`7c^CiuvrhIwKbI_`C1gEO6JNB;#m@65oT*brZN^X z?(8?x{i3!WLZCWHly1(WTvTF(3|`6n9HPAMDY@4Vl#FDYH`c?XQM5R)?sEh;R} z>q*bUS?-l{FE8hCGQ)S*RFK<5VXCV+7)Eq`z}7ch)@Ao~o-1YPw{adR^p{&H3^1xNQa>-u)K_^{RMIjj3znqH!6RHJ?(({t3K{Hev*Pu!|MVU#oWM7;t>JLtyA zh1i`u*)qJ`C--TWVU{1+n7M#5!@mliyyhUpQmLGz>r5c#bQ9&sdk{@$Y?Lz-l&n#! 
zIdEN5z3d%L;TyQaxd2JxzT@27k@z|e`3Z~IkK{!s40ff{ZUgT)12GYxv)w-7f}BA1 z>F=8xR~oNfZCq}*8z&osILrMo)FLlBNs5^F!?E!3N64onb|BFrKNikT0Qux>$*$iI z^X!BXO3V|dnZ%&n-JPJb<3tj4!^M6koz*l*oryHCYPHni85m*8V1Tn5Fy!PIN&`L)j5rm`CIgXpbKQ9--_|j}NgNTFnlyJh?Fc zuztM+Pj(fCV|s@?q{#RYJ7C8wT@b+L8>nFq45&^JZJ&8W8ETAkh8Z~o%whM~t zn7@vOBV-mBpoFdf9tM}j`8h2Zj!){&AI2ruz*HxMvgDQn+RS01XW`)63S730NdKGCj`Z3c!Qm^4U zE!VaD%GlV;@F81*p-wHfOl@nKa-cXJoc|?dv?*ifYsLywsx{hkQPye*w4dG4!T-13rQYSh&E#GcXlA$Yu2Zo~^F; zWGwHcYI6(r=%};5a>AJPwDwe?yIt0rOZPiTC&`l}%myp7m1R;no+*xqR@5}R0K<@M zkz0%ZfFjw|Fw!LwKy?hQUKn4&uTH78%TFT_!-Y&V5*L6#*aN=k1Na{Re6i->pHl#p z^)!Ow6I_ZC;fK>28OZFHz;z0e(;oYo=A+r%T5ao+IlJ=`)UC$pJq>X8MbHu4eF-Mj zfZ;J!Ye?%Bg@M*-h6OCcp2wgQ0k*Cv*@NaQl{XdYX>ji(p^)llAqHZQbzy;y{ZPUm z#Nw1qxnvY3!apnQ@{8Kn;!ADTQ5&6mFHO&(@3hUc;;*B+_%jD>lPy{NKk6Eu)fu*^ zUI4nN>c)oYp&_OVOe#CMMqRNS=n54Ex4j5DzVLHjH~I1}U?GAZkzNqXhW?k)KLaI3 zG5reznow);lj}D>@ILzUjhoj$`qS368>I%*#((19!d@@^v55sHI|li`Jd|M{{Zau>Jk6| literal 0 HcmV?d00001 diff --git a/mmpretrain/utils/__pycache__/misc.cpython-310.pyc b/mmpretrain/utils/__pycache__/misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99d8387e33ebd24b2e5e42af69fe3e14d64bec86 GIT binary patch literal 591 zcmZ`$u};G<5Veyu4ONQ}Ba=t6q!MF=5ETOx48YW-3bot1ik--IRDrs|NAL^$NI!tl zD-*wfgv2=sQ5H_R%R4Lg?)f<%jRpwf?PtIIzzBW1rW*k?myr4l3PluWNZ=X94ttp= z{Fx8fXLu9Pz}00ydo+9vW<84UQQZH6IdmIid&KozB^R*1 z4Tn9*D@c6|#h@K}!K%MQ8=&1GpvKSpYi3BU_=?LkBNZFrOJdfHa9Nb*??{uhfz9e< zFdz_9wbGLcxvri+l`^@>X_>KjLZ;23NT0|f`-kPOlHVa}87AZbq}04yZq|7#sny{Y z$DVR`wITfJ_G0Bj`;JrypYM3A^4yGtD8MI`a@mEY;h7!_t`|x1WJfDz=HN%|vzykg zL#DRpZ^*&WV<)T-81G`s!Pk{eFkQ`Q!Sp$;2h(Z*=aS z7C2vn=-)vJB4|c(8d8cj%UJG&&cf!o8@i3{WM1A1TVV6hm+np9 zg5R5yIS>1?CHuk`?MoI8#H#4Zq3B5}S00fG7kx3faKlx&ABvR=8jb{eMuO2B=!FE- z`af1CEsHqY0f#?LkLCBLqu)%dpO1yqiAtO2vt4-Y=!5bk(v_6r4NeSKBPAzF>Vv3~ zD$+733t@@v-ifrXDNURu4%S;70Eu9N1VsM>>J>3Wb!ze*K=K1Y0G<&;uh<-H;~esu z)=bb#cIJTR*3Khx#!sJs?bW1q1iNI*$X`j#4_kuFi24%V{n`P)jj@oaBOJjmFk^R{ zyr!?IdJ0tl#HLDro9wl*!quR5=CGclZR`W(UCzD#k@NVyoc_I>#R)kffeUD>HcSo} zwh-(n6$u4@c^DWd8s#CVSEMF;-WjdwA^8OwK4REdKyhvUBrQ^XAVt6|8$Y%jGyX}` z0OJ1rVndas*~s%sNu%PlxNFW#n(2*7l~dTF9#=1Idvhua`Jz%=A7g0_ik+{K({+eV zo{u-PGKn+2HGcP5FtlqT)$x8Nqh+FKKUTIo&C2~ai!$Jpb+w6=vBSmP@80k z?3%GA`X0rR?nv{7;$MM*6u+W#qFRQj!FyD9fX1w*KvQQ)y;{*Y>9FPVS zQyt~7NyIyWy{T1rZF&eC^$FUy1K%Ihe$b#BmBqaTpF@ns7>J9|GIX9 z2i7w$tF)LZq@?ZVxh$q>A;;K_`n0)IX>u^WUF*^c+sy-VKp`egtJeQ%!8qIgCRX2; z*(@*YXcv~cd|2E)*iCc!v&3Pc{LQ6W!#ubv>p*$yPO>sK&G=2*cQ)G$kL$JmW?qU} fCb#fMrauDJqdf*fIpeTn4|eNO{JA=`GKGAP$ literal 0 HcmV?d00001 diff --git a/mmpretrain/utils/__pycache__/setup_env.cpython-310.pyc b/mmpretrain/utils/__pycache__/setup_env.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b94f2473be9961bb6d65b7e89e3436998af722b0 GIT binary patch literal 1572 zcmZuxL2uhO6ecCvQC+8KvJNP)9f+VnYrv5%I|bXYp~HY3)i_B$omEBl{7$4m<206a|XyQIcc1Ed}xMQIFqy?|V;FySqJt82a)0bo!b2WFz&o7I1K#C3FFIpfACtkIfAJ)F zHlSty2bd>CW{*>~f>F6Iq_7iS?@f%`EV~+Pp=Ck%3`C zvOHg5jyl1mkXA9f1g0xnEx<&*9>ejyUYMh5<+P7^VozpDWrHL3%MuVkS0*CK#06A< zEhi1Xw_pyi;yPkK&Dls7FszO&GBryw)|l5O>`Y+rGhhZRvuX=ty74g`vPAMK(M=}J z*+OLeHvPF)`4-3qO+uz>=9NFpv#n=ff?yRR)Q1bk32I z58_=~yD~>{ybyL-%%W7~F*ck$%=0Xn#d!{LAtb~zq#@3+Ix$zNi9yD;uu3DhCT@~D zk8-}{wrOjFE-ON#>Z^7={H*M4a>QStS5ZHaNe&lZj4+Y3&>E!ufcJxoKMt5M$b@AF z8-Rl$o7O>qo?u^1n!cLWWY3jO>-v8h6~n_?w(+VSuuJsRID_9-kXqnF)^O=6KmE7? 
zzxfdffoXJccN5*=;NJMNjT-U9Vl}K8@3l5-n#Z5Fw@#C+NUTtD%Mx>g%#>|QV|9_@ z{QzeCumx}`OyPRJ!qE8dk@CR;?4)5~BBbk~6w1EaU27VzJ5q*ciIzexOc}UU8Di96 zMGmDe)!87>K3Zva{gk_Fy!xuzlW=wmR#o&*qVJ>OyC~L5S?sCxpWa>;R$wb3FU4vC z@-$kVmk&Nw(-4u&Bb>s1NY_u`1vK=', '==', '>']) + ')' + parts = re.split(pat, dep, maxsplit=1) + parts = [p.strip() for p in parts] + package = parts[0] + if len(parts) > 1: + op, version = parts[1:] + op = { + '>=': '__ge__', + '==': '__eq__', + '>': '__gt__', + '<': '__lt__', + '<=': '__le__' + }[op] + else: + op, version = None, None + + try: + dist = distribution(package) + if op is None or getattr(digit_version(dist.version), op)( + digit_version(version)): + return True + except PackageNotFoundError: + pass + + return False + + +def require(dep, install=None): + """A wrapper of function for extra package requirements. + + Args: + dep (str): The dependency package name, like ``transformers`` + or ``transformers>=4.28.0``. + install (str, optional): The installation command hint. Defaults + to None, which means to use "pip install dep". + """ + + def wrapper(fn): + assert isfunction(fn) + + @wraps(fn) + def ask_install(*args, **kwargs): + name = fn.__qualname__.replace('.__init__', '') + ins = install or f'pip install "{dep}"' + raise ImportError( + f'{name} requires {dep}, please install it by `{ins}`.') + + if satisfy_requirement(dep): + fn._verify_require = getattr(fn, '_verify_require', lambda: None) + return fn + + ask_install._verify_require = ask_install + return ask_install + + return wrapper + + +WITH_MULTIMODAL = all( + satisfy_requirement(item) + for item in ['pycocotools', 'transformers>=4.28.0']) + + +def register_multimodal_placeholder(names, registry): + for name in names: + + def ask_install(*args, **kwargs): + raise ImportError( + f'{name} requires extra multi-modal dependencies, please ' + 'install it by `pip install "mmpretrain[multimodal]"` ' + 'or `pip install -e ".[multimodal]"`.') + + registry.register_module(name=name, module=ask_install) diff --git a/mmpretrain/utils/misc.py b/mmpretrain/utils/misc.py new file mode 100644 index 0000000..cc53267 --- /dev/null +++ b/mmpretrain/utils/misc.py @@ -0,0 +1,18 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +from mmengine.model import is_model_wrapper + + +def get_ori_model(model: nn.Module) -> nn.Module: + """Get original model if the input model is a model wrapper. + + Args: + model (nn.Module): A model may be a model wrapper. + + Returns: + nn.Module: The model without model wrapper. + """ + if is_model_wrapper(model): + return model.module + else: + return model diff --git a/mmpretrain/utils/progress.py b/mmpretrain/utils/progress.py new file mode 100644 index 0000000..b23f976 --- /dev/null +++ b/mmpretrain/utils/progress.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Optional + +import mmengine.dist as dist +import rich.progress as progress +from rich.live import Live + +disable_progress_bar = False +global_progress = progress.Progress( + '{task.description}', + progress.BarColumn(), + progress.TaskProgressColumn(show_speed=True), + progress.TimeRemainingColumn(), +) +global_live = Live(global_progress, refresh_per_second=10) + + +def track(sequence, description: str = '', total: Optional[float] = None): + if disable_progress_bar: + yield from sequence + else: + global_live.start() + task_id = global_progress.add_task(description, total=total) + task = global_progress._tasks[task_id] + try: + yield from global_progress.track(sequence, task_id=task_id) + finally: + if task.total is None: + global_progress.update(task_id, total=task.completed) + if all(task.finished for task in global_progress.tasks): + global_live.stop() + for task_id in global_progress.task_ids: + global_progress.remove_task(task_id) + + +def track_on_main_process(sequence, description='', total=None): + if not dist.is_main_process() or disable_progress_bar: + yield from sequence + else: + yield from track(sequence, total=total, description=description) diff --git a/mmpretrain/utils/setup_env.py b/mmpretrain/utils/setup_env.py new file mode 100644 index 0000000..1b57b84 --- /dev/null +++ b/mmpretrain/utils/setup_env.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import datetime +import warnings + +from mmengine import DefaultScope + + +def register_all_modules(init_default_scope: bool = True) -> None: + """Register all modules in mmpretrain into the registries. + + Args: + init_default_scope (bool): Whether initialize the mmpretrain default + scope. If True, the global default scope will be set to + `mmpretrain`, and all registries will build modules from + mmpretrain's registry node. To understand more about the registry, + please refer to + https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md + Defaults to True. + """ # noqa: E501 + import mmpretrain.datasets # noqa: F401,F403 + import mmpretrain.engine # noqa: F401,F403 + import mmpretrain.evaluation # noqa: F401,F403 + import mmpretrain.models # noqa: F401,F403 + import mmpretrain.structures # noqa: F401,F403 + import mmpretrain.visualization # noqa: F401,F403 + + if not init_default_scope: + return + + current_scope = DefaultScope.get_current_instance() + if current_scope is None: + DefaultScope.get_instance('mmpretrain', scope_name='mmpretrain') + elif current_scope.scope_name != 'mmpretrain': + warnings.warn( + f'The current default scope "{current_scope.scope_name}" ' + 'is not "mmpretrain", `register_all_modules` will force ' + 'the current default scope to be "mmpretrain". If this is ' + 'not expected, please set `init_default_scope=False`.') + # avoid name conflict + new_instance_name = f'mmpretrain-{datetime.datetime.now()}' + DefaultScope.get_instance(new_instance_name, scope_name='mmpretrain') diff --git a/mmpretrain/version.py b/mmpretrain/version.py new file mode 100644 index 0000000..1822b7f --- /dev/null +++ b/mmpretrain/version.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved + +__version__ = '1.2.0' + + +def parse_version_info(version_str): + """Parse a version string into a tuple. + + Args: + version_str (str): The version string. + Returns: + tuple[int | str]: The version info, e.g., "1.3.0" is parsed into + (1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1'). 
+    """
+    version_info = []
+    for x in version_str.split('.'):
+        if x.isdigit():
+            version_info.append(int(x))
+        elif x.find('rc') != -1:
+            patch_version = x.split('rc')
+            version_info.append(int(patch_version[0]))
+            version_info.append(f'rc{patch_version[1]}')
+    return tuple(version_info)
+
+
+version_info = parse_version_info(__version__)
+
+__all__ = ['__version__', 'version_info', 'parse_version_info']
diff --git a/mmpretrain/visualization/__init__.py b/mmpretrain/visualization/__init__.py
new file mode 100644
index 0000000..0dbeecf
--- /dev/null
+++ b/mmpretrain/visualization/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .utils import create_figure, get_adaptive_scale
+from .visualizer import UniversalVisualizer
+
+__all__ = ['UniversalVisualizer', 'get_adaptive_scale', 'create_figure']
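As a quick illustration of the dependency and version helpers added above, here is a minimal usage sketch. It assumes `require` and `satisfy_requirement` are re-exported from `mmpretrain.utils` (the package `__init__.py` is not part of this excerpt), and `some_optional_pkg` is a purely hypothetical package name used only to show the failure path:

```python
from mmpretrain.utils import require, satisfy_requirement
from mmpretrain.version import parse_version_info

# "2.0.0rc1" -> (2, 0, 0, 'rc1'), as described in the docstring above.
print(parse_version_info('2.0.0rc1'))

# True only if an installed distribution named "numpy" satisfies ">=1.20".
print(satisfy_requirement('numpy>=1.20'))


@require('some_optional_pkg>=1.0', install='pip install some-optional-pkg')
def optional_feature():
    """Only usable when the (hypothetical) optional package is installed."""


# If the requirement is not satisfied, the decorator replaces the function
# with a stub that raises ImportError carrying the install hint; otherwise
# the original function is returned unchanged and this call succeeds.
optional_feature()
```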
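Similarly, `register_all_modules` from `setup_env.py` above is the usual entry point before building objects from config-style dictionaries. The sketch below pairs it with the `UniversalVisualizer` registered later in this patch; the `vis_backends` entry mirrors the DINO config further down, and the `save_dir` value is only an example:

```python
from mmpretrain.registry import VISUALIZERS
from mmpretrain.utils import register_all_modules

# Populate all of mmpretrain's registries and set the default scope to
# "mmpretrain", so string "type" fields in configs resolve to the classes
# added in this patch.
register_all_modules(init_default_scope=True)

# Build the UniversalVisualizer from a config-style dict.
visualizer = VISUALIZERS.build(
    dict(
        type='UniversalVisualizer',
        vis_backends=[dict(type='LocalVisBackend')],
        save_dir='work_dirs/vis',  # example output directory
    ))
```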
diff --git a/mmpretrain/visualization/utils.py b/mmpretrain/visualization/utils.py
new file mode 100644
index 0000000..91a1d81
--- /dev/null
+++ b/mmpretrain/visualization/utils.py
@@ -0,0 +1,60 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import TYPE_CHECKING, Tuple
+
+if TYPE_CHECKING:
+    from matplotlib.figure import Figure
+
+
+def get_adaptive_scale(img_shape: Tuple[int, int],
+                       min_scale: float = 0.3,
+                       max_scale: float = 3.0) -> float:
+    """Get adaptive scale according to image shape.
+
+    The target scale depends on the short edge length of the image. If the
+    short edge length equals 224, the output is 1.0, and the output scales
+    linearly with the short edge length.
+
+    You can also specify the minimum scale and the maximum scale to limit the
+    linear scale.
+
+    Args:
+        img_shape (Tuple[int, int]): The shape of the canvas image.
+        min_scale (float): The minimum scale. Defaults to 0.3.
+        max_scale (float): The maximum scale. Defaults to 3.0.
+
+    Returns:
+        float: The adaptive scale.
+    """
+    short_edge_length = min(img_shape)
+    scale = short_edge_length / 224.
+    return min(max(scale, min_scale), max_scale)
+
+
+def create_figure(*args, margin=False, **kwargs) -> 'Figure':
+    """Create an independent figure.
+
+    Different from :func:`plt.figure`, the figure from this function won't
+    be managed by matplotlib. It uses
+    :obj:`matplotlib.backends.backend_agg.FigureCanvasAgg` as its canvas, so
+    you can use the ``canvas`` attribute to access the drawn image.
+
+    Args:
+        *args: All positional arguments of :class:`matplotlib.figure.Figure`.
+        margin (bool): Whether to reserve the white edges of the figure.
+            Defaults to False.
+        **kwargs: All keyword arguments of :class:`matplotlib.figure.Figure`.
+
+    Returns:
+        matplotlib.figure.Figure: The created figure.
+ """ + from matplotlib.backends.backend_agg import FigureCanvasAgg + from matplotlib.figure import Figure + + figure = Figure(*args, **kwargs) + FigureCanvasAgg(figure) + + if not margin: + # remove white edges by set subplot margin + figure.subplots_adjust(left=0, right=1, bottom=0, top=1) + + return figure diff --git a/mmpretrain/visualization/visualizer.py b/mmpretrain/visualization/visualizer.py new file mode 100644 index 0000000..5d18ca8 --- /dev/null +++ b/mmpretrain/visualization/visualizer.py @@ -0,0 +1,777 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +import torch +import torch.nn.functional as F +from mmengine.dataset import BaseDataset +from mmengine.dist import master_only +from mmengine.visualization import Visualizer +from mmengine.visualization.utils import img_from_canvas + +from mmpretrain.registry import VISUALIZERS +from mmpretrain.structures import DataSample +from .utils import create_figure, get_adaptive_scale + + +@VISUALIZERS.register_module() +class UniversalVisualizer(Visualizer): + """Universal Visualizer for multiple tasks. + + Args: + name (str): Name of the instance. Defaults to 'visualizer'. + image (np.ndarray, optional): the origin image to draw. The format + should be RGB. Defaults to None. + vis_backends (list, optional): Visual backend config list. + Defaults to None. + save_dir (str, optional): Save file dir for all storage backends. + If it is None, the backend storage will not save any data. + fig_save_cfg (dict): Keyword parameters of figure for saving. + Defaults to empty dict. + fig_show_cfg (dict): Keyword parameters of figure for showing. + Defaults to empty dict. + """ + DEFAULT_TEXT_CFG = { + 'family': 'monospace', + 'color': 'white', + 'bbox': dict(facecolor='black', alpha=0.5, boxstyle='Round'), + 'verticalalignment': 'top', + 'horizontalalignment': 'left', + } + + @master_only + def visualize_cls(self, + image: np.ndarray, + data_sample: DataSample, + classes: Optional[Sequence[str]] = None, + draw_gt: bool = True, + draw_pred: bool = True, + draw_score: bool = True, + resize: Optional[int] = None, + rescale_factor: Optional[float] = None, + text_cfg: dict = dict(), + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + name: str = '', + step: int = 0) -> None: + """Visualize image classification result. + + This method will draw an text box on the input image to visualize the + information about image classification, like the ground-truth label and + prediction label. + + Args: + image (np.ndarray): The image to draw. The format should be RGB. + data_sample (:obj:`DataSample`): The annotation of the image. + classes (Sequence[str], optional): The categories names. + Defaults to None. + draw_gt (bool): Whether to draw ground-truth labels. + Defaults to True. + draw_pred (bool): Whether to draw prediction labels. + Defaults to True. + draw_score (bool): Whether to draw the prediction scores + of prediction categories. Defaults to True. + resize (int, optional): Resize the short edge of the image to the + specified length before visualization. Defaults to None. + rescale_factor (float, optional): Rescale the image by the rescale + factor before visualization. Defaults to None. + text_cfg (dict): Extra text setting, which accepts + arguments of :meth:`mmengine.Visualizer.draw_texts`. + Defaults to an empty dict. 
+ show (bool): Whether to display the drawn image in a window, please + confirm your are able to access the graphical interface. + Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + out_file (str, optional): Extra path to save the visualization + result. If specified, the visualizer will only save the result + image to the out_file and ignore its storage backends. + Defaults to None. + name (str): The image identifier. It's useful when using the + storage backends of the visualizer to save or display the + image. Defaults to an empty string. + step (int): The global step value. It's useful to record a + series of visualization results for the same image with the + storage backends. Defaults to 0. + + Returns: + np.ndarray: The visualization image. + """ + if self.dataset_meta is not None: + classes = classes or self.dataset_meta.get('classes', None) + + if resize is not None: + h, w = image.shape[:2] + if w < h: + image = mmcv.imresize(image, (resize, resize * h // w)) + else: + image = mmcv.imresize(image, (resize * w // h, resize)) + elif rescale_factor is not None: + image = mmcv.imrescale(image, rescale_factor) + + texts = [] + self.set_image(image) + + if draw_gt and 'gt_label' in data_sample: + idx = data_sample.gt_label.tolist() + class_labels = [''] * len(idx) + if classes is not None: + class_labels = [f' ({classes[i]})' for i in idx] + labels = [str(idx[i]) + class_labels[i] for i in range(len(idx))] + prefix = 'Ground truth: ' + texts.append(prefix + ('\n' + ' ' * len(prefix)).join(labels)) + + if draw_pred and 'pred_label' in data_sample: + idx = data_sample.pred_label.tolist() + score_labels = [''] * len(idx) + class_labels = [''] * len(idx) + if draw_score and 'pred_score' in data_sample: + score_labels = [ + f', {data_sample.pred_score[i].item():.2f}' for i in idx + ] + + if classes is not None: + class_labels = [f' ({classes[i]})' for i in idx] + + labels = [ + str(idx[i]) + score_labels[i] + class_labels[i] + for i in range(len(idx)) + ] + prefix = 'Prediction: ' + texts.append(prefix + ('\n' + ' ' * len(prefix)).join(labels)) + + img_scale = get_adaptive_scale(image.shape[:2]) + text_cfg = { + 'size': int(img_scale * 7), + **self.DEFAULT_TEXT_CFG, + **text_cfg, + } + self.ax_save.text( + img_scale * 5, + img_scale * 5, + '\n'.join(texts), + **text_cfg, + ) + drawn_img = self.get_image() + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + # save the image to the target file instead of vis_backends + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + self.add_image(name, drawn_img, step=step) + + return drawn_img + + @master_only + def visualize_image_retrieval(self, + image: np.ndarray, + data_sample: DataSample, + prototype_dataset: BaseDataset, + topk: int = 1, + draw_score: bool = True, + resize: Optional[int] = None, + text_cfg: dict = dict(), + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + name: Optional[str] = '', + step: int = 0) -> None: + """Visualize image retrieval result. + + This method will draw the input image and the images retrieved from the + prototype dataset. + + Args: + image (np.ndarray): The image to draw. The format should be RGB. + data_sample (:obj:`DataSample`): The annotation of the image. + prototype_dataset (:obj:`BaseDataset`): The prototype dataset. + It should have `get_data_info` method and return a dict + includes `img_path`. 
+ draw_score (bool): Whether to draw the match scores of the + retrieved images. Defaults to True. + resize (int, optional): Resize the long edge of the image to the + specified length before visualization. Defaults to None. + text_cfg (dict): Extra text setting, which accepts arguments of + :func:`plt.text`. Defaults to an empty dict. + show (bool): Whether to display the drawn image in a window, please + confirm your are able to access the graphical interface. + Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + out_file (str, optional): Extra path to save the visualization + result. If specified, the visualizer will only save the result + image to the out_file and ignore its storage backends. + Defaults to None. + name (str): The image identifier. It's useful when using the + storage backends of the visualizer to save or display the + image. Defaults to an empty string. + step (int): The global step value. It's useful to record a + series of visualization results for the same image with the + storage backends. Defaults to 0. + + Returns: + np.ndarray: The visualization image. + """ + text_cfg = {**self.DEFAULT_TEXT_CFG, **text_cfg} + if resize is not None: + image = mmcv.imrescale(image, (resize, resize)) + + match_scores, indices = torch.topk(data_sample.pred_score, k=topk) + + figure = create_figure(margin=True) + gs = figure.add_gridspec(2, topk) + query_plot = figure.add_subplot(gs[0, :]) + query_plot.axis(False) + query_plot.imshow(image) + + for k, (score, sample_idx) in enumerate(zip(match_scores, indices)): + sample = prototype_dataset.get_data_info(sample_idx.item()) + value_image = mmcv.imread(sample['img_path'])[..., ::-1] + value_plot = figure.add_subplot(gs[1, k]) + value_plot.axis(False) + value_plot.imshow(value_image) + if draw_score: + value_plot.text( + 5, + 5, + f'{score:.2f}', + **text_cfg, + ) + drawn_img = img_from_canvas(figure.canvas) + self.set_image(drawn_img) + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + # save the image to the target file instead of vis_backends + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + self.add_image(name, drawn_img, step=step) + + return drawn_img + + def add_mask_to_image( + self, + image: np.ndarray, + data_sample: DataSample, + resize: Union[int, Tuple[int]] = 224, + color: Union[str, Tuple[int]] = 'black', + alpha: Union[int, float] = 0.8, + ) -> np.ndarray: + if isinstance(resize, int): + resize = (resize, resize) + + image = mmcv.imresize(image, resize) + self.set_image(image) + + if isinstance(data_sample.mask, np.ndarray): + data_sample.mask = torch.tensor(data_sample.mask) + mask = data_sample.mask.float()[None, None, ...] + mask_ = F.interpolate(mask, image.shape[:2], mode='nearest')[0, 0] + + self.draw_binary_masks(mask_.bool(), colors=color, alphas=alpha) + + drawn_img = self.get_image() + return drawn_img + + @master_only + def visualize_masked_image(self, + image: np.ndarray, + data_sample: DataSample, + resize: Union[int, Tuple[int]] = 224, + color: Union[str, Tuple[int]] = 'black', + alpha: Union[int, float] = 0.8, + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + name: str = '', + step: int = 0) -> None: + """Visualize masked image. + + This method will draw an image with binary mask. + + Args: + image (np.ndarray): The image to draw. The format should be RGB. + data_sample (:obj:`DataSample`): The annotation of the image. 
+ resize (int | Tuple[int]): Resize the input image to the specified + shape. Defaults to 224. + color (str | Tuple[int]): The color of the binary mask. + Defaults to "black". + alpha (int | float): The transparency of the mask. Defaults to 0.8. + show (bool): Whether to display the drawn image in a window, please + confirm your are able to access the graphical interface. + Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + out_file (str, optional): Extra path to save the visualization + result. If specified, the visualizer will only save the result + image to the out_file and ignore its storage backends. + Defaults to None. + name (str): The image identifier. It's useful when using the + storage backends of the visualizer to save or display the + image. Defaults to an empty string. + step (int): The global step value. It's useful to record a + series of visualization results for the same image with the + storage backends. Defaults to 0. + + Returns: + np.ndarray: The visualization image. + """ + drawn_img = self.add_mask_to_image( + image=image, + data_sample=data_sample, + resize=resize, + color=color, + alpha=alpha) + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + # save the image to the target file instead of vis_backends + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + self.add_image(name, drawn_img, step=step) + + return drawn_img + + @master_only + def visualize_image_caption(self, + image: np.ndarray, + data_sample: DataSample, + resize: Optional[int] = None, + text_cfg: dict = dict(), + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + name: Optional[str] = '', + step: int = 0) -> None: + """Visualize image caption result. + + This method will draw the input image and the images caption. + + Args: + image (np.ndarray): The image to draw. The format should be RGB. + data_sample (:obj:`DataSample`): The annotation of the image. + resize (int, optional): Resize the long edge of the image to the + specified length before visualization. Defaults to None. + text_cfg (dict): Extra text setting, which accepts arguments of + :func:`plt.text`. Defaults to an empty dict. + show (bool): Whether to display the drawn image in a window, please + confirm your are able to access the graphical interface. + Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + out_file (str, optional): Extra path to save the visualization + result. If specified, the visualizer will only save the result + image to the out_file and ignore its storage backends. + Defaults to None. + name (str): The image identifier. It's useful when using the + storage backends of the visualizer to save or display the + image. Defaults to an empty string. + step (int): The global step value. It's useful to record a + series of visualization results for the same image with the + storage backends. Defaults to 0. + + Returns: + np.ndarray: The visualization image. 
+ """ + text_cfg = {**self.DEFAULT_TEXT_CFG, **text_cfg} + + if resize is not None: + h, w = image.shape[:2] + if w < h: + image = mmcv.imresize(image, (resize, resize * h // w)) + else: + image = mmcv.imresize(image, (resize * w // h, resize)) + + self.set_image(image) + + img_scale = get_adaptive_scale(image.shape[:2]) + text_cfg = { + 'size': int(img_scale * 7), + **self.DEFAULT_TEXT_CFG, + **text_cfg, + } + self.ax_save.text( + img_scale * 5, + img_scale * 5, + data_sample.get('pred_caption'), + wrap=True, + **text_cfg, + ) + drawn_img = self.get_image() + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + # save the image to the target file instead of vis_backends + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + self.add_image(name, drawn_img, step=step) + + return drawn_img + + @master_only + def visualize_vqa(self, + image: np.ndarray, + data_sample: DataSample, + resize: Optional[int] = None, + text_cfg: dict = dict(), + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + name: Optional[str] = '', + step: int = 0) -> None: + """Visualize visual question answering result. + + This method will draw the input image, question and answer. + + Args: + image (np.ndarray): The image to draw. The format should be RGB. + data_sample (:obj:`DataSample`): The annotation of the image. + resize (int, optional): Resize the long edge of the image to the + specified length before visualization. Defaults to None. + text_cfg (dict): Extra text setting, which accepts arguments of + :func:`plt.text`. Defaults to an empty dict. + show (bool): Whether to display the drawn image in a window, please + confirm your are able to access the graphical interface. + Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + out_file (str, optional): Extra path to save the visualization + result. If specified, the visualizer will only save the result + image to the out_file and ignore its storage backends. + Defaults to None. + name (str): The image identifier. It's useful when using the + storage backends of the visualizer to save or display the + image. Defaults to an empty string. + step (int): The global step value. It's useful to record a + series of visualization results for the same image with the + storage backends. Defaults to 0. + + Returns: + np.ndarray: The visualization image. 
+ """ + text_cfg = {**self.DEFAULT_TEXT_CFG, **text_cfg} + + if resize is not None: + h, w = image.shape[:2] + if w < h: + image = mmcv.imresize(image, (resize, resize * h // w)) + else: + image = mmcv.imresize(image, (resize * w // h, resize)) + + self.set_image(image) + + img_scale = get_adaptive_scale(image.shape[:2]) + text_cfg = { + 'size': int(img_scale * 7), + **self.DEFAULT_TEXT_CFG, + **text_cfg, + } + text = (f'Q: {data_sample.get("question")}\n' + f'A: {data_sample.get("pred_answer")}') + self.ax_save.text( + img_scale * 5, + img_scale * 5, + text, + wrap=True, + **text_cfg, + ) + drawn_img = self.get_image() + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + # save the image to the target file instead of vis_backends + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + self.add_image(name, drawn_img, step=step) + + return drawn_img + + @master_only + def visualize_visual_grounding(self, + image: np.ndarray, + data_sample: DataSample, + resize: Optional[int] = None, + text_cfg: dict = dict(), + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + name: Optional[str] = '', + line_width: Union[int, float] = 3, + bbox_color: Union[str, tuple] = 'green', + step: int = 0) -> None: + """Visualize visual grounding result. + + This method will draw the input image, bbox and the object. + + Args: + image (np.ndarray): The image to draw. The format should be RGB. + data_sample (:obj:`DataSample`): The annotation of the image. + resize (int, optional): Resize the long edge of the image to the + specified length before visualization. Defaults to None. + text_cfg (dict): Extra text setting, which accepts arguments of + :func:`plt.text`. Defaults to an empty dict. + show (bool): Whether to display the drawn image in a window, please + confirm your are able to access the graphical interface. + Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + out_file (str, optional): Extra path to save the visualization + result. If specified, the visualizer will only save the result + image to the out_file and ignore its storage backends. + Defaults to None. + name (str): The image identifier. It's useful when using the + storage backends of the visualizer to save or display the + image. Defaults to an empty string. + step (int): The global step value. It's useful to record a + series of visualization results for the same image with the + storage backends. Defaults to 0. + + Returns: + np.ndarray: The visualization image. + """ + text_cfg = {**self.DEFAULT_TEXT_CFG, **text_cfg} + + gt_bboxes = data_sample.get('gt_bboxes') + pred_bboxes = data_sample.get('pred_bboxes') + if resize is not None: + h, w = image.shape[:2] + if w < h: + image, w_scale, h_scale = mmcv.imresize( + image, (resize, resize * h // w), return_scale=True) + else: + image, w_scale, h_scale = mmcv.imresize( + image, (resize * w // h, resize), return_scale=True) + pred_bboxes[:, ::2] *= w_scale + pred_bboxes[:, 1::2] *= h_scale + if gt_bboxes is not None: + gt_bboxes[:, ::2] *= w_scale + gt_bboxes[:, 1::2] *= h_scale + + self.set_image(image) + # Avoid the line-width limit in the base classes. 
+ self._default_font_size = 1e3 + self.draw_bboxes( + pred_bboxes, line_widths=line_width, edge_colors=bbox_color) + if gt_bboxes is not None: + self.draw_bboxes( + gt_bboxes, line_widths=line_width, edge_colors='blue') + + img_scale = get_adaptive_scale(image.shape[:2]) + text_cfg = { + 'size': int(img_scale * 7), + **self.DEFAULT_TEXT_CFG, + **text_cfg, + } + + text_positions = pred_bboxes[:, :2] + line_width + for i in range(pred_bboxes.size(0)): + self.ax_save.text( + text_positions[i, 0] + line_width, + text_positions[i, 1] + line_width, + data_sample.get('text'), + **text_cfg, + ) + drawn_img = self.get_image() + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + # save the image to the target file instead of vis_backends + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + self.add_image(name, drawn_img, step=step) + + return drawn_img + + @master_only + def visualize_t2i_retrieval(self, + text: str, + data_sample: DataSample, + prototype_dataset: BaseDataset, + topk: int = 1, + draw_score: bool = True, + text_cfg: dict = dict(), + fig_cfg: dict = dict(), + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + name: Optional[str] = '', + step: int = 0) -> None: + """Visualize Text-To-Image retrieval result. + + This method will draw the input text and the images retrieved from the + prototype dataset. + + Args: + image (np.ndarray): The image to draw. The format should be RGB. + data_sample (:obj:`DataSample`): The annotation of the image. + prototype_dataset (:obj:`BaseDataset`): The prototype dataset. + It should have `get_data_info` method and return a dict + includes `img_path`. + topk (int): To visualize the topk matching items. Defaults to 1. + draw_score (bool): Whether to draw the match scores of the + retrieved images. Defaults to True. + text_cfg (dict): Extra text setting, which accepts arguments of + :func:`plt.text`. Defaults to an empty dict. + fig_cfg (dict): Extra figure setting, which accepts arguments of + :func:`plt.Figure`. Defaults to an empty dict. + show (bool): Whether to display the drawn image in a window, please + confirm your are able to access the graphical interface. + Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + out_file (str, optional): Extra path to save the visualization + result. If specified, the visualizer will only save the result + image to the out_file and ignore its storage backends. + Defaults to None. + name (str): The image identifier. It's useful when using the + storage backends of the visualizer to save or display the + image. Defaults to an empty string. + step (int): The global step value. It's useful to record a + series of visualization results for the same image with the + storage backends. Defaults to 0. + + Returns: + np.ndarray: The visualization image. 
+ """ + text_cfg = {**self.DEFAULT_TEXT_CFG, **text_cfg} + + match_scores, indices = torch.topk(data_sample.pred_score, k=topk) + + figure = create_figure(margin=True, **fig_cfg) + figure.suptitle(text) + gs = figure.add_gridspec(1, topk) + + for k, (score, sample_idx) in enumerate(zip(match_scores, indices)): + sample = prototype_dataset.get_data_info(sample_idx.item()) + value_image = mmcv.imread(sample['img_path'])[..., ::-1] + value_plot = figure.add_subplot(gs[0, k]) + value_plot.axis(False) + value_plot.imshow(value_image) + if draw_score: + value_plot.text( + 5, + 5, + f'{score:.2f}', + **text_cfg, + ) + drawn_img = img_from_canvas(figure.canvas) + self.set_image(drawn_img) + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + # save the image to the target file instead of vis_backends + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + self.add_image(name, drawn_img, step=step) + + return drawn_img + + @master_only + def visualize_i2t_retrieval(self, + image: np.ndarray, + data_sample: DataSample, + prototype_dataset: Sequence[str], + topk: int = 1, + draw_score: bool = True, + resize: Optional[int] = None, + text_cfg: dict = dict(), + show: bool = False, + wait_time: float = 0, + out_file: Optional[str] = None, + name: str = '', + step: int = 0) -> None: + """Visualize Image-To-Text retrieval result. + + This method will draw the input image and the texts retrieved from the + prototype dataset. + + Args: + image (np.ndarray): The image to draw. The format should be RGB. + data_sample (:obj:`DataSample`): The annotation of the image. + prototype_dataset (Sequence[str]): The prototype dataset. + It should be a list of texts. + topk (int): To visualize the topk matching items. Defaults to 1. + draw_score (bool): Whether to draw the prediction scores + of prediction categories. Defaults to True. + resize (int, optional): Resize the short edge of the image to the + specified length before visualization. Defaults to None. + text_cfg (dict): Extra text setting, which accepts + arguments of :meth:`mmengine.Visualizer.draw_texts`. + Defaults to an empty dict. + show (bool): Whether to display the drawn image in a window, please + confirm your are able to access the graphical interface. + Defaults to False. + wait_time (float): The display time (s). Defaults to 0, which means + "forever". + out_file (str, optional): Extra path to save the visualization + result. If specified, the visualizer will only save the result + image to the out_file and ignore its storage backends. + Defaults to None. + name (str): The image identifier. It's useful when using the + storage backends of the visualizer to save or display the + image. Defaults to an empty string. + step (int): The global step value. It's useful to record a + series of visualization results for the same image with the + storage backends. Defaults to 0. + + Returns: + np.ndarray: The visualization image. 
+ """ + if resize is not None: + h, w = image.shape[:2] + if w < h: + image = mmcv.imresize(image, (resize, resize * h // w)) + else: + image = mmcv.imresize(image, (resize * w // h, resize)) + + self.set_image(image) + + match_scores, indices = torch.topk(data_sample.pred_score, k=topk) + texts = [] + for score, sample_idx in zip(match_scores, indices): + text = prototype_dataset[sample_idx.item()] + if draw_score: + text = f'{score:.2f} ' + text + texts.append(text) + + img_scale = get_adaptive_scale(image.shape[:2]) + text_cfg = { + 'size': int(img_scale * 7), + **self.DEFAULT_TEXT_CFG, + **text_cfg, + } + self.ax_save.text( + img_scale * 5, + img_scale * 5, + '\n'.join(texts), + **text_cfg, + ) + drawn_img = self.get_image() + + if show: + self.show(drawn_img, win_name=name, wait_time=wait_time) + + if out_file is not None: + # save the image to the target file instead of vis_backends + mmcv.imwrite(drawn_img[..., ::-1], out_file) + else: + self.add_image(name, drawn_img, step=step) + + return drawn_img diff --git a/model-index.yml b/model-index.yml new file mode 100644 index 0000000..1bd9285 --- /dev/null +++ b/model-index.yml @@ -0,0 +1,85 @@ +Import: + - configs/mobilenet_v2/metafile.yml + - configs/mobilenet_v3/metafile.yml + - configs/resnet/metafile.yml + - configs/res2net/metafile.yml + - configs/resnext/metafile.yml + - configs/seresnet/metafile.yml + - configs/shufflenet_v1/metafile.yml + - configs/shufflenet_v2/metafile.yml + - configs/swin_transformer/metafile.yml + - configs/vgg/metafile.yml + - configs/repvgg/metafile.yml + - configs/tnt/metafile.yml + - configs/vision_transformer/metafile.yml + - configs/t2t_vit/metafile.yml + - configs/tinyvit/metafile.yml + - configs/mlp_mixer/metafile.yml + - configs/conformer/metafile.yml + - configs/regnet/metafile.yml + - configs/deit/metafile.yml + - configs/twins/metafile.yml + - configs/efficientnet/metafile.yml + - configs/convnext/metafile.yml + - configs/hrnet/metafile.yml + - configs/repmlp/metafile.yml + - configs/wrn/metafile.yml + - configs/van/metafile.yml + - configs/cspnet/metafile.yml + - configs/convmixer/metafile.yml + - configs/densenet/metafile.yml + - configs/poolformer/metafile.yml + - configs/inception_v3/metafile.yml + - configs/mvit/metafile.yml + - configs/edgenext/metafile.yml + - configs/mobileone/metafile.yml + - configs/efficientformer/metafile.yml + - configs/swin_transformer_v2/metafile.yml + - configs/deit3/metafile.yml + - configs/hornet/metafile.yml + - configs/mobilevit/metafile.yml + - configs/davit/metafile.yml + - configs/replknet/metafile.yml + - configs/csra/metafile.yml + - configs/beit/metafile.yml + - configs/beitv2/metafile.yml + - configs/eva/metafile.yml + - configs/revvit/metafile.yml + - configs/clip/metafile.yml + - configs/mixmim/metafile.yml + - configs/efficientnet_v2/metafile.yml + - configs/convnext_v2/metafile.yml + - configs/levit/metafile.yml + - configs/vig/metafile.yml + - configs/arcface/metafile.yml + - configs/xcit/metafile.yml + - configs/byol/metafile.yml + - configs/densecl/metafile.yml + - configs/mocov2/metafile.yml + - configs/mocov3/metafile.yml + - configs/simclr/metafile.yml + - configs/simsiam/metafile.yml + - configs/swav/metafile.yml + - configs/mae/metafile.yml + - configs/simmim/metafile.yml + - configs/barlowtwins/metafile.yml + - configs/cae/metafile.yml + - configs/maskfeat/metafile.yml + - configs/milan/metafile.yml + - configs/ofa/metafile.yml + - configs/riformer/metafile.yml + - configs/sam/metafile.yml + - configs/glip/metafile.yml + - 
configs/eva02/metafile.yml
+  - configs/dinov2/metafile.yml
+  - configs/blip/metafile.yml
+  - configs/flamingo/metafile.yml
+  - configs/blip2/metafile.yml
+  - configs/chinese_clip/metafile.yml
+  - configs/itpn/metafile.yml
+  - configs/hivit/metafile.yml
+  - configs/spark/metafile.yml
+  - configs/minigpt4/metafile.yml
+  - configs/llava/metafile.yml
+  - configs/otter/metafile.yml
+  - configs/mff/metafile.yml
diff --git a/projects/README.md b/projects/README.md
new file mode 100644
index 0000000..5122e4b
--- /dev/null
+++ b/projects/README.md
@@ -0,0 +1,21 @@
+# Welcome to Projects of MMPreTrain
+
+In this folder, we welcome all contributions of vision deep-learning backbones from the community.
+
+Here, the requirements, e.g. code standards, are not as strict as in the core package. Thus, developers from the community can implement their algorithms much more easily and efficiently in MMPreTrain. We appreciate all contributions from the community that make MMPreTrain greater.
+
+Here is an [example project](./example_project) showing how to add your algorithms easily.
+
+We also provide some documentation listed below:
+
+- [New Model Guide](https://mmpretrain.readthedocs.io/en/latest/advanced_guides/modules.html)
+
+  The documentation for adding new models.
+
+- [Contribution Guide](https://mmpretrain.readthedocs.io/en/latest/notes/contribution_guide.html)
+
+  A guide for new contributors on how to add their projects to MMPreTrain.
+
+- [Discussions](https://github.com/open-mmlab/mmpretrain/discussions)
+
+  Welcome to start a discussion!
diff --git a/projects/dino/README.md b/projects/dino/README.md
new file mode 100644
index 0000000..3458fa4
--- /dev/null
+++ b/projects/dino/README.md
@@ -0,0 +1,26 @@
+# Implementation for DINO
+
+**NOTE**: We only guarantee the correctness of the forward pass; we do not take responsibility for a full reimplementation.
+
+First, make sure you are in the root directory of MMPreTrain. You then have two ways
+to play with DINO in MMPreTrain:
+
+## Slurm
+
+If you are using a cluster managed by Slurm, you can use the following command to
+start your job:
+
+```shell
+GPUS_PER_NODE=8 GPUS=8 CPUS_PER_TASK=16 bash projects/dino/tools/slurm_train.sh mm_model dino projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py --amp
+```
+
+The above command will pre-train the model on a single node with 8 GPUs.
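If you want to inspect or adjust the configuration from Python before submitting a job (for example, to shrink the per-GPU batch size), a minimal sketch with `mmengine.Config` looks like the following; the dumped file name is only an example:

```python
from mmengine.config import Config

# Load the DINO config shipped with this project.
cfg = Config.fromfile(
    'projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py')

cfg.train_dataloader.batch_size = 16  # e.g. shrink the per-GPU batch size
print(cfg.model.backbone)             # inspect the ViT backbone settings

cfg.dump('projects/dino/config/my_dino_variant.py')  # example output path
```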
+ +## PyTorch + +If you are using a single machine, without any cluster management software, you can use the following command + +```shell +NNODES=1 bash projects/dino/tools/dist_train.sh projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py 8 +--amp +``` diff --git a/projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py b/projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py new file mode 100644 index 0000000..d4a1c24 --- /dev/null +++ b/projects/dino/config/dino_vit-base-p16_8xb64-amp-coslr-100e_in1k.py @@ -0,0 +1,104 @@ +model = dict( + type='DINO', + data_preprocessor=dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True), + backbone=dict( + type='mmpretrain.VisionTransformer', arch='b', patch_size=16), + neck=dict( + type='DINONeck', + in_channels=768, + out_channels=65536, + hidden_channels=2048, + bottleneck_channels=256), + head=dict( + type='DINOHead', + out_channels=65536, + num_crops=10, + student_temp=0.1, + center_momentum=0.9)) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='DINOMultiCrop', + global_crops_scale=(0.4, 1.0), + local_crops_scale=(0.05, 0.4), + local_crops_number=8), + dict(type='PackInputs') +] +train_dataloader = dict( + batch_size=32, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type='mmpretrain.ImageNet', + data_root='/data/imagenet/', + ann_file='meta/train.txt', + data_prefix=dict(img_path='train/'), + pipeline=train_pipeline, + )) +optimizer = dict(type='AdamW', lr=0.0024, betas=(0.9, 0.95), weight_decay=0.05) +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=0.0024, betas=(0.9, 0.95), weight_decay=0.05), + paramwise_cfg=dict( + custom_keys=dict( + ln=dict(decay_mult=0.0), + bias=dict(decay_mult=0.0), + pos_embed=dict(decay_mult=0.0), + mask_token=dict(decay_mult=0.0), + cls_token=dict(decay_mult=0.0))), + loss_scale='dynamic') +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-09, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=90, + by_epoch=True, + begin=10, + end=100, + convert_to_iter_based=True) +] +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100) +default_scope = 'mmpretrain' +default_hooks = dict( + runtime_info=dict(type='RuntimeInfoHook'), + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=100), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=1), + sampler_seed=dict(type='DistSamplerSeedHook')) +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) +log_processor = dict( + window_size=10, + custom_cfg=[dict(data_src='', method='mean', window_size='global')]) +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='UniversalVisualizer', + vis_backends=[dict(type='LocalVisBackend')], + name='visualizer') +log_level = 'INFO' +load_from = None +resume = True +randomness = dict(seed=2, diff_rank_seed=True) +custom_hooks = [ + dict( + type='DINOTeacherTempWarmupHook', + warmup_teacher_temp=0.04, + teacher_temp=0.04, + teacher_temp_warmup_epochs=0, + max_epochs=100) +] diff --git a/projects/dino/dataset/__init__.py b/projects/dino/dataset/__init__.py new file mode 100644 index 0000000..da65f28 --- /dev/null +++ 
b/projects/dino/dataset/__init__.py @@ -0,0 +1 @@ +from .transform import * # noqa: F401,F403 diff --git a/projects/dino/dataset/transform/__init__.py b/projects/dino/dataset/transform/__init__.py new file mode 100644 index 0000000..00dacb3 --- /dev/null +++ b/projects/dino/dataset/transform/__init__.py @@ -0,0 +1,3 @@ +from .processing import DINOMultiCrop + +__all__ = ['DINOMultiCrop'] diff --git a/projects/dino/dataset/transform/processing.py b/projects/dino/dataset/transform/processing.py new file mode 100644 index 0000000..df4bf0b --- /dev/null +++ b/projects/dino/dataset/transform/processing.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random + +from mmcv.transforms import RandomApply # noqa: E501 +from mmcv.transforms import BaseTransform, Compose, RandomFlip, RandomGrayscale + +from mmpretrain.datasets.transforms import (ColorJitter, GaussianBlur, + RandomResizedCrop, Solarize) +from mmpretrain.registry import TRANSFORMS + + +@TRANSFORMS.register_module() +class DINOMultiCrop(BaseTransform): + """Multi-crop transform for DINO. + + This module applies the multi-crop transform for DINO. + + Args: + global_crops_scale (int): Scale of global crops. + local_crops_scale (int): Scale of local crops. + local_crops_number (int): Number of local crops. + """ + + def __init__(self, global_crops_scale: int, local_crops_scale: int, + local_crops_number: int) -> None: + super().__init__() + self.global_crops_scale = global_crops_scale + self.local_crops_scale = local_crops_scale + + flip_and_color_jitter = Compose([ + RandomFlip(prob=0.5, direction='horizontal'), + RandomApply([ + ColorJitter( + brightness=0.4, contrast=0.4, saturation=0.2, hue=0.1) + ], + prob=0.8), + RandomGrayscale( + prob=0.2, + keep_channels=True, + channel_weights=(0.114, 0.587, 0.2989), + ) + ]) + + self.global_transform_1 = Compose([ + RandomResizedCrop( + 224, + crop_ratio_range=global_crops_scale, + interpolation='bicubic'), + flip_and_color_jitter, + GaussianBlur(prob=1.0, radius=random.uniform(0.1, 2.0)), + ]) + + self.global_transform_2 = Compose([ + RandomResizedCrop( + 224, + crop_ratio_range=global_crops_scale, + interpolation='bicubic'), + flip_and_color_jitter, + GaussianBlur(prob=1.0, radius=random.uniform(0.1, 2.0)), + Solarize(thr=128, prob=0.2), + ]) + + self.local_crops_number = local_crops_number + self.local_transform = Compose([ + RandomResizedCrop( + 96, + crop_ratio_range=local_crops_scale, + interpolation='bicubic'), + flip_and_color_jitter, + GaussianBlur(prob=1.0, radius=random.uniform(0.1, 2.0)), + ]) + + def transform(self, results: dict) -> dict: + ori_img = results['img'] + crops = [] + results['img'] = ori_img + crops.append(self.global_transform_1(results)['img']) + results['img'] = ori_img + crops.append(self.global_transform_2(results)['img']) + for _ in range(self.local_crops_number): + results['img'] = ori_img + crops.append(self.local_transform(results)['img']) + results['img'] = crops + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(global_crops_scale = {self.global_crops_scale}, ' + repr_str += f'local_crops_scale = {self.local_crops_scale}, ' + repr_str += f'local_crop_number = {self.local_crops_number})' + return repr_str diff --git a/projects/dino/engine/__init__.py b/projects/dino/engine/__init__.py new file mode 100644 index 0000000..4142254 --- /dev/null +++ b/projects/dino/engine/__init__.py @@ -0,0 +1 @@ +from .hooks import * # noqa diff --git a/projects/dino/engine/hooks/__init__.py 
b/projects/dino/engine/hooks/__init__.py new file mode 100644 index 0000000..df43c49 --- /dev/null +++ b/projects/dino/engine/hooks/__init__.py @@ -0,0 +1,3 @@ +from .dino_teacher_temp_warmup_hook import DINOTeacherTempWarmupHook + +__all__ = ['DINOTeacherTempWarmupHook'] diff --git a/projects/dino/engine/hooks/dino_teacher_temp_warmup_hook.py b/projects/dino/engine/hooks/dino_teacher_temp_warmup_hook.py new file mode 100644 index 0000000..d66b025 --- /dev/null +++ b/projects/dino/engine/hooks/dino_teacher_temp_warmup_hook.py @@ -0,0 +1,33 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +from mmengine.hooks import Hook + +from mmpretrain.registry import HOOKS + + +@HOOKS.register_module() +class DINOTeacherTempWarmupHook(Hook): + """Warmup teacher temperature for DINO. + + This hook warmups the temperature for teacher to stabilize the training + process. + + Args: + warmup_teacher_temp (float): Warmup temperature for teacher. + teacher_temp (float): Temperature for teacher. + teacher_temp_warmup_epochs (int): Warmup epochs for teacher + temperature. + max_epochs (int): Maximum epochs for training. + """ + + def __init__(self, warmup_teacher_temp: float, teacher_temp: float, + teacher_temp_warmup_epochs: int, max_epochs: int) -> None: + super().__init__() + self.teacher_temps = np.concatenate( + (np.linspace(warmup_teacher_temp, teacher_temp, + teacher_temp_warmup_epochs), + np.ones(max_epochs - teacher_temp_warmup_epochs) * teacher_temp)) + + def before_train_epoch(self, runner) -> None: + runner.model.module.head.teacher_temp = self.teacher_temps[ + runner.epoch] diff --git a/projects/dino/models/__init__.py b/projects/dino/models/__init__.py new file mode 100644 index 0000000..49d0148 --- /dev/null +++ b/projects/dino/models/__init__.py @@ -0,0 +1,3 @@ +from .algorithm import * # noqa +from .head import * # noqa +from .neck import * # noqa diff --git a/projects/dino/models/algorithm/__init__.py b/projects/dino/models/algorithm/__init__.py new file mode 100644 index 0000000..1125b63 --- /dev/null +++ b/projects/dino/models/algorithm/__init__.py @@ -0,0 +1,3 @@ +from .dino import DINO + +__all__ = ['DINO'] diff --git a/projects/dino/models/algorithm/dino.py b/projects/dino/models/algorithm/dino.py new file mode 100644 index 0000000..2d78922 --- /dev/null +++ b/projects/dino/models/algorithm/dino.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import torch +from torch import nn + +from mmpretrain.models import BaseSelfSupervisor, CosineEMA +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample + + +@MODELS.register_module() +class DINO(BaseSelfSupervisor): + """Implementation for DINO. + + This module is proposed in `DINO: Emerging Properties in Self-Supervised + Vision Transformers `_. + + Args: + backbone (dict): Config for backbone. + neck (dict): Config for neck. + head (dict): Config for head. + pretrained (str, optional): Path for pretrained model. + Defaults to None. + base_momentum (float, optional): Base momentum for momentum update. + Defaults to 0.99. + data_preprocessor (dict, optional): Config for data preprocessor. + Defaults to None. + init_cfg (list[dict] | dict, optional): Config for initialization. + Defaults to None. 
+ """ + + def __init__(self, + backbone: dict, + neck: dict, + head: dict, + pretrained: Optional[str] = None, + base_momentum: float = 0.99, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__( + backbone=backbone, + neck=neck, + head=head, + pretrained=pretrained, + data_preprocessor=data_preprocessor, + init_cfg=init_cfg) + + # create momentum model + self.teacher = CosineEMA( + nn.Sequential(self.backbone, self.neck), momentum=base_momentum) + # weight normalization layer + self.neck.last_layer = nn.utils.weight_norm(self.neck.last_layer) + self.neck.last_layer.weight_g.data.fill_(1) + self.neck.last_layer.weight_g.requires_grad = False + self.teacher.module[1].last_layer = nn.utils.weight_norm( + self.teacher.module[1].last_layer) + self.teacher.module[1].last_layer.weight_g.data.fill_(1) + self.teacher.module[1].last_layer.weight_g.requires_grad = False + + def loss(self, inputs: torch.Tensor, + data_samples: List[DataSample]) -> dict: + global_crops = torch.cat(inputs[:2]) + local_crops = torch.cat(inputs[2:]) + # teacher forward + teacher_output = self.teacher(global_crops) + + # student forward global + student_output_global = self.backbone(global_crops) + student_output_global = self.neck(student_output_global) + + # student forward local + student_output_local = self.backbone(local_crops) + student_output_local = self.neck(student_output_local) + + student_output = torch.cat( + (student_output_global, student_output_local)) + + # compute loss + loss = self.head(student_output, teacher_output) + + return dict(loss=loss) diff --git a/projects/dino/models/head/__init__.py b/projects/dino/models/head/__init__.py new file mode 100644 index 0000000..fe31e08 --- /dev/null +++ b/projects/dino/models/head/__init__.py @@ -0,0 +1,3 @@ +from .dino_head import DINOHead + +__all__ = ['DINOHead'] diff --git a/projects/dino/models/head/dino_head.py b/projects/dino/models/head/dino_head.py new file mode 100644 index 0000000..e817bfa --- /dev/null +++ b/projects/dino/models/head/dino_head.py @@ -0,0 +1,69 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn.functional as F +from mmengine.dist import all_reduce, get_world_size +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class DINOHead(BaseModule): + """Implementation for DINO head. + + This module is proposed in `DINO: Emerging Properties in Self-Supervised + Vision Transformers `_. + + Args: + out_channels (int): Output channels of the head. + num_crops (int): Number of crops. + student_temp (float): Temperature for student output. + center_momentum (float): Momentum for center update. 
+ """ + + def __init__(self, out_channels: int, num_crops: int, student_temp: float, + center_momentum: float) -> None: + super().__init__() + self.student_temp = student_temp + self.teacher_temp = 0 + self.center_momentum = center_momentum + self.num_crops = num_crops + self.register_buffer('center', torch.zeros(1, out_channels)) + + def forward(self, student_output: torch.Tensor, + teacher_output: torch.Tensor) -> torch.Tensor: + + current_teacher_output = teacher_output + student_output = student_output / self.student_temp + student_output = student_output.chunk(self.num_crops, dim=0) + + # teacher centering and sharpening + teacher_output = F.softmax( + (teacher_output - self.center) / self.teacher_temp, dim=-1) + teacher_output = teacher_output.detach().chunk(2, dim=0) + + total_loss = 0 + n_loss_terms = 0 + + for i in range(len(teacher_output)): + for j in range(len(student_output)): + if i == j: + continue + total_loss += (-teacher_output[i] * + student_output[j].log_softmax(dim=-1)).sum( + dim=-1).mean() + n_loss_terms += 1 + total_loss /= n_loss_terms + self.update_center(current_teacher_output) + return total_loss + + @torch.no_grad() + def update_center(self, teacher_output: torch.Tensor) -> None: + + batch_center = torch.sum(teacher_output, dim=0, keepdim=True) + all_reduce(batch_center) + batch_center = batch_center / (len(teacher_output) * get_world_size()) + + # ema update batch center + self.center = self.center * self.center_momentum + batch_center * ( + 1 - self.center_momentum) diff --git a/projects/dino/models/neck/__init__.py b/projects/dino/models/neck/__init__.py new file mode 100644 index 0000000..e5f4aad --- /dev/null +++ b/projects/dino/models/neck/__init__.py @@ -0,0 +1,3 @@ +from .dino_neck import DINONeck + +__all__ = ['DINONeck'] diff --git a/projects/dino/models/neck/dino_neck.py b/projects/dino/models/neck/dino_neck.py new file mode 100644 index 0000000..8d8881e --- /dev/null +++ b/projects/dino/models/neck/dino_neck.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.model import BaseModule +from torch import nn + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class DINONeck(BaseModule): + """Implementation for DINO neck. + + This module is proposed in `DINO: Emerging Properties in Self-Supervised + Vision Transformers `_. + + Args: + in_channels (int): Input channels. + hidden_channels (int): Hidden channels. + out_channels (int): Output channels. + bottleneck_channels (int): Bottleneck channels. 
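+
+    Note:
+        ``last_layer`` is re-wrapped with weight normalization (and a
+        frozen ``weight_g``) by the :class:`DINO` algorithm after the neck
+        is built, so it is defined here as a plain ``nn.Linear`` without
+        bias.
+
+    Example:
+        >>> # illustrative channel sizes only
+        >>> import torch
+        >>> neck = DINONeck(192, 2048, 65536, 256)
+        >>> neck([torch.rand(2, 192)]).shape
+        torch.Size([2, 65536])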
+ """ + + def __init__(self, in_channels: int, hidden_channels: int, + out_channels: int, bottleneck_channels: int) -> None: + super().__init__() + self.mlp = nn.Sequential(*[ + nn.Linear(in_channels, hidden_channels), + nn.GELU(), + nn.Linear(hidden_channels, hidden_channels), + nn.GELU(), + nn.Linear(hidden_channels, bottleneck_channels), + ]) + + self.last_layer = nn.Linear( + bottleneck_channels, out_channels, bias=False) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.mlp(x[0]) + x = nn.functional.normalize(x, dim=-1, p=2) + x = self.last_layer(x) + return x diff --git a/projects/dino/tools/dist_train.sh b/projects/dino/tools/dist_train.sh new file mode 100644 index 0000000..3fca764 --- /dev/null +++ b/projects/dino/tools/dist_train.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --launcher pytorch ${@:3} diff --git a/projects/dino/tools/slurm_train.sh b/projects/dino/tools/slurm_train.sh new file mode 100644 index 0000000..7e2ad29 --- /dev/null +++ b/projects/dino/tools/slurm_train.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u projects/dino/tools/train.py ${CONFIG} --launcher="slurm" ${PY_ARGS} diff --git a/projects/dino/tools/train.py b/projects/dino/tools/train.py new file mode 100644 index 0000000..b9482c3 --- /dev/null +++ b/projects/dino/tools/train.py @@ -0,0 +1,104 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + +from dataset import * # noqa: F401,F403 +from engine import * # noqa: F401,F403 +from mmengine.config import Config, DictAction +from mmengine.runner import Runner +from models.algorithm import * # noqa: F401,F403 +from models.head import * # noqa: F401,F403 +from models.neck import * # noqa: F401,F403 + +from mmpretrain.utils import register_all_modules + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a model') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--resume', + nargs='?', + type=str, + const='auto', + help='If specify checkpint path, resume from it, while if not ' + 'specify, try to auto resume from the latest checkpoint ' + 'in the work directory.') + parser.add_argument( + '--amp', + action='store_true', + help='enable automatic-mixed-precision training') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + return args + + +def main(): + args = parse_args() + + # register all modules in mmpretrain into the registries + # do not init the default scope here because it will be init in the runner + register_all_modules(init_default_scope=False) + + # load config + cfg = Config.fromfile(args.config) + cfg.launcher = args.launcher + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + work_type = args.config.split('/')[1] + cfg.work_dir = osp.join('./work_dirs', work_type, + osp.splitext(osp.basename(args.config))[0]) + + # enable automatic-mixed-precision training + if args.amp is True: + optim_wrapper = cfg.optim_wrapper.get('type', 'OptimWrapper') + assert optim_wrapper in ['OptimWrapper', 'AmpOptimWrapper'], \ + '`--amp` is not supported custom optimizer wrapper type ' \ + f'`{optim_wrapper}.' + cfg.optim_wrapper.type = 'AmpOptimWrapper' + cfg.optim_wrapper.setdefault('loss_scale', 'dynamic') + + # resume training + if args.resume == 'auto': + cfg.resume = True + cfg.load_from = None + elif args.resume is not None: + cfg.resume = True + cfg.load_from = args.resume + + # build the runner from config + runner = Runner.from_cfg(cfg) + + # start training + runner.train() + + +if __name__ == '__main__': + main() diff --git a/projects/example_project/README.md b/projects/example_project/README.md new file mode 100644 index 0000000..2931d87 --- /dev/null +++ b/projects/example_project/README.md @@ -0,0 +1,128 @@ +# Example Project + +This is an example README for community `projects/`. You can write your README in your own project. Here are +some recommended parts of a README for others to understand and use your project, you can copy or modify them +according to your project. + +## Usage + +### Setup Environment + +Please refer to [Get Started](https://mmpretrain.readthedocs.io/en/latest/get_started.html) to install +MMPreTrain. + +At first, add the current folder to `PYTHONPATH`, so that Python can find your code. Run command in the current directory to add it. + +> Please run it every time after you opened a new shell. + +```shell +export PYTHONPATH=`pwd`:$PYTHONPATH +``` + +### Data Preparation + +Prepare the ImageNet-2012 dataset according to the [instruction](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#imagenet). 
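+
+If you follow the default ImageNet configs, the data is usually organized as below (a typical layout for illustration; adjust the paths in your config if your setup differs):
+
+```text
+data/imagenet
+├── meta
+│   ├── train.txt
+│   └── val.txt
+├── train
+└── val
+```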
+ +### Training commands + +**To train with single GPU:** + +```bash +mim train mmpretrain configs/examplenet_8xb32_in1k.py +``` + +**To train with multiple GPUs:** + +```bash +mim train mmpretrain configs/examplenet_8xb32_in1k.py --launcher pytorch --gpus 8 +``` + +**To train with multiple GPUs by slurm:** + +```bash +mim train mmpretrain configs/examplenet_8xb32_in1k.py --launcher slurm \ + --gpus 16 --gpus-per-node 8 --partition $PARTITION +``` + +### Testing commands + +**To test with single GPU:** + +```bash +mim test mmpretrain configs/examplenet_8xb32_in1k.py --checkpoint $CHECKPOINT +``` + +**To test with multiple GPUs:** + +```bash +mim test mmpretrain configs/examplenet_8xb32_in1k.py --checkpoint $CHECKPOINT --launcher pytorch --gpus 8 +``` + +**To test with multiple GPUs by slurm:** + +```bash +mim test mmpretrain configs/examplenet_8xb32_in1k.py --checkpoint $CHECKPOINT --launcher slurm \ + --gpus 16 --gpus-per-node 8 --partition $PARTITION +``` + +## Results + +| Model | Pretrain | Top-1 (%) | Top-5 (%) | Config | Download | +| :----------------: | :----------: | :-------: | :-------: | :-------------------------------------: | :------------------------------------: | +| ExampleNet-tiny | From scratch | 82.33 | 96.15 | [config](./mvitv2-tiny_8xb256_in1k.py) | [model](MODEL-LINK) \| [log](LOG-LINK) | +| ExampleNet-small\* | From scratch | 83.63 | 96.51 | [config](./mvitv2-small_8xb256_in1k.py) | [model](MODEL-LINK) | +| ExampleNet-base\* | From scratch | 84.34 | 96.86 | [config](./mvitv2-base_8xb256_in1k.py) | [model](MODEL-LINK) | + +*Models with * are converted from the [official repo](REPO-LINK). The config files of these models are only for inference. We don't ensure these config files' training accuracy and welcome you to contribute your reproduction results.* + +## Citation + + + +```BibTeX +@misc{2023mmpretrain, + title={OpenMMLab's Pre-training Toolbox and Benchmark}, + author={MMPreTrain Contributors}, + howpublished = {\url{https://github.com/open-mmlab/mmpretrain}}, + year={2023} +} +``` + +## Checklist + +Here is a checklist of this project's progress. And you can ignore this part if you don't plan to contribute +to MMPreTrain projects. + +- [ ] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. + + - [ ] Finish the code + + + + - [ ] Basic docstrings & proper citation + + + + - [ ] Converted checkpoint and results (Only for reproduction) + + + +- [ ] Milestone 2: Indicates a successful model implementation. + + - [ ] Training results + + + +- [ ] Milestone 3: Good to be a part of our core package! + + - [ ] Unit tests + + + + - [ ] Code style + + + + - [ ] `metafile.yml` and `README.md` + + diff --git a/projects/example_project/configs/examplenet_8xb32_in1k.py b/projects/example_project/configs/examplenet_8xb32_in1k.py new file mode 100644 index 0000000..99ab94d --- /dev/null +++ b/projects/example_project/configs/examplenet_8xb32_in1k.py @@ -0,0 +1,10 @@ +# Directly inherit the entire recipe you want to use. +_base_ = 'mmpretrain::resnet/resnet50_8xb32_in1k.py' + +# This line is to import your own modules. +custom_imports = dict(imports='models') + +# Modify the backbone to use your own backbone. +_base_['model']['backbone'] = dict(type='ExampleNet', depth=18) +# Modify the in_channels of classifier head to fit your backbone. 
+_base_['model']['head']['in_channels'] = 512 diff --git a/projects/example_project/models/__init__.py b/projects/example_project/models/__init__.py new file mode 100644 index 0000000..e2d4f2f --- /dev/null +++ b/projects/example_project/models/__init__.py @@ -0,0 +1,3 @@ +from .example_net import ExampleNet + +__all__ = ['ExampleNet'] diff --git a/projects/example_project/models/example_net.py b/projects/example_project/models/example_net.py new file mode 100644 index 0000000..ec3aab8 --- /dev/null +++ b/projects/example_project/models/example_net.py @@ -0,0 +1,31 @@ +from mmpretrain.models import ResNet +from mmpretrain.registry import MODELS + + +# Register your model to the `MODELS`. +@MODELS.register_module() +class ExampleNet(ResNet): + """Implements an example backbone. + + Implement the backbone network just like a normal pytorch network. + """ + + def __init__(self, **kwargs) -> None: + print('#############################\n' + '# Hello MMPretrain! #\n' + '#############################') + super().__init__(**kwargs) + + def forward(self, x): + """The forward method of the network. + + Args: + x (torch.Tensor): A tensor of image batch with shape + ``(batch_size, num_channels, height, width)``. + + Returns: + Tuple[torch.Tensor]: Please return a tuple of tensors and every + tensor is a feature map of specified scale. If you only want the + final feature map, simply return a tuple with one item. + """ + return super().forward(x) diff --git a/projects/fgia_accv2022_1st/README.md b/projects/fgia_accv2022_1st/README.md new file mode 100644 index 0000000..f929fb7 --- /dev/null +++ b/projects/fgia_accv2022_1st/README.md @@ -0,0 +1,143 @@ +# Solution of FGIA ACCV 2022(1st Place) + +This is fine-tuning part of the 1st Place Solution for Webly-supervised Fine-grained Recognition, refer to the ACCV workshop competition in https://www.cvmart.net/race/10412/base. + +## Result + +

+<details>
+
+<summary>Show the result</summary>
+
+**Leaderboard A**
+
+![LB-A](https://user-images.githubusercontent.com/18586273/205498131-5728e470-b4f6-43b7-82a5-5f8e3bd5168e.png)
+
+**Leaderboard B**
+
+![LB-B](https://user-images.githubusercontent.com/18586273/205498171-5a3a3055-370a-4a8b-9779-b686254ebc94.png)
+
+</details>
    + +## Reproduce + +For detailed self-supervised pretrain code, please refer to [Self-spervised Pre-training](#self-supervised-pre-training). +For detailed finetuning and inference code, please refer to [this repo](https://github.com/Ezra-Yu/ACCV2022_FGIA_1st). + +## Description + +### Overview of Our Solution + +![image](https://user-images.githubusercontent.com/18586273/205498371-31dbc1f4-5814-44bc-904a-f0d32515c7dd.png) + +### Our Model + +- ViT(MAE-pre-train) # Pretrained with [MAE](https://github.com/open-mmlab/mmppretrain/tree/main/projects/fgia_accv2022_1st/config/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py) +- Swin-v2(SimMIM-pre-train) # From [MMPretrain-swin_transformer_v2](https://github.com/open-mmlab/mmppretrain/tree/main/configs/swin_transformer_v2). + +\*\*The architectures we use \*\* + +- ViT + CE-loss + post-LongTail-Adjusment +- ViT + SubCenterArcFaceWithAdvMargin(CE) +- Swin-B + SubCenterArcFaceWithAdvMargin(SoftMax-EQL) +- Swin-L + SubCenterArcFaceWithAdvMargin(SoftMAx-EQL) + +## Self-supervised Pre-training + +### Requirements + +```shell +PyTorch 1.11.0 +torchvision 0.12.0 +CUDA 11.3 +MMEngine >= 0.1.0 +MMCV >= 2.0.0rc0 +``` + +### Preparing the dataset + +First you should refactor the folder of your dataset in the following format: + +```text +mmpretrain +| +|── data +| |── WebiNat5000 +| | |── meta +| | | |── train.txt +| | |── train +| | |── testa +| | |── testb +``` + +The `train`, `testa`, and `testb` folders contain the same content with +those provided by the official website of the competition. + +### Start pre-training + +First, you should install all these requirements, following this [page](https://mmpretrain.readthedocs.io/en/latest/get_started.html). +Then change your current directory to the root of MMPretrain + +```shell +cd $MMPretrain +``` + +Then you have the following two choices to start pre-training + +#### Slurm + +If you have a cluster managed by Slurm, you can use the following command: + +```shell +## we use 16 NVIDIA 80G A100 GPUs for pre-training +GPUS_PER_NODE=8 GPUS=16 SRUN_ARGS=${SRUN_ARGS} bash tools/slurm_train.sh ${PARTITION} ${JOB_NAME} projects/fgia_accv2022_1st/config/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py [optional arguments] +``` + +#### Pytorch + +Or you can use the following two commands to start distributed training on two separate nodes: + +```shell +# node 1 +NNODES=2 NODE_RANK=0 PORT=${MASTER_PORT} MASTER_ADDR=${MASTER_ADDR} bash tools/dist_train.sh projects/fgia_accv2022_1st/config/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py 8 +``` + +```shell +# node 2 +NNODES=2 NODE_RANK=1 PORT=${MASTER_PORT} MASTER_ADDR=${MASTER_ADDR} bash tools/dist_train.sh projects/fgia_accv2022_1st/config/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py 8 +``` + +All these logs and checkpoints will be saved under the folder `work_dirs`in the root. 
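+
+If the pre-training job is interrupted, it can usually be resumed from the latest checkpoint in `work_dirs` by appending the standard `--resume` option of `tools/train.py` (a sketch; adapt it to the launcher you used above):
+
+```shell
+bash tools/dist_train.sh projects/fgia_accv2022_1st/config/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py 8 --resume
+```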
+ +## Fine-tuning with bag of tricks + +- [MAE](https://github.com/open-mmlab/mmpretrain/tree/main/configs/mae) | [Config](https://github.com/Ezra-Yu/ACCV_workshop/tree/master/configs/vit) +- [Swinv2](https://github.com/open-mmlab/mmpretrain/tree/main/configs/swin_transformer_v2) | [Config](https://github.com/Ezra-Yu/ACCV_workshop/tree/master/configs/swin) +- [ArcFace](https://arxiv.org/abs/1801.07698) | [Code](https://github.com/Ezra-Yu/ACCV_workshop/blob/master/src/models/arcface_head.py) +- [SubCenterArcFaceWithAdvMargin](https://paperswithcode.com/paper/sub-center-arcface-boosting-face-recognition) | [Code](https://github.com/Ezra-Yu/ACCV_workshop/blob/master/src/models/arcface_head.py) +- [Post-LT-adjusment](https://paperswithcode.com/paper/long-tail-learning-via-logit-adjustment) | [Code](https://github.com/Ezra-Yu/ACCV_workshop/blob/master/src/models/linear_head_lt.py) +- [SoftMaxEQL](https://paperswithcode.com/paper/the-equalization-losses-gradient-driven) | [Code](https://github.com/Ezra-Yu/ACCV_workshop/blob/master/src/models/eql.py) +- FlipTTA [Code](https://github.com/Ezra-Yu/ACCV_workshop/blob/master/src/models/tta_classifier.py) +- clean dataset +- self-emsemble: [Uniform-model-soup](https://arxiv.org/abs/2203.05482) | [code](https://github.com/Ezra-Yu/ACCV_workshop/blob/master/tools/model_soup.py) +- [pseudo](https://lilianweng.github.io/posts/2021-12-05-semi-supervised/) | [Code](https://github.com/Ezra-Yu/ACCV_workshop/blob/master/tools/creat_pseudo.py) +- bagging-emsemble [Code](https://github.com/Ezra-Yu/ACCV_workshop/blob/master/tools/emsemble.py), +- post-process: [re-distribute-label](https://github.com/Ezra-Yu/ACCV_workshop/blob/master/tools/re-distribute-label.py); + +![Overview](https://user-images.githubusercontent.com/18586273/205498258-e5720d83-7006-4aea-86b5-aab1a8998c6c.png) + +![image](https://user-images.githubusercontent.com/18586273/205498027-def99b0d-a99a-470b-b292-8d5fc83111fc.png) + +#### Used but no improvements + +1. Using retrieval paradigm to solve this classification task; +2. Using EfficientNetv2 backbone. + +#### Not used but worth to do + +1. Try [DiVE](https://arxiv.org/abs/2103.15042) algorithm to improve performance in long tail dataset; +2. Use SimMIM to pre-train Swin-v2 on the competition dataset; +3. refine the re-distribute-label tool. 
diff --git a/projects/fgia_accv2022_1st/config/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py b/projects/fgia_accv2022_1st/config/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py new file mode 100644 index 0000000..8b2c5e9 --- /dev/null +++ b/projects/fgia_accv2022_1st/config/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k.py @@ -0,0 +1,107 @@ +model = dict( + type='MAE', + backbone=dict(type='MAEViT', arch='l', patch_size=16, mask_ratio=0.75), + neck=dict( + type='MAEPretrainDecoder', + patch_size=16, + in_chans=3, + embed_dim=1024, + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=16, + mlp_ratio=4.0), + head=dict( + type='MAEPretrainHead', + norm_pix=True, + patch_size=16, + loss=dict(type='MAEReconstructionLoss')), + init_cfg=dict( + type='Pretrained', + checkpoint= # noqa: E251 + 'https://download.openmmlab.com/mmselfsup/1.x/mae/mae_vit-large-p16_8xb512-fp16-coslr-1600e_in1k/mae_vit-large-p16_8xb512-fp16-coslr-1600e_in1k_20220825-cc7e98c9.pth' # noqa + )) +custom_imports = dict( + imports='mmpretrain.datasets', allow_failed_imports=False) +data_preprocessor = dict( + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + crop_ratio_range=(0.2, 1.0), + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5), + dict(type='PackInputs') +] +train_dataloader = dict( + batch_size=256, + num_workers=16, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + pin_memory=True, + dataset=dict( + type='ImageNet', + data_root='data/WebiNat5000/', + ann_file='data/WebiNat5000/meta/train.txt', + data_prefix=dict(img_path='train/'), + pipeline=train_pipeline)) +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=0.0024, betas=(0.9, 0.95), weight_decay=0.05), + paramwise_cfg=dict( + custom_keys=dict( + ln=dict(decay_mult=0.0), + bias=dict(decay_mult=0.0), + pos_embed=dict(decay_mult=0.0), + mask_token=dict(decay_mult=0.0), + cls_token=dict(decay_mult=0.0))), + loss_scale='dynamic') +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.0001, + by_epoch=True, + begin=0, + end=40, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=1560, + by_epoch=True, + begin=40, + end=1600, + convert_to_iter_based=True) +] +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=1600) +default_scope = 'mmpretrain' +default_hooks = dict( + runtime_info=dict(type='RuntimeInfoHook'), + timer=dict(type='IterTimerHook'), + logger=dict(type='LoggerHook', interval=100), + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=1), + sampler_seed=dict(type='DistSamplerSeedHook')) +env_cfg = dict( + cudnn_benchmark=False, + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), + dist_cfg=dict(backend='nccl')) +log_processor = dict( + window_size=10, + custom_cfg=[dict(data_src='', method='mean', windows_size='global')]) +vis_backends = [dict(type='LocalVisBackend')] +visualizer = dict( + type='UniversalVisualizer', + vis_backends=[dict(type='LocalVisBackend')], + name='visualizer') +log_level = 'INFO' +load_from = None +resume = False +randomness = dict(seed=0, diff_rank_seed=True) +launcher = 'slurm' +work_dir = './work_dirs/mae_vit-large-p16_8xb512-amp-coslr-1600e_in1k' diff --git a/projects/gradio_demo/README.md b/projects/gradio_demo/README.md new file 
mode 100644 index 0000000..6799f86 --- /dev/null +++ b/projects/gradio_demo/README.md @@ -0,0 +1,44 @@ +# MMPretrain Gradio Demo + +Here is a gradio demo for MMPretrain supported inference tasks. + +Currently supported tasks: + +- Image Classifiation +- Image-To-Image Retrieval +- Text-To-Image Retrieval (require multi-modality support) +- Image Caption (require multi-modality support) +- Visual Question Answering (require multi-modality support) +- Visual Grounding (require multi-modality support) + +## Preview + + + +## Requirements + +To run the demo, you need to install MMPretrain at first. And please install with the extra multi-modality +dependencies to enable multi-modality tasks. + +```shell +# At the MMPretrain root folder +pip install -e ".[multimodal]" +``` + +And then install the latest gradio package. + +```shell +pip install "gradio>=3.31.0" +``` + +## Start + +Then, you can start the gradio server on the local machine by: + +```shell +# At the project folder +python launch.py +``` + +The demo will start a local server `http://127.0.0.1:7860` and you can browse it by your browser. +And to share it to others, please set `share=True` in the `demo.launch()`. diff --git a/projects/gradio_demo/conversation.py b/projects/gradio_demo/conversation.py new file mode 100644 index 0000000..3c59469 --- /dev/null +++ b/projects/gradio_demo/conversation.py @@ -0,0 +1,137 @@ +# Modified from +# https://github.com/Vision-CAIR/MiniGPT-4/blob/main/minigpt4/conversation/conversation.py +import dataclasses +from typing import List + +import torch + + +@dataclasses.dataclass +class Conversation: + system: str + roles: List[str] + messages: List[List[str]] + sep: str = '###' + + def get_prompt(self): + ret = self.system + self.sep + for role, message in self.messages: + if message: + ret += role + ': ' + message + self.sep + else: + ret += role + ':' + return ret + + def append_message(self, role, message): + self.messages.append([role, message]) + + def copy(self): + return Conversation( + system=self.system, + roles=[role for role in self.roles], + messages=[[y for y in x] for x in self.messages], + sep=self.sep, + ) + + def dict(self): + return { + 'system': self.system, + 'roles': self.roles, + 'messages': self.messages, + 'offset': self.offset, + 'sep': self.sep, + } + + +EN_CONV_VISION = Conversation( + system='Give the following image. ' + 'You will be able to see the image once I provide it to you. 
' + 'Please answer my questions in detail.', + roles=['Ask', 'Answer'], + messages=[], + sep='###', +) + +ZH_CONV_VISION = Conversation( + system='给定一张图片,请仔细观察这张图片,并回答我的问题。', + roles=['问', '答'], + messages=[], + sep='###', +) + + +class Chat: + + def __init__(self, inferencer, device, is_half=False): + self.device = device + self.inferencer = inferencer + self.model = inferencer.model + self.is_half = is_half + if is_half: + self.model = self.model.half() + self.model = self.model.to(device) + self.max_length = 2000 + + def upload_img(self, image, conv, img_list): + img = next(self.inferencer.preprocess([image])) + img = self.model.data_preprocessor(img, False)['images'] + img = img.to(self.device) + image_emb, _ = self.model.encode_img(img) + img_list.append(image_emb) + conv.append_message(conv.roles[0], '') + + def get_context_emb(self, conv, img_list): + prompt = conv.get_prompt() + prompt_segs = prompt.split('') + seg_tokens = [ + self.model.llama_tokenizer( + seg, return_tensors='pt', + add_special_tokens=(i == 0)).to(self.device).input_ids + for i, seg in enumerate(prompt_segs) + ] + seg_embs = [ + self.model.llama_model.model.embed_tokens(seg_token) + for seg_token in seg_tokens + ] + mixed_embs = [ + emb for pair in zip(seg_embs[:-1], img_list) for emb in pair + ] + [seg_embs[-1]] + mixed_embs = torch.cat(mixed_embs, dim=1) + return mixed_embs + + def ask(self, text, conv): + if len(conv.messages) > 0 and conv.messages[-1][0] == conv.roles[ + 0] and conv.messages[-1][1][-6:] == '': + conv.messages[-1][1] = ' '.join([conv.messages[-1][1], text]) + else: + conv.append_message(conv.roles[0], text) + + def answer(self, conv, img_list, generation_cfg): + conv.append_message(conv.roles[1], None) + embs = self.get_context_emb(conv, img_list) + cur_max_len = generation_cfg['max_new_tokens'] + embs.shape[1] + if cur_max_len > self.max_length: + print('Warning: The number of tokens in current conversation' + 'exceeds the max length. 
' + 'The model will not see the contexts outside the range.') + begin_idx = max(0, cur_max_len - self.max_length) + embs = embs[:, begin_idx:] + if self.is_half: + embs = embs.half() + outputs = self.model.llama_model.generate( + inputs_embeds=embs, + eos_token_id=self.model.end_token_id, + **generation_cfg) + + output_token = outputs[0] + if output_token[0] == 0: + output_token = output_token[1:] + elif output_token[0] == 1: + output_token = output_token[1:] + output_text = self.model.llama_tokenizer.decode( + output_token, + add_special_tokens=False, + skip_special_tokens=True) + output_text = output_text.split('###')[0] + conv.messages[-1][1] = output_text + return output_text diff --git a/projects/gradio_demo/launch.py b/projects/gradio_demo/launch.py new file mode 100644 index 0000000..61bccee --- /dev/null +++ b/projects/gradio_demo/launch.py @@ -0,0 +1,467 @@ +from functools import partial +from pathlib import Path +from typing import Callable + +import gradio as gr +import torch +from mmengine.logging import MMLogger + +import mmpretrain +from mmpretrain.apis import (ImageCaptionInferencer, + ImageClassificationInferencer, + ImageRetrievalInferencer, + TextToImageRetrievalInferencer, + VisualGroundingInferencer, + VisualQuestionAnsweringInferencer) +from mmpretrain.utils.dependency import WITH_MULTIMODAL +from mmpretrain.visualization import UniversalVisualizer + +mmpretrain.utils.progress.disable_progress_bar = True + +logger = MMLogger('mmpretrain', logger_name='mmpre') +if torch.cuda.is_available(): + devices = [ + torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count()) + ] + logger.info(f'Available GPUs: {len(devices)}') +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + devices = [torch.device('mps')] + logger.info('Available MPS.') +else: + devices = [torch.device('cpu')] + logger.info('Available CPU.') + + +def get_free_device(): + if hasattr(torch.cuda, 'mem_get_info'): + free = [torch.cuda.mem_get_info(gpu)[0] for gpu in devices] + select = max(zip(free, range(len(free))))[1] + else: + import random + select = random.randint(0, len(devices) - 1) + return devices[select] + + +class InferencerCache: + max_size = 2 + _cache = [] + + @classmethod + def get_instance(cls, instance_name, callback: Callable): + if len(cls._cache) > 0: + for i, cache in enumerate(cls._cache): + if cache[0] == instance_name: + # Re-insert to the head of list. 
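+                # (LRU-style cache: the most recently used inferencer is
+                # kept at index 0, and the tail entry is evicted once
+                # ``max_size`` instances are cached.)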
+ cls._cache.insert(0, cls._cache.pop(i)) + logger.info(f'Use cached {instance_name}.') + return cache[1] + + if len(cls._cache) == cls.max_size: + cls._cache.pop(cls.max_size - 1) + torch.cuda.empty_cache() + device = get_free_device() + instance = callback(device=device) + logger.info(f'New instance {instance_name} on {device}.') + cls._cache.insert(0, (instance_name, instance)) + return instance + + +class ImageCaptionTab: + + def __init__(self) -> None: + self.model_list = ImageCaptionInferencer.list_models() + self.tab = self.create_ui() + + def create_ui(self): + with gr.Row(): + with gr.Column(): + select_model = gr.Dropdown( + label='Choose a model', + elem_id='image_caption_models', + elem_classes='select_model', + choices=self.model_list, + value='blip-base_3rdparty_coco-caption', + ) + with gr.Column(): + image_input = gr.Image( + label='Input', + source='upload', + elem_classes='input_image', + interactive=True, + tool='editor', + ) + caption_output = gr.Textbox( + label='Result', + lines=2, + elem_classes='caption_result', + interactive=False, + ) + run_button = gr.Button( + 'Run', + elem_classes='run_button', + ) + run_button.click( + self.inference, + inputs=[select_model, image_input], + outputs=caption_output, + ) + + def inference(self, model, image): + image = image[:, :, ::-1] + inferencer_name = self.__class__.__name__ + model + inferencer = InferencerCache.get_instance( + inferencer_name, partial(ImageCaptionInferencer, model)) + + result = inferencer(image)[0] + return result['pred_caption'] + + +class ImageClassificationTab: + + def __init__(self) -> None: + self.short_list = [ + 'resnet50_8xb32_in1k', + 'resnet50_8xb256-rsb-a1-600e_in1k', + 'swin-base_16xb64_in1k', + 'convnext-base_32xb128_in1k', + 'vit-base-p16_32xb128-mae_in1k', + ] + self.long_list = ImageClassificationInferencer.list_models() + self.tab = self.create_ui() + + def create_ui(self): + with gr.Row(): + with gr.Column(): + select_model = gr.Dropdown( + label='Choose a model', + elem_id='image_classification_models', + elem_classes='select_model', + choices=self.short_list, + value='swin-base_16xb64_in1k', + ) + expand = gr.Checkbox(label='Browse all models') + + def browse_all_model(value): + models = self.long_list if value else self.short_list + return gr.update(choices=models) + + expand.select( + fn=browse_all_model, inputs=expand, outputs=select_model) + with gr.Column(): + in_image = gr.Image( + label='Input', + source='upload', + elem_classes='input_image', + interactive=True, + tool='editor', + ) + out_cls = gr.Label( + label='Result', + num_top_classes=5, + elem_classes='cls_result', + ) + run_button = gr.Button( + 'Run', + elem_classes='run_button', + ) + run_button.click( + self.inference, + inputs=[select_model, in_image], + outputs=out_cls, + ) + + def inference(self, model, image): + image = image[:, :, ::-1] + + inferencer_name = self.__class__.__name__ + model + inferencer = InferencerCache.get_instance( + inferencer_name, partial(ImageClassificationInferencer, model)) + result = inferencer(image)[0]['pred_scores'].tolist() + + if inferencer.classes is not None: + classes = inferencer.classes + else: + classes = list(range(len(result))) + + return dict(zip(classes, result)) + + +class ImageRetrievalTab: + + def __init__(self) -> None: + self.model_list = ImageRetrievalInferencer.list_models() + self.tab = self.create_ui() + + def create_ui(self): + with gr.Row(): + with gr.Column(): + select_model = gr.Dropdown( + label='Choose a model', + elem_id='image_retri_models', + 
elem_classes='select_model', + choices=self.model_list, + value='resnet50-arcface_inshop', + ) + topk = gr.Slider(minimum=1, maximum=6, value=3, step=1) + with gr.Column(): + prototype = gr.File( + label='Retrieve from', + file_count='multiple', + file_types=['image']) + image_input = gr.Image( + label='Query', + source='upload', + elem_classes='input_image', + interactive=True, + tool='editor', + ) + retri_output = gr.Gallery( + label='Result', + elem_classes='img_retri_result', + ).style( + columns=[3], object_fit='contain', height='auto') + run_button = gr.Button( + 'Run', + elem_classes='run_button', + ) + run_button.click( + self.inference, + inputs=[select_model, prototype, image_input, topk], + outputs=retri_output, + ) + + def inference(self, model, prototype, image, topk): + image = image[:, :, ::-1] + + import hashlib + + proto_signature = ''.join(file.name for file in prototype).encode() + proto_signature = hashlib.sha256(proto_signature).hexdigest() + inferencer_name = self.__class__.__name__ + model + proto_signature + tmp_dir = Path(prototype[0].name).parent + cache_file = tmp_dir / f'{inferencer_name}.pth' + + inferencer = InferencerCache.get_instance( + inferencer_name, + partial( + ImageRetrievalInferencer, + model, + prototype=[file.name for file in prototype], + prototype_cache=str(cache_file), + ), + ) + + result = inferencer(image, topk=min(topk, len(prototype)))[0] + return [(str(item['sample']['img_path']), + str(item['match_score'].cpu().item())) for item in result] + + +class TextToImageRetrievalTab: + + def __init__(self) -> None: + self.model_list = TextToImageRetrievalInferencer.list_models() + self.tab = self.create_ui() + + def create_ui(self): + with gr.Row(): + with gr.Column(): + select_model = gr.Dropdown( + label='Choose a model', + elem_id='t2i_retri_models', + elem_classes='select_model', + choices=self.model_list, + value='blip-base_3rdparty_coco-retrieval', + ) + topk = gr.Slider(minimum=1, maximum=6, value=3, step=1) + with gr.Column(): + prototype = gr.File( + file_count='multiple', file_types=['image']) + text_input = gr.Textbox( + label='Query', + elem_classes='input_text', + interactive=True, + ) + retri_output = gr.Gallery( + label='Result', + elem_classes='img_retri_result', + ).style( + columns=[3], object_fit='contain', height='auto') + run_button = gr.Button( + 'Run', + elem_classes='run_button', + ) + run_button.click( + self.inference, + inputs=[select_model, prototype, text_input, topk], + outputs=retri_output, + ) + + def inference(self, model, prototype, text, topk): + import hashlib + + proto_signature = ''.join(file.name for file in prototype).encode() + proto_signature = hashlib.sha256(proto_signature).hexdigest() + inferencer_name = self.__class__.__name__ + model + proto_signature + tmp_dir = Path(prototype[0].name).parent + cache_file = tmp_dir / f'{inferencer_name}.pth' + + inferencer = InferencerCache.get_instance( + inferencer_name, + partial( + TextToImageRetrievalInferencer, + model, + prototype=[file.name for file in prototype], + prototype_cache=str(cache_file), + ), + ) + + result = inferencer(text, topk=min(topk, len(prototype)))[0] + return [(str(item['sample']['img_path']), + str(item['match_score'].cpu().item())) for item in result] + + +class VisualGroundingTab: + + def __init__(self) -> None: + self.model_list = VisualGroundingInferencer.list_models() + self.tab = self.create_ui() + self.visualizer = UniversalVisualizer( + fig_save_cfg=dict(figsize=(16, 9))) + + def create_ui(self): + with gr.Row(): + with 
gr.Column(): + select_model = gr.Dropdown( + label='Choose a model', + elem_id='vg_models', + elem_classes='select_model', + choices=self.model_list, + value='ofa-base_3rdparty_refcoco', + ) + with gr.Column(): + image_input = gr.Image( + label='Image', + source='upload', + elem_classes='input_image', + interactive=True, + tool='editor', + ) + text_input = gr.Textbox( + label='The object to search', + elem_classes='input_text', + interactive=True, + ) + vg_output = gr.Image( + label='Result', + source='upload', + interactive=False, + elem_classes='vg_result', + ) + run_button = gr.Button( + 'Run', + elem_classes='run_button', + ) + run_button.click( + self.inference, + inputs=[select_model, image_input, text_input], + outputs=vg_output, + ) + + def inference(self, model, image, text): + + inferencer_name = self.__class__.__name__ + model + + inferencer = InferencerCache.get_instance( + inferencer_name, + partial(VisualGroundingInferencer, model), + ) + + result = inferencer( + image[:, :, ::-1], text, return_datasamples=True)[0] + vis = self.visualizer.visualize_visual_grounding( + image, result, resize=512) + return vis + + +class VisualQuestionAnsweringTab: + + def __init__(self) -> None: + self.model_list = VisualQuestionAnsweringInferencer.list_models() + # The fine-tuned OFA vqa models requires extra object description. + self.model_list.remove('ofa-base_3rdparty-finetuned_vqa') + self.tab = self.create_ui() + + def create_ui(self): + with gr.Row(): + with gr.Column(): + select_model = gr.Dropdown( + label='Choose a model', + elem_id='vqa_models', + elem_classes='select_model', + choices=self.model_list, + value='ofa-base_3rdparty-zeroshot_coco-vqa', + ) + with gr.Column(): + image_input = gr.Image( + label='Input', + source='upload', + elem_classes='input_image', + interactive=True, + tool='editor', + ) + question_input = gr.Textbox( + label='Question', + elem_classes='question_input', + ) + answer_output = gr.Textbox( + label='Answer', + elem_classes='answer_result', + ) + run_button = gr.Button( + 'Run', + elem_classes='run_button', + ) + run_button.click( + self.inference, + inputs=[select_model, image_input, question_input], + outputs=answer_output, + ) + + def inference(self, model, image, question): + image = image[:, :, ::-1] + + inferencer_name = self.__class__.__name__ + model + inferencer = InferencerCache.get_instance( + inferencer_name, partial(VisualQuestionAnsweringInferencer, model)) + + result = inferencer(image, question)[0] + return result['pred_answer'] + + +if __name__ == '__main__': + title = 'MMPretrain Inference Demo' + with gr.Blocks(analytics_enabled=False, title=title) as demo: + gr.Markdown(f'# {title}') + with gr.Tabs(): + with gr.TabItem('Image Classification'): + ImageClassificationTab() + with gr.TabItem('Image-To-Image Retrieval'): + ImageRetrievalTab() + if WITH_MULTIMODAL: + with gr.TabItem('Image Caption'): + ImageCaptionTab() + with gr.TabItem('Text-To-Image Retrieval'): + TextToImageRetrievalTab() + with gr.TabItem('Visual Grounding'): + VisualGroundingTab() + with gr.TabItem('Visual Question Answering'): + VisualQuestionAnsweringTab() + else: + with gr.TabItem('Multi-modal tasks'): + gr.Markdown( + 'To inference multi-modal models, please install ' + 'the extra multi-modal dependencies, please refer ' + 'to https://mmpretrain.readthedocs.io/en/latest/' + 'get_started.html#installation') + + demo.launch() diff --git a/projects/gradio_demo/minigpt4_demo.py b/projects/gradio_demo/minigpt4_demo.py new file mode 100644 index 0000000..e4d6142 --- 
/dev/null +++ b/projects/gradio_demo/minigpt4_demo.py @@ -0,0 +1,144 @@ +import argparse + +import gradio as gr +import numpy as np +import torch +from conversation import EN_CONV_VISION, ZH_CONV_VISION, Chat + +from mmpretrain import ImageCaptionInferencer + +parser = argparse.ArgumentParser(description='MiniGPT4 demo') +parser.add_argument( + 'cfg', type=str, help='config file for minigpt4 (absolute path)') +parser.add_argument( + 'ckpt', type=str, help='pretrained file for minigpt4 (absolute path)') +args = parser.parse_args() + +if torch.cuda.is_available(): + devices = [ + torch.device(f'cuda:{i}') for i in range(torch.cuda.device_count()) + ] +elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): + devices = [torch.device('mps')] +else: + devices = [torch.device('cpu')] + + +def get_free_device(): + if hasattr(torch.cuda, 'mem_get_info'): + free = [torch.cuda.mem_get_info(gpu)[0] for gpu in devices] + select = max(zip(free, range(len(free))))[1] + else: + import random + select = random.randint(0, len(devices) - 1) + return devices[select] + + +device = get_free_device() +inferencer = ImageCaptionInferencer(model=args.cfg, pretrained=args.ckpt) +model = inferencer.model +chat = Chat(inferencer, device=device, is_half=(device.type != 'cpu')) + + +def reset(chat_state, img_list): + if chat_state is not None: + chat_state.messages = [] + if img_list is not None: + img_list = [] + return (None, gr.update(value=None, interactive=True), + gr.update( + value=None, + placeholder='Please upload your image first', + interactive=False), + gr.update(value='Upload & Start Chat', + interactive=True), chat_state, img_list, + gr.update(value='Restart', interactive=False), + gr.update(value='English', interactive=True)) + + +def upload_img(gr_img, language, chat_state): + if gr_img is None: + return (None, + gr.update( + placeholder='Please upload your image first', + interactive=False), + gr.update(value='Upload & Start Chat', + interactive=True), chat_state, None, + gr.update(value='Restart', interactive=False), + gr.update(value='English', interactive=True)) + + if (language == 'English'): + chat_state = EN_CONV_VISION.copy() + else: + chat_state = ZH_CONV_VISION.copy() + img_list = [] + gr_img_array = np.asarray(gr_img) + chat.upload_img(gr_img_array, chat_state, img_list) + return (gr.update(interactive=False), + gr.update(placeholder='Type and press Enter', interactive=True), + gr.update(value='Start Chatting', + interactive=False), chat_state, img_list, + gr.update(value='Restart', + interactive=True), gr.update(interactive=False)) + + +def ask(user_message, chatbot, chat_state): + if (len(user_message) == 0): + return gr.update( + value=None, + placeholder='Input should not be empty!', + interactive=True), chatbot, chat_state + chat.ask(user_message, chat_state) + chatbot = chatbot + [[user_message, None]] + return '', chatbot, chat_state + + +def answer(chatbot, chat_state, img_list): + llm_message = chat.answer( + conv=chat_state, + img_list=img_list, + generation_cfg=model.generation_cfg) + chatbot[-1][1] = llm_message + return chatbot, chat_state, img_list + + +if __name__ == '__main__': + title = 'MMPretrain MiniGPT-4 Inference Demo' + with gr.Blocks(analytics_enabled=False, title=title) as demo: + gr.Markdown(f'# {title}') + with gr.Row(): + with gr.Column(): + image = gr.Image(type='pil') + language = gr.Dropdown(['English', 'Chinese'], + label='Language', + info='Select chatbot\'s language', + value='English', + interactive=True) + upload_button = gr.Button( + 
value='Upload & Start Chat', interactive=True) + clear = gr.Button(value='Restart', interactive=False) + + with gr.Column(): + chat_state = gr.State() + img_list = gr.State() + chatbot = gr.Chatbot( + label='MiniGPT-4', min_width=320, height=600) + text_input = gr.Textbox( + label='User', + placeholder='Please upload your image first', + interactive=False) + + upload_button.click(upload_img, [image, language, chat_state], [ + image, text_input, upload_button, chat_state, img_list, clear, + language + ]) + text_input.submit(ask, [text_input, chatbot, chat_state], + [text_input, chatbot, chat_state]).then( + answer, [chatbot, chat_state, img_list], + [chatbot, chat_state, img_list]) + clear.click(reset, [chat_state, img_list], [ + chatbot, image, text_input, upload_button, chat_state, img_list, + clear, language + ]) + + demo.launch(share=True) diff --git a/projects/internimage_classification/README.md b/projects/internimage_classification/README.md new file mode 100644 index 0000000..53b0213 --- /dev/null +++ b/projects/internimage_classification/README.md @@ -0,0 +1,121 @@ +# InternImage Classification + +## Description + +This is the implementation of [InternImage](https://arxiv.org/abs/2211.05778) for image classification. + +## Usage + +### Setup Environment + +Please refer to [Get Started](https://mmpretrain.readthedocs.io/en/latest/get_started.html) documentation of MMPretrain to finish installation. + +Please install DCNv3. Run the command below following the [ InternImage official installation instructions](https://github.com/OpenGVLab/InternImage/blob/master/classification/README.md). + +```shell +cd ops_dcnv3 +sh ./make.sh +``` + +### Training and Test Commands + +At first, you need to add the current folder to `PYTHONPATH`, so that Python can find your model files. In `projects/internimage_classification/` root directory, please run command below to add it. + +```shell +export PYTHONPATH=`pwd`:$PYTHONPATH +``` + +#### Training + +##### On Local Single GPU + +```bash +# train with mim +mim train mmpretrain ${CONFIG} --work-dir ${WORK_DIR} + +# a specific command example +mim train mmpretrain configs/internimage-tiny_8xb128_in1k-224.py \ + --work-dir work_dirs/internimage-tiny_8xb128_in1k-224/ +``` + +##### On Multiple GPUs + +```bash +# train with mim +mim train mmpretrain ${CONFIG} \ + --work-dir ${WORK_DIR} \ + --launcher pytorch --gpus 8 +``` + +##### On Multiple GPUs with Slurm + +```bash +# train with mim +mim train mmpretrain ${CONFIG} \ + --work-dir ${WORK_DIR} \ + --launcher slurm --gpus 16 --gpus-per-node 8 \ + --partition ${PARTITION} +``` + +#### Test + +Please download the pretrain weight provided by [OpenGVLab](https://github.com/OpenGVLab/) from [here](https://huggingface.co/OpenGVLab/InternImage/tree/main) + +##### On Local Single GPU + +```bash +# test with mim +mim test mmpretrain ${CONFIG} -C ${CHECKPOINT} + +# a specific command example +mim test mmpretrain configs/internimage-tiny_8xb128_in1k-224.py -C /PATH/TO/internimage_t_1k_224.pth +``` + +##### On Multiple GPUs + +```bash +# test with mim +# a specific command examples, 8 GPUs here +mim test mmpretrain configs/internimage_t_1k_224.py \ + -C /PATH/TO/internimage_t_1k_224.pth \ + --launcher pytorch --gpus 8 +``` + +##### On Multiple GPUs with Slurm + +```bash +# test with mim +mim test mmpretrain ${CONFIG} \ + -C ${CHECKPOINT} + --work-dir ${WORK_DIR} \ + --launcher slurm --gpus 8 --gpus-per-node 8 \ + --partition ${PARTITION} \ + $PY_ARGS +``` + +Note: `PY_ARGS` is other optional args. 
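+
+For a quick single-image sanity check of a downloaded checkpoint, you can also use the high-level inference API. The snippet below is only a sketch: the image path is a placeholder, and it assumes you run it from `projects/internimage_classification/` so that `custom_imports = dict(imports='models')` in the config can locate the `InternImage` backbone.
+
+```python
+from mmpretrain.apis import ImageClassificationInferencer
+
+# Config and checkpoint paths follow the test commands above.
+inferencer = ImageClassificationInferencer(
+    model='configs/internimage-tiny_8xb128_in1k-224.py',
+    pretrained='/PATH/TO/internimage_t_1k_224.pth')
+
+result = inferencer('path/to/your/image.jpg')[0]
+print(result['pred_scores'].argmax())  # index of the predicted ImageNet-1k class
+```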
+ +## Results on ImageNet1K + +The accuracy of different models on ImageNet1K, + +| name | resolution | acc@1 | acc@5 | config | weight | +| :------------: | :--------: | :-----: | :-----: | :-------------------------------------------------------: | :-----------------------------------------------------------------------------------------------: | +| InternImage-T | 224 | 83.4700 | 96.5340 | [config](./configs/internimage-tiny_8xb128_in1k-224.py) | [model](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_t_1k_224.pth) | +| InternImage-S | 224 | 84.1640 | 96.9320 | [config](./configs/internimage-small_8xb128_in1k-224.py) | [model](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_s_1k_224.pth) | +| InternImage-B | 224 | 84.8660 | 97.1820 | [config](./configs/internimage-base_8xb128_in1k-224.py) | [model](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_b_1k_224.pth) | +| InternImage-L | 384 | 87.7060 | 98.3820 | [config](./configs/internimage-large_8xb128_in1k-384.py) | [model](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_l_22kto1k_384.pth) | +| InternImage-XL | 384 | 88.0460 | 98.5620 | [config](./configs/internimage-xlagre_8xb128_in1k-384.py) | [model](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_xl_22kto1k_384.pth) | +| InternImage-H | 640 | 89.5500 | 98.8500 | [config](./configs/internimage-huge_8xb128_in1k-640.py) | [model](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_h_22kto1k_640.pth) | +| InternImage-G | 512 | 90.0580 | 98.9700 | [config](./configs/internimage-giant_8xb128_in1k-512.py) | [model](https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_g_22kto1k_512.pth) | + +## Citation + +```bibtex +@article{wang2022internimage, + title={InternImage: Exploring Large-Scale Vision Foundation Models with Deformable Convolutions}, + author={Wang, Wenhai and Dai, Jifeng and Chen, Zhe and Huang, Zhenhang and Li, Zhiqi and Zhu, Xizhou and Hu, Xiaowei and Lu, Tong and Lu, Lewei and Li, Hongsheng and others}, + journal={arXiv preprint arXiv:2211.05778}, + year={2022} +} +``` diff --git a/projects/internimage_classification/configs/_base_.py b/projects/internimage_classification/configs/_base_.py new file mode 100644 index 0000000..4e9b2ad --- /dev/null +++ b/projects/internimage_classification/configs/_base_.py @@ -0,0 +1,113 @@ +_base_ = 'mmpretrain::_base_/default_runtime.py' + +# dataset settings +dataset_type = 'ImageNet' +data_preprocessor = dict( + num_classes=1000, + # RGB format normalization parameters + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + # convert image from BGR to RGB + to_rgb=True, +) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=224, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=224, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=224), + dict(type='PackInputs'), +] + +train_dataloader = dict( + batch_size=128, + num_workers=8, + dataset=dict( + type=dataset_type, + data_root='../../data/imagenet', + data_prefix='train', + pipeline=train_pipeline), + sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_dataloader = dict( + batch_size=128, + num_workers=8, + dataset=dict( + type=dataset_type, + 
data_root='../../data/imagenet', + data_prefix='val', + pipeline=test_pipeline), + sampler=dict(type='DefaultSampler', shuffle=False), +) +val_evaluator = dict(type='Accuracy', topk=(1, 5)) + +test_dataloader = val_dataloader +test_evaluator = val_evaluator + +# model setting +custom_imports = dict(imports='models') + +model = dict( + type='ImageClassifier', + backbone=dict( + type='InternImage', + stem_channels=64, + drop_path_rate=0.1, + stage_blocks=[4, 4, 18, 4], + groups=[4, 8, 16, 32]), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=1000, + in_channels=768, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5))) + +# optimizer +optim_wrapper = dict( + optimizer=dict(type='AdamW', lr=1.25e-04, eps=1e-8, betas=(0.9, 0.999)), + weight_decay=0.05) + +# learning policy +param_scheduler = [ + # warm up learning rate scheduler + dict( + type='LinearLR', + by_epoch=True, + begin=0, + end=20, + convert_to_iter_based=True), + # main learning rate scheduler + dict( + type='CosineAnnealingLR', + T_max=280, + by_epoch=True, + begin=20, + end=300, + eta_min=1.25e-06) +] + +# train, val, test setting +train_cfg = dict(by_epoch=True, max_epochs=300, val_interval=1) +val_cfg = dict() +test_cfg = dict() + +# NOTE: `auto_scale_lr` is for automatically scaling LR, +# based on the actual training batch size. +auto_scale_lr = dict(base_batch_size=128 * 8) diff --git a/projects/internimage_classification/configs/internimage-base_8xb128_in1k-224.py b/projects/internimage_classification/configs/internimage-base_8xb128_in1k-224.py new file mode 100644 index 0000000..735d17e --- /dev/null +++ b/projects/internimage_classification/configs/internimage-base_8xb128_in1k-224.py @@ -0,0 +1,13 @@ +_base_ = './_base_.py' + +model = dict( + backbone=dict( + stem_channels=112, + drop_path_rate=0.5, + stage_blocks=[4, 4, 21, 4], + groups=[7, 14, 28, 56], + layer_scale=1e-5, + post_norm=True), + head=dict(in_channels=1344)) + +optim_wrapper = dict(optimizer=dict(lr=0.0005)) diff --git a/projects/internimage_classification/configs/internimage-giant_8xb128_in1k-512.py b/projects/internimage_classification/configs/internimage-giant_8xb128_in1k-512.py new file mode 100644 index 0000000..4ccd34e --- /dev/null +++ b/projects/internimage_classification/configs/internimage-giant_8xb128_in1k-512.py @@ -0,0 +1,55 @@ +_base_ = './_base_.py' + +model = dict( + backbone=dict( + stem_channels=512, + drop_path_rate=0.4, + stage_blocks=[2, 2, 48, 4], + groups=[16, 32, 64, 128], + dw_kernel_size=5, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29, 35, 41, 47], + center_feature_scale=True, + use_clip_projector=True, + ), + neck=None, + head=dict(in_channels=768)) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=512, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs'), +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=512, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=512), + dict(type='PackInputs'), +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +optim_wrapper = dict(optimizer=dict(lr=5e-6)) +param_scheduler = [ + dict( + type='LinearLR', + by_epoch=True, + begin=0, + end=2, + convert_to_iter_based=True), + 
dict(type='CosineAnnealingLR', T_max=18, by_epoch=True, begin=2, end=20) +] +train_cfg = dict(by_epoch=True, max_epochs=20, val_interval=1) diff --git a/projects/internimage_classification/configs/internimage-huge_8xb128_in1k-640.py b/projects/internimage_classification/configs/internimage-huge_8xb128_in1k-640.py new file mode 100644 index 0000000..0e7c8e7 --- /dev/null +++ b/projects/internimage_classification/configs/internimage-huge_8xb128_in1k-640.py @@ -0,0 +1,55 @@ +_base_ = './_base_.py' + +model = dict( + backbone=dict( + stem_channels=320, + drop_path_rate=0.1, + stage_blocks=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + use_clip_projector=True, + ), + neck=None, + head=dict(in_channels=768)) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=640, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=640, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=640), + dict(type='PackInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +optim_wrapper = dict(optimizer=dict(lr=5e-6)) +param_scheduler = [ + dict( + type='LinearLR', + by_epoch=True, + begin=0, + end=2, + convert_to_iter_based=True), + dict(type='CosineAnnealingLR', T_max=18, by_epoch=True, begin=2, end=20) +] +train_cfg = dict(by_epoch=True, max_epochs=20, val_interval=1) diff --git a/projects/internimage_classification/configs/internimage-large_8xb128_in1k-384.py b/projects/internimage_classification/configs/internimage-large_8xb128_in1k-384.py new file mode 100644 index 0000000..838ec95 --- /dev/null +++ b/projects/internimage_classification/configs/internimage-large_8xb128_in1k-384.py @@ -0,0 +1,51 @@ +_base_ = './_base_.py' + +model = dict( + backbone=dict( + stem_channels=160, + drop_path_rate=0.1, + stage_blocks=[5, 5, 22, 5], + groups=[10, 20, 40, 80], + layer_scale=1e-5, + offset_scale=2.0, + post_norm=True), + head=dict(in_channels=1920)) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs') +] + +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=384, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +optim_wrapper = dict(optimizer=dict(lr=5e-6)) +param_scheduler = [ + dict( + type='LinearLR', + by_epoch=True, + begin=0, + end=2, + convert_to_iter_based=True), + dict(type='CosineAnnealingLR', T_max=18, by_epoch=True, begin=2, end=20) +] +train_cfg = dict(by_epoch=True, max_epochs=20, val_interval=1) diff --git a/projects/internimage_classification/configs/internimage-small_8xb128_in1k-224.py b/projects/internimage_classification/configs/internimage-small_8xb128_in1k-224.py new file mode 100644 index 0000000..ba2075e --- 
/dev/null +++ b/projects/internimage_classification/configs/internimage-small_8xb128_in1k-224.py @@ -0,0 +1,11 @@ +_base_ = './_base_.py' + +model = dict( + backbone=dict( + stem_channels=80, + drop_path_rate=0.4, + stage_blocks=[4, 4, 21, 4], + groups=[5, 10, 20, 40], + layer_scale=1e-5, + post_norm=True), + head=dict(in_channels=960)) diff --git a/projects/internimage_classification/configs/internimage-tiny_8xb128_in1k-224.py b/projects/internimage_classification/configs/internimage-tiny_8xb128_in1k-224.py new file mode 100644 index 0000000..abee278 --- /dev/null +++ b/projects/internimage_classification/configs/internimage-tiny_8xb128_in1k-224.py @@ -0,0 +1,8 @@ +_base_ = './_base_.py' + +model = dict( + backbone=dict( + stem_channels=64, + drop_path_rate=0.1, + stage_blocks=[4, 4, 18, 4], + groups=[4, 8, 16, 32])) diff --git a/projects/internimage_classification/configs/internimage-xlagre_8xb128_in1k-384.py b/projects/internimage_classification/configs/internimage-xlagre_8xb128_in1k-384.py new file mode 100644 index 0000000..dfd494b --- /dev/null +++ b/projects/internimage_classification/configs/internimage-xlagre_8xb128_in1k-384.py @@ -0,0 +1,50 @@ +_base_ = './_base_.py' + +model = dict( + backbone=dict( + stem_channels=192, + drop_path_rate=0.2, + stage_blocks=[5, 5, 24, 5], + groups=[12, 24, 48, 96], + layer_scale=1e-5, + offset_scale=2.0, + post_norm=True), + head=dict(in_channels=2304)) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='RandomResizedCrop', + scale=384, + backend='pillow', + interpolation='bicubic'), + dict(type='RandomFlip', prob=0.5, direction='horizontal'), + dict(type='PackInputs') +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='ResizeEdge', + scale=384, + edge='short', + backend='pillow', + interpolation='bicubic'), + dict(type='CenterCrop', crop_size=384), + dict(type='PackInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) +val_dataloader = dict(dataset=dict(pipeline=test_pipeline)) +test_dataloader = val_dataloader + +optim_wrapper = dict(optimizer=dict(lr=5e-6)) +param_scheduler = [ + dict( + type='LinearLR', + by_epoch=True, + begin=0, + end=2, + convert_to_iter_based=True), + dict(type='CosineAnnealingLR', T_max=18, by_epoch=True, begin=2, end=20) +] +train_cfg = dict(by_epoch=True, max_epochs=20, val_interval=1) diff --git a/projects/internimage_classification/models/__init__.py b/projects/internimage_classification/models/__init__.py new file mode 100644 index 0000000..99cbcf6 --- /dev/null +++ b/projects/internimage_classification/models/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .intern_image import InternImage + +__all__ = ['InternImage'] diff --git a/projects/internimage_classification/models/intern_image.py b/projects/internimage_classification/models/intern_image.py new file mode 100644 index 0000000..41c42cc --- /dev/null +++ b/projects/internimage_classification/models/intern_image.py @@ -0,0 +1,636 @@ +# Copyright (c) 2022 OpenGVLab +# Copyright (c) OpenMMLab. All rights reserved. 
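Each size-specific config above overrides only the backbone hyper-parameters and, where needed, the head width, pipelines and schedule; everything else is pulled in from `_base_.py` through the `_base_` mechanism. A short sketch of inspecting the merged result, assuming mmengine is installed and the repository layout matches this patch:

from mmengine.config import Config

# Parse a child config; keys it sets win over the inherited base values.
cfg = Config.fromfile('projects/internimage_classification/configs/'
                      'internimage-small_8xb128_in1k-224.py')

print(cfg.model.backbone.stem_channels)   # 80, overridden by the child config
print(cfg.model.head.in_channels)         # 960
print(cfg.optim_wrapper.optimizer.lr)     # 0.000125, inherited from _base_.py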
+# modified from +# https://github.com/OpenGVLab/InternImage/blob/master/classification/models/intern_image.py +import torch +import torch.nn as nn +import torch.utils.checkpoint as cp +from mmcv.cnn.bricks import DropPath, build_activation_layer +from mmcv.cnn.bricks.transformer import FFN +from mmengine.model.weight_init import trunc_normal_ +from ops_dcnv3 import modules as opsm + +from mmpretrain.models.backbones.base_backbone import BaseBackbone +from mmpretrain.models.utils import CrossMultiheadAttention +from mmpretrain.registry import MODELS + + +class to_channels_first(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 3, 1, 2) + + +class to_channels_last(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 2, 3, 1) + + +def build_norm_layer(dim, + norm_layer, + in_format='channels_last', + out_format='channels_last', + eps=1e-6): + layers = [] + if norm_layer == 'BN': + if in_format == 'channels_last': + layers.append(to_channels_first()) + layers.append(nn.BatchNorm2d(dim)) + if out_format == 'channels_last': + layers.append(to_channels_last()) + elif norm_layer == 'LN': + if in_format == 'channels_first': + layers.append(to_channels_last()) + layers.append(nn.LayerNorm(dim, eps=eps)) + if out_format == 'channels_first': + layers.append(to_channels_first()) + else: + raise NotImplementedError( + f'build_norm_layer does not support {norm_layer}') + return nn.Sequential(*layers) + + +class AttentiveBlock(nn.Module): + """Attentive Block. + + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: False. + qk_scale (float, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop (float, optional): Dropout rate. Default: 0.0. + attn_drop (float, optional): Attention dropout rate. Default: 0.0. + drop_path (float, optional): Stochastic depth rate. Default: 0.0. + norm_cfg (dict, optional): Normalization layer. + Default: dict(type='LN') + out_dim (int, optional): Dimension of output. Default: None. + """ + + def __init__(self, + dim, + num_heads, + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + norm_cfg=dict(type='LN'), + out_dim=None): + super().__init__() + norm_layer = norm_cfg['type'] + self.norm1_q = build_norm_layer(dim, norm_layer, eps=1e-6) + self.norm1_k = build_norm_layer(dim, norm_layer, eps=1e-6) + self.norm1_v = build_norm_layer(dim, norm_layer, eps=1e-6) + + self.cross_dcn = CrossMultiheadAttention( + embed_dims=dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + ) + if out_dim and out_dim != dim: + self.cross_dcn.proj = nn.Linear(dim, out_dim) + + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + + def forward(self, x_q, x_kv, pos_q, pos_k): + x_q = self.norm1_q(x_q + pos_q) + x_k = self.norm1_k(x_kv + pos_k) + x_v = self.norm1_v(x_kv) + x = self.cross_dcn(x_q, k=x_k, v=x_v) + return x + + +class AttentionPoolingBlock(AttentiveBlock): + + def forward(self, x): + x_q = x.mean(1, keepdim=True) + x_kv = x + pos_q, pos_k = 0, 0 + x = super().forward(x_q, x_kv, pos_q, pos_k) + x = x.squeeze(1) + return x + + +class DownsampleLayer(nn.Module): + """Downsample layer of InternImage. 
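The `build_norm_layer` helper above exists because InternImage keeps feature maps in channels-last (N, H, W, C) layout, while `nn.BatchNorm2d` expects channels-first; BN therefore gets sandwiched between the two permute modules. A self-contained illustration of that layout handling in plain PyTorch (not the project's API):

import torch
import torch.nn as nn

x = torch.randn(2, 7, 7, 64)              # channels-last, as used internally

ln = nn.LayerNorm(64, eps=1e-6)           # LN normalises the last dim directly
print(ln(x).shape)                        # torch.Size([2, 7, 7, 64])

bn = nn.BatchNorm2d(64)                   # BN needs (N, C, H, W)
y = bn(x.permute(0, 3, 1, 2))             # to_channels_first
y = y.permute(0, 2, 3, 1)                 # to_channels_last
print(y.shape)                            # torch.Size([2, 7, 7, 64])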
+ + Args: + channels (int): number of input channels + norm_layer (str): normalization layer + """ + + def __init__(self, channels, norm_layer='LN'): + super().__init__() + self.conv = nn.Conv2d( + channels, + 2 * channels, + kernel_size=3, + stride=2, + padding=1, + bias=False) + self.norm = build_norm_layer(2 * channels, norm_layer, + 'channels_first', 'channels_last') + + def forward(self, x): + x = self.conv(x.permute(0, 3, 1, 2)) + x = self.norm(x) + return x + + +class InternImageLayer(nn.Module): + """Basic layer of InternImage. + + Args: + core_op (nn.Module): core operation of InternImage + channels (int): number of input channels + groups (list): Groups of each block. + mlp_ratio (float): ratio of mlp hidden features to input channels + drop (float): dropout rate + drop_path (float): drop path rate + act_cfg (dict): activation layer + norm_cfg (dict): normalization layer + post_norm (bool): whether to use post normalization + layer_scale (float): layer scale + offset_scale (float): offset scale + with_cp (bool): whether to use checkpoint + """ + + def __init__( + self, + core_op, + channels, + groups, + mlp_ratio=4., + drop=0., + drop_path=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + post_norm=False, + layer_scale=None, + offset_scale=1.0, + with_cp=False, + dw_kernel_size=None, + res_post_norm=False, + center_feature_scale=False, + remove_center=False, + ): + super().__init__() + self.channels = channels + self.groups = groups + self.mlp_ratio = mlp_ratio + self.with_cp = with_cp + + self.norm1 = build_norm_layer(channels, 'LN') + self.post_norm = post_norm + self.dcn = core_op( + channels=channels, + kernel_size=3, + stride=1, + pad=1, + dilation=1, + group=groups, + offset_scale=offset_scale, + act_layer=act_cfg['type'], + norm_layer=norm_cfg['type'], + dw_kernel_size=dw_kernel_size, + center_feature_scale=center_feature_scale, + remove_center=remove_center, + ) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
\ + else nn.Identity() + self.norm2 = build_norm_layer(channels, 'LN') + + self.mlp = FFN( + embed_dims=channels, + feedforward_channels=int(channels * mlp_ratio), + act_cfg=act_cfg, + ffn_drop=drop, + add_identity=False) + + self.layer_scale = layer_scale is not None + if self.layer_scale: + self.gamma1 = nn.Parameter( + layer_scale * torch.ones(channels), requires_grad=True) + self.gamma2 = nn.Parameter( + layer_scale * torch.ones(channels), requires_grad=True) + self.res_post_norm = res_post_norm + if res_post_norm: + self.res_post_norm1 = build_norm_layer(channels, 'LN') + self.res_post_norm2 = build_norm_layer(channels, 'LN') + + def forward(self, x): + + def _inner_forward(x): + if not self.layer_scale: + if self.post_norm: + x = x + self.drop_path(self.norm1(self.dcn(x))) + x = x + self.drop_path(self.norm2(self.mlp(x))) + elif self.res_post_norm: + x = x + self.drop_path( + self.res_post_norm1(self.dcn(self.norm1(x)))) + x = x + self.drop_path( + self.res_post_norm2(self.mlp(self.norm2(x)))) + else: + x = x + self.drop_path(self.dcn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + if self.post_norm: + x = x + self.drop_path(self.gamma1 * self.norm1(self.dcn(x))) + x = x + self.drop_path(self.gamma2 * self.norm2(self.mlp(x))) + else: + x = x + self.drop_path(self.gamma1 * self.dcn(self.norm1(x))) + x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) + return x + + if self.with_cp and x.requires_grad: + x = cp.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + return x + + +class InternImageBlock(nn.Module): + """Block of InternImage. + + Args: + core_op (nn.Module): core operation of InternImage + channels (int): number of input channels + depths (list): Depth of each block. + groups (list): Groups of each block. 
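`InternImageLayer.forward` above chooses between pre-norm, post-norm and res-post-norm residual orderings and can additionally scale each branch by a learnable per-channel `gamma` (layer scale). A minimal, self-contained sketch of the default pre-norm + layer-scale pattern; the class below is illustrative only and not part of the patch:

import torch
import torch.nn as nn

class PreNormLayerScaleBlock(nn.Module):

    def __init__(self, dim, fn, layer_scale=1e-5):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn                                    # stands in for DCNv3 or the MLP
        self.gamma = nn.Parameter(layer_scale * torch.ones(dim))

    def forward(self, x):                               # x: (N, H, W, C)
        return x + self.gamma * self.fn(self.norm(x))   # scaled residual branch

block = PreNormLayerScaleBlock(64, nn.Linear(64, 64))
print(block(torch.randn(2, 7, 7, 64)).shape)            # torch.Size([2, 7, 7, 64])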
+ mlp_ratio (float): ratio of mlp hidden features to input channels + drop (float): dropout rate + drop_path (float): drop path rate + act_cfg (dict): activation layer + norm_cfg (dict): normalization layer + post_norm (bool): whether to use post normalization + layer_scale (float): layer scale + offset_scale (float): offset scale + with_cp (bool): whether to use checkpoint + """ + + def __init__( + self, + core_op, + channels, + depth, + groups, + downsample=True, + mlp_ratio=4., + drop=0., + drop_path=0., + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + post_norm=False, + offset_scale=1.0, + layer_scale=None, + with_cp=False, + dw_kernel_size=None, + post_norm_block_ids=None, + res_post_norm=False, + center_feature_scale=False, + remove_center=False, + ): + super().__init__() + self.channels = channels + self.depth = depth + self.post_norm = post_norm + self.center_feature_scale = center_feature_scale + + self.blocks = nn.ModuleList([ + InternImageLayer( + core_op=core_op, + channels=channels, + groups=groups, + mlp_ratio=mlp_ratio, + drop=drop, + drop_path=drop_path[i] + if isinstance(drop_path, list) else drop_path, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + post_norm=post_norm, + layer_scale=layer_scale, + offset_scale=offset_scale, + with_cp=with_cp, + dw_kernel_size=dw_kernel_size, + res_post_norm=res_post_norm, + center_feature_scale=center_feature_scale, + remove_center=remove_center, + ) for i in range(depth) + ]) + if not self.post_norm or center_feature_scale: + self.norm = build_norm_layer(channels, 'LN') + self.post_norm_block_ids = post_norm_block_ids + if post_norm_block_ids is not None: + self.post_norms = nn.ModuleList([ + build_norm_layer(channels, 'LN', eps=1e-6) + for _ in post_norm_block_ids + ]) + self.downsample = DownsampleLayer( + channels=channels, + norm_layer=norm_cfg['type']) if downsample else None + + def forward(self, x, return_wo_downsample=False): + for i, blk in enumerate(self.blocks): + x = blk(x) + if (self.post_norm_block_ids + is not None) and (i in self.post_norm_block_ids): + index = self.post_norm_block_ids.index(i) + x = self.post_norms[index](x) + if not self.post_norm or self.center_feature_scale: + x = self.norm(x) + if return_wo_downsample: + x_ = x + if self.downsample is not None: + x = self.downsample(x) + + if return_wo_downsample: + return x, x_ + return x + + +@MODELS.register_module() +class InternImage(BaseBackbone): + """ InternImage + A PyTorch impl of : `InternImage: Exploring Large-Scale Vision Foundation Models with Deformable Convolutions` - + https://arxiv.org/abs/2211.05778 + + Args: + core_op (str): Core operator. Default: 'DCNv3' + stem_channels (int): Number of channels of the first stage. Default: 64 + stage_blocks (list): Depth of each block. Default: [3, 4, 18, 5] + groups (list): Groups of each block. Default: [3, 6, 12, 24] + num_classes (int): Number of classes. Default: 1000 + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + drop_rate (float): Probability of an element to be zeroed. Default: 0. + drop_path_rate (float): Stochastic depth rate. Default: 0. + act_cfg (dict): Activation layer. Default: dict(type='GELU') + norm_cfg (dict): Normalization layer. Default: dict(type='LN') + layer_scale (float, optional): Initial value of layer scale. Default: None + cls_scale (float): Expansion ratio of the conv classification head. Default: 1.5 + with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. Default: False + dw_kernel_size (int): Size of the dwconv. Default: None + use_clip_projector (bool): Whether to use clip projector.
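The `cls_scale` and `use_clip_projector` options documented here determine the head widths seen in the configs earlier in this patch: channels double across the four stages, so the last stage carries `stem_channels * 8` features, and the conv head widens that by `cls_scale=1.5` for the T/S/B/L/XL variants, while H/G replace it with a CLIP projector whose output is 768-d. A quick check of that arithmetic:

# head.in_channels per variant = stem_channels * 2**3 * cls_scale
for name, stem in [('tiny', 64), ('small', 80), ('base', 112),
                   ('large', 160), ('xlarge', 192)]:
    num_features = stem * 2 ** 3          # channels of the last stage
    print(name, num_features, int(num_features * 1.5))
# -> tiny 512 768, small 640 960, base 896 1344, large 1280 1920, xlarge 1536 2304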
Default: False + level2_post_norm (bool): Whether to use level2 post norm. Default: False + level2_post_norm_block_ids (list): Indexes of post norm blocks. Default: None + res_post_norm (bool): Whether to use res post norm. Default: False + center_feature_scale (bool): Whether to use center feature scale. Default: False + """ # noqa: E501 + + def __init__(self, + stem_channels=64, + stage_blocks=[3, 4, 18, 5], + groups=[3, 6, 12, 24], + mlp_ratio=4., + drop_rate=0., + drop_path_rate=0.2, + drop_path_type='linear', + act_cfg=dict(type='GELU'), + norm_cfg=dict(type='LN'), + layer_scale=None, + offset_scale=1.0, + post_norm=False, + cls_scale=1.5, + with_cp=False, + dw_kernel_size=None, + use_clip_projector=False, + level2_post_norm=False, + level2_post_norm_block_ids=None, + res_post_norm=False, + center_feature_scale=False, + remove_center=False, + init_cfg=None): + super(InternImage, self).__init__(init_cfg) + + self.core_op = 'DCNv3' + self.num_stages = len(stage_blocks) + self.num_features = int(stem_channels * 2**(self.num_stages - 1)) + self.post_norm = post_norm + self.mlp_ratio = mlp_ratio + self.use_clip_projector = use_clip_projector + self.level2_post_norm_block_ids = level2_post_norm_block_ids + self.remove_center = remove_center + self.act_cfg = act_cfg + self.norm_cfg = norm_cfg + + # stem layer + self._make_stem_layer(in_channels=3, stem_channels=stem_channels) + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth decay rule + total_depth = sum(stage_blocks) + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, total_depth) + ] + if drop_path_type == 'uniform': + for i in range(len(dpr)): + dpr[i] = drop_path_rate + + # InternImage Layers + self.layers = nn.ModuleList() + for i in range(self.num_stages): + if level2_post_norm and i == 2: + post_norm_block_ids = level2_post_norm_block_ids + else: + post_norm_block_ids = None + + layer = InternImageBlock( + core_op=getattr(opsm, self.core_op), + channels=int(stem_channels * 2**i), + depth=stage_blocks[i], + groups=groups[i], + mlp_ratio=self.mlp_ratio, + drop=drop_rate, + drop_path=dpr[sum(stage_blocks[:i]):sum(stage_blocks[:i + 1])], + act_cfg=act_cfg, + norm_cfg=norm_cfg, + post_norm=post_norm, + downsample=(i < self.num_stages - 1), + layer_scale=layer_scale, + offset_scale=offset_scale, + with_cp=with_cp, + dw_kernel_size=dw_kernel_size, + post_norm_block_ids=post_norm_block_ids, + res_post_norm=res_post_norm, + center_feature_scale=center_feature_scale, + remove_center=remove_center, + ) + self.layers.append(layer) + + # Conv Head + if not use_clip_projector: + self.conv_head = nn.Sequential( + nn.Conv2d( + self.num_features, + int(self.num_features * cls_scale), + kernel_size=1, + bias=False), + build_norm_layer( + int(self.num_features * cls_scale), 'BN', 'channels_first', + 'channels_first'), build_activation_layer(act_cfg)) + + else: + pretrain_embed_dim, _stride, attnpool_num_heads, clip_embed_dim \ + = 1024, 2, 16, 768 + self.dcnv3_head_x4 = nn.Sequential( + nn.Conv2d( + in_channels=self.num_features, + out_channels=pretrain_embed_dim * (_stride**2), + kernel_size=1), nn.PixelShuffle(_stride)) + self.dcnv3_head_x3 = nn.Conv2d( + in_channels=self.num_features // 2, + out_channels=pretrain_embed_dim, + kernel_size=1) + self.clip_projector = AttentionPoolingBlock( + dim=pretrain_embed_dim, + num_heads=attnpool_num_heads, + qkv_bias=True, + qk_scale=None, + drop=0., + attn_drop=0., + norm_cfg=norm_cfg, + out_dim=clip_embed_dim) + norm_layer = norm_cfg['type'] + self.fc_norm = build_norm_layer( + 
clip_embed_dim, norm_layer, eps=1e-6) + + def init_weights(self): + super(InternImage, self).init_weights() + + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + elif isinstance(m, getattr(opsm, self.core_op)): + m._reset_parameters() + + def _make_stem_layer(self, in_channels, stem_channels): + norm_layer = self.norm_cfg['type'] + self.patch_embed = nn.Sequential( + nn.Conv2d( + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1), + build_norm_layer(stem_channels // 2, norm_layer, 'channels_first', + 'channels_first'), + build_activation_layer(self.act_cfg), + nn.Conv2d( + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=2, + padding=1), + build_norm_layer(stem_channels, norm_layer, 'channels_first', + 'channels_last'), + ) + + def forward_features(self, x): + x = self.patch_embed(x) + x = self.pos_drop(x) + + for layer in self.layers: + x = layer(x) + + x = self.conv_head(x.permute(0, 3, 1, 2)) + return (x, ) + + def forward_features_seq_out(self, x): + x = self.patch_embed(x) + x = self.pos_drop(x) + + seq_out = [] + for layer in self.layers: + x, x_ = layer(x, return_wo_downsample=True) + seq_out.append(x_) + return seq_out + + def forward_clip_projector(self, x): # for InternImage-H/G + xs = self.forward_features_seq_out(x) + x1, x2, x3, x4 = xs + + x1 = x1.permute(0, 3, 1, 2) # NHWC -> NCHW + x2 = x2.permute(0, 3, 1, 2) # NHWC -> NCHW + x3 = x3.permute(0, 3, 1, 2) # NHWC -> NCHW + x4 = x4.permute(0, 3, 1, 2) # NHWC -> NCHW + + x4 = self.dcnv3_head_x4(x4) + x = x4 + x3 = self.dcnv3_head_x3(x3) + x = x + x3 + + x = x.flatten(-2).transpose(1, 2).contiguous() + x = self.clip_projector(x) + x = self.fc_norm(x) + + return (x, ) + + def forward(self, x): + if not self.use_clip_projector: + # for InternImage-T/S/B/L/XL + return self.forward_features(x) + else: + # for InternImage-H/G + return self.forward_clip_projector(x) + + @staticmethod + def _checkpoint_filter(state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + + def internimage_to_mmpretrain(): + for k, v in state_dict['model'].items(): + if 'head.' in k and 'conv_head' not in k: + if 'weight' in k: + new_k = 'head.fc.weight' + else: + new_k = 'head.fc.bias' + elif 'patch_embed' in k: + map_fun = { + 'conv1': '0', + 'norm1': '1', + 'conv2': '3', + 'norm2': '4' + } + new_k = k + for old, new in map_fun.items(): + new_k = new_k.replace(old, new) + new_k = 'backbone.' + new_k + + elif 'levels' in k: + new_k = k.replace('levels', 'layers') + if 'mlp' in new_k: + new_k = new_k.replace('fc1', 'layers.0.0') + new_k = new_k.replace('fc2', 'layers.1') + new_k = 'backbone.' + new_k + elif 'clip_projector.cross_dcn.k_bias' in k: + continue + else: + new_k = 'backbone.' + k + + state_dict[new_k] = state_dict['model'][k] + del state_dict['model'] + + # The original weights need to be converted to mmpretrain format. + # Some modules in the original weights starts with 'levels', + # and in this implement they are replaced with 'layers'. 
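`_checkpoint_filter` above translates checkpoints released for the original OpenGVLab code base into this implementation's naming: `levels` becomes `layers` under a `backbone.` prefix, the stem convs/norms are renamed to their `nn.Sequential` indices, MLP weights follow MMCV's `FFN` layout, and the classifier moves to `head.fc`. A few representative key translations, written out by hand for illustration:

examples = {
    'levels.0.blocks.0.norm1.0.weight':
        'backbone.layers.0.blocks.0.norm1.0.weight',
    'levels.2.blocks.5.mlp.fc1.weight':
        'backbone.layers.2.blocks.5.mlp.layers.0.0.weight',
    'patch_embed.conv1.weight': 'backbone.patch_embed.0.weight',
    'head.weight': 'head.fc.weight',
}
for old, new in examples.items():
    print(f'{old:40s} -> {new}')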
+ if 'model' in state_dict and 'levels.0.blocks.0.norm1.0.weight'\ + in state_dict['model']: + internimage_to_mmpretrain() diff --git a/projects/internimage_classification/ops_dcnv3/functions/__init__.py b/projects/internimage_classification/ops_dcnv3/functions/__init__.py new file mode 100644 index 0000000..bc1f5b6 --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/functions/__init__.py @@ -0,0 +1,10 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +# Copied from +# https://github.com/OpenGVLab/InternImage/blob/master/classification/models/ + +from .dcnv3_func import DCNv3Function, dcnv3_core_pytorch # noqa diff --git a/projects/internimage_classification/ops_dcnv3/functions/dcnv3_func.py b/projects/internimage_classification/ops_dcnv3/functions/dcnv3_func.py new file mode 100644 index 0000000..da1b6af --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/functions/dcnv3_func.py @@ -0,0 +1,248 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +# Copied from +# https://github.com/OpenGVLab/InternImage/blob/master/classification/models/ + +from __future__ import absolute_import, division, print_function +import pkg_resources + +import DCNv3 +import torch +import torch.nn.functional as F +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.cuda.amp import custom_bwd, custom_fwd + +dcn_version = float(pkg_resources.get_distribution('DCNv3').version) + + +class DCNv3Function(Function): + + @staticmethod + @custom_fwd + def forward(ctx, input, offset, mask, kernel_h, kernel_w, stride_h, + stride_w, pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, offset_scale, im2col_step, remove_center): + ctx.kernel_h = kernel_h + ctx.kernel_w = kernel_w + ctx.stride_h = stride_h + ctx.stride_w = stride_w + ctx.pad_h = pad_h + ctx.pad_w = pad_w + ctx.dilation_h = dilation_h + ctx.dilation_w = dilation_w + ctx.group = group + ctx.group_channels = group_channels + ctx.offset_scale = offset_scale + ctx.im2col_step = im2col_step + ctx.remove_center = remove_center + + args = [ + input, offset, mask, kernel_h, kernel_w, stride_h, stride_w, pad_h, + pad_w, dilation_h, dilation_w, group, group_channels, offset_scale, + ctx.im2col_step + ] + if remove_center or dcn_version > 1.0: + args.append(remove_center) + + output = DCNv3.dcnv3_forward(*args) + ctx.save_for_backward(input, offset, mask) + + return output + + @staticmethod + @once_differentiable + @custom_bwd + def backward(ctx, grad_output): + input, offset, mask = ctx.saved_tensors + + args = [ + input, offset, mask, ctx.kernel_h, ctx.kernel_w, ctx.stride_h, + ctx.stride_w, ctx.pad_h, ctx.pad_w, ctx.dilation_h, ctx.dilation_w, + ctx.group, ctx.group_channels, ctx.offset_scale, + grad_output.contiguous(), ctx.im2col_step + ] + if ctx.remove_center or dcn_version > 1.0: + args.append(ctx.remove_center) + + grad_input, grad_offset, grad_mask = \ + DCNv3.dcnv3_backward(*args) + + return grad_input, grad_offset, grad_mask, \ + None, None, None, None, None, None, None,\ + None, None, None, None, None, None + + @staticmethod + def symbolic(g, input, offset, mask, kernel_h, kernel_w, stride_h, + stride_w, pad_h, pad_w, 
dilation_h, dilation_w, group, + group_channels, offset_scale, im2col_step, remove_center): + """Symbolic function for mmdeploy::DCNv3. + + Returns: + DCNv3 op for onnx. + """ + return g.op( + 'mmdeploy::TRTDCNv3', + input, + offset, + mask, + kernel_h_i=int(kernel_h), + kernel_w_i=int(kernel_w), + stride_h_i=int(stride_h), + stride_w_i=int(stride_w), + pad_h_i=int(pad_h), + pad_w_i=int(pad_w), + dilation_h_i=int(dilation_h), + dilation_w_i=int(dilation_w), + group_i=int(group), + group_channels_i=int(group_channels), + offset_scale_f=float(offset_scale), + im2col_step_i=int(im2col_step), + remove_center=int(remove_center), + ) + + +def _get_reference_points(spatial_shapes, + device, + kernel_h, + kernel_w, + dilation_h, + dilation_w, + pad_h=0, + pad_w=0, + stride_h=1, + stride_w=1): + _, H_, W_, _ = spatial_shapes + H_out = (H_ - (dilation_h * (kernel_h - 1) + 1)) // stride_h + 1 + W_out = (W_ - (dilation_w * (kernel_w - 1) + 1)) // stride_w + 1 + + ref_y, ref_x = torch.meshgrid( + torch.linspace( + # pad_h + 0.5, + # H_ - pad_h - 0.5, + (dilation_h * (kernel_h - 1)) // 2 + 0.5, + (dilation_h * (kernel_h - 1)) // 2 + 0.5 + (H_out - 1) * stride_h, + H_out, + dtype=torch.float32, + device=device), + torch.linspace( + # pad_w + 0.5, + # W_ - pad_w - 0.5, + (dilation_w * (kernel_w - 1)) // 2 + 0.5, + (dilation_w * (kernel_w - 1)) // 2 + 0.5 + (W_out - 1) * stride_w, + W_out, + dtype=torch.float32, + device=device)) + ref_y = ref_y.reshape(-1)[None] / H_ + ref_x = ref_x.reshape(-1)[None] / W_ + + ref = torch.stack((ref_x, ref_y), -1).reshape(1, H_out, W_out, 1, 2) + + return ref + + +def _generate_dilation_grids(spatial_shapes, kernel_h, kernel_w, dilation_h, + dilation_w, group, device): + _, H_, W_, _ = spatial_shapes + points_list = [] + x, y = torch.meshgrid( + torch.linspace( + -((dilation_w * (kernel_w - 1)) // 2), + -((dilation_w * (kernel_w - 1)) // 2) + + (kernel_w - 1) * dilation_w, + kernel_w, + dtype=torch.float32, + device=device), + torch.linspace( + -((dilation_h * (kernel_h - 1)) // 2), + -((dilation_h * (kernel_h - 1)) // 2) + + (kernel_h - 1) * dilation_h, + kernel_h, + dtype=torch.float32, + device=device)) + + points_list.extend([x / W_, y / H_]) + grid = torch.stack(points_list, -1).reshape(-1, 1, 2).\ + repeat(1, group, 1).permute(1, 0, 2) + grid = grid.reshape(1, 1, 1, group * kernel_h * kernel_w, 2) + + return grid + + +def remove_center_sampling_locations(sampling_locations, kernel_w, kernel_h): + idx = list(range(sampling_locations.shape[-2])) + C = (kernel_w * kernel_h - 1) // 2 + idx = [i for i in idx if i != C and (i - C) % (C * 2 + 1) != 0] + sampling_locations = sampling_locations[:, :, :, idx, :] + return sampling_locations + + +def dcnv3_core_pytorch(input, offset, mask, kernel_h, kernel_w, stride_h, + stride_w, pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, offset_scale, remove_center): + # for debug and test only, + # need to use cuda version instead + + if remove_center and (kernel_h % 2 == 0 or kernel_w % 2 == 0 + or kernel_w != kernel_h): + raise ValueError( + 'remove_center is only compatible with square odd kernel size.') + + input = F.pad(input, [0, 0, pad_h, pad_h, pad_w, pad_w]) + N_, H_in, W_in, _ = input.shape + _, H_out, W_out, _ = offset.shape + + ref = _get_reference_points(input.shape, input.device, kernel_h, kernel_w, + dilation_h, dilation_w, pad_h, pad_w, stride_h, + stride_w) + grid = _generate_dilation_grids(input.shape, kernel_h, kernel_w, + dilation_h, dilation_w, group, + input.device) + spatial_norm = 
torch.tensor([W_in, H_in]).reshape(1, 1, 1, 2).\ + repeat(1, 1, 1, group*(kernel_h*kernel_w-remove_center)).\ + to(input.device) + + sampling_locations = (ref + grid * offset_scale).repeat(N_, 1, 1, 1, 1) + if remove_center: + sampling_locations = remove_center_sampling_locations( + sampling_locations, kernel_w=kernel_w, kernel_h=kernel_h) + sampling_locations = sampling_locations.flatten(3, 4) + sampling_locations = sampling_locations + \ + offset * offset_scale / spatial_norm + + P_ = kernel_h * kernel_w - remove_center + sampling_grids = 2 * sampling_locations - 1 + # N_, H_in, W_in, group*group_channels -> + # N_, H_in*W_in, group*group_channels -> + # N_, group*group_channels, H_in*W_in -> + # N_*group, group_channels, H_in, W_in + input_ = input.view(N_, H_in*W_in, group*group_channels).transpose(1, 2).\ + reshape(N_*group, group_channels, H_in, W_in) + # N_, H_out, W_out, group*P_*2 -> + # N_, H_out*W_out, group, P_, 2 -> + # N_, group, H_out*W_out, P_, 2 -> + # N_*group, H_out*W_out, P_, 2 + sampling_grid_ = sampling_grids.view(N_, H_out*W_out, group, P_, 2).\ + transpose(1, 2).flatten(0, 1) + # N_*group, group_channels, H_out*W_out, P_ + sampling_input_ = F.grid_sample( + input_, + sampling_grid_, + mode='bilinear', + padding_mode='zeros', + align_corners=False) + + # (N_, H_out, W_out, group*P_) -> + # N_, H_out*W_out, group, P_ -> + # (N_, group, H_out*W_out, P_) -> + # (N_*group, 1, H_out*W_out, P_) + mask = mask.view(N_, H_out*W_out, group, P_).transpose(1, 2).\ + reshape(N_*group, 1, H_out*W_out, P_) + output = (sampling_input_ * mask).sum(-1).view(N_, group * group_channels, + H_out * W_out) + + return output.transpose(1, 2).reshape(N_, H_out, W_out, -1).contiguous() diff --git a/projects/internimage_classification/ops_dcnv3/make.sh b/projects/internimage_classification/ops_dcnv3/make.sh new file mode 100644 index 0000000..31ba0f9 --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/make.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +# Copied from +# https://github.com/OpenGVLab/InternImage/blob/master/classification/models/ + +python setup.py build install diff --git a/projects/internimage_classification/ops_dcnv3/modules/__init__.py b/projects/internimage_classification/ops_dcnv3/modules/__init__.py new file mode 100644 index 0000000..930cd3f --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/modules/__init__.py @@ -0,0 +1,10 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +# Copied from +# https://github.com/OpenGVLab/InternImage/blob/master/classification/models/ + +from .dcnv3 import DCNv3, DCNv3_pytorch # noqa diff --git a/projects/internimage_classification/ops_dcnv3/modules/dcnv3.py b/projects/internimage_classification/ops_dcnv3/modules/dcnv3.py new file mode 100644 index 0000000..47a369a --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/modules/dcnv3.py @@ -0,0 +1,360 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +# Copied from +# 
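`dcnv3_core_pytorch` above is the pure-PyTorch reference path (the code marks it as debug/test only, with the CUDA op used in practice); it expresses deformable sampling as `F.grid_sample` over coordinates normalised to [-1, 1]. A tiny, self-contained example of that primitive, sampling the exact centre of a 4x4 image:

import torch
import torch.nn.functional as F

img = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)  # N, C, H, W
grid = torch.zeros(1, 1, 1, 2)      # one (x, y) location at the image centre
out = F.grid_sample(img, grid, mode='bilinear',
                    padding_mode='zeros', align_corners=False)
print(out)                          # tensor([[[[7.5000]]]]) = mean of 5, 6, 9, 10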
https://github.com/OpenGVLab/InternImage/blob/master/classification/models/ + +from __future__ import absolute_import, division, print_function +import warnings + +import torch +import torch.nn.functional as F +from torch import nn +from torch.nn.init import constant_, xavier_uniform_ + +from ..functions import DCNv3Function, dcnv3_core_pytorch + + +class to_channels_first(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 3, 1, 2) + + +class to_channels_last(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 2, 3, 1) + + +def build_norm_layer(dim, + norm_layer, + in_format='channels_last', + out_format='channels_last', + eps=1e-6): + layers = [] + if norm_layer == 'BN': + if in_format == 'channels_last': + layers.append(to_channels_first()) + layers.append(nn.BatchNorm2d(dim)) + if out_format == 'channels_last': + layers.append(to_channels_last()) + elif norm_layer == 'LN': + if in_format == 'channels_first': + layers.append(to_channels_last()) + layers.append(nn.LayerNorm(dim, eps=eps)) + if out_format == 'channels_first': + layers.append(to_channels_first()) + else: + raise NotImplementedError( + f'build_norm_layer does not support {norm_layer}') + return nn.Sequential(*layers) + + +def build_act_layer(act_layer): + if act_layer == 'ReLU': + return nn.ReLU(inplace=True) + elif act_layer == 'SiLU': + return nn.SiLU(inplace=True) + elif act_layer == 'GELU': + return nn.GELU() + + raise NotImplementedError(f'build_act_layer does not support {act_layer}') + + +def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format( + n, type(n))) + + return (n & (n - 1) == 0) and n != 0 + + +class CenterFeatureScaleModule(nn.Module): + + def forward(self, query, center_feature_scale_proj_weight, + center_feature_scale_proj_bias): + center_feature_scale = F.linear( + query, + weight=center_feature_scale_proj_weight, + bias=center_feature_scale_proj_bias).sigmoid() + return center_feature_scale + + +class DCNv3_pytorch(nn.Module): + + def __init__( + self, + channels=64, + kernel_size=3, + dw_kernel_size=None, + stride=1, + pad=1, + dilation=1, + group=4, + offset_scale=1.0, + act_layer='GELU', + norm_layer='LN', + center_feature_scale=False, + remove_center=False, + ): + """DCNv3 Module. 
+ + :param channels + :param kernel_size + :param stride + :param pad + :param dilation + :param group + :param offset_scale + :param act_layer + :param norm_layer + """ + super().__init__() + if channels % group != 0: + raise ValueError(f'channels must be divisible by group, ' + f'but got {channels} and {group}') + _d_per_group = channels // group + dw_kernel_size = dw_kernel_size if dw_kernel_size is not None\ + else kernel_size + # you'd better set _d_per_group to a power of 2 + # which is more efficient in our CUDA implementation + if not _is_power_of_2(_d_per_group): + warnings.warn( + "You'd better set channels in DCNv3 " + 'to make the dimension of each attention head a power of 2 ' + 'which is more efficient in our CUDA implementation.') + + self.offset_scale = offset_scale + self.channels = channels + self.kernel_size = kernel_size + self.dw_kernel_size = dw_kernel_size + self.stride = stride + self.dilation = dilation + self.pad = pad + self.group = group + self.group_channels = channels // group + self.offset_scale = offset_scale + self.center_feature_scale = center_feature_scale + self.remove_center = int(remove_center) + + self.dw_conv = nn.Sequential( + nn.Conv2d( + channels, + channels, + kernel_size=dw_kernel_size, + stride=1, + padding=(dw_kernel_size - 1) // 2, + groups=channels), + build_norm_layer(channels, norm_layer, 'channels_first', + 'channels_last'), build_act_layer(act_layer)) + self.offset = nn.Linear( + channels, + group * (kernel_size * kernel_size - remove_center) * 2) + self.mask = nn.Linear( + channels, group * (kernel_size * kernel_size - remove_center)) + self.input_proj = nn.Linear(channels, channels) + self.output_proj = nn.Linear(channels, channels) + self._reset_parameters() + + if center_feature_scale: + self.center_feature_scale_proj_weight = nn.Parameter( + torch.zeros((group, channels), dtype=torch.float)) + self.center_feature_scale_proj_bias = nn.Parameter( + torch.tensor(0.0, dtype=torch.float).view( + (1, )).repeat(group, )) + self.center_feature_scale_module = CenterFeatureScaleModule() + + def _reset_parameters(self): + constant_(self.offset.weight.data, 0.) + constant_(self.offset.bias.data, 0.) + constant_(self.mask.weight.data, 0.) + constant_(self.mask.bias.data, 0.) + xavier_uniform_(self.input_proj.weight.data) + constant_(self.input_proj.bias.data, 0.) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.) 
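The `_reset_parameters` above, together with the `offset` and `mask` linear layers, defines the starting behaviour of a DCNv3 layer: offsets and mask logits are zero-initialised, so sampling begins on the regular dilated grid with uniform softmax weights, and the offset/mask heads emit `group * K * 2` and `group * K` values per pixel with `K = kernel_size**2 - remove_center`. A small sanity check of both facts, using the tiny variant's first stage (channels=64, group=4, 3x3 kernel, centre kept):

import torch
import torch.nn.functional as F

channels, group, k, remove_center = 64, 4, 3, 0
points = k * k - remove_center             # K = 9 sampling points per group
print(group * points * 2, group * points)  # 72 offset values, 36 mask logits

mask_logits = torch.zeros(1, group * points).reshape(1, group, points)
print(F.softmax(mask_logits, dim=-1)[0, 0])  # nine equal weights of 1/9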
+ + def forward(self, input): + """ + :param query (N, H, W, C) + :return output (N, H, W, C) + """ + N, H, W, _ = input.shape + + x = self.input_proj(input) + x_proj = x + + x1 = input.permute(0, 3, 1, 2) + x1 = self.dw_conv(x1) + offset = self.offset(x1) + mask = self.mask(x1).reshape(N, H, W, self.group, -1) + mask = F.softmax(mask, -1).reshape(N, H, W, -1) + + x = dcnv3_core_pytorch(x, offset, mask, self.kernel_size, + self.kernel_size, self.stride, self.stride, + self.pad, self.pad, self.dilation, + self.dilation, self.group, self.group_channels, + self.offset_scale, self.remove_center) + if self.center_feature_scale: + center_feature_scale = self.center_feature_scale_module( + x1, self.center_feature_scale_proj_weight, + self.center_feature_scale_proj_bias) + # N, H, W, groups -> + # N, H, W, groups, 1 -> + # N, H, W, groups, _d_per_group -> + # N, H, W, channels + center_feature_scale = center_feature_scale[..., None].repeat( + 1, 1, 1, 1, self.channels // self.group).flatten(-2) + x = x * (1 - center_feature_scale) + x_proj * center_feature_scale + x = self.output_proj(x) + + return x + + +class DCNv3(nn.Module): + + def __init__( + self, + channels=64, + kernel_size=3, + dw_kernel_size=None, + stride=1, + pad=1, + dilation=1, + group=4, + offset_scale=1.0, + act_layer='GELU', + norm_layer='LN', + center_feature_scale=False, + remove_center=False, + ): + """DCNv3 Module. + + :param channels + :param kernel_size + :param stride + :param pad + :param dilation + :param group + :param offset_scale + :param act_layer + :param norm_layer + """ + super().__init__() + if channels % group != 0: + raise ValueError(f'channels must be divisible by group, ' + f'but got {channels} and {group}') + _d_per_group = channels // group + dw_kernel_size = dw_kernel_size if dw_kernel_size is not None\ + else kernel_size + # you'd better set _d_per_group to a power of 2 + # which is more efficient in our CUDA implementation + if not _is_power_of_2(_d_per_group): + warnings.warn( + "You'd better set channels in DCNv3 " + 'to make the dimension of each attention head a power of 2 ' + 'which is more efficient in our CUDA implementation.') + + self.offset_scale = offset_scale + self.channels = channels + self.kernel_size = kernel_size + self.dw_kernel_size = dw_kernel_size + self.stride = stride + self.dilation = dilation + self.pad = pad + self.group = group + self.group_channels = channels // group + self.offset_scale = offset_scale + self.center_feature_scale = center_feature_scale + self.remove_center = int(remove_center) + + if self.remove_center and self.kernel_size % 2 == 0: + raise ValueError( + 'remove_center is only compatible with odd kernel size.') + + self.dw_conv = nn.Sequential( + nn.Conv2d( + channels, + channels, + kernel_size=dw_kernel_size, + stride=1, + padding=(dw_kernel_size - 1) // 2, + groups=channels), + build_norm_layer(channels, norm_layer, 'channels_first', + 'channels_last'), build_act_layer(act_layer)) + self.offset = nn.Linear( + channels, + group * (kernel_size * kernel_size - remove_center) * 2) + self.mask = nn.Linear( + channels, group * (kernel_size * kernel_size - remove_center)) + self.input_proj = nn.Linear(channels, channels) + self.output_proj = nn.Linear(channels, channels) + self._reset_parameters() + + if center_feature_scale: + self.center_feature_scale_proj_weight = nn.Parameter( + torch.zeros((group, channels), dtype=torch.float)) + self.center_feature_scale_proj_bias = nn.Parameter( + torch.tensor(0.0, dtype=torch.float).view( + (1, )).repeat(group, )) + 
self.center_feature_scale_module = CenterFeatureScaleModule() + + def _reset_parameters(self): + constant_(self.offset.weight.data, 0.) + constant_(self.offset.bias.data, 0.) + constant_(self.mask.weight.data, 0.) + constant_(self.mask.bias.data, 0.) + xavier_uniform_(self.input_proj.weight.data) + constant_(self.input_proj.bias.data, 0.) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.) + + def forward(self, input): + """ + :param query (N, H, W, C) + :return output (N, H, W, C) + """ + N, H, W, _ = input.shape + + x = self.input_proj(input) + x_proj = x + dtype = x.dtype + + x1 = input.permute(0, 3, 1, 2) + x1 = self.dw_conv(x1) + offset = self.offset(x1) + mask = self.mask(x1).reshape(N, H, W, self.group, -1) + mask = F.softmax(mask, -1) + mask = mask.reshape(N, H, W, -1).type(dtype) + + x = DCNv3Function.apply(x, offset, mask, self.kernel_size, + self.kernel_size, self.stride, self.stride, + self.pad, self.pad, self.dilation, + self.dilation, self.group, self.group_channels, + self.offset_scale, 256, self.remove_center) + + if self.center_feature_scale: + center_feature_scale = self.center_feature_scale_module( + x1, self.center_feature_scale_proj_weight, + self.center_feature_scale_proj_bias) + # N, H, W, groups -> + # N, H, W, groups, 1 -> + # N, H, W, groups, _d_per_group -> + # N, H, W, channels + center_feature_scale = center_feature_scale[..., None].repeat( + 1, 1, 1, 1, self.channels // self.group).flatten(-2) + x = x * (1 - center_feature_scale) + x_proj * center_feature_scale + x = self.output_proj(x) + + return x diff --git a/projects/internimage_classification/ops_dcnv3/setup.py b/projects/internimage_classification/ops_dcnv3/setup.py new file mode 100644 index 0000000..34f8e7a --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/setup.py @@ -0,0 +1,72 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +# Copied from +# https://github.com/OpenGVLab/InternImage/blob/master/classification/models/ + +import glob +import os +from setuptools import find_packages, setup + +import torch +from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension + +requirements = ['torch', 'torchvision'] + + +def get_extensions(): + this_dir = os.path.dirname(os.path.abspath(__file__)) + extensions_dir = os.path.join(this_dir, 'src') + + main_file = glob.glob(os.path.join(extensions_dir, '*.cpp')) + source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp')) + source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu')) + + sources = main_file + source_cpu + extension = CppExtension + extra_compile_args = {'cxx': []} + define_macros = [] + + if torch.cuda.is_available() and CUDA_HOME is not None: + extension = CUDAExtension + sources += source_cuda + define_macros += [('WITH_CUDA', None)] + extra_compile_args['nvcc'] = [ + # "-DCUDA_HAS_FP16=1", + # "-D__CUDA_NO_HALF_OPERATORS__", + # "-D__CUDA_NO_HALF_CONVERSIONS__", + # "-D__CUDA_NO_HALF2_OPERATORS__", + ] + else: + raise NotImplementedError('Cuda is not availabel') + + sources = [os.path.join(extensions_dir, s) for s in sources] + include_dirs = [extensions_dir] + ext_modules = [ + extension( + 'DCNv3', + sources, + include_dirs=include_dirs, + define_macros=define_macros, + extra_compile_args=extra_compile_args, + ) + ] + return ext_modules + + +setup( + name='DCNv3', + 
version='1.1', + author='InternImage', + url='https://github.com/OpenGVLab/InternImage', + description='PyTorch Wrapper for CUDA Functions of DCNv3', + packages=find_packages(exclude=( + 'configs', + 'tests', + )), + ext_modules=get_extensions(), + cmdclass={'build_ext': torch.utils.cpp_extension.BuildExtension}, +) diff --git a/projects/internimage_classification/ops_dcnv3/src/cpu/dcnv3_cpu.cpp b/projects/internimage_classification/ops_dcnv3/src/cpu/dcnv3_cpu.cpp new file mode 100644 index 0000000..a3bddc1 --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/src/cpu/dcnv3_cpu.cpp @@ -0,0 +1,37 @@ +/*! +************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include + +#include +#include + +at::Tensor dcnv3_cpu_forward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, const float offset_scale, + const int im2col_step) { + AT_ERROR("Not implement on cpu"); +} + +std::vector +dcnv3_cpu_backward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + const int pad_h, const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, const float offset_scale, + const at::Tensor &grad_output, const int im2col_step) { + AT_ERROR("Not implement on cpu"); +} diff --git a/projects/internimage_classification/ops_dcnv3/src/cpu/dcnv3_cpu.h b/projects/internimage_classification/ops_dcnv3/src/cpu/dcnv3_cpu.h new file mode 100644 index 0000000..d457bcb --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/src/cpu/dcnv3_cpu.h @@ -0,0 +1,31 @@ +/*! 
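The `setup.py` above builds the `DCNv3` CUDA extension that `dcnv3_func.py` imports at module load time; it refuses to build without CUDA, and the CPU sources are stubs that raise. After running `make.sh` inside `ops_dcnv3/`, a quick import check such as the following (a convenience sketch, not part of the project) confirms the build:

import torch

try:
    import DCNv3   # compiled extension produced by `python setup.py build install`
    print('DCNv3 extension importable; CUDA available:', torch.cuda.is_available())
except ImportError:
    print('DCNv3 extension missing; build it first with ops_dcnv3/make.sh')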
+************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once +#include + +at::Tensor dcnv3_cpu_forward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, const float offset_scale, + const int im2col_step); + +std::vector +dcnv3_cpu_backward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + const int pad_h, const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, const float offset_scale, + const at::Tensor &grad_output, const int im2col_step); diff --git a/projects/internimage_classification/ops_dcnv3/src/cuda/dcnv3_cuda.cu b/projects/internimage_classification/ops_dcnv3/src/cuda/dcnv3_cuda.cu new file mode 100644 index 0000000..f793248 --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/src/cuda/dcnv3_cuda.cu @@ -0,0 +1,174 @@ +/*! +************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include "cuda/dcnv3_im2col_cuda.cuh" +#include + +#include +#include +#include +#include +#include + +at::Tensor dcnv3_cuda_forward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, + const float offset_scale, const int im2col_step, const int remove_center) { + AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); + AT_ASSERTM(offset.is_contiguous(), "offset tensor has to be contiguous"); + AT_ASSERTM(mask.is_contiguous(), "mask tensor has to be contiguous"); + AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); + AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); + + const int batch = input.size(0); + const int height_in = input.size(1); + const int width_in = input.size(2); + const int channels = input.size(3); + const int height_out = + (height_in + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + + 1; + const int width_out = + (width_in + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + + 1; + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, + "batch(%d) must divide 
im2col_step(%d)", batch, im2col_step_); + AT_ASSERTM( + channels == (group * group_channels), + "Input channels and group times group channels won't match: (%d vs %d).", + channels, group * group_channels); + + auto output = + at::zeros({batch, height_out, width_out, group * group_channels}, + input.options()); + + const int batch_n = im2col_step_; + auto output_n = output.view({batch / batch_n, batch_n, height_out, + width_out, group * group_channels}); + auto per_input_size = height_in * width_in * group * group_channels; + auto per_offset_size = + height_out * width_out * group * (kernel_h * kernel_w - remove_center) * 2; + auto per_mask_size = height_out * width_out * group * (kernel_h * kernel_w - remove_center); + for (int n = 0; n < batch / im2col_step_; ++n) { + auto columns = output_n.select(0, n); + // AT_DISPATCH_FLOATING_TYPES( + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.type(), "ms_deform_attn_forward_cuda", ([&] { + dcnv3_im2col_cuda( + at::cuda::getCurrentCUDAStream(), + input.data() + n * im2col_step_ * per_input_size, + offset.data() + + n * im2col_step_ * per_offset_size, + mask.data() + n * im2col_step_ * per_mask_size, + columns.data(), kernel_h, kernel_w, stride_h, + stride_w, pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, batch_n, height_in, width_in, height_out, + width_out, offset_scale, remove_center); + })); + } + + return output; +} + +std::vector +dcnv3_cuda_backward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + const int pad_h, const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, const float offset_scale, + const at::Tensor &grad_output, const int im2col_step, const int remove_center) { + + AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); + AT_ASSERTM(offset.is_contiguous(), "offset tensor has to be contiguous"); + AT_ASSERTM(mask.is_contiguous(), "mask tensor has to be contiguous"); + AT_ASSERTM(grad_output.is_contiguous(), + "grad_output tensor has to be contiguous"); + AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); + AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); + AT_ASSERTM(grad_output.type().is_cuda(), + "grad_output must be a CUDA tensor"); + + const int batch = input.size(0); + const int height_in = input.size(1); + const int width_in = input.size(2); + const int channels = input.size(3); + const int height_out = + (height_in + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + + 1; + const int width_out = + (width_in + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + + 1; + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, + "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + AT_ASSERTM( + channels == (group * group_channels), + "Input channels and group times group channels won't match: (%d vs %d).", + channels, group * group_channels); + + auto dtype = input.dtype(); + if (dtype == at::kHalf) { + dtype = at::kFloat; + } + + auto grad_input = at::zeros_like(input, dtype); + auto grad_offset = at::zeros_like(offset, dtype); + auto grad_mask = at::zeros_like(mask, dtype); + + const int batch_n = im2col_step_; + auto per_input_size = height_in * width_in * group * group_channels; + auto per_offset_size = + height_out * width_out * group * (kernel_h * kernel_w - 
remove_center) * 2; + auto per_mask_size = height_out * width_out * group * (kernel_h * kernel_w - remove_center); + auto grad_output_n = + grad_output.view({batch / im2col_step_, batch_n, height_out * width_out, + group, group_channels}); + + for (int n = 0; n < batch / im2col_step_; ++n) { + auto grad_output_g = grad_output_n.select(0, n); + // AT_DISPATCH_FLOATING_TYPES( + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.type(), "ms_deform_attn_backward_cuda", ([&] { + dcnv3_col2im_cuda( + at::cuda::getCurrentCUDAStream(), + grad_output_g.data(), + input.data() + n * im2col_step_ * per_input_size, + offset.data() + + n * im2col_step_ * per_offset_size, + mask.data() + n * im2col_step_ * per_mask_size, + kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, + dilation_h, dilation_w, group, group_channels, batch_n, + height_in, width_in, height_out, width_out, offset_scale, remove_center, + grad_input.data() + + n * im2col_step_ * per_input_size, + grad_offset.data() + + n * im2col_step_ * per_offset_size, + grad_mask.data() + + n * im2col_step_ * per_mask_size); + })); + } + + if (input.dtype() == torch::kHalf) { + return {grad_input.to(torch::kHalf), grad_offset.to(torch::kHalf), + grad_mask.to(torch::kHalf)}; + } else { + return {grad_input, grad_offset, grad_mask}; + } +} diff --git a/projects/internimage_classification/ops_dcnv3/src/cuda/dcnv3_cuda.h b/projects/internimage_classification/ops_dcnv3/src/cuda/dcnv3_cuda.h new file mode 100644 index 0000000..d7ac024 --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/src/cuda/dcnv3_cuda.h @@ -0,0 +1,31 @@ +/*! +************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once +#include + +at::Tensor dcnv3_cuda_forward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, + const float offset_scale, const int im2col_step, const int remove_center); + +std::vector +dcnv3_cuda_backward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + const int pad_h, const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, const float offset_scale, + const at::Tensor &grad_output, const int im2col_step, const int remove_center); diff --git a/projects/internimage_classification/ops_dcnv3/src/cuda/dcnv3_im2col_cuda.cuh b/projects/internimage_classification/ops_dcnv3/src/cuda/dcnv3_im2col_cuda.cuh new file mode 100644 index 0000000..ab6da3c --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/src/cuda/dcnv3_im2col_cuda.cuh @@ -0,0 +1,1094 @@ +/*! 
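Both CUDA entry points above derive the output resolution with the standard convolution size formula before launching the kernels; with the kernel 3 / pad 1 / stride 1 / dilation 1 settings used by the DCNv3 layers, spatial size is preserved and downsampling is left to the stride-2 stem and `DownsampleLayer` convs. The formula, checked in Python:

def out_size(size, kernel=3, pad=1, stride=1, dilation=1):
    """(size + 2*pad - (dilation*(kernel-1) + 1)) // stride + 1, as in dcnv3_cuda_forward."""
    return (size + 2 * pad - (dilation * (kernel - 1) + 1)) // stride + 1

print(out_size(56))            # 56 -> a DCNv3 layer keeps the resolution
print(out_size(56, stride=2))  # 28 -> what a stride-2 conv would give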
+************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include +#include +#include + +#include +#include +#include +#include + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 256; +inline int GET_BLOCKS(const int N, const int num_threads) { + return (N + num_threads - 1) / num_threads; +} + +#define opmath_t at::opmath_type + +template +__device__ opmath_t dcnv3_im2col_bilinear(const scalar_t *&bottom_data, + const int &height, const int &width, + const int &group, + const int &group_channels, + const opmath_t &h, const opmath_t &w, + const int &g, const int &c) { + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const opmath_t lh = h - h_low; + const opmath_t lw = w - w_low; + const opmath_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = group * group_channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = g * group_channels + c; + + opmath_t v1 = 0; + if (h_low >= 0 && w_low >= 0) { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + } + opmath_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + } + opmath_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + } + opmath_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + } + const opmath_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + const opmath_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ void dcnv3_col2im_bilinear( + const scalar_t *&bottom_data, const int &height, const int &width, + const int &nheads, const int &group_channels, const opmath_t &h, + const opmath_t &w, const int &m, const int &c, const opmath_t offset_scale, + const opmath_t &top_grad, const opmath_t &mask, opmath_t *&grad_im, + opmath_t *grad_offset, opmath_t *grad_mask) { + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const opmath_t lh = h - h_low; + const opmath_t lw = w - w_low; + const opmath_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * group_channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * group_channels + c; + + const opmath_t w1 = hh * hw, w2 = 
hh * lw, w3 = lh * hw, w4 = lh * lw; + const opmath_t top_grad_im = top_grad * mask; + opmath_t grad_h_weight = 0, grad_w_weight = 0; + + opmath_t v1 = 0; + if (h_low >= 0 && w_low >= 0) { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_im + ptr1, w1 * top_grad_im); + } + opmath_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_im + ptr2, w2 * top_grad_im); + } + opmath_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_im + ptr3, w3 * top_grad_im); + } + opmath_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_im + ptr4, w4 * top_grad_im); + } + + const opmath_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + *grad_mask = top_grad * val; + *grad_offset = offset_scale * grad_w_weight * top_grad_im; + *(grad_offset + 1) = offset_scale * grad_h_weight * top_grad_im; +} + +template +__device__ void dcnv3_col2im_bilinear_gm( + const scalar_t *&bottom_data, const int &height, const int &width, + const int &nheads, const int &group_channels, const opmath_t &h, + const opmath_t &w, const int &m, const int &c, const opmath_t offset_scale, + const opmath_t &top_grad, const opmath_t &mask, opmath_t *&grad_im, + opmath_t *grad_offset, opmath_t *grad_mask) { + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const opmath_t lh = h - h_low; + const opmath_t lw = w - w_low; + const opmath_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * group_channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * group_channels + c; + + const opmath_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const opmath_t top_grad_im = top_grad * mask; + opmath_t grad_h_weight = 0, grad_w_weight = 0; + + opmath_t v1 = 0; + if (h_low >= 0 && w_low >= 0) { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_im + ptr1, w1 * top_grad_im); + } + opmath_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_im + ptr2, w2 * top_grad_im); + } + opmath_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_im + ptr3, w3 * top_grad_im); + } + opmath_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + 
grad_w_weight += lh * v4; + atomicAdd(grad_im + ptr4, w4 * top_grad_im); + } + + const opmath_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + atomicAdd(grad_mask, top_grad * val); + atomicAdd(grad_offset, offset_scale * grad_w_weight * top_grad_im); + atomicAdd(grad_offset + 1, offset_scale * grad_h_weight * top_grad_im); +} + +template +__global__ void dcnv3_im2col_gpu_kernel( + const int num_kernels, const scalar_t *data_im, const scalar_t *data_offset, + const scalar_t *data_mask, scalar_t *data_col, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, const int remove_center) { + CUDA_KERNEL_LOOP(index, num_kernels) { + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const int input_size = height_in * width_in; + scalar_t *data_col_ptr = data_col + index; + const int kernel_size = kernel_h * kernel_w - remove_center; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int qid_stride = group * group_channels; + opmath_t col = 0; + const scalar_t *data_im_ptr = data_im + b_col * input_size * qid_stride; + // top-left + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + + const int center_h = kernel_h / 2; + const int center_w = kernel_w / 2; + + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + // if not remove center, or remove center and not the center + if (i!=center_w || j!=center_h || !remove_center) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + col += dcnv3_im2col_bilinear( + data_im_ptr, height_in, width_in, group, + group_channels, loc_h, loc_w, g_col, c_col) * + weight; + } + data_weight_ptr += 1; + data_loc_w_ptr += 2; + } + } + } + *data_col_ptr = col; + } +} + +// debug +template +__global__ void dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1( + const int num_kernels, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, const int remove_center, opmath_t *grad_im, opmath_t *grad_offset, + opmath_t *grad_mask) { + CUDA_KERNEL_LOOP(index, num_kernels) { + __shared__ 
opmath_t cache_grad_offset[blockSize * 2]; + __shared__ opmath_t cache_grad_mask[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const opmath_t top_grad = grad_col[index]; + const int input_size = height_in * width_in; + const int kernel_size = kernel_h * kernel_w - remove_center; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_offset += grad_sampling_ptr << 1; + grad_mask += grad_sampling_ptr; + const int qid_stride = group * group_channels; + const int im_ptr_offset = b_col * input_size * qid_stride; + const scalar_t *data_im_ptr = data_im + im_ptr_offset; + opmath_t *grad_im_ptr = grad_im + im_ptr_offset; + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + + const int center_h = kernel_h / 2; + const int center_w = kernel_w / 2; + + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + // if not remove center, or remove center and not the center + if (i!=center_w || j!=center_h || !remove_center) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + *(cache_grad_offset + (threadIdx.x << 1)) = 0; + *(cache_grad_offset + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_mask + threadIdx.x) = 0; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + dcnv3_col2im_bilinear( + data_im_ptr, height_in, width_in, group, group_channels, + loc_h, loc_w, g_col, c_col, offset_scale, top_grad, + weight, grad_im_ptr, + cache_grad_offset + (threadIdx.x << 1), + cache_grad_mask + threadIdx.x); + } + + __syncthreads(); + if (tid == 0) { + opmath_t _grad_w = cache_grad_offset[0], + _grad_h = cache_grad_offset[1], + _grad_a = cache_grad_mask[0]; + int sid = 2; + for (unsigned int tid = 1; tid < blockSize; ++tid) { + _grad_w += cache_grad_offset[sid]; + _grad_h += cache_grad_offset[sid + 1]; + _grad_a += cache_grad_mask[tid]; + sid += 2; + } + + *grad_offset = _grad_w; + *(grad_offset + 1) = _grad_h; + *grad_mask = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_mask += 1; + grad_offset += 2; + } + } + } + } +} + +template +__global__ void dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2( + const int num_kernels, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, const int remove_center, opmath_t 
*grad_im, opmath_t *grad_offset, + opmath_t *grad_mask) { + CUDA_KERNEL_LOOP(index, num_kernels) { + __shared__ opmath_t cache_grad_offset[blockSize * 2]; + __shared__ opmath_t cache_grad_mask[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const opmath_t top_grad = grad_col[index]; + const int input_size = height_in * width_in; + const int kernel_size = kernel_h * kernel_w - remove_center; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_offset += grad_sampling_ptr << 1; + grad_mask += grad_sampling_ptr; + const int qid_stride = group * group_channels; + const int im_ptr_offset = b_col * input_size * qid_stride; + const scalar_t *data_im_ptr = data_im + im_ptr_offset; + opmath_t *grad_im_ptr = grad_im + im_ptr_offset; + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + + const int center_h = kernel_h / 2; + const int center_w = kernel_w / 2; + + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + // if not remove center, or remove center and not the center + if (i!=center_w || j!=center_h || !remove_center) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + *(cache_grad_offset + (threadIdx.x << 1)) = 0; + *(cache_grad_offset + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_mask + threadIdx.x) = 0; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + dcnv3_col2im_bilinear( + data_im_ptr, height_in, width_in, group, group_channels, + loc_h, loc_w, g_col, c_col, offset_scale, top_grad, + weight, grad_im_ptr, + cache_grad_offset + (threadIdx.x << 1), + cache_grad_mask + threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s = blockSize / 2; s > 0; s >>= 1) { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_mask[tid] += cache_grad_mask[tid + s]; + cache_grad_offset[xid1] += cache_grad_offset[xid2]; + cache_grad_offset[xid1 + 1] += + cache_grad_offset[xid2 + 1]; + } + __syncthreads(); + } + + if (tid == 0) { + *grad_offset = cache_grad_offset[0]; + *(grad_offset + 1) = cache_grad_offset[1]; + *grad_mask = cache_grad_mask[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_mask += 1; + grad_offset += 2; + } + } + } + } +} + +template +__global__ void dcnv3_col2im_gpu_kernel_shm_reduce_v1( + const int num_kernels, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int 
group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, const int remove_center, opmath_t *grad_im, opmath_t *grad_offset, + opmath_t *grad_mask) { + CUDA_KERNEL_LOOP(index, num_kernels) { + extern __shared__ int _s[]; + opmath_t *cache_grad_offset = (opmath_t *)_s; + opmath_t *cache_grad_mask = cache_grad_offset + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const opmath_t top_grad = grad_col[index]; + const int input_size = height_in * width_in; + const int kernel_size = kernel_h * kernel_w - remove_center; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_offset += grad_sampling_ptr << 1; + grad_mask += grad_sampling_ptr; + const int qid_stride = group * group_channels; + const int im_ptr_offset = b_col * input_size * qid_stride; + const scalar_t *data_im_ptr = data_im + im_ptr_offset; + opmath_t *grad_im_ptr = grad_im + im_ptr_offset; + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + + const int center_h = kernel_h / 2; + const int center_w = kernel_w / 2; + + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + // if not remove center, or remove center and not the center + if (i!=center_w || j!=center_h || !remove_center) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + *(cache_grad_offset + (threadIdx.x << 1)) = 0; + *(cache_grad_offset + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_mask + threadIdx.x) = 0; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + dcnv3_col2im_bilinear( + data_im_ptr, height_in, width_in, group, group_channels, + loc_h, loc_w, g_col, c_col, offset_scale, top_grad, + weight, grad_im_ptr, + cache_grad_offset + (threadIdx.x << 1), + cache_grad_mask + threadIdx.x); + } + + __syncthreads(); + if (tid == 0) { + opmath_t _grad_w = cache_grad_offset[0], + _grad_h = cache_grad_offset[1], + _grad_a = cache_grad_mask[0]; + int sid = 2; + for (unsigned int tid = 1; tid < blockDim.x; ++tid) { + _grad_w += cache_grad_offset[sid]; + _grad_h += cache_grad_offset[sid + 1]; + _grad_a += cache_grad_mask[tid]; + sid += 2; + } + + *grad_offset = _grad_w; + *(grad_offset + 1) = _grad_h; + *grad_mask = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_mask += 1; + grad_offset += 2; + } + } + } + } +} + +template +__global__ void dcnv3_col2im_gpu_kernel_shm_reduce_v2( + const int num_kernels, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int 
kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, const int remove_center, opmath_t *grad_im, opmath_t *grad_offset, + opmath_t *grad_mask) { + CUDA_KERNEL_LOOP(index, num_kernels) { + extern __shared__ int _s[]; + opmath_t *cache_grad_offset = (opmath_t *)_s; + opmath_t *cache_grad_mask = cache_grad_offset + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const opmath_t top_grad = grad_col[index]; + const int input_size = height_in * width_in; + const int kernel_size = kernel_h * kernel_w - remove_center; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_offset += grad_sampling_ptr << 1; + grad_mask += grad_sampling_ptr; + const int qid_stride = group * group_channels; + const int im_ptr_offset = b_col * input_size * qid_stride; + const scalar_t *data_im_ptr = data_im + im_ptr_offset; + opmath_t *grad_im_ptr = grad_im + im_ptr_offset; + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + + const int center_h = kernel_h / 2; + const int center_w = kernel_w / 2; + + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + // if not remove center, or remove center and not the center + if (i!=center_w || j!=center_h || !remove_center) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + *(cache_grad_offset + (threadIdx.x << 1)) = 0; + *(cache_grad_offset + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_mask + threadIdx.x) = 0; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + dcnv3_col2im_bilinear( + data_im_ptr, height_in, width_in, group, group_channels, + loc_h, loc_w, g_col, c_col, offset_scale, top_grad, + weight, grad_im_ptr, + cache_grad_offset + (threadIdx.x << 1), + cache_grad_mask + threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s = blockDim.x / 2, spre = blockDim.x; s > 0; + s >>= 1, spre >>= 1) { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_mask[tid] += cache_grad_mask[tid + s]; + cache_grad_offset[xid1] += cache_grad_offset[xid2]; + cache_grad_offset[xid1 + 1] += + cache_grad_offset[xid2 + 1]; + if (tid + (s << 1) < spre) { + cache_grad_mask[tid] += + cache_grad_mask[tid + (s << 1)]; + cache_grad_offset[xid1] += + cache_grad_offset[xid2 + (s << 1)]; + cache_grad_offset[xid1 + 1] += + cache_grad_offset[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid 
== 0) { + *grad_offset = cache_grad_offset[0]; + *(grad_offset + 1) = cache_grad_offset[1]; + *grad_mask = cache_grad_mask[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_mask += 1; + grad_offset += 2; + } + } + } + } +} + +template +__global__ void dcnv3_col2im_gpu_kernel_shm_reduce_v2_multi_blocks( + const int num_kernels, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, const int remove_center, opmath_t *grad_im, opmath_t *grad_offset, + opmath_t *grad_mask) { + CUDA_KERNEL_LOOP(index, num_kernels) { + extern __shared__ int _s[]; + opmath_t *cache_grad_offset = (opmath_t *)_s; + opmath_t *cache_grad_mask = cache_grad_offset + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const opmath_t top_grad = grad_col[index]; + const int input_size = height_in * width_in; + const int kernel_size = kernel_h * kernel_w - remove_center; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_offset += grad_sampling_ptr << 1; + grad_mask += grad_sampling_ptr; + const int qid_stride = group * group_channels; + const int im_ptr_offset = b_col * input_size * qid_stride; + const scalar_t *data_im_ptr = data_im + im_ptr_offset; + opmath_t *grad_im_ptr = grad_im + im_ptr_offset; + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + + const int center_h = kernel_h / 2; + const int center_w = kernel_w / 2; + + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + // if not remove center, or remove center and not the center + if (i!=center_w || j!=center_h || !remove_center) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + *(cache_grad_offset + (threadIdx.x << 1)) = 0; + *(cache_grad_offset + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_mask + threadIdx.x) = 0; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + dcnv3_col2im_bilinear( + data_im_ptr, height_in, width_in, group, group_channels, + loc_h, loc_w, g_col, c_col, offset_scale, top_grad, + weight, grad_im_ptr, + cache_grad_offset + (threadIdx.x << 1), + cache_grad_mask + threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s = blockDim.x / 2, spre = blockDim.x; s > 0; + s >>= 1, spre >>= 1) { + if (tid < s) { + const unsigned int xid1 
= tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_mask[tid] += cache_grad_mask[tid + s]; + cache_grad_offset[xid1] += cache_grad_offset[xid2]; + cache_grad_offset[xid1 + 1] += + cache_grad_offset[xid2 + 1]; + if (tid + (s << 1) < spre) { + cache_grad_mask[tid] += + cache_grad_mask[tid + (s << 1)]; + cache_grad_offset[xid1] += + cache_grad_offset[xid2 + (s << 1)]; + cache_grad_offset[xid1 + 1] += + cache_grad_offset[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) { + atomicAdd(grad_offset, cache_grad_offset[0]); + atomicAdd(grad_offset + 1, cache_grad_offset[1]); + atomicAdd(grad_mask, cache_grad_mask[0]); + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_mask += 1; + grad_offset += 2; + } + } + } + } +} + +template +__global__ void dcnv3_col2im_gpu_kernel_gm( + const int num_kernels, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, const int remove_center, opmath_t *grad_im, opmath_t *grad_offset, + opmath_t *grad_mask) { + CUDA_KERNEL_LOOP(index, num_kernels) { + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const opmath_t top_grad = grad_col[index]; + const int input_size = height_in * width_in; + const int kernel_size = kernel_h * kernel_w - remove_center; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_offset += grad_sampling_ptr << 1; + grad_mask += grad_sampling_ptr; + const int qid_stride = group * group_channels; + const int im_ptr_offset = b_col * input_size * qid_stride; + const scalar_t *data_im_ptr = data_im + im_ptr_offset; + opmath_t *grad_im_ptr = grad_im + im_ptr_offset; + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + + const int center_h = kernel_h / 2; + const int center_w = kernel_w / 2; + + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + // if not remove center, or remove center and not the center + if (i!=center_w || j!=center_h || !remove_center) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + dcnv3_col2im_bilinear_gm( + data_im_ptr, height_in, width_in, group, group_channels, + loc_h, loc_w, g_col, c_col, offset_scale, top_grad, + weight, grad_im_ptr, grad_offset, grad_mask); + } + data_weight_ptr += 1; 
+ data_loc_w_ptr += 2; + grad_mask += 1; + grad_offset += 2; + } + } + } + } +} + +template +void dcnv3_im2col_cuda(cudaStream_t stream, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, + scalar_t *data_col, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, + const int dilation_h, const int dilation_w, + const int group, const int group_channels, + const int batch_n, const int height_in, + const int width_in, const int height_out, + const int width_out, const opmath_t offset_scale, const int remove_center) { + const int num_kernels = + batch_n * height_out * width_out * group * group_channels; + const int num_actual_kernels = + batch_n * height_out * width_out * group * group_channels; + const int num_threads = CUDA_NUM_THREADS; + dcnv3_im2col_gpu_kernel + <<>>(num_kernels, data_im, data_offset, data_mask, data_col, + kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, + dilation_h, dilation_w, group, group_channels, height_in, + width_in, height_out, width_out, offset_scale, remove_center); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in dcnv3_im2col_cuda: %s\n", cudaGetErrorString(err)); + } +} + +template +void dcnv3_col2im_cuda( + cudaStream_t stream, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int batch_n, + const int height_in, const int width_in, const int height_out, + const int width_out, const opmath_t offset_scale, const int remove_center, + opmath_t *grad_im, opmath_t *grad_offset, opmath_t *grad_mask) { + const int num_threads = + (group_channels > CUDA_NUM_THREADS) ? 
CUDA_NUM_THREADS : group_channels; + const int num_kernels = + batch_n * height_out * width_out * group * group_channels; + const int num_actual_kernels = + batch_n * height_out * width_out * group * group_channels; + if (group_channels > 1024) { + if ((group_channels & 1023) == 0) { + dcnv3_col2im_gpu_kernel_shm_reduce_v2_multi_blocks + <<>>( + num_kernels, grad_col, data_im, data_offset, data_mask, + kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, + dilation_h, dilation_w, group, group_channels, height_in, + width_in, height_out, width_out, offset_scale, remove_center, grad_im, + grad_offset, grad_mask); + } else { + dcnv3_col2im_gpu_kernel_gm + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, remove_center, grad_im, grad_offset, + grad_mask); + } + } else { + switch (group_channels) { + case 1: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, remove_center, grad_im, grad_offset, + grad_mask); + break; + case 2: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, remove_center, grad_im, grad_offset, + grad_mask); + break; + case 4: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, remove_center, grad_im, grad_offset, + grad_mask); + break; + case 8: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, remove_center, grad_im, grad_offset, + grad_mask); + break; + case 16: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, remove_center, grad_im, grad_offset, + grad_mask); + break; + case 32: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, remove_center, grad_im, grad_offset, + grad_mask); + break; + case 64: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, remove_center, grad_im, grad_offset, + grad_mask); + break; + case 128: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + 
<<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, remove_center, grad_im, grad_offset, + grad_mask); + break; + case 256: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, remove_center, grad_im, grad_offset, + grad_mask); + break; + case 512: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, remove_center, grad_im, grad_offset, + grad_mask); + break; + case 1024: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, remove_center, grad_im, grad_offset, + grad_mask); + break; + default: + if (group_channels < 64) { + dcnv3_col2im_gpu_kernel_shm_reduce_v1 + <<>>( + num_kernels, grad_col, data_im, data_offset, data_mask, + kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, + dilation_h, dilation_w, group, group_channels, + height_in, width_in, height_out, width_out, + offset_scale, remove_center, grad_im, grad_offset, grad_mask); + } else { + dcnv3_col2im_gpu_kernel_shm_reduce_v2 + <<>>( + num_kernels, grad_col, data_im, data_offset, data_mask, + kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, + dilation_h, dilation_w, group, group_channels, + height_in, width_in, height_out, width_out, + offset_scale, remove_center, grad_im, grad_offset, grad_mask); + } + } + } + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in dcnv3_col2im_cuda: %s\n", cudaGetErrorString(err)); + } +} diff --git a/projects/internimage_classification/ops_dcnv3/src/dcnv3.h b/projects/internimage_classification/ops_dcnv3/src/dcnv3.h new file mode 100644 index 0000000..ce4500f --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/src/dcnv3.h @@ -0,0 +1,59 @@ +/*! 
+************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once + +#include "cpu/dcnv3_cpu.h" + +#ifdef WITH_CUDA +#include "cuda/dcnv3_cuda.h" +#endif + +at::Tensor dcnv3_forward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, + const int dilation_h, const int dilation_w, + const int group, const int group_channels, + const float offset_scale, const int im2col_step, const int remove_center) { + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return dcnv3_cuda_forward(input, offset, mask, kernel_h, kernel_w, + stride_h, stride_w, pad_h, pad_w, dilation_h, + dilation_w, group, group_channels, + offset_scale, im2col_step, remove_center); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +std::vector +dcnv3_backward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, + const float offset_scale, const at::Tensor &grad_output, + const int im2col_step, const int remove_center) { + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return dcnv3_cuda_backward(input, offset, mask, kernel_h, kernel_w, + stride_h, stride_w, pad_h, pad_w, dilation_h, + dilation_w, group, group_channels, + offset_scale, grad_output, im2col_step, remove_center); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} diff --git a/projects/internimage_classification/ops_dcnv3/src/vision.cpp b/projects/internimage_classification/ops_dcnv3/src/vision.cpp new file mode 100644 index 0000000..1f7a908 --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/src/vision.cpp @@ -0,0 +1,17 @@ +/*! 
+************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include "dcnv3.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("dcnv3_forward", &dcnv3_forward, "dcnv3_forward"); + m.def("dcnv3_backward", &dcnv3_backward, "dcnv3_backward"); +} diff --git a/projects/internimage_classification/ops_dcnv3/test.py b/projects/internimage_classification/ops_dcnv3/test.py new file mode 100644 index 0000000..b9c2204 --- /dev/null +++ b/projects/internimage_classification/ops_dcnv3/test.py @@ -0,0 +1,255 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +# Copied from +# https://github.com/OpenGVLab/InternImage/blob/master/classification/models/ + +from __future__ import absolute_import, division, print_function +import math # noqa +import time + +import torch +import torch.nn as nn # noqa +from functions.dcnv3_func import DCNv3Function, dcnv3_core_pytorch +from torch.autograd import gradcheck # noqa + +H_in, W_in = 8, 8 +N, M, D = 2, 4, 16 +Kh, Kw = 3, 3 +remove_center = False +P = Kh * Kw - remove_center +offset_scale = 2.0 +pad = 1 +dilation = 1 +stride = 1 +H_out = (H_in + 2 * pad - (dilation * (Kh - 1) + 1)) // stride + 1 +W_out = (W_in + 2 * pad - (dilation * (Kw - 1) + 1)) // stride + 1 + +torch.manual_seed(3) + + +@torch.no_grad() +def check_forward_equal_with_pytorch_double(): + input = torch.rand(N, H_in, W_in, M * D).cuda() * 0.01 + offset = torch.rand(N, H_out, W_out, M * P * 2).cuda() * 10 + mask = torch.rand(N, H_out, W_out, M, P).cuda() + 1e-5 + mask /= mask.sum(-1, keepdim=True) + mask = mask.reshape(N, H_out, W_out, M * P) + + output_pytorch = dcnv3_core_pytorch(input.double(), offset.double(), + mask.double(), Kh, Kw, stride, stride, + Kh // 2, Kw // 2, dilation, dilation, + M, D, offset_scale, + remove_center).detach().cpu() + + im2col_step = 2 + output_cuda = DCNv3Function.apply(input.double(), offset.double(), + mask.double(), Kh, Kw, stride, stride, + Kh // 2, Kw // 2, dilation, dilation, M, + D, offset_scale, im2col_step, + remove_center).detach().cpu() + + fwdok = torch.allclose(output_cuda, output_pytorch) + max_abs_err = (output_cuda - output_pytorch).abs().max() + max_rel_err = ((output_cuda - output_pytorch).abs() / + output_pytorch.abs()).max() + print('>>> forward double') + print(f'* {fwdok} check_forward_equal_with_pytorch_double:' + f' max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + +@torch.no_grad() +def check_forward_equal_with_pytorch_float(): + input = torch.rand(N, H_in, W_in, M * D).cuda() * 0.01 + offset = torch.rand(N, H_out, W_out, M * P * 2).cuda() * 10 + mask = torch.rand(N, H_out, W_out, M, P).cuda() + 1e-5 + mask /= mask.sum(-1, keepdim=True) + mask = mask.reshape(N, H_out, W_out, M * P) + + output_pytorch = dcnv3_core_pytorch(input, offset, mask, Kh, Kw, stride, + stride, Kh // 2, Kw // 2, dilation, + dilation, M, D, offset_scale, + remove_center).detach().cpu() + + im2col_step = 2 + output_cuda = 
DCNv3Function.apply(input, offset, mask, Kh, Kw, stride, + stride, Kh // 2, Kw // 2, dilation, + dilation, M, D, offset_scale, + im2col_step, + remove_center).detach().cpu() + + fwdok = torch.allclose(output_cuda, output_pytorch, rtol=1e-2, atol=1e-3) + max_abs_err = (output_cuda - output_pytorch).abs().max() + max_rel_err = ((output_cuda - output_pytorch).abs() / + output_pytorch.abs()).max() + print('>>> forward float') + print(f'* {fwdok} check_forward_equal_with_pytorch_float:' + f'max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + +def check_backward_equal_with_pytorch_double(channels=4, + grad_input=True, + grad_offset=True, + grad_mask=True): + # H_in, W_in = 4, 4 + N = 2 + M = 2 + H_out = (H_in + 2 * pad - (dilation * (Kh - 1) + 1)) // stride + 1 + W_out = (W_in + 2 * pad - (dilation * (Kw - 1) + 1)) // stride + 1 + + D = channels + input0 = torch.rand(N, H_in, W_in, M * D).cuda() * 0.01 + offset0 = torch.rand(N, H_out, W_out, M * P * 2).cuda() * 10 + mask0 = torch.rand(N, H_out, W_out, M, P).cuda() + 1e-5 + mask0 /= mask0.sum(-1, keepdim=True) + mask0 = mask0.reshape(N, H_out, W_out, M * P) + input0.requires_grad = grad_input + offset0.requires_grad = grad_offset + mask0.requires_grad = grad_mask + + output_pytorch = dcnv3_core_pytorch(input0.double(), offset0.double(), + mask0.double(), Kh, Kw, stride, stride, + Kh // 2, Kw // 2, dilation, dilation, + M, D, offset_scale, remove_center) + output_pytorch.sum().backward() + + input1 = input0.detach() + offset1 = offset0.detach() + mask1 = mask0.detach() + input1.requires_grad = grad_input + offset1.requires_grad = grad_offset + mask1.requires_grad = grad_mask + + im2col_step = 2 + output_cuda = DCNv3Function.apply(input1.double(), offset1.double(), + mask1.double(), Kh, Kw, stride, stride, + Kh // 2, Kw // 2, dilation, dilation, M, + D, offset_scale, im2col_step, + remove_center) + output_cuda.sum().backward() + + print(f'>>> backward double: channels {D}') + bwdok = torch.allclose(input0.grad, input1.grad, rtol=1e-2, atol=1e-3) + max_abs_err = (input0.grad - input1.grad).abs().max() + max_rel_err = ((input0.grad - input1.grad).abs() / input0.grad.abs()).max() + print(f'* {bwdok} input_grad check_backward_equal_with_pytorch_double:' + f'max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + bwdok = torch.allclose(offset0.grad, offset1.grad, rtol=1e-2, atol=1e-3) + max_abs_err = (offset0.grad - offset1.grad).abs().max() + max_rel_err = ((offset0.grad - offset1.grad).abs() / + offset0.grad.abs()).max() + print(f'* {bwdok} offset_grad check_backward_equal_with_pytorch_double:' + f'max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + bwdok = torch.allclose(mask0.grad, mask1.grad, rtol=1e-2, atol=1e-3) + max_abs_err = (mask0.grad - mask1.grad).abs().max() + max_rel_err = ((mask0.grad - mask1.grad).abs() / mask0.grad.abs()).max() + print(f'* {bwdok} mask_grad check_backward_equal_with_pytorch_double:' + f'max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + +def check_backward_equal_with_pytorch_float(channels=4, + grad_input=True, + grad_offset=True, + grad_mask=True): + # H_in, W_in = 4, 4 + N = 2 + M = 2 + H_out = (H_in + 2 * pad - (dilation * (Kh - 1) + 1)) // stride + 1 + W_out = (W_in + 2 * pad - (dilation * (Kw - 1) + 1)) // stride + 1 + + D = channels + input0 = torch.rand(N, H_in, W_in, M * D).cuda() * 0.01 + offset0 = torch.rand(N, H_out, W_out, M * P * 2).cuda() * 10 + mask0 = torch.rand(N, H_out, W_out, M, P).cuda() + 1e-5 + mask0 /= mask0.sum(-1, keepdim=True) + mask0 = 
mask0.reshape(N, H_out, W_out, M * P) + input0.requires_grad = grad_input + offset0.requires_grad = grad_offset + mask0.requires_grad = grad_mask + + output_pytorch = dcnv3_core_pytorch(input0, offset0, mask0, Kh, Kw, stride, + stride, Kh // 2, Kw // 2, dilation, + dilation, M, D, offset_scale, + remove_center) + output_pytorch.sum().backward() + + input1 = input0.detach() + offset1 = offset0.detach() + mask1 = mask0.detach() + input1.requires_grad = grad_input + offset1.requires_grad = grad_offset + mask1.requires_grad = grad_mask + + im2col_step = 2 + output_cuda = DCNv3Function.apply(input1, offset1, mask1, Kh, Kw, stride, + stride, Kh // 2, Kw // 2, dilation, + dilation, M, D, offset_scale, + im2col_step, remove_center) + output_cuda.sum().backward() + + print(f'>>> backward float: channels {D}') + bwdok = torch.allclose(input0.grad, input1.grad, rtol=1e-2, atol=1e-3) + max_abs_err = (input0.grad - input1.grad).abs().max() + max_rel_err = ((input0.grad - input1.grad).abs() / input0.grad.abs()).max() + print(f'* {bwdok} input_grad check_backward_equal_with_pytorch_float:' + f'max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + bwdok = torch.allclose(offset0.grad, offset1.grad, rtol=1e-2, atol=1e-3) + max_abs_err = (offset0.grad - offset1.grad).abs().max() + max_rel_err = ((offset0.grad - offset1.grad).abs() / + offset0.grad.abs()).max() + print(f'* {bwdok} offset_grad check_backward_equal_with_pytorch_float:' + f'max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + bwdok = torch.allclose(mask0.grad, mask1.grad, rtol=1e-2, atol=1e-3) + max_abs_err = (mask0.grad - mask1.grad).abs().max() + max_rel_err = ((mask0.grad - mask1.grad).abs() / mask0.grad.abs()).max() + print(f'* {bwdok} mask_grad check_backward_equal_with_pytorch_float:' + f'max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + +@torch.no_grad() +def check_time_cost(im2col_step=128): + N = 512 + H_in, W_in = 64, 64 + H_out = (H_in + 2 * pad - (dilation * (Kh - 1) + 1)) // stride + 1 + W_out = (W_in + 2 * pad - (dilation * (Kw - 1) + 1)) // stride + 1 + + input = torch.rand(N, H_in, W_in, M * D).cuda() * 0.01 + offset = torch.rand(N, H_out, W_out, M * P * 2).cuda() * 10 + mask = torch.rand(N, H_out, W_out, M, P).cuda() + 1e-5 + mask /= mask.sum(-1, keepdim=True) + mask = mask.reshape(N, H_out, W_out, M * P) + print(f'>>> time cost: im2col_step {im2col_step};' + f'input {input.shape}; points {P} ') + repeat = 100 + for i in range(repeat): + output_cuda = DCNv3Function.apply(input, offset, mask, Kh, Kw, stride, + stride, Kh // 2, Kw // 2, dilation, + dilation, M, D, 1.0, im2col_step, + remove_center) + torch.cuda.synchronize() + start = time.time() + for i in range(repeat): + output_cuda = DCNv3Function.apply( # noqa + input, offset, mask, Kh, Kw, stride, stride, Kh // 2, Kw // 2, + dilation, dilation, M, D, 1.0, im2col_step, remove_center) + torch.cuda.synchronize() + print(f'foward time cost: {(time.time() - start) / repeat}') + + +if __name__ == '__main__': + check_forward_equal_with_pytorch_double() + check_forward_equal_with_pytorch_float() + for channels in [1, 16, 30, 32, 64, 71, 1025]: + check_backward_equal_with_pytorch_double(channels, True, True, True) + for channels in [1, 16, 30, 32, 64, 71, 1025]: + check_backward_equal_with_pytorch_float(channels, True, True, True) + for i in range(3): + im2col_step = 128 * (2**i) + check_time_cost(im2col_step) diff --git a/projects/maskfeat_video/README.md b/projects/maskfeat_video/README.md new file mode 100644 index 0000000..6a8ce03 --- 
/dev/null +++ b/projects/maskfeat_video/README.md @@ -0,0 +1,275 @@ +# MaskFeat Pre-training with Video + +- [MaskFeat Pre-training with Video](#maskfeat-pre-training-with-video) + - [Description](#description) + - [Usage](#usage) + - [Setup Environment](#setup-environment) + - [Data Preparation](#data-preparation) + - [Pre-training Commands](#pre-training-commands) + - [On Local Single GPU](#on-local-single-gpu) + - [On Multiple GPUs](#on-multiple-gpus) + - [On Multiple GPUs with Slurm](#on-multiple-gpus-with-slurm) + - [Downstream Tasks Commands](#downstream-tasks-commands) + - [On Multiple GPUs](#on-multiple-gpus-1) + - [On Multiple GPUs with Slurm](#on-multiple-gpus-with-slurm-1) + - [Results](#results) + - [Citation](#citation) + - [Checklist](#checklist) + +## Description + + + +Author: @fangyixiao18 + +This is the implementation of **MaskFeat** with video dataset, like Kinetics400. + +## Usage + + + +### Setup Environment + +Requirements: + +- MMPretrain >= 1.0.0rc0 +- MMAction2 >= 1.0.0rc3 + +Please refer to [Get Started](https://mmpretrain.readthedocs.io/en/latest/get_started.html) documentation of MMPretrain to finish installation. + +Besides, to process the video data, we apply transforms in MMAction2. The instruction to install MMAction2 can be found in [Get Started documentation](https://mmaction2.readthedocs.io/en/1.x/get_started.html). + +### Data Preparation + +You can refer to the [documentation](https://mmaction2.readthedocs.io/en/1.x/user_guides/2_data_prepare.html) in MMAction2. + +### Pre-training Commands + +At first, you need to add the current folder to `PYTHONPATH`, so that Python can find your model files. In `projects/maskfeat_video/` root directory, please run command below to add it. + +```shell +export PYTHONPATH=`pwd`:$PYTHONPATH +``` + +Then run the following commands to train the model: + +#### On Local Single GPU + +```bash +# train with mim +mim train mmpretrain ${CONFIG} --work-dir ${WORK_DIR} + +# a specific command example +mim train mmpretrain configs/maskfeat_mvit-small_8xb32-amp-coslr-300e_k400.py \ + --work-dir work_dirs/selfsup/maskfeat_mvit-small_8xb32-amp-coslr-300e_k400/ + +# train with scripts +python tools/train.py configs/maskfeat_mvit-small_8xb32-amp-coslr-300e_k400.py \ + --work-dir work_dirs/selfsup/maskfeat_mvit-small_8xb32-amp-coslr-300e_k400/ +``` + +#### On Multiple GPUs + +```bash +# train with mim +# a specific command examples, 8 GPUs here +mim train mmpretrain configs/maskfeat_mvit-small_8xb32-amp-coslr-300e_k400.py \ + --work-dir work_dirs/selfsup/maskfeat_mvit-small_8xb32-amp-coslr-300e_k400/ \ + --launcher pytorch --gpus 8 + +# train with scripts +bash tools/dist_train.sh configs/maskfeat_mvit-small_8xb32-amp-coslr-300e_k400.py 8 +``` + +Note: + +- CONFIG: the config files under the directory `configs/` +- WORK_DIR: the working directory to save configs, logs, and checkpoints + +#### On Multiple GPUs with Slurm + +```bash +# train with mim +mim train mmpretrain configs/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400.py \ + --work-dir work_dirs/selfsup/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400/ \ + --launcher slurm --gpus 16 --gpus-per-node 8 \ + --partition ${PARTITION} + +# train with scripts +GPUS_PER_NODE=8 GPUS=16 bash tools/slurm_train.sh ${PARTITION} maskfeat-video \ + configs/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400.py \ + --work-dir work_dirs/selfsup/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400/ +``` + +Note: + +- CONFIG: the config files under the directory `configs/` +- WORK_DIR: the working directory to 
save configs, logs, and checkpoints +- PARTITION: the slurm partition you are using + +### Downstream Tasks Commands + +To evaluate the **MaskFeat MViT** pretrained with MMPretrain, we recommend to run MMAction2: + +#### On Multiple GPUs + +```bash +# command example for train +mim train mmaction2 ${CONFIG} \ + --work-dir ${WORK_DIR} \ + --launcher pytorch -gpus 8 \ + --cfg-options model.backbone.init_cfg.type=Pretrained \ + model.backbone.init_cfg.checkpoint=${CHECKPOINT} \ + model.backbone.init_cfg.prefix="backbone." \ + ${PY_ARGS} + [optional args] + +mim train mmaction2 configs/mvit-small_ft-8xb8-coslr-100e_k400.py \ + --work-dir work_dirs/benchmarks/maskfeat/training_maskfeat-mvit-k400/ \ + --launcher pytorch -gpus 8 \ + --cfg-options model.backbone.init_cfg.type=Pretrained \ + model.backbone.init_cfg.checkpoint=https://download.openmmlab.com/mmselfsup/1.x/maskfeat/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400_20230131-87d60b6f.pth \ + model.backbone.init_cfg.prefix="backbone." \ + $PY_ARGS + +# command example for test +mim test mmaction2 configs/mvit-small_ft-8xb16-coslr-100e_k400.py \ + --checkpoint https://download.openmmlab.com/mmselfsup/1.x/maskfeat/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400/mvit-small_ft-8xb16-coslr-100e_k400/mvit-small_ft-8xb16-coslr-100e_k400_20230131-5e8303f5.pth \ + --work-dir work_dirs/benchmarks/maskfeat/maskfeat-mvit-k400/test/ \ + --launcher pytorch --gpus 8 +``` + +#### On Multiple GPUs with Slurm + +```bash +mim train mmaction2 ${CONFIG} \ + --work-dir ${WORK_DIR} \ + --launcher slurm --gpus 8 --gpus-per-node 8 \ + --partition ${PARTITION} \ + --cfg-options model.backbone.init_cfg.type=Pretrained \ + model.backbone.init_cfg.checkpoint=$CHECKPOINT \ + model.backbone.init_cfg.prefix="backbone." \ + $PY_ARGS + +mim test mmaction2 ${CONFIG} \ + --checkpoint https://download.openmmlab.com/mmselfsup/1.x/maskfeat/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400/mvit-small_ft-8xb16-coslr-100e_k400/mvit-small_ft-8xb16-coslr-100e_k400_20230131-5e8303f5.pth + --work-dir ${WORK_DIR} \ + --launcher slurm --gpus 8 --gpus-per-node 8 \ + --partition ${PARTITION} \ + $PY_ARGS +``` + +Note: + +- CONFIG: the config files under the directory `configs/` +- WORK_DIR: the working directory to save configs, logs, and checkpoints +- PARTITION: the slurm partition you are using +- CHECKPOINT: the pretrained checkpoint of MMPretrain saved in working directory, like `$WORK_DIR/epoch_300.pth` +- PY_ARGS: other optional args + +## Results + + + +The Fine-tuning results are based on Kinetics400(K400) dataset. + +Due to the version of K400 dataset, our pretraining, fine-tuning and the final test results are based on MMAction2 version, which is a little different from PySlowFast version. + + + + + + + + + + + + + + + + + + + + + + + + +
+| Algorithm | Backbone   | Epoch | Batch Size | Fine-tuning (Top-1 %) | Pretrain Links         | Fine-tuning Links      |
+| :-------: | :--------: | :---: | :--------: | :-------------------: | :--------------------: | :--------------------: |
+| MaskFeat  | MViT-small | 300   | 512        | 81.8                  | config \| model \| log | config \| model \| log |
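+
+To reproduce the fine-tuning result above, the backbone initialization that the commands in the Downstream Tasks section pass via `--cfg-options` can also be written directly into the fine-tuning config. A minimal sketch, assuming the checkpoint path is a placeholder you replace with your own pre-trained MMPretrain file (e.g. `$WORK_DIR/epoch_300.pth`):
+
+```python
+# Sketch only: the config-file equivalent of the `--cfg-options` overrides shown above.
+# The checkpoint path is a placeholder; point it at your own pre-trained checkpoint.
+model = dict(
+    backbone=dict(
+        init_cfg=dict(
+            type='Pretrained',
+            checkpoint='work_dirs/selfsup/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400/epoch_300.pth',
+            prefix='backbone.')))
+```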
    + +Remarks: + +- We converted the pretrained model from PySlowFast and run fine-tuning with MMAction2, based on MMAction2 version of K400, we got `81.5` test accuracy. The pretrained model from MMPretrain got `81.8`, as provided above. +- We also tested our model on [other version](https://github.com/facebookresearch/video-nonlocal-net/blob/main/DATASET.md) of K400, we got `82.1` test accuracy. +- Some other details can be found in [MMAction2 MViT page](https://github.com/open-mmlab/mmaction2/tree/dev-1.x/configs/recognition/mvit). + +## Citation + +```bibtex +@InProceedings{wei2022masked, + author = {Wei, Chen and Fan, Haoqi and Xie, Saining and Wu, Chao-Yuan and Yuille, Alan and Feichtenhofer, Christoph}, + title = {Masked Feature Prediction for Self-Supervised Visual Pre-Training}, + booktitle = {CVPR}, + year = {2022}, +} +``` + +## Checklist + +Here is a checklist illustrating a usual development workflow of a successful project, and also serves as an overview of this project's progress. + + + +- [x] Milestone 1: PR-ready, and acceptable to be one of the `projects/`. + + - [x] Finish the code + + + + - [x] Basic docstrings & proper citation + + + + - [x] Inference correctness + + + + - [x] A full README + + + +- [x] Milestone 2: Indicates a successful model implementation. + + - [x] Training-time correctness + + + +- [ ] Milestone 3: Good to be a part of our core package! + + - [ ] Type hints and docstrings + + + + - [ ] Unit tests + + + + - [ ] Code polishing + + + + - [ ] `metafile.yml` and `README.md` + + + +- [ ] Refactor and Move your modules into the core package following the codebase's file hierarchy structure. diff --git a/projects/maskfeat_video/configs/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400.py b/projects/maskfeat_video/configs/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400.py new file mode 100644 index 0000000..4308571 --- /dev/null +++ b/projects/maskfeat_video/configs/maskfeat_mvit-small_16xb32-amp-coslr-300e_k400.py @@ -0,0 +1,101 @@ +_base_ = 'mmpretrain::_base_/default_runtime.py' + +custom_imports = dict(imports=['models'], allow_failed_imports=False) + +model = dict( + type='VideoMaskFeat', + backbone=dict( + type='MaskFeatMViT', + arch='maskfeat-small', + drop_path_rate=0.0, + dim_mul_in_attention=False), + neck=dict( + type='LinearNeck', + in_channels=768, + out_channels=108, + with_avg_pool=False, + init_cfg=dict(type='TruncNormal', layer='Linear', std=0.02, bias=0)), + head=dict( + type='MaskFeatPretrainHead', + loss=dict(type='PixelReconstructionLoss', criterion='L2')), + target_generator=dict( + type='HOGGenerator3d', nbins=9, pool=8, gaussian_window=16)) + +# dataset settings +dataset_type = 'mmaction.VideoDataset' +data_root = 'data/kinetics400/videos_train' +ann_file_train = 'data/Kinetics400/kinetics400_train_list_videos.txt' +data_preprocessor = dict( + type='VideoDataPreprocessor', + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + format_shape='NCTHW') +train_pipeline = [ + dict(type='mmaction.DecordInit'), + dict( + type='mmaction.SampleFrames', + clip_len=16, + frame_interval=4, + num_clips=1), + dict(type='mmaction.DecordDecode'), + dict(type='mmaction.Resize', scale=(-1, 256)), + dict(type='mmaction.RandomResizedCrop', area_range=(0.5, 1.0)), + dict(type='mmaction.Resize', scale=(224, 224), keep_ratio=False), + dict(type='mmaction.Flip', flip_ratio=0.5), + dict(type='mmaction.FormatShape', input_format='NCTHW'), + dict( + type='MaskFeatMaskGenerator3D', + input_size=(8, 7, 7), + num_masking_patches=157, + 
min_num_patches=9, + max_num_patches=49), + dict(type='PackInputs', input_key='imgs') +] + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) + +optim_wrapper = dict( + type='AmpOptimWrapper', + loss_scale='dynamic', + optimizer=dict( + type='AdamW', lr=8e-4 * 2, betas=(0.9, 0.999), weight_decay=0.05), + clip_grad=dict(max_norm=0.02), + paramwise_cfg=dict( + bias_decay_mult=0., + norm_decay_mult=0., + custom_keys={ + 'pos_embed': dict(decay_mult=0.), + 'cls_token': dict(decay_mult=0.) + })) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1e-4, + by_epoch=True, + begin=0, + end=10, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=290, + eta_min=1e-6, + by_epoch=True, + begin=10, + end=300, + convert_to_iter_based=True) +] + +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=300) +default_hooks = dict( + checkpoint=dict(interval=1, max_keep_ckpts=2), logger=dict(interval=100)) diff --git a/projects/maskfeat_video/configs/maskfeat_mvit-small_8xb32-amp-coslr-300e_k400.py b/projects/maskfeat_video/configs/maskfeat_mvit-small_8xb32-amp-coslr-300e_k400.py new file mode 100644 index 0000000..3f26e56 --- /dev/null +++ b/projects/maskfeat_video/configs/maskfeat_mvit-small_8xb32-amp-coslr-300e_k400.py @@ -0,0 +1,5 @@ +_base_ = './maskfeat_mvit-small_16xb32-amp-coslr-300e_k400.py' + +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=8e-4, betas=(0.9, 0.999), weight_decay=0.05)) diff --git a/projects/maskfeat_video/configs/mvit-small_ft-8xb16-coslr-100e_k400.py b/projects/maskfeat_video/configs/mvit-small_ft-8xb16-coslr-100e_k400.py new file mode 100644 index 0000000..367e4ba --- /dev/null +++ b/projects/maskfeat_video/configs/mvit-small_ft-8xb16-coslr-100e_k400.py @@ -0,0 +1,157 @@ +_base_ = [ + 'mmaction::_base_/models/mvit_small.py', + 'mmaction::_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + drop_path_rate=0.1, + dim_mul_in_attention=False, + pretrained=None, + pretrained_type='maskfeat', + ), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + blending=dict( + type='RandomBatchAugment', + augments=[ + dict(type='MixupBlending', alpha=0.8, num_classes=400), + dict(type='CutmixBlending', alpha=1, num_classes=400) + ]), + format_shape='NCTHW'), + cls_head=dict(dropout_ratio=0., init_scale=0.001)) + +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=16, frame_interval=4, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='PytorchVideoWrapper', op='RandAugment', magnitude=7), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='RandomErasing', erase_prob=0.25, mode='rand'), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + 
dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=16, + frame_interval=4, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=16, + frame_interval=4, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +repeat_sample = 2 +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='repeat_pseudo_collate'), + dataset=dict( + type='RepeatAugDataset', + num_repeats=repeat_sample, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=100, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +base_lr = 9.6e-3 +optim_wrapper = dict( + optimizer=dict( + type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05), + constructor='LearningRateDecayOptimizerConstructor', + paramwise_cfg={ + 'decay_rate': 0.75, + 'decay_type': 'layer_wise', + 'num_layers': 16 + }, + clip_grad=dict(max_norm=5, norm_type=2)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=1 / 600, + by_epoch=True, + begin=0, + end=20, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=80, + eta_min_ratio=1 / 600, + by_epoch=True, + begin=20, + end=100, + convert_to_iter_based=True) +] + +default_hooks = dict( + checkpoint=dict(interval=3, max_keep_ckpts=20), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (64 samples per GPU) / repeat_sample. 
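+# When `enable` is True, MMEngine's runner applies the linear scaling rule
+# lr = base_lr * (world_size * train batch_size) / base_batch_size, so the
+# reference batch below is expressed per unique clip (512 loader samples
+# divided by the `repeat_sample` repeated views from RepeatAugDataset).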
+auto_scale_lr = dict(enable=True, base_batch_size=512 // repeat_sample) diff --git a/projects/maskfeat_video/models/__init__.py b/projects/maskfeat_video/models/__init__.py new file mode 100644 index 0000000..96e5f91 --- /dev/null +++ b/projects/maskfeat_video/models/__init__.py @@ -0,0 +1,9 @@ +from .hog_generator_3d import HOGGenerator3d +from .maskfeat import VideoMaskFeat +from .maskfeat_mvit import MaskFeatMViT +from .transforms import MaskFeatMaskGenerator3D + +__all__ = [ + 'HOGGenerator3d', 'VideoMaskFeat', 'MaskFeatMViT', + 'MaskFeatMaskGenerator3D' +] diff --git a/projects/maskfeat_video/models/hog_generator_3d.py b/projects/maskfeat_video/models/hog_generator_3d.py new file mode 100644 index 0000000..02d52c2 --- /dev/null +++ b/projects/maskfeat_video/models/hog_generator_3d.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmpretrain.models import HOGGenerator +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class HOGGenerator3d(HOGGenerator): + """Generate HOG feature for videos. + + This module is used in MaskFeat to generate HOG feature. + Here is the link of `HOG wikipedia + `_. + + Args: + nbins (int): Number of bin. Defaults to 9. + pool (float): Number of cell. Defaults to 8. + gaussian_window (int): Size of gaussian kernel. Defaults to 16. + """ + + def __init__(self, + nbins: int = 9, + pool: int = 8, + gaussian_window: int = 16) -> None: + super().__init__( + nbins=nbins, pool=pool, gaussian_window=gaussian_window) + + def _reshape(self, hog_feat: torch.Tensor) -> torch.Tensor: + """Reshape HOG Features for output.""" + hog_feat = hog_feat.flatten(1, 2) + self.unfold_size = hog_feat.shape[-1] // 14 + hog_feat = hog_feat.permute(0, 2, 3, 1) + hog_feat = hog_feat.unfold(1, self.unfold_size, + self.unfold_size).unfold( + 2, self.unfold_size, self.unfold_size) + hog_feat = hog_feat.flatten(3).view(self.B, self.T, 14, 14, -1) + hog_feat = hog_feat.flatten(1, 3) # B N C + return hog_feat diff --git a/projects/maskfeat_video/models/maskfeat.py b/projects/maskfeat_video/models/maskfeat.py new file mode 100644 index 0000000..cd50dac --- /dev/null +++ b/projects/maskfeat_video/models/maskfeat.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List + +import torch +import torch.nn.functional as F + +from mmpretrain.models import BaseSelfSupervisor +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample + + +@MODELS.register_module() +class VideoMaskFeat(BaseSelfSupervisor): + """MaskFeat. + + Implementation of `Masked Feature Prediction for Self-Supervised Visual + Pre-Training `_. + """ + + def loss(self, inputs: List[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. 
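+
+        Note:
+            The backbone replaces the embeddings of masked patches with a
+            learnable mask token, the linear neck projects every output
+            token to the 108-dim HOG target (with the default generator
+            settings: 9 orientation bins x 3 channels x a 2x2 cell window),
+            and the L2 reconstruction loss is computed only at the masked
+            positions.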
+ """ + mask = torch.stack( + [data_sample.mask.value for data_sample in data_samples]) + mask = mask.to(torch.bool) + + video = inputs[0] + video = video.view((-1, ) + video.shape[2:]) # B, C, T, H, W + latent = self.backbone(video, mask) + B, L, C = latent[0].shape + pred = self.neck([latent[0].view(B * L, C)]) + pred = pred[0].view(B, L, -1) + + # generate hog target + video = video[:, :, ::self.backbone.patch_stride[0], :, :] + video = video.transpose(1, 2) # B, T, C, H, W + self.target_generator.B = video.size(0) + self.target_generator.T = video.size(1) + video = video.flatten(0, 1) # B*T, C, H, W + hog = self.target_generator(video) + + mask = self._get_output_mask(mask) + loss = self.head(pred, hog, mask) + losses = dict(loss=loss) + return losses + + def _get_output_mask(self, mask: torch.Tensor) -> torch.Tensor: + size = self.backbone.out_patch_resolution[-1][-1] + output_mask = F.interpolate(mask.float(), size=size) + return output_mask diff --git a/projects/maskfeat_video/models/maskfeat_mvit.py b/projects/maskfeat_video/models/maskfeat_mvit.py new file mode 100644 index 0000000..3661ca7 --- /dev/null +++ b/projects/maskfeat_video/models/maskfeat_mvit.py @@ -0,0 +1,146 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmaction.models import MViT +from mmaction.models.backbones.mvit import resize_pos_embed + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class MaskFeatMViT(MViT): + + arch_zoo = { + 'maskfeat-small': { + 'embed_dims': 96, + 'num_layers': 16, + 'num_heads': 1, + 'downscale_indices': [1, 3], + 'dim_mul_indices': [1, 3, 14] + }, + 'maskfeat-large': { + 'embed_dims': 144, + 'num_layers': 48, + 'num_heads': 2, + 'downscale_indices': [2, 8], + 'dim_mul_indices': [2, 8, 44] + }, + } + + def __init__( + self, + arch: str = 'base', + spatial_size: int = 224, + temporal_size: int = 16, + in_channels: int = 3, + out_scales: Union[int, Sequence[int]] = -1, + drop_path_rate: float = 0, + use_abs_pos_embed: bool = False, + interpolate_mode: str = 'trilinear', + pool_kernel: tuple = (3, 3, 3), + dim_mul: int = 2, + head_mul: int = 2, + adaptive_kv_stride: tuple = (1, 8, 8), + rel_pos_embed: bool = True, + residual_pooling: bool = True, + dim_mul_in_attention: bool = True, + with_cls_token: bool = True, + output_cls_token: bool = True, + rel_pos_zero_init: bool = False, + mlp_ratio: float = 4, + qkv_bias: bool = True, + norm_cfg: dict = dict(type='LN', eps=1e-6), + patch_cfg: dict = dict( + kernel_size=(3, 7, 7), stride=(2, 4, 4), padding=(1, 3, 3)), + init_cfg: Optional[Union[dict, List[dict]]] = [ + dict(type='TruncNormal', layer=['Conv2d', 'Conv3d'], std=0.02), + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.02), + ] + ) -> None: + super().__init__( + arch=arch, + spatial_size=spatial_size, + temporal_size=temporal_size, + in_channels=in_channels, + out_scales=out_scales, + drop_path_rate=drop_path_rate, + use_abs_pos_embed=use_abs_pos_embed, + interpolate_mode=interpolate_mode, + pool_kernel=pool_kernel, + dim_mul=dim_mul, + head_mul=head_mul, + adaptive_kv_stride=adaptive_kv_stride, + rel_pos_embed=rel_pos_embed, + residual_pooling=residual_pooling, + dim_mul_in_attention=dim_mul_in_attention, + with_cls_token=with_cls_token, + output_cls_token=output_cls_token, + rel_pos_zero_init=rel_pos_zero_init, + mlp_ratio=mlp_ratio, + 
qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + patch_cfg=patch_cfg, + init_cfg=init_cfg) + + self.mask_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + self.patch_stride = patch_cfg['stride'] + + def init_weights(self) -> None: + """Initialize mask token and cls token.""" + super().init_weights() + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. + return + + nn.init.trunc_normal_(self.cls_token, std=.02) + nn.init.trunc_normal_(self.mask_token, std=.02) + + def forward(self, x: torch.Tensor, + mask: torch.Tensor) -> Tuple[torch.Tensor]: + + x, patch_resolution = self.patch_embed(x) + B, L, C = x.shape + T, H, W = patch_resolution + + mask_tokens = self.mask_token.expand(B, L, -1) + mask = F.interpolate(mask.float(), size=(H, W)) + mask = mask.flatten(1).unsqueeze(-1) + x = x * (1 - mask) + mask_tokens * mask + + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + if self.use_abs_pos_embed: + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + + # if not self.with_cls_token: + # # Remove class token for transformer encoder input + # x = x[:, 1:] + + outs = [] + self.out_patch_resolution = [] + for i, block in enumerate(self.blocks): + x, patch_resolution = block(x, patch_resolution) + + if i in self.stage_indices: + stage_index = self.stage_indices[i] + if stage_index in self.out_scales: + self.out_patch_resolution.append(patch_resolution) + x = getattr(self, f'norm{stage_index}')(x) + if not self.output_cls_token: + out = x[:, 1:] + else: + out = x + outs.append(out) + + return tuple(outs) diff --git a/projects/maskfeat_video/models/transforms.py b/projects/maskfeat_video/models/transforms.py new file mode 100644 index 0000000..5b3bb37 --- /dev/null +++ b/projects/maskfeat_video/models/transforms.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import random +from typing import Optional, Tuple + +import numpy as np +from mmcv.transforms.base import BaseTransform + +from mmpretrain.registry import TRANSFORMS + + +@TRANSFORMS.register_module() +class MaskFeatMaskGenerator3D(BaseTransform): + """Generate mask for video. + + Added Keys: + + - mask + + This module is borrowed from + https://github.com/facebookresearch/SlowFast/blob/main/slowfast/datasets/transform.py + + Args: + input_size (int): The size of input video. + num_masking_patches (int): The number of patches to be masked. + min_num_patches (int): The minimum number of patches to be masked + in the process of generating mask. Defaults to 4. + max_num_patches (int, optional): The maximum number of patches to be + masked in the process of generating mask. Defaults to None. + min_aspect (float): The minimum aspect ratio of mask blocks. Defaults + to 0.3. + min_aspect (float, optional): The minimum aspect ratio of mask blocks. + Defaults to None. 
+ """ + + def __init__(self, + input_size: int, + num_masking_patches: int, + min_num_patches: int = 4, + max_num_patches: Optional[int] = None, + min_aspect: float = 0.3, + max_aspect: Optional[float] = None) -> None: + + self.temporal, self.height, self.width = input_size + self.num_masking_patches = num_masking_patches + self.min_num_patches = min_num_patches + self.max_num_patches = ( + num_masking_patches + if max_num_patches is None else max_num_patches) + max_aspect = max_aspect or 1 / min_aspect + self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) + + def get_shape(self) -> Tuple[int, int, int]: + """Get the shape of mask. + + Returns: + Tuple[int, int, int]: The shape of mask. + """ + return self.temporal, self.height, self.width + + def _mask(self, mask: np.ndarray, max_mask_patches: int) -> int: + """Generate mask recursively. + + Args: + mask (np.ndarray): The mask to be generated. + max_mask_patches (int): The maximum number of patches to be masked. + + Returns: + int: The number of patches masked. + """ + delta = 0 + for _ in range(100): + target_area = random.uniform(self.min_num_patches, + self.max_num_patches) + aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) + h = int(round(math.sqrt(target_area * aspect_ratio))) + w = int(round(math.sqrt(target_area / aspect_ratio))) + t = random.randint(1, self.temporal) # ! + if w < self.width and h < self.height: + top = random.randint(0, self.height - h) + left = random.randint(0, self.width - w) + front = random.randint(0, self.temporal - t) + + num_masked = mask[front:front + t, top:top + h, + left:left + w].sum() + # Overlap + if 0 < h * w * t - num_masked <= max_mask_patches: + for i in range(front, front + t): + for j in range(top, top + h): + for k in range(left, left + w): + if mask[i, j, k] == 0: + mask[i, j, k] = 1 + delta += 1 + + if delta > 0: + break + return delta + + def transform(self, results: dict) -> dict: + """Method to generate random block mask. + + Args: + results (dict): Result dict from previous pipeline. + + Returns: + dict: Result dict with added key ``mask``. 
+ """ + mask = np.zeros(shape=self.get_shape(), dtype=np.int) + mask_count = 0 + while mask_count < self.num_masking_patches: + max_mask_patches = self.num_masking_patches - mask_count + delta = self._mask(mask, max_mask_patches) + if delta == 0: + break + else: + mask_count += delta + + results.update({'mask': mask}) + return results + + def __repr__(self) -> str: + repr_str = self.__class__.__name__ + repr_str += f'(temporal={self.temporal}, ' + repr_str += f'height={self.height}, ' + repr_str += f'width={self.width}, ' + repr_str += f'num_masking_patches={self.num_masking_patches}, ' + repr_str += f'min_num_patches={self.min_num_patches}, ' + repr_str += f'max_num_patches={self.max_num_patches}, ' + repr_str += f'log_aspect_ratio={self.log_aspect_ratio})' + return repr_str diff --git a/projects/maskfeat_video/tools/dist_train.sh b/projects/maskfeat_video/tools/dist_train.sh new file mode 100644 index 0000000..3fca764 --- /dev/null +++ b/projects/maskfeat_video/tools/dist_train.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --launcher pytorch ${@:3} diff --git a/projects/maskfeat_video/tools/slurm_train.sh b/projects/maskfeat_video/tools/slurm_train.sh new file mode 100644 index 0000000..ac36d50 --- /dev/null +++ b/projects/maskfeat_video/tools/slurm_train.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py ${CONFIG} --launcher="slurm" ${PY_ARGS} diff --git a/projects/maskfeat_video/tools/train.py b/projects/maskfeat_video/tools/train.py new file mode 100644 index 0000000..15c5ccb --- /dev/null +++ b/projects/maskfeat_video/tools/train.py @@ -0,0 +1,93 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp + +from mmengine.config import Config, DictAction +from mmengine.runner import Runner + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a model') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--resume', + nargs='?', + type=str, + const='auto', + help='If specify checkpint path, resume from it, while if not ' + 'specify, try to auto resume from the latest checkpoint ' + 'in the work directory.') + parser.add_argument( + '--amp', + action='store_true', + help='enable automatic-mixed-precision training') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + return args + + +def main(): + args = parse_args() + + # load config + cfg = Config.fromfile(args.config) + cfg.launcher = args.launcher + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + work_type = args.config.split('/')[1] + cfg.work_dir = osp.join('./work_dirs', work_type, + osp.splitext(osp.basename(args.config))[0]) + + # enable automatic-mixed-precision training + if args.amp is True: + optim_wrapper = cfg.optim_wrapper.get('type', 'OptimWrapper') + assert optim_wrapper in ['OptimWrapper', 'AmpOptimWrapper'], \ + '`--amp` is not supported custom optimizer wrapper type ' \ + f'`{optim_wrapper}.' + cfg.optim_wrapper.type = 'AmpOptimWrapper' + cfg.optim_wrapper.setdefault('loss_scale', 'dynamic') + + # resume training + if args.resume == 'auto': + cfg.resume = True + cfg.load_from = None + elif args.resume is not None: + cfg.resume = True + cfg.load_from = args.resume + + # build the runner from config + runner = Runner.from_cfg(cfg) + + # start training + runner.train() + + +if __name__ == '__main__': + main() diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..6da5ade --- /dev/null +++ b/requirements.txt @@ -0,0 +1,3 @@ +-r requirements/optional.txt +-r requirements/runtime.txt +-r requirements/tests.txt diff --git a/requirements/docs.txt b/requirements/docs.txt new file mode 100644 index 0000000..208d8ac --- /dev/null +++ b/requirements/docs.txt @@ -0,0 +1,10 @@ +docutils==0.18.1 +modelindex +myst-parser +git+https://github.com/mzr1996/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme +sphinx==6.1.3 +sphinx-copybutton +sphinx-notfound-page +sphinx-tabs +sphinxcontrib-jquery +tabulate diff --git a/requirements/mminstall.txt b/requirements/mminstall.txt new file mode 100644 index 0000000..9b736b0 --- /dev/null +++ b/requirements/mminstall.txt @@ -0,0 +1,2 @@ +mmcv>=2.0.0,<2.4.0 +mmengine>=0.8.3,<1.0.0 diff --git a/requirements/multimodal.txt b/requirements/multimodal.txt new file mode 100644 index 0000000..f6150b1 --- /dev/null +++ b/requirements/multimodal.txt @@ -0,0 +1,2 @@ +pycocotools +transformers>=4.28.0 diff --git a/requirements/optional.txt b/requirements/optional.txt new file mode 100644 index 0000000..5f31808 --- /dev/null +++ b/requirements/optional.txt @@ -0,0 +1,4 @@ +albumentations>=0.3.2 --no-binary qudida,albumentations # For Albumentations data transform +grad-cam >= 1.3.7,<1.5.0 # For CAM visualization +requests # For torchserve +scikit-learn # For t-SNE visualization and unit tests. 
diff --git a/requirements/readthedocs.txt b/requirements/readthedocs.txt new file mode 100644 index 0000000..145ceda --- /dev/null +++ b/requirements/readthedocs.txt @@ -0,0 +1,7 @@ +--extra-index-url https://download.pytorch.org/whl/cpu +mmcv-lite>=2.0.0rc4 +mmengine +pycocotools +torch +torchvision +transformers diff --git a/requirements/runtime.txt b/requirements/runtime.txt new file mode 100644 index 0000000..e0b0d90 --- /dev/null +++ b/requirements/runtime.txt @@ -0,0 +1,7 @@ +einops +importlib-metadata +mat4py +matplotlib +modelindex +numpy +rich diff --git a/requirements/tests.txt b/requirements/tests.txt new file mode 100644 index 0000000..ed0110f --- /dev/null +++ b/requirements/tests.txt @@ -0,0 +1,3 @@ +coverage +interrogate +pytest diff --git a/resnet50-test.py b/resnet50-test.py new file mode 100644 index 0000000..33ede50 --- /dev/null +++ b/resnet50-test.py @@ -0,0 +1,25 @@ +_base_ = [ + 'configs/_base_/models/resnet50.py', 'configs/_base_/datasets/tiny_imagenet_bs32.py', + 'configs/_base_/schedules/imagenet_bs256.py', 'configs/_base_/default_runtime.py' +] + +import torch + +torch.backends.cuda.matmul.allow_tf32=True +torch.backends.cudnn.allow_tf32=True + +# optimizer +optim_wrapper = dict( + #type='AmpOptimWrapper', + #dtype='float16', + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)) + +custom_hooks = [ + dict(type='ProfilerHook', by_epoch=False, + profile_times=12, + with_stack=True, + with_flops=True, + on_trace_ready=dict(type="log_trace", sort_by="self_cuda_time_total"), + activity_with_cuda=True, + schedule=dict(wait=1, warmup=1, active=10, repeat=1)) + ] diff --git a/resnet50_imagenet200_8b32.py b/resnet50_imagenet200_8b32.py new file mode 100644 index 0000000..ecfe2c6 --- /dev/null +++ b/resnet50_imagenet200_8b32.py @@ -0,0 +1,22 @@ +_base_ = [ + 'configs/_base_/models/resnet50.py', 'configs/_base_/datasets/tiny_imagenet_bs32.py', + 'configs/_base_/schedules/imagenet_bs256.py', 'configs/_base_/default_runtime.py' +] + +import os + +# optimizer +optim_wrapper = dict( + type='AmpOptimWrapper', + dtype='bfloat16', + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)) + +# 自定义hooks,添加ProfilerHook, 只在rank0启用 +custom_hooks = [ + dict(type='ProfilerHook', by_epoch=False, + profile_times=12, + on_trace_ready=dict(type="log_trace", sort_by="self_cuda_time_total"), + json_trace_path=f"trace_resnet50_8xb32_bf16.json", + activity_with_cuda=True, + schedule=dict(wait=1, warmup=1, active=10, repeat=1)) # 这样的设置是10次 +] if os.environ['LOCAL_RANK'] == '0' else [] diff --git a/resources/miaomiao_qrcode.jpg b/resources/miaomiao_qrcode.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d34cbae6fd131d668b0f16bfe918993610257131 GIT binary patch literal 225737 zcmeFY1yo&6voE-T0Ko$Uw*VnH!QDN$OYmU9-8HzoYXSs!cL^3exVr{BxE*8;$^X0W ze($|`Yi8EWnptaZ!`geF?%MV1s;;iK+D~&&Yrrch@ekqv6chkJLH>ZJMIZq3>;L8S z^aDVD`C{Y+1`HG#0F4d>gAVo71CT%}2o3cD22#y`B}60yC}{YXFtBhho)&T zcIUr}|37s{0f4E0uh0dbFeoUMrZ!d%Ker;S}7J zE?@scz85*}QohJjsHd$Y_8W;NEW10J2cf_``M=mBzxL)0^%Ri18$W8()3^I_*z$dT z%H$HVDva{Zj*pK5INGUnbW{Jr>k^2r=iCObTzNeKi*9w^ zd|4!82OD+ltF%T6cez1yapzm9ImSzSJCfN2HDiKEXJg=!o3_wVtu~M_9bQisnd8u~ zQvp+i)U*-(>Fw6=Ymov>fVx^F`ptvo9!S#zhI9bU<{#N$UOWK}b$nRh_umfnsJvHQ zjO%N_30oq|Qf!6VIZN;Dxojk$HaDW1+GuUYH@y@mKjX&(C33VzRi1{d@T*V5({SGo+7JBp=cfB3fdyv>Xd=$--{7+al&6(1-(Gj2qjTWwk-;EY0 z5E+p#7m+b5ru5aZ;+Y;ocD=5%Z90uO3R4%l>)+Jit#6|G@1oUkipRdoKu4v9G2Z9U z(I4v&X5wL54GX4=K8vpCeQ1BdpB>7K5Qnn%50-xN<+U_%OA}@SSHt;v1M;PSAKw#j 
zcq79jx9bW3mb111T%_PX#6TDOW4CSpFR(Y;B?7@M+n&E(zEQ&wPiy{hlD;N26!ctNeSxB;%fJA()5^z{ot`V z#%Vxg20Z)+9?-cf*YbXX&JK*cev!Q3BCvSwvw7|1Jc)H~eU^AIl3UHzG?tB)@;iAI zuGfNpX)(0Cc-T2(2-?VBS3MiV-DFe`M{7|Ff}4Cfi!nYOf1zAWeu}Ys=KC*!80mjK zI__oBaaYkjlH9h-{Yb7H&XQ;|)w7f36rh#bw^Uh$u`DvgS3P~PpEs|6l%XAPN#3aV zH=ChkGTQ48+B0D!J2E@t=vLbI#W@k_3V>wALpY}w(gqK5@fHB}gGqIR#G;{pHtO?r2gRM2#JsB)$<7F>_a#2tqymi4e{z&Crr-BF z0@!|8O~!%|TK^l5|KC2r(K15DNUwKMQUCxW+foMrW)03?4L}$G)DVUR_>W$o!UjKk z1AuE?oN67nA7DFBpU&jlEhd90m976{TCPSe8}2KB%TcUkV~2jI+U=ggRxq~f3q{`| z@#pmyLbUL!S0v)Vra<@TbtjZAAw?N@M1e70;bO6r8kfH?%2o}<7hoh;3fMR3onOpw ziSa0Bx?U9qHloK?!R`Mrv48_h_wZ;{Lc=DWIJBJr;_jRiv!R}0C&$_TCM{oToP1WL z`DR7+8{T@7Jw#j*A^Ic7pO_A?^gBYMV0`Q%q1`G09A=KDNGu``#a8d<8{rZMyJp{T zc>Ll2(r*T3FnRPC5V@*scWws|$JS$cQwU@RY56_o8d#@bQJ4JP8E$r_eApxd?2mS` zB$rgOIjF48r?=BL6_xH+4T9%yVMWz8==Ox}8jr7aF#sX+<|W_d!$0iFkTf``R|QI0 za?u*S*b?~Gf{zS5k33<<^;^i7f9c1uV1DVIQOGsjN>c+RcR7Cm=T=DDb!Q5=ciHTZ z^0zwq(&hCjA8u7dS>|??%X_Eq*O*q|tO?Lm*=_#@HtmI<09p8jjh1CsDHQYrw1yg+ z8pe<14gV&b%h-WZt)-APawO>!prOa32SiT`D-+hpdwu@Z)nF}>#ZBoIxSyWerN}#W z*r+CfYTa>wgrpOvMbh%qmHU$2-ZRlFlsyEHbH zQs$w~l=NTRO~mD&#acP|p>m)6y8pXIgO=ISig%z0&bgU_p)e#`AtKdFyj+Y`aZM5S7QGgLR4?} z>B?$5-}I~D=3x}cvner_A%^LMZa>#od}&UJWL#JzBWk4DEpF{MD=NwCDK-xGVL3#$ zeJ%?$XCZYxTr>W4_Buzclu}-{?~u|Qh+vBk>|tCg|Hv9bufJ5_`PfF(E9mw13txnQ z1=6m?8X+5uCdtO^#E67Xr#>1L5!kVSP&1(GQO@JA^WO4ZB$64^F0a!u;)GeZ-lR;2 z57C+cm0ifzeUyZp-Oaaw4(Z2+($imAtM7z1n&hrlF#6kB?nRE@!Z#zS&TPSf^q--j zUVZFplDmBneD&~G(v5F;5n_{F9?oA&C-9xGBq{AQ-*WDz!k86k%8&;uiO!J04kxEt z^Qur^VM{PRW^SRsJ3!VQl5{7VMcI`?RULAvKZ)<(Kx5ydpZzzrKCr{PC_tfqesHKe zK@AgR7r(JuH7E!lDh%hfDmY!qY>Cy6+-m=XN6{qcdSB|^nJ;tm4jc~0>uoZ)*YuuN zaU^+5Y-`%rT7U4h^C97}BSA^p9TGAR%^Zrtj~{*iVKw3<1U&%i>`F`zthh2mrHz-j zZB_j>ON*J$0|&1~;9Y!vw7&TM1!;i|yN*ir!}KunJC$B`ylV?xI?kiRA+-gDKoh^R)%fV7Q3yog@o?!FeVb*R$S%VTlYptTuE9FulXE{KZnzA=k?*iH!=gLm2!amIwODY%P>n{yp4lhew4H zM?@RM|FOCexkDamBZfWWnDJ3wqBGg?Ku#q*pj;b{-(rM~Yj3~tV2UVh61w|vyW<#4 zpt*9V!}x9Y_#Gz;Z2)10@AF42s=z}qt({EMFFtUt!K4;|HqNm|9IRzG z=jlHu3_S8K2MoOMDy2BG{e#6iodaFzS8v1FJjv3B(Hro+CbfG-{RoNH%0Za+cVJl~ zSLPB5>-Xs8WjOYUvY$9?l`hIg^PilCCJ=*l+MBE=;=SEsUhtOP4w1!`5AN0zUBhoH zetb{gj?%puW?Jt!U3({meU5g~{z`0JmtT|`&W0u4!nH;WZ4#+|44K|P3&J&tr7%ze;x^9h? zc;=BWMb6ggN`FfRTd)+pLV_td$b5Y<-P6O~C3QpT#1r!uv6EX57O1 zvjjcM?}v`NG*l5}^>jMN)sYiJ9*0f9ix(dybLUZA;j(Nz!+rkyIBDU+JDOp9INQa! 
z0pFZgSEE`&T)PTC-ZJUl%mV1?01sz-J$~H zA9je@Z%j``X2}PXNI0NPdthWO;6q*?yvgMlBJMHSx6E*S2ljB)z3H8L;GC1Z_TXV+ z%VfpeRfY4uSU%eN0}C(3l$lyezvW-bU(?vwJ%WmTraaxDx3pwz5{xuY7+DeZp>cbN zki_WQPl4zSavL6Ry9pfK74gQcF^(Avr1yn`GFAmRA;TFLoAWzVjCIFg^r)r&^;~MA zDMt>%RM`N}EL;DfIPCs<1%vlgB#J*DtfKRlPU#aQi_U91?|uJNqAdS4`_#aDDF)Y# zn-M3($#pyHfY2#LcQ?&$1~*2w)XbDLH5(KHkGGiLUkNC>*FUr*@O$;77;N)sypwe% z$>?`$&rhJaf<9xpKx`)?<(VUu!+-77Hs&c+VEAHwaIR*j#GS(Btx3g4c%Ds{;DkS* z=t&V(XZ#0sI6XUk7UyD+8-sU#nW93pXd{^rrR2l8v&1HtelKC5`ooH3u0C|<%It6L zY-7r*=Vc6x$q6*@Dy&ke<3sdZitmfJR@Ea5De{rcb-~R+I~Mo?T~t)6bEdkEzcy*N zn|bWM)ADaf+YVR8wWXH#N`vw|P+FX8kQ&QZ2xh>>ztpu*EnqFSGc2!LzvhwGyUmA) zePO#EwZ`WgY#boj|GsO5aD1Vy@hs`20u=C5Y>q{*w*Qb*kQifTa_6^<#7`aRa^ijJ z)V$9EZ?i}Z4Rky)IU=G#%`eTQNgCoTS?NeC;;iiWwu*?l<(6f7t!bNsiP}@Gu$Dz~ ze5P;Zblu&95+a-%xKCj#;X0ari3l_{U^619)E=+%t)z#rL2#iOY@zf(1iS9clPy2{ z)@#0>K*#3gUry?bfelTf@In42piV{Sd3oJjuS zVW`jA&3iidJ-n4&cgj7LQGc6%FJj71cSe9{CRf&m`#ts12?S6g%^KgO76yw z7-^UX01Wh_Y6~DPcJIYNSKcnZwer~vW)|N8oYVZs7TKTWQ^HHa|usFXZ;ImriX9pV>ArKg` z1UC96yra(?$vWzOg@q+^+?D8c(oqj$ZpYW_uo~l@dQSp)p!RuY!~3!|itu)JvB{rD z1Ya9j^XZ!xrVfc@*SSrEi22q%0dn@vn}Dj4+>GN#h#2_h0Nlt?OD+x`wPz2uxP66L ziI~TVKZjjm3L#%!U{FQxC*qNXR&U{Nt~ba5@+-1lVV5aA0aL7epYfMSvy2_@bM<+| z^OQDQ^!4ngd(N+{ggP{$fSz*aB=`8jx zd9P4`UClfuQh1Fw%~GiLv3?}CE7SE1=+D4Qz)@@d+nV*P3Lfu`>G-$|WP*UGNq;c5 zy~#N&i-=n-yJIsnudQdYi|EDzYAn>{N2_a5ZX0*r+gjuoE(;fKqG|a2!CxzYWw_gA z(zryIvvWqVneDQzxj#w2(kWrD(Sa_sYCTKubC=KRRXFi2Z9r*m{Ta6AB}6{R5F7zL z1<%OChgJC_&Gm#c#VHeiCLI}JP4LX{%DjX>$nr$ri-q?6H5O8T7kRq%^+tSo)7Jej zZ~`iV9TOB<&@T(yAh$30<|+9{$ovy43=!M1=K+B@HXsy4gqAj!h4*!0a#6i}f`7n> z*nL|=z7t0r+8EbixS1DB)hk;eEe0ogyuLyfo?d=t8~v#fH03?$%6K@l zj(l#FCy`}Dijp33HkAvizP%5o4^TZF_4Z6J2@EdlomcxjOpNO6lo4%)T;v>_oJ!XY>TT4B=dZEq#) zXSvuToCNFT$M1KLV4TMDn~$t%eGgwlwkDd@Mivkr_95Op|0@7pU#POEMZN~Smboig zsf*4Oj%TQO3t(vJN7NamBN1u+YK)OwQ|V%HJ$(VG=iV130n5c1WJCQO z`r)tz(sf|0*8t#mKAS`I*B~Z_waG z``&dV@TlAOd%;9dU;h9*Ggdyzs{JI@TNl5Ra#I!o{H>Oqp~{h}3FSF)E7ZwAwR#Wcuq z$nIHZds4petQ9JaL&8YXQ=Sm=ksVyVZ>?5%gK4|4h>y#<*b&C^@ljRT3V*pnL6d}y z(&9gfjyt$R9j6F?a zm3v$CWmF@}pSaR7t{t|i&xOvp>sZE~dFo$hK&pgM?#jj4QD1Ylnq25X(Gb5mk;VD9qOL_< zH)c}8qn+H3`6BwoP?e4KrmVp0pJ3@Er{_un&kia@y$+4Qp1F^9G{oqB4U@xc>C1;P z+oLDoOdr2Ab!=4d4=-p&^!Th5QR4S>TI3vzxV{7Ms4M=Q<7t&R?Jcx@_t+G^s;lU} z0L+l{idc`rrpNo6z=1OX-pNF&y`jN36G8D|6tTaP0|bqUh#vdpk0Mgu3_5eRC1$Td zjIG8MfYP$0vC56ZKCAnwJ=*?q@?}KFzz{aB+WyaQDar@0<2Sf-g#x*20P%;Bxr)M4 zxHpq!ehI6jJ`2=vJxY$hbTVM0%$qhec{s`BKB4Q3wZdq0Orvw{aa~UwKXm>XC4e84 zqeiCqn%#G&lf%Zw?noG7haCQOH;col2=@Mj*{W;`@IMSn#-oXS;qA}(c9zI*3C(6U zaw(RX@5f+kM%kkgqC&GmH6pa3AeKfzwI9Q5zKhwxnHqFP22u$)pp%3gt30Pg z7#qJz5(Qo_{x1o^dB5|^Q@R*LI0F~?Wz(O#I}|SN-I#p6Rp)#g4l=yd3YEPXi>gGAX|5+RBk zEp4upH3BpE?ZpB?MhYd?J%h>l@UL?)Ma(WNms7A7kRiRVHS|bc<_}}w^_Jam!McY) zBOoKitw-dbm+%TL$g27_Tv_5SFBX-++#X!U>?210*-6VI=@+8zt<>flWFl1o_RrDI z-0MWPs;Wk7lnPKf2~xYKdiOg8#DCbrdUzTd#(!r!mYmSQcl&raYvG;0aQFnkhV#pX z%>I7+TiL9`?YraSV|SzzZSGw-@@4-N^wy-GK+90A4ISlid*aVCcFoOT10RR0S@>Z* zjVQi=%-MM@5dYQ=cQq&^QtdG7ZRLS=) zO*YZfLNzosti;@8-dUIQ^026_vF`kPODf;kGaNmtsx3$f~%A72!H z|9OiX9VwKhpxVEE-87AY1!9mpbULb4m;~_-3eVm7*{)GYGy5m)md_d`el|GZf_t_7 zZz=pbx|cIPN^->hs<~7W>=QTDKc&#VisFm9QrWb+d+QON`s!gcCFY?uJG}K`!<0N$ z;clL3bCD}+I;6sF>SlA49Et1G!Rq+=tRQpmm!;A^%rv9O==*S?E2Sli>kKhsv(|MN z`nkQW(&t?v)+qo=+~wseSBIg_;LJBI>(`NvIW0RBsYqH5dCHqP(X)f2xHY3+GKT1W zPDR6to(qQ-Obwh7xU6}ocITP_DN-o5IeM_jHDuSYn&!NuP? 
zLRG;bIRMT1yaMyXLKlV;<0nw&Dg>6m1p@u0|Az)_A-M|?SD#BLjE#Eg%ip8^E$)Kp zy(uZjgLjnGq;WF^AKrA6X)`~()7aePv0km6VXE56GPU^y1$Nu~f;;yhH9=)B9h=hM zL$>wZkJvDixbZxoHo=x+^cKEzxcpQ2JB@2+1_$3~bAmCZzUOmII^ERTB-InJl}aT%*u8&jN{zmv z+1zdSo#6fnaJ-HO>d53ae2SM1at+$tlpC{;>ngjeyUjtAHT?3h7Pj|I!PJXXMIf=! zH_!M`^75~=w>vP|-VQx>we?Z#%cn&8ZoZmkl!;;|ZkByDTra`2Z}9{e|J177R&E_J zLc_C;o10WRVR=o!+*!G3n-6!+@&)=)$XVfj9o0+sQOJO!aJO~smG zWuFwl4T}PwDpP#4sk7J+b^K#J1$PZ!{BG5FELk9PX!3izZeY4-dDgS<_)9iJl!B+l zrEjlAm7MzkgY8**0FK^EPJ_4s;THP9^y`r`?>TG;VZi0=sdF@ure%4al zwuGI|6|WOf>eMF5TOy{rQL8<1p>+bvQXGef0k)eAMxzqU*7OWWB_OMQ0L~Yfif$9e zd|M-{Im}X!3}coqMzSKsK!E*VdRhqk$@U_13Dcy&Xr?Zxdw*T~6i`T>Z-Nv3xJkHC ze@u>|g1l)!w4&7(nO=wmYFlyleI7Xg7`R4h0IfNRLx9@~HWXz*m4NDGaE1zls(UP( zA@(8zWb0yW{=zi(ls4tNWWD%apFXsUR|DY7LAT`rjoEvo*8IDKgfTQu7ak15xeP+R zd-QU4{?W~A9h z9SLzLt_BWJTSnGOwBM}{ADvsKZs${_lxOz_mq-Qo22C%GrM-GsYr1)E5kT}^#jb%{ z3e==c<}S~gi{Fb^>)8oFzBE5mFk<#QQ#?HjyQEDiv-X`06d}Dmj)@p5 zer7u5%2btMiMPE%p8;a}8@KT?NcDPUJRz2$n=E11!fQ3%n>Btz?Dj#JedBPt z5PavxD(t>RHv~*XMj5rh{A8V}ztNq`)QLDfd^!0ux=eg>C@|W(Jbi}ul6*`KgJ_{y z;@Lc(XM&>4743@cu}XPXLe?)L*RM5vIQ5{mdIDu939{cz6$p&Im2}a9O{P!S^wJI` zL$efD?{xiLy4~UfR4kCi@A_uUQ_?UxsmUyo5iMvrbF9Ucn>C0GHO%bZ`a%!@03Epm zlwgymQbeGj^@ZHkYldVLr_+raPuDWRcIdXo0*@BC1zg`qf7=jI7qon1Rus*BYji{l z6@~a<4CDJKR3$wWS^W5&nxtI3O5T~}4FdJR2grPdb&7q)ZGH&h`agrH;^F+#I(mcq z4+tZag33>_hdpCc}RqUUc`Ivu=qY_%p&d2-{Uh+@pmgN9Mj;l~@+hKmX?v-A;)L9VIgqkR0 zt&V$8#_`l)$!rW=8lLPm;_wT;W_2KpHh2Pf8p&kae?s;*Qcb{ym9L4MS5CGJ%tJDU z-?TKi{8C_B0k?7dhuX$C+JF0!RRl6PKoI8huzYxrGMW9C{HqA=OjRpn^lwSchz{@@ zA4h}hzrJDuTV}7(sG=*zZUy1H-cciR5lM;_3hJd%0{!>2M(JNr9n&9kpxXS>egDoX zU)D`?0`T_l=~e@|?RQpjR@GhE^A{k2T8`h=3G7zP7fO5s>VD$oobAKnF7=b&XlNAa z1RNO*jZLrw8jm!2OoWY&>e!Ten$u+^(0DjGiQk;6nDI*kb<;%Vai-hw{Fgd-t&rhN zRuJI-Sb2yJScPH%#P>U^Rr1TVf4K3wt{qBj%+{7UZzfj>q?>-`wj1`0osw%j zoBiBeci7g?{Mg|)TkZM;T#y(pF+|yCt;}3(ZZ?_j-`$>OBo4O~POtZYcs<|0cT$dT zr)<)(T^}7jzgBG9MY}ScI-%sV52pA1iv?rC0jAvRKxm0`Pn;(a1Gh*_9{=z-Za(E$ zdC-IRu+~0t_MMMh9s1o;HSs8T=WV?9-fx$r>axuX?7ijna(~wU(t3TYZNq#0tU=OE zdePRVa$|Jpwi&W{MsW0u@w_Ag2;4)jNmv@{@>dGI8b!Wz$;37Iy)^0WOb0?iNF5}o%&i$3{V!NK2&C!@H0)|!xl!K5b{Mg7_} zvtuDUM;TiCd^X;kExX{i&+T;_A&8RcH;G&`x9Vu<6SdzAN}T_VaF>t+o#8#WYpvR$Wgn-~*%AJmSm#^KA*-G7{3G$dW9&cq|M%SpIDG#*X8!^Hul(m2YC=eeG!*RL zIO%`#;h|t&z`{Yl{1ZS8{pw%T|I0fStl-~roV+pbbVBxW_J1PtP_sS0##DI+2QW5+ z(*B|0b{EG$9rE(G7X3o=_br%$QuXA94l}x5@6NwDj&n=0HWs|r6d|*}{^p>jtLEo7 zANSptq5q9t1E40>&s`i_TBedAzE=M)6}JonCda51h=oqzZ+I2}d&`N{69U;eBK6#SC|m}zNGdw{eX3y4OZxBnms$`p9ub1P1z z9{u{~_kUgfO9K!MJo|pBSCG$VpdsJNz`THmdjSUx1N8zH1{w~4f`5gD!>)>sf%*Ck z8#b#7DWifK851)LhmbNkg`lF6{#!~3<9EVh28Jd^U)AkmEC2b_1_~MG3AkjA)5ys$ z-i`|?Tpi$UZne4aGEn2VKw##Ep#2P7kw1mT6Kh$;7I zQFMBquEU0c!X-UxMjM9qdm!OP*qszVq%<*BlLM|YkZMrTL*8<|OA-1`#+#GL$!7Mv;#zrZbP_D*3p>e}0ed>Gv9~ca6nsQtdQk6?|OrEnj1*0`WFS|sw;*YPd zu3l@WB&%D63FteY8}e!Ey|;;%oQpUrS&qL_K+~!GjC3`xKH_2qFY<8#YEjiuG1k2H@`ISSk`JkD@gQK&N zQH+h<{k03JH0_X-9pd*Krk|fb$0n|2V$vy%Bsq@A7`dFKDeZ;+%p2LMM631(-Sby)D`_W6ovEzT*>IFk^B&TsdjmOQ+I2={A0+Rw)nhpc38(? 
zyX&j_x<>IxLs3)v>yetw&92Jkbqx#4Uh@^TjhnF2)+(J)Gj&zfsTj-s)>5#sQ}xpL z>nJX67YkAg>n#g5+a!rjwO}Dk#0ffS(O6nk#@wdEzy?L=S9WmSnd@d$4H=7HgLjO5 zrG@8oR8&JE5RF?A4IEb7IupD$gVdAq-ucC8zJJ9@x0~EzD3RESE}CXRHce(GnMpMv zdxG`tS3p06y)X_5)JLM1+8-sAv%R$khv29?)e=8yY=tV=1rmWySR_AoOT&I!VkckZ zPjnNp1wxb?a!Q=f2mI{&Owz8W-ndvx9aR@5S&)7&PPC=o@Ig1-IBO}a?T53Wjjd*2 z$o=wa@N1#vh3RhQ2GSWd!!5m8XAECj_t2Y-ovJk<8(J5f#q?BPFWQ0pT~B>t09dX|;zAR(*+;nMam|kSU#B$J+wziiQtSOTS0mSRSZy5noUet zlij|ys3I$(y;cw+ViNHpN{kvNKROX49Jwy>LM2qOnmujEDCM?q=2$6v$P8KUjk4nC zWV~V|G$LWDBMT;U7Im0493ols}i1^ zM@<_sbys)$!8H2};1vbw#R4`sOc>qaJQO^2&ar48s;UbpDYNTLd;ZT0>u~ zus^>X9X5~7xvzmeuaiV_20~(<2EjgO_^s(mT z!;nCIihXf2j~SRcmpi9|rf3ZP#%NOBntY6|=HwKzgHs@|U2|*5{d+Lu;OM9e@2V|W zKziu%tS{e}t&Tdzj(z(+oBh3XSzXT?+pIV-N17=fUy5-5pqYda*L3?wg>>u|`xWZ1 z=RzZncjMp-+)ezbxFM|NAG6-Z&PJrf>JjGL(uAM;LB}W!U<8KnRHxGFVxt_W93-^> zROA-YUeu|dePXY(_|K;}4y~qg@v9p*Gb-ya_C~Zqag@~PJ@Sv35As(Vd8tl|!zHon za1=;#`8qUljxI%V3`gHo`Ph3bnO%Dkhh@s$P?g45`e*LE8mm1+ z-G-t`A>;V-Ie}aXEXH_C6Sbs-EcB~aPXOfe%!v()oEiL#5=z-K;&fpGlQlF-vX5(J zWkZrmG+f_ES|+~NWO;ZLJ}zwcMcjjjIkk`iP3f|)-L*%J=~EYE1ZpZY+>8j#1>P*W zwucULUEH+hj#`3vHaucg0GS{sH^wQl@cQr4nGL*5zfGx-ATHY*;1N&=+;@+Ru`}e_iZu@LZ&fn{{e%4%^4HOY-)?3J5 zx8SYAsQ-T3L`CSe(Q?&#<25BJ>RihB*6ec@k-_#?J8Qj}7%F8rWt*1?$Jls9yBxla z*H+_B+3u-{Wfo#8-%zxU=MLJ+ zney?Ze(X-EvJpxJ@}07ACbS0gIDhW4gIJbopII&ghC@U=l{n)pcg`%unlcU8e6#w< z>IWkx295UgzE%L!aOEn^U1Ao|&1n2_NeH?G26_-NE>sR~VB1lyb)cOeX)S`1Q_kW( zj2@ASB8rt-ub7ggZGvJhm+7wV^nK-VFXv*bbfBgolz8RYPtF3l^85pla}I$hq9xsnhpWl-4>;SEgC;sA+*gsXV{N#9;f?oOUU#HBFZa+VuAQ!c6kRHbL8d z2FI#*s@F^^waQ681&a~iqwBLL`$N$VhA`qOd~)UIlN`O1t-KD;$5#6Kbkj$BbB+`6 z8lt2R(8T@1%+0x`dPN8w#fZ=)2LVFh?!UL&k1KBG>Y7b%6W>jMJ436yV=LX6{4d1r@#BE2R+#j~HoJTk^ z&rcy?;!-cGVw)<4JxP3(_8>L_Vt8mj&ubnM3L&h#tjZEO1caDRv+TA6XReDVzIh;^Kn?2&|k;Mi!uMW z8@AIuMx8Qid<~yxaDDH>fNH?Js0y93A3-5B8RTweQ3aZnOr%e1qxyuRDjBwu;`zhx zn<%b%eoR>U`)|eFGqZZZvfT7?n%j$ZyP52D`R!Bqt|G;n`SYB)$>NF$yf6A?Un|P8 zD$BA?x@dAOBdcI3GKs!NvCInfzv!DuA^W7;@@AwqX29xusCJ%$tj{6R^_J?VsjJ^i zSx!b02M-v`L(CtB=vtVFRj&-WdF*>EO^FE$2h%J@teoWGqbZG5(P8-+>|9gTV+6jw zQ<)Se5Q{aOlyGl;`IWkcfE(PF;9?GP=}7h=0*#MVM^vd?ZqF}5I{3n#%oDKU0{U6p zTcW1PP;xX>Z`Qul;-eS5n%q0~OcZu@IE3e&MgH!aaXG$4afFIYe5|U@6ELxIj<((l zZ{@br=Y9a17GvEI?vFy=0-Jo*9UW|89xXK$!!u+6nVOpD#4E}_KBO8yoKc$YKLKuf z9b_fgPrw|k<`cmE8SHWC-MW>ZJ$q%y++QHDSb#N>sPd5biKcom<9rNOHrd;)g|S(x zrdS5-J+1f{Df$8IWSSkCrTHn$VaV9ghqp>c+4!y|KSNa9Z8m{LLQZoa*=g?9{Ua+Z zqcWobA0x7ENvpoBg4XBX#ZeTx<`&@`A%%G!Cj07+NGfZynV4Qn&CQ+8aY)>qt))te ztad|0n(D;w36g1b_)I579k2}b#FtG{sD%*eIhGHt@&#Xoy!mcytfPlTTUbOE1;do( zrkv!Ynr))Gzdl_vY=8r!U^6fIjrN_LglP3#gQmKMNt{A=WOhEd4W)Kl6Zcz$nuv79 zrVzWEagxvB5O@pA;BC_ciktG)>?&DC%@{Af%Grl?*DNAfMH$WO?^(qTP__QCqRbyM zVx%eUqgL;$RkQ5Yynj}YR!payT}dQz=5$Ps3Dgj?Yi?heCh-Vl@$e60ihFf{;jyq1 zH?FRWvR4f*_VId`)b*VkownD>McY@Q-v|cNP`_1-@DmF2V}|FZx!AuA6Dl5~s91@% zDa`CSl@w*(kK;^;>gPl_hd$uNl9he;EeUG67WStiqYmF&>Mi(#l?w160*)O#J0#%r4Ig4CCXj*gC}Q(}#?58|??BMLz-wlb&wunr&Y!t5=x zzstHs3(DYbtGR9S#;q%-Av_MsEtm|J*7Vd~P0oja$5pd5y^B~t+#~{00!L1k89PJ# z%V}e>#0Ic>w;<2`5?QdtLm6#743%3}7O&CN-IDe_f-s{i*zxOzuk*xmw!kk-!DP8_ zLooh^D4fTZr@?uO8CsPnt~=i-ezr}Q+ut%z7qwd0$xJaXOwATfYN$l3CGCGis zRQMCY{Z67CQ0m7XC?a>f^P<*inIw|Hx`oj7a? zIc5X3a`e@Bo@twZbpQ4RwufDFGP8Okxp1~-sHVe9mVVwN&;~)xt-o+nlxWrl36@I{ z_F&MaDz|-N(Xt*;T5`}XlRuwgY(A`7ixEjJBSR6O`uzSbM=MjSL-WyOwKv!TI+IaM zlFq4Ssze)8DQ*L5GcYQVEd^}DR8YcQ42h=Edjj4RAb>psb+WA~b9o_4yIvJVI&TEl z-AtC;)0*32o?P;L4Dzv_!Df~UhC%No<4oq=EX{|M!7#J2_h(N0$^^gz=$EmsPUhHj znpyFn<=F$>@1V2-gXCcO`Q)_x?D3Mekg)i+d3rr-$L(xmw@(%BB$KYnYz011S7|K6 zwvl+gmAHhpqwMd=`B~avlP*Hn{@?sz~br=_dB$;hw! 
z&Spo9GE=IK2g|-Ni0wWGx=ZB?OPks!FH*SYj)vN~pQRsu2G!QjXU`{FGiS~eYGVcq zCHlqiQP;s2yk|j=Ft6S4^-?<9n3RP+;22)KX+R51qY&>_WmERbe2JYKwlhXomD@ca ztR_00yOJsDWgSibJB9h7zVHvWR~x0j;HLE?JIcC!l~^Mgr-&RR9v|bUX%7FvQBTP zqQBnFc7b*ITpV`xpfbBCKc6LV8SBMCQ4c%X!6-Y#Z63Bd70xZSDXSeAB*tB4H@+1T zP$gDfs(9p1Llss?U0+~tL8n!}+#6?W?V%EO4@RsL--&j9)fyXAU50z4dO1#^BG-3Y zW$vSGrTRhgn9QLBN+KqXHlaC21Ej^3v(ftffloqo`ph>_i6cF07CG_iE+!(ni{?(v zA>3#UyK6_fGLcc*S`dlpzDmt2|3PQNLix)Hlyx)qf$T?#<1#*OSH4bkRNs88n@J1R zGdLMjOSHsrWqQ*FDDy1^wDhmsG$BHtAchLc6mYb-oy&oX3#y+|3MIQtHBR<^s3U1= znqks_Pet8(nZ$l>D-zvHZCq81kQ;~PFGuJtttl|0OIP?lWtd}Q4Evi!T?G$y&#soj zlx_C8+};xqkVc~bYpEgsY9$9R<#sSz*(x|0dEh_+C%wQ zC*U}Uw9hpjfpR}BsHmlDS6r9)+*tl_dOq@GR$w>sc=rCZElq#m0}D3`8E;7soraLO ziWp>SrEHRDJfG!yJ}fZCYXc{+8xbi$)3Q3|R(S;S*;$eC(euEnGI*zx=v_E!fHN<# z7;Zp#5vwe+cl_b;JniMnT+ZDf>f9Ua!HfzThjW^~?dkA(X(@noH zHrGoWzEbpetL+L(M+aGr`X-a0Xk|h%{2RH+AMz0h`I^xnOhpfh(mEtG@bLu)%8`v0 zUoA6BZ8_V*s^AkqXi?Yx<&ksN!VBi!u6*VW{$^9B>;mnyuSIK*WHZva;g>)^{*ldB zpQ!&O{Rw;Z0?UYuQr{X4G$%WGpT^rC8Xe)<97lClsw$Lt8GWba(|1Vz5HxSUH3&n! zuZ>K7E$Vv=$aeo13!p5pa_I zECjt*{0CO2xfSUd4s1c0T>kECDBp^X*u^X2yC``Ap8#!}NFvXrb9{Qv4P=6Q?A$sw zwn8(D?UR=`&xLl8gr4&ON68tMeS!-|jUz?|Y>ft3YKfD!LiX>5JV^M6BZZst>J_9p z>$Zi?hUm@)cqi29R=vLCs2mUpMx;IgEdt0h?dmX=hGXoSyRR{&lVdsd2K)cdTzXZe z#WW|?Apcn4G*IJ=6P>Y1RRWnTqps*`E-k)eW3QTD%vjL3flG7lbNr7eFQ5@a$bV*D z7;lOtG6wpqu^_RM-mww6MI2!Z`=f}WZ~bqiy=7D!P1~r8J0Z9yxDL*sfdmh3!6oPf zceexx?(Xg`gG+)AIuP6woZucT@SNs(zi;oe*E#FxpI&Ra?~KFHP=Y^x` z?_42#*dRcW9yB|*%h#mxg^Z2Pk;ZW@|!*HF1 zGE|TjATh2>D5-n+&qMQ#5CX-6C`<&g9^sy=AMb;;3Rp}rh*7g(3u4p|aUsI>@6m6X ze!iy6xpMca2cvarqlDn^vm@J<*Q4{e$bLt67U3^e}krr5)2s@>kr6O zhh)yuq*s#gv>2Wiwj`$h)UC{;vUi`F+wA>b*?GXN5tJ`wU@H;az=k?1T`esF(}M@0 zMm7|^w*2wLZpo^=6IyW&Tkxkg24UY#20n|%s%RiPdWjFs1f}Kp`{I0F%DY$<-{-{~FbosBK{+%K2m%nGe?xgCf z?*imCHE~hS&HfzFZ$*)|s=cPt?e6$;yE%9~i#|lc<&ZBi!^OLw<`%+? zR3<}~1nqk%TRvivN!S#(m}iC{d6BOcSu?39{nE6=7Pazcgb!yW`juMOCz`y#jAYZN zc^xUhWzeS>!!d{hEfoo-Up(2CcaX!Wjf-aUDl&5)mw3Y{g^xT&b!KN-Iv*v)^d@D_ zLb6-9nTQg9qpoWFh@VzBid(i<0@*2ZbbosTwKjDr(kdBq-E52Uy^E8>O%{vpa=*2> zgX}8b44;k;ov4G5g^Qo5<{&$1t!b)tjEZ!06@l9~G~k34p@sq|3r^8o z*EPaOi?4E~N|~pD8`IzTlj%CfxO|`p76$3p7S$hwUj_PXZ7^vxnA9x(`oAvey#v$t zS=!*0Q2zcLwpR+7JZ8`l4D1d*ui5}7U7@U-t`lH{Qn1myrucPS{zrcm?r=Oyt!R
    73-GoxX<&P$=f%t*7Wuuuew>bQ?kHysXBH#C$=pIX>^@Sg?__h4lS2R-I+SW z`+r(*w7ry1v04sV8Nf@0#!m|lEv~%BnFhn*Py1n$lu^FFrndC&ig80eqr8K8O}+MX zzxj9Iwa5OtI{e&Hv}EfF?rha)-?*t$5oSb#|6kO@_kYx5b-283DmSzJfRYo-=(TY$ zg=blbzBEN|Lhu9xE;DmcyA&j59slkVYHePvRFn}l$?`kGDKTk#&mt8JD^s?`nsI&~ zkyS_m(NyFlbK}00%xLuNwW`|Uddb}NpC|rP)v+c^{#_9VY?g%iiO;To2R3=~#K4|M7a+ zKaDs2NpkH;>gD>}eW=_}6ql7gmDvA+ra{1#a<`X9x1R44j?w*Pb@R}`iI?*Cu7#W0 zC+~UvExA`MJi%I_)WTARf-*gtr5Ya=ZyOwp@CLjZ74&nsZmDIgY}U~tkUq-fimRaD}NyJuao05`lu;?4KreW0T>R^}pUBo%^0EsJM~>D0-#D=7 za&u&+=sf~mE7O^WZBXWkW) zxy)<>Rp}81ffdS9^~jRav8xbru2ZY+>wG~Hg3UR7`9N{1A$D^Mjvfn>uXXIKHAMDf z?Sz(C7Y)@BOYe_JoYDDv9d=(!LO?RRY)vx=e!@M*;O{H=N6qb-YbzCoXAcXde-nct zZGMT=sb%~tA43m6vIo|`&c$=6zJ%f#C%Y(gXD*Dfx3J%UR~+P=Rx0qE-)O<*D2=Z| zgk~Nk6Hl^#C}Z&`dsdfX{OBdAB5BY(WF6$gfnW^|8>P_?+Yi86GuigaV&X&;s&y#> z8P`|?$%xD{ez)fi+L!PQG-!z%I(SbLo2ygQWFrryi`wiqq&b8&zl$>)p7PWRf&z|5Q~p2eUq(zC;yxcR082V22H7ZIOr3~iw!uU}nvrgS!7Eh>@-n-IDN{N8K20sGrj5xv9K`Cm|6{T{$fN66D$E|3c_Vb=k*;+T z<#CWl-@a6s@p+RZAV=%?mbL{>c=IRA9h#wilypla4kws=IIk&Hiid5n!e7 z&fa4rR!^%^rAwsbKqZ*TAvQM@mwz#)nQ33w?_QVEvxql|-hAAcx)m9o6(S+DA(k{# zHiUT;nru(C15ErW{MSL@i<`Rg$X2JH@HURwq;#OZGdGjWrfz`w>*TShu=h z|4HG#hl{CQBJ&hoTD;Itw?U7n$6niAT_*)jlIZBtx_Tu$u^J{X*yEdO!}uw+Z-8oJ z_hp+ojRVn=n1CN`^R$6FT=L7sYunBg#&#;+c5&{;&6J1*9wY3vx|6x}SDhgwa!Pp^ z_HL=Ou4?(9&=r3<^Em`9V=b9iZ>VwaE6zdMPwQug!+2qDI9}W*@|eq5PP4k}T@Nw@ zgb`KOm}Ur%;x4WK_MB6WdL_!4fHg5$%Szj3GEIHvaFZFnq<^+Aa7XF{m#*=kcv&rz zPH&PQ=%s3LwNxLusBAw7VnZ4B%j++uTz@ZFVSXdU0S}^D031X2R;_v`R0T2CD!)W$t@QUOg8on8sBc>h zuK5(F6kCyX*izpr?U3KQg>tZW;nBu8{P)1?Zu+sB`|1WMhz$}iaAnXKc{ zPFris8>$`E4>5MT;7)#MLgM{@4MPe6IR4H2Q;yztswltQ&sCGZmzS_j06+tZ#OYMDFb^b zEzMlF*)Y9<5I;3>ggd1zOXy7QR!r)oif^odftc`#(30KNUfciu?WltXx-%xB%~%N{ z(~s@7E3Wi~Te3bjyXAgu%&zW4x--Y(Oj9&7i{yxCol`A?{QM<$X?Oe`yt5vr@l%<9 zzYaN8sa;)VwS9>*v_sD5Rm>|d7;KDth%`>2#JII1voH1-m&mdLU}xTkXSW+=}cw!fTf`<6_F$clI|TJZ|hIy z$WFJTJK|unaxXH=jF35%_zwd6zHQ<0wuxaBdCfTGXSvLvb>vK*kLev@WtK)Ep(D;k zvBvuBuGc8KYy+VNbfV&XB}1s34uw$^zNV^H-kU1C1i03P6AW)ENcarW1y17l5r@&6 zExwN)kmRIzP|$-*dlCJJ+wX%w1|vnSNeH>*#dO)WjxB{tM2&GE*Iq_vr85057wQ_v z&JX=D9+RrRvEUUku@j-We_LMb-wx4!?hx?59rAy?y#n+Tm%(^d*MVi_n?Yof|ExPK zP*rO=&)qJqnH&zA$irBnngG<$M`k{kn zJ@9M^&z0QTNeaJV*nDDAT-p%3<)EuL&kVVxl_goriB6%I4kIpZokIbXN2d5k4<7`b%u5SB$7lT3Pfu?V3nc+>amM96tDBa&Bl!7KTcu%8z(TMH>xvJorQCR8Ywfs@O0!Ilwtzaj-*t>Y=!3 z4Y{sP5*F1WXtUJPsZ$_uPcXi$bMuY7dc^QAnwO14_4+cj&Z!ogoip}JJ&pPRv| z2}X8P#unzS2Eu`m$$MI;jBrTWD*ugukm4zu$H5Q-FR&lIUBG!9_)B-w^i=fZHBNK0 z-8Nrqg>3q{_a)y_VZh0jL-R8M4J9PlGw;41gpQ~FgYbY`@@QSk`wq`u$&zzcRH7pO z3!#q4Rsb@sU;@pWL5f?}|zvjXhUdxLs3aXZ$UkSBjUI(UjV(khyKyWfQsl^IrNjFg8@Sbp%Y{BfsPT)IF2GSqDFnbW08#}1+kNsSm<98)=FDj9XQd63sDOHB->d&B5IooiUZ(YBE=a)lYo zrrQmv)}gpjd}!W5MqOoX6fta*v14r6Ol8>E79iV)7tXfFaeVx%0DsezltqY;>M4J< z`oS9QhR=6I8K=gpDbFva&9=0quExTX16`_xiHvOU5D?3==#9y4H^VisFiVh4R7kjZyEcY7Y?fV zO1EX^?wJjI;H^MS`KX|j)ej9ma9u^GvbM15ADgMkC9mP- z>8)uBl#VQ(V^}DY92yPK2f=x-P^<7r;q}Cbg1@pGH+x>l0M1vwiM)hFUP|GJ-z?}F zoY0sX_@B!Te-9iunlgr1h6P-#JZnPB?o1~;oBQHC^jfuqsbO%UT_#@uok;AaCu9`& z=O-+Upo-r3uh*zAVP-#XuF}V@{U>T&tbO_AR)3n*Wipct@#J5tKI}W&UV=MgKJ7NN zfS<6-mI|IW9|)csV8*ww8M_AqxwDEf?Y5=6+KtGeGx9BncoWQj-ME2?XY1#P)hS+RcT3dt2SS<6j_(716j-PLIh@esBhJSbgWVhrfoj|G=T( zMdxWoR#E80CCey=^q1#+(LhT#7C9Zhxq_?QNWLp>5`%8r(5a$d(7?j)v^;Xt%0^PHBrNe~n6+sPUEuV^3+@Qik@LEX1rz zInYEoecQ40Q7f0jrVaXuLC2GQMnC5b9JjPed&>*lS0qB7VXQxJUJhSiCmVs` z`L!NIJDc+IbQfp;A-LT+a&{3DE*O~jv0By4DQIS>z*dluvQzCy@nXlNH#*zoW-Oq5 zjD(V&a#=o3{3BI&Os!m5BY#F_&aWm1X35nE=)dBs`mw)J&9a1gPt&NiQb7>LmS)0N zZn-=L6svWrK+HYA-~jZ5RLXF~H#({|$JA@lGY~}e+ybo!)%DZc&Z)^kTkyRk>Wg2M 
zJTSlnX=&ff-5&WGQrB&dJf;1@Z43QdwmU@E)kSzi4fldeMEA6eS}&E~a)uA;fEY5a z2AJy)c_EhR72$nK@3IQ&luToU^v$wlyvq8sQS3T6_9VaCN=GS9VjM3(T0Ww%7~=!)Wa1@j;#{0;zOU>XnH;yB_cPj9<<6 zH!_B-7ctfu=6VE8IeqiNVLXT@somgE5OFC>ljavcV?p-D?lwf^nunWl3zln_dvyD>ED6sYVc?Lj?Tw&L0-f4vBeQ?rE)4p6-L)m z5>zw>*wj>%=F-XAY!o*UFYIR=|*&LxBGu1hw5xc^>BtX`W6n(mx2P zQ}Hx?SiDMGM!nNBo9yd0N0oJ0!CCFF&ftZbJipMPZFDUb1TS zcsPXYC?#Y^z%Y)kl^}al*Oo3vHjxzBpWqBPoW;%wsIgMETJ5>t8d)>5-+*(vpMFM-R{n zRIL&z&XB?rZ*cgtvShH!J$lNuI}&LgFJ<^MC^hU!m-83zK(d#;!?JVnJI$A-d2^DcTU;|T#n zF%EJ5`^U_a%c68DbMoJvoD>{xUt211Myj4=m1D%eTXz0k-$f@YZI~~0%T{LBLVgB5 zlip$MrRo<6Ek+B(Bps5JpU^B8U}M20rLeqT8b((cn5Hn+YH&pkQ-Z?IyE}GAokd=^ zZd=B{X&^ejd}g^Ux*oxlAs4(#%^^d5fr+|$izCz;l8E=fn|u&Q_hzZE>xajh{mq;K?Gt(uK$$7A)DL5RY&SA1 z?y3o^M^h^1mbp|9{f4YJT3POz3E6;s&cr(O=fSgmP8+b=&HuZmH3Q&I-qaW3`PL;#^;0@y1#FauD!r*sppUPUY zf`HErcz@7X6K&EW$K0qGO*96;akwz0cNQ~^*v_HOm=*5Ze5dsoc-W?8?iP6#yC7#j zB!<~HtYp~5_>0kTeCucGuD;AS{SZzqZn0romASjmnY?_gGOOh%!~W-O zPv8VW(8r)R?Rn^4M)N+t)&iWGaIF;S>X}1jiG)`KnO1CfxAbEn=V@W9{~#oM|J$as zE$Ee1*R!_5DUR2qz^!Sx39aOzg5 z-b|MKhx%DXF+Eo#b02F)<9vM;oCq&mm+Cr2Tp8#*DK}&57@gJuYU%dy!j~DgQD57l zcHUrSnrRgwJ##mobGGIgPPnpZZ{`Vr@JwCwt%NJ_??t}slf=Y1Bx}!cL$(iOhKNBx zV^XL@Cl|k*P-m9^2Z8?cU4w-m<#zb(+rEp_HZg#MbJn(O&dU7p&Td?N-*n&`cuQp^ z{U#J=(_J&G24|3CV&oJDDJ4D0eZDjsxqUmVd=?h66FZY7mXMUK5$V@gg*N-%5{Hz1 zl=n0)@a_5&&spc4Wx-l|zymRt+MqJRwoq}|hGpE;L7$?#OqZBB%Zord-`=&8fUA>J zO4ShG-Pd8FS-;L69d3#r__GqfEI<6BHg#+ZyrEb*9c{6eV_;uC{7GpFC=FpPu2}HA zD?v!aCAMI`vWR$U0*Wb1UX{6A3&Uicf>ykp9MGqEL$6@SLX#_dA%PV_}{L+wx4&f z4})a8Xy7(K&FMDXRWARPQme+Sy$=Kt(veMIt{N$eb65bo4-|FjYy9RvF%94b7zpx0 zvV6+JiI>1b8R5d~)be1KT0Hk8o&H|}P8pmd*j$j3X^Q7RhpST>S-ez^=3^1|%i;CDak zVwwn0BEN{h08=j+JRJ;7(B9x>HM@7ujGogwN4RNAI>629>=g~GWP;`C^63+2 z>y^UfW138p#}zYd-}z_1_ET&FY7a9X1Xs%E8eJv4 zQwcxu(uC|ArlFBKCr$%R?r7wX=znRKvowvu-&FC_-82p_ud-!fb)`_|0vLJ3phLXZ zHJ+>yLTvu)-c4rSs=7x6e{{VBsF%qhk&9iMJxfe#wv)ofK}18@ABL6yITh1n0*5i+ z&|i#>337m~$#O9bskH=NmPcTH`5?vNI#mBi^eN-3!+gFsrR{q%kOwl8U4JEB|8>R; zkCJwqCKH*_(LzOi&jNjRQ*xNGL(N|Iin{>WRGB@7)1V%j(mP*p%8Ufw|CEGfH}cdJ zHbHkMTm%dk2pfO#*J{hmd=(_DU**gsf!ws%Akz~qb4(!Ft+V^K97OXJsbnTjWlk$) zCvb{JEIBNVu=DXw{A|k!J+_$Q63tZuwAt z>lt2hK7Dy;JoTV+C(jD21q7no>cDI!ssmDjPrg;|cj^H5lS3>%*+ZQ=a4#y%EFfO< zaxrK&%_cAv?yQa052g_ga#Zvo3)xlPr=e@=q5SqUk}+0oMa&F3t&~uM`dn?uj#h(H zC-HsemW8G6yqvO}Q@iev3b(;potAkNYC_C0;8DpHt5c*$4J9AlG0))M0P3b53nGyf zY|_m2YoGWCJZh$%z78e_DB5F9J@!R=_F1~ltLN6txF!Kw6EvRDxT%;P*lo`#7*-Ez zB+uAfT0kyvaLa+o%&F9OS|k6Me`mK9_Ko6Nmdx5kaN|A4-?a}7KaYKisH|o}Wwlq4 z#?lj(4CBbCNRdEl!45BC0XgG;tXYk z?Bv4V>6-h>v?0iBrL%ASpjeG0NT{%+QFj~(#?YsjrkMR~eMPZv#{=Tcz6P>S zud-lqC1Bch1Xz8em5Ofln~vCCd)8Hp(9}Q-su?_kcAPs6x9v<2R0t&gWR9ywGqHAlQmi{hG zcz5-=Y^zboyVKxt0`+|HWuLA-BwD?n;0y;*x;=R$VwyA=JSR~v*pS~?FY=EL!bBE1unoFzYI4MsrNU>I+1+}f|; zu@j5kc~6+WUTyaseU@5=X@*}WDk#Y1cFDzUHoY}_(5V!0fO{TrDttF9X)hI`+eL=K z9Fs<>tQh4#;ECi!B|{WEbOQp{)aB57RDOIfFb^Q-Z&bK;2!NcStKv_W2!3a}^S^x6 zQ{hb8`C>jL!*L!~WqkDyVd5tXSpIVm04f1R(KlJuTiGU8wJiRbgdg|rd0FU7rCsr1 zCzX%Y=hm0X&C%a+J8Y)yuVkM}vzHC#em|)ot`W^7v2Tg;jbfjNs(&K&mHM9TvQDRo zk%`osu8V)JJ#+vy#hr-VDYOA_#=v-W#J&xnxC~r?;$kZ}S!#Ss9uNG`u)XxmuJr3; z7v=3|z?fh1l`;;9i}Od`f%8n+n<)T-q`{bZqCK8ST#hBDqI}=(tUq+nxGo73vQi?j z!Jstfc-xef!MevAw@D_=P?^7b zrSqWZVU1=i>n|glY-g4G!w*}Lo(uYARGQy9=7ctLuIDR;o`qHZ9}4&7n74mT%+7#O zdU0em%gl1cb{Jly+2YDJk&p{9f`QF~R=IXol9@5G_?>am^{YvJi*=%fCtB@%{n(fY zFPzSuH!b*v$^DA)EL>Ee1|LqZAPm`1+M(ERlf{6dr9;JR0{zbetly7yWMN%sMi-|Y z)hfUBjD_X2ytN9x|IVZAmE0kEt+d&b7uoXmLmI{j!wnU2Vt$80^y7Qze3}qL^V6F0 zqfI9-H)-Q(+P<%GtfT8q3DGQUx=z7!_U#Kz8IFGtaNET0ej?nhH`IK)%CmUgvn60v z#-G<}(0wiofA;h;_MPzq-7wzH{|eBADtOa=tov|B4ucc!0-gpY9hL}I96t^xBEI*o 
z)G4+aVEq7s#NN{t3i03ui(gUV+%+~hxMFdszvY(Y$TgQZ$h(=|T-Y`6rI>`9+gbl5 zQHM&4`~P8j+)i&6E?j{b$L&OZH}?A1$x!$ymRqUv85oTj;I(}k^c5?QXDbQV;N|Hi77`tme5h`sb z8c^eE=xDjySr;BzntZ6z2ezz_)F#+YDi-5xp;yo;ZQfTOqV$8~wNK(+b@wnst*FiK z^{lEWCYYLQkS?@$;`-q1|2qRfEhr!bUk=)~be1?l$^!C<>Rn)*; z35(#fm70fOPLjLvo@&N`ZIT`WQG$yky=-2hX2noW^?{!?A_=!M9fT}`cXe4KPz#oq34G6ja=Q(4|n^&ZjHFM zKNxo_Ky0d%mU>dlng5e%7*149$3;|nVAflg-h`6Gfv%mV*MbB)irGq7lMba&uvV!a zg0kABqil}!U#tF5nejDenelgUPEsW@5G* zhC(}Y#^?bomyp%i4yiZR2aPk!3PB=oHn_+)H(GMT_mf@N1-cA8#mtZ?AZ>vinJ zVgTk&6mJnUakAwnzDwyzHLr+!1fj+&lyQ?X2Ib+sw@^mV+5EC=ZpxP|TE2?ZZP#hN zE!o5-B-x=%s=m12vpaHv&6t6H!U^Kr?$hL3vI$URfg{`(rY3*o)K2%0u>xNr6fjJH zS09B*Xuhr24kw<+lRLwz^2%soxi++drVtJbD&&kq|7v?_JFCbAu8WA~jHaW0>de8V zuyUUunIaaNFGLLFu+F?Px(@kz;O zHwQ=*&ub6x<3Mb6c3&S3eOk526BE5!`O{gIp2t~kxIOYBwh$?k(G?Vng)5SBzJtLo zNg_V^*f^-JZfuk?MC~om$l+;pG@-^7<@)=Q_-fv$8*DFE8@W{_!d_a*{tyEUZv*{9 zDX&Fhioq&tw4d`NVL~V*RcUu0jq?v^wF?TX4T%U-V4Z0|eY#rH78)6lGSxSIp!`} zOEv%dwD&yv3So`IovM>;mzBh}#9y-C_Zsp`#vFXVx>by}*5!)j$r+(`8Zp$E$sqtf zeVd?e&$l}|*gF2+IqhI zoq68T=d|(Y98Qwt@vrEPU0dGzcDjQHiPVIV_~QF9`x3Q^9c_pDYSW~Kt2MoV z2Ws*92%ea;)n+GK!rT$d!tW#`Q`(R+hGCFe3w2|wK`l}dMj}yB6O&t(ElzIVSzOm=waBA zBFuWo98N#U$TWxME*)d1ZxPDru^q9?^CZ7&f8&%^zZIenf@Oh`V+MVhE0n+L@Y*Jl zGpiXfOa~M%V=FyVh(ncHjU3ZJ7m3kk&I(4Yh-t~_J%es?h z%lg<;z$3V>6`YCI{sOV5!8gT3!?4*b1wo+W@cN~dbM=2ayeSwNy&WIIwlj_hHx2hH z3c~muN*#8fmD@DZ%a|J=67#OvD2HO_$ak=-`NwL8R8qIb1tgj@j768W|Wn(+N&+&DPN8V^of?DdG*q)>22z3kST@y8%bZ=>R}kJ4i|U zgq8v*YjZ+@U)Q26Pw0wJ>AQs7HM?D6-?zU#o|z^$M{}>t*R8@siRc7VT|x#PS;;^s3an!pr07cA6w|mAp|n_PE;9Ejlr) z_2q%r2)h05QtzhtI9%TCsr8+DA~^*NSG!%0ny4)!K@B!4*n9tsKDm*3w1W!m_1@@? zR_9TxNWM(w@5RSzf5z^;SI7jM_HG$&s@MhtO&30G+wG7`$Tmz#6$@363gFS$!(Z{( z0;(u0P`!_?RlyvEvSLFfKkg1se;~|mYCE+FLREOEC`#VN<7$t zVH8{NTFah+WS?CV9UDP-uR~Wl8EJBE zR>ooQYjC0m1$I$finZ3F1FP)2U7bivv zm&P)k9x2KuRsJli@JcvW*y@}#P68(+RJx8<`cnj5U0@T8NTs^-w$zmT>AOcni<{2% z0**_<%;Z@&-jQ-7^x#De?SFymMoi=_70M0zVvUz>XfpK93yF`wNhK%FfA3endy6x5f@2CNyx9CJ=2G zZwxy%$Q|yBFwxS~$1}imDXF9%g*CKU64q-=TKovG2COHs|3MytP@xWXWw*=ge_#m zkUI2KW_^*@nBYS{(IXcwY2a)>DvJr}Q1_BcTKg;esBNAl-*-k23>PjRb~~#Bv`rzi zAN8MJDi$sV@cn83P`>XXXKTpRf`l=f^C}=KbiYydlnA(4C%okh2EFa~w(1z{S zB0`k&Y7NjMMah=~dL-huoGXvEsV0tKKou0ILh~&Kn1BLeSi8;mS!!T>JMs@Um6@!S z9njHCw?2xT=C)JG-^Xbb$rVYyhJmT-AE5gjEw`R?0@-FdfiXtXJ8jr|deX(cMHPk| z5e-Uc>FN!Y6)d(k1X}zaY5(allI9*lKDh*m>p2?(yzsVU4>l1t_XupWRZc4!zEq~? 
z>TeF}>qn`JTyC8yF8-WvlQc7#E*I}OX1Bb$p>P;b7>+pN-%y>n#|ZwHN;UfoD4-00O0-3Ao=8dwG+QEbp; z{{*j5i=lDIU9AoeI4Tk2=G~%q46kaJMpn4|Vo1_6eEEZCxyB&fm(#P!Abi4^fjB9R zqBbvIB1UwUpmqt47;oP5spwx|VYLWSUF6ClEEYhJLJXkn974^w*T zWWCx(pyV{#aT$Z2k{xPh$1*cUX{(_^tB%tg#q8ME<|kMpEXbxZmi=8F}8Fl*5^?Af~c#LfOqn>W!!0a^*b_^Zlh12P)Wz~ zykn+Pu!l2b4>vx=6#83u9JQRt+!`J9roVe zN^WjJEzBQ|L*#cRx!;$!8$xx!IpZ0OVlKyzWHa9X?30IZy~@!bLPB65&oR6k!HplL zC??mGoWYICT{RAx^_6e0|3Z(|NDp~#FV(oW!SA2=zkRfkLoI;NzpI>Nt?7*M2hM@$ zP2y7D3$NFU5wJn0xb0-@q}-J?X2fpU|4di7JfvYy3{k-Zu}`>*MA?XlF!sLI$^7hI z)Z!8>ub zGwA_KOn6^T3w7x=erwC7Yu8+@mlsKF%uTvFfkQ0zQd31nG|lwG$4i3d@OwTMKZ-&# zGK6A$hW;Wt3cIB;2Eg4z$oo|JtZp^!@n3^m zrq=L*$8*%4>dC>M17HzXVuIgkipu|_kw@Be(RVzobA#SOjD3P<{KJnML7IFX5^Xq7aM@b*G%FY4!g_^Wx$El-Siz*!a7dR=Vplv_^T1yD*2|xS%6@MrcgvI7NP--jVpg9dJRlldq_JK(N zqJGb!t*~5nz%EkHgn!HE5AU~Gr4zBFR=`Um*{O0$JU1dL7YF@2a{fWUeqJwTsMuV4lh=8>W0>>-R6&gMwUz z=G@$_$RZ!YL@7F9`IbZfy-M<)^eGt*)2{RUX=o#EC}cD2q?tZsk61=WmO13ZR=Y;1 zQcw#rq#^_iDpkML%;x=-N28gEdev5p59tFlGucYU-Xt1Xx=6hXvucN!`cE{nRx73o zcx!$Pe8S^uJsLZnnAFlxG-X@9sLbf52rGx^TZT7VnJayysI4oI3aTrAb`oMiQ~3!t z0v7t`W%l&4ul@xh+`9Ut+(e6=6jV&}fc;sMnm^BNC4F+O5pmp6r|Va-xs|8=lmHesy8ZG4#*nLeE)>?Enm$Nr?V!sF5Hsx|hvWem18Ixj8t5qW&< zzY{ca-oN;Iwm~&dF@HLd&`HXkk8Q4#^>2d1zpWehE2jzq)^8aD_ve54X*P1LdmHfU zWTbEGbqn;>xf;dT0G>qrWeIb~fIh{kY)#vLZp9wktvti+F?syYn!N*QdJJkU)jtS5 zPv4x(n-zi4GvGwI?H%m$e*{;V@Od5QhhA-QwUFC+84HwWcf}wkfFYZ*9Dfj z7!eBBgpwa5qVwQZVXEvT41c8KbEtEuoJ?~c^%m(2OP1UBk+D>Vy6MuoX%oC|O3u-Oxdz zyAcxeqj3lalv3(VZ zSa=#MTzM%%dxc`gLd2^{b|y{E+V;`x3RIu@OVv?=W~4WoCEfZl|HsHAnBRq{cCeWs zL*g)L5Ygc{*^`=E4G8rg3;Dw(HAcvZ@WoY+CH}+r=bwEB{$kyz=FDi^g-iYkW>fL% zYv<}>-o%As?Vo#+g3DT6TC0+~`YP9+i)Ppy%AHKc+;6gur3K0YHdlB&Ag$!mp=J$o z`NI#g!db817c!!MxaooGWKO-x)dpo8las=g3#8R?Jo5B$qO&jYhx8QeqV#B!a~s~6 zXWV|e80LNWi@z`b+r7Leng=;dv4%2E8-DuADYJDG(2+qDO`fYWW9WvGi}!Xd z%PqsN8jiu>vj2^;w+_mq3Eo9<3ju;dg1fs02=4Cg?j9_-ySoGk?jGFTg7f0;?w-4R z-}gKBRNZ_2I91dZwKLs2Gw<%qGu_iqH!rcIx{JvhbVNjva-bR|$dTC(cEh|{xYE+` zPW(yjm1OrHcgYvQ{?oI-hD(%iFic#FQut9cRd*f3Us4pr%$J{LiKY z|4hrBWQNvG=e@G}?$l5h2&{jUA=LzuPlQiLmXzfCe|(x=QD>*J^=iGRyXgg^gWRZHxV*(Z+aZvg&LNdQ?2;zkIyyC~~mRL^H_K|5()^O6> zVTGd9>f=~u-v$=#QZk|S0|M-dYJa~|Ov$<2FQ@jJ)uq~JQhyDaa2XwWHk8)vo z{c%aO29Gm*<(U@S5FpSIBDcpdg-O?L?t})4M4uX6?e0&xBx$ZbCEa>;wTh!|Q z*M&bcIA`WzSnJ}`y>3y}R$3kZ?2W;bZW*V~IsTyu5)Q}|;d-4YtYI?AbKaPgzjlAu zZ(gU0hO+9_FU1(OQg1jmJ<1tIQMX)&uqP$IHz~U+XJ|MoJnWa9yL%n! 
zu0La1XM5W>;DtM-iEQi=k~C%9R=2s^Pc4ZhoG||jhO~kQKS9)xx&Q0x=M~6KFN7XH za37b~juSecJOq+ca3wiwfM&pXFDvjSufCFbxhjHmzxy7y-|jcB|A2k~SH2gA{S5_n zH3oM2j4~wL3u>b_X3_vnA=6&wo2k(*wNP?M6vjP_zE53#nl^zTWT$q%z9|xpYn!}-`GxVk)3pho>H`Mm(puW(|B+%B}bcUt^`&1kW(L+k)($(>&`!!37xWy>z+0y?1MX)g990n%&S% z3F@n-=r(*!-7N>(1SDBQr6#p6tLx?R>;5II2Q0>Zcb{ADZ$i;JjdPeZd#axw6 zY^h7uY@~2lIzW9`yZc?_cjP^@kOG%O>ERPP=ghb;^9gN#c>pfzCFs72Eox|72FLKW z874t}73B{Rs7g!TlRl(=*?hu5nJX@uP3N0n2=4b{vekriNWwwiHdl{KE0R>4G>HCD z%8!^eoMhF4_29OK9$LpV#TqyW{l`@BNHtt^zlmA2o0!;j?5-LWQr9_I$SeQV*q|XT^n1ms^@6s3vLc^(m%^9%sk7;RtAY{_ zCXNz@3k>I+p=~X%19ZSX$g7#s=UNga=lWM@V$Wa1^mVNfUBe%gWjxXone z`NU(T#_Crm&4q(M^Bz^*21Hd)jf0)c(2EMySEZ3)E-)=W0QlguX__N9UftW9wdTvH zEf5z}ZWHAbfWU6<^Sc8849iI4oI8o%!gRqHN4g@*3n zyB($@;i#`7MUpk4YSm1l>XRU3r9jUnFGk~hPTDkza5bCz>??~A8~X))7KMHwJ;9=w zT_1s6b4aViH9<)q%*iCr)Bjk)TI{1u_(k2;YTB)+K8c7iDNGO>)W_O=4vG6=&mBGH zi6st+rXkPNZ}cM@Ez`^B0!oH$v=7ZDuNtXY;fnD?eQAAJ6xmbssj{?!)+ImLBLt~o zNB>ylRbm>+qyu~H)s$OP!?l)a?aN8c_%`h`CFcw;oc6wJ#EO9WA__rU11g-XZdcMq zssQ$WHx&oR(WiWS93y;I7+5x_4>eIi7HsrJ<1&arHlp9f9 zOlfk6*4`naM8QN`(QqVr*shG#HZZ}`?j?DN4X$*>sUyhRd6XYI;cB*i;?f@!XIJs& z{a}o=Q|>B=%7TJNu%!d#sM^v!=dz6>)|Su=^nQSu0;~e*=~R+xMoT4;KW|5feVbIb zzJ{k-*mtlvZf3>&G3qv@7wyP{D@Ebo+ReBXHGrN--wT#VOxoJKd=koVx=+nE2r9Hq z+B>k6{}6ksqa}Vu4S0GUq6QJIZG%L=*viKz2x&rxalDVWDy^gSqi=SGDCdUnscbz~ z3a4WZZ9K)SmdfT7JsgOc`iZ_TVp!~ZFAnpLYnbcq>oOmyZJMKprh8nTT>mJ`QB==| zG~RKjKq`uXp;x)Og)~mZEYkR*SuKstDLS}eW}1uTkoDCKf-K}I(82uq52ZSZM>5xi z?3-@R)&3Nw(| zUXQ|GCNkvbTUTw*n&)x847rWaOX*~RxCY01EOlu^6uc}=9BN!@>H2l5WZc{!V!0dc zh}F;m2TlqWMVjuhXdP<=$0$SG)Lx)UOE1C(ogJ`MbdxD5)T~9JTCpJNF1m?I<&(mU zF)uda8`!OWd+5-Q{IVp(;!peZhx37yA)o{48qM7sc$%+a_sn@9Ipeg=n2^&GQ@)~WQsKPBHStOuJn~6$Q*lX8YAMqLWJsW zm4sSau-@kn8$fFNus`?_P$RccIGdmUm*Ge)Zs!* z=In%=$%vHeRA4v>7b>dygABd!;%Wi>mf9=4A3IH>y#?pr#OiG#D`4FHi&V(lV&c02 zU|2eWWbmi+$!K^DS=>x97qJwTt``FuW8+FogSK%n9&dQXICCQ%9!XpNZj>o#g3yTV#o`WE8_C ziB8{x4en}QV-)rp;TJ)Bx9i6%gtjN~*0wn1YBoQY zJ`$G9f2KMX?KN*GLo7izf8ys_?k7Xc#a56u%c9!ITo}o~EH^9jjvZAU^Cq%uB|r|1 z#-0OCiPl%cHtfb9=9CbOFD2>}Ch2T`&i*tjcjOr+P~c!qW?v%P4lXL8gkIdGvF)bJTtu(@vFsNHs&lmC`Sy8zja&P1*>_Vh# ztAYTp(rmn~ZTK6YQ6=pPcg{L+lu%Bqj<*g%iZ#r%eyIjwTZV8z4o^3Q4#a5gV3NU8 zwJ&uQ>JDzEG}0Gw0YL^re-@2Wc5jO;O_C>d0uQ@>%64k$!6pG zFH_$X*jW|gX685G!;%#Mk}H<`hX_aNca0`@JJPR>O#QX>)9KE0D5?WUxJ;}8(wu2w zwHeGvM`}weUCT8r5au#wEB*B_2#U-BHV?&vsu?b22O~;+xW8K<+bp1e z?_iGMW`cu79CTt}AI;b{d$~)R^>R?`-n&gbI}ClqVda4U|2*YG%{Es&=kair*cn{O z=X;LJYs&?mKwjj>Nxb43HS~4B`OK75$!>{*N;Y}0Sv@AXYPnLqT64;c^b1F?8Uepc z1h_`nDVfz|Ob?Aoulb69vyrGp>SR*0`AT4`mh8Sh?^iZ(g=ia5eLsRowRef{Q3Oqy zzD}ZIzWgLBm_b7{!RV%GkvGe(Q3F!_&Sy@8MblS*nuV>sxC#tK=^`YziJq#?Tp<<8 zF_-g~=`F#azA*H15CGlCo^?r16b$Gp@_1o!xPeuldUo&G0@dmxZbYB`X@;`&;wm)a zqtlYyCf-zdo#^u5rKf#P6g5LmS7q6ntGAUn!@$`~^PM`Gb%Et>kpAQhpx=+$Kfu>m zUDs_}Ov|w$IpwYAo^7To1uWs9zL2Yq4?q5x$6lInn*KAT$jes$V7h&PbeVtm>{837 z-$msgVp^pY9p(C&UoGCg^Ig+C&Qc{%IZlyX<$o#{f2OUd857LJZnWHOJQ?(_hjtKy zzvu@vuhI4wP&A^`b7vOQ7=7U02kU`~BcUp-tDuS2N~29G(x?3vyu#gezyTXrjxifv zq>(+;Ck}#W~#7>CLhEf=a7~ z&NN1%?+RqfD+^7(AOZ?B!+g>##^7iI{S_Z(TwaN3HLGfy6(F{|uWqf~%VkBa&5yx! z#4TLnranU8rQasO*1f!bms+ygs7>V!WF5h$7!UXl32ai$*K!BN#nORW z8xqaKif}-w>N6C}2+fk%Fbio{3+&NEap+AG_p8mJFV?T|iFN=!zN&nYDDKX&Rufio z!5DZ?HZ1w5`q7V;-LQ!rtU5m8F_@z*wyaVsGarSXT+rml8a{k71j;J^-!iKe0G!)c zl~+*#PB8&3Y}LD}W~L!!D=})hFYS7)JLB;98Hl^lem~gDWF0&7xNY&{d^F9;Pv@n*PJLDZd3A@d-UX-;? 
zB6f)rF(udW3~f1TBP{wiHn+nJy?phzS2hgrA z+?dxCeV_X7Lc#AAD`>WP58L^*i+~4nRdGkGl~pZW-Qd21tLNMv78i6L?ke_zOko^ zCJNv{y6jlB+KXkkup`dDIaXs0_!riD+qeA#>*vM^PXNa?mcg?}=fB#2f)4#;?chbGi3MewsXaQ;fmgKJB+gV4gvNHh9F>>m6^YQr5~ zaq(|iXr52#TWhj!*qioUipx{usQSbR!K%E) zi_OUT>F}d^hD(;Cp&N=Z@UnC__fQIgE7(yn!LHu~V^GE0iBEPCH~~8#025{V+gvpvko>0BZ#O6WQUwQvz?wrmHj)B*rQR)P9?@QbAnMH8dx&WMl3T zq|pbYX>;vC++=u3Eu*E84?lMg2Ys8^m$ZUI&4D)TzW-SLVPFXQbvlIA%E*W(-U0Y% zx$^!91M-Lhg1t(o7=^Nk12VLwF2r+=&NP9{mJ%IfHJd}&l?N8&?VEY+q?sb9$)!fY zlznvH-lZ|oiT_91CBVFv-YHTFDu}k$#<~BG$Ls}?R)!5-w=`>$9(=R?^zZ@WW`{+$ zq^e9YCh7C2h=&ArtyH}@jE4>7mOpHHeFXcyX&(dgkK$e_7H3{c;UU##Yg&OAT>Y_N zX6Vg=s6R$M&XOoG@}&@wl(KW5c_8+YgiMo#pQ0?=00(Ru9hvvHQtj|#z!^?L_Z}R` z?(=8m&OC*Yt=G=QB(USG3>P#O!45v#9uy@=No zcH?6d4BNltcF`dVWh-gyz_}}|)C9l<{vIa_xWLUKy zqB+VA#Ehbjo<1#ZdvpZ4{$JkoYp9`3P!K?f!6mavX+aH0M#B+2`K>76K*7evwt%jU zY_R~=A5_!{J7VxFBd*B0ll7UVLw?M6#h=9=v1J2$VitqDrwwu>343erTy zHh|f&sAM%%!ogI$`_sj&gOf^bi0SD@f&V(Q9KQnx|`nR!){qNmr>-Ex+#r)o|2T`{wSxfcu7{i>eV*zDC_CpFC7f=nP#EP zpm1)x7=wi2&>?<$9LF=L%-Qui6_80!s~~X_k5>9cAoY*@`87<&Lh{idFG1V@6>JL| z!0G-XvI3@_<`NOp-eFQ2fvU}8OwhLGFX5T~V|O)ZSeq%k-6~BsxXx_%AComGlA8?i=-EXh!5 zNT3t&U?aSm2c1g5xWnX($-PQ+A(IMZ)|OKNn9!*PP7^ZeQ@(0~+(Fdl*Pv3d)Q%~7{-Sbs z9{0`)dR7I$8Sx!>iFg5aD;ky#LJU{oQE^@4m}8j`xs3f^5vlHnFZWR#w?xb%Y?78RIUHJV!BHDn+mbwYP=4Lh( zQ3kLJ_gS%m{(Tq$9ht&FdijP4i0}>e*#1_zxc3vid(ho|X}iY#X3MSJpTG`=0=5?M zs!(|FV(UG0K~0=d zF%gb66f~xj&1PO@4K$z3Z;zqtPXk;|)v8D@y@97P@brh5_ci7P6-`N-yslMg$xb?c z;Kz!0>-_NEWWy14O)DMl-h&bUJJvdS-&9OCagbbAuDN#YN=aD-%O@gG3Dx9x%S7UX zHph2;^z8#NS`!$vi+B7Jh+1rpAUCTTP2b=7eGi)NWB2h?O~t|uo|1qH=Y`?FZmdwR z4KgF>8}f~|fV28hiw}jD9}Dip?@5t{Ey5yirZ0^Aw8rHrAciYFJ*&_r_7&4c)DY%a zwbJx;1b1pN90_f`)GyvzDD5Y%@@eSY6-1}3!(_EG9BSnIlU$K7lUi1)Wn82ncL@t=! zpl-|B_0q3QV1)h=)*Tm~WvNgSQLk1wCV6iGS%2eEMi3#AMR4xx_o)7h7GljA*wmDN z8wVUE*J=Lp72F`Sk$d8qMPjv^K9t5xbrt{8@y;Jyek~by?0(`?tG^bo53A(j-P#?8 zfXMX$DD_nUs%dU=mgjaLNzXIJbNp*dQHo9Ev1z!|XI@~|rvYaD)pUxnkG$MvL#K`A zIh{avV9T@)m(5d19do#{sNm{zXk3Z7Z~an{k=A(7*`H5tG?^&D{`@jT4*4uFoz zZuj5k_|A96QvOBciLl5!OWmJr(xaqd##c#~ZXV}j@ zMrcG*pBJf}c4Sjn4D!p&BTLgQ)Qp-4a@&H>C_KP$_ZE56IPJQtIdJ z|DNAaOW+Zw!tCHZ@mBUl^-|gL`>tqj%R{^AUoe$my5$PGsV~!1B;40VC3oq)W-x}K z=_=ewmd)H4y=Xkf+G-wO^(Z+?Fq9~;PS%fVy2t*E1*?k2(jGJwMmRe%8D*HE5-BVx z$g^6z@J*{5Z;WVPjT9-Ks!<64HY^H_3XJ56m86m%;D2GOJx&KGIR$_NyZliD%t}TNGFeDMKhcZsR zv?JmZZ3Lz4;amF%fB1RJjN?(@icmzl$IhI{{XHr#^HE)dD-s3p7;n4 z^CmGGOs+;hLOGEFbR$6kTQbudMT+wj(ALof_D?&`ULfl9x)V2C`v?D0O9WZoX1qFG zNX-dSkFON=wsQVATF1G(lTZnzIb4jljd6P`FN@%DW}4DBEk*M3fjs^e52g)Q#h<|+ zrkt^homzdk{LesHyOph8g|~=GT2PMd0BO0Fc^gy}cyhzGEJm2SOx( zbTx#NWB)^c^B7z^fPXmuW%$3A&v@rZP)I8)r zw*|Z{b~)T3@VVB+&tPkgyA8FzAeU;kK}R5U6;6)zkh74;Smn*dcjaOI z4hNS%*ojz%ae}G&1pQd#uCo*Bwr~8Y4cTbHzweHd8FZtGKFP*t&l{ur0h261CTE`_ zagx)D7qdKIBopfs67fg85-0shpiRApCVBC)?n+>Ll)?h}b9B~dkjs6_YHL{l$|{Hb zN*KA}iQk6#p{9#?Dtfxmy*R)h(c{9chE~%0BqCYy<*>Y7Q9_nn<%}rstEbqp`UWON zV`OSoJJ-k=ttw?M98EdZNZzaR^d9kTU+n!X7L1})oz-4 z!b*oSmY5|k8rpL}gauNY8_ze28E+bng(b)GNPJt+Wj)yfX_QF4d&19jnRx?Tp;MLX z;w)--K1cE7VX$I6-In;Zo;t1o^}Qs%mFOaStW^+xOg5WLd;8~UN`<7bL@^7NW z*@nmb!-B*D1(&361#VNKFvBcCS7>AgY{s6LbxE_up+ zlZ3YqwSU1B+syOPjN?Wg;o|v0wY+h==?+S$Dv3iImA}{!l&8F@D!fWzy1bpG&FWO$ zK`~X5Ttv3xz|yw-zO*%MCFp8KsaEBseRxnkcximb6|ia5?vD_q8P@$)xmic=w^Ca~ zY8_s z#W^p#+Dv{0IC$b>6D?nBFB3$@)fk9;*Wp*cC9fOyDH}>D2+pG`;p8|TjkUm7dTB+a zacN;_+j+Kq&IFOv5SZXU{7ctq*(-I;?Cz^jX zqp1;IZSR~eX_P^uj=`S`p{TmFLm%)oW=$#FroyfYGzkYn@PIF-gT_Z20U zG4y2N|7nbk8dkjUI9~!*rOJbWF`(7-M0IJ|{tyR;#6!TKI+)&dhgry9FLDVJqmDl@ z`w0ZsA;1QtqU4(+B|C<}&vP4f{R<|-M`z~qDOY#D`|9zja&Y}45nE@#9?doz=oM;l 
zCgS0^QpwMh6&>TCHRpvnZ~!EWA8MpRC%z#@X(F1<(P3i_zNr0An6>%mobs#jX}LxE7R(Rb4l&SV(h9<`1-TMk@p-BdGWD@z6FQj-BcTfFPjtrie7HO1g%b2t~-YF+pl zpqbY;3&t^v+uO}KZt`9@xI3C$Hu+p9T>EzCl#TS*^Xy==yKJAkT;}exF~bfVU2C0u zVe>IjF^P~nZLukT&qMMbI9vn~r^HTw)z@d|`vTHS85~5i&Kcd_4q=zuy}azrptyQ? z*c?xI^zsray0b=|qIG@9dijsWeDr@9=2K47u|vbfDWb!2H`^2b!1e^f{}G|31`DJ- zQ^QqD@V~O|oMdWTIh3^(8kRgK?r*_B&I`xf5Ont9sP%w*APhQKP~*83M0jJ=SU)z2 za6A)pp5e0$EiFA0!&T})T>I%h&%T^Ry$)eq;4Q#p8zgj)7xWt?ivY>GYIIxQUoBrx z`+5$J=Aqlnry`new3%#1__36@v<&3126t*5vIYQ+HvPK4V6&qrBh5=kCJ~&-TD=hz zKR*V+B@%~=qX)n#Ji*~Zy`yAc1$uB{XdPJ82w0>+9ph=X5SUyUq_{lB{=fK!-(7-GifNQaT$Y9m) z@|g=cXY-{MmzfvkRbc658T(J%1Zz|>SaM)p&$F-(0u2X%>NreG_5-xujkpCC^vw3Z zNYjoLN>uCex3sBP`eX;Jdt?z$uw|qnF1;t;@_DqHBt5g?&6GtH^4bdQn_7ddKA;I~*0n5+u#UAn}Rx~I0pAUmyzz(3dqrsdZE(gK5 z!QHlf@xa;~x{!QzIsaG1h>2V7b84QQ{1Lw(@14%sW6?UUG*hrOzYv*xU76UR>@9yW zkbOS>mb~FV80w3U9vUWn_LRA}h7;a?v{OD;`dA&919orWRC%{CQAkr{+~wLR7OKD* z>uxb|%XW<4zRM+(;o*zx50fOS4!6^ky&Q$_6FAjbJ2D%7t6tKC(!3p+q+4@3_OGKl zn8k%;D3Ix}7?{z@&BbDwYs|SFH&+>Lamc>s#NQz&kGBjx3E}Aw=)M%D$Nk3t4zz_V z2rp-EKX#JaE^l7|dMq=EWmucbp69HCoK^}U;wuUHdr?JnxC&F>7-Z@O(%RI?+;8Z( z(_(tM6p?m7AV_IgnaiI4d<RdS1PS0~AxxE5 zvF~s)yi1K=#+>3h6FL7qkXMxFvPcYbU|d>bW?q6X%9>~Vtimicf$-~`PZ^KJW`Av6 ze6sUxyxdBwz7_dkvITRjNMY9>de&<3S$AUy=QvY^KecLC^rZI_FUVw5QU+b#? zRfwDhnYC@-JncB^v@u~Bm*3BoLsFD9gEG_NOm$4ZT3B8qiS@Eq6(ivo=H^U>N_U!% zWsn-otTPgc{^bKOe$M&jf~3I}>3I(Nn$RO(u8{RSs;(z-N!^o%Hh(w`tLsK7^K{Xn zTET_ERJH3p)eVH<8>hAC5uvJn#-w23Oqe8Bl&V!vhn^3v5l<%QmB59B%@i~$G&mcl za;y#;7Wrs{31EUZO`n*+vz$@6%ZscT00Z!$wfN9=4)%PP+iiMy2P=(#!3h2VAlsW? zi>{Vmyxtf7e`W0c|737STBXuciJP=$bo`2(wQ%e(-TR~}QnvE(mMZ>5O^W{@AY38q zR?YqhRjDCewn39s$X1hN2^=huc2gaw7A;Y8P?%W3Ym#&1ZWIoQu?{W@7DxzB zrsI7uItF5wZB-_wA_&bOurO6-Ka=RMNRN!n)>T?mV&Kdtg*sT?)}AJqoc@c|bO5Zj zHKb_xRYPz9eM%tXGe}L`Ix>LOdbq#HbmeMA;wX*=WfN399OG_vLKlHda|=_0`}I(Xd>h`;Gc^jS6)U_ zPcoOi1XBb-1cTjolutPL3{LGwiJ_aN)HJ8((!xelTT;tOe1;6Tsb5#)x7$-5cf$*! zGh2a(NXKhNA5B^lRUnOe%5t)Cl&t%%^2Ll=I@%H8;QOT?V~Sko;Ha?0flC2-^C7F* zM^x3D;RkSz@j_ zf3W_7(WAbkTu)A$SbN!fiR%|uQKb1S;fXo^Q^toTmWJQcV3Lz^>P zy`ZQaJxxM7o}ti!s6a<)E=x#OSy8^xE9jc0?%b6!y&O{hwpY4WD3NIar`$L@voJjM z<&k91%UDe~9)W~uvyhttH3vWTLr`T!W>QrNBRH%}#{`pMRoQlw_nhZJfM^oyDS+Q6+vROGfq*2oI;Z#8(Tg9q}x9n&Sehn zav`7xl`u4cXge>r_)Opf zK)PYt>u%$C>5T`H1sIKifpn@7=G@ATjU|$st z=zjp-Q)cyJIu}+o8TF*t{9$F4eLWwqfVUF+6uEJ(x|X6J{$;3*2z^dXIZT41RB-oFuP;VYA-Ti zaU4VEV26?Ox{7)~fO_^NS>^Tj+WHRsI1MEMS4JR+JUz4L~m{jEDZ&OJ!OEx~s z73B}+MO0-vX@(2@>x=M&N%zILF~}H`yNejFVQBh{d*$ZWrH&$#T_e=3b1doQiC%5f zK9u74RB3~(Z_pYEN#Cj$-I?(M@s>)`<@0;eORka@mY_ zS)-mPgyr;g)oQC~7{U_w_<3Uc>0Q1zMzLYUOTk+ehK5vzEsfKv?COAhR{k_9mPl`% zFt@~dYnJS8kVGU3|EYKPpfYc7w1qNXjkH%}M{aV$OvWXI&1p#I$Jq_^XC0MCMjvUv zkWldFuiiZqutdWXTt7?YsU{eM3#;mwyr{x>Ks-E~mz&|H@%>qNQ~LPeu!YunBMlle zvF>xr$;x8nq8R&ola#%T;p*QF5ODWm-Kq6hlWL+{%EHYHN88a8hvSmeGV`KjTtsAb zE+hT9O_KrIJiJJht+Gz+%NgCFt!P}&xS1sMCrT*s(vcR}==4T0bO)uC=**J*?uaLa z?SC&JPYPSf@ZSuWioDhfc(=_GWeB-UwG^aV0^ zF*(nkm?vkWo`@%hElQH$7$j~_%4o{%XW;X{N_5-5Xp?SrRYFu&QBfAb!;ph@1S(E(ZLwBXn-2Yb288 zmgy#5)XeIAozA4fx{&NVC##|)?yqz=O?k7Qt<&sk_S9C7 z-oJYl9s@}jIFea1G9c4A;zZ zim8vSd;}DRnl{r6zEcbi`gh||lrWUr@3ogS34g)<9RKYDvqb@e0EdKtfPzJUgZ%&t z2Zam{0SWa96$}lX2pWZ$34>Wc(7Aq=kVL`2$)&DgdWMvdMPAXtaZbq4FRrF`3I>ym zm0w9eFt@Rr&A+dIbLT?XC@45SZ{U(#S;hEg@Aj@p!momW#H9Sf>aD*^U~u5zU?0Ch z{sp^`PRaOMEuwH)v%_|H)9G0ySMa>*@q=OQc;?XiUN=YTzUAgguYI9t`t_`xFXi0j z|4``_o7y0$Go8ckW}0oML(Ps&Y(Tatoh`u1krj;UH3j-e0gawu8?5 zC=Z6$ZQZE9U~d22S_f`%{C8`5z}T9q7F+8tIysBTWDOZP7e4f~GgxYnvx&HCyBB=};nr`}OjqZ?CaCPazXO~hQ^ua0c@1aah! 
zNaCgrkKq1-?Tt?V5ng|;{IMz_vw$+%w~Fl`Z<`h=a5t0;K`dRV*3`JK@`?1%NC$14 zc<$7~mI+~W&Dw|Jg%O6kBMIE>WJ%-xAAtSx;HfAnThwMveylk#T;lV~W=rG{(_k;jNiUZ7L$Pz>sbUW5JDOXg(+$U ze*d1UdQeeDe+la&_~EjfLUXT|>`QUs?v_8gf!?FIUSjLn@yX*TwZt&aqA^#4+?r&C zjRA78`NLw=60mw}C=fC!!^3SF#H{16|B5U-8wNuJMVK;Dq>n-=%Am%pQt3cSU{j=Z zhiPR$e|J%mq2n3tnr~$8+2g_@^DQ$x5}8fn-H5BoIDXC1<5+m+>#(c3`{A{&ck<+~ zlO`p^ian&vTft1k8~<^TP6Bs#-0xnLgUK!7~%{+$1 z!a{|o=u>N(r<1}Zd^tCQXDu5(?&5F@v|k#f6am#yr0(Cq5fN&)!xPw=5K8MKLj(v|CneAepBdZX_+u0ddUX?YZWTPsC^v5<7n(Lh{kt0 zcZhjBEsf@Q6TV2<<;;p1z4PhsBm-SO`y5~ee>=)fUcjK3>dp1m>^!Lc3ueKMUTLsa z9jI~^c|BV5NXt07J>4x?CxNY6h{3EX*d!~1=8be^YxL4yhKHcOt1-{%1LK;_DQn0Q zVEm=l#1n;KwDW>ZGI)d3ydc>nY{51yO@kCVjkyjH8>W+2Y^qIAqCU2ICECTf-YaJ3 z40+H-NMjSvoYz038*Jr9lB@F(4wQ>M`WwUT~ZcCmb)B6882babp3fmE!C9$N^?y? zKK6l=R4X!uEmiL-w>iy;&+hv8dFx=Z^6{dQ=K2r-LqbJOQCAWUfcJb#14PKuwIH7uo|iaBhDMx#DwN;B~beWyp!Dk z#_DFIFQ3;Z9$He9jS^Re_vCiHisY5bgcvPsqi6b3uo5HgDpq%A)RYHEV@Hsd45DgW zpde%4et55ntsSYMO2(xStsn}00W~itmypDhp-0K4(PTc0TDre%?mW2Y5!0!m@!1h# zFbaDr{8BfLUg%&hVBEgbq+-WT36AGizi2TITeque$?kn~0+-T*iDkt}Cclvqrjz8b(p~U1SMo`cz zoSYC2~n2VG%tg{n~L$bm}#!+m4Fv`9 zWZ2A-mDl|gY^R0bM#NkEcDCaU(}?G^hvCyOd*knz~tAN>U`Qxv|5s^bSv1+CUbw#X_A^W@6s?QeA7*X;P)WTBW~vnvI4q zI*%*~(odA%OWPQn__go;2>h#k!mN}!)Z{1e9h7m&Fnh0N$JtN682fywMbIr#Sw+4i zDe$X6#zHI|nm>s}7spePyW8>7j;}MSd}Ofk#k*^$s1gcUy1}6nC6;!?!;ULnS{=+n z?(kOIKHsepi&$#-b9v*CTO7_s?A8fao+L(9<3*~|x4S0i)(}`7x`>AZ7P+Z?b=C+ z)TLF|v35DAe%8&lQW>GzR*c(Nxx%0qvs|;7?^CcJnR6XxNLa$j*PXl7BSeuM_?hlG ziLRyt%Ic3Oa}_^(Xb_!}GzcmkRzYN7fu}HG655tn^wv2J7ItnDtEE3bP!&$9KUq0H zuRtJ+v2*Zs@Cq$1@rcS@_e9V%unSgA2Qu7LyGTgLNjIJiz$p^h?daH-9HSh4FV=~H zb-+H6y;%Pq`{Sv|9LuPCg->gSlw);!zmaSAk;PLh#rwC~Z5VL!XBxTc zko#A~VN!Ie(oJTqi25ouo|Mfru@14upIXUdC(zKBhQtajG8H;|d0KyTc&4Zr77`{o zKas~}e#D{NL*EZtX^{A8Jf}Mgxz2;2>4h7{O--c4XbI|+Ux^(!)7+k0r=@A+BBKNS z?zf8A7N&iF!7^W1eq|EOL?;q^uW-wQLH-6`f%(m?SNKDKR{56s=P7AT<1Mtrm5yYa zM$Xq@xd4?_=%b{9e{+J%1I_B(%@om)DjkNg#5XPY%I0kSX* z-*fvln6W(6p;THLTB3d9LLewSJw{R-PeeUEMWr8_urcnUUC!feA=wb}D~LgWL)@B| zGZOFYLwG0BviknxH_uCVizCaaLMmL;61rGTQjwi+5pTW^MsLN3KO7W)e9_cywrE%p zZFahP=zQ4yweu}!Rj&+i+68xZ@P$7erL4?IHQ|YG!zJohq_lSpuZiyrwr(z*+=SFw zW)~Dlz?Iz3Gokmc=S66LMR8`|SJ|%uXQR8}qPp@u2?TFBtBF+qP3@{{v9c>0VJ8 z_15rRhxVENc--xN=f9(-pBD$wDUw^4l71b4>x8Q{+V)q z(R$}lP;*lm5Y$i)_hm@zqZ>>4T_HJ>zQy~bUr?|qa zeP9b}n}d$&W9#WGAFq0g{ox}qo6+Esq@=(to;-qp}Nmg&qWQ{jQ7 zTW&ZQ6R1EedLmNL@G3}4O2M5=`*`o6Jw$j5*F)1SvB{gT9UHo0VjcEdi1OSX=Le;k zG?qej3e8j#an;39avIx1Vq1@Vzk-3gm4yCajI&c2D)E$uF`mp@*7$D3C?Bul5}T(( zv4o`UFm;pdvCQ!61z4$@$k2ZI(GO>04xgsD`m0PBWPe-8PWiJj9iC zRm+{w^txel$xi-|*FgvQmd&-qoU#;(aLa^|4c(t{u{~lt>cUE|pLZ#`kvo?siGk5E zaZUlQ#u8W8nqrc6GR#`$j3942Z+qN02;vu||Bot|Qc%q(-m}x?#P62%LSWtT`k!^j zrv$UwRu2i-7|E&p1xq~u68_*Wb@GDyxLjCutRuzT3r%^&Oe%MrEQoj0GxzZeSbJVV z`FS75RY5qbbYFRc!}{p1i~GpncZll!p5LJehkIt%4T#RXvmbpivA^W8i^%Q}wR`BU zsEh{QI_0-ZGH0wf1BssO0ha9hyXLQPvu$NN7#fKPR~RG0>Tdq;)YjO*+7#hN{@^dxRq^+gmRlAF`d zG!R@U0#Khum8a57V^_8Uwy;bonw23mLcz3;qWP~Sk6)%|4ejGA{9f?m{vXobI;ySi zYZs*jT1t^Zk>VC8PH_uRin}EQhvH6gEfgpY0fKvR2ojv)#T|-Mthl>-Px`*+-tTws zxZfG)j`K(M*kfmBt-03DTzfvxoNG?4Jjif*WWkJ#j5TF!s%(y{8~^9~Id+!NxU`6^ z#+e9wN9&KZGCI{md1DG86C3r*Wxa(+0u4e;=M@G2Cj08SW+RVbm;o5vh_cJJwAn{SK9o9#W;;$jc;< z0upAF7Yb+FA^dRo?kS(qQ*#-o--)?9mdpmCM0GDe#3zh#Z)hlCI(eGtNm$GsRsh64 zbjmVai^k|qvf|~+G*0GFDi+o@g#4IWV`)u$cyiqPpf-NHns+NuBYx_~5I={^sS9oj z@w?~wC7h`UdTJ|7AFprgeRO=bkuwxtcjhO*rpPwfx^Q}Q-Bj}^v3`xnL2t#;zZyHF zdnA4MY*;39=G@vAHG1c1bGN*WD7X4NMeZ#5+uzZ9U_UbJ2Fyn$<9vR)aAr4$E*FeR zkjwtuD@92O!mzPgWW-_OgxLcRE$L&X?gsh1e~HsbUkq7AD-=0HubkIBoa1ln;5`d+ zui4weJhGY@A$%NzujheQx^hvUztUyDZ5boZ*}Vj03`PR3Ik+XfG6W7 
z>`gMq61hqkb<3q0XnyqDLz>-p?|*EkD>c`N;@kM>MYW>B3n9huH1hR%G)aF|blKJ6 z6N9kQ3>GSkY%2%tTCNn6qP?4$1;KCtmUAqa&KC)q<39 zg~_f7=~HP08BH`mS~29EQcvrr9Ygzxu0bWx)aSQc?Hp+|VS6OxxMOB?YQw++t)Dq} zXY1(6afAP%DU7wpo;-+Yj(OO@>?w+%ySvA9eVqJ$lhyjx$?4$~B0`t`==#P<6g&w5 zKk~wi-AHMyxk1CITL}5P?L^izJkR=v!YkN-`Za9A5 z-fW+CkvTp15&P{;zpS;IVCokt2n*z5!L5wYsEp@WYG`Y z|5No1@&2H2QTWL~PF>-41&N}DHQtJTF}mvbp7)e0&c{?7cP_Hst=z}D5_j21h29Yv zdb;1+V}525<-H^WOnAo0_Inu-a(?V;M@D_X>-it@vt>K(HYeL(k<9(r`+RN#IS zxa#o8eudi9QcD;FCjWjkL3H8}{T3M_9GKwEW?V=@IOa!;?%d9j3v#eU28I2y-*A72 z1|6sWxVrTid~NZMjf`*d2W5A4 zroGh3c?KDAhXbhvl7K~hHu1k>kXo|^Lk;D}HexFA*fG?dmQy76R%t)X(9`EWp{byC58 zvZ~(GoS*Lm(*2&`NpXB$%s}?oINoDah4%8T)af;h!&~yX2nZAo*1~*- zWen5J=s+(cAuS3r=TU29R^Pa%0E07#41)#K-6U?_w`v+;OA-RL-urBYAj6ZJ5p|}< z?BEwqbUenj@(c;rVkL5M!{P(s{5wKdhQXv>ZNByU(!)$`sZkp<5()W6D%|ovQu(jr znOg*?9+n<;vDJZ~Hq4XS0&Za8b^OnR5Rtver4O#l+Ye^E?UN5&HhG_Nz0(->BU;Ad z?!D)87>|Y={czdK<3w=-dXw>sd2x?tHA3`4T%su(rKk~K-6`S>)#SfE{>rl=UWxj- z*M8!*`#Z6?{dNEZa zW-ANT>K_Kz3r`2Ouj}yXUO7^){wVsjL$x<|+el1}tB_fV!+9zEz1 z%fAynHh@iDrF$Z>x!QY(1OJ|tNY}lN#fd_Kh3UJQ{YQWDP`YHgX1{D-5-QMnU68~d z6g8pz3cs5qWT#7{e^8>^k8Nn8Mw7JYk#@4BE90Y+cgSf5<%S{EjdGcfsZI3y&vgkW zWER1_mK*myO3>p+j#Xg|tL%^${mIyD>vFzrTF-uoqX+&Ft zCp3vCDl#;t<$Njm5+EA0JsCk2XsZy_N_KVQ(gg;Jkb$u(HKyM0eaFQ)qz$ioTN(a? z%CB~+Qrk=Z1@VOly2i_UH{gZ7V%#M7`-t&~ikYcHzhuk2<0^Lf4ZnyQNF(YT41tlX zYw0kN{t_~yZ4B_nsy9$v$w{puY-lqD%MP>^J9W#%zDHH(P@>D;Bq7ZiDqN*YgctG< z+ssZ(RrQZeQ1R{d5V{{m0%t_|iguQa+O`0-IiWsI1M0ljv)0cOOk$gFZ}0PpV|*Mt z?jKln2NBO@_L$0XN_KK*UdTzQ?<~bhy-I~T1Ya&hw#C^Ko<^x)5TbY{s8PU!9(3W| zq6Ex7jkyV4Ks;k2NKFg9+V4LoaXBm0>khKN33nRFa=rzjRA}YwkY2AD7(Oi`J@)xM z&b#vFLECF6S}>=?pC|+42~(h~8M9G-By*{{w8U!ATzkR^XA3~Cw?}+Go>N1HOE>GC zokIlpVEp{Nv!>0TR!LhSwch%Aytn`EeWuSv^|;tf-jprg6bK|6eoCDo8K#2AKQ9|o z^eJ)V{gA`JA~GC+RsMBYJ{H=^Xz0~&4TDzlWlXkeyMFDMtC9@aYrpnLD^{fIZDsR> z@f2WnpS2AMmYEV^%T@h@GNoL8@o_W5j|9UnBI%B&%x!-@s~r)zI)3HzS)N-Am5PUI zXKH@%TgFF^+pYZ)m2%$PMdW))kXU6h=8Y6Ol?NXHHQu(Bbk(&(o#4^qGkHIK z_a`ILtu)5X{Jb|IVJheAY+Rf^IUW7B@@*Q;AY z+4k)Fz)^t^vP75HNU@&Ubd|@vvckB$N{kGjg+j$;dr(sxiIvz_v36 zkcs^<#_4V*ll6A-`X7`7#$@$=xxTD{hA-(dG+2mHh_$?@Vax7Y;freDpPEq|zTV?E z-pwRqzja)Ko2mHEqdcU=G!3%hwi|d@bu0Oh9%2tvj@P=JmCa?AM-1qAPpMcy(+X5) zn4+Ye%bO}4!$m@nyPV*R*Qu;{gL=qL-P)1yeQy+NG%n7EHmzlk`(38h6X zLElM5o7ZkmLQ#(HnWy!`#)o@)aVyyejM*DrwQ+%7@5?&sXxiuP@yQGBRTZKfx5O*i z7b*%?1#g$nUVq=dr=7cQ4DqA&+#4pYe6-)2&y0>gLtWiiaFKarL(rW5*1zfM_=7U= z0*z|wv!{Uq?ncGtoG38yNbJ()0-F+X6qwW6ly}9f7+))QqoaM&>*!sju?SFfjPb$3 zi?4Ib;ha+MA03p^Z0O`JdMX|MP9?k+*Gwp_k~w*&k&9I&y7?hI1iFw3%y{GGmb^fc zWVfm6X0^KRW>r<~R?1KRiKLVeOmt&^?uVGz6DndZ=8keV3AfjJQHIv{;|T!&te;4> zd?8U9Zv>s|r$(PJo2Nzm;tF|rr6`vDcj0o{vPAj*Nw@cEM=r+;X0?Q~3$xA8qdudE zr#Uf0Ch~bv2$DquM^NXewXRz*XQgUUpYYQH9YCg9bf*YU&hx{rern@KZb4(MuCYOE z05qBhfj2-HOWVwIUYWVn;q2@J<7rx=glVrU-FmPQW2>f^X|OVT+voh)7;XHqn+jtr zj22jLpS~jL8YT0sF%}h3peKP+^yTzPL9n9dQ`SO_!-wY_y|wg9F+o$pQcYOX+5mbS zms15Q&e6EX7Nr%DhVO7DjxLpSg9~iQIIN^B9-^}{Bm?y|^ZrdlFZCmjI$Eq>WG+aZ zSKOB&{3B1yS>gMbN#L~Ar+jzR=5lvOeVnxIh7oO(fd@B4_Zp$ zDzF9fQye`6+RLtYXS;b<14-9M+@iqYIK1^K5o~gk zU^?BfVkbz6ZAKw;ubiM=k8>W#vCJnrHCRzDlrOaX{NAV!@C#FsK2L`r$st#&!EA2# zmsx~jC#xB_A=Qt=PmcGNpT~|-x)p#}lHBxr=1#!jr(!*aO@5+ZPD1W^k@W@;Y;9|Z zKw7_(`^IY?QEVIp&bc#knTp6S-tgO$e0##dhVx280&phETY)TSBR?`*#A++Z__pZ- zydv59TVkdz$uqWkuH4lntJ>(e2c`sD-0ga=?~h!W$lO{9kv6n)3+&|4UvT;l+c*(l zB-tL@5wV8Vh?svrd52fu#J*wUQv3UkL_ZpR&hSZkNWI~>;}LGzdmQ!=M%~|A_uHT8 zL;AUL2j&fN*;jGTeapxr@hU0@J~*X>#5I4VxF4k%eBcq;VP*~W)A>$k$fx#sq}3+p zF3M*{>WcKHJmQe|OrLncX0wsw`^>P5p(7%7yLHWwckFY|f8G}I_cq)GoAaHb;%^QQ zMQa*=P;N0Q`z@IuzLk%--e>?p9P24AaWbvd_0yT0bN3^CFAf>si~plj-Xk>)8PpGn 
zXqQ10W_tH`!LP))I6gYjw=PNnBUn|!n5SI;`dVBSN18;`R3m%8$Ih9VnS0kqTd`oI z&R-*F!;4oI6q3&l|Br8!alr!xw-r<(H)GpK z15dg%-#5zmeM5s}EN9W(gEg#9ZQq}zq9@K+P8Z||n3`TwJr}4?_1xQH9&Mcj|}!_0$4wF(&oU6P*+YXS><>|=wxJoV6ety z*D-if1})d#io~h=5GpMDgK{tMka<>p44)&gUXTIhQZ7tz_KH&@;^nS|6@8*K>#6M4 zTX&P;l(2@k&5vUFZ@TgOJT+!wJ+LgAF_Z4edxY6oE8AG`{rEh5lXf2y_Q=Da{GV`p zLxs*geMed-k@_W*w7)mDarfU}_+|?Z3Z-j)PJ(lNkUdyS(6L}OKL3_G2XH4Ksw{S9LP@dslr`C1IajXy88!NnP@#6V| z;*OBcI=tAPcvxy7^=q2RRk`vyw|YD>h!R`;uY0ol0{a4l+jozzBpyDh+|+lTw?SMG zplTTsy5{Gf>ZwMHCQWU*Qx)9u<8Q{rJjEhXn^h+dionJA6O+me@wh9wJhK~HVao2z zEqb5dmdad*Cx0*GMNiBWvds@gpBJndX0=!bNu5PpimD?X@uO`2poA}&bvx?eOuOGF z5e~Yy+0+N!kv_oNTBlfj)JdI&Rp|NT@SZzHl=ibk2kDayGtCzyEJI4Iz8;Qo$~aXD zfYm!In)@g=Z{ZnCbMn$@o|NV<$eyqUX-mY$tlKTftQn<>AFqoPPk~u$5(if0|(w}Xnbg2f5N)88`>IJp2`7RnR z?Z%>AeW})H#%dS)YP`qXu(ec*8%H_GRih@NJKTC_@ys1hK&s!LMryyV={3B6Eq@`; z%H1T<9j|LTWuVHule2Gjfl@28=$-7Uf%Y)BT^C97>=_*&F||7?Vy@)wK%Fsb=_RRq zp*e;aV|Q2f-Ca8sdP6sh%*$z>1&OcwNv=rQQSZ6~0+dAA$p|4V|uHhXkfgvG{96)bIg@Sfa7TLV#qKSWK!a#EUn-{{#sL8DY zJTLsz_susuprJR~A|~naD;aL)Xb{D)>&@Ki$GXq z!=XH&aOohmu1j6-Jqrs_*D8_n&jys+ex*!*Q24Hqi^XBW?rQO4f*;AstSDlK{eSAd zzs5|7P<`8VZpjbC-lW&s?qkNq3Mq$H^hvWFWJ0u=Wo)S)*AQ>)u?ZIvFf*MqL#TFM zhIo1IP~JJy?c!JV77}QDkzG#fkZ0#%k}Kjy*~%5T(+dgwb`e*0<5xa+{9DTFob;HG z1K|>_x#%zr&d_s(loUcjgji<2-CGc9pTE2C@Xmg$D7>Fvxiu_lgp`CA-h%gJb5hHz zEUy&Jb>+z4b?sF-;PS^D`f3nr%?Z~JpBp+O(zaW-4Ee{9FMWrcy9}S-QddHnO&`vA zwySW*%){tnQ14|lPImKg*7ZgfPB_Co+Kuu91|kW|hMb9yc8~X|Am>RCD!Dzpiyu*} z6Y}kJP#zNDCQ5lYdcV+3eSw(!Zl*ZFn!N`gEX7IjTQQU*K%jM@$=iubwrC?KhC?+a z5h}InQ77=4mN7?;StqNTE8R_9DLqC1C;z^mOY1N^TnhRo2jjd-UFh;ty=efBcnGk` zeK4~{6mjIL+Mb`VSddc6THM5BF=uc>aoks}v!#v0B_uxH!^zgLwY&`|)JBG1mTL#! z=LAnnj`eB^Y0<>sF5lCr5DKRIp>Zv+-MrH+&*V$A$}mM>S<#I)Q@?+HNC@(0qR_`<=7 z?|$>S^d3GaN3e2Bwy!1flhEZN7&OV-$)okf2Mj0+ygYv0BWholeoSi#rO0*i%n%=8 z$Skl9l78SMFC9$1DYHSum1ko)byGK1Wo^^7XgO;PIV1f}e%{6NOnL5}1>PQTvzY4T zmffT-e3c|^D3=h;_&f`zn%|8maRL4xlq|5|3wFR$#o%D`Smt}iC!YilaAJc6u$J|D z2oA5}$)RxaJZ5#zjhLdQgn(40f_6a*_N6)Qp2+s~_rLkh+bXQB^>-}XW%8{x6%$<& zM}xMcR1?OQLL09C#9i41u0i==FltMqf+-vNSaB@PujCr4FC zY}}HiUYA4&Ks-g|$;!TCMO615v(S>{(I|g`b&lVNbQaMo zRch#)T-7jA4r+MLk0(>TV6R4359gc~FPuhkDq;w$E~0rC@Czy^z4Y|0G?#W2+b^@O zz_ZV9kr@nc#gGZu&i!1GO7pmfRGNqB_{*K8aExhj`_*H(d>igZ{|)#o+IO-=a(q~? zQ%T|id?%4k*c3e)^4}`xN~751>Xr8c-QE<10Y%{MBW!nVN5ojP>D*y@Qp5l)X{Jc% z9;>xUd#YjFPCD%n&(`cMBhcHPw37DcGfZCh=+6=7(U5DEJ!s55givg} zu9blYzez7ewN&(*8nOYe^o(o+S`y%qL ze1MNl8dKexpRq*#zZN~OYy0R6ffl60LC)(!1`=X)+m_e({Z>UQy1bIE$5r*tsh!95 z9W10*XaH>GKPY+UNrz?XfRv$u*G^U&upHwe zpw^YyN7A&4NJcleCQ5i%rpxiyTGh$yl!!)w!t6{2jIN*!Ud3AF zpA{~Wgaq#-vy-tFstFAVUmWhjwT7G-vA_P5ukKFGA{FUUqYWrkzphH7@Pf!b?oKJ{ zu{x@eP^K`QK{e?5{|XKaMNuQov#cM`;Apl7V@^<$X@y2O zNp?9GJ-0?)+$edh96InctShduQRvP3hkPA|(6EPpdPf@&nvo=#I^ocb>!|NZftwo9 z;*9q`qw|tcKiCnWxkG@hD=@YPYgt{kJKp~q#^S0O`h(IXDMjgNgJ|+hS>_!FOMvk1 zv=nR3xjQ}jnNkJtgYfOTbnPsyX-hkE*NYQ9%?o1b1H?KQcgASh>k|%#L)(dt8mPXn z!CUl&ngz!UTAmwRa7>+N9))DhSbKekk7kFIbS~mv1c`0_#vOmqA8*kEi;W`;vBw?@ ziJN}!a2OQ*KVC0A@s-_dk^B*Q zZIX1lUJg5ss3|QJr@`W)?^`RT9srnIW)99mWmSVzlrR%AM}O3a!2KHJsYA4p#-{k)$BY_DO980H{i|IcNI89bxJnzqx#95R8bL> znCX@yBHeHp*el|L?0ycKz?{39=eYsZ--ulM3y3s^vAb`|MM_f(q&hN@eW;n3kTOfX zh8KZReBL(09EwYdt<1s;;MSpe!37?xv5HPj9N~+J6#0CWKeuN~2F3@DfeBob(-U!k z*qHrda!fpqP}Y5W+1@SEww9^)D}WMAv}6xuYl?RJ56)=Dr?@LGVo*9u$oaq0 zkP;eZ`@%))H4tv*hMH&_Q|zrp+)%LxbKlpPQc=NVP6yKkGZEr}-#_hK1Eghu&V%U5 z$3TS!B0w+<^T`I-ddB1td_`lP(99nEd? 
z&tu6X`8uw}z7kNLlcDE7$R``$Kyl>!(c9JWFZwVb-$ob&BD|7zZ&lnoNB3$)^uQKB zJEv9-LvA~=&KyE^e9LzX%9*ovAc){?WJTYI#iMg%T}f9DH{6n2g_7vJi_qIeCQ~S3 zoibBNG2HBh@6}w;iSm^V^J3^suF@s%@?|#uP?IO9MGA0d8Es)Bg=b;Jsr!xLQ0N+& zq6GiYM7?@arEa(0Y~XGG3nE*CC7PL>(Mw)?j=+tU+LbH}U9v|=(1D%DjrU?T5?@-} zkuGCnz|1%|)Taolmy0glY8+uh=!x4e4x&g0!XOw1l9rnZc~N6Oxhj}DlZ6Ot&iGWD zEh{^j73iw_2nm>(fTP`y5?)qaa%)I5D1$_UEEr><24@R`|1@q5&XZ%1AaXteMD_IBoZBV>VXBfQtD^!&?&zKAZq$VTH-&C4E+Qh|rMDH+=T@yf-^lf9-1RpiI^t%KV*_dQ@uo)a5aZkq1scfil<2hngRO*vmGhD%Hy}ARpjuq2ZspB`C=xO}4s|idTB?$R#`=)b#4kB!F zG4=3RtPzT>h!jWA4BG3MG7WaSmaodql6pz37I#aoN_PY1cFIZ{y6$f~NE zkF(FoQy0H^^H~*lo0fs}-p3!FNxX@+3v4cs1q@t*RDn(p(HCuPQ?|DATv{~_1Xbc( ze}QVwr)aZb)>--!%2i8r(@}M;e||{%Ev&O9f5qvSacJ^9*OtTINh`@|olN8)>t&4@ zNFq;uffvvctYF?P*JTq8zi`<--a_I-3h&7_$hdbarJsKB?rMo>FeL)lMQobj4@#Pk zn8Jm-{DmXv4~l$?^SEj=t9Kr_P&OrL6`!tfPvh{{p1EF76_SO^=t2wS9X!Y+dFpK_4#8X|DL?qolrC8V~fn8^Y-2!ln42^ zl6LI9nhammkeOSF5~SG{w(HeQ_y)$s)He__e4-N)BkZbc*mWPoO7 zQZ`ueywUOQdt0POZ8IahOJj(1o0eQdff;Y$(oD4IbM8|ID3dlj-FwcZmgyrM~f z=-9u&W9Z3tkJ6FkMCaBnd~SjcQ6czRsZ6>R$OHIr~vT#fcM|XjDJR5Xf)m= zJj^|-4I1VKrr9i2ZhSEwwt;zFk*f);UU&Yw`WcuzwHx(rKgGUTXyD^e7$s{P9E{p&^}PqM#qTi@g~fwU6xlpwmS50|7gD5Gld;e!fBfZeN43aNZ7ELmqGP3V z-xQUD?nera6@94XJJxVrrd(+c2$9op#P-C-2`r8tz z7Oj8P;&ZNA+Sej@anaY)dV3Ocj|Csa!i?y+_gYW-P=vowAtTQI+l8ks@;?cq-?^go z-*%5#TU#U$FCbn+jmJS$Nai;BDKgLP17wG)Z0}+8%gV)L?Cz4oRi-ZjEn4sIB0}+Z z5n&UtW^)8n$%_UdJ$tiKKA&vOm|GxtWI;%JMPu> z*)@!kn-RO7^|g!!DxoOQ`)6t_3WcsAborNIE4ln#@*hm@LR-_7gH1PKquA*bfZw-vtyqzkDvK zObdMzFN{LcFMx33UuDW(-UzPeR6PGR$R$VCj#X8<#~N8$T1H;x~g z>@4Gpe!oSkd`fuH3Ef{1yFW<8d_?#3;E-u~c{9AGuVz${(rOJRNF89M>5^yxk(QQI zn?$xyD>FFrcrfXxZzInk)XLWQzA}8Fcv!b$u5X~`QvWi>o+@bvcd-#3sR00)%_T4e z0vf9S;?M)yxpqx~pYDC*-ZS5`>{gSp0+dk0jE8-;2aE90X)0R*6h3XTz6Vn}n6A)h zZY2t#s{UBd>~Kg*x=uSG-t*(2heNAWEsw2Y`TT{0DtYRhL?pV-o(eX4-of>Wz_}M3 zEz_@lfv=#v2avm#BInFOg6kRc-l`I};m<>`ni$7X4GQ&=%W!$7?fkFt#t_Fh_9#^L zwDUGnYJ>&^xEg>{1r^$UEWfX6oupm*(tG-8C}0rLFm(X|x0H^lj(efp+f3x{H=0zb z6w^#8S2NRC%vZ*ip;_ZBe^A_ZGf=yAwsVClg{Kldg0&`iC8I3^Ybra#kK}hv{gMz5 zi|`*YcH_iIt1f%tIa7AHX3E0ZKAiT+T4G~A6D$krmAo6!W|`NK8LkyUho6%BgxQ|~ zk7!U1H|sEqL@!SwExLPJ*qd=W+CUb!c@#Ae@dA|D5$RSdkSesq+erOaxC8?W zh>WHjVSI3CkK|IU{#_G@lgpuxP)8e(D3|X;w1Sr(d%4y>CvYMSlYS;$_x%>BK1HT- z#)*7P04M2onB>P{{Ws5$JSQ=!0s!XXG$E%VlJNO0;2K+K+4Hygl)Ijlnao$qn=dQdA zYn3m>wWZ0{?rF=>Hg4SypWg=<#N=VrKi%@g^{hCgaLi|QRIo|DPMx#1Fl)=COE?y_ z(QZyXzin%tc{=q2)OXeh0-RTv!eqM>beLpKDPUj~KR>k`!g8iO5B)8xUFxQO5(sl; z52wPKo5{%S`7NSlG5{_}4~#gTavtv)w!h-+NU6`fG8wuk)0bo-CwwiAFN1|3W$q<| zM=(xIPyE;sQJm;HQDN?;t#N8%+OMpSr(BzE=8mnt*J zq^uVfCao(-peP&UgA#J_r{C}=mB9gT#KAmxw7CRQva7CAU{#cg2cHYx&-AR7AkZ*S zT>=dJG>}j4rn|bBSSLIEadCQFW5OO08K%atknDpUw!WnFk63R^;1QwjiINHH^ER^k zy3Z)>4jg?e$0{{fkWvi;rwBj22E-D&KcUtGrAzhK{heP|Aoq*>3cX9~U8bo}@+v0$ zO`pvsPp@K|jU}<%HVmyBPkXgC$j#I97D72<)FsOToq^!3TY_Wx%OinV=lgkWwD`09 zfeiw-ZX6)@9>ABov zPYb?ZCE|bUg4IztrnWyHdbF4S0#50Uf+Ea}g1{b=yAM*$COPPhV})ih$a$4nD6g$f zf0C}DIWZ;wdgXJGA>ol*71Y8YLTq*hWl}eeCW1LB6Xt_wFf`kXXF;v)^j>Mf^;i!N zhGr!XV!c3Ciu+QvLALlNQfKH%V}=*K%;F9a91>IpV^cDdQWQT}ZJjJhr5xv-YJgU2 zt1;K8%JDdmk2l9Q%8^!8m>PLfg`SO1PYm_Q0>X#{%(P9-VNzfsu*g^*x*ezBcE<(@0em?-zN%i!B6)@+VQMnF%!IVzqjal@VnY5X%;)xjV| zSKeB53ibUIDXw^9*2_;o34~PEzO}$^9t>aHln{IYR(n?Oyg!tJ@Zue7dVdjKoO#j- zDC!49ZVv*i2ioHUSY0VTvq_{-d&JTfX3!_3tYhZ%6*iv%8&3<;B{PNTMI@aSLQi8! 
zD_<^ohlu<$>K)A}N;6LG(Pmfz$XgIIV597#J2iBLrFXwnEeR&X#F`&yEh@JV z`BfHuOdfnur*ZSMHJs%f~y_`OFT{}@56!1b0ugui);R;^B&MpHV43@Xe3sRF*2Sf?W ze}&0PN*%`Z9~r`{=W1&o>0uk)jy}(pw%mG&@7bzn4-PDjbpD{|U6yHdchik;b&7~O zcBy?WKV==H3t-izz&$n>^SN?419H~d-xN8&CUAGe+95UlksuxmuAR*Qt59(B5?yCd zn&b7XdAUo2-AE90QTvN7?N251ur^Yqf~k9lm3sC18eV#%A}&U)rjRsBEuRlh{0*xc zt)_)`!mOvZzDhEr9;*yXNw?M7&~_PFS0c~mUoX3U*p%MM1aZ#q*LiUj(6>*m7D*&3 zhP@dUI-M%7-VK<1nRZA`>Z}@N7}_IU|B)co`qD^yR76vSg55=Ter!;<)RDI^B{q(a z1S-AIzF{IFM#eH_&NQBqmx?jY`8qF}dk`(?5KJef@B*v#?6KiMC>3a&YuvzQh(}L} zFNP(mq9*c}Xa zDR9pu(CR{RKn1eol>l056nkP}!@bN2`0TGbQ)(pBfk5krox0$6UCWtqF|ew0$#a05 zl2>?}?iHBb6cnmeD+BglqU$c_F(^x$H+t-##p}*jj;ihvU`d`;@#sdP1;(KZ7Sn%n zI9GolyQsy5_~%{hQnsOTluuna;9g~w{5aaH@e5spKV~X@6X!3By9Flig#; z1ACU9naV%@_s4qT##M{IyhEe#^y(StYGXnb@f@pvA@3xy=MdnMKA54b85;HBQ6`VPfhCNMl*1*_bnb_FuxSHlx z&}s=zq|$Wy^4s02%4Yp%p7K;Hv7kW(ewvQ#FC>7R0ehQE372ddTzPw6e=3kTy0Bp@ zei{s%U11=z9oQuUe4?q_w^Gv8s8dNkQc&Sy@m~t6A|@ukP@Vk%Q=gf5?nnE=+UjEj z-=P$T`(HkqH03z-BkQ>A`~J${4Ep4po6%rQ`C#hC-plAQj9`kdUF}r!6h~gMx}=IYvZi z4upz6oi@Pa@@?N^7FcDCDBFQF`#iPmXsdpTz-ZMo_`e z->ym|auEUtER-@@Q%iT@-#f1uue}4C*N#P$^HvW6v}>5?Jhz*y>p9abB0cy1rDflh;U z>4Ag6bJes&SlhV6EiC&wh=51i3XnMSsTo}?8d(YIU@ZB?S%~bNa-l7fhfrdfkHKa= zJY80xmaQCT&O;AZqMcT=9_zz(F1DBk=)sN zNX>4*lUq*Yrs@cEF!`5ydNA?Z7~+|K<|FKK9M3Ajqxqek8CZv%_Ni3y|LhPLn5=z zZh}lb?^0nwB9ZBd9_!bhm0WM33jAPBDSBQr_D$eG&y?ziX!?fGsfd_2K{U$n7r&Gm zwh}t7hijF{t1x;UV~P|*b=U3w(U6*olV^x`Ka*t}8@bHBQ?6k*N5GywmHk)jsLS>J zeyrH~AIBrHL-j}MeW;#!yD66iTE#YSH2@`-gT{oyOD@VTe}DNghhPGpe>2nGLl;YdYoQoCv*o*$!p794En|87D z0caUAM#6J>_ZZ3hYPr}qzct)b--G7edOXa0EkR}y_V^GnZL&9*>sy<%d(3^iL1pMq zdvW_%avnW>@woLv_4CJZ5yKU+av_NmJ$;$+)@F{er3T#9?<3I{P{i|z+kY69e*=;z z_mCN$3JoZ>qYVpZ{Fy}T+e$)1_DGk?G2?dDr|lkukXsA?2J|7Gj@=HsL$LYFL61(Q znewG9kvl*qWn_YpM_eUVd;i&kdb7&?szqmy0&VkUo&4Q2hq~f7nNWcwz9$Nm0&Mfo z>0VFi#QFoROUD$@#wLGslf`{YeRE~f$P;TG#;PnCaaz07S-dk$z^S%r1xr~E@C3we zh6@_FC8B}AASmP1AW7=S&Z$v)Rh=^-kPP6^GyxY^0bq;lxy@+JsNc0eJA)x!wN_$^ zZ){kdQ-sx#-S%mc@h9Cn9BGC`QIfTi!5}Z=QFiMwDL{!(-*hDwG~}93#plNQ$Y?Tl zvE)O3H-xBmWtzn=B0OKMMtM!#bx9j%^}fu#!a4^B^G(s&3NrG6yBP)?lVTYe?+6cv z$99ueGMFc0Fc-q|H5=gg*_7^9^pVHsFv)>(ZyZcmbNEyCpUQE4jK~V7tz*M@$mF`K zddVmw-IcmAP)~1o%6gHS!PHCDC~gvf^ehqrg3SeL+gWUigbGsL#vq3>*$}!Jns#0( znVpW6p4jbRYBHl)xyJRSN02&IC|OQpjbUGwUbmQYop7qObHk=31kCvkzwGu9;=?c@-)Wjwa9S{%85&nb1 zVtCgE3S-a^z%)fhQx&HKMB|I>WLdEf#Tu8MzGGin#SmaJE*;Vk;cA}f1vp@-%U73H z3pG~NIQ0yO2W9`$!oE-rRI%_AK|kx+poTCSCBS<^bX@-zZM%-#8xpb;ZspU?UfvI& zsLm8QbP>8PM`I9|F7J~Hg;mBaXi?ddBI_`Ke^;!`wG^w`Um|C*pko(FPQkL6`-?iAJEE8*l=UGkHwQ}GApl=9~bZC zl?S8yPQS)r0)$p^vZTHOpolJL+m&HUg8dmuv$tT@RzzAF$KhJBwCuUAO)6g)zYr~` zIE^P3uBB!(cktVgbc-MX@&|bflBe%;l)gL7$zz6=d{E^%0#;AR#=Q>-%@5}d?fW*c zSip!$jebQ3O;;PmHedG7mPypgd>cTbCr@bsfvA_~W0-QratuJcaf(eKg{85OdQBMc zDv?(sihGzP@>L35G?h<6e9{flt~Hvn$1_pLdP$B)p4g+cmat)N659=yp(g8@kkv_p zTE~~*)dSxWoWnlozhL_Q^~HxUx0O*VA<+WTDd?AxGbJ?zW+gP!N%gKUoYg>Xtc9H9 z7@9aQK%tJ@YT1Go7H17i`h@3|0(Bgdp3ryZPW;m3Uyqt?v%C^GOXt}VeM1Lytt|bZ zzJmb$a0X`mK6XFBe)$V0Om|EDP5fpFCOVq%uZISQ76us_SB)bDs4N~M@n<}0TenI9 zk!dZ_+@s|}bEzKVm|;dyG=(XvQJMl7kZv_UCg4aANG@{fwbJXnaynO!JSS*rsVCkE z)XPgkYc*qz*t0(WD^V~o&DK&XS#zkwt5G@c-5BvY1>^;Mp_}-{pR8T-f zL3)!CN57FPhvw#o_%1k~aIOl6DmuBlAAPf!DQX))K#w=Ym1|WG zbwyMF1SAS>ZtV0}J5$9YJ~3oGjWLwuiLUtC@X^!nN9EK*#wa&kiKWJzP(qK%zj&<&cU-y+^rnrD%S)ZA=wL(HLwn1T{Nt zxg63ejT0ADVkmQkQ**>mAd!y=Zv34~5f27L@WXJvL#ViD4;g->f3Tg`Q=(5hn#1y5 zR6eOe@3-3V0Rs!&lvX>FupVOK{1p-}=SD#t{N;isvxcTH0lgGzD-&Vg7?{4SH8&jC zm{z_8qKV8kW=E>>pp#X%(0Dun0Sa|tYGRY=?|@c^I3&;`^RO8~hHEZ9&O*>S84<`I z@S2|{LfgRXh7&Wf0F4wC`?#Mbc<9n6q3y%eA;vGqMS?;*`#{0bDQ(1?*0 zMhuZ@P*SlUBsLKbU)t4G~@jic|ZUu 
z&i2Jg8pWGvPechE&a1{F`2C&MBgKv9)pthK@n~wQ6tDa|l%(H-e*NH0d(xk2?)3NV z%42Hp&O3kGlHa!qAGCI(r_3!h$s7Le(x`9L7MQ5u$uq<#Cqs|c;le$);Akm!oZXuI zvhwMS;TI&aW%N*_PJ5Mod_o&e_cF9$%{*-gn&{i~{R5XDRezset**R4jIZ8E+eCRx zSCrpW&Uo-*GOEDuP4^R|8QkE3Tj=S!R$80(`0*q3D!02$7|g~Sqv~yo5x0BV`(*8< zC8jyk*Xb#aZj^@$X6K7hK@hON6BKG$hNXuqiIxMA`n^ zn!z7XndM`VDp>6%0442fOT`4)YXn3Wrrh`;L8@+fx(DzrpGUqWU;rg~3)8*}kXFA& zNw43SPO(~L18s@-y^s3iDlVfpL8#uX-j8AT)ARsQlvX((r4oaZHq=XF=Lk|v`h(Sc#>9-wht14n%oO6 zNfAG0+44`QD@nbar__ir8Q{oIF6Af%ljXCl7CUc33jCk>*{Fl@95^F~WTZdjhw zSdz!M6c-O?rbO>X8Ai1%p)6b^DX?C5lnR>y{nkh3M8vP`rO~y9w4#1qx;Xfl`6479 z9qUQxgdSId8;OEq==jnS?0CMIv?#Ut{6s4F7C%kBE)zIm@b&*otUR{YxgCbv9mBTU^UAsbT&E_~sSH0my8v>T)wvV}4xfI1n1jd*$-nbKPS%~OLOA}E))Vs7F;hRgVqZ1m&mu4E^z&mSWm(iLqYTahavw|}^h@rs zch^7sdU~rnnFFh-D)70E-=k8g__7UAAP&9lBix#Jw|~@B^2VY0xSjLEUf)MwQ{g!e zx)n1xGaWbID-Nx&WjdbO)~2uaO-j=d&z$_|Xnt?)c-pPeRIoA(> zc|{SLk0{8-xK?M-YAFy|f~e~+gE%WMyPr>6ztD(Q%~MVgcqojSKQA`U$~+PH#5#~hgW5^7wVuxt(a(&AF|#>?ws|c(h#tzwryB432b`N^O0#9`QEW29j9>rNgE8V;$i`|k z7Kc1+;P>hOZ$&r?MA;&Dd)$>&QV03DX*HaUulC?Cg>lJdCSvY|C5O@Pyk;)z$S5F9? zwr@*G6nBm@4e&7gU&$j~LdP(AebuajFvTu(zGJD&WYSJZms9@I){J;YR{%P%$n zfW3!`fQUh}=e3wewQ%LiMiF%_%;X(;>OSaOcXG5{%&a|zRseRFfrvoemP>HD;&efm zg#s=sXkRuiM-St)Jg3Z+Jo}cj7E3c5|Ay+{cVacEDHQw+Ba44UgI!d{8hh=E)Vw+6 zx7p(WQ$2W%t~+M3o_9ToW^py#eP%}s?YN%Fsx;3_YL@fEl){dZ7geyvjK`4A?;QGT zsEbzpBJs9>^{HCFD)I%9nqy+qqomDfei<9lzt z%;<5KYBqUuvceyAJZ+rs#1X@8Y?q+Kx%%RTHMP-$oB<{Uq;JEcp9;lW47W7veRiAa=%zW~oHPzo3Ee})mx5LRIqRgz8Uq&QRKj46U1CzVSw zKv;^qr5z-?G?XiTG&Y2WKWjMS?D4r!%!Pn3ue7+(g@wxKtZ7}npWj@C9^Qz>Gfxcm z)N=oLbl^6{{Al7kL*=P=lnRh^X%*J7>2(cVGs!Cn#smH%P;>s|^S55$!B*z*WA6@hSBH(vcuPZ-MDNKe`kSA`gPr6;%075vm%FfDL zqXAtGMi1{-O!~OjS;63?S$;47nWmOymmhpGDM$+SiW33>bW8fM6Q-v<<96%@yJRW- z&XG>}1WuUyMF<+rk$fk8PXbU${YtCf%6YA|0dvSWq(O=zpqSmtfNn+?lZ>#nK`n`e z1PH1*nI3xR1pbZ_0-6`vVSax^_6f}Xu6AfB``W1_i_4s=q}H~O@`SE3Ll1j0j^8j=Z%oM)Nh=;rOd{}-$zPBU zVJyxY)n+9d4bT%=st9+SlX>V1UNREU2f-{ZIxwW5#_rC>V#=wUQRjNMF@zNxl$XvH zmC|>}43y5E!KWEzoN;CvLHN*i#r}44mUh+yE+$!FkuSJbW0!-3BY&hR-2Qv7wfrB@ax7+y3HR6(8NFqdf|;PSx>wk4j_&6DWV&knUzP?(vPi}Qjhh?8{dWfN5HtU zAG+|l^FM+J_p7se*dC zG_u?9hdwQLe&utV2)ne5W0Jh{%9xPNgSfbXn9A>iaY?m4F~jek{Hsa>`L!Zx&g5bD zey5D}r|l?Qm#fRtl)icdp_eZ2>Z<$%)U%hv@^LSMOPPP~@zl@)_fBSJnx~V+>oJ(l zw)@ZRr4UsPS~p&o)7{D|2OuPOSz#5b^I~V;qa$L~tbhqJq@*8}*y!vEz^wqd=a`!98T z2j@HMskx)Af-^3m2}1+b(MR;BPFw6T7Q-mTV$r0!cwc)i)=ywt!0$0PoNTL+v926w zPI7aDs_LG-WXnPNVXL9U?tcV*OX1JYtP}#iE(X^h{znir`PaPZr2@aMR&p2FGi~9b zp2aviPd%bIU1szHdSdpA>l0h6FTa*m!iiDaJL|Le=XAfrX&5N@#>FjL^7X;j9*Soz zZg>8YG^MSd{@&knz1$0!ON(3y;ynobaerAMU`42{p$+&e@O>0G|7ZDo<4IG%2OwtG z;!WkJ1&(bm=lFsP?>}l`3Y%Ctvo?;C2wf|HIimpPoH)rb6`C@Ix@i25;2c=A8`>TZ z4k3)4Ds8SDRWO_KIh=T@%hk&LWE&n>AyTyY&XO8f#WRbtBq+F+^EEDAt>d>2OasOP zR<=SiPL@OBPkMu=9$x$0L`One?yNwGVA~6v6JLrZPqNI3B+DckZU0tJMaghaeg8jX z8lAW;v-gioZy$(rml>Y89|!CjS*l(5EdPzV=zez`!Orl)wCvm;Jd>&Un#zs6zar$e zk--M_?%xHs>Mep{P&u&lA&oO9xJ7qt)4;Hs7zf)f&DKc5wrm5BQX4RXMev&1;~jU` zzU~?RuEB?Vqi6v(yxH@O5|%3wO_IcwZBU~niK_&TeZviMSbgJC_jUx5dD_vdM%KkC_&SJSfyA2!mp%>!=+NBu^s_!+iDn}$23 z_pwWKiFSXKmQt|A)=&N!f^I@%RoW-Wfu3|+S0bu8ZwbD70^pY$ap+#dX&EFtW(UIe ze(M4rg!|VXATi3$i=vM@LMD8xw^4sw?}iA*t{%_+Mb9KUmQX4Po16HpQr zo*-j>Zd>#foUu@tAZ==o5hA#uFJ|!7fEzHteeN7!whR&-jl$V^3jxQL8Q{H5MhauR zOP47aa)n>AnPBfr{dzM4l<5Sqe_Go2Fu-9}XAI<53XQwlh*TbiI-8iEJD=w=<7!X7x_knC%<1RQlGe6Jk+AbIOFOnH+T#kA>Cs`8M$xtUQ!J+ zT7QY~>EW#ADZ-U-ClnXZTF9O5xTa<{D z2ElM9Yvgny>TwOiVrnl>rUF@V(VM3IX{dJa_CVbee}j61s#(I`Bop8KBznfv(%?j- zBlwg4`e!wrvKsTc>ikMksdH)ywQSQ(zH&bHmLR0Sa&P_{s-cJ0UtVoL|40UhaUgss zCY~v^kM)7bYn0!G8>lhl_%O!LKKJKo>I)vMsjEUSvY`;LS=$urjFsocSn`HJf@!rN 
zBC5lQWtH|Zn7C|NRZ+)ScP#zC-Bck0Ges?tZ{b7C_JQ|w62(SZUC2b#Yht+EmtS+_ zn9&W|YF_#*cHwOeBdaH5k(#38>p+vo_x|}F4f(yv){?e%fVT#0v13|JLM}GrKGI1) z3t$Qy(fY@UzlLBarX-QbkT-utD(=^5*dgGB!N_a*2mHI;UITmXQ) zA7p%wr{V9qZ7^&7nuyfOgQLh)dZl#u>1W0PKDURTsS-Hvi$A$*sq$L3}ffb~s$s=&gd1lYRh0?!kXcrY;h}ZOIUp{4%%fFtf_NC}FkR>uo73#d8UEhYr3*Lf9jq)~!c2aw&95z1!Y?CSTog~q z+;vZwUK-vIdUs##ciU!W`Q#nPjE#A}PzOc2Vx9H9CT`0&@UzTt_vA@q*Kj%h`)W}g zzt_W*;@Tx%q;%6x6^>u+C93vWh40zf;n9VWZ)IRyIyt#n*l$c`bi)~wzk+P?$-e=u zB*oY=mDzfU8%TouQ(OY<)D}@ELiA?cK1C%Mx}$S57X-~dpdKu6(IT!I22qshX&rXw zu-jQ`fICk8LY8jIX-%hCVTU|6k4+t4<+XcTthCkF`hG0EP>usYJ?*mO9d(3 zW4auc^GH%2;J+r+L+sZgMNu{)y0Zj}3o*qe%pFIQEwU3K+apz_D&x`q@D+ic*6lG= z$g#2eyj|oT3&VlyOa9NE^M{RcXWc(4wTdr?+*LoQ$z}iwvtI}H31>d{7-w+jZxOSr ziZ!{Q|CI+!HY55H$Qy_od1kr~`RXYL7<S1Qdvtb50x}?_P6I97$hUlY#VXv?EpII9uP;f74L$)DO=`RBj4$D^Z{!=) zD|GqMHh(O?=532S2dF!)1cz^a+tO+)8cIb<0qb)8;$>5*4e&Z(qyaNsAt>FzIFW!T{>?Us;7L=>Hg?AwN9wNDr+z7op z?i0E&I9!)$tWV6-U}C(BthbLtWK$r*037L097yth22 z_*LY1)!)8VlDB>wzZmE8JW5FL;NR@L2I0h6gB6LQ{WYd)Eh`?DL%YGcTrQ1pK6oOl z@k%_Fg5r7fdr!UF_zRuLuw3avjN=+mAd(~*qsqI|puX^Pt4y{-ic@q{I+Uy;#TEKq z%<=9@U!ZuoyIOGNHMcA&H)3Xc3gVG5B?metq>pub#CD6A$bN0RU9|U0M#A(4r?Lw$ zBT&Woo81^9-@8;~79YDbS@&HnG}Rs7j^$}>%KsW880o&d(mS6yxl?uI{0$YdZB&w# zE73<;n6nY5@bO%#`K)9uV6nN-+s|n|Qxff;Z9HQeV{&hzo$>X6xj7|GX11}lGWrQ9 z;Yb~ir>4sOk09D>JK4!Lf%UNyk+_RE0^PU|2t$dEGyA2p0`3_tIKM&>1%v}PHA^lo zk1FADMSpd-r@Jr_*3GN06mPEe%BeVPATP-Dv^g?VVBl;V zLF?-4j%9~*`Gi4w8kpW)5MWTnhlu#!_vsp3UM|8pW^g9wtHL-`Rhz4T7BQ7D3vJof z@uSZSDujJ}aibb|K=wH>c*JlJphph|dWsk@tJt2mdW3B;fN!WZaMJ)|eLoRAnX7~t z&k)$SH%955aBDf;;6rQhWm;l~h8!}!iZ0Tc8VamTna_?xzn1ZK%79n{ zYXFeL?z_c~MfwSMGvbOct zQ741JD56WO%o2fY=e09iNDl>IJVfZ%VQZH|WI%?=^|!v60yczZ4jllqynP#CJTa_M z#sH*m!1enH+V%52E8q#fWYTk%5e8?b#R;T-T3gosAhenjKjW+OxG2QXjJ>uFDobC^n}3&|gW7&rx65 zhYcILL0%%G6WN@r5e@p^@w4p(A;s-r+b1u4A@4(6^RhL}OL4y0u%}*3mHX82?r000 z+0!?7MTPjo21L3f2_gCg%25P|`xAW+pMDWmy(LcM75BDFp8EvjKOMWHj?CmvP>Vzi z`OC!E1UJ^uCek>rTH9E;|JpVYtItTMr1iH>kiw8yC-s|hJ2==O%oWQb6|H|m_FX5Q ztDJ%$LyY-}WTluRAIk1aH#>p!S~W(>*z;u0k`L>}0kw4NsT-spmdQ8v8|m12cU^}f zkAFodBVe2a8C;Qtg$Q)EDmAr8ev)+d`Kf?8G zF`xg!xzb+zN02;F$Jyi7n9!I=sz9>h`RS|8_j059l|5@^-?`yzhW`jAfdzB1H&ugi zC7R3-{{4{kM%-1|9%Ey0(88$v5dOto5xVs-$0J%pVV&f@hJ^uE^SiwKd-*uh?MA>I{%LH1Bk3eldE@JCU|;*R*Sull6`{sD|^hy~>#%HM+|mPr`|AqyGQV5$gVPW@ zl(t~Nb?4z@L$=A(;xb=j^ID`ivXAb*`>R^vH3H_fcU*ZA0b7_q51#tx!<9z{;74yp zSdV?hz9drxGXpJC9_1FIOzf)uLKBwmG6&6#$bSX|h1}CV$8O4(Ams?s#*GH-q-apya&?5!_m7KWe?ZQj;LNWr`-r1b zXkZ{UTI12wz`TLPLt-a99&XkDrb-E9m(u0VSaqhl3%(h+3#ywC`=ki%3W&~A{4w;{ z(~Wg~ujHNUvwQp&ioC!3S_lOhMf@tL69h=Jzx|H`;!7Hdk14j;;^~KzZ}c@yd&&SZ z(%`=DSdqh2ZEZf0U4ZNH=~Wr_a1sXn7+qW_hTEWdCiJCNpV%p@+(05c3GUvQ(z-Mt z2BWrYO!)2!{Cd!;rGMVe=kDG+Y4?=#4!|0L8l=CYPabr~15(Y--dA79)l3wKAI7eH znrr;as-i~jZ}+Y>L+JS9RL|A1*~LW|mIa){`UVj=X?;xvp62Z5y8gB=1kw4!_TZW0b=o@fK)vehel z@E=gO0BfP)?AcZM_oAC;Kmc3FwIH)=49M*7)ZW(j@!+!v}-kpZGp z&(6$70n@|wXAA1>Z-M1Kf3twltKg7#mV|jH?MYiJ))(C^rLXgVm6#0RqrhCbkar8f z)>rpV;uSWw`quEDT7tBhYFl6sOJa~}oGf!~Up1RCWmBCv+SGlfK+fj(AV^ z8=g8hcc_OQ5YbMDB?FNGFFem$;jN{RrgM6em&%oHCzF?41qu7cH|LRuU~`ahFUvu_ zWBZYg3o*F0!j~3!r%y5X=(wVxb?#l@5b5CSlm}yQz)_JBAmXX!`|ZxM@rY&g`1D*`3nlMnNH_B-YK7j&w{vOIS;@T6^v`7wo*9fuh19}*u6 zV-Q?)7W7IfuRW!vMdr;rMYP#R+zzjinZPezz*2GBCP?KHbTk2Ba4=2k&IXJnsno(8 z>a~1{ZV(yCbc4o=?cfwd=fL18uk%dP-+HN1fr++|bn18S>{0fd0#)%~tI@u81uvG>^GqO+iSYz{YqaS@= zm1EQtRyn`DGun5l(=+f_gQF~tMpD-O7uHsod#8sI!^xw=GE zHL4->E}5^o5SU!F-_}9yk|<1!yhbrClX%2t=n2pol7pI{!5PrJ=7%~pjTrWj>r2XfX3`G9i4K!AJQJ>EYu;#>CxC_9 zRaqt$qwCpo7Zz}Zz<)K+;$fAAKQ0czInDu+U$st&IYMFZE>POB=pFg z_w~D8gV_!+pjgs!rq27g6>hH<*GYBrN8Q5OWm>+tYf@8O-R0;f<}6|x$1bc}bPLOIrRKYR 
zaV&z%@Nfpa#Tf}5eml)Wlqj(ey_)6Z^B8Owvn~V#f~vZq*TAQZVNg2sf57GTHE^je z(LfuEswLsfbCaUeZil)fgbE{dj5wxsqx_HVvgNsJadax>8SM zitFu69`;*b5~%|#zKQjmgp5F(>0EIy;!sZEC8HF{H!F#!3zvyQA~`Lo4(Dw z9cYzRFE(iA>d*efwI8p>2ps%XG{M^YdkAkVGtW06QTKnUGM#oqFtJ{zU0- z+qisW6pAt4*6%mB4yXmG|EVJKtUUDFvsS#y7% zO`k{pc)!h}2>TbwdH(sypsx8^YY(ROM_(U$M_6d-8;o6hVBt_q_W;OI4Lv{A%A)Lq zx0cq{zV^)C@x_?H;Yc`4QsDvNG)l%OFX@^lNNo3uBnW+dQ0X{1A^yvkWylGZnk1SZ=_mxUUDq zS|^>V5BoRSN@vmA=EhXG1Zs(O{yAW0^>`|>gVviydDSXGMG zBMKwvVa_>|3cG!~^V)skNoQ`XC8>W=%xb18mfkjoUe_+7Yp*DocGS?|>k-00n-E4t znU4|egFs>d8Qa>?t%?Qu6Ycvgu1^p&HW#lpe11|}`BaW&HKas%uCPEcW(jPZwXM_YgWCmh!rE#z^zbGFgA54lKKrqhahkGSJ{ z3_Y`n`qL*@FC-ZXfvMS-UtEME9wjwRFT}?7qZ_;aiQLY6^tE*j>e^-1ZrO3&`WoaP zu`jP3KxDLl@_SYg6|EWx10^W#18>yda~E1n|3v(rrO(n-(u98Nkc~t;u7cS`lKK&6 zEKeCjfmdpu+CI209|edCE36F`gg}<4YXk#@Z#AzesHv!AiMh%09SkEy?*Q-AVJQN9 z{)|FLUcX2ynkyy!)({KZGaT5|KFM+&`!9H5e*_4Pz~v;;<{IiuI8dcRV!+Un z*+S*k0g zr9DK&l~(o{OshgW+rP|ZrHMFnBZ$$Jn|*etI{g~TYMD6#9C zbthX<41vw!r-8+wqSEg^WD2ul=P7WVR{kd+ts67nx%A!|;(qwd{kfRz&w{RiCwKln zuMnE}?MnSLtw|s3p9QYvy;(-QPssB0FV_OcN&QQ6{W^D#J_CLOLa86%qVh;q^RP=t z@^4bEnl7Ks;LKbWkC?T5D24zxrr^2C^O06-RmNDm=OlK{75Den0IAoIDG22gx@er2 z01ET76{U@snVEqTSOPU- z-v_l^gEfzNxideg|JuG=m)yey?}UobYpM?J?NL+lkg%ojw6S!Y;YxFc?G?*LMAQ$} zGX%NeAh(87{mobrRhY`zJ)L@=hlVi>H>l!49%`gdfZavnGZv;Haf+olE(O6>qr?lq zeEej>n%zKOeE?_(zZSccuP>;}l&tRg7=*n^#^xX!k_dc3gp~S_W_mu3B$jxmW`M5* zQqy1~p-NZ9ig~ZD!c(D90y&++d8PAOheIRx8yT=s{N*zTiz)dK_8b@wjo*)d+N*5U z71Tz8x-T85SjauTIc`HPrbF^v_zI;{0e-otp;JkK47M4u`X6GbkVxB1u_XSFpkUmG zq~~%l%ktJg$<@DLu4Pg|lf*;ApTl^^;4ZL%nI6<84O~agkPF zNXgGx@cU#0yqs0b7Dj!^Nop?SoYo;*J+=GN`|n8kns~8aJMWh|mH0q$c;Vq`iz-p- z@GJ6BEBuRY5CVutzKeVCg+Yt9=^fJ(h#(Mfz1_ZQcDpEc-XfWpme2B(X0I{C;*H&a zm88+7d`UXrNDYjzFR8hGupi?h>7FD&gvFg1l$5uCe8k7+!uVZ^)(d830-oVq2Sh})1*=YpXTm(HR`UVn;_foCXjHsxz^LWyS8MZ{Fg5m z6qX$IUOxEdPG>rK_~Nh`TfgrkR~|BTroAh(XkZFRc^s-FVh{IDRNr+tzay`4drBB- zq3tq5S=gK|&Ek=O!_(Eud#$EqKfiXK!Z@{k)K%GM_mVQv_+FRo>~)sc&t3pwQKc7KjMCXM z_=~6ePw%?uPjnL7A7+JF%-6Opq;LIG(o_KUZ9qxWoUv-c->d%6;|ZIwt=oi7B+Sn1 zDPAEz`+MFS7NzWVw~EVn#dD1UOR$Z+Rt{GGars7r)H}8b(cOVsO#?Vx;7x=knH%FZ zJzz2yVHwTR4$jZa&V8${mR<>UEEvvA>piYo$9rn=H^FrwwtW3u1vq+lSCu>g8$NuO zCz9ibeEQWgo=(j(dO4D=8_eWUTy+HUcz;f<3e-OJDtBf@wSetIh9KzD@vD1ml*dl7&jmxJ?>=jTQ$VYPYQWGy%$HRMdxYkGK+cLTFwCK7S!=rL?k8Eh#i*XAG%CTATbf zuKG17B0nf=CJwdDGz>S>w;4{M>$1?=WW?^c=Eev)Jf4<){k!K6{Yj77iQmoAZ5H(Q z0sN$?xpC_1N72>+)8Dp{s}SIKvv9)4P4My?@VohD>L-w`oegAb1CpjMSC+TkEc4DJ zu75lOF66UI^XbiR4LvaU^yzPq54BK#RImP(_l_lLy2rd`0?N~nEWUsHPhNIWqV2fW z1LBnr)=9Yuz1HumLDlt@i2@}QdERn^3Bq;?6t{cKT3U(c_U3y(a`gz4GshmM`u-?R zQX$_8XR(&(Th-D7;l>@F6nuq@VX8%jYt^9;g_$yf&k#k|1_!V3Ielejd4?~|<61o#) z`|vZ%$_uYV5PA59!j#-Q0L$c>nn#G}*Cpw4$Zzq*%WB@d@w9{i{3j=EvAINVtY$n? 
zY}5efBp@-8Z-P@j9~u8$5D0`p441ICOcFGC1nC>?K`4GxAYLz4CP!6&pxaPDI?SVT zAF5G~-KIJmt^F)on*p~40=rTP_@ZNfWXNsfaiUt=dURuL61qIB-SO=GHNxk}k=D_y zj+5c0F}1H118LM_#fODJZ6HM>M<7F4cj3-hG1 z%JE?K$%Hm7>9!ty$y#AQ3=v;1P|H{G5@x|T2%fu+1qpU}nPB3%a$cd}xDlE!K=85BsswFbKpMoyW^@c!QDcK??FXEgsN&iYSO@9Lw%mBL7K zwVp=C#w~-r$fh3TsI6sM-jS?7n_GZ`X4+S)0L6}!cMYL`Jj9Mk)h|`T1jVtdnx`DC zVp}AMHur4JP?&6pV~yNyxCY*Q+9J{Mw=UvUS+Yhf$hD}=f{xlwUQeZd4Hx?O)%oj2 zm3~G@NsmpGw6uf6n%EMi@|CCo?9leHa7W4=fgEt68=ij zNbJd6A>Vo6KUV!Er!&Z>Xs|houAw(+xlFyW-T~$myg3nbjy2k)O6{VEN7r{dKmT|S z{V+NwTc(=7L=)EK{hIW#u*ZTZA<~PIH1S!PwsNnM3#w7MlTjxwMqocIlNjuaZO z$kITBC3dh>qkL7o^tq($^Llpm)5V=01L9}=Y$y-o+CxbEFo_nNZYF8L%M1u)BOZ8K zM@?Y}LRh=ZK*0I1r-q`c)BujcOj+N)Ib+|Z)d!_aK-E83a`SnvsqCs0QR}bflX$UI zyVdCO$VPykExbCBxyytsyoClDqB8;+a@)y@Q42`OhDntpqe+IKKxcQ9tODxqxC_Fdc4nc%GmzTdH?S0rN z+8>AMt?sR4ib%#LN3$jh#M3g2d}d3Oc`bIZv4gYk7Vkp5HQagJ!W^Kr9yJ)Qd3!~6VT$K%X|Ys&QnEJ#HI{-0 z2B58F>}ouv*i%3@K(4eZv&tI%L$o(8MZLN=0T`*FD(k#YL_q|X6^nRDc~Mbn!M^=$ z@`PC%tWW(Z`M&~$Q^Xry0^+=I?c1nfU-P^m^INgjX|#`nhnhvVGFh5Y?;-v)^_+_J z{AQ3afo*NEa=Nz0n~STs^qXb(I`5>jCL9Ds&HpOCXCbm4puv6qn`i#9qs*IvItv4uV7) zj4_Rk`o>}>BKg8MyZc3xB7m+WvVD6KhYJN6T#px#oFujPA=v@NikR-z+&7fx6l|7<}4~-$2 z<9$A7iB*^z|Msx=6r!s@l8L?;1alW*1hqaXTfCt%5>vgLixgZgN+BC%yJ^UL8-D zlvVb4sD9JnstvKKCTCogv9gMH62x{u4nBph)4XtQ1Bli2IE1H&m}=DF{VfZx z?3gA`JLErH$Ur*D1V$`#owjnFn7WbgswwfeLEO%dnCmx@kNitY)+h%j`?WJOz8gB< zcmbwdTiBVhZvtI5ZLt>k+pJxR+Jlg;zxar1%)~2ghApv@a}zQK!=xJBsxSwCLrqzW z!{@ZML2h5CxRI2!NoB(fRv#ch_ZLjNfBTV-D?%*AUR}96ABp@KV z9{rwpB^fzGBgH7qAI*mrfOVs`@ag$Qg_*hB=v;r!xUyspZuI=*Z}6JxhjimU15G)o zaK07@;A=KWJqp;lu4kBkU!6SPjemWsffZYmGt=)35!zaAgczOUG2LnOW2Rg+`mdTcFZ2Z5?8~xkG1-}@ zXiMn=Ck{It72pe}rEShub~H5TKSJma>5_SThV*ejYjOb(>3QGU`;^HQobgIq+*@&5 zYJrl2rUb6eppB}dZD0h=Be`NcD4TyB5;8-+=o5LyG6fekNZQ@pIQ`#WT5uP`^2jb* z<3sh=wFKPlrXDjYN1uqAaym?J4)%P2#9~5N#Z3&ye`Qdxg#xQ&rN%>tZ)~~F)Nl}?j=jn^LOZ^R@PNUp6`tx?jGl)MO^5|>t;_nm( z;H5$xthd4-*f_sSNKHY+?3gD**|H%{<`sNqd1*1tuB?~uXOj?7nZL1BX-;%y=qvi? 
zVU*3#MBztdT03{*ImHz2d}vg^aooCkDHB?N8MLe58kFkh9tcw-Z&n9&7WtL1Wl=X>% z2;`+{xSOxsMh@Y2nh(cibjc?4t1wd7EF{K`=k&`=MCbnR=E^-ELP|F^_c?o1MiH_4 zIwl{_NKdUDMm5e`Ax2pcRz;-!ium`Hb(D8kf zMq4x-=L+O^8}r(%-u;gtX-wzWPv;dp?wYvuaWDi-bLj1mlf+V+SxjdmALdgs5N}zR zD2Wn}DvMK(ed(kN>qUB+&8o-&PEo~tfwkHpW+>!qkouXOcK3}8n_j%EiEi$%sC{i9 z3$9E2K{7b|oz#lsT7v%%EH{kV#KP7VOBYsEC;4vPEKj~9=Zr$gZ!I1zBXdkvCLMO) zgW>0g%`?>4kxz-lR#{w*;%BMZv#vYzFcIZq5#_A|ZjI%{_;)wtrRK7Y_8QYH>B~+~ z_rLhvEof7H>G2P%EOdqIbp;`YMW3;3?o;r31-Y)>6%6jl{gt?H0grg&nJ8X>xg$k1 zm+4dhORfx=<0v^Mu2mD|lw@W`)T=zKeLJLUK_xAFJBUI(S}4)e+}E-_7XodTmq{-| zqcSLYZ2z#=c>BGsqAW;0l+L4k;~65PZiscx#p5bn{2lauog92(Q6zigkz=gn;yN68 z38-PjG_F{%6uQ(ZS|wDJEL=5yYvHmtASRCt>c# z8nb!dEbUL!C*)$Sz=wk~{Y{Wux`ta|^Ck2;t7j}Jrg&G~6tRf6JQdw5Q;vWQ~d z-owm=?F8X{!MGCM91U)#@<+oqEs=RNEKV?S;uR$><9Tu z&oEdD#BqmH3)K&57M>S-kD@mNy}e2KQ`ER+COJAB^`rhD%HA`si7sp(q$*92-Vu_3 z5CYOWNKfc3q4(Yqkd7iK2%#oGC<00kp@a0MNUzenB27VxpkUV>eBS@N``b^uUm<7a zoRi7S+}C}Tq+AE~n~yl%l?VUp5QGgG-;)E>g}03^C4IZii2+*U|MY@iEpE@% z(|x(UDD{`9fbZT&-C6%>OQW2D5yXtpR*=jKZ^|LVw@uv>Kl)Lr#w0dWY$~^ufdYcW zuXca__%N&5$Us@W^nNUs-*qy2w`XabkkBvk!)t%r<4oWXk7w^jvK~n%R5~{d;XmtN zu)5Q0vwyPRx)|D_{T=-lcs49=g-^Ul)KpwLu)h{# z8*LV}SPfu8T#*MzKGIi(G@0h+L9!p%`Ly+i3)(urhZrww-mJh^YR7>5bm_a`{Kf)C% zZagR^y??U`t6S18!Nj<3is1gJwDP=f0l z&pyvH?$Cem=2+KQgTKnyD6bY)EA89WkTkKwEBf@r%9vvz*Kw{qbIWNVzD%BPJ2us% zfuA1wSyG{c)NuJWp(R<-_Fl(M6H)l<-B|esq6ifk<+0Ky;zZO3PXr@X3a68}e+DrcFUMjemb|q4IFvYP-XAfw2`#J$g6MC zfCE8b_k7PseA08`P6QoO<;PD>+kt4APAX9TK`_vx$elUz;6ACVu za5y{S*m%F;rKf9`L@Q)p-T%lHE>8F8Chigef|4!oU0-gqQ((nf$Pm%-{uTgMn5UF* zdG;Ov+e%4gnt96SS=JmMAYJR;*cJ+Kf6T~5v^1^Kxy?N>rOuBVE$n!*q6^{ zuuBP`D@x*q^sUU`%fsT}XLJu}FWirjHqAV$Krq^L=e)rq{Md{E`+ zd!uiloj;Hf6zn?9dKNT%Z&t>}*(65ntt7Hn(87>lWdlExl-mO0wf{hz&i_E1Z(R?M zy`P4LrB#;+;dK5RO2)SXmbY@bVr8|PDa4&yq5zkiH(y{IC@2^ZhE^ihYv)3k-Ch=w z#$#Uo+P`!|>-b+cw9y}JIltdOtbSqk+i52MQ+Z77_&)&sl;?_(UCqI*+1vkjrkdwH zchRo99t%>p(O^pBi+4r0U+05Y?$Am1N&9=N&Fom~=f_U>w{T5xos5;tl&pS!a+Y}( zY&Mpu#oJSd)wW`@wLXaYk!9N`LqvO~b4;q4AyA2v-{LpOr3AaHBhNs;((mKYRJl$z ziAA9iSa<*=4M+M74d3VdnS3{Wap8Bg=A~7AQOfyp$R?FTje=Qq(ha-CkZ_f!iy=L? 
z&ZoL%FLSgUxHY~!u6|H){t#gC01~%I@Y4Q|Uwj(BJq|YaRq~Cd7lKF4+ANy`AeU^N zT^iq-*yYU#t_b?EJxZJxLy7^A8Y+d7)M}NuLn_{b+aU?jW3Neg%OwM>U^_Jk`wxU}DP20Fwhbd+qB=GNi+ z$%B%(brbN|SwC*4n&W1X>(|bzqEi1xeo08Cc!c?>0J-E%(vh|9h!7(Q1IXPZvBC*p z781|2$X|>+xd?E(omi0`A85?>;>(H-6w_L>h031pue;2s`7;Q-e`MRAjgE9@BIfi} zKO2vOH@ks6Qp@!sLS9iwTCMwA?X35pOd2<~t2Y_PbHaP8Ds)x^7fmF)^?9VPNrjV- zYQyu|?tJ|BSot={VpX+=rBqek)Zr8+im~qv+o$uK4NqtyL}GJJ1}08Yk9KCD?vIBn zs!+V|Oo$KVWQIj22$X9W%3eGuU*R#lo6-M>t3+c}FZZmO@o^&cw>SS`EVj*38Rgc~ z1HAWV;NqGtzHHg%ETgLY$g>4N&_(fK(L{KXv?im<%Y_O&sg^kf6=&&Hr;w93FO!em z$LUn2RIE1FR6~xh9oMBOsTcOGRR1hg%}J;|9hpxmst(m8`Z7-iE&;%Uo1!|xvL0_b9B7!Euq{v}MHZC6V;oAEx%<}Il^ zDIFr`$n%TO(&5;UzISO=X(dCc^2#lynkI5y9nUkeq1J=VZ$oH-wW+d6CB>9+4__Bs z6d=agq3&2J)4MqTh7^%plhuvvC{6|ubkn(OE6fX0|$0NyA zi5lk2=hRnpXN9$tM!6D4`P>}G27>>4ipBh+%2@o&{?K0C>Kx1_ZHBT?o$b(L1qsrd zfz<8w{o0qlB=x>2M;G(w)n*ka`x!|6g9(@>r@dB>2O4j#56`?_q;LU-N*dWvj|{?W z-n`SYO`=>KT50)X@6bl5@w!>vlvMU;Y2K8xrBCp+HsLI7jVmw7UG_`T{lyaCepH(M zc@3w-&Q&eFe1*s^Aa(F4X23|8olW;NO?#~PIsxF~Ahn21m{F=IQk3#d4Go3vdXS!; z1&W4Ib7U7c$l2s~_fB1_+ts?r{74Ic-;&h4f$~2ep=Pn+w6!1jI4kfovQpPO(bRm} z->L|eQQh)p0&G8J`L?kB`x~GL$CsCt*SciCw>BS#z?`eRB-xkm z4&38!{zUcA@DatycN=ZQ$ne!p55~UqAF0NS^OTou zzKu_PpDn;JDeEsd-=M91*cYl=)i`|XiI2*AjikJ-o-q*v{pOOs7w;gp!};s=-%2e2 z386IXT&z17F$;z?42}3#cD{QYNUCFFpg4XxfAK-GDp<> z{QyRVnVOI=RQq&sBMme3Ucgb;bp0E&FxRy1p32Z@NJ!NA+<|DU{<6@U=^xVYmFtvd z0`$hs`dLAo-`!-s^^HvPUU^mm$HQAq7EmnJg!CL?V6lmPgC@uB71k>ibqqZUF964% zKrJrDmwdV1v{Q%g=t&-6*(o2-L~4AkAb_cv84+dtCiqA_9l>Gf-69!z0t~B_-lI98 z9(fCK|H}eL&6cjxV!kEUCab?!MK-@S_s^8H$_e4sG}EkKcSFojOomgzQp}$;G8=Bi z@t2y@zZWtSUNT|^C??FBD0%~cVj^*$-bN{aVdPZ^tTtA({_a|Y&|;HU^`m-2KFnT% z+*TUpX2zI3t4HXa#9~z)>P*(l@6Z7GEUhRFZpu(wfz6*CY2P)>)O;^R4QV7lRG-qk zpE~Q%6Qa%HH>Sp0j-#Z7id`1csK+X6)oJ&Vog<41eE=Sb?k9?}2$wXu(ztMlgd3w| zS3Am9)k0Js7=62k?u&OVv(P?w76H|bRXdp#vYCU-$d3|X5vPZmQ=z|E%VHuAk|~2r z8K}G*v$9Rvq~^x3)k1xK3opqWBRqA@^L(~}YMbnES8;CXC9SELgcFA~Z@A3j^tO2S2*G>1sYkI(#e)_W?4RuZ7d*EJebrlO1ZSJKH$|D(?4@m_{zd zSTM2zugJFitaB66PhxZ~Uy46c-S49S{1o&Hfj`gAhr~p4|KqZMfblbZIhzNGf2JOw z+UpH^^Ko}U!-64wL7_=m+lig!KMH_cwlhXe+ z!Yj%=D-tx-w6mPVJo6>Q#IxkGd3U3NZ#vZfe0DMEU%I5dRhi=d!ZI(_XViH9RV#@2 z0-Rkjh}zBQUfjro3VVf7CA;qHnb82x|FTyypK}iic4_L(?95sh0c#rr{TcUL%h3`V ztLkhH#`0Ls?uJoLdiQ?sur$Z|ak0vhy7{dXkiQJZJ%AB$Wvc?F z+#L$vd#ew<3u|GfU=4*h^0HOzgtP2`+~E$@*L`1TS=lGt8^e+jrSWb?WVBlU=KziKY;aSrhHu`G&QL(AXu*+#*LmzP~E(>2r{~+t&8+JuU9ycl5j2H$X_Yg3TJ>jjk7UzxU35 zK*0ZxkIwdb8GrC5l=|4bArMw06XUz*vn{s1Ms&@B&_zPpaw9KhltQRUtu!_+?6uMk zra72?$86BG91t+hXf~AG%~ByWoPi$$MiHABi!Snt@@w@50yCJe?+~=WSzwMF?X7lj z<^$mb2d0hz>Oz#t&KW-{L=LM*G|N^+aDJq?Cd6*c?kWPyLmUqA_RHs&G?fkKHnS}f zN?hSXAKM9BLT=aGedZ*kVD_G_>Ea&L(n=V;hiA7~qI}0vEFh*Tf%^{rGY`z}M0r2o zB5JSE;dp?@Kcnck@gGl+0bFs){}P3J!`JWB!s@iYjzWJbxaB9#F3&Ml_nI~x1Q=H+?| z7GDe70Q>S;=OZA;rpoW8XL8_Df2 ze~DVWJOnCk#V~_R<)wa^DQnoldsu^t<@q}fs=nR~Kd7$4>0E2dcx8ZM;@eT~u+`?U zqSoW{;$FdUg9{I!2l~!x9^z<15>8h5?z(D|<1~*Y;^6I*=v%Y)9Ww2mF%?FGF%R!w z=F0tZKM(5w?q&A^G!`~>lw(TI-*LRP%Cx-)#LRuq{C{LUEZvuUkyX^;CG8dinKq<}YdVps(lzbCJ)Mn@-u%&s@yD=6cp+;1ig%p-zFcY4XU|wUdse(=4KffCwRT;2D=pDkBx^3WAM$U9U zkL}RvWi@pm-^XRN$#nd(HKx*uS8wkB1lECQ$qea%y6dCXB`_Q5w=q>e6)I8B;aN(6{MgJ@fl&t0D9qb2d~S5Z?>ey)C` zMlg%ZRLc6}e|&+~UhV3`gEq3@>q3SZ16HL}T<*3x@|ol9#Jj&vxbLA9qP9V0QlCee z#7hJO3HV;MiH&p#LmYv*cUw*foVWvr!_rbxG=xcL^HUbtz`msO0pqmW+082ADaPW^ zHs@z7e$mgL`k16$^N|mrh!$tQn(lKxRD*1q%Y;y#MP}lEMAgoCmhuj3#Uf9ieP#|c_0_Io ze!)qHa-IDe7DSXaIn&H1E|N%|Ijh^9wr)G@?) 
zIa_8f4I$xU=BhyL2#76!^(9{Rd^yI#4R$=t)4WXO=`m-3^0o!!;%hN;!iHYtOCf!botWxtEQcLkS zl|t&9&|fD9QoXoenOe-6=|s?=KOvEDrp6{6Y{tlaTDz?Qhf^aFX>p)(tlI52u6Bf-YJuieHr5TuQs)++PBu+`6*3fiZVY@A0Ov$j*k59rLy=e zjw7)Y)4Z0w>lT!<Lb<&7oaods|#MT9FycrwXhs;eQ*eLe#6K;3QB0w_8ToYKsvy{(EgGfJh`uTr$x zHuQNfLH>m0cM>Xus)V4>XIN;`4o(7+AOl33mCLxP7;c5*3(KoZ@fC4Wsjs?pq#B`u zRF&H|v2g~^vntSPdYscz!2x=5V{ETf*7$aL$u)$orF-1U2Pm$lv_f8bDK$W|8MD%N z2QS8F{1|tX(QtA@#^T;kDyi`;R;H}aI*RkANZ?mX=tu=WZWD>yIjG8Ga1wDd&@?kO zMWWOoW)rh~R?ew89S_`Opi#f_w~8@))n&kR)i<`m%xC*goOvrb0HcfV-o7JVr&>s z^7=L-J+Tyx$0?xbBYTpUr1w_zL zVm%jv27L*l6L0iv(+6s@&O!9sBUxwu3PVc!;@nxE)A^_RTOmWx} z`LeI0r6^I9Z=&eLKtzYwfsWTfD?A-!)*1u@c$7Do6@iw;67a1B5^{^nTJlX_ zl8*8-ZluYuCw&|{e^Gc4U7qwEna5qCc2ikwV*;#To&i;x5 zo?sEuV4snlBZ()z5Y`X+cl-{l2s*{6*5xnnaq zTR~&D{+nxA3KI4_D#-{!{wO^6eZH4D(pmnotDEvn_R%Xzt*e~e`YkZ99BcNox&+Y` z@&=?F39D?YpOpzSpOiRKLqW#`G{}@w)D*tLyzHJ^B|M=#BqA-bI4K=8_T?k#s1p4! z(;XwS1Q*DEr~(myDkT3y6{=0uUn>H*3fbA5e(_+l!Lozjsdo6Od)lRWv3$Oq+%m9G zXd!m?IrHmRVUAPGj16T1rp)W!tGQJ{#c37n+t~6&!Ka_lquZV{%kJ&P130>s!HRP0 zeE`ttrY6Hq{|}&%bqQ#EgF2JQ2RVPS&-lGhSk6nKc{yRkG^+-EOyu5$k)cDg=v!gf zvbc4GbnnYRtGpWwr_7{)^JYq~G3jZS!3%Par9(845yo0(Os9Hgph22mAa6=R$@@^2 zkr`0LMAeCiTrT=f_U84G&l+5TaB@us_Tc!Hjdf5RpQ-xJ;D;W7*MORl^p&S|4ll=2 zT1dD0bTJ2Mnm$@s;Sv zJX1EeU!c|!*+j)_^Mf}?^OfiP;T6`S?}0Uwu=9A9rm^iG@MZ=J+7)h`q6VNUZkaQk zml)2BDU}YF6*S*V==X&d?{i9+;fkOYB%No%cbM+qHCwWlqagOt03v}yu^C9s*5Hx5 zYi^EeY(;i{`YpkewoTn2A$!Dj8rvqm^4|Es#PLZY3rZW3^~FEq_g>7igH}d?xr`wh zE1sLSIhi!uz8w7@ol!=v_X9um=|mo`ZdLusQ}|2t@~Q3J7>ftqy$v7whQDcdExIl} z68!C~(tQ~xuToFaS8x0WoJ#oNu@o<;?0qX_2z5{LV}H+$kAWfRqV)5T&Tf{X4%&7? zdbwY?OQ~`wcQ{GTN1m*Mf0Uo40fR58^)z=q>R#0onMMLi)^oqb-ySwg|EGP{ccuF( zU=#_t{cfSn<~%#V{_yG2B`NyHZ|V`n%{6YVay=eX{xRS!D0jv9S_N2h_@fE z3xC62j6rSVgjm_Z7>IE7i^qYuNj#6S~GdIGF>wdcTL{9+DR;DBOW*`71H5<>`!KQ&P3%8$zmJ>kkfv5`+Wpw4$n6ojuEtu_b?SZ_q2r{+5!pnw1r2LKXYKq@A_mu0!`F#6+^@z#toE_DFT5kQBiwUXD{we1 z=-9h%-#Qm@dfff5X|^ptIs?|EyK8M2DF)jc`C*>O1a@3U+OqxyqJJ@Eaeka?Vq>ON{_B?3xhVq8hN&pbhI`wU9Z|#{ z9`jgblBro5ZUXVYQIF>?=I@Ab#81pFAg@4H;Ed5URZn`3~Yu3lkJ-~z;^hQ z@qR0dE1@J$|Hz}RIDaPNRByO{+i!l>e>_;t)YIvM2St}3Sk-rYv;Cqs z%Xv#WLHi4e{>7}rSslv}uY0Kbo{e-luEt{fi3obg#{Eg~N-X7T|N8{4Q@AQZk-FyAN1oGmy zGMIpPL`N9@c6qrs|sUJUuII!kGq~ zS3xKk&K*QkBv4=TlwN*S!a>|O+{yt{p(K6T)S z5xzPq;5!fDN_P*x-?y8I3O<&^WpuGJ@WZn8?0oZlM=5Lb5sP7ZgiW%V@)?7|?5c4o z8fyT@;d3XkjsWFcX&6(wSSEEU3|xHGtt+ z`fu5zP5^SbR?e*WC@7*mCo(}@UfXKrsYAHiO*uS7Pi+zsi3JTSOMxUsdVSt5c|7ge*P! 
zpzJLYPDSUi&r|>D=@K{JaWt{sdAe>$%%eZ;`?S5#nE$!q-x#>OWgYs|rylrA6_L>Atwcff9L1ujcN6&du-mOOvRO#WcB zVw zg8n%t%0&0944^|?30#)gGgPq&x=mZPr}GAC>8evO5UfSVzo2$ULgqBU&veLZEyU^_ z_Q}nRAj8>_rrR-MW6`adA{gRpF6o`;!p3P%^ zi+4;#zk@Mp=RUWYe)Dr=<5(tQhHKk+mIf6}3Vxd&T2&%7*EWqFiGLbzO669%y8;(#zrUtC`|NBB z-(6?9wvGa=BKKTy>+{Ui=X)VaF#qcjhy3G5$M>9rh|zTn~YJp8R)UdymQAPRD>u7hc^EW&34 z9zI1qsGj66=!ZXEYB?iFZ;F#^W;QBWq#Cy+hy!wt`@R_ti^$>%TY1Tf; z^#h{&D;aGPOARb6UnRC7rfK0u^kV`}?*;7TSRi5C(eZ|aPbtq;%P2{>veo;1JAAMk zi#tl4SoKfCD4Kc&;ZU^VuxqqsMh%mlFQVsMO)13d8Z4-^M@uOgMx3vg`s`yC?nt^} z7pFl`$G(Do&m0V<5BXf$mqMP})q4#%e6?6oimoYnf`lDOb!hTNntqJ&+=w*=um|r` zIg4obSE8@Y%N9IiAZ}kEJ6#wC4z7J-W(3kRdZH8f9AaBW`ovOXNHcMhshM)7h@CIB za9g--)rz@1>T818&hXQbd2g(g)PjL?c<#zBgKlZBEDk;|sJ)z!L{x)Twr&Q=STS2hSN|pQe_G04|89M+ zqz+1;3dy6k!=y+`sE|<-rHieZY#%cyAhwGX-IL8`nX;N45psUjg)o%fwfm7gv;`k6q+ma zb;0ZEcwp?sb5D*gr#1e65}h+#7nrR~@LL9iMCp~^aAIv$yx}|5Mm5P{ChsaVv_z1I z{080q_SNu{c1x*X-Nv!BbgY7cIJqVLuT}*nF8=|)xE3=cQmymbNg>l9uFir5E(GVf2dDFaR>>u59ai`Y_KJz#btcgbFA=k>mm4_EYWF zd{8iqL#W)cT-##Qj-4&8hT@M+NE!nlEV|INq=|xmV6ADADW!Pnd0S_Os={}RpVpi0 zGo%Fz-eH@ML#XYtp6jFfGwRW%6d}1*QNdypSzQl_t{(R^7Y2FZ{$60E} z&CYh_sCD@2IWhGN#sYoU_xi#(%Fb|3MaIL435waX)TDXh6y`R*>+mx~`onJRVbUt` z?C00PciPsa8f*Myck}9n5pM3mm)rlm+Pnd=LO=$<=`qxHIgp)~mcmwgBuH*${b#Li zS{O8laEZv0KnV{BL>!rAUP4b>E&nCw&2+XbEX<^X6c76+2=ESUiT)FbZVVbqs1O8v zYe9G_AO-IlVC8YQ>Eet*&CG;LQuE-uuyASEb>HRAl?BTX9T90oM3KtGS>mpi^2hvw zAR95(M%JaVzKa6|0RBwSo^;8pSo2U{a+{10qhVTRXk-1DTnehOq;D`3@J6@WyAKVn z`7>!w{d7?hUyu5Ac^wmK;U-q&_YrbG^3|zVf2NXorpxZyZfvZqBz^bzv!O*JE&dAZ z4$j^YJdW?u;3c1zQL%A&YHP=N&3dlXu|R)rLG!bWjObb*NYsKtqJJ$tXRL2^!Ja8w z`d;C8$8D%rE^Eu@aA&Ou^0Le#f4z>|(7!}$cu-sko2z`{9u98BO&z)g)J8SrS=39;uzF^ye zi-L`H9OQf+VQ)nf?;rJmDz^kANXg|{%@s%(LgSxN6=gju#&gCmT3e5`I~Co8k6KxA zW)IHvBXUR3uRi;pFGvR(Xz7?(kPf0Gr*HJW

    $XrM!NDsiVutG-G717Ci70+D5De zzR{^j0=Pv2>nrJQ%()LD0sQ4%VuQgHOJ{ymNkMtK{OaCqd%Dch zd2=m(sk3PjEoP&hmr&tctQ>976H!BS9!B78d!dj$TQb@DJD)1=4W=Ti?mJ>>7v!g= zWAY#O2!Dy%mx$&mhk)U;Q|D6IJYUHOO>9`XDANo^8KnA-PTMiRZUiW?Rj~jt>@_RN z91kpwf8@93n`x*tD<~!QBCQ5tnF@=2H@a+UG%*tLqzsZW3+upG+zN}D$tt1c* zV5`Go`4KO}oy`Lo!540gwz1{|S>tj zAfW;yP*z$6RI_=jnnEyfiK5Xxc>p%{R#=Tp5~wMyDvy%`UZ;Tm>kMF%^Y3I=JGb01 z?xnz>N(_H6NGe#1-Ymo^a9e2S1-FKs0SdN|mCcj^LC!&%7=cl{=#}esd0p{!@Q0-z zzHV+|D?ACHs#^Tlk;_dPmg~1+9<_2HZF@S2dK-v`GkqyOUjJe4j{)#l{VjHq?5o_b zjpbQfKBHoq4|GFHQ&5BaN+$vbx)Tk$)dI8l#*7EloTBEwTAC^U7E+0Eb^b6j$)uke z!n-!aJwe&Lvkw_G_wqC3GNDr=MhB5*vvcl6mo!#7^ zuQuII?NA-wMZMTMuaBMwLc9nsk2EjkGtVqmXsf18v-okVW(X_;NfF?Iam+w`ys@v7 zD0dT<&NsFCozQhGtt4)$-eS_t*o_(O+))G7#EDTHH4vxs_oQ|{DcyRe#AS+{WcoC1 zal7{wgJg|=CJkh-Ls*zu&f~LPz`8ef!F0AX!6&eJAvYs!e5!GbSW8CY3etTy=JKb4 zqSrE@Lct--w5J_meo4%>+P55!eAANyr)z(xK*29xiRfc46-pSG2^a|MEWNtdMnh?q z<{-WJdkWu=GSE%Wv_v6MQV6)a&H_Ivxxg6-esb(Dk+j9{Ap*c*ASa}57QO)(LCY9( zwHuy9heR&X0sb9=e5E~167!>6ug6XJHqcBST5eg67XukDmRB#w-W#f_TGvOfIL;cu zmUrYCN>n-4n|hMCer#sco%1uJP*S}?-I9a!I?VG4JVD=@pop4eIKX;4XIwHxn)GLw zENCd7RcuOeuLr5aBPL9?j*HkTf5fT+i)!Y}e&B9-22EQu z`_S^XyML~N9)aa!tFfI&YctArf{XVm6n* zTv5sZp)+h!BZWhP4I}~L^4QpxdRMMZW~y4^~9o{)4})tIz{qbBUs#WG6JXt|5|0 znVFYG@VF+5pT3k7_&~0v`dc(4WVOI?pgb@?ZYOnK0*`f!@Q9Nu zO=B+(7Q$W)3%!GnMZy4+<$vd|(UFMFRawsf(*It_wR&|?=dLk}@bClhrb0>f_5*V* z6A?qfKoQ|rRy1Luwx`G@62_aY?yV(Ib) zjI4POnsl&Fa6q~$b*tSqc4MOeidQAzePWjhSmKC#_8AUtwQHc5B(9XMmzd?XNxT-%} zgn-D!wn;U0j-|-|+!hw7*ESk{hY_?Ti z_j@O%6osfF=WHKRsKn{U+{>`+riAz|sX{Q~Yy*~AUvz%`SOa)asU?qT+=^Mh#l|JW>kZYi%jB=1#N6R~-Mg2ue*})}^Flxu!w3cN7%saQ z0O|5_X02pRD!TMP9u6BB(aR0@$yiFY9{KSn%h}I`KdU|C`M1wDLF_`3)?o?0*;m%* z3_v3_3BOy-xrUU6-_!l)k}djlKePTn6PB2$a|g>W=G`K*Ds3|DV*wQg;etQbH*EeA z{W8!4Vg;W%eAoy#>{~2f;@0`HUr$PBi$7T3zfnW@@e6GCQNpL6>3-vlt+q(Tk5vBk zWH)Sq&slsqsi!iIoVWhO1^(WtQP$Zu{d3x6Ve{z^SMK{n6+d2;{pjm=u)X`Z;}iL< zMQ)*mKMX(K{Ev4P+ogAvJ#qPuSLF)rvFE!U77sVii~5LXBTo?Lfx_pL0T#qYAFtZf z18IS@fOpE=hr1#-YnRRg>F>GNExa_nC$&1+S831lA-zWE$1i^1#dSNf-TFp3{?|J} z6G(}5Zt!ph?D^+`_}Y;U+mV*hh6P0rnhNWx);EijrV+d8;!&k8SXnO$M>^Y?ZZj>= z<21`M({!%HB@4JIDmy+A!X0UWh_WZxxjrR@qzi+#^2Q}cg*4gyhKp8NvoRU~4JvRW zR}V#;NuTJ!G}m9ZxMP`;NV2o+4G;lqMQnfi`M6h^n7|?cdETgzH1FqmQP*Tikdm(j zOTVSPysFbSKH7f7k|7aZ@g=9#)9h1$!0O?hWxrgRqokBd*Qy=(z4QG-{-b<+2sYZ< z3X+Wp)ZZ=K%_^a3-JbTN9vLex&v?GL@g%5%kbw$vA9t<8K8n495vA6y+NyuK!-tAS zX$J2Z@uJSz<%FkxiLJUVjN@!~lq`n%PfVpv2efDq?uBs=v$}jHH0eAf$3aAWijicU zD_e5iJtqM_g8E#jJvvT8$8kL*c4lG>uO!$!s8omSD)yXjHn+>WQa$0cFKoz~Gkbn| z^>b!m6o1Cq9mRAJB$xA+*DA%zucY>&71?~ch+P5>1h7CS<&CfHidRY}(x-V~f)o3^ z-^9vPZ{9A2F}~s*%#PiOCLBamlhhD%e3`;rOd1IsF0(@-xM(bdfb=zJ>45$x{Q^SV zvHlKwxs~^`bzU0*#LYB`_fye9{uht~&Lds7swdRRcIFY~s&7eMu-_>bloBi@yF+qsYPm z?51lNCT>dt>wz^TC?TiGM#mo5)i1Q4ne@lGR~wF6xn02_Ds)4esuS*2xmy0D{Unrr zp+LcX<3yLFIq=!I>%{eXOMCw&De})(?kJI3t3W<;JRlINAcsVS5 zrW}MUYS}moG6edePl5<8JT5GP(3^QCr=5EM zsnU_n_Dx|j5Z{@G7^fDqas(Oz_pK_Td{OLK&pj>X6yU30UQ?Orw3a-5OdZ$Xm<$Es ziAmP{Ur}57%qi%tNkg7Akx3{bE&xXz53EJQPI1Ab_lc|u756%L@(U{;id|82GfUo# zx|Uu*u>cp;GEjVx_6PB*&m*vV@N?H=W!E!PEe^v4!7}HQNGnhtuMbg>Ulq4S)8uub zvwOFWI;+coR)n+};!7?$VurN|k0vRp69APgH=SMn5GD~nvB23CSC=Ay>%oVPiFGjg zwP=XxPoOJ;dfCDMK-8tONU%A|4NP_gE&|>KOR_C2%BN&YK973HDt^kSi{JHsf+pRZ z%5y%vrpS^kgS(m1-s}RScz1|Z=VPemf0z4^mn|ST#!D&Qx7sI@ze^b8IcI;c> z6GZY!roW`)7XkYH{+X1-f@qbYsCgdOea#tun*qw$+J zS)zfaOzf(C-mLvf<-K7FyOs5`u5sZ};pHH9`GFuCTI-68ffEA*$R}0@PJh&4ugG2g z2-k{%WJcu+Eq&lEv;7Yx_jh)i3B#|A)YCaXi0`NVSbSXlBIw8Os}4Y{ppQaZ-?!4s zRKfpEnSK2OcuM@2GMfpc%$^qgmn7JZ`%4r7{n5?T+rsMbKq7psGyF^RB#Z7vRUj;f zD0QE*U-;q1YRs19Tl-9-i;#V>dxequH$3b!udXbWz5M_1hr0n;{RnrV@tmF%YTL8Z 
z`1*VDZVWdubD7}cW^nz7_A7&yp;kHvHJV>XetGUBAd6VvkHqIgV7-xX-Szd__&*!S z;r`-y4moi7n7mEX?~3f*)QGRpXDw$k2NmfFE>Lzy>=Pk=`>_=qGJ9HO&s=t|8W{G| zdgU(>`)=33scq^s4+&&3o4^nq{LeoV((7VXAHlNhb1N8YJi{!>xMIaEjVe5r!BlbR0MgrW{;R+mFewaiD@IW zTz6w&+thV~UJTX^Y4Gm(6B(=UwDU3r1yt*hWsO7T;Eup7+=>kEO{D%(i-7}^K+Bpy zTBqPxtDD(VN(n(@hXf@pK7YTMK|Ns|8kE@BXYCu$O$upmB{MMBh_<%(;yu!;oGQ?o zVMSN1ct^|H*Jz)$x2!II4MZza|`1~-{EA~-l zRNabQZZqSl*NzAoN~~*=Lsp41;|VBn5YXg?Rz0{emEhOh6Gre2);KRg;TzI)?m z$YaW(^*~M)4&@u$+uNV+pZ9$C`FXtewb$qJpD#F$9{XmXM6{$htFy<%lg3ma01|&k zX5KO?{iV|ex?hc3%T`R6xki^IP1|;MR^&LP)}=lx;An|_=+1GFR@K`$?)g41wt?kd z2n?tHifwyfVJeRmG|H69!4PeUwc#NBKEJ*;)0cwS2$2qfN#rMOSKiRK z{!-V#b~{cTd>+ojgouJsVdZ)k^n+?N2=o9mgs$R%mOu%yRX+^TY5og~*4~}D#YODQ zbv0?JyWD^soC^qCk6d7yZ&Hdbcq#7$y>g=e7-XE#GI)r%E%+7yb1U%)?6m6g<(P#P z$|F%HZM|q6qSaqNHa~oTnikqHZQaC`LvpvBCrqVG^5m%B`M$lC6(CR@>IEevfFhnr zGmAAnk5wDpPGZnunH`asT<+Ta=?6pXin7RMm>I4o(5VzzO!J0RNzx;V+ZpQH{5c( z)U8VYB?_z(k`o+sH8N1j4A2l40zhLwXl+UvnP{Lsdh{X602Vw0*pUJD)QX4%`QoIk z3!D5W3Y-bXA>4jSXGS(!W;#Y(m%(Y)Rwbz3|BJD=3X7xby0tsOf&>fhPH=a32~IaI z!QDMrfZ*;HoFEOs-Q5We!3pjj9P(G5=Y9A8eb==Q_5mDKS9PydJ-gPu#vFs*NzB7! zSmrMqrOnTWmH{pWdM%tT67R@cs#-2xg;F7c^>5v{6l&+E=F~*v)fE!uyez^s88xc{ zj|ZD{w!J$MWTsBv=cPbBEh35<@-MrQIKmA01HR;9j}w!=HeR_eC=|+3kx`kEE+@5} z)c`{n%~EUhG0n8u7}GyZ01lok@O5$Q8mD5sD?4mkSLMW(L$H)Fvg;8))fK)PnWr;$XbNZ$Vh7-3?14h z(92dt%1v{+&CF`5&Q5j3sRBBW?0Z3Z0%_zZr|+vPF=7(zzhyYFbtwnq=|(xq#aEkc zy)kY(BN}aBfC*Z$hVs(o$~Ubcg<9&bE*nc};|Nc_`iT-7>BzD`jl$_4Ai)H#9*0dF z@-Pa4>JWg}8ohA_KF%qZ zMiO8R1|jTL8;5BXum!dq2bW)Ie~)huZa&0EI#ZpTgQ$(PA5pyMRI^eDoa$BtQ|jJB1>bJpQl=2jWzsHUXn&uW|0 z9byK-Q(;qHE>*i{A76P06?5YnD|X@zvd9cmz!6?ws)H3;pTB1PvUVi5ZKb1vIv*QV z@FyP3RK!#EpgaB0-iyd!Xv_$dOB#||2K@}2C%D#`SR)7|VW->gErbW1_W$$ijE28K}jLs|M@_`Rkg zt$E%GEY8}oIH~kYX%bO`1zQKBO3AXDX8_O*a#6I1o?8&naK~s<8_~Chpb(YOHPEX! 
zzDS|2IQDzfWq4o=my z#1*A2XM)aVgDiKT$1wPBqWh&3DAiWPRVd`$Z&y6Q80-QOXIh+ z@$A1PS5eOTF0LL&s%`Y$BwyzF*p1fr+0StmpC0SC{BPe3Os zbzA*AhJlJ+C!iCTv0Q!wc*+&&Kr$?A(JYGyI?%kxCJLJ6mne%Vd~iKNC{X?bYPW1@ zLtv>?al0cHa;_``0?Yn@5`TN*KYxl{Id%1z=WPFLSze^>AAbQRsmn5W8#|x4a^Wgm z)oy?qIcG=0a!-<-*3(^7Fac zv;K6u9+*_V-LV&|-p$*Js(C!BdJul!v**qCOf2IMacn(%Qzb%)2GL^2E!Wm)W(x%> z-puU;L5r>-kLv2GCn6VyZz#R}MoaI0^Vf`JTV~^07WF(IG)0Hs@wf3Il(>d!{$*10 zaU%GAT0I|)A`2tziR4o7v`%rlUMtpL>QkRAt!gvSEt<+V9tX#$cT&}~!DLW>9V7}- z8sRJ)el;NRLCGQEzCA3Zwa$iJD|yf274Wd;=-lv>Fm~<%bsw|5UxXC$ z%`{-vGx24RZ4D@neWq5XY5W`Lh5;>Rm7LgC?CFEI-|6%M;=`5^Nm6qBh@PM|AxQmb zX5Ux3qQOK)o26qI0{7LYIFvl6qCO_05T2z#*tMKj-ebnF*dbde6BN(M?HOx<>khiF zb0HxuoEA+iNkh8T^;@Ujiz*w@pOAgq)$6|nnk{9CohDi)xc|IFJC7u#KFG0`{#L}% zo7E~`Q>Gn3GOF7gKu~hr+pnHR>2WDd#VGBt3$sNckyWqmTSBIIaBgs~!z!j#`4iR0 zV!N^b;u`1iQ^_~R%ld4fsazM-etfcdvg|0gSkrQO&zx|xO4SW59T`gIp%`X36HKU+ z$rx~XUrhEo@kdrwe5uvgE8ZH7FVP^t4B^zPvF5LyNxz=0Z z$~KAu#V00gV-i|n%)e#c_^Eivg-AzDu8fbdVXD}3bXiyTs9AZt~9Xl~>3ELzm$NT&L?Ck?g) zCA0~g(uW7PbrKbzpyqQP81 zNwX|4l2xQVf*%9p*K6?Z##g-w|4xvyVp7a}3PD&-9GLiol7~vw3P)_W=JH-R{^o>g z5j>OZS~?apmH-T74aNvJGuQ=& z@AGHULzTz&m$=^#F_oxdhs5M_gKEC~KoY{gy{A+i?GlxUnxAG{&ZBY52P1h0qI|5u1JV@z76OLDz{mPKJ z9o4@bPy*7Y#r4CwHp?)e;MH<|XEdqzrhgC8kAW6>-B^=(5&zXJj^m5zg%8$xTW6SN z63= zzNzT`+*-|L!BlZl(f`zPE%$N-q3aC^M^TYcOpvn$-fT2QWP&Ydo9{5+Jl&`?P|i#` zZucmIJp?~7mSJlhDwj}To`Faaza1RZ$Kn3*-9N0OefF|$_^kpOV|BWKQjw$?O7njY z&XrJv={aL$qzXAb-XL>CzHBC{SAJ9I0#KQhcmu0A<8Rt#az%b~$3xo~i~66afVnT( zJO*dtduEd#@uX?J4k=x1=oH;RV>p_keH@3Bgz`@1hwwW}Yz5&^FNrxXY=b}^hgK-n zy{bx=W5JIKMUt)>_&9w_cT4|h89C3}l&-|Pf3>!@$MM3IGuzojtM||mjYKoDPnV6e zyMs&R)URiN zf5S&uW36WUn@-1_(r;GA5k&|oT30977Sf{g#!lQ9c7C7ov*~G9 z&JC?f7ai4_bP7IQ{7y1si3DRA)x?i&cCNOmN=YCrO)1E@Fmt#Mk;L}^?lb+&(D78v z4mUyya2K}CnH6|Mqo@>Oe2o`pW6IV)R`zf&Olm`)6{AX&{62vYuZPfIf&JY%Z9VI^ zgrJ*KuOf2anJP?qmFcOTT{!?!aBWPSTuSn=YEB2tL*!kpsA5LGX`5;$pV~{%ogN=e z?mn)n=R5wU>F*bpojx66GXBmOGZr|nZYCH4j8^-c`TNr_lVGE$6e+N=E<RJJl#br+LgA_PcFwwCJ*bKcVpbUdXYGoNQ%Q- zXd1(XYzB@7K_DOy*Pyzjw$8m2Cm?44q;peKVP+p?E2FJC zPQvv=R6#(uF$4qz)pexc(64aK!*$BvT;6_mZ6`=xVgG8vXW(D%t74$#(t~~h4Nmf+pHnH?)z#HxIAG5l$4yOW#TWOj^$gvBFugVO zj>LVSf8F}cmcrI;BFC6jjHjzKG{H6ev zA1oJ}dh@VJ)=qa8Vk@)F@Mqpkc(Qn3Wr4bcTAdr-5~K#Oq!c)Y^A%;Z2Ing*z+c=I z?4n&!GXt5ELOCD$o>0TjlSHJ)SQ-@;)2BOXjnVh(o^z~S0#V8u&hyGO5oGIak;Ep+wFHMDZp&C?l8FeUqNhXT&IWfN5vd@4&a40LQ=aXBcCn#9#{b)Y^GBlx17)<>f=mixcOXVlQl3l?({1{hs!lpwvu0f`A^{Vufk(&viAMuGB5m z9Y;{}zZaLJvV04if~+q)*j}+x5|sD;VCZlh|nMOBY|k+FrdRU>1Fmc!G;z2`{(DC zm3>Rrt*em`R7r9(`)D^jxk@D&uMI~lJGG#!YAB;F^IAei9qZD2`{KQo77d$Zqq)fn zxhiIHU>7NOcG9! 
[GIT binary patch data (base85-encoded binary file contents) omitted; not renderable as text]
z)m#-?q$phGw>05t z1uMWv;qE@aH$<#JYgnIE0hvQbP%gnxYjmtzePo-hH6MseY^ow?K-ogSxkoa{Fz90f zsBo8WJ^Se@;3BT^x6N?zx5Uw}eGx0;w}!`nC_J!^6zwenV1g+4O$b51r#slgzr(gF z`7f4;nRiXz1Gz$78rsd z!&6uRte#gYe-`x_ZRqxpSe?5XwJyfr2-H;WbDdd!%4>90NynddNcV_@B z0I(-o!mPFN_90Oqfd|)XBBGQv=uBF=T|Vp3?D#rwDE$yoIWLj;KxWD_U2`20^%$!h z5pk^u)!nrbWM-s(mvUM0)OuKmf`#j5Omy7B;iWa#_GaaTY zqpke140qYGH!|~K}=t>PM zh-8KA;Ubxo;h6xo*$k!q4eE4seyM02_vxr46Ap$4DJ3S=g^Kq4QzKS8F-$HFeE4fh z5F)H7e|UxyyPVwHFWbc#HcJ{mqSnww)kFU_L{eXG0U{K0r5K^CRfod+m*`{A zf@It_fd!c-n+36UdyfVrdFvUbh_Nf!{)0r7y!3EabE~w3KqMt$77#@FzhLSRjHYKV zFN=n7h=Y5B+LJL+p&BYqGF;dYCWQiyvC^4pMPC|5;Z7(29~mms@cE%S1* zmKGm$c4@J#v8ILa?{9(;3Lc@Z=B|5{z8TG3bbv%{8pot^S-Zhh%Di3kXuGE|991t* zLVA~sSxx6NmmtW^zu<=Af=iH?wQU|79i}|V-Zl&!XV&9yx`(+Fh?F1kRGdc@#gN1@ z4R%CG4iSK(0ASWE1hla~AVl@IT_St>O0&11`3Tsj(~n$c!`y7HYv$96hriFO`m&s=LH*P`p3Gu85G|% zBbjq;yx#OD$cS@$Vh)niDv5lT>a%zJQpYHg9{<4VLGbDA>Epw~cBaVr>3%7g)B}bK zX#84xr^=FkuAzgRyiTM~lK+yIx}%lwE-J&Hkc6tz#zI9I^DZDt#hpTBRwbm4mM$KC z&Qv508wZ0;f{lI0>Ht6l@F8+$ZqGx@QijJ>H(eL$I(p1~97V*cnN@@wV6Rg;AISYK7jQ<*(ATl)Zk*@y|b^_t#s#vTwBwHCL#g0M5Ty4-hTFjG5u9w% zZ#zEzWF<}={iD;7p|Q9c^OFt!gD{#ghC}HfJCbKy`NU<~=^7L=>kgS>b`*gR< z>*#!B7-}^T(i0g2VtHqeQY(6P7o-Tz?%)dSdP^|AJUW=Rp0_qaP=h39bw6odG!RhP zL`~rp%wcU#(RsPSt7HQhlDBOD+H0gVk9`{c15{(YYQHd$cmypnqqkQ2bo>Y4f4bTe zA4EGGzg!RtLuQ^+4u8uAXT3*05!K5nZNH7b=*U8WHjR1nRsxW8)V<};sWANwwa~r4 zteIwUtm*xPwCX z!MFX++G#1|jH~xz$mg8pZ*x0h7dAkxG^l2Xky*```>8?z zONlSC1Mh?9!}tm+vQT4VF*b>K2bcr~ErQjk|3{SiOmUz$^$Z1<5-H9~Fzgx1Wnl2Z zmx|0&oNm?d!tAVjh7v2BkB$pfiPL0_nDWFRjkM>Hm;e$-W>U?dpk!~PYB}xHh3{kj z6##by{$|h*(52O${tY9;?q{R;!r4{uIJy?IT1-zd78B|W-F5glSk=a$$fldcHi zV*neMf=^!wt7V7S?|Yh{rBK-I&B7_hFS{=agL-6i35`D=XnO@P|9Ye9-{ZvO%+7VuEevwv%6X6QI>rG-q;I-3Cs*Iym{*N`F* zU+v=~7K2-GgCQ4g0M0nLia(Y#eL&HO=7|a|Y3j?P9=;2^8r<6}^M((NcLiZczIvUJ zxHfO|=t=bZKlDc+FCHIbVI&rscIPafo~Zv{Jzw+0LgI(+Qe|!!Q5Zz>6pVf{>6lPH zMD3$mKlwJ@VJ73!$eSoRMf$YE$k`|XWG0Xed}O4e!yy7ia9#l?r+-AEFPo}=?;0jb zgjTB0P2+_cEBUHB14wT432|nOirvJZLbWNpPAtb@pwJgWC;kxOpL)Y%7(KQpyB(Xg zQKLbn2b1@M2i36f*vrlK?#@ASMP0&u+6haS=b{w8jcKQCY+ff{<=V~~P`*|1ds6+g zSQ$itk2^-1kgWT+#3rM!`@e!}4k*Wy>@Fl73c@vGe|ZIgr7enmzCs||4lLa>qiX6c zz0nOKM-nH7pN2Qd#KsRK$c~(geopoWK6-hERJ>~n^6?lnzS+ihGqM1l__dA*>U#Gc+mFaHdVr|tJ?t#pM zl;;wk?xh?lhBb9Ov8ZS$l~~6e@i5zMD=xlY&n-(>xBQ47(QSlk4RV}*vknu>Okc`$ zf1=nvXl7K6bysk*h#Wy)=3}`FHMBw02MedJd_1rEO7N3yBxd5<6^D|@R$Whc>{Km& z5<84S`Uahgze%d~GwOlWMs6pW{ZhYidfBe2|Ba;1zBzMH7+W-t6Z{8Q7Ruzgn>QZ5jPg`Zi~R)p~gewJ^8959q1655)kE3v$_ZoFn*nM8CfY zrVGSyKr99rV+w&|v-AqTB?DzWV!wc~>t}yly1g58-M)v4 z)r*`=ojB_l_+>Ks7Eyiw;{^xcneoXm?Yfi0H1_ASHjUhc)d}LChrUyT-MVu(QAsbc zy#a9~1u2F~qzr2IC|{PL$0F3vl-2nNo_%!dgfFml_T0E=p+UCL+2Y{KlV&SR9LKbxp2=Zz#YIVMQjrye3`#_Zg&d_eaS&K0-9o zRP92xM_B~MKS`r1V1neAjcwcN>Ob(*elHrSWP?WKjWjU;jZWo*xuRz6N!?D& z*sj4^-Mrt-yH%k;1Fcu!-&>}l>ATV*YZw3XNNb@faJml_TPx29_@%T5oml8s`t)bF z;*(j6OrZ|iAq^4rprXJLd$Mt#bp4|sXUmb!a)q;kLnP|r9$B$vxSv@0OPRhVOLrL3!@u0B=Zci$i;*X#RaU4L@%%2idL7y zi&^XcRpW|x1{F&2YqM*J0sc_ja9W{^BtiEkX|mqK#v%!}+B7xt^hKVUV z#Gj;FWPd>*e+tGDlk2Ko+E8oy`#&eR=7K59>6|GSgCaad0>Cwvd9fzPBe83ZRJW-B zZ|w<%&nwH|WM+HWO~Wg7Kh&PQ?}!;+(}vc^D2oFfg4l}$gKpB0#;+Xz5l{~g3(0X{ zZd69AvnXVsJ}88Q*soUxS{yhkQnAbQVQEkAwRUSBCXxb(G4YdH=KjSxVI4g!{2)mtJVYZwVl`L& zaJ>a<@=~2K$|s4O>WBygbmxq^d!o1wo+?%Sj>GiFE&Vo1x|CEEKXx5=CZ+_Itebal2X3AA?{NNCS=i5nH)dXby-09EMmL1ST&_iW1q|CzTry8cwgomG z)fpYqWLyAq6P)0KQY``nrujHnl2C1%Iw-e<2Xo1om1Cw}uNf#Qw8L6uMX5FN1D+d= zTIoR~Z7!ITj*dRsCA3t5YsIK^&4fCcUNyaz;0kCCjW`se)YsuP0YqI zd?S`$y$iX_c3xjo%pQ9kL0EDCdenW2df8q4qZrX$Xjykiy2qJ!TBI3MB1!k|_uxSL zYtARjA5-ijyPzFNC$ap@3QK|FmEHE$_LGB3$62T4e^><%I$&*fL7S+ImeX5TAN*N? 
zhI%a@2L(0ResHG3ZoBvEZcCphh=01zVJ)@AZ<<9qza?XvFBx8f&bE}*n9%zPsKtmk zFKv6u93&jh3#%l&B~i6n4r zv2nn&eSJ({EddSaHSNVSD?3>{M;q$%VA`h4*U?8fD&&^M^ZLk}Sp-%OHDj1&dRe?j_3yOc{Vio4!yY_80iVgTWYjcH0+f# z(vbt$q;EY_or)Xi>=zBiZ@iJf;ScaqN`yNc{q27iGexqX?-CN9Yc8w!PJqbWvL7a> z64k||fbE{&#|%XiP6{-mCXx>OM=m1J%x8G(`^kK4eNk5hB4wK|O`p zCnX;=u+_`AIiTKM-~hsC<3P#X*8QWM`|G0j|JtU7PykrSHUVoPW%f$fPEK8uYue)irl4qYUsz7~LuU8?0EE@8l0SsQ7{xIVSEk5?FJ{TinACD@iJu7C zSV)_n?T3O|(Jt-5*`TbyjrG6fRGeAW&ghTFf#}U?f_hKOgNAmZ9W)reFr5IXGj>&~ zYf;Wx8v%+}6V{b2>)>!o@iPcWF*1`b)+nCll)8Jnbj!_Vd&ac@U^}9_3@)z>c#k%+ zHgE|jX#hMbqLHC(l0{z}{-Ld_Z4k1Vfd39M_?;^xU7edHdm{5;Mf2D9D!a|T*mh_dq8-gD-cz*jqZE0M zr(Zs_&cz}Bjh+#|D@`g(94oyiPf~m2zV-fY0GkU70~?UzDExH54L0ieB!#B6jc!7A zKNR^}I*$`%Ye14&&KJOkeH)}<9}VB!)96Ta+Kc25Nn%4ZsM78nc!7=5XX;?6uCc8N zL^W=?!1Hi0ihx*SBSyxe4_qyDA`1%!5CY)(jqA4axC`b$Woc_Bhr4wJ>nUhQW>-K3 z=%+P*ykbp*2Pbionye)?xADkOgTABTdWwDKoPm5(*<(@J)F_H4%&~Az_zwl171K4C zUwE^P-k^@29MR+RiJd}=kVGR+#6(dUhH-x<&}l!qoybq*v99LUd(3$>xW|wU1KT{* zSTme~4F-93CUkuEOlYX!Yl+v?jP}}}^;6>i{sSa{y=dk#y|4TW?Ts%rC;|TMnMA3$ za9S?d&ekYGSvZaEMb>xDiK8f06e~2nVbgQhXhlq@I=fwUu8Eht0~P%G1YG#Se!^=V zmq&`=={!m=jKWI^BlKgKKUUCExJR{H(qCe`xgt{I&I3pObQ5UT3h%#h{t^f_r$FbMZ^$URu7xA{ z^Gy#b$Snuz!0q!DT1@$0`x5q>iEgR?g4eA}YH)rWv%M__>i4aa@;kk{GX0{NbhzYf zMQS~1Pgf*i^gNAe7r@rt03BeY5;7RlTVE4?K>Jblw`SUS4Cayvf@XC8fqXTHg=r=A zqMQj69OExRp~B{vag7nteiOB{oa=w9ZUuO%t4*ta9jFhc(aDWv(^@mS)bNvU(Qch{ zl`nTP`=~PC=0KF%;?y2$rpy~u$OBqozA>wBqyh75KreN&wc3PT+Y7l0b(O4}>=0n* zUbs=ULT?hp2Jek-B~y^HACjgR3_5<&T677Oha##os(GcP>^tz#gf+^?^|j!w+K8yxDiyp)nf;vOT z_M+*@mEhgB^mE4&1oLAl2}~b}Z^;{tnQgk1lR%qU5y9bVpN zj_Sl3rMnxEVQ@`e^c(y9R`jRh5n8p%Q9(u!8e@WqLq=HvP2qj#)?8G4#L*D2;(^Sv zQ{65LQ+VEarKk4m(I7M4qmw|ck#WW{?kb`ML6TCf1`MFzyk+m|*_H=XiVVtmBmFq9 za^X5zr9rwv=R#S!aIFpt%%FLn*34ea8)esmM#iG5< z7Y8i<7@jpraNpER0hOW%E^lFCD?Sr6MpbeNpF$sXSEAGs(DbxHuxobpmPY$jqUUEk zZ#e%4ID6{q_a7>)`<`r_ROm7|_B(T$^n1x=j>l!6`EuiyNY61l;ple{`Cgk1KQT-X z;n5TyqFe}k7bK}0<~BMB{l@TBhUVM}0yi613jD-97=3ZSV#&qET1`05bS z7h+qDt_>7XVfRV_ybAvYu$%q^tO{9v6{7$4)cW2(Ok5z_G{Eb+!aQcc9VH@2$Ctsj zWEA)bE=cF@obGS=P2D(5L28!MmR;v4{t68CkXl>n7?tzIJAvreV9T%busZ;^I&9hy zLZclZ?8?Ii13(FeTD$23p9O~tv#bFW11kUHA)l5 z90>!032l@KxFCbYbQTqi9!kNkU@>GE5_lJtn~Y-3Q!no!haUq=pNz8~gFga8pvWpw z6jVU2%ijVtN_#=K-q~=eugZQN_je!fZXX(V`8{XUfe%DM2%S!c!tUMnf&|!p-htD* z0Cn&V+=b=SD&qay8TY0CK3u+6=++F(^vV~4*R(r^l|i&FK%3gS&bt{eFsEg&^)9d{ zsdDlf_0pHG=t4+oF$4!TDh3(Q484OW5!(O_n9l>hn`;;w*~IemLuAKZtd;+H$(W!t zU%aznAruuKocW{eo{MG`f_SeUuU@}6Bxec=X|cS^4vlHbI4pH^5rZ`JwT+=j=RR!) 
zyUkh5?YqP>;Oz; zb_>|}t81i%vz6%zz~Z1BfAvk7X>RpKrkmf>Xe4wDR^sUN1CXOvHp>fsMY7RvZ#YP- zIL|q?3AT5WBvawlNlvIKT=W;P(C1|T&R9GfJ0?jOC))np^!%@>gWNDcHlzIypx4~G zz1%rpm4*muRmo$6pOMynvWPW&?mK65We>@v=7+-^BMs!1gm&1gB6-A=xuH-h{24#( zbfDiaPbH|Tc$G%*HwIyLe5qtz>WbMbwz%{-1whQ!r|nraQJD%$bn+~X@IpW+V1 zPH1LQQ^&wP)nm?hgA%*_=-RW&ShDPjtD(mueMx)mlvBhj4st7Gkh%+1O&KAzl!3l% z+dl)BWEMu)a>wh=pJFa!uwYXWI_{JpCA15wf7@;6DC5E^g#Wx7`#Nb@g3I1xru%K} z3jjhK%%lB_!YE0_y*Bl6eQx~FHDTWu46rwO_?pYs*(`$jh6*x;`ckzlf39ZG7lFWj zY^JO5sq112thn4&EZ?l_d?U+}E0%ed4$gt!pw8~3*4hyj8=9kA1%HNKn9FEvT1OBo z$+MfUzfy=lYalbF`T4?Q6>?A&g*zKvPuSR|x=ZF{*fK|EQZjNAU|l9Xm}>Mv!6f|z zx!w#id{+T>%laq1S#C3(0#};bxdCiwt43#V05Sj!**77?8@J%wHCZt<%6|M2Xw9{|BV;T3^=*cpBzxes{t13!U_QqdM#o|Ju#vL>fsi|+-iH@v z--E7_a8|yFspFMsOvyW>j-MszahY~9BBa0`(X0i!kSyKyRnD|B$S$1>O&E0e4?}gn z*4Ulb)PrrbH{&hMg|1?n0N4yk5_Y8%$VM?ORC#*9tGEiGmTf41NYokJ5spN{_ih{- zWmFt0d*{m}jRRlE@zHeP{6Yri-3JiX=FJ|##Zl87xa_SJp%6^#t@WlT`!#zz?Gb1y z<@Ukga`;kKPQbNv^m(6tABTJ$r<5AO@q0HpXKP^DTMkmwb;3 zPsXogSJodgPB3ai*nvqJOUc=W<|)W@sL@c+kVoL#{{gQ&bGrlby-m`a_SYi8B_X4@ zUh@`o{w)X+#ooqg#UzEZOJ4Ptl{oj}%_Odt#`Dv3yqf#np{v*bEz<|56OO!JuG;0! zm$_1j^4=ST5(w}@V4EF}tL(Jp^T^|_UsfHDQzhDtoWWbSjI>ZP9&&h~axYpx|0o{f zV~br26oyfc!tr@_SIqb!8|!;NDmfzi`#IiV^)Fq?7ico?d3|LQWd*9E*zaXJpE@oqT6yK0shdI>rB$=rLj^ z>f5$ty%U{Tq6E2gD=5IC^pCrA8;<843i{8Q$VEYd#=Mr{_?19b1Ev5bjkv_!?ad!; zF95BC=m^I#btW)QYXpPh(&pLoe{ahdetTkBeNryUFr0#*RetQ%_lU^26Kvnvf=CpE zz}(kbjNTH6UFMLq#r3Ibw4ea$7%iRk7bE7xPk|)loXn=PZP>XP-jTC?QX0?|ci#23 zUV3W;cbJT#Jw{~^Milz`PLb4DVtANXQHX`mXokTlwCH80ZECe|5}CVFfmss`iL$%l z^dR?-LJH|<+;jDu?&p~|a$9~K!ezvlc=Gn?UaupeM$zx@;*Inq&H*f}Aq$L&GL?k% zv+j`hx8n~kISYcx#*2yMPN}W#jR-*z@K*_p3Ukn`I_Zu>%Wu;LOc@#H6LVzJ0Kzig zQ078MSyChzrWcU*f-dmh#qY&@WmUR2p>Si@AmTHiED6Q>}WmX)-!3m1BITL`i;t{qZgj6>RXwF z$Ont-=MFB#M%{L0;MsW@Np8$Z-!G>59xGFPM8_A)D)>N0v0+wgMRrfQXj&hl;WLvl6NxIfB4aiB2@K)%DZ0X;kJh-X9ZC>|jxqx?tXY>5pnRmC+ybq?XMU{JTON_J^fU$0 z&pP%78W%VQMMAfKm#txy6NINtDWmDyA|{t1eopmHEq^wcj(_4t6&zP^ACH|^?{H4n z4jErXMfo6q+bc(p4FJq-cMMOadnT|H+cNKRL7y?B0)wQ>L(8H4Gho z*I4sb{ZPI@oHwnW{P!s4r9Pec)PJ?e)tU+6*VKE8=`e7H>SR47wj3XFErRZ6{SPvq z6D`BH#)BdjZ!?0P4#v*pH7k2ig5=j>kLIAWqyY8i3ae}|3fu%*AKLdwzUu!ES??TN zN!YdT?qtWFOl)&v+fF97ZQFJ-!Nj(0+jcUs&51c@zwh_`P95y(s$Kn0S65f}vz}V_ zz3$7j1!HiI5r8RdzHV-@WAo<>Z=!qbDabh^fc3tr5T3)jmsS%oCD=H`uH+##eg3Qt zkEDMs?8DtPHe9I=a9{#kqIfr*_0i!Q!|Hy`yt5CUjubCV%Vu*SwM^8Zq`rcG9YBH^ zyXt!0u1@=r{qF1S=O4CN-E#bQo$h}8D7L_%4`_gZ7$Bbund52J(|BvgavFMovm&jZ z@A|u|-x*!A4iw=w>nQ;pEpR~v{}u})vY*OlK&;Yx%-T&9_Lj{{U69E&3Dvnj4?M6=C01zj&c z*jg>s|K9tta-G@R*Y}54X*fN35`m_@n}uK!V*!?m1@kT|4->kqr6FSGHPdtqJ8eHP zSPIdU>^#gRllSG&1o4fhnxzYb5m1canC{O`f6PYB1obK70Dmajg3kdYTKRQf(5dw! 
z^}9+ACv4Y06SVzqU+*m4jSz#!_*qf$GWe!$shqn}d6Ezxq?2*91npL3(U_Zd<_kp2 zpY~jyAQOt^khh;SDo9EA;>ZxSodhZ%@fBi`q}aAsGvfa4qN&fM5#sI47oKUMF^cnt zz@^;y66h7KJcMBAgTQ)7n zqf^1qkW(o}D*Qv=I)0d1_p035?X+*a4%Rw}EpR0H?rZ z!S-D#XlK2Tq9&KP_*Bf?bMO@xlX&{Lq_sT<-cf5SgiIUz-GN!KEBeB}OxZh_;azPX zLO06cb_AqT8NgV9u5BbzpM^&#)~)v+ZUDMX`uRm8n6ktZggSG z&Q|`SephEPpqEs-S*G~2eF-V{%j9-Aj$#N`g}?`GCqcrr>riGEYwD(#)OT2y#5Sj- zaST=-s|%vLPU#06!cYlNtm!CtpN`8Oj4Xf|nowd$G6<0mi}>yZA8;7zHI|K6BY&3`B;%o2Kmwc&iF+gq2{{w8aJp2*) zN|9oKE%uHITc@_l@<4<#sYq2<6CPO=B2QYtOppYt!)>FbwLU*Oir@*}>;_X|Ux94x zZqCfTPjIxMQbB^mp}f$y01trb1H1G9OBqoOTtWkA=?x0cRV~)84hio#1!N!EnkD_b3zF(j1`|XRlNBSoG(STG<-vK?Ujt zZ4YrlLs<7x3!Bv4{x#Jyt-?+arjmuzbPOX5~bPQ?o1n$YVSEYFh?M*8eGC_(mG(dj+_Mpt#B6_X2oIbXMbW5Mg9St4=MK1aF~XnHWnOA76WO~i)yEB zy57yIQAtpP2+2UkL9Kgz;)VUI09z=5HtGkbxy{b`3ZUpKBU2#rQ)bU!jlvx(xQJMU z-#oT{UE*F21{K!igVua{Tka8;k2YqP*9|(H9Ib0c-R^1CPlEFV{?P?FB7EzU07kG+ zZ!L!%aQX3Uyw>br92m1E!R%hVGyJw<-8zki+&;pjRa@5%Xvn{6b~^?P$D8^_19<>A zS>IZJg@%0o15hRFF8Lb07p}qH$7Uy7mIe;hoS?jsiDdfpmH$d!WUyX6Kg-zKY~+m8 zw|x%QcjFI!fPFhYPucRne;@3R2PnH9mM)~NVA@XvjRKHqQuU-6JRkh~TZ@~(xnu?W z2omT!kO0hGfr}8{eTv77-Cca-C7a`=fg&sg&8KLX@6J54nWGd0hNv6VKx;2)%Bt{` zeq*V2qc}%-;8*TUKnIn8Gbl=efm3sivcuY(@?Hki4MM{sx?H^GKZ@@{IAHX!E?J`K zVo%$1dt6I{1ULXJVpE@xJ*kAQ(y?)U%So+-ZbW`w$GHcfo4-2bJEf{TYrTK}2gs19 zg|Mt}<=OhF{T(K%ntG9-66j?pp!!1(%nZxMi(<<&$T~`NNbD7HHLewYlJ#wf8MEy9 z_qb)QfH_Z9Y+WJ~b7{z{}- zrJKPhw~TvV{M+ZYOAu}bR?7TYg6 z{B6AShc6=e))4B=KW3MZz%CZJB3P;xP%hy3QwQj)Zbh}e63gB)4VQdSn~>#=WJkja zA25Ba<(>3b1Iwn!v3;p4Oj!ke#QC)Z)ykdG7Q*YGz!?ZQK_DtSL1mqL;vpQj)ob1xw`66DvUW#tIO}I8okShiPt7BNQhVH z#h?uAEn1)4;Bv{roUwJ>G+pdfX!U&;waN1Fj}az9%(KK z(i=&zr>4qAH#-zDbV(0`pVLdonj5*z7(9xTUxL+wo;ui!c+SVKDp&SHG)GlIY_tBP0Wtn%fvv55f z9?ePv2S~hbCm@e+2l7_64;Vf{X4Hi0?qKfI!(soE?&VcQ_Rp`>6<=QJ$h+FH7%8p! zp~4_#km>SLq;)#w^i}NHDLGD+ushMWa+H^Z_KyU+da2uY3uccC!tkPfK7F^N2h3)9 ziK8c@A^z(jMe8CVTn+{!-GrND2Hf9iLA(^O4>!s`n1A$<6QM4gs|R~6hQ;wUB2mBK z#^=zs;^BtyI3={VR=h7rcw5YjYD4+COkvWV;;L;X;iqHd_+LeCW*e3ky^804S|WUY z?V!+%l1=Mp)n|sr-G56vdrRqr2hIf2jQpsA#?T8EOw&mcMnYE9wbX{=T^Lno@YuZ< zv|-aLX4SsrMcmoI-oy1FpG!uXBUy-6g__1RQj_QU%Z}TI!wXo#AsqNlX`!VJk~&J8 ztHkJFYq}@W#Y{`y>v24hDk)E1%hA~$dkX7Y+&FUZT506`xAd#9wj*c|55C)TIC&yP z)MM2^YNqdI|1mXPOoNl+dM#5*b-K7+U84 zjtAmu{4w2AZ$if`V{<;W?flkfjub}qg_n|d+g=#`MAd#Aq^tR5uQe+Fv*a>XI1e=m&ML z(i~{f6S}CrTsr3RCe7ALdeyanTS~Sg;WK!<`x)x4s&wb->SAHaaPfdS)OCc;9JGEs zqebNah^!(BvKoepv9zYmY23zC8^zmSFu{u`cm938nU-tn+(wD*>nBK{XcVDv%-`4> z^p1%5VSAZxOJT5#Xc5>`lp>hj3pggdl1e16r0yp+$coln~(D z`yBUER0!8g9WQo`8?spv7G~n)cWvwhX7FH*mlcGE#<7iy%k&B9rMHy6fZ3{j@dT}W zT36ZrCmUrg^=Nyq>@D|qU}^EaeZJ$@Sx`56QLzl=i}^BgQFLgYQ=OD!c=HU6VZ#TQ z5O;W5;3fAH_hzs&&G9D1oa4t%z4&cEl@zP}kNw)Ytx8}dglG*wt53FoDy(#q*R$0- zcj3F6_-C9YKX7?KZPvMjc~P^Yq5djK+{ z|C`r9l1ZaV-f~)1I9j?fq{juJe zBYj|pINoNRGUOUI(!gpo!C!I+`IkB3xH}uwQjL&U&eG>Tgs^lfFk;AG`+#rOKh*(+i5?UmNq)*?ru zUdGS`V@Idx92YLJ?o-4SU3{hyg>%?by|#DoZB8qSYfxHoF&&0$t*-5)jW5=-?!lMt zyDsTpsEO#^-h&V|c{L&wKtu{@hy+ZUxVQPD$Kc`WMV*|=L#{Gci!}41xR}U;>=}PK zm_i554#~pS+!4a$Hjh0eJ0g#Ej|5`VH_NPDB=wOPf!*PYZ2z`qR-ZM-yT_Eps$0KQ z&AJ9iYBMmdI3URHXhk_n)w(7Hbuol-Cna#{U%j{yV$+Z_?btx&0IUzT%iLZ&*B9@J zA!jK16E4~hn>Ex>5rY}*YMf(8x3)FLrQ@2pA4xy5uW&jcH({Y1-E*;;t|~1Wwm>cH zDjuvoTI|#Ee0Ud}yKoVPz8k|WtzgsBXro)T*0CW<{qtzx!)1=sRqo{BUfMT#DVXQZ zE^C7gA}9mxI73^R{BUIOZI3qmlqt3~H<@5Ukl>`7d$x1q4FBiS8IGNJ_vp{xs0)P) z9tXTF(XW;t%2yA8I}xu)f}>knuQdTtzqC&t7>4h3&sn24-|pKk{tB%*-ukwLTs+>1 z&nz{ouvuUIZAe}}S#+=SGCscePUVH88Rc!z(+CR5JYkZDiE-)Pnq4Pt@X6<0En9IY zMCVU`_qb<=8l0y)aeMCak-s^Iq))N2Kb$YTL)HT^A}7jJy477{Sqk6fGMNeZnsXbS z8Rp;q0hZWZ`!{69RlT6cE@SX1G=Z}cdJ~Xlp9;lrKK}qSp|e|e3>&Acnj&oitHDE# 
zq-AtVnFJgyv8+4}mNt>cT1(egZ6gqeAOG`AmC z-Qco`AZQ*BI!os`(k^QWBMfg}qXj7P53^r{bNP8a#eaC#1CRSS^FBvfHM@EbZjs<^ zpXuS?Sjoev3kc*}^!U{RiOF;@z6Hg5k?=*y_s{@YDO=c#o+ItFz^7=oF4CF)V8-2s zxl&DwtW;y)rJOQD7Pk9`{sMuRy^@CJPQZubZ(QVl?nfRRPURP~u>NOI?+o|1Z91z! z+pG$9a~j1k+hE8P%LiKQA|iZfHpt}SlVqwi5=f3Vl2g5_H{tEejom?H95=`s8nXuf z{fq10ZL}8b1GZ1jDe3Kh$piM#RAM7!zGS43h34A1^Q2360!DuOeo+O8pVMIe?aVNC zU)Q$a2#&KeBR`~bMi;~j{}aOLEdQ%aHNxw7|9q8TV7{ov&yU%OXA-rd183BDV#A55 zCqclF;0fpJ8y7FEUXk6Dr9I?X9(HI2bBN94*RPS70 zJ1?EpMJ2mF?W06my%W1T=Sg6jmD5TJh0G=4P+(VI@`v_blBwR}5X9<13Ml zt`+*9p*nbF7=~Vq5oiENAP*d}APAn)eri2;a-D?fVUO+bG>(ARm*8KSNhxg%+Ce$=~wM)_z7c-0*TZ(Pk;8`T9L z)_%d;nH1i7*ZqseiR#p-pvL7-l*1e8lDR+I;ApYo+o@wblAijge9^0@?Am+s-f1_{ z$wG~TTKcE8JxMQxRd3m!vY+#I$-A^2s%g_)8h(o+DJ^0;TNh>0KY(8*FvgrW)o8Z2 zDgDi@vE}O(TT8`iA4<=El~2H9mo1VtnP)b>A{f~*K=Ll#D^JU_{lEXv2a1{SzgmoU_irK2{$7!{u5>Shpnhx5njQxOC3AAf zWSz}x?dVCWs?u}Ks!UAzl^J#^M3JmBbt?WD`Fo7AuI%LubvZ2$EY@QW&Y7D z!&XtHDTdu6`iZ%aJ1o_70snS&(u6>1Jk=Ep^hUf^f9Jlyh&isuSUhWd>A2k+3`fG_ zGdB?@xo5Q>wKE0AFMGD)_5JAI z#SoiwjPOiEZ=kZ=W)&*g)K>!f12|yRZ;yF^%t-1QvK^3CY~B_N=!j~2Qj#~3vioh< z2k7-+;syZ}g!FIC0a|r%{qQVcWK%4RW&KW6v`FMGTKFjqy8)% zvZw`VFC;2TO_mU&pPP5G65NT}R)7w_Z6kDC^;bTBAV z;~UI5I|@4-E(8yy=hF~45>osj=tg`{WEZ87UB%)S z6hWhXWJlAs$@Ny_L9MR#M&A-N6X@6clw0jvdh!21Q57VaHCIuuWPAR!N0h4q={$qp z3wRtkwsSu21^Qw5_j+pKo1v9gT;yPwap%~B)X3VMNWQH~ z(H?6U#1rY*LC6eW1cQG7$1H?ngta_4i@M5!v&z|Q%(cvEx(l)QKEy6oJ;()JOgU;F zDt%T*P@FCJvb>s(uejO&MFk$`f#&Wpq?)fbl*7W zui6!2G>nYw(wY$-VV_-1muYejBB< zt6y4R`l`?~90aIx41KHqbKq?ilx~+a9$EL*@rSr|^Pxyx5vu>ie3;SEq7ZEk{)C!LOZS!0PIWHfjjUmB0!X*7XN9H4tc$nv z;Fx(E|K1AK-J~cjB4Iul2i1_mxts>X)XX}*Q>JL{ZXLz!2-EdyhC&?=Ulrp=y!;L* zd+BPc5K-}Zcb3Ftb~l8iJSA}K4S*@97?0ChvjO(fiv1s3pUS?VY+t5YMUnDx+7W{R zKm>?wXR2rs^>>3M2ljrv`O~#Q`_pkfdUY{vV^5AA{eAS1fFPUOrD0!3?d`gUC|Uea z_JWQRvUxyaeeULM8;9|##%Ck!&UTk8QDU-^sPg*zz9p$Ce-;Df4Q1m+d{T;eqQI%y zG+N-G)v>UY4<5@4@u9cj)nCymE9Ytip23A@!^7uX;6MnK#A?FK&v;6W140@$ zC5nCj4-S|>I&Xi4VDTQtX$qN&jk?Ve%W*NC3J2Qkvfdt8@jc|xbE&NGW5w9t+54MC z{W>KFmLTVKp&JkAax?>xaTwAEoZpV6BN= zZc`m*+`I$F=yhMUx-?YJ;jZ=1FoOvw4gD5axoi6U1=ICJEx#w+>uv!Q0DGE`}a{bA+TP>bAH-w=f6tA zi*KBjm>W2_#pW(5#gWvP9Ktk|3Hcqq8EGA4mhr8<)Hp^qriQ^FSi#Ur#Nf${ZJ3FgL{3)-OfK_YWV zH=4bRq0PQ?6v&z`u;_I52cQ6+zpPVnq*L_?jX95>;uCzf`yt>rB)w2kv0hQpuDX>b z{yAhM+OnD}6(@e6D_kRztX;XRQ)BCb#N6a7=AuO@5zZ{H*_h}O>*Naj*&+EXky22UgLL3}{q94KxSEcmtlKe~pDN$cA| z^X(B;Tyy@;V;8GCUO>gSZU0)<1*6x4D|K(*-8_oGc9shTcr7r&e*o1pI6{R1*b~mo zWJOSXcPx~(nxb4buS5Q+G$-3sNYz~V3NzO1qMr><1Vl917G?T<$wM2KyfqUk#tMbJ zog&D&w6doWAeUS5+`x(kmxo&^=vnvY`_8A!yNG%Yh4Eesq^ZY`g*Y!?F&+2QgoNB; z!f^M=$-3JIv#xgD42K9fnbPIZ1eF+7w&oSLGDLiFm;-VQ4kNH6YkgZE%aiYvZ1?H?>V@RC)IzM4lF#9C-xFS|8U<{QSw{kr~+{@I8-m2<`d zo(^R1TkXkOjt5**#l&@w{gjtO0+D)`KT|h{&%->2*$Gs00X#Wj{co98^yQi-YwlORCMSyyjL>!j`icFXEcwjd13H>>BR4`5Bu} zX17Ko>;-s?=*-c&rW0+lB>8mUSjg!=KoMrbaJlC)3$2jSWe=6RRH7bY*-_-mBrw`m zEBDp?|EL9Y|6Sf*oRa=``BK95sM@$BahOtxiA1voG-0%bh3fU7dH2Eqw4o(7u_Yk> zGn~(k!}w?^31%%D#jLO;jnJ`x{K7_2kl8HRyoM5Ko;~avL&9&AHT@4zAcrX>pg!$S z+lBi^yk14$uV2J>j6N;?{9!1PY2fZ|!<{cAclPa(K3+XT@n=I7@6{n=|C zj~a7oPQpL{=SWp5`{>K&4yhjlt=l(m@&Quc0a#tm^APd=L4!XH+-Tiarb_utr~s*e zd;CR zXX&-1yIFkS3DC`lFDDQc{w_f6SCd3WTW1vwPYIr%VDvtUnH)b>bXoIoMdv#WRueTNbt7l~@*cw&ZV4buvAvV`7tC@JJuMi^&JPJ{BNucGw3$H$s zQpEo*xH4RVwIJ7Ain*z8&9gR3e2;31y~iINIy>KUIRq0t96m~7*RPMnMos$n*1&|> zbqWhw=Q)bYk1*Z!EOpeMsX@kPfxdoeC>C%&fg0n_^m@&+ba+)aL~k~+5<>3XspYs0 z&1LS#pFgE>j$82>y}Wm(z=|;x@`5#?lJL)jel|JoN^^=aIc3T$@NZKrvRz~Yls+$B ze7%oQZ-y=E3wt;;Y}W#ude#L4ncPoNSkvg}`B!zfibOJ@)?2igEKq+fl~h+(&wr6B zX$FgHe*k+*@kUn7t5r#E9vjhkslld6uV=y0?JRZDa|D6r#K 
zaDOd(;^q})*WQ8cNM}WvErFi~%tkp%rGyV$q_Zd*n^7fHG!harezL*(6qYBak47sQ zdBL11O)zZ`45Mx4o*(Hn$ZD^Efyad+rUlLp4str_|IfP#G9x-501oJQJ(rP+uV9zg zqB?a6twnk-JS?(UvmmI`+hW^_DjGkazq@R`*oEF&EX{AuYG+uv0Z3Vm#>XZiXypLV zkkZ~cGc`C1{+j%mh1xVBsP}rgF6ackjNH=VY{n!d;0Fc^Q=}AOGZ;{tN#Gg zP3_xiT^78Wy`>iJC39I|h9#H}_eg`@0l^Xyb4It!E2Pds#S<1aY8tIl5W;`E zHZA)6u?7n76AB>-D!1rkLw4*dt_hRt!{~fH5wu?Sy!_G0=o8c)k|WF@1q%*t^OH2g z!r}YC&(-KLi5O0qCdDv~o~<%_%m&{s{#)p-8CS{cy^dCYa4n^s zs3|+|3EDu?I3(pMZp%!M1YX^yd+ZluVm6-nDUp?E-W{WkEQ#aKIr(u`RfwvNpDT~S z!tI-dr0i88BAm(i3T_e$Ua>ok+u`}OCjFjIuoDN(=Qh_GdSEtw#(sJ^3b;RzGKr&Q zACcQ&ShIlr*z`Ea_K6-n)_8Lt18c$g9mQKXTFgWT!OVInnc~Ctw|emr zFPg`-&KPiBd|F6-!Nl$cPkIsl`_L{fPG~y@xXPgZ`-e&e{%@z^{cmgXgBm82T4zpr zQAiv(L+kosZz3(1H%t?c$7e)P&*kybde`lAy!Vdew{e!eE62xjuZxGCUzstS01_RHRsp=_>E;}?5uH4SyE|5iv-{EyB#nwW+MvmF)$e9% zr`z-WpS~SP-M6O9GF*J)=4Ahe74hFQ%Jv^#8F-r(?w|I^P|jQ7^DHVSHbVh=6WsX3 zpuQAC!Fs5wRpoNZfC>tP0ZAYjfIKn~f2u8KF8>f0Iw3zt7P{2FAiNGb+&`D;8N`iZ z)8-0Iir#u3y!Q4%yPa}b6Di?T4NK?e2sn(X;+2JGxT>s*;+E@gAgjNM%nGyVx-^v3l8|~z?xEXU7lNfosDBd=Y=*ay}E(7x# z0X73Z)RSPWIX!(+Yu23?IpKd6m7;5144mku{|0YUw+AYvO$@AC=(_P|*{tZ)x#zHT zI-?{BiS#6~u1`^HzMMRY7@UHYLf{3B%?SwjzJ72t>$oLR2F`D#${wetHl+8}rAGT+ zF~65+o@Gi^UX?~3)grf|YgTv7J{-kmRyJYSh~M)b4<1}MCM78K7vec>v!cJwlcqM2lvdL{HNv4fAAIObAVwtRgD~~p; zbP?GxxvQjRPT{g|PH2;FC~Q^&=!M$R z2Mdh2=5->}MfV;IwqCOXx6TQxbqF@CoSEu(&(V`Eb@j8QFGV0Fu4Y^tjRe*yDjhma z9CsNLtsM0K zsq8)pkvJ0uG3^J9>u=FETZUrRY>-*p(`0yPA^rh`5>JxHqboVMY(o&c&DaZCJ0ja6 zgvhinwN#>zPkewrX*!}QKZAaM2UoY~xO>aK0Sei#pMhQhuk3_-Ho~1rw~z^J17H)` z7y;BKCY4%7o;{DNLM*o&ULO4*Qxg@PMgR5ub%(;aM0c?F$DQ_WUWSj+srsO?koOZU zMpay(6fZ5jwFiI-cEs9tDM73bq`C?qOEqGs#y4R2ImDrT;pH$5SRkG>qFkmHg4x;7 zXK$}?`SoK#^MSv&CxNgbKw-EFR)WDY1*cJ=A!^M{z zvHy`nBVF`eDDApfKNG%`5Y&Gu>pTR?yomx_TB2iyw9rH^wj6WjB9VF9$aE6_88{L>y%6~IoW<6zn80n~w|Ay$TG^t- zYd@2E(6SE88+UlKaj|j#A&Ng4xC4YDrppP&2p5kY~+MS^y=!D~ql@-GktWq{qGf(j5DmYV>F{lz5 z4`^TA(zt5R()oRk!Yj=v-tb`Zq# zW)Tr#iQ2;k&;(#M=%}d;4E<(5+A60iBvkR2NM>mnVPgneb(7+J4|-W!!34>}V&Sh&Otw0H zeXDL?Qjv6@hVuPCz)|=KDE|LPUeR93Qv5tKKn(Xi1YnvE@ck+(53@Qp2+UT&!&^m7 z$f~_`GsGs1_XEX8d&-gPttFCcGK-=`xJ?&yNEkzFTyl8Pv2L1ogiF+l+qj}(v7jA< ztLXiS{CN;nFqmJ11`79wP%$-DG{W}5uoo@SPB*9QNn*Il>?%^5D~Jw<1vdYgw;#6p*h1fG$WOyx4FVBA*d5`Nh1@HTB6V4VPzZ`OISPSe#(!DlCeW_ zicybHp%Hc&WeAWZIq8Ju6M~`voh+GCL+GzA+K*sWYov>-CgOTFE2u*Z02)r(;#;mn z*uU!qQ?{FkLgKq{3{%5cbpC>PB;G+BD~foF)BT8Xwn6PpvqD_)>-(i;(6Hey7)QcV z(r|}Qr0_>=#A}`x)OM}J>8bJRe?-QiwqdjGSm^;$1nC7bP;qg|u*;ZcsR+8ETW(S! 
zh44Ts28Smjzr-P{Uj#64OTkv6(k`6I{C-L+uJml?Bw>9>mX_ji+`E@lNO%b^rWNY^d~;A4nuC^m3lp6X$y zLJ)@ZX#{>*n8FOOey^!8fxN>+a^NM;bY-1rq{HUKR%txq%Niy`9M;C-8^dLrpOrvW z#;rK2jMy07JlJ3ZHaD+b7cnr#F@R#3ug#hwT8WlKV%QmMNHQ#c9dDj3@4K^7sm3^bXs^)vy9Ze?h>0UWNn+_A3_D(b(KRnrBYyvaxR)+;&9zJp1Je229(ZNARE8kxS&O(P_!x12!24w&jcFN zwYb!XlzCLal1QWE?LAvZhrAxm47S0eBP!V5QsZ%D*&2VnJ>ZY|Xbnn@2sHC)d^|`77d{+O`1Yh zZqGA+?=%yHo;EkKjzwu?)BgcbnV2eC|K#V^AGOq3v z@(@QUBX=pQ(quq|3^;Z6@b-tdP9sj8XiC%;J5w~x8eqdVxBp?c_)Ou3_&BsATP%y> zHbHf{X3>bEPLtCh@NEUt3=gJjpxGyh4J{fY5Iud-mFRRnzs6{c`bVEks*pj8GVDUf zADR52w8mk$LBh&_tU|DTVvAd3}EHwWH~>eMz759zC6i{@-udu;2Os;6VWaRGP-;L>rt!Aw%Cyv~1Syw4p^Y z_7d> zwwblL_tz9syE@2E_vQE)sThk+_1{FtgOdbXu1BF7RciuB9@ubOm91==kfbwWY)p(t+h}{$g|L=570-U|vny%z?aQ)1 zq9?123Vu&F&qhjsR96Zcn6b!q%CYK1)CATAi?rWJ6D^B{6|USEW-kGf;J^Ze1OZfe zQ1DQ{^0CstI}l7b{R4Q+gj&~yM%TA*ixtHS;F*r95~`?+WhSxdVAmzA&D9QX=S2j> zkgH(~NXBv&S#GP1oKI)4!orY1Kwl}PlT<})s_)7XIQ3214uwie8|-K9Kx7~a&rb;H zcF8e$TcXyFjXN+-mjoR)LRi+A&8izDK*EDz%-x`6zS6p{Eq#B;y=Cf|7@H6(UI=QUK__=8+sgogW8_{uNGRh@?Q`~wY2^( znE!?VqTc{uKrl!M2v|5EEZ{#sFmT{E2mmA$Dls&Zkm7d?!+LfFH7D1=gx~pS=%nN< zMvlpix6Eu!m?RX!#sLj|{r}bhFks+dU;==D03#&|I+BI|Ie6XWWIs)1(2}O9*P~KV zC+SL+TJaR5QFxC~AQJ*^m$4D`*YXzhU?q_j>xG-cQjB>tSBg%?>XcLfP?W-S%ZRz2 z#>%fA3#9T*SF6wX@}O|56gK@-&-yPv7Q50*5DN7!S-s}^ga04E;we+<@&ZP+;b5sP zI>!6T3UL7P5x*MjB~i)vXZ|HZlLA( zw1N+^F!yx(&$Jj;#p*wmG=|dxe}-~G?lu)kLdg8Wr{Iu4d}=GNxp=Bl?&wHK4Ab?s zFfG9MV2*EQUWg2(6PD!3Gpqmeo{Xeea_$Rj*1m#MVsdJcrZ zyhL4hiB>Uyi>~R;&8o1}HE07?H|$L*c_ExH^#%Nepw)fK>nvmZnyeolmkgnMwk%^% z(-#*OLpQycQ>NrKqa??0g`GhAD;R6*RoB;UNv$k6OfI&gAZKeOv8V>$Z|Gv_@SBtr zdnvF3)nh#uT75}PMJazJ6G6o%x7ZVjj83TmEhL~g1g)hQGX5JRDQlV5UR!QkInxns zg-Bs#YXmYoMiSj|c8RS69;^zsqI4nku_WcF2E#y?z<-}jOItA|`i>9{=>r9z$rPfYYugJqNpdh&~@ zAuuuSGx|azX|lN_TVT6sXs$QiIx>dUOa_s`(ue5`%IX?SP15Fd+%tiU#-IQfj}yoj z_3%$xvA5Hf6^sz0&f988eNBD<9+HaC3ZKyw#Ee4_f+PwL*q(WNr2JuuH1_Kg(7{&N19vd!ZJ$^&#)bRwT3 zZtIJC8YAx(z;uqvp>zl=#M@@>@ADLbOR$S|mgbfj4&YkCNkJItl4NMt!F_=vOE+j# z<@VhBBA%%0dE{iA#aZHp(qW!%<(lViQ3wCzvK*-J=DZj-l+VU!qP%+-mj0?H1`p!HA%%-mHt;(;A5 z&<w<5n*xDO={{y`AX7EG6 zqGUy0f5W?y9oW#MY!dCH{qx zhqi^eCaeQENp>F54jOrRPlL*Wj`lHCRqbEgm*1xVN(t8I9Mp2;yf&UPjfClg8b^%qpZkl`POHc#RbT;&?7_xRxOW*G$<#AX3n z%iS`CBh)f_O5@3`aF(y8mK@Sz#Y>nYMRkYUXQRR@uT+V2@Lbg~G>`8UDQIi#IW%BG$-K*fELtYH5FdB2Tw4tq?D)qK&!|z*vQF)c? znU(%5HvcHgD(UDvkCjeZ>Zf6i1iJmv1V2^or-A(kAlP#Bem7UL{p+9AIigb;@7)vf z#A+CBdKM=BYe6~-%_DRDj5Z!FkjyKO45D26oGC}I@^He+tbkO$T3I7ZGBKoK)ZQT- zRT_*h2&ORr#l2;V)Kh3El%x>66wb;-Ik>|2h5EkDNtnl#c#Wig6~}!#8?#=UN1cYopl0KZSZ&j&;Mim)M}b=)rxDX0d!3t1 zaq9mHvjt50B`w6Ts)ah9U!4B{XMW5VoR9-oNmxo(#CZ%+t5sPUo<(I+oRb!%N>XX? 
zA1d=`QYlWjB)Ej2*!G1%E9w+SL5h~HQz&#u6(?H~W>sLi`>|uGA4e?|CDFvNL36Yz z32c1e0PV5HdCw6utlcMdh*M&!K5KxrnH&YD86(PW+AGB_F#YZFM884a9DA8G}hRZT76`;x)x`}DDB^F9bjrc$bu~$VA zhUoAZP>ReLY@xHpI3u)_aSg7JgoNC2FLAiKx957Eo#mRGs%1^siA3sKvLA@!NkUMU z<%>Sy09>hQB|KiY4Td9Snl&{nn9vzR%%${pyd>YwTy4kfld?muB`P)o{H?whm+2B) zC|w>K$!sZ2p)i;svUgDfv$Aiw+nG1t8s|xaOsr{lnNwSUmou~;KFO3Ym5|!(3Q&}S zN4stCIS+95&ZPZc=ReW9zwS=s$+2aBaZXsMM?d6{&gmt%!H9za(MvI4y> z5R=@FHSf_DwigbN&jMFHmJm`8&I5bdHw52u1`P%*lyRn0LBCxJ7x~bu6x*K40{;N5 zY&6t%71%>;dB-KXv=ug_j(=$&Tcxj$DVWYjs%a2)NmAX;@EtrLg!*i{qBrio2O;hr z+0?(Q?CIbA8mwFXi`epQh~l)0!P9 z0F{ZhCNv*tk~r3Y6op)pk^r%{BWsNJHV2uZ&+ zaGmY9Gl=pdgf_6y3cEo-jqXUlEZXBi`GO=1JcPf>NZrDoaV?u}X+m_Zt#CZ-qw) z0HH}*7E+Y0JBuBT!*SD}D~veU2|@=z3GV2hv~=;eb{yjX3!7hVqxo{fjilK{0IK%l zw)XD7cP+6`C=Z}6*0{C(wNlyzYKvl-z?`r~oX857API`Mts3|Fq zPozl$sOx{fJAyzH zx}BIQRJ0WcwD6QIsEps{m}HJfX&!*;d7KR+)D-TWU(5sC+DCVeK1sC`^Tn9I*gQON zG?P$9{{So*hUb|G8}jk+`|p2}T|)fuW-gzIJUo0p{eDR`8>Y49r1&MaY&mQR-rIHl zH~A#gZ_j3H{{V@P4-db_^gSJ(#d4Wj&6(PB?2>Yf_NH}!Z+fB)GRFH$!E literal 0 HcmV?d00001 diff --git a/resources/mmpt-logo.png b/resources/mmpt-logo.png new file mode 100644 index 0000000000000000000000000000000000000000..f4e060716520ece5db7e85df3c3ad8fd9e0eda57 GIT binary patch literal 28982 zcmcG#1yq!6yFW^&G)M~!Ejh%{J#>c%3?f4#-61XALrEwhsep7UFob}Df;3V?w+zxH zXVmw7zi;og_kZv8Kj*L(%rnni_Z7b@@0(}h^mJ5-2sCA* z)L&^13p&&bzPp;K7aAHr>#tvQ|57<$G&F(?MiOozxhg{u7BMY05Jb1@ph5{DEt!0Y@)5ltmNit%Ph(d z;kKa$2-_6rr04O1W;vpm;B*cfJ;PVP_^@jTMxq7kw%>iudW$o$c z?(OL2%KVEX)XL4rTLysA^p6QH?*FiL_4*4Y6kr1WPhXS^u`i%iF;AUwHm6mZQjjFLswy^0b9|yLlS9 zxjFysWxc<#VOBB(F>^h1bhUBw^Wy#`>@Rv-Fx1;t1|TFTB*-Ty#wRFh1Qd}Jl8_V; zv8>lz*A7WHs2=fVv8v(^7g(W0~LH~i+&BoC#;D03j$1##hZq9C= z+HN)|J%j3p{~}p>R=fF%2s|yM;l37JE*9bAQZ?aC}b3P~3 zc|sjs{~g)iAOGU@ZyyVw_z3(;#6K(r{u`nHy8X8`3MibtP=WVfsU-WinRY@@Yg-|p z6`!z`tsS3;xCDp~DvAP5MAXj4%F0$yRKyDSN4&G+f2+uUB@rb6tBUN24MXwNo-M``E%Rxzlf9n(IbDY^>eUAG5kMd&L0t8Zg$>&P)}P0 zdsICAPl@@<3BP=Ra^`>3+Y9RZf7V)5R0t>v6c-W}Q&jmCM+%~%$|}NQKoD3^QBYJE zl~{im{wJ+LMnIsXh_Ixn@ZTx?f1&kXtgRiOuJ*R5UL^qdzqiBM#>Pq)z{YZ-@?fq z>WRuZZ&Z590PH;7T$rKm?#_ zR}$41{)bLsWo;uNE(8_jgW3TF`9Q)VLVV&vKoFmeAXLa&NJL!NTEzBOcK+h#_W!0+ z{8PdIZM6EkmZIjhUzh(3il{gLjGDHtsGRjg4U*>z?jdMsc53Qi1tb4&`!D^n&FuqD zL+y9&o;>+z+{F8_2U}HXph!G2l1}YHt%fZejU_TNIeEK`LW>I@VWdco!JSH~rg;B@ zHT*$VLg0Z<>B7q+*(IifoR_{k!#R^#o1JeTetVfSzVy2fQ_aknt1kcUg)$ z3mHj13tnRK>%BUNK3l?MYQ>fH7YIViO37Z!TL*3ZL>zWMUA9hv7uaRC|k2zmGt zS*N;u8naw1g>Ik6{cNm4@QC?U6>9q%4k#1Jz|dT*%-d2-oN zSYQRw_+z`XBE=EZ;gIkV3?|Hw-ca4mXvHEuF3R{$dG-E1czQSqx*KLb5ij;B+AX0u z#uTP5x&^^|>~lP%BZ3G{7?u=PT(19X0YMbGo-)H*%n(BN@W&^i0_d#p1zDEhVqNhr zym=B{+}kM0rs#<;1U{VSPf<<#`}fa zJxKT&dMHT{Nh_K&MhLlohpx)fCdK&ki{0QmwI;f4zjY;GFKnRDto=GPjd6e#g3&63 z@JbHdClcIA|B4$ffPR1*GQxC#_8sjF+`56zDyWD|pfy1_YXi8Q*_DaQOTLy7j#2;g@S7eAjK zV94S*AGO@_I=gIR6edok-bW#U(h5V8RS)egP6*DN^zl1*dRTYjlpM|;PUkAc$lUe* z^}XhhV?+2Y+Gdzj7$oem>RNT|e7yE{7luDj4?XL`Mx-Xv=r=m-h4I^d3t78isNq(=#OZn&~&@A2u?gJHU9HHT*p zKUVz4NJE~=kcL$q{AZ$N=?huAkEPe-fEv{`%-Cy8z#N%Ga<+2p z`%x8@3r*+gn*+JHdkaj48Kh~V-EiIy&xhrvSh_sCn zcH5Ut*Iy;SrjmHg0C21AQ%6rm zKeJLs-qP?mcM&bOC@jVS3kI^^0qaS1@5KzJu)M$9J4Y5kPn-L~KK#sTuBY+hUinVAVh#C7Hk7v74xjgZ2ofn|Vx@94ZPA(UD zCP~Ay^Z2f{vd1yq7x%^ad!CXW+ylc0`I;Ra`&FW;{>0~-Ey8&C{i?eg@9NsKbul(} zUOPVPRRGIALwbPkDVIcMZ!&LqFe^jMzQrty`~JrGS5#wuxxheh!z=4VQv}emc3$I$ zmjm>Ml~{C@a5VlxWpq16!IBS2UX{6Cd9y?X zb!VVqjQXl6C<}_%yb%WYL2dX|@)HLQzzhW?c2a-RhztGY7ZC#blWQ`BFG2=xeREVJ zvB(??k2>SHB#)||X3f{1?3EtQ=06WS3g^Hw$2WxZpmNi@#m2vDceCl>~Z@*$q zNMebH4O(>3ZqtG9+U5od+|3O8ljm68-D}?+Jg|ZCC1Wy`wck1Xa!nC5wz;h+%OzO+X%WxQ> 
zPyMV!qC_0*0NU3(s7+%Ur`EYSSPsjkZS|O6)B*ojO$L<#KJex^TJpP!*+Uq z1CCz-z;X(owle&#ao1p6aNrXoqa>QW4g`?P?-D|0{as}Z3$S}55+u>FUUtaDsgX{F z{Rrz?+&F)?7cLdVm6&(Qgkx#QNmggO!mdmFF*3pjphKb|BqrE<7+=t*Eq$yW zlLXCkmZ3@=vA<9)@oveV#zHqif1JwN#|;lkJbkR29JS>yuh$d0&#qi;v({RzAexJ&q zTQ42CG*g}@Zk}&fL3HSi>21?}B)0SAjgKtL4I?5Y%c>gOkGQv(7^Kq$FIlK~jXEL6AY2Q32mZY&Q`aF1*G+#=dXc5uzZW(SN9wew^ zBm21mI_z5j=R=!K9K5#qYS^_a^So{A+PdyDn13qUQgiNVAY8R}`;_@bPuc42N6zB6 zx`K}hgRst3Ev1^(w{gQC!$H6fLjQNVTA}EzZE?qc6$KGP@nohCxs7%u>#BUZPGGQcSWOPU@<^DvHmr|ja)F+M0WrDN8-xbg?+bP z-)1@PHLsZP4(_R^q~|!u0aVp(Ds;nG#0zBi*r(GLRSj79GIwNvJU+Z{NU7=F_ z7X;`{9-10EK{ax$F|~BiVBEGB=UQkN&KJR1@_nCp870uJ&br~2y&hceSYls*ZKzfH z1?zFFvpB|XhWRhsNatSvR6!Wdct4$4%EaBn`Ep#_@d!>GOnxaGdyNA)kY6JPzlq&b zUIPFibxB$U=(0)s6%U`9{BEG=pRrE7zh5+@>))D!ho7XLP$V|wc>4-YF z|GatAUu@2KG3*OT0eIeneFzb|wi(`4s0tbz3ZJXfR!FeQlDM?sqwB^7`NQ@y-V)pAZw$vz{aw(&-* zmE-4OZrJMXR2|pM2Zt8j=8ct{-NRu|>Bly*&fcNPM>A?G0^vb-U`yQu1JB8M-_g@| z57V*2fv9Ot&YJ$YMatj^izhvRgr)uttl;d-PrmE&&kQ6&h-yM!S*cj1JqQ67&cnVLk{~F6i(=g=Qo5ui{ z2H2UFoCmQSh^d#N22ZQhIr#g#*7?!d%G~^4I%Ua5eP3YR!}&?Q=9_vJHcwPfv-ZIR z6Z^N(8DVxRidXo(LWxyd%vojDs%9}TcMivhSnj03B3Nyb7RlO?9`9`6=p`5Ba-7&) zSxMbjOY(fzz1~43nl?OVc=meH*D>?A^xLJ$ELCUxUUQ`z~6fzmoV7H`lBE=gDl`x$O-rF0nZ_;)%z;dZn1aD_D_iDIGv!havEt&| zZL`*8j$QhtbO`xVQXOW4{y{qFkE<8jirw{^N2q$fg!>Z@Rshcyve07ljqnC(N`F1} z8jui=t46Q3m0SMs<>au=n}q)Npv;B7kBR?zD$9$uXA+MPqd5ID*;l)oA6dlPv0RRX zK*4P_M;j$WyzMQ{GjGrBn#ri6T01^zUmkOs?M~u}>|(r_>*(Ui*fu~UX;#=Id`SMg z{Mx#|oyD{3T*Q}~IPXpoWyMl$2f11nM^1O?r{B4o6QMy>`g6HH8D_r+XZAvFwa@1j zq1Im_J@_!%aBQH3yZSVhPMl@okw&lZ9T~VYOG+|cXNI`N0QHU})q~nSh+(+E6{rmb zAP=ol`5PQ9`6wN)KxOSD)_KeOi3VwusLO3j*Ys8^6Sgm!z3|H=YioCpK61Smoj9-m znuBxs!5LI_X3>KWmKSyuN(95QqsjU!bWVK@=q42kax?6t&EhO16O+2%Ja9i6ujV!M zOH#8J+z0^^hga2kYJJ`u3%EOYMHWB-5VIb}NZe$Dv|yDla{}`7Z@pqbTQu%+?vfNM zIYZrB>=h@M?thO-KUoM<{9#nFyaXwMD*W`wI^QuG70NkNhpRxPEd2Run*Grs+M6mS z-6FJ7fO+evi|YFrpp@VSTL18e?8=)aH|#~U_q5CQzFnm-Y@Nt5$qnFAqHKmIhyJ|b0NOCbD_x%8>yb!QV=DUb)ivo68$p1 z?vK2W2CCe;EE{C0`c-c?A;66Qe>>WI?nZ)7XIq_=V6{W~SG;5=f*bmQ@gbLF*WX%0 zgT9myQvIIYfa&;K;(M?O}Yl*kjq&BRrDr$SzC(g9{Xzv!%j7Us`&$K2baxgtdK9NEW zqem)$jiFkvqufCW<9c_Y+C$E(@of`ws7X*%H_J%*L2+m>$EZ>kEoyaIyKyWI-pi74=?c)9HjdNg^ER zw;I6e->DY#O8-cCJ$P0RXl`6>{JX!}k)rcqreKCFkn4s}uSJ*T+0d>%dHjq|+4vgMB&8kARFCnZL=88r<-)x*BqdkK$$ZKxE)qc&y2HXmt}JMN?} zm@j6r-61{;kNiDW0^i{|pgRShOC$K;xMZ%xqei@d`hKPb)vZ9!DVPHIE!c+Hghn(_0pT?_yJJvUP(yaeI2Vb(Lx(X5SU7e8Dc5HB+LviQH?)NpzFW3E7sGZ}l zDvTup%zAcog%=u+Y`|0EE8!MT+S6p#{|+>t-fyKdL8pW~q*{&`*Wr*-WBvGkmYpM5 z7mZt6G4`W;&p>Qa&|S?NPT}YL(EKZNo101ZLxbEK=~$R*{ef@I5Wli0J8gINDosP) zw^mh`7i-qTHLO08aj)$Ve~G5bPZ;DRJG$Rzo|J9il*Gtlqy3{3@A^L2bDLF01n}?? 
zjHrzc_%LEk@c`iwIl&f}Hm0t{ivN1agGoc9S1tGs@TK(KbWPE-9~Z87uusvY7)B6{ ze()GF19d zTwQy41J!xP!`nWx78Oq!F-wpwkVs>lzrNVuC@DL-cV0`nitbi2iT=?$s4RMr4BX5r zlz;wWs((K}#_`R@k@lLQA@a|m5gm9@evLNvns_ih%wW*|)!1}^CDmXPbCl6n30%4Q zi6;LujK~Lg>v}(O2Hjq+E$qPbKcRW%#ij~P4yqDErjX8-P@x|I_yH@cu+Mo_x)Z7#?kfa8N%xJ6+wu-gw8(4}0LK5)?T<~_%^5lt0`-IzlP2ZC`{n1Bw zD>QOcOE2!9!A%@uMPb$VM;{aQ>{Lh?>+iX7K&@v7|?(JcP_*7zs)vFnm_;eXWs3V1HPOm=vdI zxv!ycf3i#Sz9GExaXy|Iu?fC;iYOp9dKEsC&0etmL?H!Z$>qgnB;#R`k|gWvF$FL- z>j3F&X2soc8Xlmi^q$jH{|obxXW`*xYOkaJ%ACfWqxWdpOoKy`fS9^nR7V$$r8FQL z3FNFRas4*W`De`wsnY)VYc)}wU)GFGliM9EG{Mj4EABL_FWE;qGjUr^%td|)C$y5 zwsfE&z!q$3=FApR`dv z+rB!OXXq4B#mS=?fo}J(RFyj>u#U-_zUe$^T8Mih=Ok+Kp20I}tz_}M=eR9`DNY%) z@K03lq`#FYA{*kWpO9d%L`TwqqYcyr14O09N?jN&b525^Th$YvV=-<6Sm6)DZ;H|k ziilKGd7scK3Z*|u(IHMCvz))ZrLzopRmyJxc5TNG_(lRU4`0#8^u>%pTV-ofbrG0z zXgNx}_ndTg)vsIAIYahw?PpI%JvSA)Tx<0za=kzM*p~=eooClr9~ZLm6g7Czsf2M+ z`y9RYU}W$o83FC;$M`V5*kM9ySCXylE=jBgs>AX3$RP%I@}n@E4?^Zm{h{(20g9ZTw2W|npOIQ*Tn&udkRH*RGl7cv0jhn#{NmNwsl-{X`DRhds# zYdl-1S#A?_gZke9lR?AoO zQic*(cTO^b2YTVA;CnsUy+veWhaU88=5627zViMYoxoTBQ-bE)650GP0At4WiP^iP z!gBhN14p^9z^`jDU`?^yeW1%og7Fqob%iyqT1+r6I(xqKz$Xqej>Q7_mCUo4guPm{ z)?7B9Ld)X9p)3~NE(u4m_m+97r=qY$4ueJBrnPi)tuRj*eq1W3i(^x8k(I_vC?hO6 z2|by;=quQryO#SfAg9coLG{-^968+P1E(8aKn^Y`HmBa5inlC^Zp;Th-$dPILEZTI zrS0Txfro2Hr6Av2>O+rN0uqcsaEL07dF{DsNwj-a3thW z668h_Spye9tE}@3lRyw4l;MxjmIkU9D>$7cDfAqD*Bx0Od=~qBmr;f8WkG6lW>YJWCs^Sd#!6{C1e$P0&)60rh>8)tmESnw^bZAy08Q>{+99{t;_S)=T zDfJlBl#sh0)E0lz+cM^XZY1X5S$ZTFOg%~Rr^8!99~I5g4?z>>%sr)8j5;$$U3(V0@j zIGXkq`^z2rAoASr=$vf=K}5T;-Wi1=@0o7ABN$-s!7mRG0oF7Dl&s}F^r=D10VAv& zwWwo6_m&LsTT??_ya|+x-B6Ba_rPqS6IF4qEcdA7bgz8pQR%m0+!X=25pg|PXq|k% zYFrQpdGzM-x^R~GUUClW^QY2}1jaH7l#aq{(0B}ZMX`eLTLP2v;oDmB&&EFYkrowR z_V<7GB6NSyexoLx#q3=7tr)7Uh<`Sn?D06z@m-e?rv!SY4hG$X?Gl~P=jc2w*-+Ck z?OqZ#j)D&WUeh%18ytT;aRxel=eVG;lAD}SDT_71`~b8OsgzBM=|Wf)x`QYPTW%sn zT{h{*=`$0lC1p5WA;vWqk_WyUo`Qah!*CM!sbIzUaRh{Ms>U};QNO$p-p-$uM;K(T zyX4JQ$Va9qxhtprPSzIu(#mOJ%MNT@HioGUVn}_!%mdh0ESCH%6*m?DzK6qq$vdDv zk;Kc=2blPh0aE9n(3V_TC?{{{w~e2Kf3rCprsK6p4EUN-y1msI>7z`C-({g4*10}x(t=={!p>*VAmRgK=T-Hs_E zmc_STI@j*%#X?TcZ)Br1-QIv$ZQK^8fKbeyPqOn4)GYO~-(JQw&Pjv~-ZXy~eYN_? 
zDsn~Xxn{eYzJcz-r*j(}D`U%D`E)AZ_;bef*GST44zY|83C4&ca->%nM}>W|_3%A% zV82P#5&nhA8#!UBS2fneNC=#G{!IdeZ|7l$B9Q0MyKp*j(Cb7*M+}2*cZK3E^v<~D z3uZeng(E1U!9zXPxb!DFm zlS!oHAl_ioMWb8NBVFcq%x>Bra@jFX+1DoT9*gKa#Gq4Cqj(ZVhhP83yGp0r9Ogmr zQv>{VyYU;_3oaZL4akn1AB&x;=c~o{uH)PIOez|yUtA0fa^F<3c#$`t(W5i&tp|Iz z6BK1-3|*!)jj{TH&-IloyF|b3%SqLtxb>Oy7Yg>ByXVHRvuK zI^)^%l%>fBGvEEx0NreK=-RRD@UB&3(cB`v;d=~2mW=i&AXG8Fc|NEN}1@b**cP$9Dc@oszO@!HX=v{au(MDGIXk~$pqmw z^&_*i)>|6Pb)g@__A|-nHYK0Xo`yBld9{4t>@-bN|F)Ks*fG^F9ltq=Dk1OLF7qU+ z$a?CHj2`>Qr(4M)ZRdB$Z+1}5!TyfYiDbv|g)5ebkc*akuf9V@+3IFx?V}#k(hO== zy9Y`wl&_RR6+Bh>B9F-dBqooMY~RYqW?dMUpi9Q(W}KyD383As4XAz2f3-ppcv6$WyXPtkG%qDA#l!DACu2kJ~T%9 zQzP-F@`WQpzIkmlzVp_WW*gOFIAie?86)?d^~{zU05Q`FINxDjv`frU8oL~+khIk9 zj#&;sRnx8ME=6GPvavLRB+#VycG>v*1ck%`o`cCw%Vgy|8nt{s8O+4A$T*^z95zY& z@);4HljJ^6T$Uuk^IXX&WbG0@(>Mc_qR6VjS<$O={kgocH45gGrUG-=lh_^KnohjhDo5gZ4TNqMOAcxr#XLnUQo{ikha5>N54H@?Zzy)%$WBMn^=UzP_auD+; zacwFoS18Ip^p|sgNIjH<)>TyQ)=sX6as3q%w@s6+sz7}8XqCk#zOZ7;6AXGL4g;%O z?Jna8)}zBJ&3r{hqqs_l8`F!v50nL3t3k2qGERfl)!BN~94gdqGFXbd!nN66V^&*# z1#nTOkyj?nY}7V<+^Bm@NVjOz@+`L{<douNWzaAzc5k6xzC`A;7Z?=XFNf; zZ=5JxV9QaD%T9d$6Bkm^d4q>rCHyu9(x$76Xk;toNan)tp;7UL`cL>KwUCt%kIgD# zJY;t6R+IRuEg5v@lJUfA)ollz4U$_$U%Gh1PZGwUJ1Z+2cr1{kZ1U^9TfhOuImNIecHdu*4u9jp@MLU zRMiSiMyyXvqUmFCf&Cxg9FgJYu~uu0fpxmRcoV}Sz?Zv>SUIeCXUrS!wPwg1=U9dnQ{!!Xm�Ei-is;{x!X zRxe^R&4x9ivF35&RRFm~oJFyh*j@GVWTLF5Qb#FTGnp{+ae@f^GgJiNK*SKd24i}~ zxU-bg*^FXTFZ|S3{{onbODaL7Q5d0avhXfg;kpxC_HsIcSDkO()hzc_?%4?qK&s;1 zKAR^C;M|;Dy<3wYX>8a$^-{x$=iUpJC{$eA<9A;W0Hdbo+%%O%(1QdPDbgx*2MGcYNA7rL*`15ZKk!VMx5f7yL% zQP>jB{WEn`f@wvi#boBgIa3FCJUR9!wd=Z=I}g1(52ieEOZ}aeAJX%m?A>{+1JlZ+ z(32?5TF$%*!Zt|Tm|zrNt+o{wx9jn2f}M#Ryf1?>0h;6>YIbJ-Jymgehc;USMH;`o zK8nP?D`{mlt>Qb-dsZOJytI{Nanu2od6KQLPANwr#rj1r$Hy3RlUX=&AXF$EF;1bH25AQVaFp0+RI zl!=()<&u0YzH)i_-D=_(pnWA##$A-7DxVHsaSwaHPqS(Bs^ZaZ;3lhif*|D=1=cet zKOm38S?1ZG7q5b|`gj3$0O^{Mr@@PejA-&7kppvNjvd43qjz7;+bBHILI+MWn?sq5 ztWIr%7y|moQ`d)9f~IznwVqm>hWj;#kJ9L11N-o}nE(sju9(m=R{<$|gTYNL_3n3> z`tzUm;pt<_4G^S%2hp#=TMJM~F&x2Vkb}vM7DGmiVNtcF&vRjh=B!|&;N31=@?FO! 
z|0L92P|r^!u^L-a1ij}T_`A}YvlW2WKO|OX($BS}x=bXw&wed(;vHsYDBpXP&&BLV z%6I4whgGBvV_lH6XQ@q2Q2*>WoP2)u_d(J^r+LBvY69LZ*t~MDw4eK_bGB@J zi`jA+GQV@=Uiqxyd02d;cVmmwqqKo+@>+VB67bQ{{_C|DC+_y9)K9mV>v3#cEhhv; zJn2cfUBLIwe8H1!9P6$TQw0D6I+~YJHVbDm#N)ey)vveXxGFs0yx3ku*#=e$r`}EI zn{^S#SMPF{`V+_N&HSVSI*{ z_mMcEPxEdrQk_;vkLtz(1aMrjf6|NZ$W<9xHT$T3^0HOEw7v|Xo|}E?NiR1wS=TyQ zu3F`V?s}5cI>`t>3p=~myZ|;zDr8+t9C{=C!b7o$SB>SOAPa?9B4+*_XwnhxwFzeZ z3XvyZP!$$=Rsi!uCA9@cGRIYTG*eq_rjPjZ_^^JT8C>O%@u?Jz7hpqbrJcaSU_PG<_5_v!w+vb=W%(!`!TL{bgS(3shWJ}D zkNCyr?G=ACz;9-Vf3hKiR$%M0<}r{Pp3u%NCZ~Y8%7{G%M0r@(iJR|1Up4;&97*lX z1c(nRc$VF+9tXV|-+%TY=oY%JGr!Y?FfOQ|22D26^i_8<&Jew)n3?%twt+5yeXp-N zguJEQ{SD*Bnh47W(M5SO*aFevDQV3yB5xaQP?VYQ*&~=DD|pBSll+KWT`tP^mG0hj za3|M=!xQ`xBcDwDk_6;(gQ;V6$h@egJ&PuE~m~ zAW_tlFb)>iB)nBjngAMBi(V@3o%d{ewYr*WaTPM3jR+@>W&1=f-iX~ekB<%G4*pi!D#I>i^OZ2MsmEs?jEYfVI%RK>ArhfiXy4Hy4CYC z%T`dvvCr?6*NJvurhAt>M7s@#-0*d5@l9LFAQ=(1>F^8Lcu4RK>U;_;_6`dM$Rn1S zs7u4|oZg#U9P_WggV*AEXR&KiWI80r?v-6caiT))!nVDFZ$#T@tQC8#3C<^N1*^l) z8|%CG4q&pUI?OE{K|)$CY@HGql3!W)cggM^J7y(Y1&1v+{$q@{z^ z-*5RPbs_R0ArB)Lb3HLEvy+40*wV;J{#|R)qS}wLB;UOuf46*G>9!%;+cz;@wdjxm zzZ=STk|St8A@mt#gF$s`M+IRJ{5!iZkrW~FV%i7oiA_~Lm5X)YzBXJ z;=)wPXbGL);8Q%Ne!VnU^TeAxMum~_w&5YG5NUvQGJ873)e&D;h-e4NJ2fql#(>9T zA|fYTM)x3oxeAvr%e)&zZd~8R?>Jtdp4p*Vhl1%KBFf7$z=>ZAKf!JMw^ zkVpB?GG~nTjHo8N?w$K3MOtj2`UazO`nZv!N>2In+xnu&%){I`tVV7+O4NfLH%O0E z8D{GupA@VGwNkvQfhUtxQpE-P7+dvsA!`tVrWUppm4(^m+u&5zk9mGQPh{~qN z=J*}{%YzSq`i=YWIKWflS`6SroebSZ(xDQ%!eLj8C!)ul_|7|?L`(bJ>o56ar`IL2 zS5+I|=B_;4l|(Q-D7mix^1S8Ag#Opaa@2v}39E2Qn8qE)9cEpMq8d!KN1BEhvh<{n znuIWS!+jddml^T!fN#-FRrJnOG}qiCYRShk*zi=HZ|oISU-~YpsLD9iBBj;U9Tl5l z>J&jZipLad4W=(Tl`CXpVA{-@R(_wlL4e68>V`;5!yVjkRq~)IOZu$uy~a+_FCdu3 z#ptfOe8q{`+RQ^_ADiNTl(QcRXNQ|{W?}y{Nr6ZfhCfa^(2ougaSE7~qOOne8mzIvIC z?n_e$W_#u5!!-|FF#}aW=Z4Db#PhFUTa;VsJWEM)qR-O7N1IVjw56E>O#n81!^t00mHh^<&nT? 
zYa2(;f=eT|U9PDcR$}Xr#$$g68vURjrU@C3F-GoKu04tTV0>g`=Hy2q=H^IYK+;Oz z;yJ?Rdce@kbN$45yqlt~J%ux@YY%YQCeiD#(qWR^?()j+MRj+UKrL>%w{{Ke+sLv9c7yU-Hv`k1)$4hdzqlElH2W(y$|!p05L z-68K}HO@K%Esky?sO}o)4+*Z!wA2G=s!u0{0p!@+0Gyo}b{h6k3;O`KdBu1!0Wup0 zk%)p^fLzy!Og<0mde?m+`TEjZmr=CpdgP=v6SZsXN`YKJ=0A0`X0D?4Y^|Bat(mwU zJg8O^z~s6v0VhCc|6I~;F$u3$H9CIUY2p*PH0Zw6jM;KgV%u2 zU5wDYtg5vUVSAH1+^`VF0GCWM*UaxAFL_;@42$i)XY^UB8+v)6GLR_w@L#L^l)nwAkmOEfrVzC z0S}&o9#HUU+Ej3x0K9tZASghpS@+e$AVz-zS6f2YbrgP~ebswNCL^6y@14OnZ+$B?_E+DK$8XBC<|hgJzT^FfA5_tD@{SeooXp# zE?od?q?=}xIe>TS@L$VNEnN${^iB9byxhUY-&zy*oFrH)+jX%=J|&jq5I!1v+lS(!yFX^NH~db&?7i#-Lf zEfj^Q;(RIVn%>8Ud`9gxa~G>kM$qLFAOUw>*KZAmNuEJ20L{i%lgGUt&URJ|=Z^EA zG(C6*C|+y|Sm#{@vJ^d(ME!Sdn;jUO1tnlr?rWv|=mAdlNp0hLWe235i4-IqLqZfy zRApSKjE-n$xE?U+cu{;c1$V|}o7vlFykhP3I4J~!HKZbso;@*MeIh2TRd5EDw{Fyc ze0+j#4Pve5)jtjf5Eblbp7QtX&h^(h*5O)}zo(|pe31f#2-)=a-;h2#i0L0rAUoI{;FziDM`omqni98EXp*oW;GeQ zCeATs?$L5aTqqIWN;BWyAwg%#8_z2b~`kg|ZRzWOGMc8m{Z9eG#vS!D#Z3JN~JMp~Oo9Fhs zs^a*oJJ)Y?hGd7STXKuVDnBd1$31vR>_k+*_kkK-jtw!y`##EZvma@(MQGfL$`~X0k@~+p?|->qB|+Z*7&@B@?~AN^YtZ zC%knt{3C2^tcBrN5?1s&Tu$PAV-X!$_`S8X@>vFZKf)V9+okeSLFPIx_NmgTi(;ryr~ z#KUJr*15w~;M|>L=u07G(7VpSr)&Il_Te$4#D1T=n(=e|>RY2`i0cCQY-Xkw7Vvnp zkqfTGgw1(ZA~h|nm!BLXuj)RmyoA;EgPwhuhGcFj;YoK#@+N0AQ@m@3=#&B0i20J0 zWm3Yn%8ME(%-^qDf^g@mo68SmXc*+*n!`i@d^2>pG20?kf-_s2i^$#e?iUiZL2*f* zDhErMG!5@rs3%oE$;34vor6ub%dVlP%a!m3cOH8VkGy+Rc`};PBu+)o?+Of5Cu;Qo zyE2a*rv@B^4IXa{9X)MNb}ju@j+^O*b}%`#5Z+>gbia0So&}3>4KrZSW$GlpZy|^^ z2e0X;TEB#SF_9<=fcwPqI$!NE*J49Emw-#O`kAD?$mu#2GWDeIqPmje2yAm_XK(7HU{S4+*!S97RyFZ*b+(bWFumMb&N?7j@A zs%A1n?Gt0~!*EK8Vl%AP;{bU+f~fCitVw&igK} z{YFx-#?Gsf(%$xG5A;u%fTDqpocn&^s4v()M)M^vb>eJn2VxUrs=UQ&z}6~i%9`?4 z8K7u$zmJ{-KMOBMHP}eVA%E{_%h~_a(^*Ef(Qa)Rf(B1;cc(bT-Jw|MON$iOB1MZ7 z30fQiv{2lk#ogVZxLa}82G@S+d(QcOWF>2om3ijbGka$CzVBS;EZ?2EvN0lCEWGscNPY{`Aw6cNQA@^?#mzI4K3#RloXjPhDMbfa|C?CPE&GLt zD*IMZg`!hRv8)Dvf^cZeLqMwy0CV41O!p#2^fiZEl__I(-S72{S(#R=XB0>d2<_T{ zvRUQ(NyNvlh7G6nqAIyxYpu}_Rh-LdA3e|}Vyu1%8y;&sNIGCms5ZsLy}?U3@LdGp zxL2*PuU{nIX6H`5A~6|qe)p+pMApQb=N1~TnYUp%9#EML3pJagFc+JA*9QPJcC?l>(53v+ADL+ZYZ2F>}=vrG(^KdjQ zDY_{wXcO8qz0=7PN6c!b(?k*^cBo8o!>FP@%qL~84_L&kUg>D!k=Zs-P^)?%QC3mB zUbvs(jup=o!H9~tQBniiK@Pim+V*zye3~Wqp9*I@EL!mTjU06sUEHTS*O7L^Mq_p> zZyTSOOP6)rUd+jEI$$uHxy21~L^tF!yB?@^x}SZ9+yVn8MR-R}bsu5j8u zpTrK#My!TviU#=b2-R-~b$Q>kR2V`Bo4(^xCGA1{IU<-40;mk z>8l78yI1$^do1}}y62-H>4V3nX-0oO^Li)|_{4$R@nt-<#$@=#Lhj>M)r`<{`U^SB z7?17uiMhFp#sZG0tKU?avyYrPHK$X84fVqf^`i`%Kn?KME5lLFoE7l4p^gwkQC`J3 zX8XyLI)O)}*3`uqu08bz)LOBj0pDb}7R`{CpH1ONpq4SJ;6M2UC3SZ8GB=?K6*Ay3 z9u*!+zG@tha|(Vnh(39Nn$3$=8RWgtfOS&s!V2WmFfAijSJPE6Yaz}ZkA|j(WbV!k z?K3F&BaIXNesuTSK6k*iPGy}^qSzADOJ!#$+qkttsY?lC(+{pJpGA%w(e*4z!LiH; zGAT!Wy{9h6weQ-b%zBsNc{q>Cir9Vn4qVq0(}M7Qq+^Goj(=0fipgtCg1R_!XppVY zx`$0SiNWnyEa!_Rs#Fn_ z7}xZ!^kR-=DqI*)&cHh|Jqxzlm3`)e_WDvcQoRaJ-wwj8uXGG_8xPY}pJXWte;2jM zpVb~=7TbN%JhU>U(3Bay%26_ic;Dmw9SXDOwf2ToGe_;RmZ6}Ppe#`#<0hN@Ah#Tn zv+OdmePEM|z2i9WZ7SV!wVaCcf&@RIJYGZ7uY_tsJ7aT9k+Qw>5?`Mo}L??-9RbK5e!pRhdBkF}R4dzbc|1f!T z!SR;_DotsAQU|4yDh5UAeGA;sOXj2qX>Xg6c^5-e69vUPU~VtHxo*v@9|>}3k*jgE zO)W&NmH*7&%g2@~;uTH>ph0~fA$~~7|fLYbqqnMty}wKG4M#ctPk9l}nw=9lfA!J&tXMC)&R_OnLUv-JCy z#@}%ValJW8QR(`mFxO8}gT+^3xKo&aVMLaQTs2ShNF+N~_tpk6cdqX3mSIl$Ni2Vm zJIlByDu8}!hdjt%UdKqCpk9Wrd7m=3SG}zN^nyY9PawoCVo&JlADjG3O_aVZ&Ww6);l7Y6dCGXH8S!3v{$nn2nHYu!n{RZVEI0NUpI zylo4ty)o5Hb6r#FgnY%|;eE1>z?|`P7x z)X(RMd}M0Y&s4G`WvC0$OTVITx4Fe#Fwiz{IRL^``OOIy(Odw4Tr6SV{E~&AUmTe$At^(WEL?g@&KWTAy z<#D7NdMf;i9yjnXt)fE3(v?o$n9b{?BvjDst7Cyr)bt0}*DU!;q~44tAJ)=>ZOU2f zy8PO7CCTxS)=+8aI5KubhEl5#3j4u?h`UwS$6BRr*7q`rKY$3EeC}YgwVNmDNGs^A 
zh(`a22O?g78xf0Niw&>rb9qFBTYzlfU#~$W%8RmXnsn6xL9;izmPvwsYYwD7s0f>} z8&^AH6ZX?jHaQAaH4CF8M4MiaT#P{x#BIlz6XWiQxUn(+Ii=u{eSm5EK!rnBOH=L2 z&hLrYKahvH>1+Cep435kM?t38a;HNF4_kEj=Q7@HQ?otBY zcCZMdA-rXaD+hX3N#7vJ%5q-GcqV0`Wzmm+>c7}r9;=WCM zZ_+$P&Kzbj0~Nk|)z24=v+0@5`kxF_)GbIR6sE`KL2}()GR-mf|Hyj zaps$F;({9qL-r~MSd2j`>uDsUElDroEbaxKY&J3rr`xyn~HkOT9V@H<3A8g>8$D=^6xlfi~yF?O)vcFfsFYoNjZ8t0tA z1s@M_^NIIl2k2?N?s&M5spjafbzm(ceF z52ojwhO9^ibEO|uj6{i#@pt3Sl>Kf+dN&oJRNp+yjtiy{m570kNj=(=jyy6sU5QD0 z)Zm$%xHk25w%oUVIJ4R{9O2-mL@od=*v-(VvNUAQh^VxDD9x$Mk!f81I8; z8G4fSh5EU;@6fw`fkQF+W6VhYWT`go1JnZZzF2jUclL&2f`$J6+L}?dl?H<^?a4Y{ zF4wAmz^oDnrQlLWdq8#<*AS38!W`kt)0J`+b)`fZ5_=7PCW7H+;CnRSn=C$L$){T{|Y@eR8H&Zd|4 z@O$>_X62%fSKYSQ@Mk6wWDJs%gMLCbB_D8l4*v{QDJOSi|sbseVchjwofJY4Dtr_(2T+5{p?$?G0{ur_O&^|!A( z%R_~=38NpMCiPxSDbl48kZYF8iaEu1@$(17mSJd+J>_ddmm0I4V?9h#qr40W&c7^! z?&M0Rq(vhRG)Jzn^oQg;Q7MryxMp0H32Xeo9Tlno3Dv1W&J9q+D1Rq+!nl*42v-hE z_*t@|0s{cXM;!?m{-8$uhWq)(*|f`qcXLgy)t`6v?wBrbTuSb^_mq(v?$OunpYMqx zhnTW~Ev5;^Rh-}FeD(ig)uV9Wd3BTzQ7@m zMVGN5Tp1=i&!lO;kVZ&~C7e9$;(DilGyYf(q{a3A`JlSq(m0DK5dL%F0^b`}Tz`@* zFF;yBUkNqzGut#UZ%7>|YbU$quK&(nL8`zfiYmx4HRaSo{+4ndt;|ijds5Stgor#^ z&8;3*8=LkA;j7HC)QYoHwU+ywE5fig>CD`<_p%d zTkoRz`iFfTq1s*cl|2{ibt7xI$9`LNA&rtiutY^kZBl%`0!~+=jkBW~`R2$@9w<&%BnZ{Tx?V?!#-Bex zXGhXyUy5^~mBR+-5}pZcQ9GT9wQ=W|66hNGGl_kyGgu_laqda#<;kC;PGTQ8adHF{ zJU#o1Sf5bgV_oC3*AyG}OBxb{JMpKbz$abm_%vszDxnboEU3;P&;T zRV{|yH8`pQIUv3(6^_^t@pCH&wKh!-O|vgD?RdTfUi7@_T`#j_oR^Fzvzp+^oofdX z+CTXL92?sOlvQppWf?rrm*6^OzFy2P0MXFS-V~?>q zH4j_-REu_ae^s0e#dM||(sWe)f$IKs%fo-hu6cuIUC8H-g}4JqH8-7|a);`kD=8k5 zo?3nuBu)@p`w=#2q3Uwr75jVt~iuk?fIoBZKg>&DkN`IvM5PS3`jCz2zWkwUPEvwbo(lw z{9@c;CH}CNYcMxKY3WnxhW%yzj=RYle|NJt8^Df2(e`qgWj)vS8?xXc4>l;QB zxW~XG_PZ>RAy5EU9Ad}(^##6*%a{Qo3ywteNv$_^F4|Q#*aYsa>s{}Mws5)ah<8@( zBZ7(tSW=(XaIQULU_clH@nbmp4Xndv;{e^v3O_mz6og#|4mNjb>+zXxs>XA3zRwR@ z>FK3Ycc|NkjMpln9f2ld$J3~~tp^gn#DbcsAsh_B%%8->A&oXZDYktxFVkI6-b~Y*zZ=}VZRd)b z>h*+W>Y?SqEoNEkU^G&h9X4;2x3< zch8(OYHJ9Sl@o&Jv8pU3FIJHlpL?EmY8)b2Mt#NPtjy5bUt6`}zphhy0l|M@18x`P z&dq*gSbq8?PaPM2>(HM^`|kY3cfX1kqsE@(SlaZw>tw+etzd>U(4yD3VQh2+IHlVF zM^imAgDy%V%hzYB*zJm`(LzdHT(j>L3(0_=`ELW$ZT{0Ck@J`9)+@vQZM?d!<-Ww6 z5gzkQj=urt;FV!}XU==YeAL=Fm0Yx?jLrvtg=@va?ilx+tRZ%VQ{*vb>3(C#7&4Q< z51J$*k~|fqheaOvq5@aj_1Jau3OJ)KXX+f%5K{9TpuX+Os5B>*nbR`q_f%|=9}Sx& zYsz)&2v#Fq!ptwyREc~Q9*SsEAUU8v@A4may@94Vb1oyUl-uZZeaerKGBH=MqOV=l z|D28kHCj0DsgZ>=D=~c`31qNM5^_aL{LzgEg}QHG#|up5z%!xSF5>G2eQE1th-*TP zj(f@;iJYnD+H70n3@<9p_q4joBuE=vJB*=r0$T4IiAfhQedjv9>Tl?*?*-C!mr0be z8u9^AlX4U?)^+%$&GLtkLwdrg@V179OtEdC|0p^`0S@gk@*f! 
zF82qrroJZTZ}+9O3st&GytfOhD<|yLEmSbhoc0QLC1KTD&zyJXk;j<*PmNzp0D4F~ z7kN7R@sloyZe>Yei8H5ktl^!so55*_E1 zwfQOREx`Q;*5pWinO3r+VMb%1p&e~Y(fZ9cB?5F-YjH-zM8@jOnbldg5_A<5kQ(3{ z0C*&=@4CRrSe-0-RGA9^c$yxmX92l7%Op@5;GmV^*Dj0t3p*2cQxu{B;!akQl|Zk8 zfYpu&A+YUBP~-a9Lk9{inm_n$tf4oe(e1{^OvaOyZ|~toy4*;=^Rj(qr|MCQwtPL6 z`+93j;4`%QQ=l~3^YjM_tOONsZ^{IwzU57mC_23I%#ciGH|Vqubo$B}U?!fB%Q^1S zEG_=sOO}isSCl3v(aY0);l{Ryou!3hA~r1de$3h_Nkwc}C1V0{^*?s=o8r;vMl2=%x;V%f>FM?f{= zcdzOlso$qkx*`5X2(kDD;#Gq9-0{WHFpkyeW-#BV#L;lvqbj3@y@Bu7jz`7SMry`^ zxwMiE82Jg4p+aW+M&=hboRn@2eGx7C*2tPHWqH;g^D~G>&XNoAhEVQ3mw0Znxa_G0 z@$9iuNP8duG;03V!R&xkqqx+FyF@}isiZ--8b$kAET0JE0~_3c!7o-&Nv;&N{_m7X zm-5*Ex1Z1K5V|%fm<0(9& z!ybHx@~Q?6&2Kd>CA#x4NiABAnPIP{^ExF757ry`zF%dB_mAo0#e7PDCKPJUDGJ<3s-MMqejsQKzI-_T*+|FWXy z$k47W81u3)+M@?Uxj!V?fEKAaPHC+B7FCC1O|g$zF;uzanS1#K!=b52>QhYL?_a%_ zZJ{02P>NMGTK;$S;^2)y|S9YHvW&t_95az;cilC z4LZ&s5yU%1r&u_OE(m>`bWd!^@+12>_8KmGXLK{Ah*;C|P<}5F{M+?-SlNW-3|f{f zZt;OQxj|42l!VochG!tse>|o&JFq!R10S6bLDLk}Wo7RCm$}&YlOel}H(sE#2a^=6 zOeCHL>koWzl-D%?%=PWlQ31K+^J1eAIc~xRXdLiHD7`I4Rc4VjRDEjvY9_Qwy-KJk z*v0;12(V=|V*kSkN@Q{_NYyzdVej8-&~5->K?uSHaW(W|TCjr|U&)XwQ5pu9aswzprRzD% zcDn+}#_mYWT*bBxPcJq1o6;2UCu@f_9`2`!g#V|1eL%FI!UoS6;C2Z1oDt>@@d>J) z7Xb`KQkAv9afQAcsLI3yAOQ#BZ~$>%NMlaYoao2dsaZ5eKw13MWUzu&ytyIiiIzBO zE@S*d_`oJtTr9@*8G~^*!8MP3(0yqU!W%`;oK+*L5cxcg^8fU$whi#y@L_Q0a^-U! zg1HFzgF)p4x^?RF4mq>fmN`UEedTSKmZzgVYJ*{C_rW&O_I+gaeyha08N9@KQQhvs zbyC(}h0r7nSdQpW|9Ltyzlg*e^f1kosdJDnm6SPF4=b~YRIqLS z(xMc2Ct;(ZM`DnZEsS=a%cAd6#lu&EA!Jfqp*e{OoEd%WGed};#V}moJFQu!M&X}* zm1kaOl3V4{qKl);60QFp3c)p@?6&_4_bXJT8=`6{8tJ2g&$iyAQR~c(x^|715-T3ZA zSEEyh@UsYQY>n7rA1r4ss}x(H#Q7$vzGJK_xF9wns^NkfqNoSXPnr*V%^uzkyds(l1&tOmrlqLzH5p6aP0h~~{B0HXO zeD{Rt5N-x>*cfmJ1nXvFFgCAxwj8uz%pQ5n+ETUpju~u4#?}|-s>=2yR+syv=(U*2 zclW`BiA}_uq9jFrX4}Dsz=v#`oa0@8BNcSXCxzju+l}$DkBQRXbwzC^;UaUpT1j(RH=}M%Z#DyoBR8!eH;_*KYXZ{pp=u|5{e5DTKrNJ_pxaKZ z&mu{UHUF&?b6?;zB21ij8*T}dVyt89XgMMrB6VSc(3}y$wqQp&19^z#ggNqXACm>t z^*4BDlYUq+%ajBWZ82Nr-$*-C^uq;=N!SByq?!=!46|(aKv!=CbPS%s&U1El{Ile# zbZSWYr#K??4vwbH2Ie;mi)@Dt-~Mxc(IgzSTC|LMXA6J`$#MjX#pT9w6s!Ll+AZCo z&eGm(A>T@Zv$E~PY7f)qy%v%E?v(&JASwvdg-}T_-{t5g&`K~ibzR6E0Em=b;6lAH zy^`79)c-U#=k>)TY^9@va911ydk(F?D73FR?n@ z)MnYOae;8n;T;UN9UPi4%e;{7;rp2X z&b9!4yN|Y>#+n&u*mPtf#O~61YN`EMoEs+>b08*ByL zAho-^BsgNK;&iz)@&&yTHYb3HfU@`HByQqlfQo!&UHjDQ(^f!C#pU!w!btWJ>OD~i zNj*+dPJA5Y77?74o#a}V`^+l4HaT#UCg8ZxXi4$=CGXCZUy{0~4#yKu-{w3U$#bCj z^99jG7Jb%(NbJRc$XM2vh)JhQJ7BD#Q*=eURE_^~{9mY=nv4xyH)J_q-6S#THJB$u z0Sl1&1-+OLF@Af1m}ZsNFnD5a(6mm z0<9Et6P(;7k`&FxD1<=gC&>yz_3YF8IZwND@i*Prof|Lj{)ITJ1Hu8Yl|JC23FD#4 zc`GJURhtZPVJ+DF7}<9Q8~4K)xGR)ykJ(y@=vfsR>k**>#G=?hFFF;Q*>QFLrc?y+ z9j7P6z4^d}KvPECSlIF49@CghW29H;?2@8ONJeVS5tq#$cVAUqFiM$?I1a)=RU?#x zmgTh@2N|O-NS9U;4M;NI5ZQTj8Q!rV!iZJ3+iy$?i(B5!5M^bO_M#|qr@`8iV#hH( zZgf8ca}pwEEkJWx#ilhrpgxbtXhO@BfV8R`-lN=OLO5|4#& z&@?F2I!8ke$O^h$0^Wk(>s}79&ztbj78yyYdGE9BY~(?xq_{&j2ooqJUNlX4r$4fZ z7%xYxLQlft`X>mE*YH`j77WH`60LmhzWkSI>%I%t=7nHgCB=cGw7B?rp5~E-`oR+r zU?ifB5?P>2q^9f#_f9uI5b1l=c?dwfZMsA+kjJ zUlN!*e9*>v+mQ&C5l*GlIALy2{sWmpSq;dVJ1X1w%0s5_ENEQ5szfI*URs_$AYE>w zF(&L@niQ^)Hz61~T9J{RA}w1raVwc6uFB_`YJSR9@aR2^Vv{f83yE~YN903fcDyr2 zX?H*oo5-D_NRwT4TdAHkR?#;Xjmi1PC41g<>J>TutE_2Zk`N#=+Fdev7rv z$qUI$E2(MlvY()7YO#C8K{jx9f;4JKqXrz4YMgTq{tyyHUAe5BrAn(-IZq`!z^&3E zi^>XZH*g9z_0>YBuP^T8lU!Tab~==+ZvSD&T`?PW`$gZkA*}Adew08<275%B%!|hP z8R!e_AMm*TUu>2(kN?8M^`uJnQwX=T2EIj!-tg14=4=FigX zyB#w0e}Gs`o{+(|!1N2BPQ_cyZkxagvy(T8LP*3>&g;BmDB`8!34c@YUuNnw0XkXN z@TmljFY&GXdR-?E9b9vrHjt6p?o0$?pll9V>T!0}!>$$g5*btEG)$6K%JE7v{~c~} z<~>umbuqwBbQp5kI17AnoTt4Yn{)mSEA&sH_AM#=jwfN{3Abs*1d)AuU7o*Dts(t* 
zJ2LBs@D!ugWBY|THIFMkMEtv>AVTCexUXo6f9|-h-g(m50$o$wrVjT-hG7&R$tf9p zsuvQhBP=deYDdDj?s{I;X1jJh7BWquA{`X98QRlaD7HNazk1DcTJ&~7wd$PUZslp) z^(Nl)lx1&4gZsuWedQ+b!H=N>gYunJaM?sp+oe9N=H9>lDIqL&8u$*K18D{{P1KleO) ziw4D5XI`rA7I&m8m)?H=-`qyBk*$MYI{MO{E3V6TQU)Sy3TV$=0cV&DTQP!?xTE(# zjr-j(FV-|kE|M~d$*#8gasly~<;qqT*MrITw*Ld;7w3*QhQvHeLd)=<z`Y0jnM zd7vwMgmaVVU+l|i9rL`}VvX>|GD8UmvR}_}=KqcCzxi^2Z{1ko84b1^=xN?^xQ6yf ze%=vIg+}f;6c?q-S|A!NLKvg5ilbFZz4sP3mbT8$`(L`%-~FNnkhk#qF&T=_P>l=o zq=)+}JX66wAhkfFG=y@!%@-RMY&i)_8t)0kc3!&? h!u`+IGk~6@vkP(maXdrC`1@o-RYgsOayiqW{{s|x8hQW# literal 0 HcmV?d00001 diff --git a/resources/xiaozhushou_weixin_qrcode.jpeg b/resources/xiaozhushou_weixin_qrcode.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..873a0ba40a5af1baec49c11b16f86edd79714eab GIT binary patch literal 42538 zcmd443qVZy+XsFUa?7l15)E<>A*qxOYsoFS=RRvgk|aq*&B?7+XpqYijeD%ipc|Er zE=s6D=%%KcE~u%QF4Igi=gj~4p4e4+fA8A&{lEWzyJ6Qfb1vWS^L(Dq^Lai;lcOoZ zewsUL&MZtz3&Uo^KbWQ%n?KXrc@u`&*D0MPS9n3xPgpxGZS8jLwL5fZ4;0e<^_FA-FXNY_6iF=P< z>SXlu{lq?tUrUFY{9XYx zD^{*@+^}(z(`M%_9y>jEd3pQn-hbd=K;Z9(f=+~-JaziaA7{^9zH;^2_3#@v|9lV; z85JG#@KJ11a>~=xv}fth-(<>rBtJXm>m)ram{lacd} z*{b4po>&q)rw@0jMW#J=4y)f{T*OlBHmNe!U^7iM*vMF+1{=IcgRy2(2Z@i@i88&F z8mx3QrlNxs+O*+eRTmBRdcFpGJ}yFoJzGj7zb4jmnnv-JGv5=+F8($e>~aF8!2&PS zNo4&Jy!P;I4OZKSEv@I|IM6{(^de6_{qsc@`BC7H7k?!038>ye_2o)jeb81tRZrdr ze*UZmJGs_LgB`n}UDIS-LGI;8v7V55_9_>;x8M+2Y^1>scvXqqG}zDak+t01awRTn zY;P6gW724PG+t~pwecxC$VNvop=hmri9^a`dzC@*Im1!1Hrl7V9Uq{o6gaax{w;N)x*s0n9YI92~!knRX*J3gudG2y4pn^q&$r%v=lm={>Yk&K|CG*}mh z6p{3}21~M%yTxd*OY2to;9Ki_`3lv(f%)-gRA*BQ-H(TRb!p_~AZ#SM zepi_5*U{SJrmvael$c*d^xRC_=1I`?gTwvlS_%;#6LrEg*77IyUH9{b(|c>l$Ns=d z@YQOwPg8*4IuB`F32!fLs?eLSI3>+mLgz}$?`=|z6&*Jt&&qwmI^$m3k3XNB zZea3qzs0kn{p*fS>(HkH@68>lU>BwB^~4WxdN$xyZrwMRv!m83d#&*yHlH2DYuxo| zX!y<(qt!Ru&xdK5)ta|mz_5Qt3W5_x(uJG{{6yY>&m3LuFz*T#JLMdHTpq`%%9tyD zq55T%JG*AAWW$l_;9qd#^G-U)W@3JaXa2LZ<6x~q0Iy9nS*Rnb|EG)cpKrt73$#vVK{Uj>Hh(Zyw%1_$t`O-BY6x==&FQnZAr6TYgL&%rLe*&v);Nqq5Q&S3(iD2Q z2K!*3!R#k5ld{x5rO_@<)C)p&LO_y5H^|!Gh+agst_BMa9gWe`KEkS`NxbL`2mYcP zSq;H9JTy2` zgGG4-bNl;FmZizf1L}#YEN2($?DY?@!?VV28%mWuy?x@so7olR9!H9{V}fHE402|S zJ=I&JzWh<74zz2%RBzQa`Qy`Czl=$(pCGk{qSOla;#g%}EnHL$_MO#!t4Sp&?L~Jv ziWZ&B6z!W$uy07BC;+cpjaRRRMSseVf>kq9gN5``zw(<5&$= zJ(pZZLeAq$-w^H8^P~=tynBcgvlE`ue7Rec5_gObsG)4nc9#?`5GG8Bvlh44Rs>!q z39>j9Z&;1j=4-H;Q(Wwo7KaqIcw5Y+zN_4TdW07rA&Munr3ZivL8KFR4ab?F3m%nq9K)EEQ3l@hfC4dm9%>qjf`NbZvq7 zr0WSYw^OO^`xA#G%!16nmJ0v|B0z1_ZTQ8NiH5LE{)BCE2EO-fhSyXO?|Nyl7aI}q#)&`}r_4tr`-CvgoW{DN@s1;`bpV7I zS|}!n?=T=_3B>f>_yzfDa)<#+ZSg(1)x|@9uMVEYIosXz__L1jg!oXp^)&Gv#5Gx7tK%lE}-8OT%@!2A}vjg#%t| zun2CtOGSoz8Fj@j>S0LZ8P?D{v2z9{I9X)sU|kf|2?wYf2>^AH=lJfFnWyrrI0{>$ zc!)}`Nhgn+TS^6I8rV0`Yv8P`=%Umrk0X386`;Y^lZvf13Ok;9I4KPTM8c+EiH#RY zSdkiRvtKwJ<{JN)ze$6+?5fTw)q;qM!|v|}YYsx{b(cKY{xX$(9qf#y?A#b}ctFA= zUJzS)oJg7~7uB<>>>v1uwnm)GqKN$K`7xOVjosaYO^>UzQ2-!p$$LcAK-DPU&D2R> zU71OV?OqKQZ9V5U;oLP&AelN>^ipLL?&-#^mQ-K)-8Fa;dvrJPh!b{%#WD(F#yAqP zfw#ofoN0e7jIrAsv}$iqYnfR8oW}IiGue=Et_Mh*WrHqZ;KGNb|<4;FsaB{JO`Y z8U@OQGMAN5q~s|ZncIGgpEnQP_O7gh8cZ4tM>qp+yatnyDDFg@gSeBkz923=zcJM` z%zZ!H`3}S&zI3UZ(uO?5XVRkCDT3qKCVwr}YOMJ-b!7?ilS=}F>2BQThL!;BL`&#Y znPKow(Sx-<<53KMd&jjDih()3wguaM*@xtW|1OR$)G1=1T#I=9vCYoqLdLpCKwXs| zP6xTsi*~xvGX&Xu#V}jR3|Y1uV$#7d?j%Jh-O~hi-F=%xI?&0fl!}lf5bE92yHkbp z31ZiT)QKMI#OxLs8A5$)_(D&sY*NF9x~##{52+#P04xgIdP}K&kyTS0H1c9Q_-1Lc z{s$?sO~MffKT`m&TLLkZ-MWnYv9#X`x{>b`5Cz-A-a3~H06;{S-fDz5rKgj{*&N8M zy-d`1HjC6}IoTP#wbVn^ixeC&Z@=M`w{(B?yf+%`zS{PR65kS8SNyP+e%-|$kaOYe z;E$!tm21Qn*87Y}g+6SKooI7J!~U47aH4zS3R`Z5T!Yo;TiK70gt@`JVS6OUd7T8r 
z#~{b7I_F=XWU;to+Xk3*0=%YWK;CYF3N+>jjv7WBHAEEvzrS&g!9TupCTVfzzYC>q z2XfLS1hT9FWeVM|Rly7WLT7?Gbo9`7)#D%Zf_TF=G6&+v?YcgE=y)J&##EX2&#aF_DV{_v;q3y~o$>5Im(zA-SLgGHjPLGG3 z2G{=?%Y+L!bz1LDSJy;_eU-Iw$9;YKmdukW)KD8$^Jze;$e8Ky8<#F1>L$n8rtva5 z4f_|=5mwL@v%QLz5$6TK?}xRP;fd5Mh`hMp2x)%aB$bv+dEyw5x#ea$b&!7UvcRVt zm=bVS2NS5$!(wZzlDbPPFEvk=RG9||sp`V76V^USU6g33cXaVegC$I0R3%ZEY4EnR z_q$JUZ4eef_HR2kXDxNux6*}2w+xpvs{tOv}RTVuc$zl?8SLn78W7VGiM|?c9`MOUEob{{zRIoYpfd zq-bVZ&c`a?{-(Z9k{o6Skqg5d+@W)zNL>BZrB-%Mo3p!hEvx1`UnWeVNW6SlyNnJ* zkmJk?K2Ts;%YP6{Y5Oa+%+Vq2$L ztFSJVd~Drg#HETjsp4aU(#bcnf4z{rdW{D+c18kbKA zX0nDjzSp`LEkAYy-RW&#IbNL&B*e)T9OX*(W^ycYGGo=wS-0!LMm|CR+;UgHX1h-h z^WPsnsmrk-+Sx@uRf(92GhhSg8awBzpyupRyl7rSZ}0_TYbW+UzcA1x*0*NthMT^C zT3f)vIdnd!{0Mi2VV{%m)%z$hV_jaYs=9?FW(LvNA7iUz`aSUGm%Ov`?__K&gc=&I z%EaU2@DhapJq@3ehNQ+wI{|%dey*8-#k0yF#*irF;~!0NThdvJ!`NBxK171qZ}fP@Gb+-BwVtDFLNy$&W#K=)&x#X=0}j9=0}n_WE|NtnFx*j%uV8)4aDPjBl6T? z9%(}7J+7149*Ume`;#mF_;1-wS zJ#H3u?R*2%*2j0m`Jpn@2?B8i3S#79_(S2nJ@ve(d98?%C84FI;`@9467)x~5^iyf zAc2owB(6TH&x|k56l_yl7i{5`A3+4t1mjY4t}~3Nl<@;M@R#^^#fJu{`aes4^hg~M z3F!&_pw)<}Qy9j0KR z(r}A>T|J+^Qvia>yP%w~GmzM~8cw>Ah5v)EzYL1&i^9B72n2##AUK4^HMqNb z(8k@}-Q61}xVtp&?(P=c-5mn_-=2AA?#!*abw6~S>hs}Lbyc6e_gd?Devh|uKb{Kg z!OHkv17ee2+~_|(R85IcK~FvL!65A4Uzmu43KA)(EMPLiI5~PFP0ZZQ5Z zk7bH-E*^h=yEKf()yw8F3e|1A&4@fwH3;YC=ci?7b2?NvQSP6Ysv$Q)dMCZ{Yh*!L zouO)(FFlA^(hge_#+yb{Xsrh1CpXc_VmQpX8Be(D_m^+=Ht(?>eLW5Xu-oyGWTh(m zdaE$5d|`L}Cq(ZFxcXFUG@15Fh*q~4ikD!S3WtyDaL$Nby4=x#B1io@rI)d_`Q@wA zk;IN0Zq<|~BM!>=kb+m#MM#YUFf4{Lj;6Yw3-7lRL5;lA_$isZrGrUqz@SxW82qeJ zH2 zVDuUPnR?o&?=%#YgL81M5bu1~Ij>!qfTWs(@cvqj7WqoCR2Y9LY=awwm?)@{sG8MC zTU9l6q!7`7vwGnt@y*NE`e;*Jj#gPYe1-?&4~YW~=Tm*gmoT=Lr5dAY zA?GaM@U2lyIplA#g{$f?s8+BX%hcOT&PnU!tO9R`{!CT~2j%=tFqR*N6;pwEz8j^I zq=moQs2`8a`H+MAp&H4USKzJ#w1QjVSQR0P1H*r=MK$_wH?s}aUuG@YRWehpQkQV6 zi2QR_QFyGR40#=)?_6ip0DkdJm5j}!*ND=TG|L3|n4|rb5F4V-boG;b?aP_T zWF1uOJbaCVg8M14$BPxEzkT6+c+tC$lC_Y-KdGxQ@0i=4aum8sXAYPJ%BEz4SXKzW zNvPXZ6_$2s$Asf5DXrVwx7(sDW~3iEQl8w#rJ$+XqCnZiNCmoO7vbN^hE2}oUPDBi zD?bHUTgc024)v%UC-3~b{jC1@o@Q0eCQIQQEg9<}*ZSo?AaTPAb& z^_+fDpX+*Lu~`j?rz=&QPNduTDw%tiT1ho%k2XjQ)J?OVUu)5J>#3H}dcll>|FHRb z|DL_#JC}S_+!B3;?C&ll_?qVq731o7Bgi4oPvG>C!}+|$yc&>I980$RR;gZ%({GVX zG#y8Uc+~v5)4&XyaE=;aIRLIOa&@FJ&ewWU8CbX0M8AHiL|(t2H|$#}K=FE*WAPXI zfH=Jpg|H%gk`GN`uOt#FZ;b^_E~CaI((1yu`LV$gi7kB$l3n#sH5zQvIo7}*YeP>j zBqF7>2%|bF-zqxI%}td7Ah)y&v7W&QoB79YsR5^$jrbo_C-tf^De^%Mtwz$bU47za zAOA^m;u1#jr4gn%a~i4vJfE zI5{T}-`&jRXM%`Ao|HFCMxv~^S(rhEOjeG7fnSrItUt@|k6+H}bIb$k3T|5X73%nN zk!e)b-nqe5v8fR8G&bCFbP}( zU%HIjkR603j|4^xTJx24M;s-KH>Q70IQzXoQb&P&6}FP1#2nT&(D)|`f9;hz$F zMsNMmEvJgDutY)ebtVGCZFyi)mY^>!xL~u}qacISFxJ){`FPM3Rx z#vN(9zP}aTe*T=uLBcs*m#m;0Hj-SOhR5T6x|XTOIdKwbEZLL`?RzTFwjCnU$%BdS zn(|6qaqd+);|9TH9>#ZO*i6PXGo9Q6=f=gFPx_i`LDC+zA~#h8(@(3O&m<(rZY#PYUxY zqAB-qgonIeY-+w=999ZQ;^nB7WyB3Fkb{$`1YRrpCVk)L$3@ES?q{miVN+(j9+v3n z0D~z&DmSaYka83YNm=r}2@*MG+3$>Oz63EcM?VaSdRupVdX?UT8;yD+p*~MO)$TUL zmQKM`uepUcRPSA{n99UqsR`YuZ@?*%eAb;FT%ML_S9KzRS^pHLq4Xo$&fqw@qT674 zKgy+VBzHGU6Oo8eXWW!MssrZ=Xk#R?>YwxwwHoBex;)LSR%0S-@jlWEQ%M|DHR%QA z;FvMm9c#DGVSgWXkh3%^>_pe0Nsa+PIdu0UZW8?+e#!i*6uZhjdaB&~CyZT6V-RX& zBFMMAuF5TBgs+VY)_I!WQN(RE?YpXWYF(3*n1nPpgxpb6l(UB*q^dj*eUHwga+Kpq z1ve=%3VZ<-n{+EhY{8&)_BtpQQ`TH5%EI2`C8)1S3}03OW0bY|)LT?Uohh%NM5*|4 z8p$anUiM9458{Pk-)=J!y*&=PN(%(c!q@xIs%9Xa58|Scz6D1;WShXv zU!}CEM+U;@6F(mHAZkfnf^CnL6R%MRYNx#P>i&ZwoydJVf(T2qYb>SLAcnVFw15kC zYOS*&u^-S*Uy$iY9QCNYjonR#30gBa2%VVqHA*+JQz|UH%)lgR-iSSgE`VDwM-EM? znJ($M9ldR7h? 
zsSMBNv?Rl_O&DMnIA(j#acl-*F5jaRt=l>7$ zChP*?tLJsBk0o3vjOH3o*M871H0A@>x5X{CTOR?EMrNVj&58It=a#K*i_VVLo;L4} zQYHWduX|ydZe2;CUhxdv^Let*^k&+a_q&;>j0BrmM~6WaCbEPXLmlxy`*!x(D>Zq7 zDqE>)zJY5?zc*`<5NKQfaT+IlP2ji>9%;SQbZ=4Gu#v_x{>euS0i5c9=&ZmK9eKn} zDK5Lso>{NPpz>?ISZ;>7gGJkv2;>md-i2N0u({=US#(9Zmk)ktdF9dX7m#T|%<^S= z5s2?)s5sq+2BlZ-74yn%WD|aIWo*94IK_Sd`$=-p(3dI%>=PUsDk_C*?(jr(zMtN$ zLrFJTi6nDWi}4hJu>y9Kr!Qihd(68sU8BK3B6Jxtv2f9nX!6{hn9wzsPOrYUHPJLm zWD$i^$=L)sZU8UBduL+2{D(kDqE4D*OPA=9zBVNERli^YPd*&ixxj-rG1|l_Xa7MV zbdy@^BX`QGH0S)BGt)F|6`ITsv{-L%=D|4CHG~BY%P_JyrXHU-*=$l>BdfV8F4sST)4ogQ|B^j zo;-l_dKrvZM)!_yV<>ScRy3@`w2iDotjwn^)m#afcJSpNbfbQfo!QY&wl?NjDj7OZ zLQ|@isKmn0s8DaYRza(id9TH?I6F+GJza_bMiz;RR>PbWb;o0DihbJzHC;OPKIJ|xk)qY~ zx|7MKmk0d0>Z5iTI&4gq3GoMflPfhv-53q8Zqjz+jM0oW9!nUE#lvG&o4Q43vwuRw z2Bh+xtL;jZn;>KP37LR9KUA$pR#Td>`UGIY+(TvxVhYk$A&gjm$fSaffGan&-^+kYU|qzS~ugm z9}6L=QgEAL+Akvek5#J&3=tuQQtxLUjW+c``vKxE3qVU^Q;VYP7|$Zh5`(!EM90<# zcG&d5m9b9&grZJ2?_rEYEGocO`Z1dmF$TK9g= znO=6as{%I|N2n}1=s2r~J-SAWX%;kcnBOc~efUF|zVqK^Aa3mHl2SV5b_$I46pAq@ z=bWO2LnAP*3M8HGVF*(KZ*qJv?7c#*Pwp!o(Wz4gRoPzedbiikN~HW}+ka47?3b1I zV!LUUeb*VcviZ@GlK&(+YI#$XG|}Vbsc}mV=92Wvr_8Mp6|G#Hq+zP+nAMfP0h%P2 zlkwB(3U*#yC2Ksi_sHueh{9S$hd1a~p~|1Z#?qJILQPv;6VXxAQ+<+V+30?J=Ym+#u{9oZ>1C+tx^lS57B5s3DngnFH6ya?o^Z zJ{AmITS7&&Dfbgm6+9s8-$GHnq#@(}h~1LKaF3P;E^bBgo0t{;W&)X^1I>6`k7qqU zv&z@+t=O4K-a+cdv@*shc?r{yMxbLy?Z$v5e;18@UGbHr<(%hRXs2I2(>*vZ4Fg2) zLWp?Fgw*#GYXu#Le#L%SWCE)83CEC^X*6E_kPJMf2+i77S87RfJxF+S231=UH%+dM z!$UMbg7=etVeQVrQyv-_b}TIkkGS~V^O^P?@5A%>?%0Rh=-n*uoGU zUZwh_olonWB=_iGJF@Mh?OgpBL_SGc0nLzvSL0n0bQF&J(N=P8IY0X+jpzTM@Lzy? z|3STfmz8g?OOtrfEaIEisnk8AF%u} z^WNmPhP%bTarID<)LiBWGz&|@Y|gh)ROA-?1Flbo%0un%=3L;LxunxbWpRcmIKtVV zM1sDC{hfi?cV~=ZPcR42CArpMbss+Fs0voc4nVE~ z@fyiYNAxiF3N+1{nU9c9Uc?Qi_yhqh*1Vw4L0XIklJPnyWqoef04}_uR!PQK^c|9n zM5AFiSi&c~Vc#-1l{k275D!A48eI^qf%D^~=48+^&2tSB(Fhgk@#^wWWNZO2=U2TK zD)XgWd&cr<_xM=w3O_5z3wbiypZoGak@fl(SM4uoymWy)UXmVbSZ1C2i-@Ih+Bki^ zn8LyxhXb>wBE|Fw1IwOhQ-Uy|AO8uA1fCuKl_pYJ0Cb}U2-6@Mbntit3oZQzbwB{= z#pxQ)escLMqCJ8laiHZgRjim<$S@Gl%PZ;P# z7%;70SpBXU;Xd}GUVh1%l>>qI=wO65{%y5??+-WRXD#E`<{fkMTk6lIZAqNNA`li< z+Bp7z%FK+{8*nEQMrd&iZ>Zn#{4V3QHvdEd%~4<82Q~VWJOlLMQ~DWX-SoF`yrQZa zuom0i4)5PSF`kpPQsqN0OjLZ}D`l9ZC(ANH?h!j~+zQrs(eARn79|>FIrzjrI2!v< z*9Xk*diz^S&V5Q&LCkx%$7hfx?f-8Nx{iO;ySC--x+ETo%b8E(j91g1KIUH!r^zXY z^&-PN(zD@iqa|0mYNIFFe*^83e9&`}d0-XBiI2vvV#V-#KDl)c)gN_5A6Fky_;LB3 z1U}9bELD^ghnfFvMo>1 zshz-?^-#TAS}KelLfE_okgGK1!(X*C(_)}5qMCAIeHFd>bvl8EihOtg z`_`_mXac#$l+@PkrxU`tj_XvO(;SU~(7947Y4UZJ+Ny}8AOMVQWNl4Uam$vudZz$| zTcZLbpo^CZlv?vftW69Ggh%F`!w*wFRD_52&T=m$7qEa>h{I_|Ud3p8AC?$q7tl(&_&Ug@HV zXMf%@apkiY>x0l^QE@LRx)YiVbJVNJOk2BGKIB|%@V_BX{{IKqc`q?loQNIvo!&iZ zq{(K6tqM8?*#=(GPy^q0YAUlKA2>=%y|Xmj-R)}L5kqg>W*`OrGa?~VDY+5ZXd z04%p;!BSFPN84SD!ySd3dGPlz*=Y`|imZbc4NOcK z>TJA-_&J8z1>Ay z@CJMv!c$0>a*>@^KnzGWS5 z(Ie`y^*CQTy}_!$m;lGsH}!|6M9Gw>XG0oK!odzLO+r_Z)#4cX=u{YoYalYQ_T+)@ z5xKWfUQ3LP1HzV9lfSm%#~Oy7s0PQ!6WaJCxe6!~U&sW-L@ zjzn-~b8A2OHYtJtEb%=bq^^;9!B=ZcpOeJmv6sw$%78UR4q?d&aZZqJT$vOgx*e|0 zYZV#ZLp?d>B3^o6jNBvQ)D4g|r%fH!;(MQX0gf26yv}xP0#0iGtS+msuJskdW#JMh zHyzPztGaP~iK6|HW;yheTwaYl%{X*KC91A9Fw3k${vQ3JLyDg@!xDw)I55etoG#%R zvnyaf>TWxH4wiNDbs_-1%gZo>@p~;?_YeN)RGQTf|B(H$v|%Qfb@Qw<{N>#Im&CJY zwYIqBIWh+FdP3SzJo3*k-KH-o|GeIR-h^QNeGxIgB`(JODJRvx$i)ZoF8+N(T>B_< z*%Z~gk&L)-F__o&B&7G;P(`lAJ-%Am$(l>f&`5WfYg300`gWeN!cx#iGc!6tGi0rf zu)S^mcN@dsPCuqwuq92d*;^O2Z)`1<_WYO1tY^owqyEn`;PYG8!nReKik^<_#28nWvH&sY(bAOh{bFfxMOr`#iK5?(ej{qb&9#y~>N%F@8pWLI z7r6wxzrA){kQ|KjT#Z~!%3Cug!d1t(5aJNveyviX^|j#NesF4`Dvm#m(tT% zt!U3JnCAN{A2?Sr(E67`VKP#0>St| 
z;elKnyTdwReFvM(d;O@DeH6=EKKS^Jc;eft^|58n_vGaRX5K_^UzQPdr4FhRAfuqH zqaGCq)enw1e18?RzxYUw627*r<aYYg7{c|lPNk>}qXnoH&Y*>AZ>s*+->W)ecusLV=+=Z=Kl)h(@Y3F?MLzu#&iO_6bG%vAyh=EqguuwEU1mVY8$(wE~f9+rtI3W&dV>giG zw?6sMLb4ts*Y~3w6a;j83xUwmv8naezQrn%|SM`Pvbyx>$p2Ikq(N`iN z7zjZAM%|2cwLgdc+HhGMAP}JL(6xykBCrF8w%lY&a&je#vz~~`tmz<*9JpmTdIH@? z*}l(PIFq=TPhW3tFigvJMXH%%SO5K4Z>jie(g<^oZ2=3{G2fdi>%h0xd#taDg0U|8 z`9X0-Z!YulN1}-230K2XGjQFax3)>z9$}x%1Z1AqfpN^2`r3_FiPZD}h9K@?>tlUl z*(X~&*JK13tk_caF`biWz94HwhSdFvV*)S21@$HSOy`EmeJI2eW%9O}OlUP_T%w(W z4?5Z>nyuZE^}|8Mi6D4^@N=DFbWwYT&OVb*v~%FdpRTc5Z7{UkjQ6&9gXt#xMcw1TT z0fEnT8yu{V8f(m~&AVjNq8hDd_hMpgWK}h-6Qoj*_B~}USK+`@*#jEk48K z+&`b#*(ySH2enXyJ2?<8kjLsc!$dam6%2++dCI1PI*ZUnWsV~-@%j)QZoMTqQG#Yg zPj4~N=jeVt+B!bN3A!I(^jtl7P{;ukq;WVn7PIsQjW>! z=z<-|<3e5yn}a;cAE_cd?OnrV&B$?%!Np3^Dho{!KCSkonq+*W1~pSB;xa*l{r&IV zd(3sMqlK$uoG$EI?1nSao1WP7S9oIy6sGjQDoSCn*&7o8wAfp{N!f#ra;**j=;@wR zCryIF@p}2oZ~nq{sm1&<45Z@!{YLE~_%Gzsc2fq2w-W<5l(>DS6S)DNlu#y@v@=vwHN9P=^6nP=EW)AN*BNsK(CZ zJ)ZzVv6ebNiAqP~Jex;N(+_p7Lr%j9KkWO|O1G7?6;9afxb=9f?7Rn(P_4F(_MYUf z#n_*M%*f8FOc)$jdYsUDKL`ydLi&|~9ZBvF+9b9+N2bU#tb;OZBDC8arsdS`j0o7g zI1paI!J7&u^8(wlIuNOKCuZev?61ZrC|PBd;Fz237M*KsNNfZ8bA?dF2%ejp@0l$U zsKlnXU;-H`|8hRpMi(JZ@;Vec;kPE^*8eGQEB-Tz=yR{b!BL?|<;uge+L9JC>0(lu z=x@q@P~DQ}3L4o+v?HA`=Yg+_^|#IeeotD7Hl0P`KNW>|fRFs#9TW$kdoX63)y&eT zsipcu;+KkaE*&TJa!KTbKxe~YqTzm2Z zmrZ6ne3)9KPhF7WLR6F8UHP|t_qK8}e||r^==o%3@>|g*BE;u*%2eJEO>GB@KMq|B z@78_SaE!$eiQG!N*tAgsiw3&y`G9?z>A}RwQHRqjqcuiEQd3m2lDIb&05a1H{GUoR z>T_usuA(RCK`*rl*?*e?;;nTl1<5Xe^+opz?ee~1=BSas+k4^?3XCUj@cp^ywoPm} zZ#nDUDbVibhw*Rs3#WQ{iR1w+gu$a^-M>%;QnoBe(61ZW|kYO#6m^G{q3E{Y6s;y zF6Cl^0TQXhiM8j@R#fZCUj-@RVc%(Qfx(@kJF!}}KL(ebG&jU9Dm&51q4u3}LWq~9 zZQ0+zw<80o5P?9WbU|0F{8R<&-%%(Of;g{d_*U=)-cHW^k+y8gUwX#5_BWO+{8yc0 z%E}E_!nsxKGlQ9c71QYKq>a?+=lJ~ z8y~x;Gw2_+xPNlRCy@QfI^p<$l|FbzJ(!IKsL-Cm&mRl_L1|r%T05D2HzhgJj|w1ecS`6YIy|{V_i7mzH?$#H&XmbU^!+uWzu9W5`88puS8sGo#XG5JKf#NmniO&Su}8D<<@q7Omz>g z0;%XT~%CHKG&+;DD&KDJI1?vJ|ZmFigXOeYo zK7Ou2UtsPXwYLEKnqB<%rRzoq_o`Sl=3I#>zJExSjX#HWCA=cgrT zgJt#{!WR(?^M8w}Q1AjTS7QQn_Sr@|YyW*mSL7bS zW-Y|-k4I~rf-)COvy0`)BQ_+c<@vCxYnDy3)ZnT3SeV6Md3Ce3^R?Ow;HDVpvSMmJ8%#9(3__(%_poCk4H`&15 z$GzH#h`^xJCouSr7dbe`1FrTeKZk2Gor4!0S~1hjYq_K5j>V}kyXf{!x!dk%46Mcz z#|2E%5R{i>|9RjOchxBGo=x*^aFSkhB?R8evH#iDOW1GLWrVj25YR z2u~no!O{!E6S<{AWIuF9YkQ&5+M3H}`S$`wR*r+!#ub^W8y-JG&A5hN)$6;P`IN`( zYPLa3+-e_wsGRYaqKM&ytT?Fa-<4mGzM#z)rpFJS2$RSCa@4I;U%lCQZA#rGN8j|h z@>9BwqqWu**%#^jk>UO7L3*JV18>BC&a8{SC50zTOTB}fQDXZ9?{G0-kGSornE(A) zUV*QE6=cr?K`mVT%awR>AMrA4iy+~6#{Cxynew>nHs}+GdVa>$uOnb8Z_~gefDZN2 zc5@@rGJGP0AFI~yR7WSNB|MC=~)~N zd23q@@@9@)V{|cSd0@8gC|Mer>8fJi$`eTs|Hx#*+dz%C=5X^0?av#Z9ilj1{6~k? z75MQP+9raIT)`O^0%Io91%;!~pqs%15}Az^Ho7i`pbf(W(D=lB?VrgC9P-Va?xoB5 zvf>K9v+}KZR=b3~$isxr8P(I=@rJ`h!Bnm}Nxc1PO9cvCsH_}R+Dc)3j{O0QIN$ST zs_zLBRGJtGSur_qtW#?7f21QW1==d-wW}~ZqlEEB;JMxwzZ$oLo&(_Zp!N2*+U0bk zseZ>+zc&iqL5I>qBfYK!c-&EdG-9Ke?~N(`l&Q_^>YaXVl{&cRF$WcUJP#3|u~)?? zsB%EuqeLFVbogbhIY5SF z!}v%{1g(=|&<}41Xgrv$ zre~N160PZKcCJ=gqf^xs?uvLfi;s*OaOA`G4~x{ds}#~ighlflwT|-VnxGzTe{)6r z7tla^gJnNu>)f?z94AVa#AOz?Kxxl>kAzwawY^ftOO_wP?wNL>pxoF!mofGBWi>8D zBkhn=1~Fqo|2Oxn7?w+7Qiox&#hT;-6%uv8M3*xaH?Z72H`g?ny2T55CA>X*O-TvV^S@7 z?QeBC4QNLxWA1$foV49ER_J=wc=G=&G+k*YYx6|fn%%=clkj(^ zuqcp)iE<-e2uQuWIvPg(x=dF0Z#C6i9kh$ zk-Ab9B4Ksnyp>X!WP?P>1nw5MnT!(cKVY*|Qq@SEN|$5yQv2%jIVnoZPs7=d>tnh0 zWY2oW`Qu5$!1atk1x^IGxwY@(8%;b^QZFxjnq_%818<2~kxyQ6#{dzoVo9?njK?tM)& z+;T=uRew)gSAz3%<5ylN%ZxeSnAUL$>$kb0VDky^r?WW0yK{847^n-EfxNPXT>!k^J? 
zYDmP2C=ugLWL8l7*Z5X%?bxK}Xct$9Fki~6EVvt^)7J8>cZkk`N%wOAb2 z@Y1oHe=*a#8lkX*nk;fCRT(?3kGzcQ_q_)mE2Utq!b8s3LJCN@>7wg{MAuFdJy)-A(z`w#vw$;USvW8lI}^pYO}KV4(wo z|9f~U!!xfWgrA`R_Ak)!E$EjIc~p7UKBL=sXWzFba@=ojhePURadLr5*<~ujkwG|qUQjg6DzMnQ+81iOu2&gPM;Wgb9#Nl~!n2^-e(v z(uW+b@OKS)8=Y|^{YC2VRNLd?o~iapWra}=)n+eTeJ(dPROS?$>K2aOE8rEhSJ{%FGClIBuP^ z5_3!fc^%diixC8e&FDE<^XyroUplJlnG!MQJYLNWP!BH)0I_1THJv$ljx^1fgH zamRnsEav$otxBW6rRty1-ER>`r5Y+QReqqYwpJ*I0o5Hh$yZV`_@MXiN>c@|c+o~z zOmCi*KF4lz68y7l+SK@898~{<;+ecnDbRT7P;Ufm8+iZyoRRm~-`bM`pzk*iXsTRV-4G%8(T zdZ<4GhpW%vLLkLUXjN6|`E8Nww^~#xeEe5A!tq`S4&xGUb=zzjIt#Ic?>^j6YIP*U ziw)$VLTT_@*$Cq)pGRNKDKGPj02^He5HT{Aww~Z&1?1PgMYv$!OT0P=o(Sk4V#)^Hp+c^;MbyZO|A72#@7H`KC7KUg~&Tv|^P2D2C?!QPrd`XKRew*u zrW9}RBVG>Cr+N1>?}L2MrUPXi%bnzCm1LXuI6%V6cx2z)YW^Y|Ae&>u1_&WfBbI++ zACo9nt6>#h=CE(T`9|hR*7&fR-7zYQbUerXSZMOX`Tz9PkGD~AmQs(-Xb8OPgTiAw zy?}XE|3TfMK!P7@Z1r$hp554rZ<$m*MSSNTO4ZC;!(dV9z7itms_52$3Wvz0T^(Ss zMv20gJHf`3ZS5qQQ9}#G-d%E=bW*9Wohu}5M84-uEsZVp5k+(X3X0VhWMn0azw_K+ zu=SL0B@|fWtm|XRTLi3sM5Uk7Lf#%*hi2$CJkW(t8M-dKDwM9$pXLIT9Z zczfZ$0s6Pt=lNtYw9QO%e({YS^a`pSC+Jg-&`4?Rki9C$kNA@l@1XWKgf57Btl(RZ zA;AAkPSCUCq%4l?OpSS}&!HSL&{5TA<0*Y}6L^A!3^DH^k(~lG@^lau>Ps}SRsiO% z>LbZEA85Gi3ZbAalqxWIgcP}|lr(q)IJ|eG=CE*3)F z#=Qdn_hz9a|5@HOBr7#uRaHG3EL2=~oHT?NU$GODQC^qGp{^72-ny5|X8U!5>IQ4% zMa9nC!We$S41N|^RC8&(f6Ee;DdC}RyZg#Tz`}#?XQo4*>E(~=U(I6T9f&1zL>7{^ z&)fBt#X5Yuby$c<8W*lz`O2})O4Qf*>$|PJL*TvvbFS7s3Hl4eC1}lNu~W#T%>EC_ z8Qs#_9*vruIefy56F+TKr^={UJvLHS7$3%^E}QjY@U!NW7DmFo>ebJ7jqRTrJ2zHk zO)ZwV;(*c81~)+v`(+N`W+{1||0o$MVTBwuBW8Y((S)A1I9`QP6niI3r z{ddGT2gNab+#fK@OQCV!|bYe(cL%F%=a_9YppXxTEAwAval^y ziM@lxC8%?dTi|~G{ssGVEhnocH7=PjMt&GF|%Mn=H;@K;?oDiXpuWkpe`b%;m&`eo-gi4$tS2*LrAMIUmk!g_AnTMPl zj+rnL&l_W{9YSk((Xgn6p~%>Zyu+8&fzCN!Dfaq)hh%M0)ROo;0UFZ;O5}+|1{HBN zY`b-e32t@3ykd2IJulMsNMxsFD&`SFf|BLL2KC2ubWx}Uo@HbqZiZL>1(+JHvb>ot&O(jC}VsR=X~q=1n%15E6BlI9G%kbaZMYHlD~=kiCzEY1X7WQlro8hZ zFSmv&=hsv`Jb@dPsEj`l=VIWA@TV%n9M-z%v9q!LulRJr+JtC`iKn6*LHSEosZCby zaLst4up_IOLp8wEG3#E;1T_y5J)<_`d{5KQNw+y>x5u@*Q$7PA%OFcG*jWEcUur>B zf~H5iF-M+26inH08PePq8%RAhviMl3aHI_C))!9*WJMooC87nu8X^`{{hUV7Yod>t z5)!STVin*Tn~}lA5nljPMErO-k8fLoKPIq?(EoQ}z<*fpICJgnkxg zGuTcWXuOA8m4Us-xq4nea5CLq{+Z7;I`|23ODXMOs^_OvA0AN!WxWJ);UT%P!@<(f z@SKWR==ztQ!kb6@63~vPQtYJ50vYZ$TOx5dRrx!e$hH(93o7_U61&*mq;^yzQ5N+h zRui+#m_(UQTHRJ->bG=?yY!CkAn(?@?QmRd~<8r>rZP0_h*uy|A!oUN%bU-wOOm*NI7zKo*6 zZxJq0#%qM(#kT)GJNY*J-+bA*|LdESx z6j^!QC?0c;ysG__i|m}n?T3GFOS@6iW+W%hM$0(Mc+YYxChMiX@_@^Ip7IxJ0ymJ% z?&Ed%trZi+Ry+AsU zA$aHdf_`(&+qO4qodcxn8`mR?Mx6pQ1*Iu!Q5U%zGvzxO6*;cq_LVJL={LpRX%~-z zDYvLw8(9bmeeae11Hft@^sJ#=fT+24w+3V6HBPv{{iJ`uGN{ zas$4w-Q$gK5ujVLz5Ee|^OQG!#g9ak?+;MP+&%j}EUy}yl+?0iKk$gd1S9|3XG&a` zL2IGzS|1f$*mHvu&gFF16x!;>ze)=GnXx z!08h>&NwTA!l>`D9bnW_0-1o0ko##h+7o5Q#;!}12EU0!^rnMUi6r;u3u)L!jzAMo z4^-?xmHQM&(`W4!-xZq8kVbi#2Gdlnx9$@M zr>86;&sTY??)PXpX>;)GRV5>~@qZO7o~Q#eG|^i^L6Xk3Am?qhPuGXx0sUVV_qzJ$ z&1+_X))bHDJn?lkhcV{YQl4vti#xy;rikkYU+KnNY;E?G@n#s;$dv|wJ3J(qdWGwi z@ETioU7W_+_+bmBS6#y@#gxRg$+#APt#uyl$@?DrUNPsAAexViCPG`h{q8;SKPr;l zq6N!b;Yh{5V;(1F1ByQnj$dL`j1ux4 zio3$KK4jrN=B3`#i2QATN{MV!!{x!^W#AA7w))bT2sHqDzBy+#FT=MGeVpD&T!@lq z{zesoA^)&0IuaGN<@LgvuaGuJzQZGN<^i>%vI3!{Az=H>9c4O+p=QF`omp5V8BqO} zxXDIG#I)N#EIBNxqTvp=<7ZFqXn*&}wK|qI$Rlsn7C|@oQ)0)YaTKz_G!&>*q5*i^7PGtC09mhH;g%U)YGhV?zk{ z3WNj}js`mlM*w-pT^gyo>F>z_p@77J`MjXti8vP7&&MeztU@5*i!WU#5OJ;oY9R0U z9xS?8;CWFJM*qWw)VxIA(GOaP!Q5UQ`-I%A(?;~UitXacQ!wIztML+QQ^tK77c20t z{DDMf=i&Ot84uqX7Ryi{t#%o|5ZLIp9&|w!^YH(W^_FdIu;H>cR$Ph|cPJ9vEoh4t zC@w{cySrPFQXse$cMtCFQarc^cXxSq*4pp;Y5#-dLvq}6&CEIMfP?hia5NHK#E(vB 
zXNK9jsKRXWM(UVK-edZ+Vu>?;^2H~(7HFF0I}c_zdV7EOkrJw2fnk2Bwxz(Vk=Dj_ z%+{A)ka(Cca`4XEC9huh^S_|zg3^nOu`UGtwP~4;RaNOnkJ@T2$Ebvoq;k|qUt4y| z3^{=l^2TK=MlzTx3BDQvpLI!~a!cix@=va}eEm!c!aNEI`fZrFb-|yVdJgDjvvAeU zud>F3#EopKZ0&TiVogVBlDq77Hp*AW*;3Pxi8~@l&NSl3(nMta2~J4N<9&ByJ39d) zrw=L47m?*5oL(e+V7{ipxDm}o*QQfD3bDp;%QQNvCyR4m*wu%#j;CxrB}L1NbJI6L zjxg1m8!!k!I7a4K>{gKGUATmM`E9xi+c=P=Nz$WYIzx&jx1>^0SeF8*C%AU)x{lLd zR}6#^nC|k~^N{<>^<|hj>+^o;TQ#D_=4lrnYpgnB#0_?l#$O~jQ2k2brBA3Zd1NJN zelW@!*Bc2%WN2@0^mpGX!hQUq6dj0!UUCfByZ!ndA=;!X@6u{G{Ivc*I9Lk0%C9<5 z%*Hvu^6-@*RLAB<^*9!0o`6%tF8}|n6XpaP6?q+&(ox15!tHn7$LN79 zf&N*-ll?W*1R&Aei2<>l=!wF#;8n|*j+dc|;lMk~6|gCGRvH&yA6Z0Q)Di;gFS3iO zR&@4F&|fn-kCngWnggjyzGHaBc`rJoA z+W~2!e^aj0CE!|zcJ!{f;Y9c8@#7oo<7|710+TEo9jvn1Gloo>e~pu2^FT~z>#I@? zZoY&WC5&cI+^7AFBpwb-rCaQ@1g8^o?|yX61nK2wR5cX_t;V^CcvP+8Vx}nJ*RZvt zh?-u3#hiCNtmd_%H-5oyC$LrY2PFTqCDjnlfOicVebAM`a(6_iFeVmwDWT14jnPNQ zuTK6Ul5ab=4`SYaj{U=*MaNn`JK(KNmwOAXL6;n>yPM?hSe#C6%64xX5XpApmb)GF z_KbWL5;Z#Vh&n9|Fz+3qQnB@Hb3G?)ExvmGtAU5m7XV$EU-g4&n36C zb))2-LYVNG*v-B>vfO5BYGMNB#Z>BEKZ8}5Nc~h;z@Wh0Gx$m;#~;>+5li*LtH@WN>3ns8h2|$iFQ;V#+oJd4kY9pJXY7 z-Ewb^wf4149iW|_4N-y}XO*5THR#yDmYvzwba%9$vHN5DA2?Xs@%J$M4{hc~`HJ&H zgi@?_wO7fNC6FG(8fE%cFl17`#+!TEb=-`2eo=YA&wHLPHPGL6{PAV;(jcWc<1XJQ z>?7;gciFhdJ78e4{|hcMFWpOi7j|uXmZ)Umpd&{rXIrEL`KOck9G{n_r9=e$MXL$F zMcno=oRK%O;1aI|kVQAG4{RRD>}4AE%Ru~Z?k)5Pme95amPvqTaHa_C!g|1;A5$+Q z`o;0szNOAQktV4oHMc3FPFW1A8aB$XYzs-=d>LvahJaW>BRQ0Qf>#{Ma$3J=IJAY$*X95p`R^2RgMSwZ?JIBrVL&R&G(yGmsR zK;Cs#uErbIY=QA)1Z2F0j!{ah$2N`uX#??@A%LL@`QD}H48bvXqV`sc{Ip;){2sWQ zBei+oA}o8_+8!3~%7w#~iU%^;DWBwPsoAue)7g)qn|x{;-nvWrru10tl8ARzfN`to zkr8E?P`ab{-?0gxaS9>>U#MaAv2d{Fj6GlKXzP+6-YImgRc!Qi& z3X^j%BxAU%Id#e+Sz#9e#c+Ox9#_VkR$}n?nyiG_R9 z>oa4MWU0~u)Fh@DSBjZ>Zq&Z?G7O37IqQW^H;j^FME>;xzRXvc3u60Z0DdAcX1ap%sXlt z+dFJ#YN>i84z9{}@phm7Tqg*LD}c<@ECjXT+^H8vb=TA6n{H|F%7;m~0JX#>UP4v) zn1whjd}yz-f(Z4WP!qlvu$Eke3k?aC?PQ>t0>~&z_7EwvlM3&qt+CoPRbe*JL6kGg z5FP3VuHUHSiKjM6_xBgrnuUx@>d@a%(1XKv;0i*6ij>1e8MkCJdwbkL9)*oZd&+!A zE8Qp^bz<;@L^GGMF^_kmfSpDvlXXx5<%&O%-6gW1vu7ezMq7%!vRI_0?ZRt%(UgWFj4+GHwy11Rv>Q{E|B7y4Cp=t_c z)+hI}#uG}qON^xV>FELnoljn_u-um>y(2*kLAfeqt86RONHJnas(j|yo3Knv!WW@LQx8en@*qY*ZZ`WfUwK?}g?a z5VA1-W(G!h|33Pjm?*dAR)k2GA!FhW=6~!oWxMF_)D!XgSNCMQS03GTb}aY`n7w~? 
zRU3%f0x9edZtoaImyTOhdhVqyZ(V6zeb;vO6vMjIK3%^e8*)L2mo|3{`c^L}IQ6Ym zXwVgIP&>$ykGe+>vIetfPnwLRO^ zFiuM*(e)a1_;Dg|>6Czs-p%BY{xE1JXPN|;zBL~frpCD}1a7Yg%QNJ!LV8nu@-Rx@ z;eM`e!Y+u00TLck|ASkxis3b8R1+XPlql4Z>D!r$lK4$vqHh)$!vauPjRX)s3@VXw z7JN|1{!L=Sm0YWtdeOebEK|;ax45`4!=>+vIHz z4YTj&)5y4^ZVOe85@_vg7qLIvR=XB)?oquvpm)}tFC%qwNvxA;D4i@!vaV>d9_7_q zIODA({BD-G(bPe+)s6cqlSN-6&-+HG+0Jv#e76&wRh^ac9~x{43EhLRFDh>h^AL zwSbMVNHcz}$TrE7=I~s%^t0BQa8`Cj@u<5swcFD29l4M8^z~Qr5z}PN`Ec|aB;0c> zFREyrgU%Hi(&-R|0S(9w2fa!ZgCCuvLVs~-_ZG-=^ zjp39S$5$^%!?xrJ3PW8e3yvl%4GhQMntd%xr4~Fdm5-e58F9$}#!6C+Lw?4h@?HpA z>*lICM`voEEujr*m@>5&2jGCB3k+2g>c3V%`zVec!arzfw{U$#*13wxBC@e_knQG8 z>17Dy=SfDPO7_eQ{V7q3?BWXv&=1)iMZ|QA`{;c;2>8)KL}^AWX8g8)F9_#@Jx`Nv zYJ!jeN8jwggy6{KVIEQXTJgD$BtJzZDa=H!ii)pDZnp`2YyiJyXBRl!C2Wz&`2Ahy zxX%o89|a6HxK{6Wn3pxqUADq;(z1`GHRVFa6j485>(6bDk+&(1To{$3gTo z(V=GDk08bMOtX(dM>$8DTF~RW$!?YoGKHWi^{+XK>+JT0)m)mA9gyV9k|1uGe43iD{JTG|Z!XAT}A|x%TFOQv# zM8va1i*+u^D$IhPEV{xUf3;5Q2R1d{JR~+QNCB2dyTbqvg0(Vcv-MdMy zlkInj#Fg0(+hwyuPnkSdlz&~?G-hl@XsOty0jk;*4`|$1#Gd1+P*29o^6ne8 zlH@ie11S_QCeSkmtgk1#CWw=|*zJ*J#YXoSzn`$1Z>Xj?Z0M|Z$Tiq{x|LS%2 zTKHW!2);r(enL13npC*He4$>LTN!59cnhl)2wtwoiXUY8^>t)CEA~EzC1ofzWhazn z6Th3C0|qDO+zK2HrgsT70AuGX6k!AwQBR?#2>)|nt98%v{We`SG5^{tf%JmDXjV(DCPgf7n#C<{YB0M*99PeZl`Rg{IuVrd;2xQG76^wtp!7_JAlTs0w+r4HsQ4 zXuR%@g!;*Dr?xg{Ttggks&Up|BaHewir@S0Cy~SNG`~gmaK~nPO!(u?DwFu_eXr!^ zTBvt75cvkVm-Q!e!Y1)WzbJ@JA_52=8z*JWJe8HzfWEiH{AT=TmowBP%C@yGJD^?u zotOWgcRJ)0?AG`w&M?&MS$2XRzWI;sLmk&iHU?#Pm`}`oGG$ir9~l5b@HJ`=oaHq7 zEuGr&L2_i(*cpCU&E-YJtANAh(N)7WVgHzR=9@pud-lU9+6#?_gd#;h%f@_d*~+k` zyFDvDUP7N{vh0{ACZ3EiWZd2rhS6^OM!j0B=b45eA(~atDt6~7dNkVX;kr`&*Tl}g z`h0SEl$N21;kzk>@27tqs(I83^Y~gTQmiscpxzN@k54I_Omw(R7h zl1R#TIywajYJo9)px~jco=10wF;nM^(yq}N+kq<=Cc!OGWWULL2CGmJyKw)9_jFrW z4-DF9q~NwFFk^!^S=%#6z$ByEodYFz)dZ&;*`ariPw<3D>i@)F&Cp-LwFM+TS{n<$ z=MV@mELRsWpuViC=)Y(m^3~8HVJ#wb14YRSPgK2flzv2hDIAN9MkYq^MZ~^%EaP;^% z*_JY^TqOgYCnD&>>4|0as56uUS%&RmT@jcNR4dotcCY5U*ryj0TamZ<7bt^PS)J>k z+AD+PNx=;q4493NmDRg;{2hqqCv-)nWCg~xcPH27};25>T#j{D>|g&Xu8Q)B*5AIQc~r2v8583ffLL72uNN8J@J>Y7+Nw4I3yJTAtlqcF7>tmS2>@9veg?w#4vyN=cKZ_zsz^9;&5JIn;L zs>0YL>kHNT`e>p{e)TEz;b#--pfgkZwx4Av7ajqwW8Bl`h6aH$Gp5@LjMITR6K1`sA{d-6sBI__(m2zM<5 z$_c_4Aqx~e9_?}R(_v!E)y-~R@ahK_+4B)){AOIq-_^7FqeG3-9#hG_7aacwC;g1l z^JPJVFmMmF|8E%`6$BS215X0YkMwIrNS*MSPj-^LBP4eiuqS-NToEsme~5E|Y>~a$ zaALrG4xI!dai6RNQm^JIt;99wToo6efb&iaFy#_9dP|C9Calxa#E3 z`{@<08A_9~6U|p(&xrXOQjC|Y*Qxj&%h3drk`=ty^c`$DL}TjEh4v|QFX(}+WBu`$ zDfpfbzZt}z=X=z&@6PP_ghy`*Uaw0c%M`SWr>a;RMX+qv9a#6|3Ebfhkc+YucC4mU z7P<^(?ke8!cJ*}?zMBzA1ZX4Hf_2lt&Lyo zNhRI-7%+A4q+NtH1=UZNQFm-JcWNr~MJm#MIehsFqQ`_G&4x!2vGzY4b7`Z=?6&X^ zmDhdVHAnY3c-HdniWVN4X@QGV7NNW=pktCaKu_#B}anorC>1l@`0U!E6LXv>?Ht`-}$TC_0X>?|7c1eEn>IIAe#E&Q&^ z4j)!D{yG$xOF4l-$&t5wsi8)y@rLEDqyL1E$S)}A#`MnSDnhSZrj;n z6=@LFD+Lzchhg5?e&Q0Dm#eymxUSf@ZGi0Vq>~(5C1a>&tO#i_|=84Z1Dq~xQZBAAHsJ7nt@$Gv-a!>Ct zVN%uu|K1;8{Js$?Bv-Bd;S`0LM!#0B9?hM#Ipeoo5!cLz${$_B8g6U#h$6#H`ldBy ziB@B=Lgi;Cuc!&svJHs$Uee~F_mf*>UVWxV72?^+vc0^L_l!c0QoDp8cncBxBIDV>N_cYdm_ z?kwqV19#|wVL*aF;q%)V9K~t7{5f7*oysZ`V!1qna$rE=17o7zF0`9{;xVL&U%&Rj zc=dH^^_rcq&m00NOe%LbMR?Ij3feEou-b|#N)DlAwy+R{ml&nb4nn`2ZM}V_ms5h< z@8$`#%6Vk+4DX^mu6tD3)@O8Z|#Kql^-xMbwE-E1tDX}M~^)RkiU zkD~o4%J9J8sz@(AcOveWu5ZuJQNQUqrkzjJ7N@GJebGv4p*e(ND32;o`|cHXI?{mJ zPng0nZA_`4?ZlF_h!)1!0ZQAO?x78|E2F+Q^M(t86@td(u@xKK-PN_=&@j^xhkzZw z{xw%4uzM&}9Mj5>b#`y2BMW7dn%Nf#WqZ8nMk=?17Xy2;F;tx0`SMtp(x zIr^h;=t2j}b%e}`cv83w7eVu zT|2MkHP=mPtNRuXU*Y!^?Oqb8cd>bH%N;4V8IwHlr-Ns!O1Zw!#BzI!a_H$SsL5Qb zywJl(Np;Q~ZDO0Z_+k`hHQUF*Igdg+X<^GaJb+$TQkfnADCbdrSf$Dbmv3hgHkpKb 
zg)B`KVfK?Jg7BupwQaW!fOxWcWF+}2<>%$|?dAp>aF9q%?QFQAYN>2kg8W&&ofadr z-Ej?Wh?6D!g01y{&&RNnC3^az7n|p3qm!>O)}vAnFZ36>(6KKhVIVYk6uBbuiwI`S zLy)*aCLvNY_~hFcN1owmOzd>ZyIHFh$UR;$N?I0rA`BK3OrI`I8U%mxs5aat){1mn zY11qByFcw-5u;>YdE8Ay{~FmTzwJa(6~W#PAF@ccMc_HNXCXc$$hQd}b?MBpQ7882 zLr7Fq-2D(TbJBMqLGp}UtNV+w>b~Nr4c%D$y9%wQ&#GN^rKdn!0~4Ka0+alcykTv8 z;zP~wQB#$)^UqCo=f(B$7R@fzov2&$qRw){`>iKT6E5}t+-+Q48BLc&fzXj^h-7Ko zFuN0RCUW)R#~$1fz8f`sgY#hMvVCFH&{MOFQ~7Wy7=Jb|7etcEHx;EiNELl^N2FYliXrSduOm8E^eJ(H5fnYftobL&&2q>-nmkjJ zy3**ggm&FdyuoDEwrxcqx37l9FD@AD8%`VVWUo~XaKf}0^VKT{N9(UjQkfUkm^;^= zllR&ORTw-|GMgANyRpIpL&}NgOh-&Vir>;|MDP|YlVG4CR_8ojd5-w>btyme0Yvhw zEx?k*;Cwxr^1M^zM#OpH87DC2Bu?7bE^oF{d(#Y_ZO>g8rk|g5uiyW4^ut4V{XAp) ztjK)$@*wp4xYZ}*Bu<9F&`$GAt_gP~8RtD;a=&v9>BCKIzc2M&jfx`qT5dg}TjUR; zV5x={SGNShF?-Pi>Vu6gj+?=B9?@(a^8K!eN7eF)s>9Fk@))_v`G{q2o9aeF>08#b z5qykb(}kV|mw>0^vwIMa|Kz;%13@Mp%TzkCG))X~>GqF(ZmQoNPK<@ zJMui;NmRn@jf(GX6A_+>aFU6bn*}B8L7=6Tk!KrMQ^bGBvwFif%O?E6K|KM)bG**gdBR4toa7tf*a?^h$uJ# z2Q5*rm))jkkBnPY$K5m#*gW0xgCtB{p}>|Re!=9|t@O`>c_*FigpaMp`@PdKDqJzz4!PW-ha8R!Oi7`asBn`M;6{jhyag@!$gyitbXZ=)ACsy zVB6p58-&+()XtsQ5bbQK0{MftAvccmIbfqiEt^&#_F&xPvXNE8in!;TAUi&xG#NR_;HGZ1&(Zxs^3;}5_4ll `>)#!#P|5Ci-PrOOx&3?kH z3h%IigtJ~atn~NBr<~tL=ePd4FH^zld&4HQ-N^x#!lRjxh0AK}^F~(ZaMsr|)x_9_ zDPE!{p60ho^w-c-1C^sebt!rT(Kx}v8;mBN;3KuM06&jGTs;!`L{G#AuVTiJF=B*FYX<%XNVt zFedRx*(iRuJ%(NWLjFYFXo;w+dT90cPw;GrM9wpT6G$iZiiNm%*H&HaPOE3FI)US-iY=45>(wdFdCMZ+O>i zt3^y)?DX6DRlr{eQdJUM=iy3WjDfcO>e3-kygpbYCncTiQbwqZ>gK=`=_pf6O%~2~10i z+~l?w+TG!E_95x~2;oGX&ok(70UkwuvPR_WL1~?|@fIze+b1@?233i!+G|Qi2QTfH ztIbIR$5HpS`hwt&RTN49imqwesG~bDmcik`HLYn&NJ~^G4ZN#{X#DiM5(K(ZH0{uyq@k<-RAC?RdiDnBv;1GQxj zG97+5W@3xCZ;uQrK7^S_-E*#{|H~%OAL$w7^iwZc^%r?-_Y`Rjf^BxOF z78r-1ZMBnQ_uPPLUYWNxaUMC8-v3RreeJOG92rPSkR1{b$<}$Z-Kno;j-@o5Lcd_| z1kDIK2A4ytj7W;a){UMheYdDWg;gRLLiHBcY@P?$x|sD%0P070w>vKriP>IYJ5>0) zJZ-B%;ckIJ=rPnrI5}%o+_HD;(Ab(q(5rNKgt=0Jgtstw6#S>#I%id&#|WO_F<=xY z((ebal-%u!CmL2;E1BW3-N&F1T~%owQzWLJ^VLQvQ0K;ih`#;|mFFk?n4y`zS@()| zdViWLGN@-r0O4bR3ZB=2R?}?doRc)qnRh0F$CpsnQ|B2;APkK9AKV$vi<_U~;|$+s z_$#;BCHPlnN1$bsOLJ;Xiy^!DF{k8{=?bc#-Hnm3pW0*Z710iVM>g%_pYkl_bENB^ z7^KpeT)SDdLqRNeyE}}neEUTLc#`DU-rz$qg3Al6ROu7q+Y;@7D^N+G_I2&!dEgLV z?W9Ic)lka%-Dlan*{`%Seuc813GhIoq*Fa2RwHJY(H?l{fh!><96 z?^8LKDuG0P^3$|epn>)>QI29Gy~%rig;q%Vx<@bXNWYnC=*z^f3*(?wZ*WGJ*!#b3 z+-^&MbNg2nqWjoC-T>K>6OY=Qx~T?-W$IB*rRE&ADbZ6)^a1}5*vw{@qfi0|EhFzQ(63-5n$z0%c{44}Ti2CSVmNjj6w0a`vIT0SjS z_5}L1C+ZLG`ZpsNM&M_yvXYYw`|7G9{XtJlUv&K+Qy9PLQkaod=Od6fcS+x6uM!}26^Z~7;r?%#3B;ZQI7$D%Y`hB zDD zL!GI}3)VeA3v2D}9eG8WncYX??X?~fE zmW9_pLUK(3T5)~}GdL%r z)-$6raojFqFBPa^Mw{pFD$#{ssZA|XYQCQ0gJjs*TbI6}Mp>JM*P87y4hjl*ufhxc#ne+p^0E-qSI#t3#@o9DNtJj2GfY5TkE&Z_fT<{5&$=|-@8s?=G$g|__TGuQyVgf}p z#O+e(@uxrx+WYi)%Z%jewR9~nV*O}{DU+u;v!NoCwU;mBr2f4!dxfKvcZZt&kDvII zgte$PtDzmRHXAWH$^fu?$*xFQ5!ztjt3e6)+;Ev>fJbC!_L;Mm*bS$hD@!7^RB^o}kCBlFfuTsxOEV=Zh$YiN5X>dFbfv zbbNxZ&k^4wTw&!NrPd%1NWPih?3gOb6Vsu=`y3xqj)&v3+|(eKlmh>EaW>~e%WugG zE$!$<@@N3`uY_RH$wER(;davb0B!6CzBQ)G)xUM{8Ydgc@eS0=Hb06Y{W@*P0I4|| z4Ol^#<(vSLw3esx^Q1tit>u4k1}W9nOg!^_#!qI2{-&q{awz7646;PejnC)_U1;Y4 zIV#(*H?!(pZO3g6!XK+_ zHT-h}X3lH;vUE4#a>o(oMlrkWuEtB)zb2mUtAQ1<3!C>F+`Hy&t68R{ZBF*i-dN_W zh={+XGUW8@MD!7D%_!V!zgT{mEQaXcQF?#d11zc^ozo5r$D{Z052++tl)p$HaBv>G zl@<|XbbhQ`9>lb$zaPQq@y|N-{h1dQZrW7@xX6Ys{Uudq#QSYdv@2^jp%ad%1@dxn z$SCm{V9!$P6%9vcE*-d?Y_w;s%fDa7&?++j)pV!z1F^B3_0EevW$N)UUGIY8cC|d3 zcaU2=K*}aj1AEL1OnsJA$cc7N`xd0dcitUp1n2jZ$7$%KRweSD`_` zl8@tQ;;%-fzdh0drz)qa%F1U5CE=~gV}JsdEC1|=4ajh4)v<_9NnOWRH!)`wXDvAiFm#pZ0cZTI92;(oMhVKgAdM^k2^pE)?1k($Lg 
zTMYdapLcSF1(|&IIt-lRyzte-Wz422KU4B+iMQ7>Oow_(F%U8SHbvdEE z%;#SXl2&|avDk8;j|DJS?PgfCrbWZY6cpQlV1hDT zA+ZB~fsGO+F!y_$IVJ5JiTO`<{wKt%q~KhHF2~3c?_Lkt#Y;tdEkoODD%fJ=B$XdbK*9yWrdR$SK?$p> z{g1FD9;sUBsD}#l1!DQh)mXWI`Dl>)HovC$n;^&u?L7A;>drX9>4U4@WVdaJ#43qd z(j@_<>_TI~)WkXYQjW#e+SJ$ufgkl%bGUnb{@N1nYT6Ue7f)GQ%h2pKcZ!NbP#EQh zj!T2TYla5xN*?8gYgFq)fn{L(?ygkT)QOe@k`)}gMWH~S6l?m3geW=0vU_lO9*Ks$ zyBKz(6<|EC*c>TNh4&2KFjT$OUkq<*FGO(%&a}yQG5$E4qcepD$%d9KTUOuSw&u~6 z1@%05?@{O?dQq7UUM-nKBSX+?=LiBOb+W7|q@iN+0tPhdEBA>+B!)%alZdDKG?KUU z(!tH*ZR?k!3nwmu^sdy;ea<z2pY9N} zDrPC0B`%c^NXa(~^-$*w)x4So^U4le5JKRtnApR#1xKN+7bLmDnC-rG@8MmkznE_< zi5;(oZ^Y<8$Ey*wot>H8bVK2Ch51qMZPrvhT=rJ%SPbk+4AhJNi(V-*^>;O9f{m~W zo&%O&xppYT)p1vB@VvEULvz}O|APY?;v^~*hPKLM3L&{ylWU<9nq0KbNPj%4Kaqj2 z8*^daG>=j=?abiHtW^&UzA&WNo+4Y

    BleOu!O-<4iCWZ(G;i3}JnJvDT7{oZRkt zv)#KAv{swXULeCIaWLYNtu>3@a~?DsGfR<3_?oQV;=Axm`D=5^>(7rkclj%LzZ-Y0 zff%OZMa~sj+@3Ssg#tHgRIF!y?$p5`8#Ab>D@h~@s*sul`NFUGFD zyJN(rRq3Y;hbr^yMMpY_22Us;@ctEs7HGN(eIr!4u&5fwbdDKYcJZ;OwJ*pADAFYK zN!erE_@LA3$Dwn_FY@K>dbe7*6a|KdrHK)?2v&9~>3@7wm0-bkRkp&oS!L_{=H0O%m&E-Oo8OJ0;y)J1^=oJ~W0ifh;ok7XUl3n17qy8J_cY37xtouXR} z423LiwLOWN7t&~_(kZr%i}!^No&YWvE*BFHrARSxHtU-S3g2}MzyHlOMK+IaTNk** z%Ex`Es(i9(!6Uw*UWukJpL&%3?SmJ1m}-S%ySc{z8|JrvI>zlPy*~qngYf@@D>jT% zC{+!u#fT;$wkH-GB?k9)gK`>62E{?10Q(AkEHbGWboTg?_DVblVpA?gx1vtsWW-!> z?RGZCkj0#uVnXVTSXb8OmS30mppJeQJKBxLX`4@uWx!nE<@e$F%HdzIFZVR|!$9h4 z^Td337v3O9?P+yu2tRD14{C3rEw$orHj4a2!wSk5!?1XMlctjb<;IqAO_qGL5w)+)25x zbE#_eu@Dwnp$CmR>qHL&2aBk@Zxzko4NLxs<8;77FQUYl8!okj8y+ByzO!GPLmLTL zQY_-9#-a%DbR)^N&t*@$U$7sgw{AVe_^&!l^{#4Gg?WGdvFBRmH2sac%-}wXtTiIu zS1~HC@yhxrvc(bFMcH(D5PVz8S`e}!$k4WgH9zw$;buH$^51UlXb)o$GK?28zAA!Y z4R*AM-Pl+r)h$l$lO?y}{&za?K@*Wa0gmu-^OIL_)C5`ocITMU6VF}qE@;lnH`L1f ze!v8H+#T}Xo}%i+X_Hb^v}CiOu4BW=FhGQ=CbvtXr49+a8>VXv&<>a}x>Wr`g>55# z8S97sX=R|M_D84I@hASCin3M58+ayj%%Z3u;c73wTaMHD7h5l!oh0W+UxNfH?8gpH zqthoNJE9*;zh3BLH#_X?F(YtVUh~48w@g|T`TMu&V$0-9fo|*Gjcq_5zptrU108r7 z`>$eepg)1uRF8}S6}eN6)$db2{zC=Dr!Br`?9d}h=vvt()$;h6HEdJ~X_?x#bj6r}BT6*uo*&FRk}7Mt4I zj107dy|AMx(y7tWb!^~eP6y5hWIx0I`!=~I#vq!4HCjQtHi%PxwQC12^+Nj9Vo6=ywOH=Q8bvm8N;a(>qdf6DZs1aut_9LrJP>@pGU>Ix6QM)6P}va zY*K3L@X9_Jd@0)x&N^VJbPD(mxWDLm7rl%a2*l}!f{>0ER)+ra@R~rEf2-0`rIW~E zMsssl!uO&OQ|BC<({4vQsA$UKlJ%Z3z{+KeyHiVbhSYr)Zcn8m@{W`huZ!tRxEUp$ z=Jwu5Db1?%(_=;SF%a&yY*gw?Nyt;N%>L7+Dn{juT_ zzb_C^{p_$nVWB{cuB%y}Xv$jSr{JWJIOnPDxeFy%Rk%$o^9JyR3O?sjiB;YGvQ4o$ z@LF8hxxpl^O_1JI+fRKPOz;YXq)J^if|m_t+?rZZIQ7e&TgjbA4y^le6nb=s#r}f> zhEPfn4+g+8?W%*bnpR_nuYX8_!C|FG+A(P=`^r*<=Bhp^g8#wY%|b=|ROo+wc#=^j zNV&heEpnoExo{eglh8Ebder)ZnzGg~8Ygs=ZCkVBOPRR!*hWYG3sV2Voq<$ z+U5Rq_X6%9w|ZZ(G&x%Te3)Dtr1!dS2Nk1Pm*Pc{>UTB{-st$W{>iUSu7sju&Ce>h zL090+!vYR|5-^zAkD9O^U?_>1-x*ydZgI8-bqGOd~hVXk#ZXV20be zlJ@wL|L>ETl(}>8tJs|bz0=q)_an{Iy%ZNy*9InfDutAP6TCS*H9wUFkMz&lZQLph zbZ$ZDHcom9cIN?KlVYka4Er6_$I&x!jEsDq`o6|zP#f(l7h+npbutb#`?%#PADP-A zRLT#xfB}Ru&joFS=C+heJ?bh&mdiF;&C`|fBWQFClDh$ffz+6bNzgfnRy!tyM}ZDj zbEx1u9vb*eMwmzs2Bf+|ZcmwA>YGx?($|`S{qncv9q;axN+f*wQMHFs8IhEYt_uIb z#Wm)ZZ}H3bOq_=Di;@f`BaO0jo~t?TY;Oq}ROimOJDTKaemcQXd+_;m zq01M~iGL}ai>ildj`&;+A}0Fa=?CPJQdpFjaeD4YXfvSObb^tVD%Dz$Uc3K4xCW6> z=!=D&s|{@3pkAbXIC}x|w{AH| zimDCL4Q=n3REmT~nPQSEJezsR-L8{CfqP*!P2J70docWqpK<-FXO(l?LTeyc!Lq|I z)+2XY!ykYb)n)P&H;u}B$s_+&o?|4_l6-NA3SK_3w~wcfwN$Cxch#O#lWd#`fcRwS1jvhgu8KAKmVU=%P7?ZAn=LIbY5klH*N8A${*X*~e z`vVf1L`CMrp@epLx=pDuCap%^iTtk7{^@c?vK_IlwlZw(Qpd{GG)OOC7r)Tl^)GUh z2~|>v+C?^xFRm@Exy|cC>!+iK!b5J`)jtZtRBT%OkUX5f6h-}ddt0V)TsH`SpsVj$ z9az<}PSvMZpNOeADTkW?ohdr!)nmFAoI9ANV|6cEm#(*9fvdfyD$8v)mmljAqeQ7M zEtl}a5Y65-jR!G~1VNe=mxEXg`NSua+G@sl{rpToK%MX~qfOYxA8AQEfL zf2v>C=xdG(d~T*lD7?X~V6ztCpr$&@wwpy}r_2!Q?H5OlJlnZd*g$j(ed2d%tE)A8$e0lt6mw)&eEerlN*J8_iltf7`EHND-k(O ziK+_Y7eIMiw;hJ(o%sHP3k*2 z^AE-XQQ`~q*=l}(-|tw|oImH&CCN;Vrizy>>2#a!S17m^U6V0J2(+tt9(w-JY0*m+;kg0vYi1 zJHV+b!`QWdxfSKXn@-^R{yoT%x`M*#W2KC6k?uMWnWV5al`Z<7GNe;{8}e&izxtF( zHSuV|d%#t{&p;m#!b$v5?zLiGj3j8&%9_A0Ph-o2F=}@rxu#B-yJ>@k^&Z1~BRavs z+~HR=0?QIQ-E}BCSF~mGY&v=UD1a`APCe|dtawE;g(hMc>2|8q43Zu#{FDF#vvxY_(zcC%fkSTAmIX;{c? 
zK?vQb7B34iy&*`1L}R)Y(-MwQlz+khEW=6EN@0EInd0ZGz1yTEUI;ml%!}Qz6-_Xf z-*%q6%EPUb6l)(A}7^4F`AEk9lk7GVahN4-!1~IkAPCKQ)Pdo_XgvlatT?%%+D$08mr3knXs?c zwA%s{pu7euJiU_|)PmIUWal=X=y%G-<-NQx85j8>+uc>~!Vt}r&rIVZ)LT(^7ubFh zQF1$TcbWipsmkRM$11;b9zYVs(J@FJD@skj4yddnIxNfTMFH{&m!nURN*BCPZi?sH zXU50|XNxtw=w@Xoof<&mTjC^d!KA9TGCqHjLCdtsQ5=6oId97-IXDtSd`wArr`Y!l zEHQsiiNt!nGP9eTlZ7IS4U4MDkt@_mR1Yw%w2-@%pIGC@@q7@`2+p>#W>4|-05o!< zFeXNU?YKJ4U>Dv(Hy&qcwMAF+mO<;zv0jIGPf^kkL8xR8I?pS0&Q-am>e%&pLCEtuMT->}W#EyW833MD|G6u075+TsO@Yw;8bQlNMURy=rdE5$WX z+})wLyB7~0An@D#ci)|z&CK%wGMRaDU)Oz{$9a=gs8Mlvea(N%@8C7Gp$6OASZ!I{ z(@fV$7;W}z>^h5XS03SIi8j>K&nj#zt=p#$vIR+J9?s>4YR?vqSe#w)ldL@1{b<%e zJ51vDvdt`fn=V?EO^7`J(~VW8lAUt3Z-@b2KR2q08vR`ax?GF)ntqRFSqW&KYh}^{ za>>auv_IH=lQ&k8zz6-J$5N`hdK>*CX@bSlYtUBuE86Z^!mDPd`$^21(X?m#lCV`t z%Ky+%T&JKAaS->fjiEOF?&SS5hfUFh=7GInoc{nbrg~6Glw3YaU!mmiUP$$u)$KNr zgVaW&iQbR=wsf~Ti!nDSGGf*@=Fi3nY42B6rNWnfqmK$no}}xm#hYJ)jL#yJ3NXcU zQ({Vc8aIX(<>&15N4N+dHM#*UFVti-SL*wiysMiK*C=A$uVQqvUUqGK~ z0fWwNj8QH<93|U3sVV+|L6*&i95u~&IMtGrm>OTCi@V}MW)H?G%TS2jF5GT=y0j;U za&{I8lBRgZO9ojrdV0kxW?lKmcO+Q2Z>=ShFr$>L=-o1%C?e`VfUV!sQeQTO8^#Ft z49(S;{vPQSgNdnxyiz_3626t}Sjq{XbDYMqqCb&524=>p^M=4eh4 zLnlmFYS1rvZ64ho*0A$sqY^0QmwcN^07rk>^aG-@ZhBYwrweD?jP_QzM~9uDSiy2f zML}5FW&l1w8H8Bo^9&+q@-p4l$;t7HMd0GfOC@mJUn(7K3MDCgF@zb4A4#_f?Dr!t z2wgA5zn8QpUd^mQAE|I8efN1tTub!d0m{L(OlJq7ca3zwe7ua^M|tb z;l{JjBg~h=JrA&$LVKNUNY15Uy)~g>N*HjLVQ+DB$~ZnG5r%J^%Tr(Vt5?-bIWSdL zg)`RI%SFH0!qOm@r_v??V3+fKZv2M_CBD~wTr#0XNthRT%5>-n-_^%ZnV)ZZE^c-W zD0W5~|B*D0Nunb!4PDmG;8BIqnbM}6F^I&~vrEOJEbmx%Vdd(iN!uwf2j=E0>*;J7 zn4stL^_U-yEnl{LdZU=5qH2HIXG2YaCIKp+}rnmV(v9jCb8XBfK%0I0dy@6D;4Q=4Y zyTN+_zs?U=5YA>^T&P#9iQy=-tkWi+u+$%iMZ8UBppXl}X@hzE6%ArRzLMTw2K(rAei_VRd6v#28=mk5x_q8m~F0@8@ zyFE~cdajn!l+Y{VwxsGoS28-iPbdRa@_ER) zEd>k>b>otGGDiQJ+cjV#-_4LQ)c5#~IF&vA4}f)x=(*46XX&thD^+q>lYCW-4$A== zUbhDUfse1DL~}XQEmS4SN%(GQ7fOi>Wln$&a~6XRi}p9M=5s}ibNe^PAMOFRus2FA zU?lS-v7nKyp-MT?+i&_SUO%jsk`|Nx7INuH8)3iy!HAExo2}vY>3*5t7rv?TEK2Zm zP2BIS#a-cO#qAFvnlx@&oi~V;KP`Kln_QqmtIJt|+PUF#$b}5k$e%KIl?a{Zw%mDj z)|cRWvB%j}tcRSOhRVtHbh)`Q4XcJJUY4lHTx(v(gN~7Kf7&`G&3By9kb@nG5XdcX zqW0LS>#jFDtVP5}I=y)Rg6q28{jX&bJ9UmOLoQ=bH?}M;1?Sb$wb_4wz~JuUuol|0 zyTjq>6UTUrSR!P_vfi0JT6C+4ylVb_m_OO#BN@^96}mnWDqbG^vr5dur4Zdb=y7El zen(2`JSr(yi8^LQuK*EWVRafh+&nqW;cWGB;P5`3i6Hhn@?Imv(4y@^_V3qeO=2;W zo*%li_@`JVB~{Hya%|aNizT~1`KNuQDM5y@5CS7iv7qY6P5GHis`M|&eb@6o_?$3* zv_fjsss&a?L%)5rt7*ZHudyY65XM!1_#3@rdWwrmt`gmp3 z+XC3?Z1!*x8TTlwB@;eUQjo}h`^InT1hsrf<3`K!V@&PN_{k-2<5fNEw#!>rH?orf zw=NV?KNJ3n&vlR7h&x=S>9*QRXBNHhWvOd?AN@r1pLfiAQK5Shm7Wy)UPGSABN%)3 zw^cUoGB&J=~l27q4JCZ7?I@7Uyai!oFl%t)qga*F_{4l+>_Iv-;+5!&tFkC%5c zJecEx(7Cm;T%^yldyRO!7&zjbgo-Ow?aemue~YU4TPBrJD>BCR7USVBFY`Vt^nD-U4N}}7vr2&L znHSnXX!V+~TII|&OCrWg*DOJwCJI{cZDrgeJO>WH{Lr0vA7J@e%I;*mg%?0X2>FhX zu30|ntFgKkne&FGA7?vyBP>mV^D*AMOD|X0fyVr$Q@&RwZNC&}U>gI~i}b84YW`xm zweLROm1a*)RZK+k(eG-j=Nwx2xw;HaT&omVELU6M-x17h4Brj4jIy-?M{F$;zTY`= z>oqN0dx<=(6bf0Cekqlz32RkO>veS3iCUl7)fA8?>?Qxud`b?FKt*1MrpUmLFJe$v zI9G@|o(e-@<`1zXqPuN055JaGwj{isi{A4c@UONda|A`nuUTG{4c5!AT8D|@o_+cq zamV6m^Z$yTzDboHo%(xWQp?0`W_fGIMD?Gr`Aum^YOkF-Vo3v1y}zC~?Hi+_n7%v7 zvVoSMNdcdJYbQ&+7gj8~3IFB0Oeu-!XNa@`Jq@^t5-_rkG7{AWWIe%1@Luv7RnOY> zb29ZEAvrUf8i)m!R%t$R3E)3e_&zC5-8F!sl5D!kgO|Ty-Beg=PdW4?T`d0|Kt0J< z5@o|waS$$6Z||Ngk{R13C6C%nSg^(TET=wYk6u&0Sc^z{)C(4R%1l>2TID3@1vc|` zo@{;bA3!cp6gK(3YK!Am^v-N93##2}AW2=aBs<Afa#MmhTChEgB^~3*n&Dk%@C07zyVB1YoOLJYN?c2uO z3-=WC0=|xsl)Cp&2Evugq4g*Cu2R7PHMwWInlCe;>?X`=dAsiPUBkYq8xt#KRs$^p z{+-j4dFCx7;q`5y)xE4H;lS|PzNt0avgt2G#wOnV{2kD|Q-;_{=l}Mz?u|A5KWWzS 
z|NrFA{8cPk4Y*2C2=2=NO;~`mqk<^8ZcMkQjvTw_*CpXO4dp$Y6S4D-@j--~f#3gt?pU zr(IYNSp0AihwCka8<_*I6Pv!vNRCt=#6PSsH2*=TCFK-7=blD@7}T~hp6_eZDzX7G z?mt%yy`wT`jm?!Cmw{T%P4%*r#iaerxS?g((_(lxIBN2OT#g>RU_gH^aX+giKgdK| z92ZM;%-ZUs@n*T`pG|G=Pe3=oLXFR;)(6;i(I(NL96Lf4x(@02t;lC0w+vZ?k)_-0 zwOCsS1`|@o-Cj~{?R}sA7L_%tI5+z{_2`ERoJYhGJ{>3LeiqCVXnd|07n4&cdELFo zO$m}dgF^b} z!t`t5JH2k_AtxMKncWB4yZRq$x3%(f^L}@lf+v7y@$X8M4@AQ%#r6Us+L_COm1uh< zdkTMT&uvfSvrB;P)R*MRcN6|_G>7(>L3_6H^+WK(f?xTvTh=YPG9aB}AzwCvX{5Jk z*l@B)_e{+bz6E=qYxVNzKXRelBU?1Oz-QkD?zM~Bwg+Zc)J-02>lbx0j~G;YEIw|V zMYNQWKRt|2WG%fttG3MPg=HtM@0*z)Ri6Ek$r8-Cp-Y=~b53v?{(Y$s=$YO@{nFEC zh3$JK@5scJnN;a$_1`D`vu)imWG(Ri(Cdn4y)fL$+2rFpckUBDCBk1Up=;y!bJJ$7 z4#u3&)bqsi8<i2Op9nJLNC-U3PuIRmW! zqshTimh!>WC`>^t|BSbo7uo99HaJO7#Qe>}?qFzm-8!c*{k`xO5qCX1*jjtT^ z4+*E1(hQ;|vZlG(g^)f`#;r9B0eY3(XLqLHNJsT%&{gnY*o{kB5bVn#QEJm&6!32F zwloSk7R}`bH(HLVUddX+w8ko*^d}uB3=`fOP9JDwvrL@ImswS$e(uIn&xC>ZHS9IZ z_+wX!r}~axKTp`t(7IN_GT_4@LiSixNQCFzypdzw95|EzQEFBUDv;0xKO$bFGl_7y z9K3^jN8}(--9w}KDPQ`Tvd3eTFMQJPBM%YM>nEaAYayLOYUh$f-6C*aLZ7skb&YPm zDcfvjVj7cG%%p69nbtO&<2wvhc^nM7#LGX1B6BXz61`y2O!2=E4Upm`P9`A=$JWCh`*(3nVJP+ zQ3m6>k=Ss0YGXn=C7=9jiM!cTXmX{t84j=ZZr^Xl( zNHXtqXp9Pr*3HX2ngU>ZUn)3YP3mqB42gVUj{ z>g`TimmjXV9;#ssw%)Kzq-CGDTxd`dGxL-ioz{-Nu^_vb;nBcaQ<4&nF#4y(y18@0 z-47=X0l0#wPWbK5N5g|9sFQ{e;;(q%<`PDiH5m%t53( zyLD9rdAt^Z<}UVx4Qn?|6t>lQTC`O&s^U~KVoZ+j*kTnhcv5=IBHOhUD&bz{kY6Se z`@m4G*jZrVPB={9bLYDUUeutXid813R{P|Uyf4MZ<54)hCh(X+baWtv@+(Gc>s zB!-SFE<-@lg`IUQ}Cd;i~+EvEjJMCwPrz1Yn3+e6 z3P~^2Gv5-@@@pd(c4=5WHRk5jaL;uL>fS}cz}&E5!`xe1931>i)8g!!8}#!nL*$mJ z=iz!-vffl^rnB?L#f7O{W3rX|Q+mI)kDC3KKt2xt=J~mu(V6m@sOed~%0VY*Cy?A* zfgB}J+*4g$T|kH~E}6V6E(7F_lWCLw1w>(SgSBq|q)lP-woM~^*v?1tqsKyd+xg2k zzgwqI##mg&=l}Vn9YL{>_kG!vk_x0h(0F-cWp=KCx-{=}bZ}0UK#7XueqLd;Tat4p zen%7cgr3hgUM`@T`^nkDpk^TXkJ1*IKfL}Gubuk zl?{@~ID_nloVshezEqp1)Si%zEc&)H(@3X{$;(Xf*J+yvmceqSvVjIMayob^NGu`r z?c=;(O?C*zwQc%*e~y6$dA>iJ#+O7nT~X;Vhz`j8L5xOM(btNVpKzJ5|S?@8l zF+zRLv{31Ndd#9-VaGLx7Khdj6k9tVxfTX*Bl%ep($3oXI8I%z^L>0Tevzq zH)DOwqk&vQW{Hy2;9pgrL?JbH*-;o?c+Yf?PNH|kc|tSH{vm->p3=52QOKs8tVTo5 z%$|uBA&81ZrgyGU98Ge==BU3}Et!4@ZU_zog*>wEoz%&BJ5c3wOy#p`rdP`gAukU* zCrc?e?*uOlF=PSb!xS?jM5#G6iScTu482h%UkOOVoY8AD)mYLCa!)_doEoJ&Pr4U&e^YQqdkC zLxs$OnA4|Apl^b+lfboSKEn09jb+{74Fm4~V7E|Wbdf1H@zM3i2Sv^2zs?HWq4-RCSA9Y9L#KzI76dP1k4cxVCByq` z2|6qGXj7eFNOA0~WA|IYT8WUpS5 zaOh}O^Ixd`Ko`AI;O#o$r8>!{mosryvEJT#nF5_VyFv??w+mygRd)k(W0gj$@ck=K znN1e=+fzMmu7Z|W7Leq*FQ>w%iP3>IQLx&w-WLa_S53!oGhfcmVx96Br-C24-d|eN zh~DrxCta03UN%SbD;c{uZqStn59YDG)io(Ew{2oAU9}T@GJ*dMU;lx?km*@7W2R!S z$)}0!8#qEKF~A ztn<$V^uuKJ!|tQbovJBkZ_PA6U&K=KmGp4H&S+>U8+t8=Ro1or=-kWC$F^5p=cL{@ zOSKS}9g$>|4UuqZ&eC$DGurYRJb`Qi7YXJ3Xx|Z-R%6cLPG3@Ar<9WZ{4QP4Mu&Q+ ze|)arAz9u4EJoxF73}D&Ro(W1(QL52P==bHyN9ZXT!pZ}|D)c=dj&b>^>^-P3#?*f zc{bQUz>s(b_RU1Hc6m|-h;n=I0rOog5Q8}rq3&^~f_daI%Y74DZAUZ0NiTwLtcEC$ zukR*Yx%+?S%g^D^I0ts`_lj@#I+(Lm#c{z?&Aw&aSc|bCO)FPknWjTBp53H$tR;Z<()9lvF)6B0Qla*>I~v zStXsf*&5;EqIgS-{?z-ip}i$u|0HDr>+c3yrw6SVNZazPl)Eb060aHipY*p8Zw7I_ z0t}J+ToeQsSs)QsOzHN!I}Aca?H2mimiov)-R{vA%rbh!o1Z;TP&sxf`)ncNF_9apF#s@j}C|DM#{g+J|%w960Yd%D!ygVi@l zN)^VN5;_{SMs-F6G67{fnns`*Wq#8Z>rfO>VaCnZLp}XdKwk8-l$DhbDu9%F<*6xH zwXlw5vK(YUahPOlL@RZ@RjZnZ(j4j=J~~;md`B%oO{DlpW~Y+wb@bLY)iOLwK=Z{) zooZ;7{_M|5h3xR2d+p!3R-q`%kIg|ncRICqRDr8H=ei2J=G>63HH5A{n7>X`R}#7>GjL3*cE)$lJ(9c zUkr>d!p`6Gse^@f>#04c1!#Hb>tWu6A)6561uP1$<(Ix7|qn>S5CBr2nziqQ^ zaO>3R??@6y9o?N)`9ab(g4mw>rVc^FOdlM^e)R3SR;<*~{{=lP%hd(V3DIqwFEGWl zig{mribc;H7WvdkCUYJr{w=2{q5&WH+Syov-wTdvY9$gMY880nbpG@G_eajOibEi_ z^VY(9l9xU1P~%tBrgKZUSjQ_nYo6GW;BF6sRuxON_3}Q>g-43sCY3%Mo)pf6^NW5^ 
zt7nVGVB9WoBf7w_ok=KA|*CTdC z3AHgo_JJeNc;MvxoE)Q3r~mB0%Yr*?%i~2d8y>MG68oev^M4TJT)RW z?0YwcU(UkjATywK69VBvgz_~gjInwVJVyn@j3(r$FXt{eKIjjE;AHXw;@XZ?*@+zv zE$bJWk~TDecK&~MIdxxC^9Pr6zY6-xB65Al#htaL^ZSQ1Yt!`9*LMD@vsjcp^^mKT2kdJqQoML_+D%eWs^mso z|6ai!@j50_`%N2sWV+^yh(RU)0l6a!83O*tCjzPJ+Es5Tv7@g}{q>!<&)g)ZC_k+j zsLmI{Y^_9-7w2eA!${msFs)Y{(661_){oEe16*~!giXR;dnC<@{3t+Lv#$t(f^+v0 z4mby22XDAX%PkIlQ)u;-2U5K(`FB>L@zbOzB;rj)E*VD60r=mO|H}5wucx!}Q9oiE z<+Wlv)`LtEHrm6eh2cp})wHp(FYM97k2ggK-uzdP_%@PEozrsFVx4au3Nul~wx^JX z-(dOo;vpDaa=p=+&dl1KRE35f$X!kKTbgmqPD=f&)W6GT+4*+3JYGV(AFy*c=~K{u z^Rii9VNi&tuJJ}^#hM~oW6b4!FNLtwrVe$LFIuI+Cko|m{}O+Dkrkf$^toc;=1hd? zF`8WqmF_&re78pt%Okdac$O`Z;-}F;#Ou8Vwu$_n zNZlfy3yl6VXWS8kEEQjhQIl?7D6jqv{9{yKwYy4%bZp)#O>oGOx2VDmcVok|hPk+?|o)!?#Z^X8?YWJ6wxIpwZy~Ln!%~gE}2n@L#POR(?LUT&2n|;o^GDE@S)JQ zlC`{skV5n~od%^lPOT{|6{d+hbFN?ce3CQgzCgCCLGllEV9V+$x|tAwgAI30Q1m?g z#QS&3gjr!qIB7FMeqD4-==S(t6}Iw7h|(fH{Q+YzmfZwW<0j$T(D@dm3-`Xc5MGJT z&L9ixKZ)%jkGNI8_Qc#dMSJF|%&LW+UF*(VHZSCoA6$Gk3t()dc_0jCQ0~dCsG9Za(diNvO zWX^00f6?MW((@wzWrLAwc^9mM#?H=ddh*pe9wursC7c%;%#>U|N{QfB%&AxU)qVZlh7e{UQ`ZZ z$;yWzRj5?e&Xe{}2sz!^WutdVSMl@s-erXYek{HtWuWOC6L)U8N(9chMq$xs9`_}MVMqO1e(v)g+qN0v#Tm;Ft@y0U3 z&x?V68C!PoGJO5%lrfY86N7Y=-_1U7vUh5dG6r=L#6s@wz{CSW$o^xtdlIM&YP??h zHS&+#Mhx{RZ?@3U6V=uWGLL5|0XVMo&pG_>8yyzJ9zOhN`IPjkOxnSNA;Ab!-{*CD z00okLduvT?%7gjkd2fNCK%zE2=m*pX#0P{YwNcB|kg=RpZ<$N|f)nl+icJb-#e5~> zxI}^-#`PEsW|4c{ixlaP5*|~U0oRU8T63z<*^~=kXAi0K4caSe{?lSPOJoYQpIi6q zi3v6(gHb@@J$*Jw!kzP0IGg0hE;_|??vW$8RuSF1G!p)xZzhQy^9P2}3BE51b4`r( zP2MR*Y|iS1*bHMALUM>2HDx>hDI%K28nk%FkcZ@Q`BKw!rfd##n=$Y~(NMadI^XR~ zlkR#iFOU38=5=ZR%0ad6m0`W6q;2B84^?^g$o>dsC3lkrqrTWL&F9kd5jhzrI5+;u z8d-D!ru2^^n5;&tit-ETC#dZyJ0+>{2wrmHh}knGf9dInUrjlKJh|>9Pec*(54^fl zLd{RyR2dHFmL!7Lh}lw1ImAOReia!B(GX)f6lQO>XYr!um}_ z9)9s`&N@MlpLI49h3eb55$HFs0xwcaU_;F@Stq zP}y6GZ0JtO1K}30!=rOWMQI6eUVTk3ux%3hA?}y}!Hddlda_Yp7iAH`@`{Qapu^B% z(N<5jSt0Dn3sy^uQfO({Zj#a3<9lQnk7l1PmA3PxzO5dHk)cUYu1GD}NI9f%f|D)p z&$t!Sl+M-`&Y6OAx^%YPXI<=UHdF^5H`j(biG8`n>aS|c%LLTlPSy=ziv!?SY94*>7lkPciry9~EFl{hU!Rjzvt`K;oNwtUP4rTCw>huZDw3=T6#W6qMno^*LG`djsq?q&1a9kYkK4h(l=rskW(bCiree+jHS;tl*p~jJd++;Mf4>fq;9@Gyd_p&u( z?wl~0oGQ=dZt)%$gTdDLf9(}}0Vwq@2DY=+nk+h`W2$S}K|DLSrW!uXGyaad){F2N zQC5)9J>;Gr*3m|Z@*~cUlICKOW;!FP>rDQX#PmAT;zVgsNl)-ok&fEclk#S)HeH|r zl=30#krw)F;`bDqHm{?;ncubKQ^2Vsiy1C_GFG{ijHI{gUa#rI)DRsL36<;l%Dz#_ z-j(cE%WE<`@IV^cz)AXJr^ll8AhU@V#q#2M+;FKM0>S7e-cX2ceJ%>iB`nGH2drat zP+hEH#-T;ajkyj|(orwOa`|J8r9$-tH#p|Fp26~nIMj?nXeV_E$dO^S4$S4_JZHP^ zd8*Zqa74M#!HA%AQp3%!5y*9Tr6LH_~P3);!K+Zc`Q+Jq;#E1>L9#xUC? 
zHtH7)1`d)UTXkk?0x?jZOtI(b{f*I~BP6H~UKx$$Zk}YZD z(is-|z`_Q^j+W+>)1}Ws&4b~r4=2W5#w9{tqX{CT!B@ZvUJu0d%J$a;{uH|k(KVRJToXpc4Z5qxeQAhlzczkApn1=(yS}jL>VM~@TsR`NGIb z$at|$Hh=}*s-n97W(Gk*Vc2biEtRO0Ma2gvte715@6z$asaKe>i*}KU?w4JBgf2cJ z=i;W<$$ybL#(t-Dq9qe#Xkompz(BE{<;+KeiN&8?ka*ASVRorv2u}%3U%Ebb3fts} zk&?o_XdSJnvnF&|o>`y_J^kF#j&qg}k{}qyXzRBi{iaH#A%L5XLRS$p&rkRhEz=@pya(08_SjiI^;V!ZRLih4o z>E>tCI>wTSwI&&&(en&7?CRVw3_vNs&Ln{&BGo^OFf;*Ha|!N*v+c4K&L)N&4^V&8 ziYC_eQ6iay=gwzArX}6&3|ROxcU5mM325)8a#AD|mCwtZH4NGp#``6XOqHu~AzZxFl(fx#U@2PTxCmIxi}~Kq%Kbq6 zg_3sB*zj!gTIfv^ckA}q0gt9GM{#rTUg4IpQ;NSmfDpo@k*z#bCbqt{JR7B^#11s< z8Tcp6XC}q2*bDs+u+dIq_fnP>w=5jwxtB--n@w<2U7aTw-Ih!)4u z5`H$lCv@m4>=kw3?a?Jdd`6%qx-@JCqqCi+&%8- z%t(caS29{2_8I@(^{8R|%uDL!oqOclA;R(R#aJrT#i+K6 zC9)9QiGSbw3@h&Hv)t0Q{lh_2mbiS8LvQ-5UE!pG1m+xw-ZI%o`%P-WG(ay+jM zv9@1p1?ySx=`%DSbP1P$yl*(`hMxa@!OmJ=FVJ+3O;imuo|9mJFYacuw&m%R?^*82 z%YPs}iM4xJ>6tj-pPxUD=5CIB>Jv23>p1wFXZcec2q)H)CEI)9kG7_VSh@5wRUXfp z6`n~>h|S+g=68*ke+guwzE^|r!0fb+%8p{RS7l4hKEwLCJ|YHB{ZhyT6lPoQ`yPcS z-iBSPels~%xC=9i^;?uG(r6y9TUx$U%z`wuw8~n>X>SW;Y?pss`BWE0uZ9#iw*K~$ z{tdcxCrlayNvn_kZVr)5a-Uq0d-g7>TmB>`;A&vh*Vf(sCM8o_ltrFpo&OR)TZD7R zm#|bNp76`?B9ZQC$$n)&^}=Ukh5H^>9i$KWn9vmlJYlpkgq1Tin!BHq$#wCEdt+t! z&&_pge{FM;@hE-K41{!Q*imJtp@T8@zE0`&|KUTJHsBx@thAllY>Fj)_7Gqe^bxx; z%CgthSg;fP6Mlwa*KWa4WTW3U-W`lMr#+YwCl7g7pS3tTQ3|_eIxdozG%zKDfl@m!WiP{i74;b zK3>YSfyxUCuXO9G7ooE5HJe_0ET>9P1_Y1&s&Kp+abL;Yz9Z87^D|DiPk%#Uvf!(i zl_p`uYQDU5?G>im#1=2-T!=%voO9Yc^^93LpT#V6dB!97w?lJ1sfMB8lRUAgcR2oe z(C!1fyfXcwK4G}-nb?7B;je$@9{uf9^k*W&(fw(a2`S~POlWIFzI+tBO!Jrj02*l% zb-hC&T#^-HgQc2AO?8vfr^Y%Kh3@obS@WetZ1A|}W_Y+U#?#8W%kFeL@6}Ik9HsLt zVrALVn!CmK;?}5GJct>!pOgj-V~y<@x>N9_3(()s>TpQdMD0&aBcw89JEJ19KO~q; zAb)-!n$xxKO_L*$AQ8Xn^^$CZsDpC)jCMeittuON9Ctt4d;;#bh_VZX+>g-v41muPm(nvkCZPI= zuKOFaz+1~B-_lJRt?TH$#W@5A$6l3TnX7y?-u;T8o^n(Jf)RNGXXtw!c+X2i^PT{7 zXp#ypF61Gx@?IKn5l?SUJAQ}>}c;MD= z@%4W@G&_7&?YO~TqLu;b8dOYO%x1}hJF(4WO9PjfVE7W0Pb+v zzRM@~Vz?p#dKIX-0MI3)r|htowmMETE9_A zrLWEPrM;*t|824__~%oojRg*goTb^^1a<77$MgIi*2IQwy<`XjWfIX?VGy&&Cl{H% zHVlGVY%ADbD%YDK&#$C80FSB-z@;a%4N&zi{Q_r5kNSUCUWIQ?RSOZyG+F(sxpFw~ zPC>60)(PjMg=|I3qDTZRzXDe)V$AM`r}glZ3>DIK88CbL;82`=;b#W#DU1Ui7Hg@x z6GBb!6fa~cB)1by3O!_v|D=VKIY+lHQ1r!>3c}N7C*Edi^)vQbSBct)@?Rk}*vqU>4cBO_-m*$cl;d8OD}7Z$gip4c~B( zdRvY^!|f33J2UuIYGFQpq`#3R4Yhm``DQ{bntC-G7Kt94^o01AT9k+N-NzpP?@DxU zjr5W9Uls+w-D2z)1>>h7O>T6|Y+_^xB%Izl-cR`&@Jp)*F>*MUX%}6t=LM`WLw^5O zlSnV=WzkH$r1m>vV81e@!mn4FZVLTn<5;2FFjpdRUdm9Bv70@rCYqDWrqEMx<@qNfU(76P-C*a#_01T zFTxj3tmJ(=oXk?0VBb+nNUcp^AA7sOEI zTDoSZSw4C>a*|YF#-HL4*(4X-^};lDM$bbwrciK{iKbU`nu~FUt6=KnQy|?Bn>=zw zWTOHu84J8Lr*pj&TjC{piz=ctqMT!9Wrxjutf6~jBebAsLRZCdi*zyQdKAgl@z1e^ zwSZ|3YA%&4`9dR>yBs~PE9S@w26NCn->KM@__idA-!6sGG>|TJYamD_MGQT^ zD*5v&#>Weel6Olj^f3cc5`7Gu6EzMh-A(ccAM5->&X;Qooq~BH%&&>EEO0_PUwN-5 z-sc7RuL%U~tFs}ii9>qD8yfEw3r|XIolZS2m(D?gTSxNDf%&1v@jh+sJpG4bM{EB9 z@LnjIr~fZIEHv6P`L@VGXc%H;r9V_cvsYgqWg3Eq?jcb-qWb(Rd(KDYP~!f3)1Nf4 zjaWy$sGwqo9tvVVCN-?jGWou8_hPV_H8MYINci#FuqnfFCvdb;6vw%OwyqlO@jL}n zT*3~Ut!~!|f!$VmWQ$;-Xp5S5BY5@|u+T}hl>{CX!zx!o8?+m9;bzi0_D^K9z4Jc+ z0R0ttiUyg;Og-1Al^5t(g1dCGYvydl!N4FXF|B}D2=c>q^aH#NgvQPK@^!(qr|mgc zfLo;J?CXH-<&Rn&U@vMjt@{olD~H>lx}k@8l;GLn#$3NCX_RY6Q^Y8jMxT@YYmVe3 zO~t1RZ_VOK32?z`0#E7Z5M#I=9&uf0@%?z> ze*lcMqrkV$DRx~jf}^B%b<%Wi1~^~^#auD&`(e1K>tAQ5M>i#d!eR3HxnBn9)@%hU zpNfX@VT%zq22`$jY45>8gozP;f=f#Nfcbuorr2m&5ot1HkBet$Z{6CAwH> zjqV9eA?>5z+fjAh@l(0SNFUe2eOv;D06E~<=D@q5S(oAn? 
zqlL4xoBFs!+ohmZ19IW3cXfCv1eNVme|Gd!Gqa8NT;VrV0hPJs7isc3ySGhpdT08I z_9vrtSDr?NqGIl6inq8K9091gtTGA}=5E5b@e+6+)eKUQm}-YRy*wx%N@Hsc&hVxl zne6|4%GZmJ7gV)CM9w3k#P!2LhxV14nkRm{2gX}5HecTX#D=z0*pdDRcuE~<;5E%- zp&dQ41C&6hU%;eLTMm?lT-W0(`v1Ur)LOK%ZjktjEJdBgPh1uH{CZD@^tU0J-XMurg%&IhS2~$1B7I{#JO2eR8D!PL$R%7&oRL7@3XK zK!h8vMS&Vn9|v)k=mmyA0OS+w3AQ&u%)geYjz>XXOH;=X)~>NBh}8$6|1!rNFW5c*&Ivs5gslwp+~NPw%L@2Sc%! z2l_!hZ(~(B*sAC&vApT%#N9PIro7$ObpEizkHLYbHk~hx6mF$R!R?OcV{Y8+X05MA&Ny z{`^423mbBdTb?H`}VD57|VbiH+jUK1E2w`miSnWliKdbLgMLU-AjXT~drL)A-)6N(Mp?47vuaA46WtO7P7gZw=Fz}2}v7E>*? z5=$;;yGb+k9f^naq*abCUn^?WW%r$HgS4#akT4FtCZ~g>-r?;j$$GV+^cXczzTO0m z-owP6L5{3yj1!=)g$o4eq? zmxj#%pSN2t&DA0=2a7$u55P&M^3AkC?((;cj6sxTjkE+1`WFP)?S7AhXfKrS*;04E zDN^nx%{=Z`V@)cwGzI)FKS+Mwp}{-^v-`Rzr2OV6$d)a5voDpGytj~DV`Z%MV?;9$ zS}P$l)p)F}*V$f6aNM{-8fbCa1tW`JGClj_papJ7hrGI{a~m#HAa8IBBQq_~c9DLD z()iz>Bnq*CpjsX~4m`=|pHZ<%>C(>^Y zj?7hGQ9ig6^XBc((O=~nM%F$AD`EOl6;6eyI?zZIQ{ahI z7$Url_i-5FKIs{{VVwXLynhdzHHIE9Pp=0z5f7NaeJA0XVYeLyJInJ^r-d?wS6hFX})UOQV(ST^Lgjn#zc~)!XDsWT~(?6pJMH7{Je# zov%cT)h4$X(L>Krn@#sNqHz=9MUkSYFi)xlPrK)_S`F=cQ`^X{LFqtyKN%Dezw%P3 zTgMA`bUS4%y(YFdWSOwRT)^!7$rfdv7V~+=PLu#S=o8r`<4krMrn8<+>Oen5B5?iG z1XWCHT=FKi|H>F4+cAesTo(f4qtQ_eIK zl?`j9VpP5M8Ga^J@Z)wgD2qfSTgzDL$mJa%lF2BlJ1%XiHDOR)D0E1)OsLU?;a$TC z7QOCI+x$EoIAl7(F!k4;D!tf-S9W20`Ye&Lbh_7 zq&NMZNe&N3Q*o3AB$(o(-MUemA0DOwdTZ8u?-gkXg5bMtIH=tY)ULBX$;7w?Ci3M` z^g#Oe!f69KM&%r&TG(y~_N9=d{2wy(zlTR%`ToN?-@-aA4ZX#+Wmdaq1gb%AVGSGT z6zCrN1Tt8i5S>y1Rqt@ZaC5b}19^gNNS}a+Mpve!CH|;hugk))+)Toc2P_(i1dW#^ zF)VFbITHHLCLFd>>R0eTgmGk-U@gwMcj|V^9)sLF|f{G}w2>URiiYNpB)BCef z)h#r|Epp&H0%JkRWBwK9I>(5wHCIS(f~0Yrgi(NV-1?e^Lf5kqb9KH${{Ss8IX-}Y z2&^Wznk`OMxaBs2ta=<*^o}0`7<@?nTE*rvttR5Ehu)R6kIYV4jxpDo#B=1##w5YU z;>vJ&>spr=61M1F$7N1LB|6kt|-HrO_KeNGZd2yjhu|~4M0=6;eXx#0QJ?6GUH){ zACtZYY7rcx=GaKjekysM=De5Ko#Jw;sGxu|yS*0ZCz)B>3%6+=wV;c>7h(Z1u6BZR zO}ATlG310&Ily8`$E9Ycb68UMtY=;O=t&jgzBc{NzrGNwgxi8RKT7$0 z7sP!oX&G{Ab8hPbVY~BJ^nGq^E>AK7S|&dhm6 zwCJfvQ|0MZlwZ1g){)|S4NDP*DK0Wp@N24!t9$0YUyA+hU1x_XIM%L<;^&|Gu3x`p`o8RBZ`Hi9%lS=R#CUEcCzwEBhr+BY7@4IE>@5+QGt_H3adGd ziOZby;-~WGj%Sn1+>MQ~fYPUck%=@;(zgU{aU^{wJ^x2B~jJcwKOWu2UK$-?{8 zGO0-Sbw5yll?xrxpDA%FHzo0c2hyThk`yuQS0taG7$kP$x@o%{RW)ZjsOUDDurm+c zP6uJZuQj`|n#x9wP)5>5dsn($LXj#m$fu4#Jm#=(EUm8ZvQ7?23w*sKKafRPG?BuS#Vo>rIL5cK#d~ zwsW^RZgX832#y@!aCrLHJD@R+&72N1$?aZ~;NJpke-JKq*ngwxgK(Eoak?WDO351GGR#gks-uO6G?~an1?HuD7LTLbwdSF5pC$;t^JA$b z@Txnp;8)b2555T5X>r?YJ~GCksO$_aKjqd2KEwTE0fCmt^cCtJ3-AYryc1$Aw3q>% z)kODBH%~EM?A>wb1_10Ux&T+@xz`g`#jj^e-H+nW+Bj!{@cDhQIFEXxTRoSd5LEyG zxC5{l6(WpM_32COE9G^2pLUW%sWL0h{v&)&@F&7w55I~&AZqYw8gOEY@{(Rz0+#aP zjX{!G2vR~Kvm72VUy9!sf8e4&6+A`bn;lExu#dyq9*K7&+uM@Tok~1~{{T%5wOTe& zkcz5Pci!9s52VOAPcqIqrG$sNzSG+My?-vJhnsN@6E8_ue)H(J^*?*4uZ6#6zuSAj z9~rc}9TUb^c77W1w33Tg)18_-Tgh7!BXU? zIRlFQEd8cGX1y2ojquDmMgFaOqv{p~p2F%09qpBvjlz3f>lDD_sl%`Euo(yW@A$Lu zQ{oT7KM^;FynCg|V$4ajxyrr0++!GcU0H(;Mn-a{h5(BF8^esJCdE{8#lCvWXg#gp z``@Q-r^NB5WtrjEE*)~ye`#<300a8e{eb2lSy_5%jO*DmG}`IyM+|AmES2zWn{0{{U}a8hj;&KMDLm zj&BtBZ_JHinpxc6Y4Pxf%XShOVpJIs5!(vN39na+JTuke@{PK~{{WGHqxn9EpToR! 
zWU%><%3tUHXYbbs@T0Fv;(QVD55eCNbXfdD;GJ66S<|E#^X+9>0=WCgz!Fv0xIHVh zQ=V(_nv~^EPBT_(x%z!6!lXIi?w?dU8OODJANxvv!u|sI#cel;JQW?cg}hd~r1Rj1 z7T3&n4Yv`g0dN%UWB~4BTX#eEkgqNHsqvfP-@%^`zli*QtHY;3f*^ELle+%C z^ZD~IqlblgM+q)y-$!C9QP#ej{h$8;Wj~1j0JH|HZ8n>0Hk09hF~!cmaS{745#?DS zQpjhMcguu3RD->SFn>_IYySWQ*!WGWcoM@w@MZnpkFRNBLYlq6S=#xeAfL9Ph{J#e z6U*7NIRNqxhnsQMRhQR}9=w~A-rDl|{{X3>7e%MpG~<(ltC*gYkRn( zZN7E9ply~jka93rIA9wfd7_4m!^TRokW{Wg1RR10=t1dNXM8t+;tC$sz1O=nzt6hz zG|Kb5R!>&ZS}o%L0IkpK{DA9UF8xFHZs+@B!fS{ZRFz%t^yJUV%*mIyg*gREw4h)s zVM3qUZv^=B!T$gev@3rT_$OAi)->CW65`T9B8LDHj5uU<$lQzrDd2%$g=RS}X~Wp1 zS6JEmvs*te^GyA70h(p`<{80SORQe&`tFbBljE1{@9`7%e%BjL@twJSGHZz;)3t;F zZLfr2Hc)S7M%qCr*dv@1f@|ErW-r)}#lIZIuZ(weGsU66I9OtRq@{R&FX;QD&%POaF7PkH zPYpxiKLP1b>6Q^Hh6pn(&Z8UVkmaOua!F&&59v=0?V@+{gV#Nnlp zP!{uToQ3n_DhXf!c*r4!HnV+cg>}OWgX`Y1%QHO7hjEhST(a6xdq3-=MPl%oetm>i zmE+N_kNW7(3HK(6GK=yAbd5`X@vx__it!nx3=F50K}vs$UDE3Oj4~>CDwxPe zA1hP*-)H1~vEt9zN8$Ff@IJ%gK8>nh+g|9eadoHabIl}+_PdSbnXSVJjCo)-qNxEv z2PBjDpYfC6XT(o{ek6~>e-LcqiLVTjSY2DlC4%-C$OcIN0ElC73>X3l?m4gO55s;V z@b`;!du@Be8jY^2rrs|2F0AEQp^ySW+UQ6)AcN>Z2C}?w@bkfc5%d}SHREj_*I3i1 zQVdryhT2FYayQlGDN(dnS_0WJl4B@heyKP!D$zpMe^X<_eg5R}Y>__8&hZnlf z#9swX{k!2HX1}}hWX1NLfjr4tV&@XZtQhZ8xG7LnoV9*CbbiARb4 z9_o^5moMd|(kKlL+%dRu6v)7aQb$|{0Gtt%U%Y>_KkcF5kB-pUcw16O(R^yq;!Qq9 zEe+el8F{6)kwF_kMs1IRtAUn0*Yp1X;;#sJr@-3W{u}Y8mvf`)wxoHsmq=1Y3Z}MVM+6W-!K`R*ZfxByY~^?+jDnIz#a&3~K^*43`#s@2w>+S#!P;$mvtNbRRsAM> zj!(oGyrQd(uNAys^|}3bBW@J>{AvA`zu=&^)|VFk3HbYVl35iI+IWbeSA?hvk=>+G zw)JoM?r_V=kgz#l*c}&J(tJUtTI*UJ#EbsY{)6*8Zdm<*h8QIu?gSPyKGnsXArjcQ;jvqv6ZmOW|8 z$lM;ZM;YA3&OzzzNbr^!iyMK-`U(wOGqobls~+AI=hm_a^9PorX#+K@dn3-jAS;eI zKi0EEY=utju+CWLrEg7XbCRbwQ#%cBDu(%k4s%pyVkLdO3F5S_qIrO52$TcK2c=ZF zj6|r+r!V<&U6DyGPHEqvog=Zr`W)t*9OIrbO_&nFhfb6r^CR8$7^_a^Egk;=!#0op z6HPVb{{WuK0rw`qO$Xf{L0^~O4p=vh^r*WD)c!`lP)FS#L0_HmxBJB}Mt+&VpTMi? 
zg{rrzt!pB%$SP{pR=fWIk7)<{CcZv9qwXp@L9@z|{p89+mDor*85tGuf5!sNQ zr58M7+P>>WDwzjp$?eyg_&ei!gX7zb9Q=a5mnyTU?b!U=F_-Szea|9(w90dJrq|G; z$3aCDPytE-G-ABh#orS&zYYnktpK^c0H)vIMm@>(6%(3@xurT&o3v5V_?N{RXND|g zxV)5k>MC+Ad1sY32_rf2y*iiyCk1#{KVCDi~VZt z?(gI-%yY>mzVQ8m5;;6e;#k~dG#2BIy;{7yMj^{>k4~Nu!F$`Er^y&dRh7(**?1qV zO*FAV3z=|Ciat;|R{GQ;AjBj}(W>Np-F~$U_D|)K(&c;c)A6n(lX{!ItaFVX?DyuP z@|raRac)WHwra9TbqhRmg>WAcx}Uy>x%4@!LVU{66k)gck3;mYUN1xDZqUb#rH=%3 z^{+Shqa=gCTF4|hTXKvSouHA{y&8DaDBZzr}3jWoXb1TZbt-`9Q$UXl6DqYDc6k-5C2>fapWQyHDOiCQ!`x@wp zymmG`8dXMqE4Pyk|aUzz?5n|F=0LAU;2t^KQuM z#<{BdwWV(&>o46x{$-yI!yD_itso3Uz^+Di{SRvTYQA&O;tQ-=mE-q|Uq)BXXKwR_ z`yXGy-vw9ZzaXNDE5ts~D58o0|J3*BEWDQlq-;OB2~ql*lI5i(!5&;ufwg-K^}(WN zbMrV1zKNgfQJYqgl)JjbdaLnY)T`drpUG7t_EDPx8D+_Dkb_nJdv_%pYi`YHPN@?F z{ts%#mv(veJl9+*X=92|u!bdOCQOjWClzW~t(wovgcZXEJC1)ULimg3>OtG}6-p+Q z@&HyH@sLNRDxRhjX>L}LSPXh_D#9%A!z>_@8w3pZsljDC*ajn&wKrFkTgCEiNyDmWkHQ((5bw2(EFh0X}vF^{J;Bp6AIP1KG^ z@0yyyRv6mZ!wt?`Ipg!Fe(7or>NX@}zB`DCex#wn{OhB#M0l1nBkq2tPH;Vl;gF^NErlIuKYTUP0h*c zwFc8(UvC7Yw62?mFmsNT>Hh!@C57+qA&ng{gFCt&dsolq@BN-f(`5;LVkfRe8#0~j zNzWOt4e{Rg*7u0<7+eNDN%~j3yLntH7SE?@@SliMV_n#Lp#5v+@DB6P_H&t9o~IS( z1yC?DPBT$W^8AKC`Sk5cuy-{Xfn#4dbm?D0qU~mUg0+f=aBLoxVH^%KQ8r2G)|D0# zu5pK62iBJ=DOx(6c7x&>w2PE!T1SMCHU=Noy}L}-Y_&DrZ29g~k&$0B$s)3eY?(_7jP$Wn^{a5PzL;Vz6G(^CQy2W-^A7)gM#mAW>d_;m;aDeF{OSowpq~Z~?CE zrbGaf!LJ6k9(3NQ_bFF}Plf6uw>xoAPlZs#g(L2h&=d5lK&uSmp>QPa#bTMGNh2{O zo)i1_-=9uKPo+mCqB?n*DgnnF9C}uqjT#{1*i~t_$s0!i>H+*~YJJS6uB>7;vq%WH z2jx8E)mKG$oM3Mm0GifhlOa_=KgW}UjCRL-)_Rpdog1 zpO}-|+M{S7ak=E%=xXGxEC>A}J&H&1cgo>MJ5CA|;4fOMz9$ zeZ@~fR%0Rp?=O7fqq~fql0;4l@+t;2)98|*UX{Yz8mCgf>MS56#Ol6>_4pScnn9`~`MgvJBoFr(anUA0YlU)~t z{3qg{AL|XF_)-|;k}hs8X2`dNtOdv00Np{{U@&hra=&Zw}ZY@h6I& z0Ww%yKHGRh5)|BA0Lq6e_W)TMuT%RHy8kNPtvO(+H1G)7moZ%@YlswJ~Z%@(Lt(d z>{Wun=6Ef?cs^53i~)ty{So4)UNWAsz@ zRsDeVUlYls{7m?@D74VsW4yA_;&~G4C=w*w)zhVuh12m?9&vPIa6xy}F? 
zKGpDl?92Oacq`*NTlizevP0qj02vtfJ+zFmSzMnnV?k{!kCu``;}IX5t}+FE;toJH z{J+K+{MQqXf7-F-lG$5Cyu14KKYHPOE?-9ozq8%b-SuzJ{DrOy;vd6b1^iU- zT#{4`;wDz)1W>p*zyJ#UqlG+5Q&Ga_8|909l}dvYi)NRwzQ2= zNMe)T@17lv{^YE2O{4 zo{9TVe%!tj_$tzPL&7kB!b9Sln>iXcf)m3AR;0aK0 zr{@4EbNeHwYMOSdX%+UFaTV3H>%J+WaU^cJ##NM%MnM()h>$ap*1nne3Hw+4R`?S= zj+3NnO?TmFZv6Y58g)mBqjEgeg_(CkSPcB6F6^9Sl7AuM-wmUP=Edc%)7{6T^z8h$ z^gl)6&NX;wsu=6aJsP&2zDM*k@lWA5f&MV~W={+F>rIbQwIs<2a_aC*0Sh8DY>o=& zI1B@HJXhe??HT(R{6P2-yTlf&}#2#FW0&J!TbsRoxUM_fAJyF zJShj*^y_%U_qx5(vf9BBV37a+Bo&U z6|6g2=&)@+Pm)#2v$CJxI;#fq)PQ&l%U>FP(to$#jQ%8CYCaD5T_b2-54nNuq_}95 zYB$i#in1hyi-=b{fb#IgTa%m-`QL;9D8K`tudv}C4BVV?d8+>aw?FwV-iznh`EDZP znAzd7=dbgBm-(G9i+pF|&xk%Cy76C)^-1-8Uh@K5n}*3MoGPk}Dk6@EfG8x8IW>W6 zWp#IR9i@~K+|Ma0kwT_P9D$M*R1!`Ga0PnL!OwyJ01&=8c!BgE0a%HAg)MP=bG|z$ z2;?M_DmYhes=Y|&zj(d?e#l=6{s-IGYgYCPsrb4ZUnb{SxMp?|NEgogJZwrttXBZ2 z-M1q+CqHk*JYRsw=`1}pd99-!p1qgie7;M=ILz9ksV8Xllm4}5$Dgs^>@TTlT9%XX zi{j*x+f5Qff1_w{O7P7DtT#n9)UoYg%A`1nSYgg2#@0stc0d(C0|0P6F-Sn*_xjhs zpS1Vw%i%wS3#oV`z~ObD8bXMNAT^DXN)Ak}8|RS+K3MKXXRxuo_> zXs^+EpRUt}2P469x62jxxBh3=UlhJAcoX3FhORtS<>Tqf?(w@C*^ck*Zd{VuxeF^Xb z_LumT`#$R;e+^x=?uBu5E;S7f;m_HwzFmZ?VT=;(#vKPcS9-Fj75m}v6ZVz(d-3-` zj{g9|cLvu}i9o*6?pP(oyrg7+GyB!R$aKiZTpWS?;P?aK?~6Vrc$&|`ehky))HMs) zQZ*5y7~plnvAd9`%`{ z=sGutY$DP;KVfU7*ukGF*3J~Sfj1IP*y9SLB$9AC*1GU3=YNlXwBN&T+4E1flg0DP zaj9w^V6)R~!pU;`5VrRPhnE&WUF3B*0N3L994;djRyb@lT%|3N>VBh$uSX9DTDwUu zuc7of^Vh2X0Kr5*XsNtsCyBJ{>pNX2O}ZTq?9sibfMfS>Azw9%W0&2KNya#@ z*5BFN_OtkH`$6fZ4JLo=zY*G`X7fh7brXhUEX{6mJotfD1(aoR^C$!p^cl|u=6P); zTD)%_yt>=h`my5WoK=9v)O4x4HQ!76A5!4ajB`q-H0t?FpGbJ$#y^2y4}LiKaA-a^ z)7E#lSe}CiZXQLn z)~#Y9AQR>baDcJ5kg4dzsoL9FuuXqU6`QH)I&QOLai!|k(Oy~I#?ahb&Vop#jm8;W zSP(%SL9egjjxxq%)h!Qto8B+dK@KWIDI zZqWY#!ad=uLZahSyM(hyWCWtnhz-eiHmB@SnlI2HN;9!O%$!lm(?= z^UWxEr+wij^RSlhsh z_KSmUA(AOBowpXr+qxipv$O&ZPJYq-m49#j6XFNiyb1A|crZuFqPw+k$jb@FId&ry@UWMXli9QM>Y70&gkb6! zNFa=Y4nYQ;Ulx5?=l(tTYvE6b`f}KKn%B&DL;afOGqQVTZ~zOiuoNbH_Id~8^b&-Qdp&3uN#i4!1N&3 z?3pfqhsObKn7q2HjcEJ5A4Xn4RdVK$1a71-%APr@=_@q8Q+EXZ6=@I% z{I&iOUh2P7#L=TZ;Ib8jAVgD@E6#qkTIyjE22vHhd)2#;`BnprRar@wYK6!Gt{a%m z88Ddww!`<1TO+zl^E_fQKvraEA;QC z2cQ-CoACP5Rq>XRE(X#r5%KgnuhS56Pp4}9^Nl3_u}jgPqi`x}m0s6E)_$f{Z02bO zR3Xi1ReP|8+EF0Nl{ppiPq(t=U$c{_Ec(w?-H@1w|S!BN~z4W+x? zT)L!DjI%Mo6`QMT+E$4fZ9-tI6tGPE&0jD0ui_r5;slb*%-LupBr1W(9Xab<^()3Y z9W*e~sj54#AN)<83E{Eu^raVRGD@s6+4S_qcooIemlC}4F=unRn4FSNy+bU5L%1HC zQwXm*rDoozuS$$*uQE_c=D$FHV0PW&UlaL%d|NO0?VS^CqxBXyR|)5?;1435c%a+Nv2eQ&dbY8n4a1?TH<7Whx{r8JRE1&J z@veAlGYLZaqlfZ@ep12}QAyl6B!W1{zG^~|xyuZm)RC?b0&{`ZpNtV++;%=zHoFIp za30mdd`*N&;2mBk83qsPE6{6#_?%gM39GXI01O15pdz(^;(mDjj7oOw%78wVN?43e zNAM{G4z;(uNCa|&gU1IL&1Jd8%iEzWsouIGZ5@q21q}G(Lu?=T{NLPBI<4w^Iu5U&Sy{es@;#T;Qs*ORP_odqP%15f{G}h5C7Hr zFv)HL+O*8_s3Y!UpyMX1PQ|VKtA~)5IZ`@|Qcvbe-!KulKQSHss@g_nK^@X5QTKMA z>(;-hoRUA2n!9Mhf3x&?Bn3ab2eSNB-l1U%QmGE#t z4bwGXd6vH?+Dvf+g#c%h>0Lb97<)H;OzCk9j~P?bwOxWMhya=6A$FV-z%=HL1uh>qM(w_3zjJIY}#;0Jq=m1w-BVt8H%S&p0$-p7fvqvqE9_> z8je{{Vb?Y4zYftCP?(naigf&I$

[GIT binary patch payload (base85-encoded binary file data) omitted — not human-readable]
zlHNFWkU!QD$3BO>7N^N?Do5N;ZuM#xS0hw5NsuVUcOHrdHDca1WduT39mpL1R8P6k zsxVRRGgbb_D<~pWUt}2k>XyN&BF1Kwhv{B>;%^A9sLgq&ffdYo1~33wKE9uYcMWNH zv5_2~Ic^z2;Cu7USB}##i-pSP?v4rkYdRUl)zdPDQ5*fnISN4N zC?-UnL^jDinnJ-y!Ek+h^U|u=tQO4D$k+hjG31Y>OK_7|YUvr-?XGbi)e6{S)SBk9{=cPg)qe5NkN?vB z6^X==i4f&mk&s98=ANvi@Im@iBh7UolrPZcrh*Wwl7SmM;O8R0Ws>TC6&PKVwL;5h zBdOXz>*<>FbEiy&r+IwfNhdYvx~rA2n;@KE;8&Wgv%r%_ytHlebNn^<$B1_R(O=`2rYoj-qzk_l8C*6hd6{-gLh)cRj`bsqCmm{}LmleF6O4-9oVGBiv}eiQ zH~#>UruY5fU6;XEjeK zpL(tf4s+I*Gu!;O#jrRS9@NyDR*ZeCiFwU$&TwPZsMw1C0RB9r{{VKpYEhPAJxzNp z{{Z}X$?hw~?p(%LjP*6^Woqz}UC$M_eReGP4@7t~Di&;_y3pD;#c_B|#jJPfQHg z&F6XMCZ1)NZ0qXd)=S0O>iwRu$jdf039sDz5A29|f) z03A(IdoeVtGdqH~!iFJu^&KkWV9N{$Gkr$PAS-oVG9p>=w{HE5&$q9=X64bH4)QeRxMA{+m73Gr zM8S^T(wqXLIX?L5_|y^FEC5Jdm<~=`bxU%vM;iH1z9r7$owM#b8q1Ylk)=1uNL`%T~JT2k9t$f0Ke zy$^Bt*2+Fc6Ri3jCxiT9uXrlrDYPqswyh{@n@2f21L(wBLUOgejr!E%wdjb?Z)rkA!Pw_pbt~n)2$@9vAJ8@X&&BtX#>S9 zWUH<@1QE@A_HDzIaVgZItuyR+e~c^Oq^Q?!RNvv~ewYKATKSLR-^ZJ4*%L$YlX+J= zl_S3^vMEawfgl42M;!2{+rQGV&XXv3*fO4kSLZmq4Qw>sI?Bx-vtaSmaa7e>61rzI z;~xZgN5}d*=pHoDthK#8+i|zPg=Rq5BXaHsYhaPMVBnF^n*50PoBsd=*6}m^MThK&`_wm=d(f#MlU$f`zwea)cCV^!hoo8>Uc$p(2^In=2`&806A^fs(Vvh_| zDxbT-CcQJ_f5GpE{{R>KKK>o??voCoZc)}Lh}&~+WX9%#Ve`bt10_#58O|%zX}tPZ z1#Hh3R~7Zzl^dzv?*9NT-y^D?U514Zt40fxw?py6O8)?YUg~jtL{`~21Wu^SiT(=~_;3X+xVQc>YR`fn0{iOc@Vy^@I za-&0=u%L4XDPlUfz;^?^V<1(8{#wTiMlXT`c;aj6byR>>1+kg_jo|E%;v) z-XZX;v$K7YHEDGXHa6u|$0m4`85?MeFrX+AoZ=f@ugVz<(KK{L&Bd!`@V+3FHF2QiYtEu}08BreA-^KChL{fC5n zN0e=Z%_Zcg+x%bW_xC?A;ywuEZxxp>SpNW*`5%)X34S>INBAkN+v^?%)vhhz7g5~Z z>83^&SHeCDPZ&5Mm*p%lz+(r2kJ5h^f5A_*`@aRr7leE#b!p;Dl$te?>U5IY&9)CJ zP(p(-+)vAe#?W$ef%!S&j|=#N!J5_AhkR9GK9{LkOz=%@aJyVQWUFBGUYo0#-}>l$b{jp+viQo4DY&PlyLxsypBH%7#r{6j{{XhU zYpvgFm*q-}i}~7T3QlmjI6>^74{FBJG)-T`8jKn~gJW^2U)++g+SRP)yZB!5Ofvv=&h@U!5a zkp{WpxM0=1L3wDA+-uAUl4#flA|+>#LXr>*Di9lL=QtSU;*KiA@b(rEr%zY#q^z|3 zE%=*cIW8wTuKA^+_@woIhshtaC;Su2&q$Wf#{U2w65mpuNETQ;I5N>$%vo7Y%!=D4 zWk6M<89>DH*ap8=thDc%Rd4_Uu&K6e85Q#X0PR`v z^IZ4~;3U@kJ>jeUZue3%TETY$gKMYFv*eQG?U403RN09zM&+;PtY&4HXSmp@zG=zm zwoB8ZKW4&UaCtrz%9`bh-SvLGPigVj#V?273+y7)z94JXdL6yPh~aCe!%D@`Fo*$m z+Ff@Q1SmV3025tak*#R{BGWDPjSlwTP_VmCGT!FeMV1**@wr_IRvF`i)MCGs{{V>} z5`1a#Ys8ayyT@9LnvSD53zqpKi3dcAG4jTv(Lnd+y|eaf{ir@Bdu}M5&&u_e7v@+v-P2VaEcUQ4n@)`e^*2koiA}lv>W!mm-HeR?06-oS*JklQhV<_c zTxr)@OqzwXcMWBF5L`j#4Z&mx%%OnooQ{Mk$Q=vBlEzX+46FztxdeI|_}qhvvDr0u z>|Nv5FK_q{%^ypf@a_jRqNdfQ(o27zbNK^C$j(9ZBau(9*uUF*_D1+`rRX}h#$OET zcAgm25?L=b3p=?;x74Ja*KmMkjC1ZR%8|z2Yq10ZcMr!*spWba{*d9!`i5CQ0HnZc1Pi#40w;mdd;_n{3WK`>e{8*A{$#oGDz6! za5z;R*pQ>S75g#!F@M27G*1aM9y9pe4W_xPJWW4PBXuxiJ!dc7{n7BqfR3gpYGyOF2l8$j1t!B>M{f zed6yC@mRS_3zoi0TVH8E@?VeSe#ygp8A}Zq;&FDA`*dID`571f7x3n*qPK?iEiTUM zNhqvk+vP2klM*=yW78#ptib}@p zM>zur9>NG=kzW~<=XsV*T~?td7WHZEB>Shm`=3dV%P={e7fz+6Z7#Q8KhXX`@2s!% zoBNF~R=1YU^3}{!+s=&~u}Hm9(ST)P+!31ePlewXejxlD*MH$6_oDq{>zQ45R?2+LQ+5=9TRfMjW;~Q&1ZKrB%WH73%zHgT_D#2Y? 
zmR;mCF74QDp-;;H0JEq36P6zh>Tlvdj}2w3&21?A4wqvruWSkwVKlPlKQJ-erV}19 zhTM7&*rDPMO^nN>EPQ?bu(j{$<@q0&z6{JT*x0HsOX)7&zGvEhvj@lNzi4j)w~H=3 zJ9VH*1c?h7etTQCDtAk3F9p+bkU<$v0VLPbn5wq6&{@Q=TEvjVg9?$TRZ-{w;8A~d zC6$H6f2}eh-bQPOWK9ggIi~iubpb+4}Yt z6)DkCtv+cj-*e_4+C%ni_*?Ol!n(cZh^@5!7h6kXEM6utB28)JfJ4nDQ+z>J4jHkq zV;})voIkS%?2quT#{U2kwIA(|h#EhOth_IH-|;8YmPpO|CN@nWmm96gBr?UgWDLF- z1LEJcSM7E2JL3+$sCY(OdHgfsIA)Dw(PfWi%#8t7^Df zt@u}0v+);(?`G5W3ki~VCxN!QJZB^7GC2o>(2Q5^yf?ym#B!-nmMPPem%AP9r+wA- z(doZ)@|^EI%PM9nOAe%>^>=Fj04~2X`o_{cHK6DkJYF8rH0w(pG8Aai&fXM?0F9l) z%TO1bobk?iuYmsmX&=}R!e0|EA@L`|!DHbqWm;CewMaJDUwMSS?=CW;DT5R9uM7?t zaJcI~VvpKG;xETf4&GY$>%+GTE{upD#Au5Jo%FI6A|V#l3o<6s!ZRQ_+`=HpAMVWV z=D#C&jK?C(r4>6>i`LHR`t*16{Lj&Sh6^>otH$)?wCLBTWAlIZW&MFXG4SL5DvyiN zSa_peaU0uOG-gXH9n15fNeM~cIK#3GVBm9KqsbaZijjb-qY9&d0O$v`I1JLByw{zY zXE@BtyRVF!ll&K==yc_|ZW{qh*Q;xePdg? zj?&r%EYBgw&;iwn=m6x`!5<56;-nP674l zUjl2B-*}HypIg_blJ@%GhLTyBxi}<%Gr$8U0B|YqY($MHC0u|r)YD;0lqbyj$M>^? z`B&>XFAQPuDO7&+&&Tu5Da7OIa?^aY*H7~~`><`}X#*7t)QZlE;botClbxfIdskU? zXl^1gg$x^mlZ;n0eWFVO?@~qvPTp~j)$U54RCp?{HsZWUV=3x$`P6NCU>iiw76uCr z0LQ5!gniL(-ndcCM|j9>%xpJq#}$>Cvg~f?nv^n@g=ENVoMVDVf30emjl`?uOz24< z4nR2eHRo%oT4_$PPQoW6A)A3#Zgsep>N}}qx_68fi^)>R3wnTXMId_}B$`{!WL9;C z;4#U^1a_`M=ECyU>RXAIV-QtX6UhEm*IG$;1P!d}O}x#83I+fJryi!Zq_$m3*evU? z9&p?ceQVIeV<#4ro`;o)!YL-wJfUUOV~xwZ6~=cS-oR8*{kBlxNbe+Im2QWc-qh@6 zvW?}FGDeTJwsIHKHASuFxa#kY#+`Lo>Q%e&=1V9q_p zaaZjA&v>CO6tgj4Q#t#-ovJ^x!tb<^mgIZXvcn~&>0!$48CFra`;2$3E@asm8&RQz z$pe5&p=_KA^<5&(?3UM2xGZEY2dOQJSGUMSdeBERMyw9bKqImD z`d2j^%cxyXtIc^Oy~VKskdx4ijywJpvlzS5?8Dxa**_>lZQR|4F~K!<@(U|FL34Ww zd5odKOdMy_*6vLslc`GjJKa(UE@1u9g*VRXK@tPdH*sHOd^kaK;9V|qy9-2EuHeCN zdlAKag&mcoX5Rk*O_MQPsau5&m10M6Ut;_|5nA{!O|*(Vh_Mi%PXJ`si=0LK3%=*4 zmn+oN>86f03v$F^a5`kwi(pSkKaml7^wvH87a>*wEj>q1tF=$CL$h$v;XPWRX zne5AB4_4eG0G>TDT-GbC)!>p?mTiSY)LaN#4Amj?TWP4;DW@IdJpG?)8iDR{ge4B_JLiOjRQ8Fxrcb$>2 zC4)9e^b`QMA1U3tP6yogeiE8_vL9O^V>~}L=*jzl(ag`&5`g>OgaXfGmts6c%yk5qX4g>K{Cr_0;M81<^Q(8In+ z=ah!PA(xC+rlD&3bn;o=MdYfUp|=z7SW_%wQqeJGZ(8+ZCAv9Yj>YBsIDLhbz6n-- z_^ioZS!OZB*>2@JVxD4UfhTEH%FIG1P6aU~M{hDHp50%DW;h2P^cZT z)B0t9oq1b_k``ExC!rp-_{WG%Jjq-6SLV;@?*`*+j#vJ7`6D7&*=1$y38w8)(xzo~ zC1v4-RXN?pabE={&)1W^ixK0BiHP4$Qb5i#ie@*Qfl^hK^(;m+`Bk{%`EX8ZqJ{&C zu!Zqk!Wyfb)tX74CwS-nM$6Iu(5}PamA$ps#0zMymva4@;EZrX1>^OuKgS>PHQ#6Z z!}(Wf@HC?QLbrJaRV^-D{{X;KHT_YRul6cm;a`#b8=R}=75@OoFU_B4+{GQevzV2g zP-7iiKGfB*n&Q?7!!lzG*y~o(;yb=#Q-R+VE6H&f9$Z_q*ql@Ln%MKKh~u@E-ZhOU zB1Op=$Itcu0P9v%vs_OmXI;3!z!)4=$yVMeR(2VD=LeDxwKn9C<2fOKBR=)CS?A^| z)ES+9pRq}wO7Ux;#5OwQ*R)^rWhc<~uN#?_ceC)SNhi|1+_FwEZFfF)Hr_|0_zV6O zr-pnzW3Bv6x|_s$!`zk9^xU5<9OD2W!sM|d=ExQHzl^kx5O|-#QQ3G}f9#9d?SuIl zP3-}A*8NMNWMgIVVpHou027pYv*nE;opzQ-bhm&5RUN?O7#?G+uDnfKWSVNVN` zxnn8v!hY;|bd-|bZSMTKb~{hm@50x%-weD#;13+iV!pf7F5$UGmvp8l067Gc$rxZl?pr)ZuI@fV16Ti+7e+p}5PWu%$^0F39Iq@GSsHMMM-KHgZ2!l%1Yr^ua` zH>XwD=7xJEhK?TrPv54k&eh){zg~>%JQb)3rTH??a_c0Fd$}Z&UCsQ4TX_Ur ze5ac8--j`I&3uc<0yER=UW2GcEv@7&!tFJ_F3S?*9hHwMRJ{&nak1^-Rg4O$4jDt| ztxP4s+luU4A31xwfsIQRBA5v$K9w^z2TbOoW)a3rY60oet|l$$K)c+LxTyA}iru2i zJgq6tM&LLel?o*8zb_}fNovv%!azX>1RA3xhsafi-~c_nGw)QOx>av30)1dD=!XP)lj%&jgAAD%|nfq0TFFXws8qiCC<8k8bbwx>OO_>UUAGVIB`pO0)f& zVX&l*%t^?{v8_)mGlOtdwXk0mKgr>j(FqWlT?kx>7V5C zS`6bVa{2eANa8YLE&l*UInUuxUffw}dvhA5@_7Rs)(cHp()nZXnvFDQo8(+)zB#UP zOY20sc%s}HhECvlA6k}6^|(+}@OlreWnEiIE5<{V2-zbad)l*7>}$zwM%}|D(z+=C zHa79o6bL-!5v#K7;kM(NyBsdSEPR@l#@b74nUs=PmhLgynvXNF9MOlo%FP-=(Lk!l z%{zJIel@uSf-qoQF;UJdEr3g-BgpEHz(L3~`0Zt~l1X5F{$b7};{(#Ra7xE4>Nlz- zihnueRouIH&OeoAZ+I@^vvx9hnSU{{kOyx}RsHuS>0AXU%ZE|e`ijfdG}X9~B$n&T zWj`+%{5YC+ofg 
zXf0({ztI5!?XW!9Gsftck@Ao39z6)@UU_8#La#J(F&q*5mfuaZD6?Hk2g@3?gevYmSo9p=;}vj&oRnj`CUa1WoVG@` zud8X=C4}0Zt#b{XqzYPD=aFNL6nAD~0IxRqTk+4~f5tmo`@e_U5^Iw>s zvR~}6@OR<|kM%h<{{S25zAcZ!_wspKC7g0w{{U!3Zsk_yQ7V(;5b)oI0yFb z8RG1oj!`Ik9!SA^(Mi5${Px$+@;@ohczZIdnMaD1Xdn{plH4s(%{j&Jz;OA z*js`kwzP?1g*Ot%YA66@i6_*PTLj=|(!UkWaa1uFx!1$fRN&r`T|V>mEDaj8C`OfO zb4e{(;GZ6UXFrD@A9N#Q;|oz1rD+7Irs}a|Ph!#)Vs?gAl`^9!`54a9q?2FHhsF?Uzza|R{(v{`d9CM9b}n~4+yK*ti6=g z)Hk*I->0Jd&&RmiF~Q<-9JFcLYk6&dm+HV|8v391b^WfsB!15?Hj$>m7QNxSxjfl4 zo4Fb>9J@;Ix0nYpWNqBLSq|P55=rv!0(=qpx$!$nzSn#M;cM+@PSb(&?qE={mtIOn zT(6qoa#b=%exp84+m+cQLX3=2gK$GB!3VD(j2in~b{j6tV=NXS#&+H})jyW0_Ue4j z8n!b6<6f2G-p`}|05kfW_(Acr;qS)J4#(mT0_u@nA1zwy&6q9Ko1UpOS;~-m^dkfu z5ni%YB~p$9DHtGtLHsNEFz}C!{A2L{09#)W_#auk(KQuNpJ=m{qh^nAZH%RWw`YX@DkYbLej~J62a6dai7q5ymO7Ng)BIz3b>h#1cQENXrS0gN z=SiF_dU;uh5;P?g7LlY28 za7DR-1A)1TEZ)G7e#5{&GWV=ad&E8;@TQwKx8iM1(@E3q_W9RWvn9MKyzNlVPy0 zm387ZXNuxo$Ca}jG--i@BYe%C0)dnHNq2L5s9jBceKeOhHx6Z*X_(0*j@?!nlIrauXL5YsCJr{^uUhWDCVVdNC&gb3-FRciI$f>( zw9rc&a?2W-ty)o!HU}dy$QT9;2<1j=`PUc4Syc1y?GLMSMSPomC2gu8PP=ALW zKhtjY$t>V9MdCPP7*MB(?{=!7Y<<^WTOTmbC*-{s_FMP|b@BS^z&9Tkw9B6i#cq?u zwotb-Nibq9mhgX~7mws(>e*o4SY6L?zR?2eWB5Bquk)BgY$yd$eW!v6pZX?m}N^yhUi;vjaNQalD!w3!AXT<#JL z-eCX&6;D5wMJQW9CqHT6Z=)K3spM+N4Kab&z{wmXB z@f@S&vT49#<0s4A_k7F(b`zWqmFwz5FSUOl(p=ojboa7M?Qruo!^*^&8fR_oA#CKi zZ9afUu;-YdId zQ}>)l;jLT-4dUoj=22eqdVJUUGvjhjI)*P0@b&5Px3#tV`+Dqs=le%~$$t+&XnP}h zsUgyQO$j|>J>~fN_cFBIA5GD$E%dD>+AB*-NY*GIg#t$!D8N+%u&p2nsVX_8ju@Uo$=48UB5ae=y+U?uczBHQVd;6EXy?37CQ@Q1omywC? zS-Ca!`6mij#mCxK_hb2=A)ax492{Lb9`w$S$A1w#OYs9!I+nd~-dH$>);1ytLF}vC z`je7tl333Hg+|Hd$jZtI`68zzNoK`Z0 zjp0>nbJIa_KA&?1=}brt8yVUv9R}Le+8CNP44*DvKQQcS^~4vll0Yj71$Op-L!P3j zJne53VZU%8``nYe9lo{AV`S8ph4^QV#_6qQh%Kap0OgO(?VRGf2(9hqvBjJ3by5_& z9B%ELR|^%@uDGaBGJ($0Fl$)|u|X6BfZf5xAmv0h`i7#nO>2D&r4Pzjs49Oh-&%dm zm)k96dwVx88)I$&+&xBXXGPa+t`XBaWsSD5Zn*W$Y+Tw%>}9u*lQ17EISMi9k6P)W zRnvDz9wL&dqdcC|Mv~g$uWr=FUxkmH56-q;1a&@R9li0^yNm5I-|XIH>oD3lJ#cG+ zju}jBh{i<>aNeT5tUe)3@o^YAQ`F6ZSn^@CLJ`k06V69!^v^2!luZWQ2!J{6JxzI2 ziL6+8Yq?#9c<-O*Ubq)fZ7of#ZM>)fTwu4Ttnu#ZEgEJ20Bht?f+2Bm(D_PajRetT zkV|`Wim!8F{h;UjLECwiSfgWpN2wfR>0KNamP>gD+S?O^#8LFet!L^Mwz@^v+fqH| zXTaXXfKRtTE6;}Kr!9&%b~jIV6n8J1QaHB@l{o0x<$)gDR$iqG>azWU3ysegRg7`~ zKI0W~au2T4j@0|D(Ojcip#^0-%5Yeo%tdU~&PG(zwEN2&sXo;^ zO>8!mLxF?b3|HDd1%bce71Ohvd42uOe1ob^(MGXFZ1T**e%Qb+N47}M=U-L)D|^K7 z9+HI8j`TyQ|g*c^;VesW#PkjFLH75C!Nv*5?~S79w>E$Zn_7i@0Es8D+)@{AxLs zo$&z(st!ot15rs33bHXQ_`yA@l&nfB$>dwj=F1^M%Y{*ZtTEBEO&)VPLkwyVsb}B} zfBNd3{Fk?~iyK(cjAdDxL68UIQrs-Iq)f=tI2%DupnW=Gtvv+dw9 zJW#hKoL`9ay*F5Nw$yF0xZqqV!vXZ}E6X+C4UG~+GnaT-w#LXt)?UQdxn5tH3~*f8 z4ZFBJ^!3e4sOg%1p%ROTm&lLijYk}T(>3T(#JFg4%93ZzQd`}mVb(a(EyQ;$KVJ2f zBC(L*VN_!sje8Eg;Ym~eCR$i|VUc-+uif>P7H5>-VIj?4wMJV3qG~;KX z%jy7&86*G0b6?cicl(tu@UO`q!kNeX zj4S^Dj$fJgY&OjkIy>(NAUAqg?Z80~n8Yta+?-WwaukTAk)B*ZFbVBb=JWyhlluUk3}5k9ytvI=nthACzi+;4G|vcYdQXD<2c?}oF7J}od89zBKLMm8Nsp)FUXOCQvm#Po*LO9*ct2FrEq}FdBt^5c7=6zCAOzrYa!x%fjqw-8 zXz%q4eFwod(%n3hqDwhkfP~sk20@+Nbf~FSMAnua$%rkabDV+i+L;>59ne`Fe6TVZbAo^R)u58b@eP}~>$qU& z=}{YVBY@li&(^qVS8_dCcxc645ty3LMFp5eh;z`B_!^dJqYH@{e9qgB4L#gOSangr z=CLH3&AB7bRv6(5u1DjF%~a`QSD{8{lTXdvvy&lij6*IDrYb9<_ffo;U`Q7QK;r}4 zR2L|Bc`=Yvspp}nJkL7k$b{}HaKj{HiqYzhd1%WJtYoN-NX89Wca7gv$BXCrx146Zhlh83Uwlp%raWeo@M zDf!#;Jk=$4a#CwT0IQz|YdH`O-0EKfF)wo|Kx$#wW5A-o5@l5eJQpY zQ$lVd7lByef)2d(tfvJf%O#;uoS`mgyEEDT7W{kEY?d7(#)~z@u-c1xwT#Kz_ki;1 zJx4wHud=l5Lc>+Fi%`>UBf7SkcSkHt9T<9oIIqpw z>y?{HEgiG4Yl}I*Sc%3tPw zh|_kC{MQHIpNLu~jcpoBGS=5o9kW`@K#>oq!6BF2ab4&+uY;T=2g@a9eyL8RD#cC? 
z>7N~b(Vwx$!+(zpKD*_%hvLB*^7UIV7wrtFRy4ap2%0rJdE!L_3g8kC&7ZSx{1d-V z@b&yYGWg=q>UY+&$@Xmq77|#mmgQ1u+;cHt1EPGY0r}Lo%D<~5W|{yT*SkZCvsySe zfTvaaH}|F4K8W%vaP|gx#f_XVCHH#UxeOMz_K-(yYZ^lgN)kBG1yxlT00F=NcCRJ) zyYbWEx4|6@>mEJT;IooQJe4g(d)HT>Fm{{YASG5DY2yT2It>rk6f)vnIh zdAJeAx!V+JfFp@AcA|g*B%HBkKKBOjT(gOLdh_L^p0;VpzSimL&m)PrqB&ajG@*Kr zzKhq)`WyC+{jfeIe$ZDbVCVi3uMI#XMba#1lNT!{!~U500(L^bE)`I9+n#IYJp;sA z?}fFCZxDED`r}K~tny&Hw2`5jJ;Q5lbs&s_LG=}oze@Yd_8<5s@N43?jU=1mPl#>q zF0CSVw(z0zE#$g(Vyh!gb_r2epDq?Bb1w1#+TXh3p_gP?T%`(syNcN>x9jWGv+{aR z9h_sM&r|o0X5FpmevtmgzqDt?4~^OsQ25`%b1l7;4Ilg^7QjVgc{~9WqeR4h`SMxJ zM4)a0Ag{W>+&W|0vh?0sr@+-#?VqspZ8io5>+JzE*oBb96b z;g=x(cla;<35}&$d`i8#@t(HYkAiIGj%_;D7ct)6sU(3YA@MhXHH&Rt!caeX1A@`FIY|@mM&N=9umllZ!1b>D7ZGN;Z7JbsRFo6Z zuHEg~oLIb<0gtI1O<1Wlt^4}-M>pY52zY0~8U?q7yeFpH>3U6)?1IwS0z`oFNCT-S zl20V^YvKO@+TZp^_%q{uB7cb&nx&t^{Yu2|ywwUKSlyZARR-=cu(XBSYC1BxFNWMU zf%hki^?g@B@Xn>JXu3@L?e&$r%?6un=^WQiqarkE3aDe-C!T{OfPb3*0NOL-_r*Vr zUNX5pJ@d4y8w-c~4~PC_hQl8&8CU}IWA8}ACnsoCIInYn^1OaOG4|MM+>ScRTfe)l z{PaA|Eyv)nxJ5$|Pn(-at649fPnM_8H&I6g)b{bplcZ9-Vpz#2;TW#v+!W!lk&q5L ziej?^!NxFuDr=HmKvrgRfD{r*{43e-zh{4qdRKu{;!XbmgmoCS1~0nc$MeS$1wM3< zee{L54YComocbF6jjxKs(s5XtiY@4_(!26M6Qzf%SDV>Iw6)Ui$oix9eg6Q0fq3)b zW{`X_FSotfgSL8pr{nDgYYQz( z1e!V1_pwCn)+wEGBb7!sD;_h7@qDXrd-WCS!nHhZ7Ec9NbkdY;?ECgOXG(M`rCu97 zoz?#UQ~H+pH}E^*zrgpMp7HCJ!ARVgw6_x@DBBSp&XHr2>i+YgnwwCjUU_5 zd?bTMyz#Du1-LOZmn$5)L=dYG7HP1J8y%`6l2J^ZWmlX{xU465aCg_>?he6&!{89y z-QC^YAq01K5AFnaXK;e+zZ%q1)mh8y|C9ks__^Lj#UQV2LMFP6Qjw<+@lT`m}wS;L^xInua+z? zqwol{-@F%ygK(5d_&7zA2ImRGN{PJI$T z7p|2*mAusNC*q$KA{MOt8+pU67UH?x%t*1VWrv?MYoB0f_$62p7l%WjhA`yb-{yGb zc^hGbc_E6x^}%sCC}3ax%Gr$Bm49jG^rNx+J9#obn|AcLQ0J+rasYI|>?iC;VGrj= z;=Z0MPpK;*in3-V<-zNGBWwf+q1bsNc7OzOF@7FJ3o(Rw+27X@;y$e95ey}qv}1)j z0F8R%S!LkCpo;vWa9yYIHx zIz$ID&mC9lv|Yo)xF86l)%n{#DLlb{6nzSMfnNQf5A0bkhIa@T4=@tZ`7|>Xzlq46 zWXD7gC14XzvyUF*;|E|t_M!gP#>-6kZDJfn=S^?V;P04GIE9Of<-{q9!0kUe&MAEs zU-=&EA&V5rOJ*nlasJAWcVAdwSALOcUIwqL>X9w@^kv#>S!BzrdS82e^lk-&iyHhHqh3O94hQ?Oz`~*t zevYM^=NTN;;X3o2-><`k#K?YXWMxWemUv$o%K~_I zAxo5*1#5I={`8ZLYVcBF`Wb`q{4WgEv#Cu9sy7Rd{^NTYk2=xKiC`JA?FwM z0YVL3`8Ho z8DsN8eN?$L=|XjrOtDx0Xdmf~QaWw*0b^@R7!;OJPhtWNwpj)kqCTAT57#G%P`^aG z(xf-~b4mgjbg9tq3a&Vj9iE_ruf6L8hj(ai@#0vHmcViz&sglQ;(YOV4N}Aak@T^s zkOBd%Ge6NkT)&q6fVFO-Oa~&YwQ5QcSlfvKHOU&^Mra36r9?spX8hle4Ud441CvTh zS4pyS{U*Oe9_9A3J+R1G@F7@6t@-8grA!OaVH?4fY~~FG_igf1G^L);w97iN2Uf5d z)|EQ5S`3H)%gw$UAAU6_!#sYJ+Jy5|_EQ6?5^qCweE^?54@=V_e0$iJ!m14`8m3B{ z;| z)%_Mx8?IPrPO7=|M%qeU$jboX-=o7re?p$HR!duY^wx4Gi6B{5RHr}&iAMX{Ln6~d zD*)mF%fle|_B7T+FF`P3jLd~zPRw}I5MxOqrR9{l5I|YqO*E<`W><|;CdUfgDK02R zv_^Uo9Qsh{EJfdO`O|vkc1WnAp+%YvPHvq1nHbUhGTIUAtIP535u(%BMqO2HequH) zkbFc^R@tatcX+ABG(-ESBf&pyw-m;Q|g@kJ~D z?8>x%s04LGCpmRIoN#4;2uK_$1GEynV3v5x)tXnx8(L#5+PcLI#^K17=}-zpbuY9d zK2}_MC}UlsZ5|ASJW}PtJAvyb;`adt&OcfUK+j4UvzOi=f&^04d(%2b!#^`ca`p>~ zx%wEh2l(!zws8NHI|X)hN@XV1OEePS`#qFlo-V5wuW)&5>1&gI+fVp4VpM_ zLYJ@9%HK}>><9g6;<=1qeImU9b96TEU^$ouGVk_*;2%D%tQ(?U3ygb=5`f6kxAbEj0 zwA;Vy)dh6W_I17(c?yq(aCNd!VaS^bE-Pse7ew^@_-KtZEU!-&)OsaJO#0;z&@ot+ zrO`Ek!JxHMo+I^5mi#_}p~D1yb-KG?(;ox1qWIEn2 z_k0L_IY);Hcq$&bnG}itay6Zf^8_9cYlOZO3O#Q<)M%Ca^=bCeHnOvRcDY?Rg64}^`d;CCKM*g6oCb!ddQ&U zd02;{A3M49@aXFmpgO=m6ezIdAr(cFp)eAkm4;zikXt?D=eRe>$1A{cdcAhdAYzr} zIV(VToh4_A9uJLrbmC0#meWi;uvw%#FR*o7?NjPD~o5WSDA{rJ*(5C)J|fz z{z6B`z<*^pcu?Rb*H^vK6C)cS-W?*KbpVFkrm1i~abBC>Vh4{3v0fy zN%^Xh8w{m%m_g2Yh&x2ZDRtAYX#Xo22lj}jdlW*OdW%qOyxO;p^c=f5ao~3JLq2zW z)&KM6Q**@+7s6IUw;Ld!B*?vk>aTP;`5aLIGv0}Yb0)sxYeBl&`#Jx8Ti*8AM!%ZR ziSV@o$G06-swq*~7bSLr>bN@(B&kf_JILk;Dq)OssrP_tsgCLi?7*eiaWuQe!n~iB 
zbTE5wE3oTE+(C#PhlqoXkG1d7pHujLxSHf7#e6V_LR!K;Q7lJZpBF(xiMksRC;52> z#tR0MyoI-iydTb~{{h6g4>|$JtF4BCV{yBWqskvJt5MVVyt}YgUOj34MOxvA2=hzo zE#996WvcNZ^?MkkyW!+;>W7EtiTeDp{addsjcRcL_HqIi%Wj~joc-zg69JIwYse6T z`)->%4c=s1Y*(|(&d0)~Kf!JJ0gNH?Dl(yPn5lMWOQJw%RX3C0m1^PjQ+?8-y^-qb zb*{?twOMDUL*0pJsjtixi97Zue*)J}T}Hn2piA$17}YVHwmz1_Bt!2O;C z7V2pRWsZ8J_nz6^)pHxpc>~FhF7ePUgJb}{KAna8=25h4lAzsr(blX}#IsZqi z6G>Mqdc9QH2rNp&<{@!g25!$!|Jz&6DjJ_SQx~_Y3!8L;=N6nmM(jLRR^uN4 zGZMC5{T4Vg-qcE=(uTVh@VD91B)M00RLA$~gx@HR6jTD8P_Gu!%j-7Jm&2qdb4mv} z&{pXi*c-CvBwT-(7-PCPPf*27Ww_=&j@MlO{SBO#@pO_TMKiSJ(bLXu7u(SWV6)Ij_u-qmfwD-;RzbM;DsR|jD#{$0OYP|05L6mWh8;!$;Hl>K zbY|`~wVBa3lr=Ihtj$upA?L0&Sv>Cv0J2%4?ust)`7tAC33dQ?lIKf-^3LO%NnPtO zkS&^a-0&*{XNWcLgeyLPi@CrDlYx;Qb5FU=nwqHXKnf8%VVi-PE5cCz?pFe?CZ~(C zfA6^g&o_Ij*W+0)@;vf4L_b|K=jm0{b&E@14dv3!DUcNHbM&lpUI4MCJWvYPp$?PI z92UC+ntCr)V`EabC`F2~)hj&aZ&|Ucodg<#QueHDDVFSAVyomm#b)rmM#_Y7`T?I^jd+*k^FH_Bt1e!(T=_F>wb7@Mx*kiQdMU z`LbaznYr;^zzpYMuJ-5FIj@?bMDpt&3WiM7Tro1;D`tYYX)SWoI)>p*YC#RA_*3ga z?P%JO1aEuV7|^U`TSwwaf_AQHoo|R}ru-Z4e*YMw?@6rKZEkJvCT}LnDSpfkw|ieI zGNUfvgFdzJNBE`18Fp`bB4p@$WYX;_+YX8Zsc=uOc2QcvFFQSUn>3{f;W;f`oaap} zbi?L#f1it&IUmmXP17ZZ&(ED)%BhkNK2WVcyz0Y+aBfX?Zm`5l49Kofm?k2^Coshe z%6MnlcWqHRPsOQM*FFhufsqGOXjjaLS1iU8zyXTnNX?&4;$*b}23g12c_cE3jep%w z1OBF4cKq40?mlAzfXKoyUpjl1mL)r z+#!MIkjD?nArYFE6iBjI5u(Ujl}alY&@#yX(`1TY*28BI|EKdL5dM_v?O1KCg{ix< zK12NUjNT|mnL&%35XX;PVumrl`mlL>@nrF0|1M#j0Sw?sIN127lzi0M-(VgC66N|r zi8+3HzDCTEhmoA%`Xcx)_Bkbr*>8EJWbIxp(|Ihd@`{&mP$DPWLm}&cCa{|N@ zl!y(b#l%xm!oxgLd;I+i7TsE%^5%zs0h@*KCq0ZYQ@Pb>J~`*@hSiTEg)hpGqR?-* z?csh%6;?**Ekg&2HVf-`GE41O*2nk;?LF}W$UF~2>@cL-7&j0iLhPsT%kqKIi##~6 zviD=j>aU(u3pLMQPTX}&RBWGua%k@3LwG(ev){))3ASe(pZHS^+R-KrX|LwY;Qpa? zZ>rJZleJ4l8i`hgkwD$&*Mh>E_Ji_a(a=x2#Nk4e)~}b3U!AcVq>&qT=+B57T$=8m z#bH7h`bz_Ycld6Nh8-gW=;Hhwe~Cu4#!J(AKb&kK8ylqSGlEa4Rqz@821sr91J`8s z94}vK!;No^#FxfmdUTNCkV<>js!O(TDZt&F(+-2Oa)qv3-rpp;EgxReq_&f=10?#< zZ?QxMO`VZYB+;h$ge{hH@7f)C??g5B@~W{ShJ3A;ddypZmEODkNr&Tu|BTQ+*T}wz zz`dLdc9kaxxx3r$8(2(k$iy0oMGMWrcqh=KCpiS{A>j+&{z+2wBx~`!GQPQg@?Fqn zmR7BPN!i!2=kk@2&epSLWFFr^RyiT#-ErK9he;eTfwfky%c2?7CQ<%dka&+}Sl%a1jtYfQ*-3bI#ln>6qDd*h}tg(Ed&=O${sq!Ve zN;@?lw%emKP4zA% z(D1mWy)*c#?zi0UbRFshDacA?(@cv)7axTA8S|;+hjwU$<0p74Qg|o|XCRzWT{_*o zIi6Q{J4zC8Q4HOh9t?xYZq$Ygslf`ovUGkI()W`WMf&x*VtS&tHAKPAlB1E)CEzQ& zYBw$|cB+0dpnC6<=Q)J`93St!svfspJiX-Ym*TMU@SZ3d)_h6$XELN@F`;fM=hJiN z6H>pt2?_DT4+cVwm3y81@hL3~qwVjBnLUxgXA>CL?ww|1{iihv>o-vRQw{Zt(8tg_ z*XPWC0Js3mQx6g9o9}HqRLj@oUdOkTQzV1At{9X9e=`!eka>*x|LjK#R=-}{&L#M1 z-ja>*?y%qbgEQYUO6hZS@biO`TbOMBVrU&}62k)A>J4A8-j%>gFR%ebLs}8f(#g%v z#k*W7+6^w<+mu>HEGFn*DDT&f%(NCP{K)cw6UJc)^pmRFhLds+pzX~$X)kS3%Bf0= zrV#<$(2He}r~5n6(tJzFzhJrIZ7YF~G{T5{UyPW81rT=zfvH2)=|G6+hx=>%q2mi} zVrVOCG$F8jZ(GTp^$+Ltf-Rw4B1jRF96w=J0_h9;xXJLV>Gc!zm1wYNOW=@E;s$kB zV(9e-(gl~Kf9Vdk!j!>qkd9^+2PW#On#MbE@INW+20Mt8pZt%7PMw^gTZPqUII(ROV=}Y>BYOOcj-Vm>i6T) zPP|yb10ybh#3Bd~C;!NPX8TK~B~)D@_JZDdcm}~GbP*cm^x7AApp;QZDxf#{qQB|+a^^3Ja$OyK)F9N*t%E+~+p*z)SYYw)-AWAGa=N!zzKO51E_3%hAaY1S zP_SX2R_e;=>|bD_>*&n=BtD|w;)+2c@Fn|Q(%?eQ`PU2bge`!Erk~2Lel4n#&nTc* zKxC-LVRL@Yz4*0!}V8RM{=|!NC!U zOh%?FDEe0!>NxlEF(};(C%G-(dGG{fOJhr8B0|xZQGdS5!Sr4Bt}Y+g&sIa>aP{ z7af>N@DI+xF)FfJ!{(e({3*TA_98y&&sG2r{kEB4q41xFpJ=s0y5o=0&LICv!a%hn3EWC3} z2sTH5P9e`==$!aN>5|C&Q#dn0NotY#W_P0gttQhIowQy^zqgoOYTgHD44{$w;f$g+ zh7D})!pove!cZe%QG`>|c#elcm!rf(3FAmcJvJT^I|J6$#xaiw72zxt6-5q$F@707wTwY8mKPO-g;Ic5uX-L2q$>>uLRx*<*Yd-t0bnlv}wTj#pN z>n(p>^Rpo{d4;*fJYg=aK#?h<2yI-6J)zY?f{H=dzmia8n#xd`Gv_sub2m-HgYEjR z?MMCdCSRRts|R5gnI%hLXtI6*tcQwY(O^^*YPy0ARq>&Qrsv z50(UHHqL^*W9#v-{n(@zSjJ|KiKVWm!d$I1z-h=C|Mbh1Zofr{1uL!GC=j 
z85J*_mLUCM#bIWAm9N{u;CPHJ+41%e?e-aRCb1({elP=TW9tD~-lc;!FO23o%#PdS zWWVJAdm~+*&*4H7OL5QkNli2YGUA~#3iv{Tt`i7Cu&06A*^t4A3;A+XC~r_>0Yg2k zKTX@W%=o1r^;d!~AdW&39U@008)|<4#vA38K8^drg zFLvmiNu$5{rgD=Kd6bO(o1)M{3z@`w-smcF(PE}q+;qLfF|4rRV(PM$&(mFO< zzNloncnd8zV^YGCTKrnCAW)YUFei$=dNQ(KmC~d1Jwbf2EzRUbUCpH!Lgs| zCT%DqyL8LbS_t+zLyX=-2Fl)yb4&qRf>7_Qem;b!MV$whm}i))*@_$r<4*9=z)J9qrAn7RtjYk~em#g*smGj0qq zLj*HY@+&~0EfG$> zRfL>O)x-#@x#Y+W4kc*1D#ujR0tzcrE#CLuLxnAf>qv~c)TB9R9G zq&Tdvbx*;+zT<_0rTYIAXFeCBEwslmnfjh{UMje9Oi=)%&>rnNPcGDx9Wv$6(q}pF zS&@rL59iKZLXuqGo0Jo&qfiSo`(N_)T_fyAVyjix*(dA31_ z%crSB$Br_qsV8gbiUdX6i6t#a2MTv^uQ2oGAJAja7E#K!mEgVwW9bo8Sv~CRVa9u) zkd=bJH99T6-RwkG@teGXUd{}w=}w6s`<*B=lidK&MC&3xPFdWQ!PB9@Cl8;Se-R=r zETgM($uF4q0ctOwq3}|ZL+RYkv;Pulm$MY}heXW3Yd?$Y9RCZLwpanL>7MX}BQ9e* zxSi~WW-{y{P65BhcP)l2zNe}HX`ijWXl(S-#xI#g%(JfFWto!0Tg1XAw8sF2^0nMO z?F}_d?|LCRUPl@SCzn|X`r|_(>ouKvx;fI@(p$s&ucdyKjbksq%LJD6uE;-5PGH!c zuQX>?H|tX53bbMD!wrK@%DFwB7Le|Eoo?NX2dg7iu;{(DC~r;T1YlpY--^h#$NmEt z)(SD25=HgIQjQXl5wx^FT|GN=&7sp3$61K~2T1&KVU%Yn#IBCwljZIe zAaem099?qufJBWgcZ)@aYy4d${`Sa~ZdnD8H?}Xeytk|i*KR3rVb+NcjqX(=2YC~t zqv^JmodX40({v;91@!fP6cwj%c50K|FIH5WtxiZgpk7EP_NK(87fgdyK1u`k^1J#Y zJ{J#JgTJy=m-p1%y?96ab{jLsA!&(TP>3l{p7>NHLd0QkQ#wtAw|#m1VG02uJ;qWs zW6{Rsz#Re=3~5Av{offs{&%U*VKUS{jzQ;|7egc?#Fsjqu(^uCuXij8#1l_Z=)b%B zpI?6Z-q}9xVBWHnG8%JgdD)dO`#c-^z7^&`y;-ANI@h-gETvO?~1+C)Y(O(|jjI zQD>A}3LddQk~Bo1jTo%w2lZ==9C<6*?I+}EEUZ7jw~Gpw=9)GTzu34s^UNUdbSRa|;me$U2 zfz5KHMczcrtuvEo+k;rDH&`Gr6G<)l{sa2Ah0_1yW+_t`N@8Z{|jjkK0N96y2~#cFfG zgr9|Y!~T-MrrXTuD`OFlWG>0eH^Cn|;wd|t)6K@RItX{8a$-uWWObFq&a}fs8g9_P zNOwZdj3!va{45W4Bj1)psBVDup2g>Edta|;tfp8b?VJ!$!-C}7#K+`6@Rs}c%0xt$ z0`S`->|3;J{fwAMzvn!-d_mYkur_H&x|0jx0S`mZ3TPb>8w{+E@{&Eqa6Tocw$*LL zf_w4CKd>VB+F6H;1L}dqDGI;4GQni(^MQN6$Y4djcsVNPj^*%_TJHA~>QgJ?hT2fc zwyIcYq^Lq&-IZv#T#@hpap{gWg+LT~*>w#e_||A4gBs-nG#ndG-M&({k+QCy^s@Bm zlYi`d#5Zt&OGdH_qw>WP+H4w z{w2Bz&Y-F2C*v#_n?Vz>%a)8W#0BXP1ku5q#9Ry>+`VxJHs377qyzn57=rC$a+WUsUG)vIcn$GW+Wy+Xt}NzhRENByJ=r`$ zTe~EAJkq6!W3QSKAdXLd$M{V3`Ss)0^l&$^N7%Xa<#Y5zn>BD0Wluq;&J> zEQ?bI!5jK$&5&XBTQTzvsZA7nsf<;e3suXb8VBvYIZdNEq#qPwi3r&Sa9DE%B75gp zq#!Xhg+@CF1m6P?^gYO;>tPeNsUV}+ST$++g^_KFQ%QBeGxil;)43n5_sGN9P6igR zEAt<#yP~H`uFh&*X>WIaEN*(N{n5>rJv1>kSQ_#(1)~yUr?iHLpcwNqfbfe2?YL^jqmZsFa0w@0qPsUT%>SUu)i&m@U7)yAUJW>Q9GM_=THdO+c z+m$Y&h-ee5`4bDgP;CcHyBhnKTKmYYqsmU~T$R@-w!>0{b1dix;eeL0k6~jGOU83= z(RhhWm19ih?FqW?CLDnbf2WGFs+}l0G&<4)egCj*5hwb%%2EBg9{?gd!JoaU2{(GL zNVqm9mAHwtG_!!D`1e4i-POo$KTn)-P)3F$mdhKB_$mh;_VzxdW6ORPC}+%dJi_CP zJaT5mk%}wf{|p|xD}ixQKz8zOW;3WxEqD-Tn#~PC=tV*qSdlJfKIG8b7}onaAY=;P zIH%i6oH~a%LQZP;1L={NOlj#0dt*x%%ZYK)$G^;DQB`fwp9KRT{!ryzX4Zmm<>SRf zt>9*?0SA@Wr7B)Z?1YVit8wM`Ox)WmDyGt=@}(n@@Dd+63!l*2A`4S-pzv{_oBSHXu>HwGtB0g7F}to# z^rEqJY8;L8ESiNqL7T;z{?@cv!n6V*3qfh>36M0#3)HO$$v0_xNV-t6rXF`pr3mbM zjaaUqJqgIkl@4bjX5Vp0Sgu*d%tv^e_7bMgw0M|UJ=NWnwD$@8Rn%S?y<90DmujB{ zJIO+d30HKjIY%hBW=PlB<4^u!T>CCpy2p#iQQaSUdO|!)BOaf`z{d6z5Vfyo9JoU@ zwZq@`_uJN_(~qOaibpJL>G29p>Pc-g`}*nt!U_Bv!wmADlcv<+r>M#}_rjNyD=WYb zNzf&Angek{YzwF_;YE{c_a^l0$_GkhDcV{N=jyHCjH+hZH zQ)wq|;Zs&e&`e412S0!che^Q(PN1Dddn>pbsd5@NFL%4EyE*;mwGGTIl zmU5Oo-LPlHW)5~cXYxRUVK_b#mEjb9pdPOfgta6;aCl$k>4}@9YtMucMfFYEzY|oU z+dHM}H^IO?hMux1-!Wn5S#|$<;Wj~`%wUuqnZAUkHTO_~t>8_$ImOV0j*s(Mz7l&S z&NE?C73k4(zXVa_5j`_fB#B1b#YVw3-}esJE6}}HtYK{SRp7_%K5?W8e)lAhS2UU; zPAkctT#V@V$J~wLX5emQZE!eg10x)?*0TNs6zj;I#wQOTBQQVI*d;p{T0IF32|<;7 zyY#nSX>a#ruZcQ>a@y>yUi~{uu9HD|i>i}G>@RFR=1d8*m^Dc9FAGk2GU(^Yg>Glu z=B2M2FWG2*@Rg?hwb29|hcuW?O=Rx@I~6yDfcAX>>z;V;L#~#v?-#>B}v*CYEvbAGFY?HR)|CLcv^MK|5!;$_(JksY2=XoQp0$ZXK$%|a(I?^ 
z4Dshp=_LoxfeCTJINA+1f@#Me#+-qg}AOt?L1V)gM+^3XJ7K4)8KakTw0M&o1 zA8fxKenXys^+oaZ^2*~hL?Rh@CC&uBLSRatDg(NPfdvLEbnrR0TvE`Bu+9F@o8P27%LKea;aT^?!bGAIgDKqvtMHR5E^hN(=Y$|dr*!E2NGvy3Z!~5;$CH||Kty_d;1ti zOMX$!)@ElAaB`iszN^zP&53o25#j`lT}KEjx08Vv80y#d`GD1!-}rd&*~0> zN&E=%;ex)X0>Kvsd6+%HDQZZl_+J(}dw+cEW+(=r0_ti6mu~Ot_sT&Q|5r?6t9R^y z%wPol7jcDeU+d9@t55HCLk!Dj1s@0&V$@v4U=!e}pGXuv-{NV9t8rJ_dDReV54`ao z5IGGp!uR}@Rn}d73ckbVnK>9=of&4Ay_hU28Vih$m=XXjhztMpY$=%|I;PvqZr~#} zd-_oE4(h^=#$spU&7yAsJPP-rx)>|$)V-sijt(A$jfw2 z;xKM?f`aMHZfuC12ee3#Bs{f)8IT&>`EN{Qxnx{1dEIrRU&M+gjU4l0B$71M<1|t!hR9wI% zssfLmsFEviO?G;L6D4Z^`gMtCWdcO7ymn+tHuM=t*d3<^P{Y1YNJqBKQmuoZ4%>UP zs26V0xmVsLx!~07##Je)sX3p|~Nh$E*6xpWRD z{<^ej%{@n#o^Y$WL>1SaFW5hfv)@nPqpF3LvM%ns=au0o2?&?8d^pG_ZlsFZPV4X2 z&A@QFm$jgp-00>Toby{W3s3X@8^05}Qg=SY`G_zWT^#0Z&pOrVCA6S>CRZlA>MS7m z^BX4C*!+DC>~!jErLzNt=BPqm(pb6Vl`hkGjLS%7(TEH?qQKVg%o%g!1)Br{q&zc^ z%9PD32B8UJ7MEZ>hcB1T>K_S{dk)_WWCu}Fl~VW?ut3m_GUBq$LOp*C((co1e;aid zh(C+!N}`O+g14z6&RRW^U!H3gzrKD>cAK!26Q_t@q}ZCP38s71ZW!bROd=6m~84l^vqOx4Nr)NGKct@Oiwf{y=Wq2_!g?)mQ=F~vN&{xy~3 z{>49)vubSIXqP%%g0BpE=ptDKr&aHvlmEVHFC>!D>-{m<29&jd1 zlYCv*G!+!(7bfSqS3Zs&CS2C}h?LcJc_&aF``615j`q?`6Qoo;V(3jgGoMwB{eFkV z1t*l%0GEU=*4#x|J+GaJejRexJ=++lBz+ZS@wa8vSlnMajyaY-2AKPhl=o_8N;fxk zgdFRE09SGCJEHNZ^w)gtcy2ztVLi?VWRcHjJUP2o?LDcyxV5Q7$%`a^d&drQ-Z=-! zFt|jCHwCSyjk`n1tXeXoP3ijI<$c)>PhA-Ibky{s_VIAbtzSDGyJY5!M8W8 zU@W4cF6Cjy5B1TvLt8_vmK=JwcoFtv{jIvI>6}p5%~rr>lr!kW!*q1$txZ5hiE&}- zC6aAWH658-x|pT#c=O1`w229oqr}#PzMSU6b{AxBtqaWmefIRGlx~lbpI;(}jC(_# z@U1uwRzpe!s;UA2;KRA2yNbX0_vE$;Mq-Wo6om3e7z7w6uyZhxqyE5yKEltNBDgvQJ~^O1 zEAIpp^zKB)j`Ghi;bN@`Ktf>5!prC0@k<_m4HQZ861ls^{}50cG4HS`t(e1xG~!dk zk&|-xEHSPmtFDEKDPRJiQ{?uLwaz>*tKVU5MMMf%3H_{FIv4zes!}$r!?IZFCr&ur zto9hsBJ6IYIgRFZM!iF)*k=0}JnqhSi(XNo8AeRc-x-h$KQQh@2OZZ}4+_V+-(!sc z*5glYeg-zYDc-m$oQxb$FHFZ#qif6K<8(I1k-1)TgChR}-4onJcv)1+{juEgN=Uz# z=1DeOVs3!{v$~RDQd~`ZCQx$+<0dgu2x&C%p$Pb z$~9Q{m!WKnR~lv!m4v3;yZY>7BpY6!B|r?X8iXQxM^sZ+e`91mFs>LW!-TE4Zg zMiDx9>8Vq#Z;en%xmq&?(WlUc1#pj#R#5Yn%GaB8Lz?M zo&Nh7j%)&&;DHYb2sMhCWfVnFIh^^*zF4~`h8r;|h7AB61gH)|^!Ls#bB~|4D37Z+ zw(s`eBzg7FiWnFiLV?Bka7RZ*)J@Pa4dKbLK!`$uz|a8h)NhEdL8Aw zz9Azty$)*Zab&DZC&i?~@h-+}?#|pe4DwgGrtC{jEgz+pX1-{{iLnJlFa8c{`+chw z9h8i+QA}t;{h|y>db}kITjKqJ?^A^p%!Hf-90>ir^a5AmZH`DXuNpi;%bTY<0 z^iME{q)Q_~!b1u~(BwqQ52cB-GFG(io+KWYP>|+vAl^#bd~S8J6M7=|uqhLo%9pd8 zUc^b}6~mm228co8_j@sHEbSOy zqS)+OOLVC_^xLCxv|nd2{qEM5Zpj~|GINvHSDJ?2<;FN*DdX&(hS1j(0mODUOfPe9 zsBvR{Ml$#qfJNR);~aBWAHM0)ocOsgtX|HyB3h|b3vY7dqd@tQ@at=AZ{Ya6EWhbc zfH!~3Mri0NCXnGs~D#2%8FBcE<=S0jsj`<;)O=t@nUv(>&?#^ zBuD7rMBtSMb1lnW{_2k?4ut}NJ3;Vz<>d}YFYB^3YaG+T1S36J+YO#L#0tZK1W$g$ zm^>{N1n(?tTkH`kp=U3!@pn?c7>fh>a&vA*{-;|6S@ZAL?9FT?r~XKShgxr@9k=26 z>^6*6*Elo>DO2GY)5iR#84hLIyKidye?ShL-^PBglrFcWSL=L>SOd$6hcUN65M6@$!$5uF3T^;K&%01&Sn z@^B#95t#2}*|Ov`wX{iBO>e?|>5h9df@mx{QQ;u%geIf$ zWS^_GJd&Fxm^-070SAaNsxVVCb`+0WvynqCUwTvFjCm-#HmKid;ySW@*BWpno#PTy;Y{; zq*TtmV%pxkZR-?A64}%i`KjXJ!q1u#oBdnzIEBkh@Mh%*|H_z>)zW5ob_dWj_0=M0 zi(kiNg&x`Bkn9f|TC3U;rfn3DgsMS@yP6!M9)~_`q)mm|p^zIm-g5K*0pCC%zeJX| zEproNdf=RH3dg=HP%-qz78fJUfaLtGv#&ktQ&qCH5v|3kZR&7&IdslEHmCYk_mPQ)a6e@Kpba_Rz>jj+B}jrQ==S#&3yj=<%;@0#*$q_;X9SNiv~;* z`4~8_oIK%PzFSd7mAubC6Y(Ga(ETsSG^897US1^$y@nX=FjR+2~z&VZGSw!LbJz% zN`Q`)EW-@L(XmZjb@`tSSF{Mh#3;P&;V`7z4M@+f_ucOy9ZdQ)e$XNPHaZ(o0{Iu)PFz{F&K zdz$D<#~Xnph*UE|pt<#|8_hQI>wVNw76=CLImf+pQEycOW>VguV1GL4yewmRr;p55 z3<2BJ@t^UfQn#~@Ix?ODIKSM;@jLI9#%%G3q#yX{uMoi`C>b^OM~gfW9rdizX-l$a z1Q`DSgy;EJm00{aveG7x`%YB3yCfZjepDyhKi0j>w>6-tyY77Te2WtaaNvuX^A}^c9xGyRBX}jt5mBZ~(?DCFVb4xmf{lBYflr 
z94S5O%xfY}0h7!v#z`6csc+*y+1G9c3dH%q>%po^O-!eA%sfXT{{U-ADh*N6glmQe=hj+cEgl zS{zO;?dE%V$4MjMxa0%sJ*qi_2?D9Xa7g+Jt~}gfj%#mbxpgpgPVY+bh08iw>rD(& zF$$B9mN`E@6}`F|?iE*YR~+{h;p?%{qkJ;3!Od!EdfoPdg#dfoG0&DxW{eJgMn8Ce zvVQ|!P9p~QiaHf^maO&lh%Q2K4>b&y%2bj$to<`yu+<_H*vaOn0G@;&LJvdrtCHQn zo8^d`Z}+puon%ZZcDOPtxk2#G`%K85Ap)pz5eYlfpdGHVN zs&^Lu0P82qHqb~J$owi}b-m51#I6ob?Z9NOYGOp+l?a01}}06NZ21x{j&NUi|el1TNZZi&msyD}gd9+~Ny#Z|b9ea~`|Wc|Q4;C>|LoJ4Ir z!Jo^E;-HrD3u7Et2j-CAW}sJBmCQ$!#(x^CHjbA~EYeEh-3p8z21f_igH;vp zY9`C~OtB~c3?Is<&GxHlf?5P1@>u=cnx`4Rf;epBCUigpO8k?G(tDTriHHq@f(R$_ zt$A!F%t-8YDQ1S|>kvo_1Ld8|f=&-U*sUXGAh(ivq-WZ>IXq&dg|D2m+-(s8Jg`3T z80u=&@W*v(Rz=*Ws67oOQS5}G%%xZ-jya<_Vb7M|GT?h2pIWGu~_@W(W{Iem)bXwOX6z0IVS^2sDowB|vHHy^qO z0~F&M0(T&}xq{hXk~lFW{BM*DFRx6Rs|;6IT8v6-MeCX1`e^k8{Q|^Ku8_S^9MC zV&-eN-7avd&GK#aEu0F_VzR>5R}rerRE)1VQ`4tf0D1dp5?@_`96%EoQ^5z;tJz+k3)?S-wymAq?o?9Sx(GR>hN{43B7b!uJxpwIo_~b=-&*|FGsrPGOUl=Z>VC_S@lG2b6&XHR_C1V%MhMM( z$@@Y6%Krcjzi3-H4~Y_8Ggh+RVAVA6vD|#&0TSg%nLv#|51peSag$$7jzs`sysQ>7 zz80)&;pXEdvq#Y4@RV^hof`4HUqj&^+0*tI_#GEpM3Sc&vSAh{esyPx{lbo&zt#;HfG&XxSNhqw0yLUN(-9 zz3_rVYPST4T79R>5*7J^7RKT^#eX*c0JWEjzCP-o6K^zsj!gDfkOXUeI@U-2%kmd; z$1SoPE2|ykNJjE=oMQv}Tdyeit?+xmUl}|#H--FnrbmBj?y|=Fo++W4JneYpjI4-8 za6n)f5~m=Leclk_8aYlc+Sa6|o|pSk^H=`7(c|$xOCyV1%F>XkSeyVd zPo;i@e$8L7cZ_~3iys*LMrk}d;a4%w6aa3#)Zle<`?9Tw6FUy(1(`{}QUR~Kz6O86 zKfWFOCGkDai#$(%{{RV<+*2cI33qHEg2hnhb4Da|nqqN>U;^M3P)RlV2M|<28jJ-1 z06Jp6>xukMbBERPi>vQ9`6c?l_$SF^Tn5usb1nY>m2LB958ywCUJ>|h;p<-vcrQ$Y zOt7?yID*80NiUYWL(JfAmP6H@$t03{*Kh_p)Rc!E>+#B!B{{m1cW-m_`f!CfsKHqw zy$y1{F7daAJQJmzS6{qHBf;|1Ig(T8ia9@CgSoFU_@nXLK=3JjCrjI@>s*f^3G(72 z{(3M62mO)Ht}Ee9XI}9Ki}fjWe-zwFbu5SXXzjToUqoJtJ%%gX$}$Sr*(Wt)iaKGtBWA+KzRle=dN$VwClyrm)K3m1csHvI?&>%C4~I4P1<+;!7`;js`i;*EJ(X4?V>L zDakZe@ELC80S8jU*XdQRypcQ+#LA5MV59he>(kRUa^X|SZ!m69IUcngv{vgHy2b}} z_RR!IyAa*SzS-1eH{&IEJbQ}77V5+{_T0VmpU%1~w^0$UL+w(@axHB@Wnj$H1C>(0 z!}YD&D~;-MPqsQ%n+?MRaC(9t zf*6--E^r6A$*Dw`O?CDKl35Z$WB{BFL9IBZBFS!27A+#2f(GH5wD83$uwV}uAoExz zOQ^0@T4YOpimm&z_=Y*4M2fS>%G)v|{#kX$USf5c^=|IVjp)ssay(>?20mzskHfm#%5w!@zmp9nfY}-%bsfFm+5{M8)(yUOidy#0r$l}WY{W^NXgUnol^Z2 zUGI%oK3r~h1_!f_cYZZ`W}apbD#sTeaxsOgF~@Ic0)@%-?@XQHb;2BzU393WxyxF! zdli8rs}=*Gz~YF@d1pO&q$Dl|I?@&cA23|{*IZG=msT*y!45$+_CLd{zk+naI3)@F zYvba8uNC$OfP+Wiy)$q3KdpTK021BMg}UB2;5(lWRU%htIO{12juc-q{g}kTWo}Tp9Ko|h0 z>N-`N4&Z)NwYoB82;XsF++gCMnI(X{>foqfxKwrZ&sw)68Feb#26BDsmDiZ<y9Y6{PAI_uuI$Ufr z3>5i_o<>D#rO?d9vWghNmnBPdR8T4RlDa`Acf@h>a0xZ7G*eB$jUrX(;{>0;bgpjh z2L~-FWgSU9D&x76MSBShA2q@}W>5J2h(326qyhLEQf%7hGdj-5*62)y zfp!?c9+>v4h%Ln5=~DSgf=SLeA4-<%O|+IQ+epZ9!wxg=P-&J3k<0z1C!Pa*xg3T5 zb)8Lg6WrrGZ#em1aV!mbHAEkX3 zUCOAdo^~ox=hXkv{V@wqrNN^Hzg#~adLxbTx-)1`kNN7K&b+48g5b9SPI<3h@q|WA z6}kTama+c;I`T`HcBoqXPmN#vD15(Ge^2;Hra-^PFVLqsS8~hHoYZ*jND!gsrGYco zn)vhd$vp`oKx);!v&k;h86@_rF(%%6dRIqi@vLgFM7z73;L}R($2%c{&N#}zpcxyI zed_!c-dx>-dVMOgGBw#kuC70gPamCV!33!Lq>#wD7q40j(EsAd!+Y z(yk(fPzUqPRJ6-mjAZfOtzH1)zoBq-Er+DPy1yg%yEgv-f3N;2er!V9Mo&RacUFsV zVYP<_m4gAm$@HLkj+k}YLEr{Gah&WIwMZR8(%>--}OO3Yms%?LLz_3mq! zl!%vc1dnR!{4C1SNXy1nvc*5#vwQ?^h)nGAYk=0RVS@9_Rr%kNPpx;=!+3^I^x&B2 zqn}#f!#hhu(9C6ZW^?mKs>a~39ASa)>48d|uJt$rs3NM|sghB+3=g_oe4zT(wgutZ zt^;EmvDe=f>q%VjTTG`k5lb)(bG&puf~#J`Z4^lo@PB}I_p3tA-U!6ka_%Y7(Y`tRgqQXC5R-9oQxkz6BVpewvCqK<;Me_27fxScvgKn?PL4iRDh3s8goq`+?LV# zj;914lzw$yNo89rzdkV*A#wtoj+ES6FmIbf#Jp4*T}oIAupwjoa0O6x@?zbbU;)4x z2kTlMD%8qq(uso{B5cC@de=1#)Rzd&fTKKyM|Qk^!qi!s^B@o%6pVO>&Vng7~0IoB=j4o^6n+V%2&$=Ay3{1*!HX|;MCErELu*? 
z`cHs|9crB5g;!-K4lo8sKBpbMtIBkb7F=meEphX{Ezq~#BSFBpzS?X{k zmXmLfA--1YkEd$+iuh`jr1`cz7*&MyMr`_8G^SRNg2Onf62#G~1svmw=;j?U{zXM9 z`G~5k!8knQ0+OAOI9;67Y)4)x@<@s@><1aEH`;q9)``Adz@KWTD#FqR7%VGTG)F73 zj|BTJ2@#i#ixDO`8LdkjZz*MEa>7>KgV22{Pc}v25E1!MlTOkUC{?U%})<{%h{+k#X8Fgd~gE`KVuhfX_HW|1akxP+2d<7W*;XR=rU>3-Py{G6G&960nZse-nD5n3X$r}JIQV>ZzKNA zw@9KzJEhtP$@Cx|YfXGX8blRQo8|#cdQ@S+_=rF`V_??^i9J-fhb~ zQHD%pADf@ZRSG6l7rW+bksBn1ette$A-iKMq{{ek2yLJ)e!Wd-$7t^I`NT37-4+Mk z1EBm3Az^bPgA=yV&gJ0Zg3R)5Mcc=yz``Pc2^f5Y_Q9)?Ng23~-BhZKAE@=FCZhKv z>|)i>E(zScW7@5Q0a>MxmT0g(RQZ7V=B3KS*^RrI;GP#>xW^SFi#+lN6Nr#=gWjRI z5vzH!hHeLzr>0l9AI_;qdo9$WDb$4rjBr_f$6-x7kfK-^^IB94O)^#&7BCnLBg+MkH1NqgawTerK_6Xv(0GTj0u_vYv^P`N5h|u*E(9org+NXuPoeziueL#g!)bW z<^KR^ay!@6_Bw^0qi-GEjl_2l$^#^kFlAtU#eQi^6}0ONr}u(BV>#d-U#)hY5b>9Y zJQZ-48Z4$uc0awck&lw&uNeD{?mryY!RFj9la)*_+`h_ZysTDXorap=& zq-ZxB0ZIlb3t-~B`^VoCJU8(7O#5!HF8SmixwMS_`a|^o5$=Bs)kZRtQI6!)oM5@8 z?2g|4`qNCbmj3=_g5DL~B#v;Zqq#NlAH_e~67O4@PY-wkSuL&9;!Qyg*N}G!k;0!| z52bkzj{YThzv4yXKBaLbrkbu=8yJaL!2TWM_{i&%o@k$` zes`X6cQSNveL9bZ)m7T!OuWyM&c`f%rmRw_z~+*^_MI3~rm4jrpH{6pvvZrf3Mitq zMnOdsQvj%BxQZuO+~J(y3Q3-HWD!OkN%@IvWALXi5BG%vY@7@n(r--TJk&;AtP=#2 zQ7XqSW`%}N1gXzI<3P#XmA+bqm9VOSZST(`(-mEW(K3d0C5{GosNOwG?F%GwqQD2v zjJZEhXd+h|GEAu=$W@5>hSSk~>n<4JjW$36u1g$r+L|~fZK>dCQEG%xa9Tptzyh48}4fYxF8U7=~>bN6q}lxZyYW2z`ILz%`cH8 zs_%9J?9R{*M;#4x#x_@GaZ2DMYldOBXpOK2dht`n)4H-qo?bd1TvKcTk}}PMlfeTO zAWfML#e;TZky*EK%3VmERiN1 zK|ju?xn_zpCc?_2Bq`_c6u}qn4YI2=ZS#4Dmj}}n2+z~48xV8TNvY;eo4y`1#)^snw_=v#P`t8_FJ~hOR)Ki<^KSC*!Hc-WWCm{ zx7njuO0w-N$+7m1xIJ)a0>{{9cArpiB#sFiWNt9LcLt=ezIm;@;cRWj{kwmt_N_)+ zZ8t`ErD|3o-mgXxx98GUGmLCEt+-Dy3=+&g7 z9S%BG9OZUsDm<*^_Lcq;4N#d<5=cLVT887C1==%#ny-CqUO?oK2Vikri*vQ?LL}Y+ zBY{p4nHW9+0;2#R0dpRq$erMS1E)r6kqHUyh>_tR}dj9ZWiXsoV zhCEe+9&w8J*)@9~RFX3fc}1ki z*(Y(+f-Bu-^8*u(1%6`w*31vZaoqm^XWRb(TI<3=RF@&oh?gXJ&(hbpj{4Cqtyo+_ z<8m0=h6kYp8m${i6ktA3u|Ff!{PnRHsBctiIi*eJ07>OZrCDV?dR_m{#B641Es`i z`{KFWJ1G@zEBiZkEYcRp{`OR2nAYk?+hJgWPFYoQSXVmH>NeKLKj%)&W4A>ZUT}af zKc;I=Bx_q(((uO9uscGG;2&Pq4aB$Y8X_jf8 z$DkGXryFURwb?J!{-E%pHpnCXDSm}nV@Qi40&siPt0hKUN-_z+$;C!unV7L&Pinlu zLCSM(MNJlJ|~`>XOljdPoQezm`vUz-w& zegzm6-5n1wXzR~4()ege=1yf$I2#D`@1N4S%nIr}K2Mn^`=YV0{3#t+NZPboI-2v} z7F|OW#K;@uPCyyXYtXCDyia8;*YMm4nHD}mso}o0&4stL)alF-!-uks^ApGz{KtNF z$ioi~>14)@rTAmBV>1HcY^gAi>BO z&%YH3^(n0hqY}KUcCp6DZ8$!Z z!+SI<@`c*t10&Y6d$R;(BRG+=LRQNIG zMcQPLg>o`;^vzP13z?K&#=j}*eKA%bEZ}aFcc(m7vP+?wsO_mFkh5)mcISc(E2Amg z12?(FK^%&iV5PIu2Li0cHOnCl2rJ!}s1*#QC2OI=Hn~sU6;&J()jf>&Iwi{7X-+vD z#z_d@p~v7WFkhgX?AuP<<8k$?-gI!}lq}2BoC?Zxqa|r0YE`WsW7ceZSEp(wIER@W zq<-;aUAtS`laPBF=uTJ-roMf-noEe*Sc^p>467$le!i93cyq>z9c=ZYBF+^h{v7-I zX1pw3TGq7D-%A*!qdibW9X7Wh)+D-Gt))D@SqhRnQxfDq@G!@y#ZqD{w8(@bbu}zVm>?*@_Np?gOi-p2 zf-{kxD&Eq_W6m-$!KMXiCP?=futJ}luU;xEdy(fcEL(p?$4Yz}Y*wnmA%f>D$C6LE zrdzNQJ-$9={w=s4oggY*8{3&?haOaFP{hXsI6rtE^K-+^(oz`I_2n#S6 zIO72K{3@lbsEXVq-|0mdGLzJi$Ujk5W;scKvqf@HCoZbF1L{s{oY6--e`vH{v`9`F z!6QGJz^h7-f-*hLRhA2h z<5W0833p@L9jZC7Pwt}##ertV4QSm<1Qwf|l~Or9>t(T40WwNKGI7FMYXbqvIF;I@J4FXY=EnCr@onPBrlTZZuKCs zW8Vjgw{EwRHt?vir^}PeFvot`t9P)8AqH+zKbsvoRZC0R1TJQS$xz=QR^7RMX*n%` zCcKDTiw9k#p_3$FVMrg=n2Tk6OhuAyyp-mlx+73rd2CTmL4GhtrA?;WT3fS8uzAn- zr2ha9YH3&pX^}}Nk)(-ZjdGzuCxCkMS7)^|g93x)u*OI0SlWu+6C5_~%8vaNPb_^; zVOrL zp1H3FG{|UUAGF#>)#N-+jLYVserP_2={#<(p_PLJ$ROkpX;^?VPCe`5j{|&6*1Q;> zW4SM=X-EJN5icy!R=gc#UGCv$Bm?)K=97{9Uu^4xZwQk zJ-7$2HSsp3ae1w3lWQ8i#Fv+M4ZcYvJZ|nD z-b<{4QC~>ho4NB*@(L)TlHXF=uu(-ZH z!;*IKlg(hfAl@z8F)FG+UflPq+MJ1QuW>YzZ5)I^PadRID@%mExASAjQ_~%5cxZFf zzd~}Vq%rw(%|={L3#ncXM_=%$qSMtBmXX|Z!yNUdTEuO`BN59I0O?veT^Xe8OJ%kQ 
zmINPkaw+~(G~0>bR47BvWPuj|kt6rdrAgllTQ|x?FKr0RxZ{k0^`L1H zTU|lAJ9vXSla1b#%l%H;#$B?N+B$^{Tl^}{p9r>qBQ75wF~@&us|B>zLgUPBpfMqb z-XkBOq?qL!7ACriOK7EmcQm;XAU5$fQlE&_FsY*;yXs-LwPYs&?HLCLV`%i@6BXfUfkbIXSi}$lZDU90rV8cag5{>UalVr%C95J#bX+> ziDEH~`_dADy3-YjOp0bV9&6BwNoaX;lWy#?4tiH#45bt@o#9iF&Q#;KYT^-!vGf(+ zSl3G)cc>HVY z{{RCGz7NxKIr7i*uaBVIP8+3thv25qg*5*F`sMv==Qxx25%oP>r}&KN#F!l^JjOi- z>rdDlpv6G!W0hkgJBC58j*`5OsJ)R^{fW0A*EwAk?tGFBU~LuKLrua zZBN7cf@+#Y!`ocOL03cR&=JTZ>sbMt9cjUjdiLcL%x&Jt`_tg>?F-@kRKA1p4llCZ z%;_9AQKll4w*zoecAWbG?_XTo-$fiLZwq30k1$7_mNucxN$7N9(rW^n&6c=DIIj| z*E2FjrJkRBqSig-m-n7w+%S3!o|QUTc}82HPUz3doH7jhcdUzKx|TbamIwQ9Ax3hA zM?>vSwK}94Y?941XhG#b+(7*3ZS*pl)VFgaaoS|;Qs)Ywa(#PM2<9=wOhpEIsb0UO zYAm*b*hwRKfn(fCu6aJypQl|k)0*LKvq-y>Cvd{3KZiAA>=2cID|}O_z(Dy2(DbOLl4iG(*HRAdpamfIG^}X^x|}DE zQ_Jw&#jb?LK&+}WoPI!8&UN$`k7Nm;N|FU!U?9)TR=$1n6Vkq)Ew`{oiH&R8pa0YS zJftHuv{&mZbMfb~%6Pwx7^xWzGh?{rXkv)t`&& zvP7}&XwL^ckxVsvj+4#%NCA$_bnQ@Taj)3!o;3;@-bLOH1~J%otFy}_dq_gSo^jU| zPf%NTu)M)->2EI?;AKy0HRuf`7TNR1jgilYTLC~3$C3!iIO=g&_eR<6{HR!Yha7z? zL&XU>)+K3Q=19T9Ml=#FVJ#D+GvNGEd~VaGoF{o?|QoQrWYv#t=aJ=Kz zn6X>Dtbk=UjAz%qJX*?FLlT$W#s_+ta-v+N(4}Da7iDfGQ@1@>0a`OU%Wz8k%HRbf zq4lQ8ZY`w{%^5`}1$uK)Y+o^o7F3TLdC%!dIK7C2aaLu-M}!|R5D4JbJ;KK%oQ{=d zLoAmHUPd4f;yiu=ohaQF@&*M}kuFW@jh!c6ztbIjnHpG!SX|@*{?Da))|0Q<>h?sz zC`tJnNy7v5uL`r2B(7tKNaa8Ujxkb2JoYyqX}3O9=)t(;W7D41=Hl}8Q&{eLI6QAE zw9lh0MBYLRh7B$TBz-;QnIq;dy4tGn0eD} zNgmZYv6{SDLR&c8{^$Ypu6-eyCb$y^{{UIW?wG*)zJj{=*BPcu638&snyIag4s;-8 z!4(geNij2{S+FXDDCnu#-w`nAeUvMGDL0&Qs zaZ_GfV25TRF~Lj%8}q8u3#6G1s0fj$U4&p`(wuZ;v|5JND1q6WvB@OoCX3l#*4(sX zE!!uI`_yr?;_0{j%mM)9bpHSvo>z(745XX0^5;0mwGu>`VlwEG$YVlI=|~5swoON= zOM?@}QJ@Ku2;5JmD&5_b(u6Qc<$!)+jD}xA4N{A0J6+C(C=i&2@ql)mWDi5w){0sS z(2+=ZcCJtkpmElg3m}eWhBasH-x=>qa}wM%ep4rq-!z1r4mwn^T|6gh$GJ%3%R9LG z(*}+hUG5#&fmFa^omsL70RFX;YpF)_$!IN@TF408*&%`VrvCtB$1slK(ss8B{{V2O z{Iegz4r(S!O6H-s6JN?cW(b4~ecboWRtx6F_WuBI#(rKn=C;Pwi1{d^9^Gr7TLpqS zks0$j-b*ngf4iQ8*i=PWqgp70P)4Vm9I^JOWt|w|6$DJ7qL*;Xp5O|lEH^gaWhlXeoa^&_={6B?k0daqCWMZ1>aDe=dPB8UpWR~C`n>*By zMJJaW`y#!zR@&K6K4l(wTD*U_4 z0y>_wh7%;reBHu7yu5s*^Uo)>PKYE0kYo<^14U`BZ()u|t>fIgA9MFP73VhUs)2yP zZb+|BGWk~XDCCIP^(VD?s-43=wXHo_>0$NIc_4~y6jxeGIU?krE3YPizX1E(R+|Td9Pa*sTmAIT@F$^{QHio%YD~i5emsA&}tKjFPNaXW$+w z#zZ5N)|`BVW=#^q62}xGPBE7M0PEAB^A-)gSw{motr+76a>hw5$IM9-Y4$G^rY2L2 z?k(HsDj3Yr+Jv)PM8TmFxl&b09@)oC`qx_wHp6@&ywZc_H2EZw4=rPe$X&Ma*m|F8 zmIc~bTma4J0Hh4hmJha}wzQq?iV#Ur&H?GqV^zW(5(fr3FIqx(v2UCI0M%D7ZLi%} z*vuhu(2<&g8RN2;!ErCmAPz)_KU`1*bKOr1+sv`~P_musL1zcIPS~qgH?i8;o56+i z6tE0%2^|MMwD@oBZ}k|on@r1dD+NVSki_~|7vsG*S<*Ep)Adp1?jB@uXCNUY1Nnna zj5+H2KD&k@tEWraC3Ueve-`Sq##9L71Y-&gN8?<)&eHPal6snIj3KDl;dtwjUhO0!O zGN0M#jykPzJ3s)}Q5Enuh$4-*;00`C16=r}?&x*lz9VwRHX)k?Wk(0nmf|UK^X((s ztyhj#ir75lvBx+#KZYr-8#GvnnMvesB$1!3am85dmvJE*oP*k>^1{ejSdx2VimI(D z5Tq78&N|kVWto0z+9Wq+aigVugWzPxg=_)m_jU3KTKgBlV;&aMzw4cR-x7ZTdY-OG z?@~>jAp~?Z(x9m)lS8_YS3J`f&2#&&9BR>KkqU0JB%2mVY1QapS*&y-$|F#VslA zNnYzb@TnDm7^vu^7B)1Eds*=P>S|gQ?AA9CoQH+VXX*jLA6omf;b-kFWv{^ogX6iP zv%CrsE{=9i!=`;0`jd+M`=c2nHI(To4N0w!*~2Bp%u&l_Cg@~P%E)pGo`ez9Vwr9& z=Yn^*3_^UWa6u3B>-g8_$H700-YW3#h%~Pa+}u6popUl;s?6Cjd4wn+ka%BD(5CzZ!NJ63MXMp+t8%XI5=CfO6I3UGfK zu?^g2{bZ4ullYI`2h#vmEk?^wf&!`K&hj#XL*hgNw#H)>`$Phl@_)+ zABi^+&7|ByBaCfqs{!+JYv)~q)P8mJ7mtw#h9*~ad6k!QU$wpy7&t+lz9$j&`48R<0sQTMSG0`Hg(TN8i+)iwh#b8oS`De7IhwrqiA~iCtrjq6#yE z&ow=qXyF@f%nmXJKU%kL2$8nJ!?xj!AJ(>vQbrOrRQ#ZN3Q^XYqZI0;xr4oquFMa! 
zuh3PT3aw|%3-qfs{RM-U?kO+sugLy3&VS$Qe~Mq53<@wPqPtH%B8+3DbiN(~e4p;u zHP@Qyd_DK%{{VKeuly+;Sa$uiZgkbjYf>s)Dlr6+_}5!phPwck>2?sT2PXiU)sVP(OBh;B=-X#1ffOA?GTbaxwhqa*#+Cc2FGrx$bImvB8o+LFAd1X;8a0w4~!0A1Uiq1$LOE?5xk8 zJVPkG*Ct{Ld1)G!#xhFwu6r*TD?0(6pGwr%%gd|JEPwzufS|T7ep(p= zl4L8u$4cr_yE&w^5#^H#g?D50qNOnr~ixHbDq$>htGY~g_(mF?1TYesWbi-yIkdx-pxF?c=wstGS( zDtRTFzA285u5-vB@mbp(;4+L4O6o~DXmV6;*EDyzz-aJ2`3Ad9+|zjD>ceok8KgzV+&!8u5gBw2f_a1adD-{{RsB*UaNG+0%U852C_j;Xik& z+%hW+Fv#>Gv*)%Sc}L+|d>%(iV}-{&SC@8=O39qkTP$-+AQ)*npQi$h9M(*>?5xLc<5w9F;%3~%71XQeE<{8!pF>L``Rnq&NAjh(a|<-Rd$rR;g9$0L@f@G)p4P+t{-(Bmsa=1Ms0wxh)xw zazHj( z++kP`V~T~5qSIRD1DlnM1HT01e|QR#%6O-=-aM%1{{WVoI9=bBYGKK#cJkE6Xy;E= zjz-4fGm<-wY0zS5R#FZ~#tkwixH1%tgM?l(da?eM0lK=~5_u$V2XRsIsRzH-vzK#g zD-ZiUuw|b5QpN_|8!-cKw;e@4bg3-TqlOMLsz*cXnw6rCNJyOnk3us}je!b8Wq;W{ zN99kSOQK4#yo)S;dn$%pl1?&xg(UXZFCwfmGodWZTeq;QGg^z5+~^L|#zt{UuJEY} zto-AOmhY)fER0_?uGTjATZLcZ>C-gbHalx__Y$~SrvNx7)6i9>L=LU624jNQtlf!l zWF-M)Vxw>eIPN`-0$Y}+iC@lt2(4pt8J7f!f#wjs22U9^DaJA?oR*Q?su$!8Fdef> z$tLC6*a~^irE3{jz;TUk2hKWnseZ`vZH^wGcjli4#vq~Gq@0jCW9wTs#@qrTU{6Lq zhLypQ+S!QL!nS8E-e%-Qko)+?s z>T#j;b?O7|K>1;M%~c8;v4h}bEqu+BK@sZw8TRNA80%-xNKBABYGD)PcYk|MzxI9z z5MbqW`fv_sYF>STL!-GT93))owQWj<4B3q%j%qFC96Rc3b$fYn-y|7^hE${oK4AF) zkM^6d5V_;>L2ZDmZrYNvFhU~2;!tYaQh8)3Mjga@{IncAL?AOM>}S=j&3`Foq*%*e zisT|(=&r$zRojg(F5t6xdRv7w%ocJ!-wL#BYo$B;>`)N6hc`pkL<%I4XjaS=?zI_= zKW8Jr=vTz_C@@=D@kEmMVhhuv9#Cx*yy_^h({A7FtGwvm+&&|D&WPkt$9_4jtT!#+Yd zpj`?0#Vtd!Tf<7u1aZ1}!bWY?S)OOi$HedQmQ8kn!@hclL<7r!F}IWaU`GRa`G)lg zeq7dm$Dl-_d}@1=X0t8uNduwdI6w+2o9v;Y(E8+3g6Uoc%2{I-fCuY`t%3c@Lti;AXNL=AKVxF3Qtop z6)dG2w}iSvJd`Ey1L*Q%=dW4mPQqJEPN2Eml0WL3b z-e$>WmfrU>D%%+~N7FX^pjvkN7zDT-D*Kl5ZEDq^rvC%-_Sg=|3&UcVZBqNF98oOw zaA|g$?=@({w!xKl$6TB?%TGhFkF+M|whIoazAv$e+5E(l6zrhoO0HAUKRvw#tYa!G zPrdLU+E1Tu2A|nP@1|Yj101ku4<&|b1-7^R(&zoil6TV-?+m026nw_^{Ob*+pCuax zLXgfKn+si4afID7_c|P(*u55J{#$MD;ab;M_``F;Me+&{<^FO#w%XeGeJ|}OM%~XZ;BaVk0Q{n6ag2WEdWh3`SKfSYHxr;yT6!Dag0{5XbE5`gax%S zk6#C03ooWtJ{J?L8>WIog=u-W34T|MQ?A5sJU=xNK3g?>Hq|hVMn>kY#Ys4Lt2@0g z<|(iq34_av9?Y8Wo%9E&ozfb=M>3gSbe#b-WiZ78yT}66rGLKrUfnUrt7lSaDbgDF zcR@g-S`v?3LhiTsw5`Z{wk||!2%iR6r{Mcq($e&DO<_%ICLIy=0tj}#T^!a%;7qK) zdHTU}tJy_Z;AiR^*#2iJYEAk})h*Z4_^)9Cd_8WckWEhj`6i(taPu6bi8rySTkOuV z^_@D~*>HC0i}F}VF+;h5S|=imHsboAhqLn5-31O9eDiBOcWu2qlRB(=@biBCxYgsQtq2AK)Y<@=I>cnvi+j1RJj3v+2wF10oN5 z@elAfCx#X1FG`qfp9B5U*iG%7>golOQqZ%jrUoA&pkY@blQ@sLsSg~TQm15~ zTNs<*Vun#YYSNI0|0SxQN^z(rAfDx>BI??|EK7Oyu?`|Z&&2I<7?>`KJGt^%%ZObE zB+SfEX1Ft^c1!#{HHa6mYPca&D=~n;2=Rw#5O*oI7tM2jeF%?09&j6k|9bB5OdTov zHzY+TFET}HE)q5XDiBvMuv__1ol7e8J@H_!Sm9~WK{3&y(wDj~l$5Dro2g$>io9Rm zS{y0SF51mKdgA^A93YqB)`7Fs&uaIg?H%XsDywLIGT4l8m^$TLuxa&-hrB+xyj~sv zo%^f=)~Pz)Aq*s<0Q^U|&exl3_u9x*P3<1@ncusj8}9qC zdzxN*SFRzUp0ejQLUAIzH!b9Ym~9lw6ojZaN~tm4fuF146S1e zS%7YwB!y=v$fDowu0V)kDD?NTFDvW1p(DT)KY<584B(I9}T5c8*DdxMM!GLk2<>i?r|K&~VmW25pOzcxn73_T9XFbBlPI+fug0OC zP-C8q6;V^I*k$a7fo$OO@xgD{&0K+}VzAqHyc&JJf6=ZwmvJ_ookG*gBlxVOp z%_JVMPr){HyOoQLWLyQ)AP)6jaIvLvF;ihQ2GWSVM>Z!*%0(}q#0-lyITFNBZ)gx> z?+$ppepDO0$munUKp|%>DJLA#F8^gVe2&*TC1BxdR1WkIfQfcFy)zOjuJnqozir6H zPG0N5viEvu-bwOzJCR0Lu(wQa-;3%#nMnKKGzke?C7avWx@xhz4m{qhMR7?x?{btK zX_l_gC)Tf@r)0*yS`)geCi%3g!nkj(>fmNEDW*5PXV#Y*hbv+67A)B>$XLJ_=X(@2 zX_2&XQ&4O!pPLKH4bHqNR^6A$Q_&~Ck?Z`@q8x4Iwk5LDdO0dNGh;jf;S`Ql{*e#ddy;#FGmz-wE0gLERrxI#wzi*F zF}VYuOD6~O+#8ri1}I+sju)y)qYRe41&*1OX8z1YRKiZwiaE|gh|9veAncN$HsYuRTMT*4%P_a$pi$UT-FJWP2Ow+Vjy+e9>s({_S z`*2Z0W=9B~eVU8Ry&LR~GICe(divzx@m%mLj-1@BexE9_RB)uv{1nN1hnx;&c6z}1 zDWO4?11l+OQ$oE0w)9ZF(gr^5O;p} z!Bg=be!chM8P=J}D4j)uf`>nCWcHMBlrw)em(So)8#dd9kvgVllxw5VEiT)TfMM8H 
z7j{Sq^JRzUA2T?3j84=z*fxK&bxn{LS&wc;sDALfw@WQa?os(1uUp)@L~YG17H>K+ z)*8e1IIH1M7Di8-%8iSab5lwiSMF(4bw@%kko~7XLwd6HrwClp2mGn4%)-)((ro9ZI1P2oQB!u)0Wr0GT7HOs7sRx#rO#%(F2!=v4u2 zh>mx(>oxXuH^~wXy)5q-u#g_N>h~B*)ZF&UT6fh6WTgXw9<@z=KEUD|ppZ_H!y#*>@?}*zDDB%Is}TOh{_5tMfCfZw1R7TtBm*_3b9a ziSn+fuCGWqg%XASfCpc9g~|RLG<~4xOQZ$Hp*!UwS{?U$a1}ETYb|qv0;uKg)r^d5 zA)7`G*I!pLDjx!;n=0jc+4Vz1(er}4$rt9!@M$!3Y@8QLO3Q1#{sH=?XntRbr8vg% zZ9IOuPCGYkZL->X8uh}2pBgz!5?XNSOZHs3+(WICKe0()mYG=aSA~Q=7`k6p-pD_O~vNBwh)n1GKU5r8+QtiPeo~I<^-nCsxrDyDpl7w9vN>5Rc8kxt0$+K_Ctzn~ieHaD>eog%Dc*F5o6DSw$NJ z3{q9P&lgr!o-U<^^h+B!2_8p)Ws|6u+cB4r=9NhRZYF{cRW_PkE}L1qxn8JDX0dEd z7@r3xNTzrav75^t&GNblapOi&sX(=CEKf~nUM8iF3zVy8-uT2+m%LRB% z8%B7+=Lhta4XlLwB?QbsyboNN<{d*9j#PhI+=Ta&eC&{Q$z`8t|B!zTb-6vGeAD3^ z;ScWv+^HG`;OcxkD^0;8J+zRPh5l?=jSe^=0n8RWcDjENt>8QFIWbet`>0CSMG{0m zja{Ohig@Hv_(j54&8N*cKYmfMvtw6D`B=Bu8KpY;daY&{&wS>d1iOc9jG z6J(?`G{BX9g(1e>Af=Vg*}pDIp`v&A4c96L_S6P*w$rk)LI-J;57R!|G zEo$o&m{_Gr^;YJ1!poQ^JL22#Sajm>r6HDVMtXonqo6c2(=GimVQZ~d#e@CbX@=8F zA?oAyKLFH_)#y=k_W#%aIEvNq5Kt*9&aBd25*0V-94$-O&WznM33rT!N%8m3W5O%A<4+~p*`c+oSp78p==wj_3 z;134z!>f6eJ9bEfc>o?capFeiePxEXrMaf%TA0sQRhS0Dk{nyF{WZd$OaM?^7UbIW z<&KM=htx^9&}UdD8>n)+_9@x0r591)c}2@8cy)f+)V1X2ccfwXMpY^gDuZ+TKiFk> z0BFmojl+$+Wt=IT4d2g>f|(oyL;aI{4lk*;%3`@CE_MP=m`Sb?G9jBvw*+!q_PrAg zs6N7*SN$lX?%aTFH>@==ezN`G0hCG)1}S^g_upiVqUd)t&z@1b*ou;Wrb%A;qZ=x5 z?=O)LuhV-}Tq=4>Du2tZD{84K6EZOUy(;7U8T~}-cGWBW8oua1 z*A3aC#eQB5;$>dAf%3vKgeFk=EGPz3AI&?h@2nXPUfoKW6WoQ&6YaO!oi`@>$#I`Q zUy^vmWHHJW4Ikf)urJD}cAA93jcq|!s9(_7krjU6aWuc;82GWURgzm!GckM3?w`{0 zbNY+;V|Gx(EVk^!HZM`a?3Cw!peOGJCVvqAvkl!|Z?_lQ()R81=gY?OWAK1(Dn;P+ z`h9G=TG^G^v`^>VMxoJGBIO8TmGJxP2bc4Z_!kAXbzd6bH7VS5XQl_K7AKU}I--kl(@;DRJ76%!9sO>YbzZ;uZ18DQ2@IHKm7CNb<;UnPm zxf(DxUOkICl(~iBB*dgGGD5WCxzYCB{FD5mL#J`7AB32pRy2E~@n=qiUw6lBwAz$# zdl#8dxJ15}b8J(aZs3h+(U{Ov`c!N`gATyM6%o52x*cL;y+*HO^>}Vn%8l(pCNUZ5 zWT%gl-e|hDpA&5#u6V5bIz0{?+U)FLyeY4^S={dEEL%$h2Ed~nXG10r2@8L`L%*L$ z60UfGF-{o1@@?pv{f=LsM*Vvpv-Nu(f2R8ms04##8TXYui1$e5HHzUaLtN@UqW{HE zY#LC_$t_0aRH|N&p{X^`S|utWYCh_6Ojo>Py1?9pU3AJ+`T2zr=hch{>YL27D^3ys zw@O6Gh4Re{(5a=>RfiCLq4a#l@P^6%IZl=uHF2_1RdI2ZiaB~E3w?Rth2)GWiWq!b zg?6lv&gisIrXn4K&+T|>Fxmr5UHuu@jS|F=oNHq5YrnHbXZoss0~4@GP8Kq6AiaW! 
zBjp2~6dli}lk9TrQ`)uK16|6~s2ad#QSK<<6CkB-NWwlE+?>P$bZ3iy%SZB&RUm z2>Ii;if2rm3I9sZ@pEp%4JhY>+0f#)t7lW@r;27JgHXyJwL&4GT6-b~-AVsRFXR(h zQ5fQme2%a7GXG6lD0~V$xl?Wb2;G6-?6{qp}uj zUua!$@&TW&n{(Q^U;6P=B4XX8a)o30`J%YXxb@8Jgi5duBL_CX!uCX@U%}v)Ton9& z?dDX8&KW$N>ysiz%~-cnGBBpzu_fEXk{hp1LHg@PD9M1QDEaeUj6NUEUs8tE?!Sc8 z>VSok!A9y(Hy-t;Tq*I}f{HYDFlmLRM$wkGBW3e2tr(V{??Jr2qdvoJk{g>* zBJ1P39n(9`DsFoC;e=O>Y_j{LwK7o-CN^#X-zhJFZ39Dzypy-Obe_k%DBo#s^;k;6 zNPOy#@Mp5LkX{S*(>EGvA|z*--`hy8<)- zF~uu#G5C8h;l{jUqg0D-3OPH=)I_BYWRKrNM+S2e*;!XYKkb@P&pQBTP@qJRcVND0 z@H(&ZA%!Ev(*nUnT4TcWQvI*=TDm5!Ni)^4Zwz=GurI2KTwsj$qF)0i+ zp;UsUH{&;t%e2hh!t;KV9fKFy&yn-~x1*W^?}R;JpeL$7)>2^=aN1a)4^GYe1HiGF zbp_&C3n}&0%@Fw|21oU9il~MDM{*U=eYUfO_241~aNTR(sLyT6O_M!QeegPl0Xe+Z z!T~;^H`@o9#D#0u04ftsNJ=WJf6Jk}LPvyG@Lpv%{sDgfr+zjposy+NS0gbgg^qmv z&j$x6Q25yWbxeN=*zwtm%+I8dI>p7O#&VpFGwH_WiV0s?E{0x@v?W49h-W8V0z_ls z_E}xcG~NS%Fq>d#{!H|e#HEDG$p_D+QjsHN3u<#a4&=0 zxq{9I7m3ZEx<|aHAI)`=o{&d7a=}9L@c+ZdsUsYAl2=n7_=W*Kwv*X;bsYn;b8^$y zorY&!8ErhmH_Dwh?UzQrszlzO$nj_AzOUPdu*lF4N{UftjN>xbZn`#RoVtj8mRHg% zp&OnISoPxWB?>^SrFba|Bk>RV#ewT}$&B9af9d&mcH*3GN{y#&zBI)j#-)*;XF`An z2@MAiNeY$9kp;ea(kfT0&n0YiF_o*JxsT-5=DHd7?CP%%D_W0Pvp;a`ZQ|cXZK}}& z{J8uGT5w*^ZJ2uaGuc)Z)MPBL|AK6$U~Y5`NLV}@B-giu8}isEJmp_QOGmQLaITwwebugohp)*OaD9QRkRPMcg=v$fCpQJGnAn!3_M^M9 z6X7+hH}6OrXk|lk+wMy3X`vW4D!iEg&Vl>dz-0&(T~Q4xfWFuHVe9V$ky>NQj{gss z`#%8XC+_R#K^*XE6Mq-aa0AM|=AD6^O4W)KxxcHp%{|<$51cp@6-LzkxQ*Pxbyb

    fqVur)!^utdG}}U@jlO$4zfR-yhDoY{M_@_U@`oJW0ps3z1UW9TYDAqs z=J=HOHEPlOptXir#Ty@~%bWCETSnOgGf_8aWauO1{xE>#aR$u)T+Iyo7D7l?FirYQU`#8`$KDmkyCV86> z98jd;mtns3?Cj&ReCBOzcI7NMtLUTJv!WW8s#H&=EI{Z+Om8}oR4P`zfHWMs);0-{&z-+hp4e?;Ik8;E#78=7OOyTcnNGuZVez_C;8KR&*8RuBf z{n`|D2c^Z4#nZ5;mY5Cw?IxUKxU_JWdhxga<+H6hy~&oYWp``!1UjE5I38KFUz(J0 z)fG&hS%Z+~+gQFWIq6x7XJ7h`GcGyGI zUx$>lT`3 z$7}TdWnGwI{eE&cY3pP>bm(+R$WG~J)Esd>iHbmmq9`Jf;L&^fBohX9OeL;De3qx; z1VXB)dG_}4GL<$}E&)qr?E2arf(ciCDseQ=)NuW5Ry8g%PaJ z#~x4jn6>fhwc>G{JRymw@p=CFO%2znv;4(;v^NyUQCT08jkv1BnE6(^UjewVH*pHR zw3drZ6~aDzD^P;&-qJSQ+Z^{ixdNU96q*U`0*AUbv%H4i-8wd9|CE{K%@V%*1B>3u zChSO8fTZLf;hi*MOGJjN;qAt{eCkP98$E_Kx~{HDWpO%}Zt3 ze9&Uax@B5AUAO9jfN_;=hILC7;bpd{syLo!FO?m0CjG%%nYtjtodADKy37_rv3IQq z8hY^vgh-HErs~}2LZp-v6xmA0wL2NE^GO}Tk zR{a|y?k9A6fVNq_pZ3cekWflOF0ZMS1B-v`VO=-3>ySf~z*2)skW6{^pLa2N>W(zVKv)tML7nd9HB%PcqlJXTu;)K3eK z@>VRJUJvG{;n6KEj*Egp74t`mvO&I%nrMkUTw;a|XGT#O@45*2o9`>Bs4b{*g@?d% zJf)PQ0UXXTX(bzVS0;<&dXMQT=97+x>gM9X6v?b798;e7+-EcJ%w1%6G?#ef299BY z-^g2h8)z(#Nb%Cqdn;sH6$3mEnvTyQYHD}_UmV6q;4uj8zGWU?wDV)M#+7GT?~%+a zqfEi#rbJhEGRC2+zol2$k$IUq0^rUv^$vO^Y*Ah>^hYJ|DyeQ;kf-au)RcyJ)agyh zkYGSDX;(SM4l`93=6=+hwQ5M>KDz1k&k*hKZrT`zwvcZbzu?*EH1OU`!>Bn_x*@-u zbtj-J4oiN!P^~ZryG)*X-TB_Jwq&BpdR9!Oc65=!m=PO9v)#7KVW0kfs ziWKE|iH3P)OdJrz(LfD>gl!0EXQ4s?4E!S80vzgdnOZ&0W5!q3r4gE$Y>FT$1(#+t;CB7%2PQ(azKJ7v5^WhWL-(HlD45GaAhsxS83kU5=^X0 z?b)tPc~+HnnNqv}Gvd9yPgPdL>*KikMu%|B_RY7W_6uzl&o!K9?b9S&+3HEf{>leW zG}$1|kScgqzIJ++rhvrC5rpYTrBBn(h1fi}+UUy<5Ag&d9BtdzV#$mI0)$ebl>NkC z+HLBUdzIC5&eFEchwDF0=Q4ky_6Sq{;k%BZ<>WT!*Xh+BMnHIN znu;;XaW1RFQ7^(l8JGgOmH{2CK zKxrFQo%6{ldG%wH4-%Sp3|uui7E!L4M9VNfJfMM@WSyi&CwbK=zT(HfG~mCfK&8i= zoz7Z3XZ~aEYB7do5k4Fiu-_cC@%ylD)%bn&TecOErtsfb>m95j;6{V_TGE(@9&NbF zA3mUEqrGC-w4*t$9B%oDThfbdDN|J>T?E{I`{q#Qsehwk`=(FUREY`sPj(@vbi@$B zE$v_|8yNR7aEym40Xzdxs~(JDQ0+rkVcZPhI^@$0G(!W&&+QimlJaThSSrihae1;$ ztUYReW8CC(fU0VzWX^fEs^?#$e=22t(=@+dCC6D1OaA!uQ8dhMW2E2U7dV2Bq=fFB`jWJ}?p`l24(v@}}Fwt_ECVU~Jx zJl&B8<9?#D<~xiufPwc8?~DqnwW+EcEmk%R*lTWJWUD-_cG~j~x{*42Fjolf zQdRpsscaZ|OVLJSOa1eiL21{3hlSZyW~s3@W%&0}t0h@bd*07f-9rPTYAdjTj6>N} zgRUbA;$P5<=AXNK*)n~dXRHwcdBcuklP_!UBi;-;;0M%@@CyB@qimjK+LUu!tazrX zKJ!&D_YEm-bFYclrb2nqz3hHQIn*878?$2fP1EZwltxFiAgwhlQv;g%Y1Yg_t{Cs@ zrNbuZyN$7if!_$Lc3ZGHD{4&6Sce@C)tKr_ayB>-nD znX#30eQDQ!RYzYL1C}{eMnmZ-Gg(&8FwRd6garKf7EU2%IY)m+1X{>YGvH36;8> z>s)q$^}nRrw;vifJG+~i0OCCpM!t`<`sA1#8%8-Q@Xn5vse)SFo?{RF-u9~I_S7BC zSxIu+wkKC4uc*rLn74GMGSP9@N;{OG=y{=CWRrMYo%gzv8_($V{3LU%OJaxYwwRL3 z`bP@Lr!Q&QUXy~ZxQ#r+77h;$^!+HKBAELDR8b43dWc;vSC2r_Xw8Z{rIUYv?&sKj zDE_l*Y5K6XfhH{p-`Xu)I<(_2R0ym*rcFMQoEqq`jy|@-u96fP=g`Pm`jTUwcp4Su zJV=nm8$LXOW)?{04_C)O<}E&dIb{1Fi90}~cP>~uM!d!(%n685 z%RDRD4j-&2S&=!{^k$Z^?xle&1*)$d8`xhOA4-LpfZRmf=?qwvU<(BUNN4?A4;kxi z`CCVW>6|}?dOo6DRjklt3y|3Hlzl{Pr6o_&_HR*iKv7kb`+Aufv%0`6^Q9t8!P0RAJ#`17B&fO72w)o#tX|%@*bxK1c?~ zivCJdJW)QsKB*TgRW5;Yduf3k;kn*bOz5xze|EWWt>!7bXGE26FhGWZIo*~+XG1Y$ zCDnD44-gUSPz2C-b{;hk66a}B-|e^xh6|V+U}E^QU$*`^rZxjl22~Wll|B$9WxScW z(|v);V=w#g!*U&_Sxv(fW($OSW=7K5o0gTffc^a&1@WaQN4_64WB2U{vU4AeE@Z=o zx5R@K6?q8IZ3bg70m|TX8}9l0#&y@W+`KCoohZ_$c%G1InnIT5@x6N}%lzsc0W?`kn}` zFGM~DF4q7#-%pFDx9JG!EDY7nRZ(g!#;G?V5ArJaW6AzWmNtJmo0mZ;bn&{|F4eCc zG($*4H`=fzan!qruuu`DlQNm_lq|l;zw(jYE&r+^wKu|K&NC%#s0ymErCO#|Uv?h6 zFJJK+L*&bB@In|WtC6FlkYpcw^j`R`AH#2`VR@K*!2Qs+ox{aEx`(oiU1O5N(0OqOjQev%g5|R?XB)jpa1|T8t-R$+-~E*0 z$h`wR>Iexd^s&b+ybYIddr_*|{JW{}@EQ(J!WG7;I}h=DJL_ZUH?-GZL`j92$%Id-pz{t2A-ZMUZBDww8{&n!``q>()E=f$O zcW*FvuXNJ9wH8@Gw6o|k^0#_$)Z?mD_t-}G19@FQ&hRSLu8wF5p@I=?u-G&}In zUbxskG7`1CXKdzq=VYxOoo`z?;bm5ml=R%V+|<1f@lsF1!qf_;vh5Nh>i2c>u 
zCtJAuRpd8wwX2Akv83-?QRb0>_e{!Z zgWrD_f%y1*`*GW+9kbD!(-NEz1*uP}xnRp4TU~l;2g)j3iV19RlX;)O> zrV+^6s_?sW`Qm%7i~WpvXhaAMTp_EU&}EC)lSJO&p)CSREbo28g_sIT-&D|ID5g6f zDKYe$cMH4R6cYzf_h6UC6qW1gdZO3X<>R`Yr-LhV(%(#HZiCOPJ!S3Ac29-XA%Fb& z7Plc%#*8yJ_P8rY+M&w3axuRy{E~L6*^%D=M$^dJFk_1%>u+)-Bv7>-AN@v~;QTif zi(==82-mL8*GTh*ETdUe)<%R_>l>JzQHv-wW>s+$*M>1`l4Yl_)#|O4@n==rfeD*< zgnqX5>cYUOX4P0_eKOIanou!8;Je`g)iCGGeiSaaxMUJCt<+9#msgvnMV58P5%C$U-gN*O?>Jap1fn9>UZD zWD9mBrzZE)8^4+yDdiquC^hwHI(w9^z3T4WE}&`;GB$8B9#thRd8&EyPYrJ z?4}4gH`xti&aQ-vyra!8DkKWJMV_wHn?8}a6)86jg73r8-2f8Iv`V6nyW^u--!2#TODd2xE`pf>S-`|P8G}u;r__jtn zDkUn@ijZw@!8FSL4eB%B8z*yT9=i0L!XVaxXbVS+G`z^@g2!2%6BdHErzan9P%WqtCovUF{{pSd5IMe> zp`c~2-`_!no7$UOooQ|=L~ZhduS(AK)h?#JGV2|G#DuPwZUG}R2W%>lXoGq;GreQx zz(w-&e3{(XrG#ci$3ttkWeeX4BvJQGubKzQ3xBkSw=hz$ZRemAdf^`wq)Vljb=aBv zVqLb|35ag`eu4F*F%4eRAfniW3*B!UQp=G1PS%#hdaK9>dTdp-6@Mj=D4fWkhI#~j zV?vO?A&2YYJG{K@>yzYj*WWR{d^|Mp$B zy;HsSkC}qn)PR*l7skmsuD9yJu0r*Ufatu3jY`{M)1Rx zc2QG#lN{9}vR^ZAMk2tLh+Y0!GQgD@!ULc4YG`@Lb#BHh=Bg1!j2LCks6Az?Y(R!V7OW{d_N+_^NAdY!nBN-xSU;xOjT#i+Zd zf{QX@Vdsi`nvu6au|j1QayySm5EN<4yK>0n@y6^YN% z_Mu`^tQ#YUA5zfpImB`3Ty@s^TNUb|ToYMRYjW79`WrH2osQv)!Id1X;B~kI?PZPc ziRO|YJpDNC13dFuPxC=@)bV{ArFR;0^NyV=s}+2rpMSWOu1UWZypulAXJyFfY2#Uu zBV9G@kBYad-@|znEX-C)lb)o(P1Rx4GXh??_GET2`evI6u|G)_N>_{;AdJ|fxXP?+21l-<@}P2syajlRP5>9 zKa=)pixpzW&O9bc+Ua!)?_Jn&7w2p?f5A zyxd8$H7?w5&f!vGkj1h#vBYE1(Gs?%;kj2@RQeAf3*ug8)wQ60+Gq>i6f3rBVn!zcT-fPY?6 z*vh;rO8q_wbrS0my6E$@v!>GS2oC8yphSGw59TOpYUl*f8EbD^cBp<^gJ%?h@?Fua zYQEc68EAlC&EJD#ozvH5q9XT*V$;w;2$IJMs)Ok+*aAR20wWKSE8wj(v)#ndSz0hz zhP|f;t_Eh!o0WzD1X4^Xolrw6`B_|^Gpdkp97I@`Pa28U`YVU6E?40PqN7<3-d^(# zp;$Pw5FaC@PjT8#C<}+W&93Hlv|u0UcwQHOHdFRiMFd`ImkHU6_M(meSYcs9R-r@+ zR@$FTuT!`|f@ZLzp=z?4VvvwMS$6#-Jj+Eb548U*zNNt3qw&?hq*+b53y~%^2QmFc zHq(q=2W97$K@!I~VTy>1r#ZAo7TB7HMBjt!;0cWqkl1<;#WASU*vzXIzv@*|5e|oc zs|U}nTQA~$Iw{L``pL`e-+xH@={FzwDqU6KYNc&+07haQ*W25&f0g{aoE~FjoONqV zc_!!Ro$%Pki5%HXQg$mOoqp2!YsfOPC+^M~iXW}Ub{ITJ5Lv1%OZ8Eiug*pi-<3Q> zzbR^vb((OXCC*=`Kc1$Mu3C;tdMwI~VqrzM^x%5(ZdPu!*YqnpMF;Q0C2OpwWR;}PE!Sp#?IzIwopVMo5YpSs_P53^p` zL|e9pkH<07XK~?}3n!6Z;;ceSt zmdd1Tx5&+AjEMrZzVwgrzs8JyY%qjd7akk}P=46u*R61W$EFzwv@( zx~^9J2|*$OB}D(#iT}QG@6Xqru^JLfgb%}`_1wXnom*M26l)a@Oe9y$oIJc{WRvwN zc~ZKwe<}U?N-l#E;^WJ|Zu#{y&T4kLoC=JH+GjKvR-1`A_jqdMYUOKznQO+drlT(z zKfr*k4DO=1K4N_6Zu~*y(+zPpG<}^gtc$+y(Tx2A`C^{kqdJ&VJ>;z%W#)Bx1lVg^ z)lgaWEu{8Jv$H3hy;EO*;<%&k4|x;Ca@j?8n^pE5e9-uV%zz&lRD4W~<{nx`*tTjt2?&x5^Ttqw=Jkt89$ZHc=WryefYCjS}4aj z?}HTActw1Dzgt|ZH193kE-Q48mrI~j8u)g zB}~)LIG<9AG@bVk&koylB>|jOCs4b2`SG9gS`<}! zZQczUwkyV~OJ6u}C=cur0C2I<6L>Qd#FDa$6ONqaZaLVGdlq;O+Ai4*7wFdk7;Ww-ldg z;<`oL_nfHoF~pXUA|etRn7epvNBzq2unGi5YQ(QwuG3SRYvat8Qz2d>qr{0GVTr5j zR{E{^=)6GM5HG!`T+76k&FeqmV3wPf)qX=6P6%RSGMRktfk{J${Xv~?Iw*W7Q%APs zM8Bm_2Qm7pEU@bgD@qeRO*J4q=plCZOL5&0qVL6gE@>w>=^w2ro6Ue6T+W{BYX;r2 z<-=-aJIxnMox)FF<`Q;H9zN}+Jj}$FJT8<+$T}n%s*V(E&m>xqg`IJCV;JzCV<;7! 
z(VVal$4c*yKwkx0+Ox{xJIYdU-<$grAjcanK{hpEA4r&+?4zHUW4<+MHrlyA5`XEtu49Z8nZ9MHoHmQ_mn zhM&S;%f+~uuF1$gr-AzL;5W=!?Z%mJYsHM{nmk5lKl1}fkd ziVNqFtr+_byj1`2@kw9v7d3)=h9;~x=F-U?f8BGaig)F3 zBD&v7?W)+~{1%j&;hNn1$u(Qf+kDjN@*idQS?xvC4ECDeHewz?G0#he7 zCsstL(w{Plu(!FNhXfJ{0n?Q-YOzcW?9qza`gYz$;4N?dn;5|r^Pj^{3o7ySB<|3Z zEr@B#4pRlOCC?#z)^q@2e){xYUpE2rMRM>FXPmLC&x`~LfebWD@>?Un}Q0R z?W&qdt#z$7TS909gY%<;1R0EY_u4#ZKbm_*{+%P)iLAaOC~5h0SR75Q&;)qgPVa?` zZ9N38VjZfZx+7Pp3f$(HaQ*`@-RF-~@`5qmpUYKC?;y$|({-2ZH7m!Y;3P?}1V|Nr z9~k))*ryOY#+AV7tiAXy6$Md1)dc)WBQk3wcE0FpoZ($wnpPSit`ZWuksAs`LDecg zxv-R{K7o8qc(^UB8Qw}AI^Svg|IfR_?q}RvzMS;*8+a4d5)%Lyb;ETWUZ35T=@62< z)+fexuiom_)_+HqL6$jS6pv{-RHk=BpbVmu8sz1yP&{&Ql&7WTN=UFl{1V+Cz@g@f zA-19zIhuL3gqCAkeODGm0{NwA&2ZARt1MUhRvjtc3&0oxC$nf?YKx+}=xKB$Hyjs8 zk2y4EZ<$46>CwdAF%pwlylk(R6K+3kB7Kid#S|HsZb#EHZQt{pyHql@7-$bZ`^)&b z+h*q)c2gh)l2E5jq!o_l)0$@=t$nZ_5dORyJ5#AC_B%O3i}GxQgT)Eha;HOL_@Sa< z58ox$8^vTLj-dcboJob(7@V!6cBP?hs>yC@N@7A-8qXcNaymut^z7-2ke|LB%~;N{ z{z_oDy6Rz}RXvxws@=lX-o}H(m_+ONBFu2nY>To&(~yXs&)R(hm4GSz6sq_3Hgk4k z`W22YT|SY@aekU#feXOKrZLdtD21 z8q%M+|*4(yY!krPml6|~uw z)m0l)Oznc?xB$BSiUnir_D8J@t=;0CmK)GtNku+f!b=`}3^Y*Qo`}>bjx6}r5P0Hp z%OgFR#L{`XvDNk|EtHX1*d%b8)Z$fYN}M4#FsyOlSSSu&DbGA>UP_5x__o4;-3^y% zr(Kbw%K`-}SDT*jyaqc5yjOqlOJb{Vsq#ijkD zdbo3n1{DM?mtNfSu@lMj_bG*ufjZY_T;58=wiQ^#A*t z#Rsm{Xel4#;sN|%lzP5;Weu8Kgm)a7#;q?5M4#sAkP z+-l(a-HGG6BM~R{ELG2LGxs~c6p&r0*#y7=ZRo-qG5UD#X-vAjiftLabapwcO5gm= zN#qbCEaf+FmJ`-O-XTy{)OtCkC!82ONnu4Ur*PN;Gw$#G&-2w#V_OuHeq%dm>dxYKR=2wrC_XgQ6j z0p~59{5ydPT~j6h%HaL9QmB1pSWM$T%TW5`b2O5dmrs+bmIOEQKFH4fqx_}GkJ8^2 zF*f!q&`NOwMyTm)8)&apr}0^XyDeW`K1^fmWy$ngD#>6a?tcI=(4lLyXHVXa8(B)( zBx1h=bYcC2cLLou0ZjJ2Bp-PvSF{DEyPf-jF@{f-9Q^L`R2H@(G24kdD0b23i`!3) zJ5mbX+!TV{PlIJ*Q)v=&iVuw6xgQ;=sULFTQ7wH5_z#e@UeupabYuT&J9ecvU&+cp z34`a$EESSc@=T@u2e40;T#ZlO1Hk=@JDW^fR8S$dp&rBaH(HVgba&U4xos0<3T${e ze#d`X(*BLi+dehjmWf1IE^eQgp(?KUCMAfZ&^!mltdug<2!BE|N*mu4dOrFrEko`@ zSR*XHK=NMqX=*v#N**P1jl2Qp#&$byd}N0v^4i`!=SVAG{ZFx)zzIvPoXO8Z%F*rP z=&Dj8^AGe9lD@N}g(HLpO8me(_jd^(%sEv~#jh`<;Ct8yI{Awz`c>rW{ngD9^=huL zxjc)={Lw!^3Z&qH%OewsXP4pnj7r$|;#XF@=hA(V1)z0`Q69^uBEBo@{@zA`MfZgE z9SdjFJBVhOW|Ehv=$OMlZFtbYm8b$(GCtN1aES5iv$(0MWNM5uxCkIygvUK|KDDArKeu3 zc|9t)eyC3eCKdtk)h@^`?>ioHav+yKb_U9R@zQ83gR#`Kh&`=SpCDI0f0+F)z{YkP zA@r56RHz&2`DSwCF_p6ePERhx9l^DtUt~!kg>Oe_e=U49)79%8E(BLUP5?)64T06IDW_^6dFaWF@v%8Q5Z4lSzk8?XtqiUC5)T!s|rMJuvUXzEL+xIpC(h zPNR@Wj91AFs&x}Ih6q*u&Q-m0X)xb(xsh081g>N#?a<~VIo=HLVJeRM{kA_9JIqsV zN$#;0le?h)rf#o0k@+Bnv?T=WU z#!<2IZ)uD8E;R_1AG|;DgdS+vG`HP`x^k3N7%@zYOAWg0_v8Y%tQHje=mq`cZ(Unw zzZHydgq`|_3u}Fm{xW@I$6Pz6KSzEf61KosH)_Yq&5=j@rf`Wi`sxh*S$5hZ88)wF zb6oj9t;uLvF@A#S{eenkzJt8W7trR>M&T#Vl>H}Geg-&0VzSuRhv|!!TW(FISVBJ< zZBJH<#82_zLUcJb@~V!L4eP3k&zCdqCrWbdnM$)KmNl&nW(i%wgz+REZ&k1ZKj**s zPUXsFtaKr=-VrPgI!MsJ&*;*A8n!vP5tKA&afJXKp-8VpgM6o^g27WSD<1BTH=_0f zv@s?D;jz^Zvrk;!`HLfj>ebi?M&ZNmdsJqp|!5Q8^Vi&w`k>x5N0t`J&zdCDl+RQKgKHt@X3@#|y;L zk4$Z~3Z*nkn^B))uK6w^V1e7tvxt+@sp_7Ib;q((F5)EQhPa-FSN;buaSar{7WBIR zb1nLObnkM*$(G4V$XzT7@Sd2e?Mq)?bwAFdz-gWHuX2d}$iz)E?5ow|@=Z$0B&h6aX=CCeoTq)nx}Q_vp10eFQ7J;tA>g%{2iRRKe8F!0}4&h70V1QVo_4i z9hQ5XW5`1XKbDN3Xnau66o%)92WI<*7w79Eic!02)b|$1yZV=H2GSi<7B7bnakt5G z)UgHr?{K-b=ofORp~EGUiAMxu9@rT2pA#0d3f0oKF?K9dL3>%!rUI= z5x_$ND%wCp=YYC1_1>04??0iw6y&nIssO-J$x2L0Q=EFd@%$@()Xfy`{% z*aV-@2F%1{ObyCng|r6&1YLa)wSPMa&E-!~W zZzinUjGvKm0_eDSrCm*5&WQK4SMJl;TEUmRktasMZYk_$II$w0yglQ}WZ6}L+VjfQ zkm^wP47D;9hViUroYAIRBF#XllCN~3yv0f>a&3yO;HOEJwyLFMidlX+6%y_J0U%*~ zSx~5MVeZs=LQ|N`7qJ%E1vUBGA^>eU?`R*Q6(6Lbut#`^s&Dgd1mU~>$+`XFgIhT> zU#)<6H}xN&IWXD#;4t8K^SKFDn@ZZV-CqP#{a65lS&~)qYl5BenhGg4*F+sdXD)VG 
zjVluJ%-oFjXXjg^XUDz1O)$v&#C1nzT<3g~e{ofBYH zVL5ekqhqx0$LQpd5aeG0v|dMgclauQioc@`iNW0Nb)-hm+#EKmlFFt~G@uUGd@!tO z<$ktIY16$kQwXw3^6NCwhtF37bX40~zV6utZ)~EGXSPBm4b;8D(YidH-((MniANAB z6v^@0?O4Mlkw5-SU@A=Tb~umfny|lnQ!zkxaPeJc#FU@`UW;}5iU$r{Xcwu<*q9Pr zu++v6u1YavcbkUacx~~gm|Z9@CeaJ5FdAOL z*vp!)^HZ3vB^4F>R&J|7WVV&pVD!0ymWk zOioaAX)JABBd`xamG@0g0&`T!e|7SLwnX9h;bh4& z)(d*Hv6P)c$6Cr-`c7Z;+m@aue%KT5p?oETbc6u7ahv#O&1UE0gfE}2OxR|`3oCR3;*l5PzHRxht332nZ-ZT zZ;TWr-g#aw_wWQZ(ANCCeej3O4{sAug?CVlH?DE@SZNesy2>oht*%<1_}~I|(nfy+ zOV;3uckOFzwc__RtaGOP-j`4iqFb8VoXU02THhF$hl`cco6eJ}pNwcJC3os+yJBB5d2&Jk&| zOQKIQ9B{lv@3CYQAQvnhakO(kyx^e(c0^HsnN~^_z$$ zu9sf~PusiS?m9mMy#GA3AOQU94i+me46$Vv$oQMNF9w+p%nKYSsz4%19P@hQc=p9D z|6h%nLt3fD4vB8~pBIF#Od9siFC_Xsn^lxS-75a8{qJ9iZ9H3qg{SeL(|7)|fe2Lo zUr{;S?iCq;2H#b>7+gfS@LT1U7XBjt3ViNB@1aoblR5r5$4MhXquD5zCPJyN3cL&C zV#1o=agx&X_dTTsd9ljfsA!vgnKqbSbj-o-+M-jnL2$CCeU7qOyKgj^ zo`d!i#~8KSMjhX4^@bCoaeu%q#N$Rd+<;-Mjk=?KXp5!Z8i;ERuXfl0Ij9B37CnP5SSp^V=T7UAS0lIaMf!f2ouq5f8~Iw-c?z3E zVRs~>gc;#t53RO0-bm;Vqz+F}^m`4_;`-NxM#QC)DwbGAiQ@Plp-UMUjoE!;HaoZ+ z(e#~Bmt(bK{nDgsLFVs0mKyEE3U&GHHPC3`vDv1fI^%G*g)i9^MG9KREpS%JRKB;L zV5t25vYpyI(jZ4P?gAIq0lJWd32-e9PCL*Lis!&Ubxm(ivKSeG-Ryao#?pn3XcGye z1Hi|<@OW^tvGL<;#QN(;xc0+iYmCwlc33RhHkI2R;@wFF*fLUgTeOd7_+Tq6LI} zhYC0q6;&LYHP&4w)zaf=GNA<26*yV3ZV)Uh>u%9CjhTZpX5vSEeAlXdH=th&wQ1P2L;#4(T)X?T-SK6|ox300 zlAi2D{=ey`v;MESbIY9LLKO2Eb`Uc#*mPN|b2vD5KKgo%u*&Y%O6c3#Tw{NRd{!7T zc9-BD@TvY?9+Z}9;F8~3{omu5mneb$--B}oyoJS-Y?@%Z%q)+ym$odq^Bq;!^Kb3a% zOLjiadb-vZnLQ}xB!{C(NCuV@vri7nbYIZg3qHuc1sV7i#eY+yPC_~|j`q@#vz*jx z7)8qP1h|cJJz^*saQQNxWuk}HCB+36=-YvUdgaYyq#g{#b>YieA*+T*?@`?<+^!3K zk)z4&Y_|t5%7VAVxEDUAZ$D^+tD{NE?};%7jQEp{u>Hxa&2n_)4~$`*40I;%^v>* zRe!gPhO6Lkx}A+_aF$ef|8$lsw;ukjy#xbq78!08eAWf+0oetXyV-lGWc#{~)Q0S- z;fS8WUwiqst{WhQGyhrk)d`d=O2*z6BU|}iwiEgM3u`r?AZmb?>c;zzY@ZfnnoLrN zR;VmXn2gzXo`uB+nybQp>Fc6ns*Kn-x93#g>+Zwgsfj}r6zJyvQoXy^xD9HKh%&d7 z4u`$nxqm%BG5A_AYOY@?J>JT(-e%sXQR}H#Ek|l}ITDvA)<`NQqBARweIHnGuVX(# z=w?I^%0ai)NLg*fR*~#fsK?5$mPVs|qApBuve?Wfe9QDE3JELl2PLRZO4VXfQ1zO*uX+;WrvB7l?E?QT{W}cX-aX#^-K6 zzX5y+*UZ3C{|GFORYcALQxv&xjItq;Zf*9&Aq{G=g*b5;Hwk{^IZfba;B=Wll~6nu z)%CJ7Ti8Y;8=m{JGEpZzt$Arw!45Qc%Mn+!`1;$w zxg5m3Ddzgb>FcCZN3DBthj*SWHi8Ts<{PW3c(?YyGWCXNK1grX;te!FPU4vQz=R$) zRQk2`ptR59*x$+|d#Y?!xMKQwLUxew`)=G`F{Mo<&Rz#ITloJ&w>^oyZuw*TecP&h zpx_I9d`f(npLE`NDQm;U6@I1gR}>bM(K8xX5qy{FMd!-t`3Zu9!;)O>K`K3FvR41r z_Ae1zgM{(T?Zv-)LyKR9!@L6ySes1JEkG>aS@I}6-N?a--@dc!mvk+^0QLOakDBY^a^2-rJA4G*6xX9#}+eWh9KCVsE4^sq0^`A>kB-!p&(e(#KPSC5Ocu<1YO0(yl zCEk^Y4hF%tV$-nRJGMm#SplVzMgh zy;U?(4jIq;ZDgDHKx`WV!Tb9HVv$dt$-^pjmO2F;INvAx_U-#68;T~C(_6(dp#6!> z%A)quY>*X*uwhk%BEyrX4&fB8zm0@$@t#q0D-QNG3aWe5`7OzP zWw;3alo(WSbn$c&FdO6EcF|V&{I1v@VH6GSQ@k2w(Cwr%&zO6`D)*O7PhnK$!O%S) z5Dn0pv!&un>6QZa)K()8)PqtRv3JWXm;hc!Y60s+!55A0DAs(?^Xo%fUI&%h^)rI~ zF%hkrnewSkRFZ&RX!J~<-;SLRLo#uWy$bO!*!LIEoFCU_;7O$CibJ&2y;Zw8aq@Wb z4zas$qiCN((aiEm(E8JyLtph_8)PohQVhqx4@+>xSks{Cs3;F^X({+ET5$o6M?-R? 
z_s2kqc$GpsDq5-fEu(aX3yMe4Qc=n0$3zMKDc|B(t*veBUhAexQ;CZc#;3vuow(zN ziDHKTPvS)`W%K>BTFk)oOz(zePbk)5|8z7F)lOo1Z*Tn8UFceN%d88Qj)pX=~G1UtaGu(;jsD;^EKRz@yYI&cJn1QW@eNL!9mf;>;Tj@tHe}DOfgkFGO7&&^OCEd+v`M2nh`8UGMd{ijZ zEEh(k8`)@Pjf3HE_xdSo%|;gc4PX`O{e1W(+}1B@CC~HA0`%fKAB9+LWl3OBK?+t< zNSDFT+I-nn^T~^P>mX-Otq**e>q97LU&IK{K9c8_v)rk-KotF?LL@K%y)XrBN>~e; zDAFN#REPg!8MO@>u_$TD*q{tcPG{{S<8Sorzv(|!)o%&-rKkCG1upv*U?Q9|xT?P% z+$9?1ix1k@73CxD6=PUFB3_kDAo(mw%W5Sb$Fg2TvL!Thv^2d(f-iU{=ZVGMgOTq+ zRd+4Xb7G?1c3!X=e;ejeT_4wj0WhpKm4@tPnL?g)!g6Gum^5SM9&)66>OP!UE0#=F z4kp&Kyneh8FWwY+sAE%?F})}msPhcU z20V)$aj3Tttd7o075Hzv3;`AWRHE?T1ZBC3;Mgg5uofz+l+LO8W-X*RL{@%L`>_+U zANe}wTFoNjh&_!U#ZR5_QPLIHG^YU95(q=kPn|oVvtBWC_$MwA%5O;yeXUB0XOMbZ z$k(tC0xYVCT+DpVVY{uw!Fe8%5;t*J@7Vx7--o zHeTw<+7YbghaFDpRRBSbi%AZg;k=@0gH6_m$GFzl*32Ql>Or;9F(TBYhAsiEjLo`d zQ6SRc3%te_d)dO&r>H{bWlC0V!>CnkK)1CWgd1f%m2ou-@p{M-mD}jY9&-Ai*&hH|)%AL`Ws2E)y2pBgCQ$I&QDw{xO$Q2o5lbJU_o`aEUn zvWTKRMDpUL`lTpf*(As&ld!OLT=e^I?UTO_&bzqmA4)>KA1SP=+gbhi{wo*h(*M1J z#@m1cW3)=nId}3*QiB~flBaTGBT+^B>s{CEVivP3e-^UiFJdsJa)qBeGmxrRkK2mq zOx{eX{{YJWBWMw%;v_^`O`G2#ssL96#) zCr7z{*KTv~^rW#5sc0+mHW+ZA%J#v@6|1Z391Po%V`=GV_8w^SS14LG5DZu4(nlpv zp2s4eM$w7wO9RJ`fUnAVpC@lt$v1!AhvICWPN(GD9>)s!YF#CtnvXW2dV2Hyf=zZVIM7g|(-EOBan* z$Jg^6Utz6;!}-HN_WQBauicx@uRXTR63uJ7R)-)b?#e~BA4TK36gd9DW7;unWhaZe zG!CWX#FfMrFy zOnZHcsSDJ-%h}nCg>rp7sp#GYoIiIYgO`SEhDU5r9G;b+RJLr|R|q?nFBcQU5pdRNC7&ujS5Ome-s$M<|7r3zr1!p?H+XzHeCMBh9o<;g;N*ZgZ36 zQ^&~ux1LiTa3m+ckTcK}TF+kUuSb8f(%g_q@&I_nJ5YuBGHcQ{j6pZTamD*f^bGW? zJYd=n3UqkSO@*1>_$IaZUPuk_o3`T$7|Be6HDlR5_wdtrds3KMOuyjXV(XvBzS?y- zIF8~({}CuU6cqH6t6utaQ1Ev14@WJU6Cid(U8>V+9~FF(8PN$pYtxBz%ygH>XdJ&-TWrY=?AEaDac@juu#d?Lj^3k-EHU-0RN*5`p4|WBQXl3U z1^mWLj>I4WV@cuwVPF2Vpum48RLDJOC1(>KaG)AcM|on!-w-UXQ12*;h$X_->ZY?v+ReWn%7;3Yz7I_eAEM$i=)F z?Azuz%h3nVA{*GMeZ-TCw(+OxYps43OgD(GZ)YIJ>5WoaW!>}A0<0=dy74vnhA%um z-C&t1 zl>cYth+E5t8ZSsQ7|x~N@J`4~$y@g!$bn?Dpa!Bgb(|l~R^Q8$MUkWbWZh+v6Yl)z zAA(?JN`mjZ$-QCv$`8@bK7!AsC)8-BejHH|s#S)8``9KNy{FfRb716S1<{XWaOG3k z!_k0EsD8ts2rS3;uXQY&%T(hux})A8@=uIF{Ubju-ttn}OL060HcfnIO&@dWk?75^ znQ~@1`lxYSTER!N_6TI6^S{P&Jyz-_dc}uhS|9Q>wa}rbw8dqA|Hye8Uba*E#VyTA+5C$ic@ROq{K@df&dhcWg%$?@!9@GNE;Fyv>EvRB%T1pYV$=Gp{qD8MC z4z#nUl_w0~1sl|n^Ua=Ye61_`L2nQlN;e#f%FAY@y3djBkNvwoIpSNzt>S7V;l*ZFl(0Dl^pYxk*guKuZ3D}>@TsE zIscnL761PesKuZI4y^ktl?xK0inrx!-v7?mPT>&;bL}1%bu}YfY!)=@MqK{^+NBVf_{tr5SeYqFtcJ@DN1J-1(>p#r%m+*(ax8NWat_ z=J%UcOcnU2gSelQp^TX=#{=ceQWg* ze+=!L&qWH9x-X3W0?885h=h%q$1C#QQWHl2;^h$tdH9-GBzkoY@o@_b82 zf6S5qh7R$OBqN)CJI{ca^sDY4S;hmS)R=)bcBM+!slCKwmPH>PoJeY<#gDO+^W)b# zP)RpCfT^3fk^}&`>mqS#Vo!2&*%Yo>7(|q@$Jv6Z7BV(ld%CdWP`LY|- zj5}4sw`%v^E-UC0FINbOyEe+r$TuN(Dr!g-kY}SU;hCkg~xR1(rXuKEw%INStfv0Ah4&GF_BJH#-#cJ)IZ&@X*McJY+ z7Qv+YA+2KapwGZAEHX(33nR;`bla1dyKx^1S0NqmGsxQBV4LLg!D&BC8-+^U>E;ig~W6O0et5hyYq$ zWV|ZG+3O18UTo$eV)V5YfUyr-iBnBV{{irRjo;Qs8z58JJheXh&T14g*9a#Q zYAtjG209f=?=8C@1BMtIfP3=|cf#?OW}odB7EaV;$F0ACTK0AyqU^1YrVlg-sE*&# zuC@t#f3CC6)B!S-h2W9B4`L$ON983w`P|QsfLn-o71i5YB|{pni}1`E*Ia4`ShB=V zIpsvHT6mSDllgBfh@GW*gw%jb;iTIy;DbnBmPnu{lnTTxv+@z>!tAKmIY5;Yrmy5l zpC+$?TwVNbrCUThW??vpo6|+f*_dzjI~D{wt3mg-)}TSu@n4|SnKq-*Fg|ieeD<@; zy;Ts;m+gK_^^H)M%i=z^snwPoTBocsVn?}^n`dyhfdZKiJDyfnRgVSybyitP6!K$5 z2HkrQ!(f^a$s=J;+>VZvtid1azdCcjmd}{|IYroUc$VE2XddEcB}XW`6eBG>CkCL`nqg8edr}v|P7DpOYKz@K;a^NC_94*sndd5s*UU+^>0IWT z2X0Re*m}^iTKmXM@Ew?-iGC@(_wAol@*a*K!HN?yk8RaNl$Wk}BAHG1R%mC3$aMd? ziGU7$?tXlXp6*Spzm4OTA#9|=tLaPvjM z>$CF|==HKpSD^JorP!Leu4HS`o%+Y>LTW+hx6%om4oWKgM3AZpGwfy&j4Haeul^&! 
z8Tj)SpuYK13oMl6S*5yI|8Jn$@o8!@M3n}%4g)V|MP7L6ErVK*IBH$^^rMQ}c4#w>MF4psou0rmUZqE%6;qK~hS%TV{z z_Frw9R5$lW1>T7o=exg}!(-g&yomk-xWuRV6&7uU!nZPBM=`3tpXk`BN!8LWWSM*UiA~?ATA{ zNk3rtkDnuKzK;q&fm{MlH!nZ9pXiV_1esr!)TN8>oIm&p+0Si;4_H5Ya&o<7e22@T z%ynGUnej4LFm(5P*c6|bM>)~A27DbQqDBem@$qtAOv&Zf+5ttzW#z=X`kE&u>JCi| z%Yk3GU-F+RR_V^M`-{Qo%~LL}C3*C--lpCYpHw$Y(g&B8IX(>_ADOh@`x_@KR|^`X zi%8rvUPVOB*iDUP7N7H-zSfhVe$#(+{DI(!;}QPyuoeSUgvy@hpB}jdY-rGw1iAVj z{oM)?44(nuev5u0yDw#kf9PJ^HBQZ2I;cj8Oz6BEXoCbqx|#7D+C?EF0~hyopGd@a zTUoUf6vZ!Q!|r36VY*XVT!Xnu(D$;ootx14oce*E)4uS)a&KwZy8g_B6=my3o$OkA z-8?9#AkDcfXkRqWMfdS{uz7Lr>MtQ*`MjKgxt0SnAN$aF-3vOSKm8I*CVM7e+g3dT zoK^L}4<;YL8Dh7I!gakCy%o4=@1y|p^NZdervF}dpsOfJUP;khu2$_LVI}t!%k3U+?%xgpd90C6B@up^;5gqM)09}dyn!B5`%X81Leo9vCL%u#l&yiw-6rq7!XQtfx1TROi>Xp8biwLvJK4GQ2D$|g ztz~B`wQyPQF+Yh-XVj>h#G_*4!2P|-^8Wyq0PLK|jx|K?d7-p0X8N_SWd1IY*TdWT z^n!>|yu##BIku@1k$+bCK*8l>0oU{@X_)fyj9*|YHq}9K7(&4R^|xJwB~H^S(1fUF zr}Wl7wQQ$nwS%|*;f|+o(;Ks=o?A_ zEi(jrSTkMA69NHtlV;IEvU`THkc%bEGULTw9L+)0l`f&10sULPI2oZ>ZtnO_WX#y| zfZ|TqR)#?S`;{**<``?X0XD-1vet07aBuGzO+=1NPTlvWw%$5MIeNkQyT>(O+3kqq zC%sYBjJ~Q-y{Kzai2`}HSyLBhS=CBXzDHiwF7W5^I=vgU1_89vhS((N#Zq)01&>_= zw}D|S{jWhaVNF3l+OcZYI(Txj%C>6w-Li@8uT_Eixr$akeqgOI*`#TM^pM8) zeK2C!iHoI&oF|3l%~qC|(20Q%ZOWjT${DDHe*nwJQ3~}p3p)QuTh0rZ+VG87$sEpX zihQ)gQ3iWKh^!$}f|yTkB7Lp`#!A0gI!gRC%_WbaQ)DZ}2!ZiIA-Ix_x^SxH2aOr3 zh4M%iqP=xLmqwt(pw1J*RdrL&Y{sbS0BhzWG^_BN^P3%3MmE2XxG7``el(3g81t>D z-mnw<#Ghf)OYl}i@gn1r{DkY6k#n#`#hb>I4^qpemRY?-n*QT`1V>{QEIVSVo&SFS zhyxa&kzF5;J6n39%0VHZc%Qdr`gO}EB=*58_My(R4_4Xj{W`2(TZFUTngGGsZHw*d zz~u1I{-HU?g&o|3B82lFAY52G%5Xz;r011n=2_0WzS&**q-EC?rxO7zKn)8Z*>6TM zb5KNSZK60$lwdw-AgRp#%WXc?}oS0&opt0GtO<&u7m>jMl|NqAdaXgN3(B(hMv&#y*>VjL*YeDH8+$sa+U zh6j>0nZdy!KP!0t-VzSY=CY67k-TXdKt3-nrzFSg8{yod?87D)sR-MK)0t-eKt;wr z(--z*?QO9T5M=!MJJKp#{*>MG0n8s}tm40qZ&KD<{EiW2ud&P$WaH5V0*h&V-{p7W z`r*V#%`OHy!9}|`|HJgc`xN`~_GmK`E!%Ifyo(%{E(_s=F0Nul+XMi66JIvIxeSU0 zXji6e$~H0ouY@PoF4cKveaP$ED4f@f60Kq_o5{JOrOtL!AK6u+`~EOW08ZdCDvU}0 zIdgJqZnR@x?oIL^0*^M=R3Cn(4KfSeVV@Vz%<$zH! 
zYbK-yQ6F^t9SyY7mj4eRZaYji!rVqI%6~IKH9$J1O7(A+(mLu6y1w@e`lGEXonn`k z$2Snk@HU({`tXW++n~C<(OS( zYE3Wes{DfRrK|icFwFWFU(dj54Yj%FIU)DXg3xgEqEfCe@{lVs(m*b?;_gMVR$hPMaAbk+JnL8S4heVz3ypN=vV=3T&|_`ISNBYQ2Qt5oCY z65rW%razIcW)7&VRMz_XK3>osxSQj!OG?H{%ql2Wqu0b1LyPGssU!m$aQ=ydku2sm z&-*Yv5mZX8MmM^7RIuwy;fH#bV&RU zz^3sKoqt3g%cXhBXRQ#(p;1q=H!c-hiZNW-#eop+>b5EZryV@UsZV1!e7n@7ejnR~ z@4s!TVga$~XTQff!RI;6Y*eXFep1IkzK7!UEKkcB28!EVk!Wrg{ceyuxC+VAPZ_u> z+==5{rn(M&({dI~MU~iXg7^GoqgYMu7F%&waK@TS!F3WZ%OfUfJ_la+2&ej$MVP8_ zg>bX-RC7Ci-pA^l&;DzjtT28@@7GI_ALMB%N+ozhS$awxDpAWU2b&w0iQe8RoMv#X zQhM2qyum({6FNkz3`!Nu)e3#{Jsibh>8@sp>ZS|UsZmVrea-2U4Q6BFxqIq17M^T1 zvY01xBiV^ONkNy#XTAXtBr^udc#L`h>^3VGN_zqr8Z%X9>ujaPlO5moWIdm7XGh(j z$8=tQY@drW>kuhN#ej*W-<^9!Cm#AGUzoqj8B{**;J$`{g6lOHzCvy$>b%x~2qK$D zGlFNPhL{xm*qF_m6hA2IDoJ4;JKiBPJf-VxQhmMf zojAm?0n;LSl#im`2ZDqb_yJNZ7t2G;Qjral?E zT?YR3E9f+>JEdXCAE9YM3j8L7}YTZmw1M1GYN8xG< zriC|mS$UjP1%=8xKC`E6Z$phdP|ZPzG)3_subc6gjX;uz5D~lErK-0RH zu?!0uokau0Z|H;CHyWyklD`o)dIi+2vNYzo`Oxwy&a`9;QJj#v#_Tfo09i3b#K35Q z=jF%Wv#f?25fy>ma5zL!^Vi9CNdBH!O~#+D&eaNF8)WLU@e|>C?34IPf*Cm+O}sW_ zm9a;8rE#|u{P~CfW%jTTE9x~6!j!MOrDukvh}^+Qc=}e}iffrK<#V;XOJgy~v+BEi zC6gMSkv*rubt*ugUNkie12?BvEGs}rLP|25o(T$tZiyHW1`P^uD>0_*VF}4n7KN*> zDh>nPvU{BeR?C-VD5}xSI^!fva1ipKase>XT}5- zL}P9vhw*vBw-WUogLAdaAkLcWn}5Q}%gO?$bl*c9_ilcoaag#o5HJ8lpS#Np>gf#n z0UYSHaKP`jicDk_j#N)lx`e>)O2gj7xW85Iu}FY@t|LkOUUlZ}7M!92=Xc(Zb2$;) zH4avzAA^qXx%Xf)-^%WxtVI!`5Y3BUbkS<|T__G^F7NjWn7nv%#TBb_tRT3tetcf0 zw}Fm&k|RJ?P7L@$=Yg?1E^FFSkR;8OiR961K-&h-CV9DJi|io&%(X~$${zgwXtCWt zL+y%!BMGUCW=|s9dHcpmVfxhwS4pT4{Cs3xoI^fF&Rxo5ejhclr>*a^ioHL^r7=zd z=t$cwM=LMTs5)~NhNEu2m9~kYe?ZAJ4Q^d0d|2h3pBD~s( zCpF1rH&(wOpEep*@!3ql@MT{=S|KkonD#Boy7kOV-JS$NrAp>c{X>xjq9u(#3112m zLX@kXCCkrijr1q;n_DQ-j@5gO8>?FQ>3&$L+|2$e6cmmZ>(vu>EwG3e$%%Sd(4d%C?}2A)a3_3cmb^1O+Yef|E&*f8T{?q9hNvMj3O2;b&# z^r%olYT1J@Xct}=KSzVg`^fYY%_^LE@>e=?p0ifdyOtM_NkfL<@u{ZF$0Dt7cPglX ziW|j%-KJ_@v6AZf(0)~1hPes1d7BE%FLS?ZMHm_6pKvx?L<|Nw2u6edh`=^mS?(3o zmmihcS%9;f!ryWV)Y*mSl0re6e${I?#rfAW*96TpgJR6X;+BuoxTFA6D|nmEk^YF4 z?YpLF>vijBiEXPniDP=7{{VUL_PUp(2DaEp3b%B&f!8n-dhfhfSqJmiZk?P*p`#;& zYL$_;ZC+;tj1XV)P4{2bD`^ErT*=9WA$qVI(`OPd403Z+kgy?SK0I0{)HB~1RZOR+ z1>n0rQq;CUQK5pSmmFEBCShKOhLi{Sh$UKO@Z(I5Q)Bv+Y*w`vZ5;aU9}9b?uVd0z z6&Ty5x0T}gj;FqYzOJjXLn%H#@~+jm8bf>@HLImxVO?oOlz+Is#C{F8v`ity&hCYA z4?thFtQP;Wc5lw0Qul?AOCYP4S5J~P#Il9ks>!kE+VQG`lH>fcd2%2(N6d5$VRQFE zegBY*&GQVR_)yZ`w^&i&2$S6$rF~U7crQ3a2M_7cSpVFXbsQ+-y?z$eT`-AwkV1q; zj(pWt&Dr%u)1sW3zkES~+oPV)TdE-`Q~4h*&l%g=j$;`Ggyc9*mLNIinQpj1=l3Ti z07)iXF%B$j8|mLH)e3D8403V$W_hnodq-r^E0xwOtDIq*@qtcz^H@lR0aNaxsk(*M z)AsfYQ&Pe*m}uslBNl7j_M-8XAVlQFZujv~V{7+~tt z`EHv8?SFGmnMN@MnJ+|gnMf*<+ZJwmI9KdlZn)_UrwI5&A8#X$?CA&A0hl z>YC@AsG{12?FLN5sC?M(Y#@>8v7bPl(Ng#zH|kb%qE2`sI8^TQtJX@r zna%bwgm3B~!LJ6dk*e3@`JjPEbRO_;DBTQ&>JYtrHet~U2Ug-X zD5^QFM6vTt_U!<8FGf#Rr+UeOH5PBaRe(Qb@A%X@wz?k;!L*^j;EbiV=2>Lyez9@s(`^tDkoBpc9{Wr=iPS!}Vn2{FYQDXx7M4!cx1Pwz}+rEhCr}cbN z0&Z`+MB5Xvp6^PqNlh64PF&Lvp!dZ|Pv+WrgAUkH0W%dyw9-|xbEVwy>GvBYG%vr; zgV!bqisTtf2|br8tx390o4xcji~vZN0SE9pmZfDq1i9An8At4`&a^($UF*G;;G(L~ z$2xXOr+evoKAN**o2Gn+#w8DKiARy=v1R8#;i+1KLM-U>HneI&v54)v^Fy4{uKAg% zX>>J8zu-!wEt;r#(U{wpUz?fV97N-n6$uGG2(s%u&M#5nXiH$2+KOLHSD+)TN8@%D zehWF;B)TxR>|P8+Ui)pq{Z}N%j)i&BXS~uK@8s9h%-4&XEcTrG(!d?-aVlQ#@p3HX zyi9UOmjv7t5eQ_S>h!$gbC=7~DdK{5B5L*Fhekt&qrgA?KfyDxtnL-WC;c{X9`Yiw zVOT(Tdk)Hc)%P6p^((&DdG(bA6sO(y3DBSBaAyPY!mI!-Pu=nMthvszd8Mab z5gp11@K~$$d&Xf*5R8ywt}i|~b*%Z5EZS1+;42V1IjvB8&CwsnbX9)m$e2eqC1I05 zOE}2_!Zl31pQBZ^uWr->*gJy)m}yd`7-X?8gZo@&TlLo7WiXSp*5MT*$+AQ*4MH!U zOBUAu4^w9u)m9g+YiMyOw75fY2u^WHffg-L+zG|q-QA^l@fJ!c5`w!FcPQ=_+zA>W 
z(3|g^d&juHGm^P%@2s`vJKrZ2bRcyMHMeA1J zXB0@p=(on2lng%yRg`O5d-Y(DLXk?!8JmmZHt}N&H14%_ZJ#yGJs9e;Q6`6Lt5^Bk z+xfVyX}Zd{K>u<_WuUkze=~s)la=_2&lYcon;czq^3Y(yV7X79B(Z?|18ir1m*VcW zBC~C((C@2zLLkRj<3@8&r(U%Lo;mIH(squ?dq&pO`Vy(_2*)z9&K{aj(QPLT%w4tY zQlu-vy-eKSE7*xGH|KgAWew*oG%+8NJt7j9P-s?XlN)Y7b>+2n`SQ+M^pfV61 zRMUoz`9yn#wIunT5bYK+GUkrl>NY0@FsOO{xDXxZv5rYQ|6MUdat=a$rZ(}J>1)h- zR!AkU5_%umc%F}ws_=Ybdd37>>F_0pst0tvu@`z!DI(=>wD>Za^?KkAc$O46N%Hvi zf^G{O9U&p^SXQYzN_%4;9~T3tqVY-j!5C;$L^Al;AbUN@?)I zlN+*}U%HC*lOvc9MHsjHeB5h;6u9HnN_Ra%_k_{B#=yu_%TKE(?1J)??hfbs+&q}`&ie&TCqL0h(k?waj#72O-4G3H?8}r zNxE^pb-3g^2D#MD>P-e})TYe4(ilzj4GL=X6-_=fSF#ZjH-+sI{7y5@iedLQ+6*f$ zta_%ubLUSPYbY9IoKb&lEkwn&NV5oXD& z5<9KbWsL;yw>x;j29Xo}gfANg_tljpmXOPFB}#`v!-UPufz@8 zrb_f3qb`Cjg=GLZ%j{Lf_tgo}j8Yj6Ln%mCux0r3R%N+nLdH)=IX8_N*_C&Jek%sm z`P&%6AH-*Gw;Pb}w+aLpkCBdVHLz~E6`)USn<~Lno*^j$kt_&MC@Eei-+Z)c&(YHx zD0Y>KU!G7mTB}s7@%NM{^sC_gDt2-@t38M%j;;yUmixdI10Vc!2a(U|!+ZQt(~RzW zSH_Z({*vPJ{&z+}I!@7@xJ)|1Z8^U0Z<9{fydFP)C;`W$S`YD?|CqoK{UwU0sEYmU zu@^sPrG8|SM8~-q_WsF+h%seJrkuaNx%=Mr$u{~FD{j1@8z}mcN>1E=4_xDRbkas9 zLLZ7aiti>s(rq0Ff1tqL6MnH&spRi)2`vA-=AL%I@$+*npQqbs9?e>327ODSA2m^4 z5N$fjtRAJz(RU$VkH<5s@86tM|p5%FA zvc775Kkx1rQYY&X`Xr_-N~tf~Z`(w6h7HP*Xpk9S=9hKjHELO4Sm z-4jz{)%BvxMCg=qHhZF8I=u*S`J<&;@Uc2YP85T22HGO~yC7Vp=gOwnmNB z^t$S1&bU#z@X|OOO%*tV-{c_L65Z{Z$0Qi#a)DCM1vX-`YY+&d#En@zm1(qExY zsXcrq{lMGMU zglU2n7qgy*gXI39^r{#)bYUDe)tU#Vn>+Xo^aD$H`o>@Iy!O#J!_=6>xt~IvZM}wY zz$wZ~qOyX2ft0@1j?^)ogeD9H(ae z4bORURMIe-UMrl`C{Y!#D_BFr5sx!2o=(Tp_}g#6GNqiL{_*Aiv)d+Psa#%Dv!%}@ z?Iq}=tWZTWEArSDv?akF;f9IFzw)Bk?w==o?~nh=u08C-8l4Pal*PPh7wiZ$E|~kk zpUj6zyQy2Ki;V?wx#RV1)le_@`ffqfQ$YNp`Q|r$OWe^LPKhW4JFF2z8=;%8Ayf(| zRLXID7liIpnoM*|@9{j{{tsnp5@h^5vMnapsD`+Wzo)z>=m%ZCvj^T1{_lq-K_gw4 zSo3hY>vWf!-&#q8Z@ zdJXrS>8ZtWx6R0VIk~L?yKoD(WenGGuIf}#VftaoNYfb&arVh?NA+!rPE}*mPRq3@ z3TN&n-R{i87#miM1HyjL_Ac4x)=ZnX+V{eQCwbw?1~=HOUUD83ZwlMi>JCA_FPPFI zPLlVNsVue=zqj>2M=Eaf|KTFzK$c&WS9Il0?Kz<}VE)FZrR=bx+4Q!n#Ks5BJ z>i2oZM{3l#tcvOuU-eBOT?vK?Z|Gg`_5~!5f9~3P5I!w5%{PQ|bCtOZ`Rj%QIpVE7 zzc0*#3H(Ee#71;6{#$sgeX!S^Wp;lZn-Se&c3zY_w{7c*I zZ3h0hm9OSKvzAUbFuxd2a7|+fAz;mWepfwC?8-FeEnxkoARAF<(6^J8F*4IjxKhbK z9?M9_Bd$!eB6&1zV6|Yg(u|Mmx%PRZQWuqKM9~D8Ycuh=aFBjB<(m?~>2lR=XgsY@ zRm|!~|5lOv0XPlQu(kfFjzL8XnM=(jc%3I3-1!hEhfVB-|DZs_=`#b=+U zGri1d^z|Fnlt^+>^d!leoZoj}3vQ{*IfaRQxrjb&$xRO@BD|#L$`edqHJc)7Vq9l$ zkhq&@>w%5*{iF_gl492vAXIVjw=qOXx!*j-Ej4r(4?bscP369t9%afR5>6Kfz^2>1 z2WLLyR9hR>$KaeECL7%5dZwNTe3G)bxq-Zf!LB^pf$Mr%j59gcm)?*22X>7j*pAcc zdk!cC5!7jL9c7f-lt0U^GlH!Z)fUZ+Gb#F-j)}1Y(9=m|;`D2-|F;RT)hLy!KLT$G zojBe#D*KYEes@DB8hL@|sUFwj{9Aw2u4WWl;Dz*;lzw$=mLZ{H0fj#nc{%!&YYc3$ zSA1wSr*Zk48x*EqNpsR+V~JN0Y$!O=qd_A5SSm!)cy6hhnjNadcAjA!jeOaZsBj#i!nVu=YKMPp`*#VD1*0-ZSZk zVz|d+h&%$d0vhkj`q6l*$gT6h@!U@Ac{u`nw{=0uUxIn={Y3oum7g zx%LB2_qQ_%_rYC_Vf5d$T1$it<&&om8idT*^Hg!k2?~wR&zs08E4p}SwxvvvN> z(L!s=t$Wa10XOI5Q*6nl;xRDz(N+a1k*I_PKcT+snh1Ut1u+xa8o3ql%6yOJ=>mzt+PmA9*?E(KX3m&0(d-W-1 zM7Dx^AMkYqYX}mqs~GVQh3YxunuQ;b7T;6Nd~Br1ZsSd=_oo}}q0sKM=fX)|j+?i) zBv|;_nifD8S2a^6{xne7vr@Nt9PUq&E}V2$`^ic8ON!se%aqG~rtLJ4N%C^V0QvsM z>EVaTe<*{1-EXFGYiO=`s84nV`;cx55`B|j6u`W$vGpj&gkoAHPtH!zPM|E(7)Kv^ zHS+;!sf1L$!*^djYQx*BfHnWO%bSUwy!>^9S_*dMg$@kJtMg)YXef{s0Re zSn~%hSk0cjJ7&wDBCfPjDYm&YvnL&e?$tP0teD$*|HjP50J1>%X1lgeJHyIZHBIL5 z@rQq)lW)y+fbI)E28xsV9v@Rk{T4LQ92;$sTy2&0!IojGQ%MV zoc6-y+$2}6W;M2dD22GNe<=Jxb-MS-NOS4P7O>;sA{XBcGU*;ynjF78{hQ9TezMBq z9}2W$xGi)!tY7+!{r|@E|A)f+Fb>+2)bHdz#)CC2Iw~gYSLPwfL311kvjy4vcuN~( z3U#ayq3uh-(ZzeNvYGc6uKfRk@;l z{bpVd49q0^B8T-#%Sgt3y*y>T)`%*N^%qUxIbvdH&*vw2!fzB|Iv+E58(6r{iq!O} 
z3-^Qd*N~p@^>`R!!~anFb<;Ns(&_vlH$dda|1+-0A_!Y(aNZ3BZs<3OfG-ER1;ezp zJK2wRo1T&usjpwL0IxYNFIA|ag%5>k!T-}vvfCuUJ@T2E18zCli{k`F&02W=H-w+M z$ORivp2B8AkN=BqA}41{ZjP+y2`;A)Mo#{Jlkq&&hg`2mGYI4PQdEP1xu7VuXO5y| zXjN5sJg%zW)&G3YxG_0br1$r$U>3R;u`~0tdpn-@dx%?d8_FZf<(_(;Y=2Uk6fT>;F*3)cBBbfjfTD2re#d z+2HTXi930F$j}9aBn{2q^&(5kKA9#X86I&Ec2BPZG4C?@HIy9SEF$+gfT!ah%C}%! zv*5gI1u3Q@uPcc|dk5tZzAWT|E8poN@$^>XTwJbp_Wo}Hj#4>GryZ-&Z;~7#7m?kT_ub=~O)@mIDy0>dN`rbQuTw5~Q+rRm zunS~hJcfzs_5c0Cm#320=is61Nw|T*`boU9X$xA}y7V9d2iay^kO5Qc(lYmUD(^oO z~n(1~hq3r;G%(O_d({UDK_+R&%z0SCgGX zv}bOMDel7*BzPSBx?OhjAIfWZrr|#nJi9kW1`32ShZ*8ELzbo#3Wr1e0j$IYpN_T{ zAoKNSRXh$=Kib3OViz(zL?jl5txU)*Z}@nK!qQ5G!1n7r=L@iV+BJNpK2FxT3TC#_ zV8MqV%vml^cZo9s(tfw9V$lb``TQKJW2+GPuU#B@G#fEh1Y5{@Pgvj$B&tEwtgbyj z!ew>H>%)oP%SF)>F`}6Qz?n?|5ZS^=F^MeAiJYVok8%=kC$}Kj`DK;-XR(ECBbA$k z%&xhLo*UX>hXs#EI_)&ebH3z;U#IA8fdsr4y1gra_%CHgWH7dbkf|E-7&@qn@1N_c z1wD8gM&d7?CDGK)wv+T+7KNy*Qgq zbJ;vfM5*skHa*owvr2@mJ7KP4uq5OYK<2GZ-3$0ZgHuD&WxvfsrjOz%=-;e#A*9s% zu<(voOtTJ@JfJ#w{;D9>9%c`9EP}4|Jyc5l;NCHMmP0-dxBl=77*Rj)_10erMJVo* z>WH#!$)n_4??^7R8`GZJ!m3jo>5D{gG6Dt0XX2|0*m?j3NjB0NYpy|eDNq=Z+o*ki zhp7HTDE@A`xP{*npsX|IDb^C`?c{Fcuaf#6alCNq=+FQ}ll+@NVVV%O_6(Ztl7|}c za2`NypR)Due+8L$Qhb~Fr{XHPn(1=kSU#LSzw>6bm%tqSzfaNK`BD`z7&Y7vjfE1o8Z3m# zQVA^1;R`CxKfn<$#C;Hl?nuzb#a?<}0?i+G`o}*vL&wwY<;@LTL{g=kL)yR%-L-3C z+o?a&GQEF^QdE~o1|;pJ7KhR#Lh34RCtp+*^SKtP8@acqsT54BCqPsGA~0dPh(yZC zrw9n}+I?XGY1LCscj9Q<8ZIGwEqF-~*R^9aAN>Ck+2c%Bl3t2Haa89gY685 z93(7*U@!inmfJ{O+5>qFaGg!3)(YTF2ofYjfM-hmS}4l`_H+E7a+syQuzFjbf2h+^}iyq1RfUg;5Y z67x?XM#f*6Y*w7n@TP~!vR=INbs)-7CVCuvntvAU?luaN1W<6Pj*tlcJ-+q_vTDl5 z9ebEwrWAlzzXE2t(v`+TFbIxKz7(0fTS$4k#xT;$fw^YtcOj-*5go@a1Q~71(>l!ECjq{P1q|dZfpgWt(OujQ64zG8IYaM-x$ny~Sk@sR~6&!fw zY%w_?sw8yxrmUw-yw$u!#pUkJ6|vpD#Lo7MRJL2-85WMh5OBIG_2;QXK||4uy2wF5 z+oNm%xF{51`xY45A20{{^gJb@Qbh1z&4W}32%731w2NfTzI6tUAtqz95Nv9@_jLdM zK5lE`H}O|HfxC~B{d)W(4D*rhH(r~l6MP1KnWKisu}+`=kY9##7E6VARKE1JW*&VX zM=OYu7@h%~=yC~f1*W)GNiC~Bf%e2$(ua3n(q-pmu;zC**sC{batgyvL2PCWgE)pi zM6Sza$TwZx0fc9Z+Fyn>`}I>X_~;r1x_5V2--f?D)q=QZ3Z=FJKD>>S%iooK*WO~{ z7Sv#}JObobo}#>e>Kn>*$E?b{;*;gKcB?kR`gmyZfMhW^&Ii&na)gesF z)WBaYR9ysPf8qidMxsGJqg!j#b|kg>Sk2|~pF&&J*81zqj-Fgih+u^4Y@YWYTXY#unO?A7O#LqO+oy{T1=UAoF@zeQ;0pXar zzgzE9VOCzC9r-kiI62=SkjJ3CrW@c3j=#EnSyeDMN8a;0!dyJu=^x6Cj#8b`7=+0b53%BBK3}iR7$=uHCH$fHB_>6g zWuIJ*G|$+3^}~MMFHUt8V&@vZD4SQB5_qFSZ=ytZqsec;{=9F528K?bCxKu~fZm!D zTjmE1Gv?w%x00;y*W-a5oBM!F12PLd|S7#YFYtLB_hRl<>*yl%NE9DkB=@2 z$;=t*pMNM83&Sv;i^JfUhjr3tLC{6rZZZjTy6)}ehLh;biEFYfiM!j1EX86aOX45K z1anwb@c_Eil+^_GTItdY$p~}7NTH%mzizC{A{;4GX}gC{R8M`}4|H#`*`BP58=I`` ze(?J$le;F7i6YAT{LSKIBG?_=)RYPq)zG4R7FDYTU0L(!>>4v&#V>2vBr!iFJI&3o zUrQkKrHNshH9j)yzbbtb9Dkv!TWatgw5h(*NyXG%WZ(`j4o~jADATdG*;zeiQQT() zPS>P*{XL5CHTl_P>93bE6;9|T>p=m|MsI15`tlJUiPxTN`26U=Pso&#x}?j*OTP#C zdFb!-s)A=o8ns#~-##1cDbCg-Y%9RBzQmF-FGyuh&EJT$)4fVNGmNF8~M$&3`|Pu+!^r>lx+zuAoK((>XhL z=5nhEISNOk9M;#^xvz>#QWL3)w{=gc&yNq1t!IndB-v>c$oV?N`2MAJ<0c1FhQ5Kw zgo`NSdZ8V}M#!}FX2;{#_dBfm#b35uQ2sJJx+K3V6$0{DS@X1U6-K6Mhc6QXvp5zO zvTvzgCTthXrRN+N{WJ>G$vI`*aG}D=Um;-D37OA0Fnvnab(rJT2buiG=yhyziHc z86@s*n+`=i9u(vh71zm&_}GuEO@R54e70tMnld=!#M`o2)C}fvQSIe!3z}PmwfX-pMZu@~I~PU0P=rvq zILihOj)Qs{3v{wAg=%QZzx`1-oqiwkRRo3G9i-a*$Oz{b*j0fME`|=AUk5MTWn3<0 zQCW)MNzr!ltI{sx*t(4vyenK%voCnYje;$fH`soGmYnQB*QVz{ZN|ybel9ix1oNHn zz1lD987 zndTFkYW1yo)5X)U46r0J>`--M%axQ6jJ@Z>Dc)~JpH5PBUvxL+Fp2$=m=-+zdQ~4cew51h+2&i|GmZ;}ydQWQ6dqg$jP^+M5?3P|^6U_rKOe65 z|2pQhl^7@Y{u7}QYQh30;=>r&oEB9Bd`eVjGGTlge6e9&GvCZZbK@E3$$lvqoaoij z5s*CjO8Hr8<$$B;)e_0w$E55>lFr;xnxNZLPr2Gg;c}Kv6r*no<0xz^iT8(tC7vc^ 
z7s4lBple<;y+O2@x993z?$od2b9Yy}m1*@*M9?nnYrA5Jc`wq-H~BdDIyP#jN4TlRIu^~snJ`9&iGsZ=8MRIloC2>alWKGQF^MmbR-JGN&Z0ua2Hsyd!6utx-;S17`CatqjIHRCnQT?~EZ-1YGr8dTrq3Yb zfNl~MZhXW(S8A%`&6FRl4r{c#ge0p$2eur*3yHN)W<1NWp228^puw$<2ReDC5bPEN z+2tl5om=FT7=_R2Xk$XZJ>onT-^M=`3L2aJL;27Mm){)qh{9qLUeFoGd&P*TAQZaO zQ2K4ADjwG>x=r21IaYn|#I3|#p0?*G$EUuM-Ppj8*i0E!Z}S4uO2(MHVcyl!Pmdv_tC$%t+gk&;_RgYM5sS0mha zcsgw{dz~j%K~P|lAn-I&38J4HF&c7C<>SH4j(X)vTlxm+M9d65PLJrFY`-VYB1+2Ij;0pqwd0SJ@I0- zGd^kCx!M9f$3=lm88=eW#SQe6e&?MO#y;Gm0}mF+bG$?1bA%r}>@U%FmQE#WX+qr8gyGUk@O4}B;wG!{zF5Mm9JEpw?RScCt{m0J% zRP`4a9nR2m?Dz3oiTq7U1vQ$4F`$bXrMRXHC8hkS2R9Ap>}}&ZHrX*3ZjMm81Dw$4NC7I@ci6 z8{>zLfdtvH@0!m|z_7;i9`9fx&z~0`6C0vZYKFea=eP&OO0&&QZnal>A#lz9BV5!`&oh3O+o#pyEF@?Lqzh; z%x(W{B$@lvKuPIvaJ2ZOKYr-=Z;yIE*j(e0)nlek?Mh^`OUMsdfd5(V23mDw5LEh8 zapy=Ui?o(xm9aUt$W8LS3%=y>=>|d2U=UrQ>P!QO2!4jeFk2jD!x+lgI2yJ=W}SJ zw^1H+4_RX+UOn<4&_f*p^ljZMkIB4FnH0qY$NH(*m~gyg3V9{Mk2E_d5K z{a9O0rSQPi7Q6yr0$XO5Y46=#qxm2Qcx)2vFfQ3k^hM_ooCldA*bMGZMhosCdv`~v zmXzM!bEJ#@%T!E8L_P*}%o<;mJ}ncH+er_{HG;}hgv zxN+f)d$g#NtsGyN*fb#)&m!el(}aa-T9N)E%o2|x^wvm=w7pI0@Bu*Xy_(X2BU-BM zL^xb}08r2+my{rN%9my=Psr)DpK(pz4Jnd9-S+PyF$@gX%PyUD_{-6jS6_pVdWWpB zJKj^+wsO$b-I6p=d*u^0Xb_#|$Wh0BFyvz?g@g}#$2JFLnun(onoJA325O!j+y)2B z)}d$5{?IsF=J5y|Eb;L@>+*LI8bH~+V`**vFN1*{Qn6gw>rT?AZnWHaVD&MTU%Vnj z8j&c?k76JbI03ki`nj{xxj)wwD6^GG<;&zoBEvT;P_vwuO+M5%RuTfB}2S&!_mg2eh+4nH4P0F5qs zF>Q4El))==-z4`{Cz#j)M_L!LU7SWIz+pQ z1OKMsl$*0eSE^KRuEFa+@((4nU$-S_VulGwe0}F0@;2RiDP-KoY0}lC;X)|5EX8{qFWTen{PT_Qs;y=MS92Vr+e&sZFG@l|EtRtu8X6qb!DKDIor#--R0OO>uGe1*PdboWB5Zv2C(`pD#0iekH6bi7lOZ9~N@k>yUox}PI9Wz0 zi$~iDYuWu^pZn{U`o9-oEHYZRr#SKYvInkf1SwKbY! z9DPbXy`C0tkq0YC|Nc0(e|Tu#tKL_GV4ViVqKu56`sS=V%Rk@T2u{qG1~jk_S~$XA zrTU7Hm))8>|CX4&x-Yu=@|f%MFpC?-l$2XD?#$I7@eqF(TNon6P63mv`-k!cF!@A% zbGvX-OnZ)X^rLJ4gl@W%6~;$*k$w!iee=JX8KB8TlMS zjL-3UvKiz0+@POffSlG&)Qg0w^Nde#B)*Gp4eygfi+avJZtu(I!*QEKoz&H#(bvZC z2{mf({hSHDg#3oGpOS;ib8k%r7MNbfji}F_YO`9*@2eL{6Z@^cvagRsH*_fNYCuXR z`xH`ILl>5tRrtsr^{un3v3f#y1&kLbRtW~W6gIUT1m&N$FMyQpiuJ@9iX!+_#v3j6Dg&AxKf*kVAW!_rM2HNsVB-+9%<~NN zaDLe3d5!?=RmMHBMl(8~Nd;%$7ZP^CKNpT(0{=}t#%pD&`@lZ!Sy}Ja{PjYVvxIyM zMUB%;s<;fQ822>DPpsD*C2UUlo57V}eN_qY_iS9>5_S#g0NgHWwYC9M`9y>%uj0Mu zvfY-OZWC*kC2HHgWtoRb3K-tO#6NXfyq!U*;#Udqh1qX=Ra`7UpBgfDxLk{ZtyhMb zYq#CnvqxWk6+H*Z!ECBEh((!v*O~e8u2^H4_j7`|g(t5O z*I;3N>o++Pg0;z;3IewRBmR;2xAa6cvagYMU<#}sGe1VjV|@Bjlg_wsY!EX_OrN|( z6YaNK;WNapv@D>cXjZU#Uy?2?ap9k zSb0?^_T4hUVTchUW8AEzoSo}$sw}eLUyx;8GAk6mmupkQ)t5FXl9EedKpv=D-rZ zOe9MZ-M8hVxRla1pvt~$QOHI)O_J)c|Cmkh82*x;^dTqgXszl+yQR;0K??-ZvUhaz zRz5{y(IVQ-oSoH(&JN;CJUt|gh^}FLmuh5Gglp;8Mj)ZpW#>vx*r-RH#3!R`Aq*bJ zW)zzKz0r$T^OD4EF}}IvikMffXGh1(Dc?X&6veyZq9uyqx15T~2dIHJOMTqixA^@= z9wCX-DC|@7L24(bm>VCLc+~+MbE4qFgOT%X}}-`iq@03L(so`sVPu($Mz6CTB4R zYaM&Tqe$;I@#pWg0{@+bl}?=dXJSTgG0$N{O2t+$b(|q;GqA@Mfy8d1B^KhmrERsG zLT2-6JI!N}NhdXg{M<$D(+%jNP{=3pS4L&3NUfnt?dWT)J?5LkxV^}B-nGC?y~fcd z<`6sug#FKj?+tBPYWzbax+*oA6QY;8al51i zci;5%$-8_X`f2gPo{Y6kjG<~Y9*_T>Q%z=UdM3Uo@lgGv?>_CPGO8c{P;}QJ(zo$0 z!%P;v-Dt4To6?CMA;&mfz;iWT@S|QVe2bV{WbC?u7#l>MeOYO>bKsCHwQT+9(XuzV?Mr zmRM#uR7lfmg?Oh-s=A4p)iYYXA09Z$Vi#L(ob>y8NYOe!J4Qa{VGdtiLLYu5SO3Ebj}E zZwdV@!l?%>Cd6}|98H^?&SxS|EO8^}-P9t;su}dTeLN{3oV937ck)z_WuJWL9YRC~ zgqOW&T)cynNwF6XZec8L+Z*?O6}z^(x7Gw6Y#)Of;ND7nqn3hgX$V3IhaR=%pO562 zjB!nlK^HX1Baa0cAHK3fl3oR_-H7Nf%1UT*!x!o7zTw~W_lx&z{oN*+S~}ueMI>H! 
zT@~$Q<-1jO2;*6Gsdl10PrC=ahnZw;l3lEIgpjH|XJ%2YJW=u?q%@vRg8jPqeQpL@ zBy2#DQ zJY}q?<&0VsbO^V~uU+8P34Z%%s=4DNHS=MC>!GR1xIyCTwh+E6RVN%HMHp<>i?amk z0oIe6Sp>h$RMi|MpH%Ary@>@$v;}$IC7%LZFL>l>nQNw}&Jo__CM%=vd}|EL4yi&Z zFI{54Es%FR`9X`S;gClb=E!>QAna7d?I^*F&WRw~7a@aB*6wLW5{$XMoI^IjR+bqh z6i%sgqnTud9XwHLTi1S!r7`^X=I%;e#sf2|;>IT}B{q*0KHw*BDdCI!9hgs+BHM}l z%4sbwHQhQ_V8^5L>d$rVi$B!Dx1LCH>OYi9gES>%Xx7nk16j+>Y{uqPC{yI~gxnHE zh~{%$O(nrf!hZiVSvT%;sO#|QMi7??p$OX5 zFeG`Td6c?t)yZBa!I&fC_mq=L8C%-`H^-W1T9-#4t#EWT1kh`0H*4oos_`q+wqw_O zx9FZ$o;14zZhtE=+t8M)Kzn6SppI872j+Mx@~lVKOGv;PCVyDSdLA?Y-j)u;8)h0+ z<(s>^5fpE!2TKlgh9pB-Pqr%&>=*mJ5GuQ?cYcBSKeJwUQ`bNbb=|{8O6e6&PyFWo zp+rlGKcz{LAssFUBRrL3xU0UrZW~Ke5_;K&LQKBbL9w?KDN!oe%}L^`)2CGqof8$g z9GOb$+ETNeh3erk!4|32=50AVigBwv{lO*$oz??DI#10CgeA;zQ9MJC{k!bqPu5Y; zlzsd~S7N*F=6&Jwn>G8-Q_xnM=eegsmy35X5!>Plt6D?}``|#sM^)scc>aK^$-$9g z|7L!8=1r;ZZpJrd;;C$DoIj4kqE(t(+{upu8$Lo5&#XrREB2iM-P5ARWFu(4)~Mue z0_$C}Bk(j-Zi-_4FXdXlz$)FcIl57a&1(mFK?Za*iNi2uu`-qU2$pjxYXCsmn9eKv z04Y67PLg2XEW``=0C(Q*BHB1G$a~AZ-#okLP1uYf>{v-6aP{^xFGVoXvTcD7ETOO_ ze?NZ1uq)7$=_~h7CnjOjSK16H%5A|9bxn}9?Gon$o=0Ig)Q7n$QwEC{`j&Wfbgl0($Vr$7 zr27Lv+dQ;BxdUWs7!_4^t)02fu7E}Pa%0KSK`Lqh%?6uWh!Z`|6DN!X@+c1H=~)22 zY{=KxR;&qu?#<~>`=TZ8rB0uS&s_~ulWdm+WW&tmpRzgIDI}1(l^d$~L=WZHal#p^ zU#`gL=eL3 zHbGJIVkJ%S2v)LKrj8s@dSBDV++i&v5?UW7i>v;i&;VL^B2w%^3Kuqpbs?20bkUV3 zxcwa6j?P4D~WKqLc7viB&_#UN6eRJJPPLR3?yOGbXi8<46;2CQ($Np{0 z`^DYlF|$dO34Aybd6(&gscswklKHK41+*gY6_`N3UuVfyqM>2VzFd~J2Pv5POF;7( zK_>reA!5|^&bX(#hD4BJ^M#$Q_Z?|Gb3{oWVZDD0d%`KBU>|(pHWF^UO@O!I6a4W4 zkJ6;1j9iCm%iby(ufR=Q2);0e79!19U|;mqr0}Q~hCI!^zi&ZkbesqR_Lo#Rn$e@0 zn(hJz#~+&?*UxfJt6FJ|?J`GTH|)@khNrBKbKCP#70;{Fva*Xijag6N;?*D-Wny2G z+x>D}EQ>oM`fI~Y%UF?N!TZNM-fjBALy#YW2M1`bMxB=SoKhBi5bt+Cx|!UQC=Ok3 z_-#vB(4K?mB{V9gbdbD9YStFyEj?(`aPCs|C34g<&kV+snFnZF5_a|iSmYj0(VF;2 zKoXKbGI8y@iiU=sf%?ZA_?={MAk_I!ptt(PG7|3A<5|R)X?n*ml43quE>Hf}Yn)aF z(bnES;HuYEGnU#k5QCO9_{@BlN2qo|`Jjf(&ejzpMD33!F%VjDJ%KVCCo>{Vjlo}e zP5->x^@fNmVxklptiUB7#Sgk(A$%l2w%b@YqiN`Ujt(__kqLjIf>U)RtR?wC=y|U5 z`kY;YJ%gl2GPO>ru4Ut7Y|H9xaZ18)&l$r)yM1;^6O-|@Zcd8kSBquIKTMsHsZPZ&b{i#7T{ znicH>6ic1ODxEEk)srelZ&peG;dw3e;{wGXKxs~j0qzf#IiM`LK4+AxUTOPKMf}cj zPkBgO4ry*~?0>5~g;1$n_X#(IXOoOSA;u;9z<4j+TZXq!$zT4(NB7+H*_@ zb4sF%ii&>doE67uIIq?zatZn!eTm!38~);|#rC)TXtpgaoogu9mdEeTIhgV4>lM^&Q-nvTi_}KC(3yBCsa?`JxkmAyeJ0sT|3jr+~tJLzU zmhF={m4cjRp6)_{UUJSUu?!c!@VecAGa7OBAe(Ct2Eqo;2#fAp=IMoPn!kNBN|Lp5 z0%mG#h{GySDgUurw8i@ZqQS8r3$&pN|A&Gepi&tCoIx-PmNoiBZro~vD}}D!RsRg9 z7bAUZvFuG?GUvQGiM%_!IPNas1(Eq^_Nzcv^?I4)NZydB)xY==D`$p}`f|Rell6JL z5MS_|{D>9+(@of!c52A}1jW{2`b?tD8@WnIi~@ks2E8tD=cL{@JSvz}c)mS@2%eAH zuVfj61J$NmM8ZVf`sCg{bxA$%ws6^l91)YmFx`RkrAeKOv(i$VE_{uF=e>D7a8+lI0v=afW-ROQH z3Qy|F@A`*A4T+cV{b5@`4$G|BdLq1fl@VH=;EX>;B8ynDPLf^n}acyb700g z-L{tcl|6LN``tucw_&HHT~&QdRz@b@{a%7)&HfGCdixIAUbULp=M`_}lLfLp-RzPy zs@hS8eUS+@LHW(QezkZLs@mE1Hy5`Qf}m4?ZvjA!b7H=#+(7Y%N%kVX7UJhs!cF1k zLN}Ha$C`ch@xu|^XaNXJKE5@FCvj?wzH6m8D>;BbKmJkcN4HTWR)0~!PNh{!ng zI5b&sNfI&mOewop=k_C3R0P^t{x{ef6yE9`Q{N#s3Dxp$BGqlYl6Jn0*STVf`)Kk0 zCDGMuOzNxt0CgB+cQu@7kTNU$#90nZlMqX3EcYEdzNiUR8NZa8$XK{%BbruxF2Hu{ zRA#VuIij)gp(K5JjBAz2b&y{`fDi)(CDr~G@0hcc^H;_v2-S^)*spuDbVsJSg~9Tf z{j&R8!^E7y3Gx%+#`3+Yv$GtpVEp7~)CY9f2J%0+7ZO9uQZv_Xyp4sB^hEy}Chnbz z#zXGq2+s&TQrJ!cfHn(zHc)*8KEi}zw|Ge8p;!&SwYf(W@uJo~Ig^XT%8DtX}a z_gaf8W_AbHXd|$wo#-y$LL}6Oq5SZ%C}JCN>$uXqs;H~k`gg`Rk8y8#PP3Pn{pw=e z^L11|zyXvc(#Li?E@Ix%znAaVM2W8MfLlwXQS{n(*jY&~J=Tb6!e}H+%`jK4ZGr2< z1j5Xdh*Hj;bVCE9Uf+Aunh(T@;@i|+MjK08Y@YqQ#6HKp(I5_oqTeesK3QXH?Ji03 z^5r5T5RXVsEAva$-ofND9KsooTa7^d}F;ev?@OWv@CIpEfD0p@z1q1+@m=27!+}nzY|IFswdfAV@w5P zL^v28fTce 
z-s6@*oN1bKb@&yvLP=FHltk!wBj5#mBm#}n!+Yg!m~tG>J{~`$Hbf zTi@m?NfO32`b;n_((p=sz}@`z?cf8C%i^0!|4$6fFGW*HTm1QGrpm#Xzbq+&kGa~618rj4O^h|s@&k%W7Uh(gv zi}j&kZ4*EVi5~U~!$jjG3N2hv{HEF*7J&@C(dWMs7JJi#krjE#TgB7)7jx8e!n1{V zf_&S%H=raUSO^v7q++DhHT_f4fRat`yhH+JAsx>wkdduDs$9YyS$BChJ#)#~-ciJ9}lAY(JdN#~lR+wJGvcK#KB2Ks+v5izo$Qvz4N28twmg3_+ z(jfIN>ZHAzUc}*AtO>`#{8e$Sl*F;xW;)cGuMOTxITL+;2;T_+eg0UT<$V0@lCeaB z*nJz?IVVfyr#1C)oO?;!%z+nzl<~X9aExBY&|*9(_Tf~sa&?Uucjps8c+L(FE&W{M z-SX@t@b822UCij(K|pIg2iZu7&yu06-_*WaX&&t>k@YpKxjOT1;ZTozefEn#y!v@4 zBIWx1@_(tD>8EA=HV#8+;VkEl8tTp7*u_T;0V{h#YyAvapPCR=0Eni`=FqO4l?eZ;$X$gyCxGSKstU=?nk6#2F*zdxzgK5$UVY-c)!0d^|NJmVP<=xg6_*N%nm^g ztdJ894a%<6#pH+#Nu=l|iO6PVv}H3_)EAHHhQC+Ym3i8Adv-}^$_;`Pr{uXEe=%&p zH=(>pC(#u{vCbKjpGewz$V8VoDHx*?`nTRwt^sR#Nd~l=g3y{n?sMFehBg@Zp|0_1 z7EtLih}UXUqLBoGjEVy{oZE`Du(v<4oUczIq1GP*g^Q8>SJW7?BLP*?Y(_OsY8uDq z=Bs(}Ztm9&&C1<&G`tb44O8T3la^h(P#a!tS8$)C}lrn_r3TQs|IIzsuV z&f3wb;3zecgCQkCAG$`nWvSH@=Qt<};mVZytLc?$U=@qdJaT|Sp7G?|(?|Bf)bQ-E zN#Y;iONS4??ZwmT*a=b~_{<+!3VziptAvU*WGb|nbS@J;L_XNj`nMLpviyCV{H!=% zZog|e-g>(dL7j~h?-|ZHhJX(Myo_FfGn*R{!9xkaLZcSN0+4}MjV#Oe zBYU|xnl)5^*oUWOgh6h4n4@KFAIkGurJ($_aM=p)`K-IGLnKjpyH{3LfBwVdr4f`> zD@*1Wrem9i9_Xe}xN z0Xf!biHid5)$Ok)r(s=aZ$r zO4BQ3hYxaSSDf_|i#)%V!Q};qnXhMeyt7LDi!Cp96sbj#Pfr|8Dy2MKt<)BHN=%Mw zgPE*oFqbtnJqoZlEM z@(Fr1Di97O;0rakTCpr^?s;L@gy0`KAGkN^=xw7T6^fmJ$`Vc_{hBsAjR~g9gY4BG z((|GtB)&Jm~XlUJEvY!1*NOOxrFrzWk>FyQrwVP$BG$_GK!%e02)Mu&X34K;Is23 zICjzf#QR{&lV?n!GV}?SMjm5tN`)=Wrooa^ox|5%ai7!D)`nmV1111cxRo=R&F&9L#p1( z-n#86AErJlMi&c+P7+mmuRrr3_dCMTq)Z=jA1$5XY1U&*`+hAnQatpuO-i6t7EVj? z=xk!0W?koU@}W6?g`yX%{oAjVow@u8m(`h7GA@{Vj~_=|zmD$6=8Lw5z3mu>)+7Ky za)hb~fP2exIx4bNc`;-UOIz_j7%1ajZ@iC-S)sW8Hl>M3@leEj_96U=2X`Dc$q{{D zE%H(%%mi0jneWKAG5D-o7}-0mW&WgY!qKwA3Wckry!CLQbHw0?xZoWhLn-aevMC?b zJ6?+encFk(muh+{kx}(;UTB5!DfTQ4r5rrk`*X~9naP}UVIue_)i!~0JnPTv`beF| zA+`=BQMsDDh-!&f6FnO1X*H^A>zmw@`QySC&d6Bz~$nhO>Tp9grMTZkoE;jbfriG z7rKT5xRg@zdyRApK1%`)IX7yKJ0hgPqh%j$e=+METaXLq;;K3<(-x{32y#4_p1)rJ zQ+sI}xTj2w(GE6fmJMcWK^H(tZpxKwsfQ~!$5D{Qz&%4=oi((89z5aPa}dooIClx) zp{zmrsi{@b@}$-`_)o4?Cd|2|=3!w7>eJ}3zAWpHl`};x!KrP&j&Iy-E@PaKleMsN z$Ml3@4PraGoN#x$WK9$Rhu;$5IqV=r2k4T<%4OWt`9zq_Su1J|fxHXfJ694;>oCyl zx3&!Zm@qJZKQPlfedx&ys=ST!_H!e60#Y6tzU#z0aE__1K$FYKA886Ed%@@wxUl^3 ztbK_LQpD@kacR0o6LkZI=czrb4*RJaR@3~U?r^?~giRS=`u7uh{;jlVn7Z*GBr@jx zKoz^s@PVMa1o&QP8kNGYcl&pw zv##2f?rq+smex@uduo-G_7J_ZeXa}U>WZUI^4Th`Y?~w9<>O)&&TBSHH{pU;Q5VDj zL#@MgY(Y%>Co27Co-{R$OX5TUrnEHbEsUbNxLiBPt!UHQ3wQ*=D~%mi7+9^*3QONA z8LgOpV%u9{3C+T8va5FfI|>VovH6*y(m>Car^*`TtKZ~n$pS+;s~3nvd7U@F2ot^h z+OGrZw)yp7R=*I!kmpR}=)+KJ+gl$u0I=LcLr}<1bm#SL_U*mVxBdh`%QbaV?Wk~if6XXV~f_V&Uooh7(sK3 z@Nyj5-8f^zB2rdZ(C^pLPNa*pfco(5*@kxsRPEte0?VmPt|B!FBQn>^3${K%7c|>B zaTo3i3&D>_UZJI4k@Gx};?2eZJLCIQ;=-^+b|tYRluHpjSd+o!LOAm?a7#DI+1F$F zXAUlo@2uo24zM>)*$x*%o@9owX46FQveQ5r=f=L9ni+e8;tum71A*B+eINzrEMk>PvAO?1+24S9Kj&lW+JXG@VEG4>38cVgCF?nhL#Gy8l>oAx*ExN*PI zu?%AL3;QD^bD;G8&`{aq_-^>pt?(rquP=`!Y`ZCV$fXHOSN~y>6*qZ=$QFPRgu7wF zYZ!6iF^)XDi@Me`4@{AoR^P=hDQzfEJ>;#5+%K0G7-7Tr0nOu-K4~a64=}V)uVmT( z))g}JCQaD1fA5jm<6p}YbK{R|eRbFra{IoY!*L|HA@<^jir>oyB9)#9h@ zcG=!vV76S9!Ew8equga@W9zYnhaf1Xdm)w(|2>}+&aA5@7?gkLFaO&a$3PmTE55qK z-&7rVS+o)YfD_yrI3d4qa9wx)cvQ84Iv-D^yUeu)uMHNaWGs7Ji!y{F6*)+A-Twmw zw%b0DKQql)TuKMKerTLU!bGJDebj`)AzCW(_$s>iV`TlB!jyYlsUTgZzRj|^Y%4js zUJRh(;W_8cdd*BvGrv8NGLdHe7eXiaHZoCpqpuwO%0C#kjTaT6qI{PGN7a~vM}vXe zXDqw={;};cf~MO{-~k4#5LbZ_yHDIb=2E;Wj)QIiS?*P;oeR|Ys-cGBA^KnOB}FuF z(yu+UwKN9iO3||l7V^J~zvI5EO6w&>zT}DH^0;Ane>iQp%16}5Oc|kk38$5I#anqL z5L9a?a^Gwt!?A8MV5zZ{mXe)0Z5%!)5~SK}ti}Bv`^z%Rr1S43=BfTh9Fn#2CM3X* 
zFm#SxdYR^j7>1X40wQHtvF)=kT$kj&CN3l>zWXrwdaaq1HXTLOY zw;6RR#i#FM9!ggT^*)Pa6Qo$(wH0bjR-4vC@XTlKO}xf_Nm#Rv-}jIRh@+4FO^eMy zAY_>1_uUd^vft<5VBeeR{}$X7WxZ{WG1N)0f^c<0q-nf5wSZv6hkos4x)JxuBj4aM zv|6}pOLxisyh_PHJClYaMOqU0m$ncev0xqNi#i|OKE+7m?_O*K8Kq@hV!_$esi6lFWu6~f?ov> z{uuOj)7%9vS6tK_c51Q8@jL-7U#1L6Jg&}rdDhrws|2yk?g-y!Me%B{E{_`U_+6yF zeZ8CZ)7$FI4CAO+$I|p5-bOi`4eS!sKO8G=J$G4qZf%%{Qua@>pL8ZuMvT>O$migy&w8+eNH5$imjBVM|-MS4k&V0~X_aPeM%-49s5V$@FN>$rPO^3k^|_ zPhfZGYmv`brTCV7y6pa5N|cf1pGo9D)#ZMy&mI?P5yIF9WaynFx~p2+{gm=B=8p^o zn!h})v}HlpR1af_iJ(0|mYN@-j{)q>X6{>SC`fu~KdB1Y>_SgfUMN^al3^kNH)%=)!@Yb~I=s`LmwxV+NdV*RgL0Tcdb}(h?&u#3he4PU?;T?J2qtkHg=a@o4M^8*6;t}u;^T2hEXbe$KY+u z88Cfmg_tjVH?%KMY9(O=ch?`cQ{-73R^c~NHkryVF1#~Sag{(O@rA0sLIlc7@}oL? zv=)ORRU&lQN;L%4srU(+SeMqmx|R1lzuLjI%IO6=S)kD_{61f?tLm8B^0vUGpzMO4+<4HogWgFkFADI&yn37d9JOBhTsW1i~gRtUEvT zZ7!#CeerNTX5(veS>kF>D6r6=BF0^%#{1#xJl_G@s6NHpoFMA$x~TMHIvmoxc=mXr znm}n`Ez2GwPH19`T*+tF_P`%Rxi*aR$2NuM{E{lQ3gQpBS(yq0<@;(8w0F#rPML>_ z{En(TCtMsfb+dQzTlom{$HEGZ_|#>c=2V;NuGN&c^wUg9zYdwFhmsaqp7NE=?f`>2 z4jV2X2P)gwoqH0w(|-^kc32I48W?1^5p4q?q;WlcNO%}Fo2`i_ku90TQxP9{qAjem(CDTCvyar0cm0?f9n?b_$6(GSvuDHj-Tz`(SYcsijJWX7RIv?MLL zb~@D*ovNuw+eb>9KX!%rvyTg#DZzKb$TO;vD7BwkOg#9U93S5p#4y+gL~;uqmDEK} zuYDjg8|OYs_#G5YkB0V&kh$e6B_z=26`Mzibj9?QJZIuh$cv-lC_$>gF@8pKi*M2H zeOs<^a9#x+3zeV#v&Mqi!n1ez>iBXrh+VfFBjk%o)FbNkh9v)%MQe_H+Gdek{EOZW zTv{fo2;E=Ml!1S)P*ry|cW#joHI@(uv?NQW;5V+r+4RcMF^hu0ATOnt<7a99+^WF> zbyMcp;^%ph$RmSYgDPG}TW$OKKUbH^{P@~`P{ZR?f@1&d)gE^Dvj-N<$`*yv`l9IT zl{c0+peybED&0LbNav`q&l1YE`{J**KQ-M7zYW(Y6CEhE<90Ri-SkE7Aj=*45Ru0X zc&P`UF1r#yiZBg(J4Mh=b(rp@*SB#Gl~!|i{Mz}FUaTL&xEN#gjj(7RB35J9b*inu zPBld2e1)oiR94He*&3lEAjkYwz1FdS{h(tzn13sD1eWE(3>`s2;}Oq%xKT?Kk-NC$ z{DSAcCNkZsFEH3dXc6REh_6u2E#7$YeR!W3#Dey>vhxlZIY-LzmTP-=(HfZY?XC^H z=S%7ymmgIemPOVzX6>mfd0#0zvzNrTjbHBALjK~YzG2qvNQ~^w)(vphzf0eIKfLa- zsTHpQ+QYxU;aWDREhcL8)KX}zqOiePC=D|3D!^84Hej*F4hF|x-*(d`I#&}HEf#HC^=2#x~J}PUYt&p*XouB zo;8L?_5=`6T}Ufsm!X6dp#9a8`u6i7`IFfBfW)4nwL@A|t-N*^BOV$$)y%5?=AIZ~ zzU@kJ8T{T!A7{EKUry86epN}CILRKn-YZQ>Qr7-h0Y+nMZ51p=!*PLB{%t*Sk8I&{dl`iHI0Y_=du&V8nvYr zo+1N9QpLbXamjFnjt1h2RFJ*9BjHQ-A9;+zR-eJr{){w>>{Dn%bWJSI7ZP6ltrl;Y z%Bs_EngtV_5`khG%8utcYI?}gx}`?&IO)m~q7bX^TZtkmrc zW2?C#-N92r0`>A=%So}m%G+w}KfK(FLb8bq`iXs~h3LD72u_vq>vj0PUXnqYB8Hwd z`2YR`l*IgxFlgGHj-JQo!?d8g!B>q->JG5bee|ujybM&^Z1$|TEczc{%%b`)+0sxS ze`wG~>F-(Y(8SYlROeV^owIEZkrQjrMFWC4l`Vdte(?jxs!4bq7Wra_#2+f`!vP!Z z!gkd(d@{Nbg8l1CA}u5>3%(Urj_FF?h}lxHTU=NSJ-1HotScM(i5Y+UM2EO+G~k2r zk4N>$Y4&6nhI+UJ^aSJQBEI)#Z!&rP?Y%(FF(M8VtAEROta4oAOQG(ye%=xp<_0f=V<;WY+vbH7Mqcq(U@Rt7PH`W zLj#xtC-8xtEhT8c&}&8b30F~#B;_D?ci1j0a1{{zD@#2Of4E%{GNW#)xw zeyc6V(>xYe|3oW9WV#L$8j`_bY))f8c3`tsZ8h(ew7xvL2~{jG^>gCI=d!e17GG|A zYv1k(T`%-&&M1o8Ypo|fnhmZ`crbl3{zd`IwW)HFO0d3trGJp>tbD1Zxawp^iD0Ch zu~ordG1`7u7}dCkp*!5HCkiX$5rMLN21W`@w185Q?y`FO4Y5(ZuMC%XLASIG%9v}i zU_oh0@WKER3Jr8-5J=CB6y^9rQJj2S231^Rfr<1uTeS1pzU-iAcB8C|CI z<~!HCfpmBY3b5RTrkN5ir+$o*&eS7lLb?k49sLexfW8HSKXj}~5|L?ffpe?sVehPv zfijaGdnNtG3j|$xNWJ~58$qh2N{6wll!C);H%g@;fWMLDQex_=xy^e+(U5UGQLp}X zD(Gd-0Og(&T02`g*v`$pesEUZ?DI5hhONsD^!%o8|5eM>(q_LX#{qO7Q`h{sAYUYC zE^Zc+A1|$ToH3X`BjJVh%|t-HbTvnWY0QBQT5npHh(}E)HSWMR38ub{gCwxS_+#l7)hxwnF| z)OrT(^(#Kq6*a`p%NK(5cU~Z8`;ES3R;F!{AUq@_K62@0()0?Tx2;t|S?9iUN8@cA z3H^=xr47b$t3qoqk*{Aix?bMbbBq*8GRF+6QNfmvW_$oueH!Rley}+??`mvbn9poN z{^0LvCC#r#(mEl`z0vZd8l8s{f#9^v^p%6uk_x7(`NKSBtuLd0c!$edlM~rlpt$ZE zCzR*dXEq-Mcv{|B=f#^PL?8n&B{FK<&6rGqpGbb0*E&NQtfpIAvP^Ej=h-^Dl^L5z zou9iss;(%1fl{EI@G>fsdrjH(YCDu2sax*xm0#j34o9piWV?OD@iQqq{*Ar3cSae@ zQAZ->PAXgaMKq$38>&3+z>zg~>ORczD+Z_yLjSX)>7uGz!H7e$Us_o@dax}wGh3IE 
zC}iO-J*pwbwM>)M044E+sO2Uwn#U|7@!r zbGtJNOr2z1DTTU{|G*N^HxwQHRQe*Ea{f6zp#W)vrkWU(_-sQ31LDQqZs+#4vA2&l zBH|KjkSuJq79VNI0ZTDt$q0A$aX))L2`*WCT)-5M#`(MZDpD6fP!v)Jp^T}wwP;qt#fHXA2d8UCPdM7qHo$nVi5`+f+Ba2XVwZy*F z>J74x)xHpKfkM!!(uywi4@ShFM9{LJ$a<0Om8UteW7U|{zMcst9#?8b6)iVMDY2~# zOdC14k`I}jWg!!@Oy1id`w1EUY-VT0?g>^IC6w;MyTYyb?GADiT!QjzozYq>Bt062WwL$=`97R42g9k`qjw*=fy!j*old+$%UEMO_g`gADJaF7T`YL*1c4Gz-I!k+3 zcnmWfeH2&BF71>4FpL878$a?y{3TqByC9EWXAx^eMZe1teTB&dQzmpgWz;J(=ksn? zyDyx2U|~M58dH27O_hb;2zif=x0S|bx^fdFsXYFM4)Z~Tz~g>w{`N<&fH}@wOlZUS z{Reyj;R_8{%bQm$gPyx!s0kH%9|06UvaNW)WON?RwW^WO{ANlDd^N6bP zh5A@recG1uWVOnV%iu8W(b=}EF?Agc#QsG^=I9OOyOKtP)Z>60q4ImYS1eyV;Iw3H zlP$P}&S^1`%Lijsnk>X;VA~L$!rQ#7EL#s}Tvklw8n?xMJt@b?6N;02Hi*?{)MPJ2 zqv-&@tU?aI_Yu~fG^U!y#4(ziuT7LM)?FYgeqBbvBdBZj{> z$^nKrT!q(CQ9@Z?#kB2`2V+h!xsKOxUeX$B9N*rDyCT(JWt%NwQ1Pk7V~BQF2aH{< zC{Y7b@bwM-!j6_29Rqu$R0h<6ZqJZ5#)43N{sl0C0@md}z#E(^efB)|uC+-?lf%2- zd968%Rbs~i)7Un37LgPe+aPRDcxA;8WHrOO8J|CTL)U`ar*ISmbeEQbV^qH8YSz-; z1Z)S9OakSO1h9v_{pq^`lH6QJhPY!cSC7anCyTQB6W%Opk}&rm0co+e9S{j_t3`0r z#f49}ET_%2v8!5+4$`(v4%SHj%xEbR%TWUxv=)?Y@F`d1SeF>$3N;&>n`ZGKO?=3W zLi{uN&Gg^c;|U_V&qS*+DyN3MQ-@Ko!yYQOJH?Mh6Y~H8{D><+Hxz$js9`O`YI}W8|-u zlGQ;x)3cx3H%X*$;}#lF<$9-xbV78o`kl(QyJq0HF!tg_LMg^F?g8D zq>)OoA}e^!e|2n1e7MWM}KX3IKPy?^P2TRE?a}Gce4_`@SjQL+)ezk2$eP& zzsxE0mDXEuy)d|1*SyGwU`OPG$t&#EdGh`G7*Yk7*74P`} zn|O6 zIWp1+-=fk^b1CF6ZME6{E2Lepu1;tI%^Nmq{Aqyw!C|v~bgc*zilW=_293Bra_ifd ztHG0eBK(8@0PV`NdP%a9xGR%Jza!}YI|N*ZT_~;-U<|V%xTz z5LlZqOdcq<)!zS!A7p#6m}75m%TM{$5i%$LALT@4e?fg@nIw+`Z{Jj zC=qnclRS}0XHPYJwcAf^Xm=X+sm1d8F@z&V!C0X1fj-M|LPslbvT&C5ge_}U0N0Kn zi_>Z-rZ0M7HJABU@8R4etUn4OH7Ft`Yb!JxX5=7xvK@CQFL=!u^`~?hLfIf!HLcJ` z^3LN_aiXIyK4nmD*r|2-zU(aF@`B~Az95a7663ci-j+T+58&k2h zDz)Wjx){lnpP|_=7v#PM!I9KT*O#f9mT-QbEWi;|kBm=UZ|Yw+5bWRlE991@Qp_QB zH)RTy+mj@g;oATjul1X+KX?)6ONDpULT;G{7(sGq2OO~iuGJmw^eWFR0cs(H-^*d& zZJu8i4SxxUR(C#=c0V)xxEai5dHpeWxuSic+H_53*~>h~@b$Uk0R(dSP%MG^$#vGk zv{6^UyEZc9p94>xEZjW9T3gD3F*BCVWSeDyTq+1MN_{Dmf@DC=>c)zlFQ0IQIKI{8 zg@4vi72>VRY^-qPZ7AdS*4L+0Z|pQOV;I~1B{X64!JE%^jkur;JCuaOz^cE+IzgVB zb>*(|gY2sM+k||Yb==?s<}h19jg$-lCcdiQO_&V08sFcXP8ZFd<*dpOP}obhW_CJAFQ56J%(`r({%Eli9SLTK<0Q9N_Na~A0&_(aNp#gH>D8C$k|Rus z$X)nxrOzZk7{P!zj&Pg1`K|?;cBwT>pCeEB45s79~~AGrYR8RczCL9bJdfMl>{bb_oxb| zvBjW!_9BXZeQ1>V_=n5|V|Ii(mWE9j+u?X7r?>p$0!lMApZX4a7UF5btom6Q)2w3P zZWu60jjEUA=TPYJJMl8d88SMAN~001XyQ-2!Z$-%UjWa2>lxx2bE)ize|p~>%d{am zmXXmrX5AxCw_9$PQ<36|;p{sm+e#tF!M|q0Tgy*$Z+{}@xD#I4$MVn%enrw%Kg&!# z?=HBb$Jie%+UR$eL1gP5sww@=KyDda7W_23l&@q)Mf~P**6rYz%sYUck_{cGT->IT zj(5aWhoi8UnLDwCQXc>#H=VDv12)kRJv#Vjt3Yen;T$dl3bG1f3_j)ElqJ=reV1#_ za)O;RnR7I-#jR9bv5u&Kd~3mi@3}@cmQj`x&G+408k%PW;>(+}8>z;z46fh4qkMlS z6jtft-!DEXk|6?npE*X$99<4Z9CxcmUgyetNfQ|h?Be$IcwtcG)#OcPL0`u$HMVy2 zrtd*bquLfuakxa}^|ygHWMRr%+<%wgv`%YA13y%u9=lY(JFbN`Opl=fS)OkgMefw} zwJ%nnz|cJh6y|sQSK#8tSbb^L&mf~H1=HcEbGuvQN01rsK2O6+{y)HvteGAar4a(C zWv@Nf(Y&=c*wq9JzL#EMz7*MbcA|UaaD=thJd-NJS!gA!KLf2AFAg>Pf8z3>uf!|c zbV8ax+ex82cZy8RX6(hiBE)Q`s@Uw0gD;W{{obC&`hJlA2N*2Vfbnfn3n=ae=HBH1 zA0ISs_`~PEb#{~u_v}>f(Hf9j`~ysoJ-_{QbwoBJY~vraMj>&nznIeje=Cnn7s|8L z45OiSEm2pL&rh-kjLs-3$Qu{MHr>O>d|6(>@fJs2sGd zHMYe_VM+f0F|!FiWS2x}QRTRg?Qqzh^}R_^8^YKcD4_CrIdo&o%lL;8ou%l2)rCq}in=lqkMIgc9v69gBb z2s4yf;uPVY$td=jO4E`r%PDpol$5_jW(g#v;V2^~`J(M`X%_*BcKvaV+2N!cnuo%W-_DE-_YK%En%w{`2y}d|SRf z`uH$wo1fui^G$b**bVNOd_UvnwE=Mb0eGS|^5=}M{`Go41Lju%hU@W0O3!H*EA zQT>M3x90xGOOEUIxq{!C)tz{&Unm?<#Yd%-&?|r_NH|s6s z8vQp}-(ZSccmzD3!6yd)wHN(1gK#ViFROk}nsZ~8bajK%_5TgLMZV)Es(XUu?7)Cq zZ*7bAAph&-nY2x%b`{pv+M%auxWW6ocsaMX0x{r2e!+!rmD4yOd?XRnzx^mIE)8+~ 
zFL1~fVTK~@4}%LE_YrpMW<1>A;2}G>w}`4DS$_C20tECASN;KJWc^`tz7m$jONSdj z#Muz4~<;k1YIS|PK#_DGCWS5^fy8hcAB0Q!38`V$U4duwd9YgD( zP9FFdH&}*s!7-qlpm>(U6Maq9?!QC-_(!HN1u|MZU!t#{;4}={Rbh)Xgj=cpKPzPa zSy|WsMQO3&d3hqfX2?Tc#eZW4x3)B* z7YjkNLj}tyf_?SF!WK?K`nAeJb)bUwNa|RX;CDV*7!8)E@6_5x-myCIM8Ar+3qr!L z8GDJF%O``;pu2l8Cyn$g}>y5Ng|D z|8^x=(!s)u!E&dJdT9kZ^r;8nT~MIMp*~Ij69Ff@;G}SF(r?NWRR%`pQ5FLEHOUX^ zTODGZv1S<{GpD0}!0~JjrJOt4 zXWLQ!2^I@P=EWcMCp>!Q+0>T+Nw{1Twxh1Ro0Pe&(yM->%+S~R zF=cM~2_g|u2W^*((<)R>sZU9O=s?$w(q!#T_;b@66Oj)MHaE{dEFb=fu#=nMD4CT0 zpv&);4KAjT02BTgLW=dgrEZv<6%d_p-nzU>4Zpq0A&vbkIM)y##TMgJif>EiLOb|Y z)BErB2M<#VDzmI@fLbAR$jwWQY5Nmeqtp97+ac2QYR>z_#^sM|{RzQsl4IZ4t}dj` zihf(A_arX$t-O68XtSJeL`C@d-7MM{Off5{Cdh7!MAi}O;N`4AR4Lc|)MHj?;QG$_ z{tW>EX>-YM*>ME9|=qE2{w{WqkH06*u9l1*?dEVzj*G z))i6Q>~=0}K3io{B}7%DQ}>EJiymt-{>oTzo)F0)K2!QdpR*`*6jJ|p0Q|LTP_#Sy zgm=HWJH{KG} zCG@^k?Sb^3zRz~Ze>GB#{|z_d9;q8VaZuPGO&0*-xCU>>5`aLaa11U=RA? zPdINvskZKFVz|*7(0y(wG@#n}<}v@I`IE>+Ne4lUw>b&)x(_2}1;oSEK3`DrBc7kf zR6q1{Iu&p%hj1kah(i>`Ko~g2kL$u==kWW-{oqr;esmV={Yk`kJK^&WU|8&M+L`y^;ni}NST6T2Mfd&%^WWTyMxs!tGLsnN zQcp!Kvnj>`73+=Uq3(EkLS~+62Bhs8Ya2x?Ou#MLRJf??n}&T2EZasFJo_B;(kS|$ zC>b^eqP3f4LeRaUkFTMb-->AUAb!xx`AH_rn00l`@vUCd%Y4%7kqF25_EGa`z1j~jby|K4DZImY-vBdSbZ8>0DPK7bY-4J?V{R)v3X-8+OX(Wkzh;V)5 z7~_>stV%$=u{7uHHMg9MeSFNmRX63|+v?6%Vd*qQsh>M>Ozc^?rjg%OSNgsWZ}2%k zQzBu^9&ER_ntpM6;*S;BiJjA^iqDJXG+s9P8cF%dnl^_W3#IGIXTeQVt0x6t+JPNBgXTVyHy=B?+Y4j zFLPzWBcwR-!wxb+{=M~#vz~8uC$F>AI#WMMb)$~M73AKpFo-HFD3lz1GcC9LQ&H~E zwG&A)md+=5qo%_q$eI^6q5X-uuj;Kiog2S(eQ|c8D<8rJL zKWJvdHSg_?52;CIRLS#_;4u!ce)?1aEzqlhN*2s6%}{D47>_|uU>2z@W1qsE+wQ#` zH(#n#ir&oj#mO;;p%!aKEoRxxe&Qty@I;2#OhcvdUUT`p{uk~mVJRy9_RC*J$L@l< zSP|az1@AzopWAUdMEdVxIfFN>BNQXMo3Y*f7#Qf*%hhRwe`tzW6^*(lO%voj?N(@! ziM=$6L$%VzK83mxr}7?8IL<9Nw?=>Om{HvG35gPD^69G~y%|eX4@_;_=2aP9>^aY& zOTGR{iB3*xtjBf?axZW|Zk?Q7u|0Zotaq#!v!a(0>-4D1Xxm%cq*?|)&>qi5rydh- zu{*t4@zX2^Eu@ZeFFlWAYV(^e<7$w4CJZmaPB15O3*E%sBKqic&g5zz977D)rH|G6 z(6BCvJQGQENy_;>mO{4*czzvg+EQ~{w~G%sEUT`LMl!eG6`WUNCtZbEWkWV zDtE-&G;vhkGmiV&MEUK+ldSVn?jF6BFkft=M^G0Ls5AI-c{h&vzGwILdPpS1YqkBo zU8jRJ843Kjs+6*`PP`MS{0<7CmUvie_i`yo&W? zB_JS3ih$(MB{0$@-J!H}*T4kl_WOI@C(e7G^T&D5ALqT!AN%4G@0-2vz1LcMt&I-$w_nA<^Lzp#mi8Z8Y-zk2;$?AG+Hu>kx6{qzkLDVkNS(C`YDyyd z{N8fqwsqRhZjQqPgWWgji4PwK?ZzgJ8kf}G8Z0EGk5NoU2|%g62P|h0Uxyyd_w*u~!ds-XY>jeu ztm?dRmBQAel2lB4y_ohSwA_w_cITWY=EsYYzMuZ;4l))wX+~Ku43m?De+Pg2MkJ@! 
zjU@7?PZ%+zhH5C9J}5b=9;k^=dNOGD95fKV?1ci#)+dRA?&@yDO& z&bGCDAEz{kRo+~%ch83#cUKM0CEV;iD~Pz&;C`tqu1z2Gy)JuOgMUF=@=Y}_E{B$e zhS%qn1#M5+9JkLtwC(X2f2l~u*Yr~&f=F{k)G>XH`)08NAXg@{hr&9aoBC+#M3fz=``(A*<{}NXT4ny6;`zNa^0PZD7DqQ zV!CpFDQpR}e^h1*Vw5PjHvK2(qtg$WaA=ZhV^sMXo$!~WuX9?}LEfTcx6KrVT!ZHB zeo(xTMdVP=`AoL0)X?^vq4G_L*OrD4%qh=y#Gv<&6t4s;&48J9lewzLaE^6Oh|Qk{ z6S|9v;i>1*MfHT&lFhq0Jxlg$czf>1&u4$wpMC`++FH)Ih9jiG!W|gF4Yf1j_Lf`WBZ)Jv`{0#>2u?h(0D!!i7 z=Pc7F>v{awI;u?WcK&&uQ^)p|eS(&B?N^Hq;|kL{bV)6eCt=33e!lyjTTf9{H;w;F z;JmZa_6d*_qAmg>VP7VFV8QJ?w5A@C{G6}8wP;HFLHDtcNi3MLcT?rJSHug>SButW z+ZUs*qa8fcDH?88p6hLMFEw1ub zRX`p1mnFKno{QfJCi-y!GNEib**DdY{fMu4T0%DpbdVC+tn`z=7t}dibRKCxAu?d` z2yWvMDZQQyG(u8Ib0c{=tRTJuTDh%nhxpPiRISwGeo>eBL8i#&SIdtckEB)%U`?OK z_^7-b%BT>_Eg?Ck_mECNeh%hM7E7Qc~l7Ts5bb7I4r0ZbEO!Odn5 z7jb(XzSeZRmypmB(U%GG)YWChe;NoMhd3{bFlwpu8*Z<8%lKKAC4H&0mdr-8?3``7 zl+Y);dRio8Pi4!ITTo#mn?1=%tGFgItkxyEO>bVl{bt!feevP;Y(G%p@+(l`+oO!j zZBlW=%cLclnKYd$fcH~hIeD22R(j&`=ZKa*qxlP0kKZEd5xMUvFi(a|EH3ojW;#9H zIlH#|wc=8~aW!O5>=uwFL~kiK!P*)~LtYo~gYK7ZKn0bOlpXP0>|{$62T;-ifys5GP#7XxNTyPu@f# zr_GEQW0^?0_FcB@&2r=Vy$qry*ZH1cc^}>9Xna~ZSLyvcPzl89L-o$?`aPF-H_Nn2 zLy9}1`jpyHevtxi`&FkCL^z-cPi{WZBW}2sf1Fo0CoG}IamP~H;`Cdtldxx_`R8ei zRY^MYihDoLd7YC-syMA&j-nIQf7Eo;k(WRMJ6tuV^0dH=P1lyy2MaZvNW3_bkv12QDd7^`;l{lPV9tCZ$+%qB>dF+`mJlm9zGK`d14uYMxW~)F{HE=d;rQ3~u&1uX`Mf$}J)7FStUFWh`R9WL0^hO~=TqPK zqN=NgQ_0W5m47CR)6dmhRLt_bJvSgUOIlZ;!Wk)F2t*lu9TVo z%lGeQ%53&~Ua~_JI#|V3_B)+TUG#Xpi;7yF!yNq+NQR`9p_P|!DMu&sV4dXc`0C?U zwn~|zt!l;|v&7BGmak~~GHZnOee)h=CzZwz!evv8#k@g1d9I}6g~ij6_Y>jFQ_~A|OipN%k-~Lm`EbAi{C29FXc=YHw}81Pc#wM(J+r`| z{cGV5(Q=xGt<;EV#E%bJN*cWLE!Xiw@LLEQ@U2IQOJwe$Z4-%@XS(%7Mzg#pgOc7i ziR}cr8wk_dX(IB%^0)O<`1E%(BuwtTQ8yUQVYn}oAT2H(D9O19*0$fw3IeJuo_A$2 zp>v;4`8{5;zXI)S2);_%FSEeN)IWDN*iW>P_JQr}|Ja}2Ut(xAwsE>suI}4Gn%!G$ z$5W}qczR?ZsgyECDEl?jerxesAm|4xi1%1}szc(}SgTIszTnXnDEJ6!*ozfR%7qCP zTVpGLuEa-hk#(HN5+Xb|LD`YBI%+`jHof%u2H5Bj%oNbqB5?>f6~&_1oKz$42eO+K zKa}=ax`}M!K}NS8DnQA^rG%AHoI~{;&aVwkNiz&;Q$9n*5ld0{+a!d@aM6%;ZZar< zPwSv|tXC$rb32%obS9n-z4OOw*q=B?oGmZF5U<&N-{A^mj$)Ee_P1BIX3aGZ0Hb>0xhGn9l=VNe3;xHu zZULR9Qr^r;r@Nf=+wp&wuHA`Y^ty0HA~#uxUsbK2s^B1B;!ZIVcy|D%&%OeMdnN$g zp|3zcQZMGA7EVmv$MR*iW;ms+EUfDn&FP`8D8Y310UnxGkmft+ek6|mOUmWNXCTJ| zcty*;9h>SO26XxmIc@__uXj~cIZla|qS`=!iy$1CTkZ5-YA|IH-SlXxHeYFHpxg>v zPMzo0Gbn7o!OR=R)af02UtT|9WWmz&RsA#mwg#z(c3VUGeuBjTeZTp~DH6L&Y`>k= zJ~}H=`kWl%taRbSJ^_Tu1Bt3VH~8ID^@$}rfS%3r0f8egKi025DxhN=u*HpgCpAg- zJ{TBu_C)-)ZY4ZZIdqA3ny5w!Wu;a;BnB!X76qRv9JWc$m2Mh?->9b}7U(Xo%NwuE z17X$xYQ7MfN^=EDak>IsL;Ue$0Pke1ForJdmWmM00uG%G+6}1X6^K#ZZ9T(U?e;nc zjT=xT2liW6_X>pcMja!u*Xe)`P;CK{Tuv?? 
zG@&XH(HRq=HTluNvA+a7BfyeB>LHMlb$?gPgranXUI59h2V8!F3_Hck^(lm~F}D-o zm?OTWoar2X;A1f!>&x^yt_0%_M_YCQ3R-{k?-b;gfEcO+hyZ#l%b+=9+&QvZR#iar zPLlc@ATe#hozfMeUv-9K0Zz6kbQ&f<629HyfZYb1Hh>L#Ynu}f_-}$lma2hT8%8vi zXV8Sav)NFT(ng`5eOmLMmQ@(`+0T>< z2%c!DOCCr7w#UK@uvN<_umyA;@W8L=9s=zZr_rTACPpS?1@{>V_(ZE{wShel(fk8< zz3=6li1>&2(UXY9&j{LnKqY)%+{xp7CHP`k_sAke=8I&YvDFA7OTT@8c|&jx<0x1ffxrg zKF+G(?%?5@ojJwPa9X(D`+?Q$h3D()0YXJ?5Rrjc-X>nqTc!c~BdOwiCdDc6;9>J^~!F|y%XF3xKJ zk@5G)*etXDH60(S5*7W-s-{%($7Z}Z1ic2#NnW>n{9(4`bDSBl5j1d@5mlVWKkp&_ z{R8QoW?)^{J!0%NSl5GifP#Vd5Al6?L4BfaX4~~lt|5F7+-*eNrRz=0EFd6Xp#Eb* zp@qu2sAu$XM&;|6HaPxnHl(s3`s{;y3?D>~#_jZicW>)HAVdDek!1KggUVO{kp%=u zIldSFF#HklV6x;UurMlZ$PB!j1E*h=Cy2e_gumOe_a&@bvI=AWZ!ER)KZFk;zg{3- zJ2AF=xP05#l|Cv2dUWr{Dr9d`5Gx!P9Wit^n0oPdt6{%I_3fjN<$aa?dp1o-KHhWu zhb0&H(N+*9#IUs*HXkT)b04wv(GPI>KFJU{0whB?aEk&N)i0~y03$&zBR>9%kq!`l z|2AqvBtJk0aY&13=Rx=5gQcw3{;nMErKNben}!g^d_a6lRs81!iT~>(0KJjtt_WU| zZ)SZ2q^19oJY>7p7Tdq~1pmbf@~^XFMp%KF*c<|mjOKU0ndAObmL zlg(H3lqka1zX!g>rS-3k>^?(P1vFdV45d)`VNrNjEeqMN1gI=^QDjzP>Jt9XQCM~V zV0m-&cVXXN{~pc2O2j`TEe)+Ih*Gu2n6|Tj2EKmTmgUU#P-{?3UD|pUj^}FW04~It zPgfw|o3cQw`(A%VFcWwN*_mnPF;NXfL;dBTQSqIrqY(59MsSah}TF4g5#}OH*v>%-S{9q0hHA~y*f_<|La!C)#Vp1yGK)VA9b!k z##MsArh`ujx&I|RmlH7}UhQve8e1}Iq}Hr@u^!{_gP*1X$A0a_oAs;~GzehEU%-qr zDSv0g-@&W@Yfn15&t4sxGIb#fyi-}cQV6HewwgP=%LhO5kp-ZDa~fCw79)&#By@78 z{~~2R_&>d<9nhb7|I?ck!wRb*=(Zcc)w=x{L9cic4*d$(8?>l27vPeangz6!y4UcZ zEvt}2aHZ2f8+sox6V-7fHzHeD;3&iBoeM$l>Od-|qQ0aIgc*!g$`RFnO!x8sopeqI zxGW~I|L2JTf%~&X=vcb7cr)r~g|u=J5cAcQ0Y6&=6NCy3$M9#G)GO>+9xYSAx%^7~ zOKMl3aq_Hx?uf|lo&Mzy`45XW=Ulp;qmcbZmkc{I#Rev#m$7-)!s)L?VXfU|^8UW^ zYV}nQ6Y8We@h=wnGKFbHV!@7*?9>lN;+DO>qR3AhEMW|jTf{F2k1zFd57u>Ns!iq~-<%GRW?xdy zCV5ed&>b~yr_M}?0M;$cl*2^rPrn&(R(s@N%97Zz(rCAvV@NC_jtFyTx+xEtGbr2M z1OMvSb zA@1GDt(m@+04V$Y!h@;~@@tyk=90P(!Uho!&h2>cbDigKXN1|(V`$TX7+Sf6E{(~F zU+qnOT-bcUQT~&IIHNkNIfSF!b_NhHZ+ZoihwlD_yPp&PYYHl$!=s97_2kcWbom8d z4>&zCv4|n@TJ#R(aolM0U?hC*6R;rgjx@LRc_s;QTgF=@`mMb_B{>O@>`s>73l#BQ zE}yolFiRP5s7t2jz~}y_7ihq3{whpH27CN8&V;0px}_Hb2gnZB9#*)~eUlZO*)|~l zkbYmX&h`WwF(J5+_kROWA_6G4b;{WHIyGI3F1U}1bC(2)a!BWANp4wqup(5>i+J$A z{(dy7+}S_t_Yf#-N#Hx%+3iy^^2MM9}iy+!5jc>G0+AP=){0yy9hZ zaPus}S6T;87h4KgAs!2fz!7NGqqR!ZFr8~iHCG6h_qR=%iH4PFlT3zU&RlT6jhUUV z5ieY4{R)KEHJXxdsUe((qP4}__}@g6ZNq^;nzojpFHLm{#8s>g2H*4jR-kQgs>=vX?#U@rMcEIzvnSRZ~ zfco$UToKDe)(@lXb4R@biJ`lc(Y)(UR9$$`79m`n|ElIcrm+~v;AM0f7TUK(16=QsjX zI5WRCK@6&x51AyqQYKMUci-NIC_LnA4}qdiS{)^=qn4Ipa%yUW*(GeG;nC1gf4Ced zlQ`!a_pELPdOb2$tnoM_pTok!hOID#KGc>r@dycxF^UDcldX55s)biNl5eNSQ+QGM z?(b4%p;h@>cR|Q&$VuV*i1M)$D95smv2aln(1I6nHHAZbf5zuTYspI$9UpJ-x~C&D zVQJyI^6rIxXNM=aA@{M$F#uuK-AgqL*JG`aK`~>(J)nFYPamH`*??ZeAlM2IprzeN zwaMKe==6L0i+9IqE~W#;G-)MiDEj@84!)O|?y#MG0&~LP-~$4<{a zb$KE|H|}NM+ISx6m|Zivd+*bHPrQPjTf=6CpFTJt-hnI7>xFkxnm-VskJzbe^b&Q_$(oHe2pa7hK=UHwU!Sc$86%W2 zm};3?OgHS%`RO8%A&&Xh%zzNT^ni<8J`0vG&4h*MJrG<_w!rs#B$^b)eGMGWg*e#`|ZplRUeW=Uknia z@LvNofG`&vudGBOrt_1h*a={%oJgGR3aGvT%Stzq`%19MhUFkrC#Tzf#*$VhFtXpg zeFPqDDF`!)x&Oq;7q{MXXj!1&ES0e9)Qu-GCyA3_oQrh-u5>R!8ML}+G4?xNXRKM0 z!}+4OdlRnY_c=7L`!s)#$U-_Yg`i%hsHKhdQV;&CT+BZ%67bh2fTJ7~UHC*u5exkJx3d+rNZd4da5W%>B1wek$|HDnEJW-R(2^@aAw# zN0!FK;LFdeWURMDf5wEiBcIJr&GQfSZ$B3M86bn7-?)n&jsMn`pJ++z3$kDNmXqd5 z^&^I(;)fm+=hUx-TFnr5O&Y9=5?`WS3|Ifpq?Rpbx5RlI?@-g@MSqs|jk0jA2F9Ny zQvL+UY1dDZW`NEd0*QXTt4ODX{Db%`-QR|yVAo~!|^0oE}$ zGq$#=c|+f_iM5nZM6D+S=_7fvul?^dnuxoNY$^_k61s#^RJa)Nhf64}tghzECgk#4 zj^6S{!X(c){3Qs$_-eLRPfZ@)_Z&Vv?t9>+e_0G2K1%A;$kwwGlS;!_{qYltY>*%< zmHu}3Q_bzo$a__Hc$Lr=XCa(kgJM^peCTA;-R8NokSkF940JmjNrId_y7%Tf^x0wN 
z-30FtO`WR|oWJh)%iqdOuoFZjsIOq?>lEwy;XW=DOz>hHxgrU)Hs*tueAiBk!5UO>GZ9Pu`ha~8@q9>hfa1& z;zUMdwuNo-gx|WoF2VM4prrQrd8g}^9-KH-Tk-ZO-6e`8qOGR`1ilkHW(r^4C$-HK zt4{0Iub>7Q_tk^f8X+&_cHw3pTsRME^TH<~@kgoal!`Q%S<=z%KLK_!0jUA)!Fc}l za4=5M7)T3n*j|LPqQM3SdfWUn43A8LmZQXTcy!%7bZg}N|aGBGoE6}L1 z&nfzxig|`JPyLrEG<9HUjbwDlf~nY6um@ul$%Fh6w-NZ?(=bUVh&J`)>$xb695KY` zsVzxRUJ>=dxaqz*SfaUCyf2M05`qOwaG1U($YT=6?J?y!5gv1 z?z+EaKQ5jS*oa9873CDkjT2aiHel^Q;|Dg^Gz(}OPV--9WEp|nX@6&_cp8CtzC`|S zF?a`BfY0r@3smPA1I93WGgs5tvUgXUEr7^c0E%t1=UUeOi&JUr6mB#iEav{7lENbU z$&x~3qvQGy;nB2DdhUfm8+N8`itUhUh^%<``CS0b@+Y4Pfz{!M)euWENr=sl@JkT# z7INz7zIagng2f>pP7pa>=XN!U3&!a52Z=s$czSIh@w>~VN0~1SmaO`E?Cb%@+8iH* zJ@dW4eL)bcC-hNbpUpVy%Hx(V#Y3x#oTcR^RM7;T{vY2$AC=^3OP8##dpU6@5Qy}z zeemD`6@$Mq(Zz6Od9SA~n~$uMhcTI+jxil~7ywO;7T?Btv zyGse`f?l3hTU)xAJ2jW-OuWtD&>eX_iriglh}~35Vs(_1%?ul!a=}QV*{6PFvP{0k z(0`bFX;{F!t)8 zV(sEy$GARh!AbT_LfxREV%{$2c6MB0-#CG@m^5<{S%hv_u>llK_(05ABUqq-E;@4UPD*G|CQ<`M4;z}m+V53c5S>qx+MBq zG5#oFS!(4sYT4u2tU9oYLj4$}HCYN=_9rmpr_7xw0CAyP`u*w@>43|TM2A6fE@8m0 zj1G24alst+TcV}YSXHRz_Nl)nP&*|czRWCD@J{qg<6h>Dk#_{o5>$itgrKCEM5LoH z+lq<^I3ZamN`~u>kD#JVt?_VNQOjO|4$=%ynp6tKL`0O z)E(^2O#^!?tGpRy>6mw$^=$(k!L|E3`!63>DPFfrQ05+1<}?nj=cAkBy4Ox>TNwby6 zPK!`I&kMqDUumoVgO1gN z6&UZlVmx8`lO=bZR_p(zp<_5f`~5?aRD$z$kW^$zO0uhv(t6CN6M|OmgJRMDm|gz= zIY<2uYA^VAYDdv@y^Yemyo6lxZ-3_p>VgKfr(S^?hlUTmE{e&t6vF7Fa*%avvUI$A z1{!WU{7)m*4fMA4Kd5t7%U;K2d!yrBAK$z^ggdBNo@Sreg+LIiUl*=GQ@nuR;H1~S z3p!+kbGvs1a>)zm!HP&btZXfOhXRSUv96=Wb8{v;UaQe}W~a%014SAuENhw=NN>Za z?(>jaQw25=1RqA;Pz27g^R(_OA7)sdU!@P&w)n6uD zRS;JmT$cpJy_w?m&9ke!K^$o_tK{`SC`WV2sLjJDoiSSKg(_l7tJt28o`Myf340uG zi7E1>d<-kYK1@D(Nxiu6;Om#w;Hsn_@?E?`mXUj8G-n^)@3zyepOzgjxuuE@JUJ5% z_VL);kc*X7ta1LMqY*5F&8N&17V_7@#%TbO{?Aa=|3vIOUu8dnGV#@88Zvf|L_w2c zWMYyra$SLpyPCC80}i|Aid%Lq1qMgdK>Pm6k@T36su27{#IeFV{V@}P^SF@8{URpo`HSk zqg}7X^rY;I5$}xaW_W9Tn|Du>Q|<4a<+{+7R&%^fR9lu9_NjUZ0+)K#WbKk1T!D02 znu6t-V~efLIGyM1r2M0&^y7QEACPsGxWsxA)CCU&82f3Wr&dsrbsAc z0qtOZUg@6-2xK*wvl`jcvGn)690FdK{tPCcn&{M^ycuc9g#~Y~KxKt9W!*VJKW)uF zex%Wkjpa8)>aC9y7mS3^YMEGj{k>a9KkKQlVL-eLO2k}@}5jy9>N zEp14lSjpzvY}yB1Qo9#6*VN8W^YSCLM*p$Gz~s1Wgbe}+5U+9yvN{~}=EYKo#D|KzCVeL*yp zwQbm4^7`S9Jp-PU=5SjF{R<<%JuL5KeX#z%?B2**n-723l6Pxur^bhP`0J1BY&Nb3 zWL|+<#ZcxD5*t~2ZnZQ0JOe%@8=7p{EHLiCM60_(3pfV#d^dt>yytiFyu{OVPu z9s<`Ne(gNk-lHSR-~HLqU{QjJi6Btyj155#CBO@Ky zu6aGKA?CvPoQ938TPqVS^b|1sNH+8t5jOL3QJwJ{{&zTmnN-Sl;$q7H#{_5mp&@@^xbVRedfSHOK=I64-h~< z!;^^|9BWT?|fajCJVHFm!+z}D8@*u zzy9;MnC);YtbP9cyC=W2OoBhmX#xvjkq=;)w{MX{NIJX2a}t-zK=_6;UPQ|jzn{u0mS((#E%7+ zRs%sl-@DUr2#b2IEZV4$arf?Q1jv#BM$usIsnhU+qgF`ig>F{tTL_w;IJ96wUyvFr z(o#K^@xyjkOk$5K;&B##PPCHCoa-?0?yeu;kGy{Sa3MN?HJ}fGN1KRCUfLLSFek^; z!fG|wenyC`Ds8a<#`cplWImn}YmN3kTxq38-I8UtvoExT@mV-4^zIhOk+n)&e_B(E z4sIp7rSd_u8*-orzYuYvx&lobKLc!Kv<=~bP8(27_IwxU$V|Oygj?^B@ztpvLptmg zRAjD-!nB?*eGO?fP2k~0Po35+Ix%c4+>&Pbf?jAJ%NzV$W-zxfrxNyHWH6{38q150 ztQtP`f2KYz7R6%7T{+s6Rr~W3FR1>tK|lFAd7pyZZT9*eYA3Ed$O*#!xfs%vA@ z_PMp*b7Ph}=#v`|pOIB|O zcXF4$LrPaeizLRwkr0F&{6InTf-8#(x+4bI$zRw5B~j=HKtAXdC<wTQtkmyLH;V@4fl*97`XJSFvu_#1PoQFLmewWJ}}1{bcClmOn|hq$yO97IK@j) z!COC-C9(ykLL7a{z&>Y}pbN}d9=hACBCquriEI;UCnohB^&f#?>q{_jN=2Crqj=kVRk(vfY4T)<;xfi#uxVdOPZL7>NH8> zJ8ADyn?{**DZ_VtgDaK~Q{TMud{8ejzhi(&!&Tw{3vG3xg$daF8{j_^SU`_y)qt)Z zV}S@I6Hx?C+<2IHAC9Hj?h4>V2Vr=;9(M*jns9V4H9t~UGcoDCwHg53?@#)U~y=B;Ginc-|UgsE4WKkgX)nxNEh z8Y5FFiM&6ISge+1Pd<}qQa$K8`aQ7#mdX5<&}6&C;l<#6_Pd$@6ATx`&$jA@v105iiW`PJR@3y!AHhSr`ZCsw9 zWEs#wOQpb1^6}l%7#7|4nzr6{n3W{()=g<3 z#Y^dS;1gPc(UJcJ^Xadm1^eu$Au-Ai-QGZKfK3xk9e~YwLZ5Yd0zDxv!&c$8*bC^b 
z@Vn4w-8y*&DiIV0a=g12aUmOO%@5?izIdn)ViX;(;WCC~As`ii19J(CozgANcQfjBji+N1foLwZ;c?T!Pm)BfPXbL3&=5yq_0!A zzZa=8Z6c8jZ*17f2)&P?7rn(l^x1`)+EYltZGPpXG&EL{W9D`rkZxw9{X>*wbQ}?M8y)9gwkYXOoNQk;x4Y{SVf0^4kW-wYmHk)wPxH$V}cH z3GvL~G&Bd|v`jO zq9kwJE=i=^TAZ#<)6MkQRMW4_r_fM-Ph7lLO7fqX(VcH5@4mj#fUyxz)_2#%J$QId z37ekEL-T$n3zbfk9Ks#dbjB2AaMQ-Y9^?v^K1lGH$w;ErGG#h7lc#+_@SlhJspyuI znH+@}3$wQv88av%M&F~*WCA8i;L#~;q;mqKrrPn)^hdzv(vtrLFFbwv?x!;`XPVLR z6)3WW3r_4OaR`Y}Qn06xS~eI}m_~B7QHgB*lw&(KUizleY@-!=WGkq@ZaN8Gb3Cg8fBK5%2-u206NZ7={Zl zL~Jzhu3~ksKnaeh66i^_|5OkFXq6e*pA}w#zA9n<08qDEv+FYEN{bsXE?t!Y)J;vF)19ed#cLfTfb@=drxi# zlJWLU(U#T^E3ZV=~gCtMv@;&fJJh8&WSkZQ7O%O z4zbuq_reb#T9Y&JLfH`i5bnLEB0D6cgZG-b?EDo#oiguFWdPVz;njj4!T3p z^Ic$u=zzX=u+KK>Bj9$b*p$gC^{NG8xRw0Jwm~K*M>3ZvB7V}g>A@04KQ#In{1D``m_|} zkC@&&aDz=;ABx116)k8MWs%rLSULH>eUfQxo2{H{coQ?kXr4F3KYnaFXX3TP(XRj2 zxVdEimuMP7bY#=Y3%%kz(xLFEc}z1m%Ef+gKrA3%e$?B|(ixIZe(j3|=jZidKDP{xu0A z_9xfvF&+m65uzwYG+gn}BjWaFjmaJU8Zu?#$BT_IR?&xBB|L-y0Ok%}u|I1)XwC}0 z0@;Bh@a*N`PaGJ+_pKxEVZNoroP8*I%tO6M*&c}x!xL^-dI90E4OT^Jo%2|k$yeqY zN*Gaw7eQzuI_x@rBJ*MS;r=eqsU2+2dnt<+ zT>2hI0_Y!-Z%!W9r;%8o-HSDPR_wtw#wLR##N$ff;bG#Vs-+kN>>&U9S9C%9@#mK? zh3~cFeAasU4__MfRCX*G(A|6aq?L%(l_KB}KL`YbslX|`vfA_)Zb5Awj19jZ3WmS1 z6W-KMxwf!_vc>%o`195~ui+DoGw&R1q1Ui9+?}Xk@ypPhY}>(H!!dVm!!rK6A3}`7 zt)8oo+>mg0X>h&ynu%}_x|ocDh&hEKs;?Kc^Y$lRfo@K9P)};Nb3g>xRCs>vzJ4++ z8lpxKbnK)Cbe9={bPqgg*GID*5^9c*u+PHDx8^#6fnx1FeciZ8g0OnzD)`L1v3?9M z<0sNwbwjxn3(xMoWm?vYARxqF9}R?l6-3{+-eoJsUPse6!rlu6^e;QHn8+sDNXo_m*%8R8nQtm2{A;pTJJmNp`#9lG~<=(>PBjm%^f@cWP%obgP7O1;S@k?bGj># z{ZN<@CDIlnF}qE@=~<`M`|Z)^T!AlHHqY-oQ!+~=w#77LgMVpKBU&GzEY&UA6Kt6U zq6#;pBBD!-YHEu;rMI~xUPt~>c9^y<3ZLQ#RMf)a(cN-%tdfVz|Ha8 z%)v8|K5xGV8K1BNXE0j_pLmxiLGrT%3V*~Ehynlyt}6q9_YffTY>5wrje!ed4G#)- zgAUs}2UfBoGD#2hOH0M7AH80*5pKnVtJ1!#01BJcA1U}BTn8ca7p~*)*)A})(0Lgz zJ0{ba9a4U#Sh>qu;zm_#(<&X$TGU6f9f{}10>RfD8Z~j3BrgFiaUHppizjK$Mc1{X zDzn0xper(5=knvC+ut|j^RQqism?$9+S+oAg!Lh=kLk_Hjtx3=m$t_Y8?Pj?eOPZ zfj0fb(181zsQC(%*n3cb^RfdpW5s&`eTY^)#Ogp35MONO00?f?rEGAo6PjziokDy? 
zW~vJs%3PEa2S2czd%i!)8!zd_Iiyvor|mK*{CFxgpksli20HWktZtOwPs6lpbnDWI z1pQ7j;0d!y-(8rqmlj2zA_vU~q0bZ${;Du^!S`Dd@XZ7O0KE!@HwsG!Tg>%)gv(Lv zU=x_{{OW~Yz&!IQyxI7X=-%x5_lGkN0j1$10q!yE7$BnScIXjEoUG&~5RWWc=r;!j z%?g_kNfK+LNH`-E&vPRzSP@>4IA=5oMt>vV4ldH6MF#!llzqExmhbFx^Y!(?ynMxq zU?5kdqyaZy)U26yJ%AG5y#RJc%_7fQ#!eA*3RBK(+6a1d(4gTpdBOAwYOi)REjmqI zB9Cma5w2X86cPx8&qIC)-u&HmcT#ZAz0}%Z$^%NF7&Ba5aPz^&X8EDM7(lA zhbD%?03gS(5I1*{&r>0(7&K*AsuU7tlb)mbr1EC5V$}!^H0a0SY>^om~<*kp4g5OlMc;zOCVoHhMLlidCT|K#W5Eu+(zRf=<~ zx`z1NcyJX5l^jG2(aIL^$N@ozy*9DF1cX%Oj7$v5zM^O8nUYb~q6uNU*05)P&S5%v zvOr@5lmH$0Nd6@=oS+(@#C{}S7(C&%8_^7rPD#M0Pjz&f)aV!jq@Mhen0U%n%6 zD9-WkL}l#-(VB&oL~X+X>Dj6V*yktG08g8o#Ni$p4HKVeVi>MKy~Tba!0C`f28u6$ zU3!;}=v_xI;OlJwW^$}UZup)F0{o?q214FnF+xAXj@tpcl>mMOfzUSe17<~yDBfaoE%fXf4fsDJ~bYVt^UlZVVv$3b6 z)yBg4dxwepj~D~vxfC{80+k3kmjj_ZS0Kwc^gTU%jX)t1KjGtkmPk=j(V4%I>{%ZC z_9mCIb=t45R!@FBS5V;QOm~6H;YT3{s+xhyK*%yX2U^A+v4o_u@p13Uv1e+@Jexe2 z?XtA7QcX@8?#&6WCHVWdF_Mf{{C6rFCXv_H2`Xb4 z*YOSUX)8^N7$ZchinLDl){B>$Qfrx?Wxj5DoZ;!c^!R5VQL~Mq;KcO75F2o=as58Qp zE6_6){0lrWqWARy6c^3xiH}pkBccEG0T5nMniz@x-Y&~G=8hmRF)>01S=#%{8b4`V zRQHH+3a{r>YI}Ms?$g8T&OexO2WY@HA__;_62Ipc6O3anD=tK5UfvO#NKOSJNj)@g z4}FAKD2qm9rJfV;iN~bWw{-duy*5fgnF6+l9BDGh7?JB*VqI=MQjS<_jx);|zUzvsa!+jwt=l z_kA>{b$7FIdlu~}I54fWH}Tn_UiD>YDs8l8e34Vt5hnV8FfT&3}V0kP1jibkY^*_GkEChcMxk<4gQ8VVLk0$Rl;7r^c4P zIL}Z8V_DsBFd!2}+N8VW^(`?^bGFb?bsD>k-g$Tk)rIZ=xN~;ok<^)HT>K8Ypy)cL zMGv_CTCx?Og<>ny0F)7T{s6wDxGXvGc%=x470m)Ne!n@U>#A$BC$TV}pUz+0SJkf4 z71R-vo4NZ5Sr($qJN%b7Z)4&v_W|k8?~ToxY$j}l9xk~6$u?J@Gx;(T_`x8S_%MEE zM4oOn6a5#a`OhQt!+G9Gei|sIh*(U7x$I9MZ^JhWmOsx=rN`z+tdC8CHzxVpbf;utYX}Ht>PR0FTH!IPpk8aQli=a6^($?jYcw_5h`*eIGvnuF zIi;X&jBhP(cAJdv?#^*j7T(x?*C#SGKwY?d?9aPXg#Cs?VYLOM?U#@@bKLO*V|gu! zS@AJDpJ}3TYxKKwfxnJi4l>ym1FAY0-=0PNL7Z(hV&4QLE_Yd+$})6{o~FHkpB)3b zTd$+vqblp1;%nx-IcU|T(8Gm}EJ`m`s!BVl_xFYSent)}`1PhdAiHL+K*-j!>dspG zZDei>=5Y{H(di<5dpiVn8{7jsegAOS{WqJRGWU@~-h~L-JYPp&!e)8)+=`oSoAxu7 zv*l=cQ1dQI74gb*A;+sbGo|()^y9l~?CR9!{tjsA1k7YnvzvmCN@(HkWLgO0y#y`* z6~&F;q*`ms*vCg%_;`*Jt}h8*=3dJQ@`|aET_th?Kj`l$y3*1FP19&kz4!Av)bZj~ zKR`tg_QcPkV|z5$UVS(2VY{c5K>RaW(Fp&t-&v%~+#0D zIYsTAX&UF?f@rt_^FbyTtuqodwZ~}#rQp@cwC8FVPmnVxG*?%Gb;V&;B>X2!W?!3h z+^=#i&gdt5Pj0g^B2z}qzHl_ogiI$ks>Xi z(nRS^T2w?jh;#@Epj0UWB1#KLuR*Hx-cdRTC>?162{n-7d4GHFIkWeknQLawH~YKJ zxvu?RLIQbt-Y3sm>t6SLFXDZo!*UjjMTJnOri=CnAb$f{FQ#d2E3CxCKQt0+>SzDP zvvw?4d0Z@VX4!W_mDG}uMZ8i8VQ3W#a+$URQm>Dw9FYy^%Q$58BJ<2d_ciGG9j0I= z+tvW$F`zt{0NPhP{>swjp#W=n8_1t$<9ujh3qSKz^73(TeQadgm|y=eL&&wgr*bgI zR#i}(Q+m&d^-LzH+TTGqYKAeL`WXaaES= z+4UAz_EO(pWgPqJqq#IM*ujRtyA|7(vQ@95CBofQ$zYz7kZRGJCqARHv!^%d@9?@` zOC`HtXc~R61`&Q)TbWIdGsieAGXr}pj!4yvrs&QT7PHcBJ_~Cb61^&PT_8;TP(%3X zuFnS9OJM5I#{Oy~o>yUzZTj|oJS{(@s9&d8;f=GS+D zuQGeA#7r&Fs@UJQ1HU#OMN%S`fT7|e3HwvcE^ZD>DngQn;yyQ$ey+bgJ}7saeP4N$ z@{s~JNpb+xHN|M^ZYYoz#a(T;yz44z^t5%_;}XA^HO+-h1cLJ8Bo`rET;b1{z^4&e)O?ibo)j`3*pPKa8M(Jd+?tg|~ud_nR6n%@url zK?-%25yFjs1`CVp^`~A6Ieju5)8aL(Th%LrR61>UgT>f5Uk;RVX(*o)* zs*djE>$ac1P*Z4fZMU-#J0Q=Hb6v}RyX;G1GSIQwAa=+VcK_b;oFT%;M$u;S!q)W>5 z&_YPfBAx-c;vT&+w9iQVGDO?3Owe4>nYiQf{@1>D`n|AYou=>}{J-BK0P%l|FF0W1W;Jd5c`rd6rrb;WmBl0$e zwnrWjev4PzoEJe(b)!vlj*78xp4Jp~5gImiX8P14y62y?jsv%q|Hyt836;u^%LI^+ zutm+Ne%`d-AoHe(g-els5T(Iw$X42|BPQG~j+NNk%1B6t@p`dYS5&VMJfA2|mob(- z)ku1GG8mxEE2^hI{L4>H!4!m@AyTuWa}8&RTv+*)y$>|W65$f=H+S#yB=Ocvk=Z|k zc~dJ3Wi8ml2*9VmBjmzZ(WmAZNXI^x^P8<3D>}QV8mrg9WbWEY=dmG@#LA+B3uWn1 zH0q=oq&UI~ih7ev4!w}S3}&3wDB5Ccsd?vDSABWx3r&gX+l1!`Dq;}A(F4ne>Y=0F z@l%}ng}Z5c1C6g@j?`^x&~-EVqOT-BY{WViD6;?X(o!)*3Pg+%N4frT?fe9i~ZJ?`{HxhjGDX@1sXH;b;j(RQPYaNREJX|~_ 
zS>N2Ik!HRN5qiTRMOI->M!XOC^$_=bTT_DOyFYLDi}~&jyO^MqYf<_fN?+DO0hcYp zMZ#O4$V;A!PpoiiRVCzAxN*ALTSYbBc-Fbjmq=^ATTcb6%89sd=wZ%j{UVR_gm-B_ zp%xqirD;~e&;iXk^!EksD&6%7!3=91GUKN`jT(2!nuDNwvE-<;9Fp$V`8 z9)W&;CE4S_pngHrvp0wS%0*LvwL;Pk@@B%LTo%y}!1Icy-E1kx^`Y1O-6h-w;@awt za^5DH*(=CDihVg6J4$h^;_@5BRS7gHWr??2fl4dv9bzB@ySionN4-1q!*_dt%<(&u z2F3P8SRu{>CW)!K7fJ1UNb+ z5Nijp8^1xeifzQi0caKS@7V}0%lqH7PB;k#tyIMERxmakBiQM=WqCQSAeZq}x5Vj` zpKE1yH8$Y6wa)dSD+DBwoYWA6)A)9$T)d zP4S@?={)4?8QiVQGJ~dOytxP;8X=Q6&~fo%8=~nS>yIZD;iX~XMyqoGMSYK4945Ip zjjj{S0J~cGnR;l+CS6N4JHx=n0>18#G|M zt#mUX)#HUDDdv<&t@9hiQ{Opq4#f6ZeO zPGUx3r1t2ScOJOAG3*<^JF`8R9vReqRZpMsL-&eXuNJI;gXoL0}v$ zkOrJ9&m;qH0{*H!T>`j9KVF%>41XQH?c$pzmlEU_#Je~FYLlf9cTg8sw>xGt9#b5mL*~yL1)z81VTNJU8Mt2X7!p!s_qrfajZ6yAJ z7Ipw$aS5K$3Ld+jAn>qtOwL43{|oOY?LM6F$F{Pzx%#CHXbp3_2NO)@?wz7V%Ybv^ z1}ff3x2Sisuen|h8?YxHBvArH17hzYT2CT=feA}k9$^Er$ZkcgNM*&5Vif+qs|O6X zsfJU2rsTBlN}>8}1$6Nu2tG<2hF%g=CBsvpBfFG*pb*`OF;t#mg53G)X}vV~G~ZVi z4{Wi&hgD{dqoE^(yL~unIFN6#BAk%Su)A2FWtQUyq^1uPEN$dU3+Gucx5Qo$%6fDG9n#Psr=b1lLL81o7w z5(>i8Eg_iaUW++sjz6hwsb}buj5ie&X%+BxZ!wKR0Zf_s^b=M>AzX$yp?U)wjo#=q z+)g9Ur%kJyc~IhX((|PyLrUk5D|qvGG~UF4mBWDI8x*ZK5(lPGzK-Mdmr%nvBq+;# z-1glsVJ)k@{N(HOPww;ZE^%yrijCh58fkKMvgaDX4FDp}iBkp+QUkODk&z8R$ib+e zxkS!5oHJtdB^{CW2uZvTJb(Sy&$H>aN;R+>X#MWA^1ATl#VFNlQ?ep`2fJmzYD-04 zM*4o7kqsU{MqvtXcc!) zP&$NU#3PA0)N&+{mq5VS`HoLc=Uman>FGNuQ#G^0h5^z7u+Q$cnrH86gp;9#(0ch_ z8U^sF^UD#{+0Z*5v8!tF+DEvda630Y_YKe5hS_(fi-&xn7C9qiQj8>PP_&K+~cuyMG1Lzjv9m*Zw9l%;2MgZWN>3RrNRYy_!) z0@LuC``FbqUA)LbdceNqhkxfm3M6(Ng^0}~)b`=YEP%!tVu8})(JjQ$O;jf|907bU z=Bc}&SCvIp#2nf&R$J%KKH2+u2~B&~&(cVjD!J*_J!_{3p_C|YA_sPHSp~Xs8set^^7`j26{lxYH|LAy#$B8~J{r?_ zaiGB8erA$5Q|HtMMl1oj1PAQuiWLy=Lmw+YUQYkO_wJI$q*-7}#?$pD9|RJw1!srT zU1h-Xee}+z`-E63g4QBKYert3A}+w_$}C+}xiQMo*Gfbk9*%eRQaog}_aC4ZEn^gR zxUS9z(nKi!0Z4;s_M80xn4ZXEk+IcES=~4-zD|1jwS8>94;Oq5D ztIJ3(w`7~P-f|ixz5DKDQwW;!0`}RCLyJX_lrL$xJ0utoRsSu6Yp!hJiKQnV|8vJm>v>YRZX?WzDa?5)qwN4_t@A|r0Uf(c2k-gwu+!y#1 z39viC0MJ~#jX3Td0d$C~C^nQm_BSBn8fMC$K_92L*!s|6aE+^a%Z9?_8jCS~D$1V147U27)l@7RVe zA+czH@=);`gyWf!lfb&}5B&zk#(xEv7~H3SFZLoU0N)EJ0Hd=nrIo5!IG6K-Oc2hg z`agX~P7y^#Pq|VAG7#hJKeP*Y-D8CEWX`^0dcJ3br&=vJLNw?46Ph5BgD(}BY+W3hAS3BX@BS5IL zTrh0l!zOsg(%oh^d_#|&c?~ldbyudwh;+AJ({AnbE;swB2U>Mj<0g!t@WP`{X?Pk!VY9WeIcB4mx&Poy z&b;q_H2)J{ghNsN^ISpao1dCOAT3#WGrtNWRrxLT(c&X3lP5|1RsIgR7rHjfYqLzB zJ|x3IqPJHSNM-SR-?z<~>yvGN3?5orYaa}WMtU_hhz@rj=m_$i%ATQK&AJw^N-3nh zw&15r_e1yzL(DKuH{)DsuTtu4$!#3m8^fI;zr#a_9bAkyUMs%ED<8sY0?CoJqy_tn zHg!|>?x87$E$AtXZ?$K=Mb@RzosSQguesYufPi4nu;vIIA=%}uhb zdiNP|!NaG421N99whqS+U|Z#~wQfYpXvD10^y+MZ9heS&wyk;{!bSUugii{YN$yhRxmHiA+p&Kv2_mY5)B<_aw+6On<-=|6F_meFb$j+ zk0XgONvQ6@)aC7=uaI*iDfCOap&pBX0R7(}?Zk5pC=YQ?bO4?9<4wxrQmZRkPnggy zJ0;kSlZE#k=&&D=!Is|;t5xPp-!B8*SG*v0j3Bo1)N0u}5PG<$)q>~1Xk${lz_k7$ zdxwIb;&1s?IsonALQyT>r()O{UYvnH2VR4Kgas@sj0ZR%wbHhyMIJXktSFQ6^I(YO zI7_}ArMC96$6xE$^;PgLl-y;3{%tQ?36{9_*jToBo=)8x zBhC|rCxZUx&@@PReEj8MGp{ax=VMu~!XXKh277D)g7akNGFiyW-T>6r)ssu;F@Uo9 zYc_kKDKa=PsN4V(_EGB~EbCVLYe*cN6mNxvEl^;;tRN$%M+aQ4Wum@so#@^YT^TFn znfjKT+)A!Bu=X;In2*1>jKbbgrqA}*724385EErxv5DIEVCenIT=&xm!A77121UvY z6&V#A6`i%}AWr(v;1>tayx2ZRmp^KN19e5a^3IBR)b#ucw2O}FqNdv&v$mFJKMu32 zD>E70ju6U+#F6Qi!PX0y)EH(nLPIk+M>vIzKYc#y1!qB3!)H+ncRnh3%IPlL6S-u@ zOc!QjuM0Gs<|opj;R%Jyuz^*nhPREk>v;ea%&7x{Puutc*m2Oph44>LFqhu|$m^xj zEvmE9rCd^sJ}|f&!%8+%x6Edk^rU*7=X`TQ?VSw`%I`zUxjAk)Q3U?0f?4EitE5w7L^V3K zcTP3gga60zX{4-+i;I86mOz?|j?qoSA0MJXfoVsULZoNPSP~o$5P`X{>*#sg891~5 zyFR@M<%ih|wl@0D%!I^t_p_58a%_a(H5v{gflhP&e~bqI-RSY3y&s4!B1CnwKhrva zzKG(V`yzi5$^m^5t3Uc8GQcz|WX|f2nNg{jpH-jp6Mf?9AI2#XQEh7uQ2c$smf{~9 
zTL3Q`#>4wJNvKVogojKWvZ<1cMc<2!JG5&3yfJ>kE3LR`DDf#67^G4hwOy1)cIDD$ zKz^PKn(lToQRcnSE=GI}v95P-p3n@F0Y?Ci8|=Hc7jBq|1@_JuIKWZ@(2w6-Ke`~> z&q-OKtDX+7hXY$sL-9|)b6!ZK^7#0EBm{4O0$kHBt0aSu2MuQNmr$FItC<`)QrhIa z)pKIx;r*%}Xr$*sU1eE5AYLFAQZDbmZhJxqIY!X+it{u*2qMyd{uzJ&8nd1(`B%xi z&lX{PURjxxOlYRU4?%vH^Y{W%8ThpIAAG$XyPENI+8fS2byYi6UALG#Q>@dbrEdR6 z4W^W*U+Y1KMM&`FU@(r$U$z;6Q}cf^u;N!U{?pBJ7w|rr5`EkMnp>GHN$u0x>Er3v zq!g-Ks?5xS^k7>Ps=eq=r-tZCaxFQoAx@ zLH(*yuLJdm54!jsTccr_cslj*wsf~A!Hr*mt{Bu9;3xE=wlC4pm>+y88;NrPNWQ2c z0P;>`Yg0vM)oh#hnxs!1)b9jl!Ku-XZ8v}| zC`wR3MMFFuHYbFgjO>)nCE#D&!@0N80hw#W$Qq2c#>3{tBw{sbW*NpR;_%*qu!gvU z*;R#e^>j6}9!UIB!cY+D0r=Yx>$|c#REv?#t4k*f8cvlY73Jk-2q6*BQ`@9-CX}Go z7dzl`$PQcSF=ZRFq*BN8@Rrp8u{zCoLGrs~;ME61d$gAk`e{B#idjgvqWL#Z8p8!{ zU*dI*VR@mT%mx4wOlai~;mS{aGh7LFfUi~V3XHi&Hf6G12gugVZIgftMlQg$VUAdS zLMZ_EaG+&>c_Om0Y3vW+>-)yRHHVIlJ6;|v^I`B2H(Ha8%iMe+8t|5r3L{)cAZ_ld zm`I=B8%iIyYrQ9(gIZLbLbVj31@`^?)`U#+=e+Bp%@Gyys~fSrXmCdY3$_@KpWTwD zWVezVFtQ3=)7+kA5k4UAzs0hehK+ruT48@|ExaIQU|a8t7j4zrvVe&xw<^Ty&eqFs zxwQ3etdDf;vnFuD=gqSCV~2SreabDhjg}&k6g0|5?<$CrPk}&`AaB&1A`#YRN8KPm z$j3ANyyfeJiUyd&uHr9rR30GIYmXO%#^XfcT(NR-Liq&2E79a^aJ;X z5DSl<|3vInA1Q}A51QJS>Oh_ARKYp+fhL3!vMuA6z<7m?HUN)26AmmSyr>4cRQ@}? zrL%Ad5Q>ebAUi2YVsf9YEVKgPbX^uX(OR}vN9+op5}zRe+m?Uq!M=ARGGBS(0qFLW zO^>B>#Y%|K=Mp12=TT3SJj*-**R8{Vx+SjS17ugeZH1p+#vRY~`X9*&eYM^u?|IMW zNBJS>MG_#U$==%mOQiFb zEZw)RrSiTW75P=#RPx+B|A;=z;6N`Y|8c8DTjfq)nZ6eSb4>@n?N7i`Cw(n29dsq8 z9r*& z{ZT9pD!ga%SZ8&h(0l zzb&+`;O5Sf?54%yb1;YyWgql54kyv4cr7>Q`|d(%u$x%h2bL#cccq2*H0`< zS66Sy{EYulm9*=7yuQLot)0iS^qHnHxEns9c<1c?^hR$(F=A%Dj~MGrEZaGgnsKK% z*~@?V8zhkmK9?%pXcq9z5*!EPJa@58x>;Gw(+HfCr@I0RY6dXI6S$j>mt9?Sy8~qQ zV@5Npe1079{-|%jEBtF&4iUCHAs#@2 zR!ZO#Lfv=`wDL85rlG#$0`$cnY=#fLOf6Z455?9R;HRi%uO5Jixl@~Zl8u)3lW_*0 zdo-dp2iww+-!zx%cONdXL~o2tB97*LUCzx=h#q;^*Hl6f^xPyN6FnGNHMHG_+dt46 zY$LIPu&650>0jLfFl)Dn))Le^)eCbrJs6kO%h)d2FA{U^ zY_{D?!4&thuGOtcZboEKMvI!Xb068zZR#)Ax53lqD%pwIH{@Rb|Qku+vQZ zY)XdA2K>ToWzeZBs?`{Vjqa;9^Hz5Xw`PzI)&8bkgAE=ZhGZ`IhK|zKEJh<4WD}?W z;l3&}`naXGXQAOaZ|bh_?t1UnsIm*vr=ogZr;em^|K*s3x{0k`Rncv?y^QP*G?CQz zWE|~fSdxm3W~aFRP%rpa1*+4swzqBBjn}%#M5mUTJ?O(V$=f1d*czyl^hlJSrhzv9 z^jWAI*q+rE#2&xcJr8N zi!*cbHJ*^`C=9F$gvkBJ7yNl2jLpW%bj%e-8yc=xg{CnkD{&cjiadH5)=m0SjWdp^ z6EBCMtzH=+-y-tNJPup86tZvNsh_{rwR*XZ{j6Yd4@k z;Jo_31Ly8g+y4%n!Or8R0F&|0c-xUE3aEzEGKJw&+Hc`OFaUE;Dl9?#T|s30R|V0Y zK5_le1(5_H-}X0Xcm%MA`o~62l^b-2g~ma_LKmckDM|%$9;-2aH@$VOhQrj{@{xW2 zrCSs%74f~B)}53PMReCWQ8toiQu6*uHKlP@cVJ9V@~4?G!ye%WEJ-pcviv%lw$(YC zt-KTYV|ew)F2&#U>%Y`Chl_J3yO7(A^|M;hkg#&lr?t^Y&ff;0(N7u-ukNZbnVLKT zQ2@Q%3;7?BGWB9d%))K5wNyk?I6q7po3ix4!JrY78h-4?_G8aJ(V@8}NkL9m#8uT@ zxh=i&M>2HE)U2thdXn443d_?F{;BcS8|_F=>9R|x(q-ElS-M6qOwJ~EgWYOR78?#Z z^t&X#-sqG@ip1^apVps&`vbpxek#w^+vWgSO4);U0uH7LhJ@S#pg8&9vNaOjR@wIp zgSjDq#DV<1j8eZo4*n=V3;vS%@@B#na#kx9`5aQg=^BVVY8Fr!U%^)E)z8E?*rOKa zzgq^pTVc(P|G_CY*b&p;=li1MoDwEw)s7wN#aHx^{sXnILW;(79SA5_=Xz{G+*G=4W+5*`k1r(1`*ip8S$NwbSK{F_*4N! 
zFXbOAimr7k$V>B>vN&VoxEh%s70<)O=IMlbV;+Ooz|t=vqy0RBS1$Q|-M-Rbu;F|} zsuCV#%7lJzZR4s;$fr~oYE*5bkI1*>NU`&VB>*7Si5ONEz0sGZ)m=`--aS)eC!@9_ z+~?Zs|1`AzcJau7h~3Yx&s}z6gvzBFA_Q4wjjzfJq*UOphIbfrX>`6aG5*DrSYqP_ zt-gMiz8^$KY_V=`w6dt@vOwof&PY;guYX@lPI5`)&8gC!M)lT`xtP;C)lpvME)^D* zWs;6bOd`?WJuZkrAOK@WgDJ!sa{$o62iQHE0;s9W=0tTrig-|GbkS?zWT}#jwmysF5T;(T{VBMf)*NB00tF)EaC{~_cuIhqAvVaFZJ#W zo*psXz83p3w7m#n|3LMJHLo8os&ffpFe*5-7%9|^+>R({5(dIo1s_5N{ z%B-vCCYNGQU#{g>v03-u<1%iHFadnBE+;N%1e-|z3q-Yv!?zoxM@%b=Rc-i}_y<6^ z2WSwZj--C8BhfHBx8L=EIMeD4$rle}C@3W4nw&5_SBdAv?YIx(GmXVJ&3~x(|FpP8uj@1r_%O zduK%w9bfLk82Dr^J=)`b+)vWAnwZyI-x-E4gl6efJUd-E?P(x&Zqy19hg3y=%?d!M ztnPd+Nk~mlb9sk>r?{2FC+1Y*R!Y07SU*7D^e#}OXe#0RMSi7$4j~c9t2KydMc4Xk zo-O&iA1M8tZk(;!WzcWuy&S2d6$VE`8NEj$E3M`M^nd$w!!7d_1b$m@R0s4X53RtL z1(IFOVhe|<;O=jq`$RCz8&^B6#YlzIm|DG2gta5FOx_5#X6lulMfyzyy)yHKMW|yA zQ#q}?WZay_BbJaZQFT^|0-0~Xybxwp3CFrV3k-88tM}B|x@7-?t5ke`#KdUeO3Pv= zm*kkTTlSq5F=$^d<;BLKwfAz*CWU|fPzOHhRO(S2#%?4VB2kD=*(FXdbF*1Kvm{Zi!u_Z0SKy)=?v6x8^kU>*1gnuujdLcxc=eX(Y#r|n$X+RW|~dwd@NHIQ%Xn~B}> zW|`&rAhYX_#(zz*Mn0$%6g>I@+ri0O4V8?njBwh-4wY;R@Z0?`mX_k`9!Tg6Q3+as zbbp~+?oH>M{>b+8VDoKl^=mHIGDW04D=P#L7#w1PR;7CY#Q`W82Vsx-sW0zfz3Lu_ zsD8+XcUghVMLwADxx%&hpg!)S7OQp7RXs^M3UP$K^Z9v*^5ODKcP90U>f6&f+f8w| ze%(?X&!3%r1)fv-PZX9;nL7Mmuf}p+!^Zh6U$z}rINGr?e(v5K`^@hE))QwpA2$5W zlyxm_2?oA`-q@9@aAE_PZN!C#d~phDHb#&8-0~84lTD6Qgl8-WCD-elP5oEmMMBD? zsny+D{Pzz@)HqcDyA1Ga&*G_F7|HXrd+U7}&=D!0?;R$)tKViS@wc!EA7R>K@%j0C z5i8{MKP>yTK2T?U=rdJGfCs&_4To~Wl%o8DPGT`~(T~Hco5BL_8^-cvj`cVD&NoD^ zs&NfrL{@q~ERokw02$VlPtzjyr1E7dcLq7^f|_7xX!2JIrk@OWCv0K1&=kyS$?4Jf z3!19CP}seoj9A2?1xi6xqH#i11)I<g~{NHA+Rs2{8txE_z*YC>Wo=`UaXzBk_I~mAX zVznbb9|KpYr_LEw<`Gp_xqzjE)_*qW_r^tZY`neQb--=@9zHxiAlNzY?Ul&+V-)(x z`V&DXn>I+3kaNuWWYM&?27T5vUA|{VeOYx}E3puYgjM)nj!M_mSFU6dPsPl4H;T8o zXyTD0h(|8DpMp*@}X5-{t7|;4eR(%7E@e_%)McX$xvjz z3w&x!bAfWAP?#pscA5P%O(Z1iBFNFXjTPoqGISF_1(mORmO2?qnYO)^)Qu}7>&|}B z8hPuaVrW^vr^zEK+NTg|$thzwad`4tlMkQ0E7|};f0yLtp2@{N8(-+^YP8QRxwITH z&ZNN!x+L-wL819>v1TKNC&bmP{u#HQ1c&%yI>#tQj{nRm2b>af92s8AZV{2`C>pD@{JeqVKx*F%K-bEszTl8%C3V+-7ej zy^F{M0b8O^y~k%=BsYimh-4Cr9p_}^(Mm}-RT@ST&`B5;~s z=#NXtGi)vJ-qHs2(?(Y?Rn4v1chRoKcWPW-$|-+h3H5UyVd=939o&N7!uEB~$RIjD zq2HcYAgSNtyysI`BdtM_GJCTp6_M?WY$#a|22@zH*T;cn!js48&)3dgWH@*XlUxi@ z3Va#sAe+qGH$puhG0{x2oT+af!F;{7%h^aYYl){}seTcVGZSJByY3TS8_b^1Pi-_m{*n{pQCl*4tm;a5@lcBke)L18}_4=7h}t+Ym> z-nKc%sR|uEpzAhBmB-F`X9hQemx2ZK>30k<(zo?f=sDGeDfxqrKxjRec^{X;s_}h^C#*q10rrM7 zZ;iALP^oS9=`0nZ%33QAG5ot6+3n*JXEtijeyLFpTHt0JsIKsJy-Q*KP@t@`tnw#)hfw$Ly(6I+-<^cM?d%QncpWjJ9v*?(0ClcAsZ?V$CUW22Q@G z5W1_Tk3O#IwS<2}tacX;J)&EoJU5`I!A^8DV68eEQ=_uO-MKnRrMrhst;5JEL5VZt z%{*PJebF%8W!w!I=kmKL*$=Pcf@nl{J)Y*w+R+J&rTVakVM2m?eAtfh=Y_v;VuS&K zhu#)L@3yr8B*gkzI!^wf)jUk3@^kow0UwpgqTUcoV!CZgFQTHysUIRcp~D)5>r1JNouXDaX561 zaBV^9m6f=f3P{<>rL5KZ#ApUdgo8CNai5YmN1kL%uBkGQH;cIHq+3qf8NDpkqWjW* z@**V>Ckkuyi~h(Vye$&?4fdIsA{dKUsUUpBD`h8SEvnTs8^z~5{rQA$Qq&HUO_yII zCauD(Q8~`jJFn!J&9c8&R_plh*uDOH7O;Q(><|?VJ)i_m6co@r%T@^#3mEA} zosj{|#+lJSJgb{t?4Q2pxP`=(5$L$}cg&JX$^pR|(3mAq>-^D}<=2zCulq+|_Kzm) zUttcvkbkBDA^(L2#EzPI;Q+fVaTm_ieYttC2?2l*6aQEc;zDxU^t(5sqz^PZ^HpoQ z)R^9z`dNPN%(l;_IlETWUt6+c$|?LNjk~5f=$2BA!@k5Z)1-!|7k83O!-E$_f+|Aw zjcEg?T7$^umN*)nZ@(N%-py3nh#M?76>kgw(D`@yR;CpdA8Qj}ky%6E)5`sd@3>MG z?(-c1c03JzNmkJ`)f~y^K~%n1pC*0bpDcJtH|l#KKh7yJ-{x)sUt@wssbejh*h7tC zp}?3A8d4g7>ZaW&!BT%mFZ)9EvB%lP1;k8q+c{-9m5XrW+vltnk6!k3pWGxl*#A}H zwd{r zQ5YL7I-RiPS3h+29sM!iaPec7Y?7St3jpU(;N^|4A?d)2>dP;`d&%Y5+F=f{jyVIY zrn)cFEt?gtKxnZWUnkby9R{`~9b6trvA<3#`AfZnymhs%uC6*%%ymWkdX$^?szFrl 
zs8BoU*2A=f2)t71pdiqt5Sdj2sq{3zs3grEx}lIwYq#PS-WR3HzEcF##CD5_#a$EIjj|8=)KSB>pn=e{7*EYu&b17DQNc_M8HkfY`@AWz6e8AN7b`gjq z1i*VN+R$V^&T@=X!JbR$`-&8}%eA^UdSKj#`Sp!OUzMHs zDKYVpU9J2dm-r>AHOUR)D#2CvR8OR47Fr{k(GIVC*t?3Ln{~|0e0xszb#a*qr3`IS zfW73`HnO8#^Ilcu8MyfNG|K7XsH80lj6Z(z8If&TsEn0+EIULQl(g`&5R-FqY6 zEag@6An1~Bn&7RA`bsYp9?>nZlBS(}Ta6c*7JAFWO;wT$A{hlKQrY*^wya6G`7!9z zyrkoZ;baNC;(wFmuq~D-kLRAm%lJ7Tir36YR`uDRI8iGlhGuSGcyySg&e;G^DV{CT z?PPJG8IyJl2dEalv%mJY=3G(9cAt-pBdG1zwzw@J$RGX)uA8|t@iF7d-DnpUVJ*)C zH2i%=BMScp%}I34 zT{_#vL)^vlId2aQcY{wuV&Sq!>jkL=7P>c2V)#Qw+CTXULAidqb`DvqQRs&ar%BLQ}E+C8yO*OPAc$*I%V zamaq=*;MJ%&8<;X?1Vk-IKM!#Z0m>9>OoXUL@0CGF;)o_7QMU` z#~SuAMXftTt@j`Rl`keXK0eKE5kg7h!WKF~B`HL@j*LB*t6rw#assp5)HQ3auc5q_ z!{8u%mDjWPc}1+b3JK|SlK>%)$b4C5x76?X47=BfM76fSB4K#<;Bdc*>v;dKjGe5D z(1KwaBlF}h3!A-FuT1_Av8+_a@OfG!?rp`2ERotaZV}{E&1qdVs&y~_}8K7+8Sdw)J_@#%BD-eyQUR(0PLMO3=jJ)@gq(rSX0*( zzV2a0?bgt$j~XMF`&XGZ6_LAw=<%5cE}7EX!MA_qNLgxm2;YJ|XU#NK8!=AqyFb%1 zLdI;+(lYFH)97+Vl92tQpez174MX~5{&JmbwzH0bu&ZILPjwxq-o}S(L*Ejl)L0NS zc=eTiMt^@7xY$I_1lRhIWyh80y)-mR`%l#EGH7TegJz;&0uT%wA8_;;=n+Nj>?nSz z8>@M(G*HW~#O^4*C$pi?A(S+boZQk*Gpl@{f0HC`we5_-XCFuY1F>0v;vu|mz(Sb` zaBBePGdFQg+}x<9Ds zO@W?{QVb!h&W2m1fIz$n$(YK)nPmXhv&1%_r#^KAN9H=Q>8E*@vb#tqN2LBj=UMfC zrFJ9bdwP6a=&Y!hQg&PHI7lRD^X(p`7sFuodB{@iDCte;8L0&j$7>+KgoyVrVJ*BY zMRX)VF@ZpTG1xXLvFI*|sTy!*f;K^A*dsR{&f5ED2lAKLVo(sg0 zHA!*^i3MPmh!pgDfbRwnY_cCf@3|}mR?GBLU9+Sdwsnj_cfFpY zV#zm)f+s{->m+t{8DL5uV}K4yj%hIB?o{&~oT9~68qdvokB?j9qE4rI7US~Xqz(3D zb}d0uCz*xYzP%$xwGn&Y-`qFutj&S0~au>vUh~)-K+#Eunr+k&UW$$8#==;M(dJ9qX=sU;Fe<*Ku}0Wk4he zUYqOn(Qzhu{)f#s%MsZeV-LPJxy}4~n!Vg99jq+9mqGsWyJ~7!r{syrSIDDsMJkft|IKb-hwZ#-)JDue~hsV5*!rR5GU?M1Cj0 zkG`xpadh{QriSsP@~vfp<+O;H{)3vvu#59FFFNBtRe(A#sM>-!o`k4EFm2J0WdycK z3my3~=flx!*84N&6|pZ}9w{vy#TC`IwH_^%sm zc8!)^xCF#Y&L#b8bNIXHST=NZki}kC4<7^PH{SI}?4&QU`YyCSMrQ#kj{7c}xrlhzR7WUq1L?@hzoa7u&TP-pORdr$ z7)`VHQx3%YPv7m|k05&%qUwW5*rm()QV5*N_?u2X!M^A+vktMmS$260Jx|5zW;R#g zl6g=WR;%0gCe~OmvRPkRwZYGs&yT7o`Z?{fwK%Bnhv2qL<9IXwaxkr(XYQ7quNOyx zGz&eLqUbxh=Wg1jBk39>9LZ9Hys%liBoQ^Bni!NPbB7~=x=}e`FpTkE5G=;x?c-wnvTnjy||hSx-I@fm!uzLK7C#v(X4`cv}0+Xk1-5) zi89hO+++>qMVGSKzRulzlOD|0yU;6l%f3GP35}Ll4Gpp!6wudgBYpYm(q3tvRYs3} zei@ZMEB&rDvO?w|v>Js^dH^QiPy{f4kj9tt+TEr(+dCZSdveV$io73hnyk?u7yWV- z^&YaXo^Z>lslUG)LuRpRosl6NLasBg5~BJL+lg2?`NK%N0)yB*MwKq^$&XG4PbcI- zZ8{J^c5a?tu3j|boe6w*IqRq<@CUWD2MESynWZ ze0@8bCOudsx5h_58Z%ip&6hy=LOx-iwv7;2Ha6uOUzBM$-Qnl)9OAu%)QdbQW$I+) z#+W^slz5D*Vo+gEdqDU8S5bwN>_Fsse@g4C1WYNfO(b^3^N9P8@uIYw-#>mr)Qh!k zWFQN%lWrA#czUVxH;~@f7spyOyy(im*VFeF9}hsri-!FCAPoK8TwS4ORH7VxhMq;1 z^XQR=w#MeP&W6d2hQU5u%@L>mtfH7`Zx;IhmOT-=Oy| zrK*WxGAlIAGhN9y)V_Eji^5r5cOnL4ICHb+-wq+U{4KqHm%YXFi{f8Rbt@?0q2USV z>SEj$BFU$ZdXk+unmC^M7rzu^Ux8hm=DxL;-hZPe|Bq^Q;OF!y7H*F&sl2Wa?LYg_kc+~8Yn_A9f9QUhLs$Z>X@3JPvxO%t-y4nRhhzp zp^7Gi;%F895sw5caJT-JbMTja{I4wRz5C^%`I`xypFj?S}lH zo0J3mZ2&qX@wyE zMB9w}6Kzx8b^vHJ6YpgKb*8l|U;&n;h-oDF3HY^-9|25GwTa=xW5Y6pd3J|QV-!2?{0d;r;x;9##?mqlEMB&ADi{J2zL0h z1Qfy#_>d54VdxhuqC>etdvUyJ|6;Kfb?hk;2;!Cve24K#5W#Ef@>%E`%- z)25!*nDtL%k?pt}hlkb;z(~^T3;*FX(o998F}2I*#-93p`GT8{7o=~5J3qN`DVwUt zDcpH_?y)7P$mqRM1yjLpHdT<)xNDzfeO=u9Y^rn(!PM`jUux2YE>!9}```Oi_f`ya z`&u5Q(Z#~>>`R@Kp-%Ho-+(=nshiN5kPqTi98WM-LgY7Yd#3B~lwZ4#d+C^|j;p?^sKoL_vU~JoGE-@8^;*^ zmSndN{#qs?UyKK6fF5mUr@%?n&aixp2c-D;s|OUT>nUqUMj!cjO4j>wu#zOm>H_2* zX7Lve;1{2!60yKY>;<(x&IjW9#i)u7Y|^GdIJk5%#R}lM?&?PjD~rU7Tvfi^J^x|h zJCg7TP$eGzMU_}|p~<^s#LOz+ED%&EcwGKNiKucF*ma|Ek97zxetN$_33u^`BVIM; zqW^+{*nRr(GLXWkP4Xl9z6bCFp)2V8s6a)Hq!xEkj_{wD6Fl z>*gi#xqypC0v54(J47;A`D5IgzS;3Bkg7BO<^H)ubU 
z$QT?kMD*AC4T903OHKay(mz+@pL^qXz}O>1*Kg2mMv`#wacQ*4DiH33E(`)e&U7Zh>J3o)}h9fCJ(*B=4 zg8yF&UH{eV{XY$H6aQS_|B_na|8#vP|GB>Z(Dj}Ahp+FaW{pyT&}D+YO`@N8x`U^0 z*Q7f8EiiT$1argb!{>n$s^nDWzJ=@am^=J;# zvl&l7KYWo;c+6@)<-JiE)_YArduigD1usRm6Nikr%Dn6@^|T+6{F;U`i#ugBD*;^Ka0Q zS`P{Dbxm!;ZB(Z|RtVY-*u{xs>Rzu(kk1!nI;<@F&RmmvbcboZIJO@n1me-IDxOmS zI);}>kB}z<*vw#z)(K+Fqc$LI)o64r`EJvPd9VKI(EEDb2Nvy^Jwd)o!4?qYo^If-;9iI{R-so0`oS^*&**RxJ+=3`s z^hiPpfs}oJ&z*B;pR@OwIWud`z3a~G#mdT`(3J@50B18RGINI#6_lspy*^bF77`P6e_J2%ekRmJjv{2mRs z9BQ@)pTPiJM%-|EyrF*msO*el=MxPo=hD#GqerUxJjwpRP?=$nWLjIKTB%ze;^DAc za`z*aOiO+Ga>kXT?~$*=MRqHTDc|y@o~#r)%U^5Fb}QLT$?wlx)l)gA?JJn1ODQh; z<1}9k5ejrOy!7L43ueQoFU`4hnByiaqMu!sEEsn~zr3^_raK`zM+44h+%hT zqD)}!E9)a%^;6tdK057S>3PYQ+xF^lkbjR=AW(Fw{DVXWMcL{xZsBkkb#SWG%p7_6 z*W){XLr(oyqH0(=(Hh^fhziSM`hZQ3b0@wM_+mLzK!48z<;`=99!;{pla4X&M1RjFctF3pM6y zEGo={dhJcXX{xwtO|cb(Ng+lzVkvt04Ar@L*k@>U_D=tMa{tiwNzi(XDSY%Wtsz74 z?09+SIn57O>HGMU$3qk;3xgNnMl@d+>X6;L%tQ*jm5TRLDYwM+oh|u^V4wx#^>^1d zK88Q$xCMATV~7(#0w#xeA8>IjJD{mRc^6VKO@e6+AI`w5TzurGBU=~AW@CD?fzg_E zZJZb4JhL(7YCJ$BTAqtsq=FFLL!iK;r41+tbi*7o_#Gg6>e(I+W*{Nc(44((mp#2b!2C^drjLH@TW9_U%$8;LKK>@c*6T|B0nnX7 zY-uNix0S6hK}Fxrp>SS~G+vnUK#+MSw6gFh7M-$?fqz4$1&G&pkxbC-R-m?+qiHh? ze2x9F0}?mRE*&=hp1-!bdR$KFhvcRENnNaNA2VP56z$9a5$t+`6)}??NLpt#S*-W+ z-OYf9+V}F^3#;ETmi_oV1gZiB>yOfg04W0at-fw_AinCtCFby%U2q8xt&HyyV49l*{YDhYt5W z4xN%8>w`~v373QW8>M#k7qhEd`v%WB_wh7#VK}MB&Q@w|vfkrE`h8Eh4^qplXQ!CI z=wn>?t<^S`?K=LrK1`2N6)Z>?8~c6cn$7h%Vx1hI56qW3i;{ZemLbU-s7+`(*Zp0x z9F|427(2A)UW$&XbECb-H|8e$qPP6vvqv{x^FN5xX1pse{S)sjH4VX)0+PXMo81eh zDFj$&I!jvZaQR#R)@)t(LA*@bo{L4*)yolL8Mg>@rY5K@bT1$un&{HXQB0ZHD8 zpWVjwiH(&Goy?C{?+;;u5A9T&%;7uRgfsZkWvvs$ZPL3-Eny?r3AESLce|pk%d_c? ziJ6$(0Ee%(bEnSWo0j@h-@hRk;$a3=v082}D=jh{xm1Z&1^HY>y}gT!2VjyS(31;J zeeaT?6Xg{+$n0|TRp3qh?gg#!AJxq=5uKX{aMb19E#o9F; z?p$A}eWz~?goIy-t7Hte*h78u#F@3?Zx?Q5$>8&Iy4+UIxMxgv>I=f)kE&RHkz#p1 z$M)UF-tH)@K#~3_80(V)dA4Bt3uIA*#lmx&Io4vPLDee7`{EU*nP&913H z3oH&!3wM}0XV9u;mpXNwGaV_sKQdFZ?ovwcpQOe(`Dv9|o&O!F1OQGzdlsmgjj}G) zHOM$wxJ~5Rd2K(Jc<;6%Sr&gqd#_1%Lt~X!d=ElCGgY!x53dc1%zvTSFy{3JUtw(c z<-v`!#kQl*+0<2QcGV$;C_p6@vOG97*fD=$oG4o0H2t!%>!7~+eU6Y=OcEy;E;_U> z(CSw{IK{oP@Eql62-@PkjMdJzE3}&0l6sW%?F^MYn^`qt(laaQ30w-m4Cv1BB39z0%XgV%BkN{89eKN{{m^yVBaRpV5XRx=h2v2j(-Mc8Wvc5O|`jw<-$lbTzaz6h~9Q#|7{GW_z zC;p3lRvhMSDi4S2?TlPMtY0-{EEJO>sP4-qdz2s zomNsfA5ycVV*IEL7+eurkZK(?A5QHYR;?w+Sn)FaMBjt<+rYpEB9Bc$c!4Q zrC)#s8vLc`Pj4*uJp+qfjwV)PEqc9q9Hq&##(8gl1+}O{vH{-7?iOEM@)*3E3>jIk z$FN68n(Ebld_y~8y+D%^82;JcgnoxptfZ;ZqIDLSp>Ta=gd7%enJGDA>{fKuqY!rz z#bgHH_-gcokU0?Y7>7?4;JX%^K4`y_vHG+vN-+&?4W4S=vXRP1)kcu z517H7J@4%8nn#T7ug2^-#ewQVS0fYoH&4xrrvVa%!d>QkWdR$7^A%~t{ za!Fi5Dz0}x^alejCp?%wKmTTEA>_;(yWF4*=?AV~-XQrz&U{C&I9`y3zXXVhO%tN$ znMS=TXb~EtM_`p77($ZbO~x24!zu1&U~?bOHe z(p;?R%=UING!v5y-b~9LnAyJd0z}F-zW)JEgv|jUxN!s2QUGdGb>}*>zkT?3Htb)_ zA5s>2{t&IUfGfq)JR@rIRNwuEZ*iAgiKP^1bGmlkj3&0aC-kslGXRxQF>|%uH;b9{ z4&mk`CZkhi#c*@{SvUTDF2e`pjQY>cmx6o@3~fhnnVa>SpT{;UEEWr;%DxDEkzQ9z zP;?c!`+&4c^iFM~TKC|A4Wd}o=z;+}f3-ob!}8TY&w^=v`Mw~ycB7~32x(W+jHCNU zA3xe7fE=dA-5MzjB&QfqQ7XB4_34hT#904ez2`pY9#`0l;Hl{A@Tu$`2b(=2YG>m7 z{3GbfLG=r_k7OwYchwEbBvLHO$ufJIvJK{wC3JTMYZ!gY*!h&u)P=7mQG11ER2v#79cWZ+p9oFW2@9?i5JGC z$}XL$a0#uQ6Vm87V-KIdrMxVt^m(Jpk#esBH;-@g#iCZMcxf=Y&c$8@GFkZ-MI!M*VDH+6>7@tZgREH!(F?Sa5 z`*@(W$NCUTqTnL#8pgFsL#QQMsWxIJ8r54H#|}C9E|1JdsojtJ1i36tE!mOyBqgNo z|sgB~UjJmFgq_efUM-Qlv;x7%x1aodIuy|W@cluzvmRl@XUud?z`tBoQr`qhlB$RkRd^|*q zNzr5W8u=;886J9VVP_QkKzWd%8MC3VpQ+O^`PpEDOP=1X?^n>EE;om8+I1#jAE+Jb z;ovlHA5ewuDBAVl8L8Bzr?dl7+1TW!f5o1Ja(}?6zHU8^XNcxmY6Ie+}kxDX62DuYZ`wN_wE9_>90kMtoxtFP9CygUGB0?17eA19McnXvOa^_TT 
zCsePj_0S&kP<#ZPECYBlihuT2v<1a({G`#35IZymXz_rMh>3LC=oIVE>aS_S$#`cD zzyMh$0R!s5jugftX^CF>IZXd$b@|_BoQao6<;aI99-;(x?x+hIb>3GYn5(JDO%HBJ zKgp%qCy%2q-9H_kJp5VF7zn_rOEwlT#z30`o$m0t8?B=Uss<+_olZz!hikL-igWy0 zDo<(%k;Yqqp6sL$Gz3BCjqp~^W^|I7KWm9*vZK^b8QT?Y65dvagk>|Rok4hzs?-D< zVw;_WRs-x;e>sFQTWfxAuhI!!oi1SGY$p~ylh65Hd5sKs6~5{t2+^EPGUt%n#Hhgt z`dw{Z=rtXBF*Y3(WwO6;ip8)Nk966UcRgI7k`j;^We(^usBnm88HSD~T=ZLLx4G7g zZ^EQ?uMu>|+}{{~j5c7vl}jEy_DN!6dS&PPHFWZ&>pBmNatVbOfuRMKZVSn(-n<}w zFYR=vsCUV?eq3_}s7X^+3>}}JF1V++4sG=wgyVJE1Il}*4{x&@G-m_y3@`5HBTjzU z3j-b)dJBpffz}(w-*3zs*th%;>o4v4Y{n_T>vmk{xo68XpqjJy2Ef9h@Q=TOY6{yf zC@exc#357`UmMc9GPX^c&8CH$(z(B^XU2f;4BEb>(5-N(3jq$uPeh-3BlMNIw{cW!*zNoIcI{)trvBBm z`_Q-e9UX0gm+xM}f_oWq`1g@;zI9NDVX%-i?hV7@ZOh|2|oOiGL1_8Md3q8+C)2Xo1A$IbIHc zc<$>p6|&)V2+l-ZEKLtI{>&1bZfeeY(}p*jz^;GeIc|2ok3VmOGDOWuUENT<<*~~V zIR=FFm^xaL%*L25X-6;WtCxGaoql7KsZD=WgV`w*L`8$O@Ac+Y!A0+7k6SW#>DRDC|DbGGh0sIVrb9`vJ{P#@n@P4T< zuYNtGa{|-NiF7bcLK#QZb6hAXpL5_zY~bWUofgR7utyoi^$o#?^GWee4yDI3cLJ7C7XbJ*gq7%oBIWSa>7#83H$TQK5wAB?-@dFtQ9mWPuSWq_ z02|ILc?6K=OEA+GmMYbj9Xy809;rzb=^o~MPlNhF2v33!Q45Re$PVq^{$#4>f-HAV zbh>J#9y{9QqH?wO;Vv|hEaK#$hZZOpgzN|io`CG{djQte5!m`-aPX;JTqhuugv92yXE9*CJDGB~ z8p;{ISxa2JB+VMv8L9z)ts_T}1Fp70wi&s(zi(z+jI4{&i`hD&dN%ZtatA#LnzRH` zal`c=>hW^r`Cm=0_gK9Q<$tNTH(Eu3(k{wjlEn97vOADmtq|v6FjnS)c;|xKQ&!ou zbVsICmEa9S0fDD!rfqUWqMr?68MTv+ZP!cdUUUwIG`d)Bd+E3Y5)|6R6R!;Sb27$_ z*;|GwlG8+T7!{U|;%lXz+c1p+dVxKUs>T-YnLRW~zA?wE#CqW6T71Ow(9=5gKfvSs zueD{ifPb~DB9yeyZP?l39suxTM4bwtd`c^BR@Q<=HuZI{nGDhVUjbD8*0TPi<5K7e z!Q*6O;e--!9e;EDPZiPr(VPDD`QQ4YW`DD?ivCNmmjB88$Ug_WvTXDv1{4Aw`Y&9d zmVb|V`?s&#cwjA;kUtie8(ZG@FggkVI_2#8$wGA8o7v>8+`tY{vhL=c9gRJ zh(X%k_w0uqd~scdihO6U9+FFor#2d`*Q$z8=}xvD5?^=2W*$W9fBdw%q@l>Rn9Xjw zSlG2dW90|8z&?s(?KqlXFD|w`Q}!bfk=#B1NSn!cD4xL|=)`|!b9J1|j<}9@z;ey& zOcDih<|=S`_gm_7?N!TA4w?3x=8~m}8p@MdIYTs6_@)(xxT8;*F3Xc0mbH!XbfN`S zS$=mfCUdIP&vCmwprlZS-NV>*S)k(8&ilD^HDcrH7)*K{K3pkG=X|Pr3=^hIu)vRg zg6}1uHOXx!>EtxkAVUHW$n?@+V5IPK0PMpVw16MQo}XEQcUZf0n>1d=RdVc%A2az% z^_^X5Xnc09LU=IEw1GVa_qwwXrcC=(bu3D-!r-SgMfk3$0UlR-xj#%)~(c zsz!CcA)py^yHo{{U;Y+;O+0k!j{jFCdv!IO2<0T|5%M|j73@(*((>-J$@(*@Bp~X# zAJ11=SsS}U-R7D6kdcNmgel;Jo8X}PD=2&0gFwt)fxk=&9^hqoflG&n>lJNvXj*f? 
zcP*1^j`KNNF&uPBqihE~#48Q$6(@9l;lF|?699H^b;zSOPdBe9eJ2xp8tUBp9cr)H zQ~_3PWCWn91zkEODlPTB=zA0RqM*s4C4C@Kxuh6edF6v~S3u7!{f_yl+SU!-kVc zR1yRidp*$P{TIQr+M4dv3g8|qF5)_tEddQ)=|xy$DE9ARFr$d8c{8(cI(@b<>gP9} z_E2dt%T!tgFrLm~s%$+UJRHo5ZHbGF+zH5hpL;&OVLEbdEmJQ4nWLSi^5q}zRP&#U zhkxF*ImeDpQ5{cD_(AUSk>HC*s|fi!<E+_IXx+CNhJEvfgT!%{2UvIlGVr4oW4c&CZ~Lv1BYnphytWC+1noN0n`n#65gc=%r;D7 zYk{|Um&&GLsi`+V$Cx@=f8<-_nVeL+IF78WWze_WBLb*wr60}BE65aXwOQPNG(01l z(gWpaifP>PVDKyLXq1W~Lj3&;Zl0v(*m2a&$X3g~Vd^E!uI@?0gZ-lR37Sjtr2+1X zD5qxVQ?$2pTh8nLQg^>E7~^=|9%s-oyaQT?>@wus6n&3}Ey>}E3qS92S!wv>bLKuM zJ9URA?21bc(AFA>-~API0|sP*KIQ!iqFqKgsJbl6hZDq|d>7~rl#$Ha@EL_(obqdP z);+Fe>)nre&+K(=6nri0_I^?=awF9Vf`_pG3c3UX6U>~mfI2IMAJhmR zo!9ZsWdxrAsk=M#Stwqj_~|)%UUgqSnqA&n@QJh*W&Yo)Gyh8g{6FSD+8ATGS3EjA zB96}cv?`AyYw}P!$>7!AOW!rap2mD@Q{5ilUaCmI)lVm&r7=jukCBpgNk48Se*R@V zWeqBmb$aC>he;Fo|G$zlQ5%jBJRJZC;C`>!gQ|CEXIY%H!b3VY0A7))Y=WxA!~=Sw z8_iQ{V!vq<7Ses%eLZFd7hILWnlz!LHg#ViM^OBJy|#{Gp1?9qY%QF{k@qrQ$zjPw z=|Dm&cg#FcY0BN?tgU+0c|?sbWvmg+t1Wna^*thFNUApTY4>{16V<8-*Y~cgbeA6{ z$#i!(3y?+_b-Vor#D921q1X`Tur4}pEHSFs*82LF8cNd5kCbjkf71jfgV9|GI3~So zo(bcy6VR$WRvUM1-)uPTN4M<9Fa;;6$POKfj{=@dukS^9R&o(Mmm2$o!@S}EjRC#d zVW~rIMJcJvJEMK^qWS7wPbS3}8J}tZl(Q<6D@cF@nH2$P9G2_E%w{x1WX{PoZE$-{ z*v34Yvg1h_OZHQhVZ1s82iW#q3jJs&m)!d z=`DzCoaEZuDsFu*qYt#I)F9|mAv$$Q8O7;0vfV=#37gcg+?DD`TOR)!=UlelwW;%I?h;ZW@|IjkbybNPLzcelQT@X;Fuj zYVTLp7|E-9cDuO4-%T*_{XvJAK$_rNMw);_V*tm%WQjpW9~y%3Yoj-Y5~2(5Rf zzvpyI_g^4SZmRQE5u_0}u{vGQ=muT^tV^5FPC)thcW?Zf^Y(tZ&;Gs7Grmynz&h9~ zxFdt9(_`nO+-OAyz|oy-T+i3ZRJO5uTz;Ew=--}T3+g=3k z{|G>{SjZh{PQh+vCy*mtXm8Gq{t8+dheAlFho}LzGfB*zd;#4t0}QWvWe%e$u8@=!%t%zj3zz#9w)0-Nt$e30qmBQA z<(t>EWs}C*E3PTm-sZA=-t7vywGobcoYWrDzZh(wq&WJax8hB(FhZI1UhVScB%Fz; zJtyP$Qd&~Hhfx0EUbF}Uz@KL;H1xR(d>)a^pJMxpa5{lG|3evhU{R^g+3 zVd9%iQ-x$*Xc%gTBm3l>5ls&hCR_tAM^GoSV7g;(HJIuY^%pHDob;8ZoC{l5I)=TI zvi)HYT#T0>Kszvlomqld)H82^Wyo2Vh7`6jAuAOhS3;`NrodZH=^n5SUCJ7g3$=<4 zrbAAu0$*LLCFr9%Bm>T>qOc;xMxJ8I!=LM<>98pE3ud>(WZANmsIB%8G-D1Scw=N8 z4e9(nD28M56~bU#jSn!l^|#Vd^apQ(Tr7AYk>(R?#`KT)699!rCCWm%t!zvs%60cd z7|?^H2YktRBbtd;mQn9aNc)L->hcBkt;Wp!yeo4HEVZ*=V<>y$XAQZ8T~-7rDV;UZN0ZoiyOd zd08!wg&3#z$JH$$MbCpwa3eZ-FG*-KMW_3u+3-1{h6-9^dnsDApVC~1DeKXLyYIvT z)wX31FnJ&m04o*7N;Ye}_gm@0A>AhG=H%xdJ(v{dFHCJUbJa>y;yW)4;S1OZUe^p#;GX(GI#q@eu*PYx{IOa zO#^tyHF1@fojw8$I{U2x)vWXG8{TtCt#pYbeRJBW74oqzM7q!a@pQGMA*Uiw)>`XB zyxE7b7!0JOpk&GLH0J8XK?AwZ4OW`B9_Q?akBiSn#QUu^RZMa6 z!l#^8p6fT&*O!#Y=*fHmfu8DB2$C{e4*?!beJsEpa0cR`-@_Vf&bWSQke*xvehF(D15Og(Ul2cUdub9(|9Q&uc@+5O;#BjrN% z&0}&>4PEJv_8BoZth@xlJr>(p$W!R5P5zMZl*Y@>8M4C|#^|VIlWc=yF?5Xv3Bm+VY7|PwHxUr11>@wZSnk{yQ*0nXCrRHVz zi;2W0-Np4{!9*qi-r(vH0ikae#~{tHuxNymoo%T;l1_D2?3K}?GC9L>LojZbBlncVB&(64{LIc4_p0g<#ew)b24GhWzJPzN;XhFO~+FXWqX!M(@WZG|8&) zzqS&ZkCf<`kAwiq1eJBx0cMdoXLN2_XpE$VKmn4!-!!q9Ih5Wu%gzUxCO3Ei+1o?I z9v~qi7cJ!O^jC+;kple(aEpJ~=#Btm*>B)mFe|v%D16W2!fE~GmaDK4{z_2~{^DDL zVvGA9@5+M0Yl}^{M|V#|D3_y2zn~l`^0Y886Gojc7ON~-Q^z#>h2%k-v-s>^oB@AB z8lgFKyWr!px#bIM2|>7wR$bheZra!K?#jlVDYFUiI*}Zv0RT|Oqjs67i#b)ZXekwL zBRKlQyNJ5zL+Zn1#c-K4!Jw=wepK|8oJ#1yZ%Ar{REeL|l1^l^RC!K<(dfZ9yN|KV z^HzE84?i!a3OfnJ#*2?`f;5{IcWvE1<1FAhYLb z@_P-{7c6MF&9FM;bm0r6GcI-rB8woSSUvqh?I^_JgoHjp;~J2~Zt_e;d>tuZ4zMxM!Dt zo81I7m5Z)w_|mnBn16Vhmlt}MLW$?8_*My!vN!BeyExV?yd+`R8DNm4*5Yf+^oBRn z^=AK*Fz}srrMWO&u%2R9us6aI+tVdAtu4jq$8;MHufY2YOz<^l>>EYdM^Z%Hs%d87 z7b~ri7EFl+XQ7H@|Im1Wf19}d?J->3k8*K07P3`|Z_1|WGnjDukqWOe6cL05XWJEd9;5!zx0f<2VRg;n2nly1=aMHMsOj72T zV6*!^i5mPBbHu0k!Jt5w5^KcimJD2{u7pFH`Yuxj$Sv0H%a2b3r-2Z<5m-hQQyoi3 zopgYeA}l9#wXk^=H`~}Xytw_cG@&MLP5m<``8sV5)Bc4*0V-HNf)y`ROytJ0$GjW$ 
z4poToc1is>Dw`b9@-{AP{i`qsk#R8JX-EeNhI@p!$4uI!v7MhtJ9&G4M9=ODQqtk=*Q!(p z*TnCyY->_NV~*f#2+c_{J)W+e%*snY(X8)(Df~ihT%P)o^vvu`d(z97-3cLgF@YgZ z;Ksm=lnDOJ%ZwEcCo^k#ESH|~>YLLvZf-rFPHW=0@|t{m%ilzOfQgS+T?IB)`+GF> zKiuAaZiOae(3SDOz#`11^NWx@rDVtna}K@8*0kB(t!b|uIy&?7ITo3}K}U>{4vH)a zEYsE^uP_tt&SAU56bgl8s^C%!ZzbFWBR5=o9btc z)e9GmXaFvoLse(_ZIe3fF|c*XDK@7__4W`^#ghKE&!z9hO3OhWQ?qo>G$GB3wm^1q zvec|kL)lWlCBREy45mlld(A^L8*Z$A7663*&I|pQ?A?FE=e1;eDLgzA3{BNpY^><~ z^qHFdjCZZJ_MSTJ4;;ztx@Uq8Sow#jK8uaSL{i@ahDXAn#aWI`Y={!jz>{+t0|PuU zN2+31KNmZzy#%wc(a~0Fi}5QPQR}c-M_SvOO*vA$kYKUWcb&*u9b+VlII$K1!Xy%0P>uWnFjcN|6k58iTMtQ+M(7nf>A3v zIP@fO-*A%XNLljkBOEW2mk+X6&~omiXXzjmbUcUopc7)WV?rk=M3(bBpW_~o=iT%p^v?C7~=sp{A%W1 z5BKa$+rB3feeQ{5Hf!@jtsH8zQZdnzx@+YiRqDIOw~S4}(;>Xnx{9nrR}Ht-nE+yh zcR*A*Nx~O2lmi4x&tk_OKFZ&`Ezkmj*&0=kzf&H2Xhbb<5t>#Z^ONjNBl(>u?8}&z zBSq*<+ncQbNmqyn$ELj5$V-5dD?ME&9p+==?*%sO%k3-8vgWl0neJ`uZLI?@sIw9q zRT@8jAO1c>4V6rgeDCk|=ZXKrh9`dtCU0CFB6fOnN$m4Yznh@4v-xLCBYsl1rHa{G zBud;YRcA0OPIWSKHWC3Dn}o)wE02Zv=1^0z9v&7P7XO8&jS>J@;epmE@acY;ygUI} zz;of`O;xz*1iuLiiHR-0Cou8KJVY$D=*xI@?MUkw_*TAbxuvLHb%^Lr-jw+l6IXyo zPPJ$XePUMr(hZkG+oX!RDIXY|>f%wy9aKc|e$ZniE^8s*Vg{|f;0!9QnK{UHXxKEb zCm1(OhluUqNFsl^wsq{!i~k(3|K5|N*vBmMWs?X+{g=lUm1i@nL&Sl_S{!EDPB5a< za8|Fy0Dqt%(Zzt#kb^?%!yFK;_XVfL2O%J7{lA1_l0ZiqKz(*1&?28#kfhG=zn86S ztm#J1>COBgF)Lbk2G9+i@=L=eBlnG{gq-9FcV--C)S2B_-#7;q#MOdNP0Npc!{zdh zff@fi`g08a&pgNnCJCCgDX7G{Al(gg<9ueVDQ;xZh!XU39u}G`4pEqdX82G`fcQA_ z|H2^omKp@|Ed*MukN^=B@#jR$4-);*rKbhb(SkbWO*?{XO%_LtzMr~H2bcZfcA@&PiY z9Q{zo{G3yfhd+PBpBXhIPz?F`L;g*%J(mLbWuwc5M7RHOVg29C5B^8WCI64lrM)(a Q5e+X_`$G&2^w+8X1u%?H6#xJL literal 0 HcmV?d00001 diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..0645534 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,33 @@ +[bdist_wheel] +universal=1 + +[aliases] +test=pytest + +[yapf] +based_on_style = pep8 +blank_line_before_nested_class_or_def = true +split_before_expression_after_opening_paren = true + +[isort] +line_length = 79 +multi_line_output = 0 +extra_standard_library = pkg_resources,setuptools +known_first_party = mmpretrain +no_lines_before = STDLIB,LOCALFOLDER +default_section = THIRDPARTY + +[codespell] +skip = *.ipynb +quiet-level = 3 +ignore-words-list = patten,confectionary,nd,ty,formating,dows + +[flake8] +# The E251 check is conflict with yapf in some situation. +# See https://github.com/google/yapf/issues/393 +extend-ignore = E251 +# The F401 check is wrong if the `__all__` variable is modified +# in `__init__.py` +per-file-ignores = + */__init__.py: F401 + mmpretrain/configs/*: F401,F403,F405 diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..e68dff2 --- /dev/null +++ b/setup.py @@ -0,0 +1,198 @@ +import os +import os.path as osp +import shutil +import sys +import warnings +from setuptools import find_packages, setup + + +def readme(): + with open('README.md', encoding='utf-8') as f: + content = f.read() + return content + + +def get_version(): + version_file = 'mmpretrain/version.py' + with open(version_file, 'r', encoding='utf-8') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +def parse_requirements(fname='requirements.txt', with_version=True): + """Parse the package dependencies listed in a requirements file but strips + specific versioning information. 
+ + Args: + fname (str): path to requirements file + with_version (bool, default=False): if True include version specs + + Returns: + List[str]: list of requirements items + + CommandLine: + python -c "import setup; print(setup.parse_requirements())" + """ + import re + import sys + from os.path import exists + require_fpath = fname + + def parse_line(line): + """Parse information from a line in a requirements text file.""" + if line.startswith('-r '): + # Allow specifying requirements in other files + target = line.split(' ')[1] + for info in parse_require_file(target): + yield info + else: + info = {'line': line} + if line.startswith('-e '): + info['package'] = line.split('#egg=')[1] + else: + # Remove versioning from the package + pat = '(' + '|'.join(['>=', '==', '>']) + ')' + parts = re.split(pat, line, maxsplit=1) + parts = [p.strip() for p in parts] + + info['package'] = parts[0] + if len(parts) > 1: + op, rest = parts[1:] + if ';' in rest: + # Handle platform specific dependencies + # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies + version, platform_deps = map(str.strip, + rest.split(';')) + info['platform_deps'] = platform_deps + else: + version = rest # NOQA + if '--' in version: + # the `extras_require` doesn't accept options. + version = version.split('--')[0].strip() + info['version'] = (op, version) + yield info + + def parse_require_file(fpath): + with open(fpath, 'r') as f: + for line in f.readlines(): + line = line.strip() + if line and not line.startswith('#'): + for info in parse_line(line): + yield info + + def gen_packages_items(): + if exists(require_fpath): + for info in parse_require_file(require_fpath): + parts = [info['package']] + if with_version and 'version' in info: + parts.extend(info['version']) + if not sys.version.startswith('3.4'): + # apparently package_deps are broken in 3.4 + platform_deps = info.get('platform_deps') + if platform_deps is not None: + parts.append(';' + platform_deps) + item = ''.join(parts) + yield item + + packages = list(gen_packages_items()) + return packages + + +def add_mim_extension(): + """Add extra files that are required to support MIM into the package. + + These files will be added by creating a symlink to the originals if the + package is installed in `editable` mode (e.g. pip install -e .), or by + copying from the originals otherwise. + """ + + # parse installment mode + if 'develop' in sys.argv: + # installed by `pip install -e .` + mode = 'symlink' + elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv: + # installed by `pip install .` + # or create source distribution by `python setup.py sdist` + mode = 'copy' + else: + return + + filenames = ['tools', 'configs', 'model-index.yml', 'dataset-index.yml'] + repo_path = osp.dirname(__file__) + mim_path = osp.join(repo_path, 'mmpretrain', '.mim') + os.makedirs(mim_path, exist_ok=True) + + for filename in filenames: + if osp.exists(filename): + src_path = osp.join(repo_path, filename) + tar_path = osp.join(mim_path, filename) + + if osp.isfile(tar_path) or osp.islink(tar_path): + os.remove(tar_path) + elif osp.isdir(tar_path): + shutil.rmtree(tar_path) + + if mode == 'symlink': + src_relpath = osp.relpath(src_path, osp.dirname(tar_path)) + try: + os.symlink(src_relpath, tar_path) + except OSError: + # Creating a symbolic link on windows may raise an + # `OSError: [WinError 1314]` due to privilege. 
If + # the error happens, the src file will be copied + mode = 'copy' + warnings.warn( + f'Failed to create a symbolic link for {src_relpath}, ' + f'and it will be copied to {tar_path}') + else: + continue + + if mode == 'copy': + if osp.isfile(src_path): + shutil.copyfile(src_path, tar_path) + elif osp.isdir(src_path): + shutil.copytree(src_path, tar_path) + else: + warnings.warn(f'Cannot copy file {src_path}.') + else: + raise ValueError(f'Invalid mode {mode}') + + +if __name__ == '__main__': + add_mim_extension() + setup( + name='mmpretrain', + version=get_version(), + description='OpenMMLab Model Pretraining Toolbox and Benchmark', + long_description=readme(), + long_description_content_type='text/markdown', + keywords='computer vision, image classification, ' + 'unsupervised learning, self-supervised learning', + packages=find_packages(exclude=('configs', 'tools', 'demo', 'tests')), + include_package_data=True, + python_requires='>=3.7', + classifiers=[ + 'Development Status :: 4 - Beta', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + ], + url='https://github.com/open-mmlab/mmpretrain', + author='MMPretrain Contributors', + author_email='openmmlab@gmail.com', + license='Apache License 2.0', + install_requires=parse_requirements('requirements/runtime.txt'), + extras_require={ + 'all': parse_requirements('requirements.txt'), + 'tests': parse_requirements('requirements/tests.txt'), + 'optional': parse_requirements('requirements/optional.txt'), + 'mim': parse_requirements('requirements/mminstall.txt'), + 'multimodal': parse_requirements('requirements/multimodal.txt'), + }, + zip_safe=False) diff --git a/swin-b-test.py b/swin-b-test.py new file mode 100644 index 0000000..1cc59bf --- /dev/null +++ b/swin-b-test.py @@ -0,0 +1,29 @@ +_base_ = [ + 'configs/_base_/models/swin_transformer/tiny_base_224.py', + 'configs/_base_/datasets/tiny_imagenet_bs64_swin_224.py', + 'configs/_base_/schedules/imagenet_bs1024_adamw_swin.py', + 'configs/_base_/default_runtime.py' +] + +#import torch + +#torch.backends.cuda.matmul.allow_tf32=True +#torch.backends.cudnn.allow_tf32=True + + +# schedule settings +optim_wrapper = dict( + #type='AmpOptimWrapper', + #dtype='bfloat16', + clip_grad=dict(max_norm=5.0)) + +#custom_hooks = [ +# dict(type='ProfilerHook', by_epoch=False, +# profile_times=12, +# with_stack=True, +# with_flops=True, +# json_trace_path="trace_swin-b-tf32.json", +# on_trace_ready=dict(type="log_trace", sort_by="self_cuda_time_total"), +# activity_with_cuda=True, +# schedule=dict(wait=1, warmup=1, active=10, repeat=1)), +# ] diff --git a/swin-l-test.py b/swin-l-test.py new file mode 100644 index 0000000..8322aa9 --- /dev/null +++ b/swin-l-test.py @@ -0,0 +1,27 @@ +_base_ = [ + 'configs/_base_/models/swin_transformer/tiny_large_224.py', + 'configs/_base_/datasets/tiny_imagenet_bs64_swin_224.py', + 'configs/_base_/schedules/imagenet_bs1024_adamw_swin.py', + 'configs/_base_/default_runtime.py' +] +import torch + +#torch.backends.cuda.matmul.allow_tf32=False +#torch.backends.cudnn.allow_tf32=False + +# schedule settings +optim_wrapper = dict( + #type='AmpOptimWrapper', + #dtype='float16', + clip_grad=dict(max_norm=5.0)) + +#custom_hooks 
= [ +# dict(type='ProfilerHook', by_epoch=False, +# profile_times=5, +# with_stack=True, +# with_flops=True, +# json_trace_path="trace_swin-l-pure-fp32.json", +# on_trace_ready=dict(type="log_trace", sort_by="self_cuda_time_total"), +# activity_with_cuda=True, +# schedule=dict(wait=3, warmup=1, active=1, repeat=1)), +# ] diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..ef101fe --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/tests/data/color.jpg b/tests/data/color.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f19ebc6c6e867372f61dceadba4d66de46e31ab GIT binary patch literal 39779 zcmbTcWl$YY&^~%_2|2m}l68rOc8UL;okRF#l;d zSXdZXcsO|Y|2Ywm5aAJ!5a8hvKOrI^|0f?;D4&o~{=4~akpJxp4F>}Qhl~J^@IOud zKdJX#00tt|H>gAyC~^Qa1{4ei)cYWS^utcL|1s~w*#8wMXc$;Hc!Uo#K7BN3LjN!y z2Ij+PxDSgzTKj)I2f$*$VSZ&5fyYudMj&_o!WNvAk4Pa}*N3e#3!-ErLALXW^Q3=Wo_f?=I-I?Wol@%US zL>a-@8H=1P81ai}Qhr?@5(T>o2;0PE78!@~`!3b>f6)F1+5bCWA^*RS{a?WTZ(M5t z6d0(F$%DZF2mxM^e!m%)K*dABr}?i(Qk?yO$$kguz;BMs6PAC$p*+~XDWuABHJYJl z0ke|D=<0889p7T#Uxj3{=jVSOLlBDda|dN7Rfi+s@h1#wF6BVCQHcM@VI5iOw&j&? z^gl*kP%apI)i}Lo`F!cZFzV;EVxsmS5ayE`&EyhMJngf}^OBSe!~U9Lu6O_}9U1(7t( zfrpnLtAhfrhg_3T#^$;2YE(B$u3++Jy+m^Qi@K)o9Uo2wD1P=4mp`6Mvc@-|xzct_ zWkIl)an{sP2SSH!JLmc3>}F9$;l0ib*p^+^(64z&S}W58nLC=v*t22I^f*A53jgl^ zwvI^hHwz4Z_E`yzf_kh}KKt8B@9~1N94&DTZN{gwJCX-qqw6Q>vZ9;D|Hv^3niIg= zKjEkph`N_1_J{yv8h6DK)xg_MRdw2a14Fwg)>VJGR-2=9<+CSdvx+D1NAK?9bl&sxtSIMZ9wpnUl=wGAVC7YfjQqj`o2CThkoxFov5FLwR-l`Ww$< z@|yKC&a#fj5J}$U&#smFld`Vu2PA^xRiH7W668*jm94;4Eo9H_r zk)&T-tcyy$9a*kcQ4!xjeY35|8DSJ_0+A=50)QNwGG^5_Fr}256rinzG1>zoQc$f( zX9Jxb9;)zu^wYV&xgv(?df&W3@4h=DJvB;)i*qjp2kNU1daHwPr(^tYAO42F zE$1`v0&zvpSx33j+=YHlzJmzvtt=|)({{0?o@g|S6iqin`C{Wa>*$%*vIi^?Geuji zL7Z;kWVymr0~_8$_YN_G#6RB%QzHpNL?*RJP zaET4XN=@`+t{pT58zl6LVF_z|?PQh>?*1}h3Ej2M=2rLo2$9Nq|0iqC{V>svuk8r3jig!J`WH@~XMEo$6R0kS4=R{!N zdQZo`1mN~C#t2d+urx5EKplfVd`9nzg7?{!4=}Qqv-FS$pD6ybH&;S^)u8Dq!)U+I zmX)t|+OG_u0ZZktKvCO;-L|*>pn+H0F+o!Hq8$=IAIC{^ScVh4S53BljcFR^Nx{Y= zV@^c;YPBNjmQXg25SQ4=T-_R{#Y8|;d4Vn|hx88I$(1D1S_K2U-iq}zacb%eExdCg zyC%giGY7l9dPW!{Zb~!fDZ`)1Qd7zD0U6;bZsdWS`)X*vm)Vkbb2sq$j=U=e;V!dhp0HKo5K0b9M*KEEpz?0DDhFhfGqkEsyvDNjLqD~J6_ z7x!tjYG)Q5MPjggAwDafQ)^*b4i=LU3j$EYe^IN_Ux5^K|6CW7(HvY zeJ9pTwS z`zYX#=#k}aA_;pWO9}jw-n&}slwHfAjITEr{0M%NQex<&lS|ZfFMI*Byf?p^!2Zq& z4+qL`){;$l{|>Bn8LUZrpDKqkjp|Y+)M2cTv8Mt3_N`Zj5~qQGffrw^8tOn^3E7v{ z!$Lr?8jc}Ifpm@fBD6Winnag><5owCFg*N55^F#+t%3#%l)Rd$z9AH8q*=Osa`Aw2 zF)VV*tEmmTdO!j%=1C{pa5m>LpoAqh`LIJqdN7%*raB!Rs^D3ApM8;^^u`-1qr3RN z`=frA{PRiC^>9jbGA5i#%79gg<1czD!=f+pv)VhLPY6P7Ab4G-!=(6hrKkv|AssyX zO=gppM+@0~2M}zfQ6tkyM=>-UOsOiMA|c&9)Oe~kYMFn-wYti=fAGr%{kV#3;n}Vg zxw%IU&0Q+r`WTY7)4$A>D;h&aARl{MicpmFOT5j3L!;C%#yVMw3Ce+sv&zQWr7&Ua zZS3-D(nkk!ezzcLeoHLNJoJPU)m$(o+Mu-v2Dlx?v@C&RrU8Y|W?qVYU_qB+cOu3Q z1@7!Zsj1cY@w0!E=Q{w_ADn}VJ2B88Tx#v)^*r^L#&72ev)? 
z?kb0*b2*laJpv=%*AUMQ>sC$30AvPSM?X)WK@eZAm&pC9WjBE?rtg5WMI@n}WYR|7 z5c1?zP=H}-eV|THjq&lr?5EF>io2G>kM!81K%dy88IN)Y)FT04wfMN&a*+#?Y)o7G zoN6Jr-3)>Z=m70vfg8en`k&(rk4B9yYG#8vxiF-C<#`7ro+hslw!IGd8-lhW!dC1f zq;uV_{PNXkt-HFk%?8W258hD3@o2jSa{DfEbUdTO3`?D&u^?Vkd?JFITs-S5%2&ER zho*g_WYvX7cDd=)WEyawJ-!p+OBbs_nAIq{&yGo}BaGydVp^DPJXffG@t=!)rCOVT z9DJ%_A(+gQ!5V8T!z;gVlq5q>HcB`2o9)(x8cHG?FYUdUGJGb}p_ zHnLn!!3(gr@(@3s2A4}V%jbdD?8iW?DFuEF`ySr9-L!Y_#}XN%|5hT11WT91Ga*qGlrr<>V&R9pM1dO&7G76 zR#=0%piPppf!*zP6z{(F=nhtJcB1VhFtSNN2i#XO#JHaJWE#SD009uZVC_7cJ2)+WtI&o?UtBL-^D%oZe^uyW8S{upZzxMv?gvxTE;`WgmhhUjei(wBF)e z;K%Y7)IBE{j-X2IB*PSD)6F0kRdBvF+OJ_XV6>ZScaV+E-S7`5q%T&WXU`NO_oD)_ zKZIW|jdpT?q=udoG7?Lc2Yx#c#%5!XYy~~m6dXUGzWs&qu7gZ>(?SUgnw|_HQ1IhW z+}5t3#cYFA(N-qzxWG^cTUKvq1xZ1V=MpbA zBjiVWJ0L9{TSb`nrsDWXr80U-&-*Q!DTfpW*U@U4Q1~r8&^k(=P@`pzF+9NO zIef3vhxYg#P^~YS`r#Aaz)jvw%$=UaIvt)jly-siFP8nh3E}*fyAwhLV5~t-oy#P@ z;`1hXdbLdac}g?vOGL8rr(fZ)C>m|EC=O}2TH%r^>lr@BseLf5N1d5%!%XZp$_;$^ zVWn_h9W}>8%|alB1oZRi>T6|b$XCar6kzi)B(<{m!Ynwoex_?yXzFY`I#1(Vi-)G~zp zvN+aYb!1a0V$XT0dT95BuMIn{EiBtPbRaM%*MOdc3!9-_CUs0f04=>L&@3UW-N_Ct zIJ?He^=*98`JSWB{)OlDdjpvc7-uvVrtPaS7u~`&5XZFc zFog+gF_5RdURGZi|H67EHsCE3sg&t=nB6-61ON4JW3uTRfMuGZS%e7Evt4neI z(aFx#cbUc&(JIZLS(ub3mD14f$2{??_@!qars`i&*n-CM#J4)b_QNOXV~yudxFop0Wld^|Xu0w9f=@D&nb8;r zCFEN*&RUrmniCY_-6Ve@jP{L<6+=dJEm%7lN1wD`qOtrhQ9Wn&DP{;@)iWgdh(J>L z`1b=43h=L`?e6mly{AKAW z?cmu{4el-0<&yMh1AMY$v2+>jW?5n}d3yJA-vR2n3G&Gv%4BHMxDM=U#b5C;tpC=h zsZlnwv1;x%vy+mS{w}!VmFih;6mncx;fRC+(4O2BrWAcu&3yC%gGc zk>RXE_Uk&IqX!+76y$YZC=o7Y@_K2%$KCXHefK ztJ%LN!LMOnJRSOqjk}f}vxT)Zmu4F*!h5ZZCX19Q%%s-@S50?RT9qUb(fiCHX-_Nu z=h>Rm^PZ^*71duBqo4H~ofFgdTd^x0VcJvi`k^OfV1n4ZIW&eTt+~>MXJjZ}H2L-% zF}Gw~Oe9y)Pe@zJA30_{sm$As7HUxh&(DA{cX9NLdpNCQ!`eY&VD}2K{o|NMOS+;> zO7z-K1T}yW(JDl9!Bdf1o<)A~t1#z8o?d0Dh`yD`|o*h|C6I#F9i!8$*<pJAZdmPCTM}Bfw zZP2e(V`@Z#zih4KP2_{Ioj(E_J=s3PasDZ)_+~F7DE@F-32Rm>1UYoxxwodWmxSax zpl7!u4tKa4rK4-k?NF7{;y^;+r#(5EAifqUqh~#;r?}@8t&_gLjurvVvg>eVLRD&E z-Em6*Wp~@3ykCjkie13^aXm{C7xEnB)AMOTG9mN)X-o z!d~yIEMQ;v{ir`G=3{AsP?y%+Y^r;bJmYPTep3Ma@y|91^BJ*pz~Db zFuuj24Km}{V*cwU_Qtf8h>=3GQy&^xH#OX%&rJ%LK#3)O2sv@8=nyx5DVu%UiXb|p zo)N^PV(rZYK6+#?j&{MMkBVGp0N6zw^t z!2K1xKS>dnYxw3d#3CqvyIF^YPqH;vgz8#iE<4dUu6nPceaLlBcsX)XtCJWz7zeK< zDo^(041ZNRu*&oeQz)J91bMt{UtoY}266=6iXlW`Qjr;ACU^Tlt!BB7E*|4X?emKq z1}j*~3FZp9EML9c8mhJ@(<(oqCAXGl(BzuHXj`WtTR#tliMHIY zZ#|EPDFAJ%eJIWqKwg--o4l&l@~at{-&n)raH9HZ&`X|HT59qY<-+lXP6a>})tC)Eeb zx5fZw$Y6j_j6O(U@6-Tq@j$E=>;h4|(=8ku1CUa?;ggHw_x>9Qe!+TS3z*qP6|!Lt zYtF?CAl}N?ro7?s$Jw2&{a#u4XGRvgZ5l~x*G#F3y@vrTvDbyA*V)|`qcl@f{Z$)9 zWyC3+?v^y$2YVx~Wd~E z_?AuF8uv>QOhrJx&qMDQJMgVECf~hib~}7ntyojuB;X_tM>1&2hTJOs@Ii2%w0w*R zW4;uDEVQpU1Yd1WLaAaBU(_toiw1#bM3iS(ijpW2^5s<&6fyKc+UVd8DFRN>oD0Dr zzzYl{gsZ%-8C1;~P<%Au;+Z^OFh7ZrP1wVpZi-=77FFU)styd@K0wd|9uz0L+sJal zyV=|}@o&0jFL~%{tj@)hc$0BANE571VcZC zuntQR`=Y#-QWz>WY#1vOS^cQQrwvfzOz9K29zPC&5{sv+|8NgsnbXGP?8dQX=R!;( zO5S7o>mp}y-GdG6Y^C*oy#Sv6`&S6ZDUBrH5dmP`K8i~dF{X4>yPWGyb~clgVL;(_ z=qWG33G*HJb5(Mroa5T*bwbw&Eb31mpV<5h`IZ}+Oz9d6jDmtf6V?#tB%sFV{V98n zaDh^Xn8H~0-~%PBrEmnkhVl&yB;xfRk7Fuzs484W$Q!>GMG=Ok_UU22jL=g94L75h zL5lqFXCU1OKggdmhcC5{KUA&y2hh#f{VD%tJvVqh_W3yb?KAG-Twf)fqM00_DY-0) zZy})Q7Hrn}#rd)lN2vBHl9z>shYef{+Op zMwae+$hJh^eyQP&^M&kGWf8|@JjS_wUc0t^zMUna^sAGEe)4RZh%=D{l>H>aUx<2s zjn(zq2Hr**Tmwplppx(L!aQT+EqwxyCrVPvKfpQPl7_TPQ;kw%i(F}<2#Y3yv%zqw z@CiIqQjwC=Y>w+C+ES{0T+>bDMQX4SIw}{lA*AO}C(aRjv02uX)XOsIzIwk6-b5$H zN|c`As{sp--b2Mz(bzHLBrY8492d;?T%#$Hky&s?I*B(w8l*{fDTV-P&TfD_7Uv|> zhlPD>(LoKzgz8*+*jc@LKc`>*hI!mu3ZbXi3dc)3*{a3p(0S%%VUPLZli|053&$*m 
zv6CU%$~?UI-%3bK@Fxh|0%3~CRa7KoKI$}54R0Gk!NYjZ8cm@i&JLA_040~rK-mf( zt4FB1whURRQG>QhCBN-J+uJ|HuTVN~day6Daw$j*7JUckf!uP)T9wF?Rj{LY*0NLd za0jcg?~a*pVX~diM&B4td*J=}{LywT5()`joQns({y^ayyKQ@QHjub(>7C=~Ts|UG z|5aZ`Lnn)Z5fz;%*G#dp8aPSonQ?R$|vO1GdP!>UhmE@PqVH#xF<=pQ8L_r*{tdVyB6i2lQP%?gAb)F+jWA@S* zdXwt6k!QFXW4K4cQQWGcfS_K|1u6V1qMu=jm)JgTqzX6rd=o?ESr+!s3C^1a@oqgY zLb9hl^@KyOS@ruC0-*$clXiieF%2P^bbpF>MVkCS-upoUzmDvF-vF`DS|r zvjqX`!ka2wskq>Ua|7EWuDcY3a6XZ*4KD37KmjvxXnkwqJJU_)|FN`wkhZinL-BP( z_~uc}DTHPF8Z^^hC+s=^h8YPUU4-z^3}|`i_IYBoHrg`{PVsF>ubYV>t)+}dQT6pg z_IDCkXS?&kxmDnpenTXV+6&!KQ{R%6xB4|=RO~cb2iUZDvj#S3m6Z)$sjtlz<5`Pg z8OeMsTipb#seJ493G`i(B?*Zmp$YxFd+S1x%WGk4rzAM>d$aC5SQofknyWPg#Di#z z1tG+R zV~YXup0fQ!^1&sF@qze#C>y2V=~43@$+|UH0O5HUygn%gNEo;_HRGBQsHPi zcuCjgc0D7R0|dYr7xRiS3e8Q?r%#urD9qF^W`I@R?9ijN30{ZM^_ThV7NPW>mX@F0 zl#uz{CPc7rp=|@lR%w+rfkD~B_!)kLl1bubAy&Ql&D{&OpFQfVxTpiF;yLgFX}8M* z;#+NRKQ8h)ba?HT8CsogdaZ?ADjwMc$^#B13taQ$>L_(L?ptgC8a+5-NRaGWPM)Oj zt-%}aV(1b_)rx7j5cZ!(-Dk`1&F6KY;q041&?D`Q!*dEKLZC;LKak@M^HrvP=Kh0XT|V>7(^5p7{j?!!sxH~;hH4(B z?$JTYw1LjXzd}yrFcd%#u*vKG7|yT1S1n{Pdr__L;P9<+eZoZ?H-4>U&75|G8*y@S z_2ek}`gA^`snJ7#U%)0BEvxOS2PBcZ7Vn7SC%+_Q9!LNVQ<)eJVRRfwnfjB~KA0jobbCMRVsW^m{Irm*-Hs zR4jzp5NKTU&&B#t$Dt63yE$ix7I&KL`UYD$w9njJ$fw6e5c5?DbUdQ?(|`cHxLe5` zFtle+=djl(BS(pgj6CIl#6!j=hfY(j_fQ)%T{g_IDeb=TtcQ|SIBwS!D)K$4(}=V8 z<|mJm2;4)`jOAm6Kl{}WX^PfHS4z1~+;0^4n+aJA=fWGMb?~+NqRml`j-?7P^}}D2 zro|*(OkLjr&`)S}MJB$nNH8hnMkdf-Pg{PxJ;JhTiR$ z)ueeHVU8jOW*w>QCwgK5^T;4ZYP|6gN4iLc#{M0l&Ix7rXNdwX^ptJ;#;X)xA}0%N zp}+Va<585J0gwDYlgqWAr{}^+&Q;FbLa;v{a47>15re6oG`aE51=EE_=l3jABb`Hn zj){RS<#&%$3>yS>)Wxt|oN>O$HKYg5@k-Ar3iIhrZSypIEli6vOC@p1Up3(pt7FZJ%M*^sTfos8b_W z6Bl=VdsO~kp^$-1;hK!y-Vs%Qvs^y^@miw;(-O43qJp-u#s;xA>s59@s>T>~lkgnp z7w7q#%2`Dc&fxe_17i+fEAMpK4=#h% z>UxRh%AO(xfBpM)7)4xO)j=5gG}mC<&ectV1KExdU}Jq+Ug?3KU6<6OdkH0SuZ`~} z+3^ZtE`zu~2=%#C7w6sF_|}&ICEUjap1bVl`vDPVbK_iN#pg*cTpNpEZ+ymK5{lK+Ls8`AfHW%{wu`K1-Vv zni#7C1U#xK&D#_R^SN`K%X>W`rk!&|H(6BhE8t!-vJujRGlRk%oKPJQNkl;X_mZ;# zj|PgcCzqwD*8OV^u|0$cO5PvCUNec8*v@19d9c3swSuy0r!7Z*FAY+?q_}=GZ-4w5 z@cw2ch{Q|qJ=lQRt6!zS5yf5(XegTo6XhEPC)YzzX7M$_)4dW(F4KJtl82WTzvPXod^wY?L&07hC z7(B2?ZtIf}W(O-uw!E|)44W)nA|FP2fzcUTM4yjDhU_FV%5g5#D0ET1(-Y3?B3I_+ zv3GM%;mx>2sOx1w?ZhIoHcs?;z*!kcL%Hn9Pj;p1Fp{>bQrg${?;U_3WD?2PX^s&4 zT>!~_r#be~sSo>rO>~T53}0MSXYB{#_4iMQ0H`&XX5WwPjbpsT$oV02G|AH8)z`SZ z96AciKLvzx-{yy5f7F>~Krv1)tJcfsW|TsZjbB7;+9x#yNkDH^+eVsl>>fA#iRH|F z$(%~1$L9zNtaDv7AJIWb{pmKk8$)hbZlhS}?p|gEgshS?0ty3{IQ;*VY z)k5mq>y~;83Ba`>MRq(&y&rHk9*PSaWwLCF*G$$`kCuK1c)G%dSgj%%CKWj0MX4>% zcEf@51ygF9BPy(v*R@WjJl0agn8WB(FWZij?~PG*W_N4!NCepaSe-Z+gjIWua8lRGC9loq5PjY9y zX(B7k)b&H*Lnwfg68?Wa(q5IV5nnI<^Ud)KR`S&Xqgyj7`qUs5Grnuz!C_u)Q$>lJXaJ>n-5zd#A% z{b%|`rX(?N%mpT`c)MaI5B%7;(nMhRg-6}Sdl zblblbcH);>=X?!;jM=L^Uz?}9xQG#5;gB5ZVgHl2Pij)A(wjeIJ%A0op1bKaj8Ev3)?Tr0o z%jX}uwyrRa`3*P-KU0G2pmC7aK2!f5f~4F_2TeT1kD}!cA-8zKOCKnF0i5!2n!QpBv8fo%TUr6 zG}IZCUJ8LJYKIIlJ`(nb{N^C3!QUYHGO+L9ry8p`6Qp3HtZzm!+-L+-C^0#EI~W5f z;M_PS2=;#!6d%cOupixi%sOE$x-H`MRB`%1V~r>~_0Y&=j8XeRx$@t-iRkMqFg4mm zZQ1Ha53yH5;)6LI>n+ypT^TYDC`#X&Z!K^Luv;-A9bowVTa@T)=Q|R5Otqzv>hRig z&CY-c8;`okk|25+8xJXz{5CDpq&xaEutlYC%8x6Og5osp6;6b1%D%fX)dat#Us2|i zlHR>hh=ZX3`T>^R_IJQ1%N9xOSmxA1w=NZYu{1raO##E_*g$>1E# zd{8*JrkDa(X@dLn{HLbAM>6sZD)>x1<0ly6joZ02UutdN^ak^?o?EomaX0>AU< z_;m#maby;MxvzN7>LRou5?1lzCM;|RTx6qhcl3h+@L5fFY_jJ`d%oo3q7!va$ryt% zjdnC|2mNmv4VP3h6c)mw6}gOtvE{HRxhh4qi>{hin>y=L;k&{~xDH6W_&=jYUKoeW z6e)kF9j$F5iAJj;yH13#`R59SiDyZyl2drBZ!-0F*!O+|Z?GmMc)-^9JIgn5t|zO0 zrJv+7a}83nJXOsO^tSogvdqXP=QDxD7PjJ3PWhK6UbK~ByA+$~_!#k)^ 
zEOwzm(+dR+jwC#3>@oh-KD^iZwr6#zOgUT9M$z3MvTz4o01D~ZJ$1ELyl$b(a^EbH z>JhYaN;cNj0@pl;)CKzp>PP;^=hvW_YfFgmJD|H>2tyCJH`7#ZE7QU5VExG<1w|l= zh)LkHPS*<`xjYO}#o_PP4=#j-h`z*3d#=bn|< zH}Jj-(N1AzE1!C@&OdB@iG50%$+?Z4s%<65V^uy2;nTVAXO_LD4k5@NWV z2P%GP?Jnw7jQI2hVfXe*3AO@q1A64FbrRBW$l;d`wNT7R170=mP9>e&`PUZcm9k=| zf@(Q$_LLRY6Axnv6SlK3?eh=-+$~5U&_CWemR1=X5m1 zWItY;#Gyig@hiH+=cq*)`^E}H9O1T00WXQ)(EJH{2YBrU3erQe_#God0j8{s3zB0rbVPE##J7FOhag%H5@-4aJ`M{N zOK!t7LeL&uh#nAy`WwBMSRLfXL4G6qToHjg>1#_VE3)#NSajd`X_^Z?lnZed1kZ2B?2pu}d$<;&(9 z|BnG^36jjc4jsPsIA7g$3K&cwKOnA;-Pj8FyLR6qNim)!bB{E5`TLzs5%OpH3 zZGyBz@QhN=*6B+k$optHvsU_w$)^Cm-#NQ5?bB$eo#9a zxoQ`Tx7Ma#d0Ts-{DSyu0M@=H9LR+~_j7)$^exXvvZl;8_#rq<`!jhxz=A9gxxqC= z>TLNOX75Zfk`(QVU%q%1gK_hI@%5UL5TiI*k@WzX^Z*su6C&bnz@qCp9@yl5SdcSWGYj8!P3bf4H03!!zM4Av%T3VwXD!C|z}cvjH#L=`pey6L zWyo<=7xJYg1e@yTdY`44d3!cgYke(cqBmuzA^$52m||aOjc`P>8y6%>u79$IQ(o0I zwJ=O;+Vr(E!V}~4rsuR6*COS^=p*NSD6{J!iCK+)q#Y2J=x>Lc}p+YT4Yqo#i0N>NZ;7uHYQ`yxV$FW6mZH zgojetvr|)}21`K1J>Cj@fDhvuSGu|P=aND*PQ-NDC-c@Y4#(Bjr>PMdIx-1mG?Wkz zh-5wnR9BG}5VkqWOK~{XpeW=|5C(gHevJF2XU!sIZ*2__j}vnL@-Q!3HOl6dq<}yJ z4UdpEdW#L<{{FOVSc!dq>qu`YVsK7&!N13bh8va(eH2*OE5l$6rdui%)w6xE4;uh-x z?TxSy8;~?OJODedTNWF)O}Lczf@tXbC+2PTdUBddngQWpWR*&7hTL>Rf@(o_Ke@Ix zFz~wj^mOmW{nPOloz{+uOOj;z4hl%HC_D2O+~^od`ssuBM0*t>P^L1rVGdQ|7l0KPiIY}y=LVpyh zCkXp9+y;(plR#)*f6RZcMe^!VZfTu&BW4m{9%!yR`78d7QA0OMg?1BNaGUee8}*1U zQ+_JBG66TNf!k7BaVaORtL5c|P%}H#Q{FD&U%>uIrqp0h7(w8mAu_v^?Oq%v;frV? z!g!YXq|t5P&AI~`&smPVl)F!gZStxPd|(5RC^$VSEEQY&nPgz8^$JYm8PcTh2|-#1 zjSL%nyJk#TEe*Lak!qP}?ACe9pjcnjmev~M?h{KItvRH?8sV(|Ld}__s7Z-hO`;&g z33vy{KDFQdp}$GUxC7-1P!2XXch4tXG6xe9(CJ~M+^{p;O$+KbSwJ}APjwL;#nKJO z+1OIQ{(=`vd8}_0(h$7Qb#|xg-l7r?QQy)9o2CNr?91m3_u3xF|c> z)i$=DXC-GtOuT_Pz+|7tgn3%DSU@eimZLO#`WXSln=sm+M{RqvX>3WGp4aYlflozo zzPw8kE8WA_y*5=dSg>b>*=wKp|?qsrox$|bez~L z-ID|xz`?licTN6b!nKPlCFyO(vy3#9rP9S-r*~fWH%EOK+q7uOd0Z<>p8cCrX!LN` zB8{$r0ugaN8;WZy%<~1$<3r`L@xFC6a|@eT>-W-pM0ICyA=-Fb%uC`+wFB3_m#89P zT^rQv1J-?6C3W*DuXAUW;e3sTxBV64*jIBEPuIq5bRns;GaU_2oCYLj9S$?I;kiu& zJcRAbT<~YtrM1-NEKf>SF-Vgf?PqYpK&Yw+kEJnDbfg|S`f&&X_k(76$^DFbTFFu& z9}rVYaI~4i6z!%AE`esA@EwqA^Cb3A@>XbeeeU9gFn}AK3HwcO{X1j$!hwP71tNd& ztNpE8Z*|XF!$WjiPm5!i0U+@lU*1(oEQt<(5?L}p*o8P^i^k|uF=7>L@TdyCcdxKa zaX5@dW*ks@N2K9i(Z;0Jg{I5gCyAt9*&`O+Iftvf__;4Ac$tU04^qg3zKA()>SZjP&lcG4g+oF*?x5VNE)9|&o_X;R7vS*v~ z@4i%9f~XzrVygw21-O3=h1UI?=uJi<)l%$F{PO+PYvH>;rVzrb)Pnv6Bl;&|$wOcU zsd-%b+adl=j_|1h_l=cZA#L~t7p9jx%vS~JSDCSAjq0Ks^2jWrZWH-JvPZ`eZqh4) zIcy$EClfX2FHhPJ4+(*mfo8vxQZwn$*~a4%Met!s#m+R7-CvaA$wlj3M~}K~!9v#s z?||LCl7u}Tonv`VxH;g*fXn3`F$boPgr2zqmrAg4^KRq z>ngtS^RBwKlr`#(2lR`H%~g7zXyGz-;R3&|`4_tj-r)Oc zf`SKLtg^_GnQx1qI;~je532$(HcklOGau)rgMx$+f*2@&Nro*9+UCpu|w&{8r0sHGRSc^!3Is z2eZ=z-zqJKxioz)E2)cpleJqnT;rR5uDM)tL)`l=O#aNylyQ`EG~L2Y1i16s{1_y& zcviQn##e=^yDv9wCJS8%JyTp8>S(q;KQuW?rR)KvS$j_KRftg*s42u(e~PO<<$~qE zW6(#4t68*TqP`K)3>S?BPB4(~knoyWUULK&|7}_@$u#1GCyaP{Uiv+VWZVf_b^iso zrX0fZ6pjcR;NHcc79K%O#jU93XSnwcAPwxtzZTM2$k{XdwI+W+sXVNlUWZKRM45<& zcVERErtXZkh2zNhUMw z2Gh~D9$t$^#{xd2R)Kx|?*Q8_(q0k)8Me=OuByRK45eRv=GBE?(eH#ybGD z$A+z&1#D_>>HLLBO}|K!^LAaYR?ktZw|9i0!9zr8J=`lYaOd|sps^&OSf*s+dV7y4 zzi?2N5@+jsm)4ipfiNBkhA=9%9fz+$*iWbw1K-q9%uWyS9wo81m>%TnZI<(#F6owF)>L#~xF zYz!4?O8mp?DNPA&*v}suIoK1BViZ5_3yl|aAkIp6u6vcWvJH)FyIm(=uiJL-Qvzjd zHg@fDD@8cyRp|HdPm5Eos-x$gSyHYXlTB@xT6cTA2f-?k!$*jW*c=9*6YW81lT>zW zMXY$NukqU%yT6Lvlz#_n=6SE}3aRnAQQ{!cTMM~}8R3>=*4oF)cBhwx>6kUaJ@sS>;YCwnm~e~>kaLN zuEvrD#a~tfZc5D07Pe_h8#-gQwEV{wTd0>#0^zpNA&W(hksWnAfz2($+JZ!{lnYmk 
zOs4T__15V5e8s{=oGe_J(0_r#(lpRLM1ebh-T^pvm)&&W;}H_N1&6)sj0Ka0#Eb{lBTS`|2mHV=~?XsQ*;U39OX^?&lR}9%|!BFhHdvz%VSYg#QD|KsCR^ zO$E)P#_cxz6A#Ol574RWxlc?C;8*46f`4Mqhu%B*z2Yy46XM^%o*R})FRvCw`z_VV zs}me-h}z3en_}KUZ*c1a0hS?=U&|}Ox&HE*M57l6gLW@kD^}Fph`69QT@&c_Lo5~ar8bY~^UoIFTkjyLc z-X7uQ!+462p^0@V&*4_Jm!FyKR#fWKRVNgs`ktZx00fc!qdY_LOW>!)9|K=rc^WR0 z;y7gR#geYnhB@H{!khv#EwGyhoV#{v`uxgl^t(9Xa5lV=kXr$Sk(SB-0D!9JJY?6v z-|$I43EX%yNcfXw5yE01B zCJ4l0H5?P}PhaU?Jf|d&ML{!q$5v4(8`$}iK+71$0+<*Z#vtp}Uc&a-KNTg*~ zmNtQrSpCwnZD2irgjYHJw=3UT{{WoK(I|Ogad;G3H1*)2sqjHG~$fFw*_h66+1(1S0 zaQXZNVOi_aOK8^@C6(GsiDL4OS98RNkOw@ETC=Wdi+c-dHc}MwUQn@C)+Dl z-Qr=&`OTEor=6vq?n5SG3nHrmIa3?)>y;msVqWSq-NtX`jTA}um4*Q&bK9pN)AZd< z+SXg8flw>FgdiAl3djNHzD-fpF52NQ76_3H5-})S7z-yKQHJYDwPRQ&rC~nRi5m83 z{^4V>Sa1PW2kC-qium90X}mk3f5JE6Y4NGu+l0EAPt9!*8#o{H(ZgX+U^jLZspB6T z>E00V^tWepvoiTMaRteh_PXQO0dPs|E5Ws0a{E%hpHtN@Ota53NYXwsyM{B5zyu%C zv6P%5&bl-eN%KpgpKYL<5R>0J6)Ch9V#KyV=lzw(;&WH$duwz^(CuczqAHF-0CVnf z&!9CO#luD%pa8ho%sb;J9>H^xeNA9$dd$&G(pznI+CU`q86=-#M?S)`O2?xV3S7@W$?nH$MxBo)Z&e^cseg(ynq{iN2LqmOEV)X4o@o;QEIN8~d~+JeD!*G$Wg zm6HR5(;tw)0bf{ttf9I;X--TWGkF_eYxJ{Vvs{;X3A2IpO)q zJr7FqnC>nmoz2w1MhM-5)2~l@r>p94+(1ge%NmXk13vYiwUsw?U)3X7a!XWxkbFD% zk>P(E_-@<9ng#TCc9W7-<3DtQ0aNHfBO|xcwr>1Zn_5WFJ7?w3VPBlT0RI4JeRJTi zirL~aTN1;GHEXC}Fy75s8&E!k540FnORUOu(+&b9HGKqYQh zr##mmul!s$ZpyL@7Cq}YRh9HMbaXwN#2*#zH96R=;l_EvJlB_eNAc{J8{?L8bH^=Q z)LuK?Y|PSi9Q@qZpZrAevE17xIXvffXs;<)x>UX6p}g?~7&&fFT%6{pPvRJXI3VEl zuQPuiND18hgVZ%hzA=tu-X!RHXRUHMw0cp)Ji5TH!%$Iw&Y@hlhsAIMjnf5KQ; zU`viT0OFDl7ZBe(-ZfNj9@*o}DYW@pCY-7cfegI0XLyy_M_f$o(tQ%W5~wj~_Or$;kbGL*!47 znrlnr2Z^TZv|zNiEL;T(OgrJ;UL?v!eLu#G0E)bEIlFjAnaK?)H}?#u=mu0gfd3O6@WEaw|w0 zNK3PyqI?PaJNU9)2G7Kr?~Y>fE#tHh_={DFM8%0AA>LtYn8_PSiCL!FyNsStAlJ=5 zvQLI?d^M`c<4b=BUd^mCtbS#XmIJf_iDZazG)Dt)QsG!+2hP_0D*ch}^#1@7JQ|jz zd!h$2o4K!iIpUPJbGej=o&|N3xXA^iMBmo~C2Q$0ST$Ls&jS}-)4Y+n@IOHDPl9|u zGxl8Kn$1 zvlPZp9ZK%t_T>6)?wO&(WY)jfuoT)uNhE5;lpN#K^MW`&we3{PaMHA9ozEhkWr&5` zEPif$1^)noUh6vV#j78R{{RzyD{65zm##^yjWvA9nrTu+5D?+Q1@h#ZdN6Ilf}Di~ zzO2{Sll$yh9Wpv>=?(e3-?BP4Cl3ak;BuYK|%`1#*F8Q#ktRO z*E|P*sm)@gq}t*oW3oaJpO-l}9mY=`>%H*sA~c=Rl!eA$g2Oq<$?IHKgyOccw1(Ki z;yJb#cX5@rBcy8xWgoK zYDtk>Rb%7N_$Y_$5PmsNCy%XkTU|*;gw|(IOMFTlCuxy`?#9BBOBoo*FS~#gFUNng z*X;T6C*aq|yKCKE;^DN5T{alvlKSc8M@DjD`9k4QLn5|FKPe*%Bw_ymV(w*mEf(51 znUsu>!;0f|&x9Tv@gMKsODfxe-k=f)?l&F}UY$2L_nSP}niMfE)ATNks?kf6Q{xYb zW8)XZzYa&E>RPP&UXgif%PoYqQpF)0sYRX{89{A@(IhHP4t9`MzD@YM`zY96c%16K z3)9*`BqtS#^wWh z7cnye8bsLQ74Ytpqw057z8SnXw(*9#y8t8+sS1rMAln-Xu1C4yQ4^E13`~LvolMH29&z4)C9sdBqJT!Z+416{5ev^8PFK+xpaPi%Faxk4% zI8ll;iln9$lzgBsmQ;M9N%n6Z%X{|KO7nc-n4IOsV2abK8c zuyD*IkG9I+w8F)ro}mTVoh{%CyxKxvbesp0LV5f^tUW?oyLqlb>@Iw^#(q+YymS5H z=kcwXFW%beASlr7lHDd&U<-WA-TDukAB9oX;J3Qd^J6=C#vonUK^SSAeuxL@UM-V% zJ!N&dk+1lPwAp8m%51By>~X(jBbd-Sa%WfEK*jk{e+DWZualaR;&Esi{n zarHmStLl2hp-~{1h}~0j7Qn}ne@p{h^!np5Mz9DbCS-XIf16*2(% zUk8>|w!=5;Nl7#%O3vo;>#}{AZ6#6U$Zgy(Fn=uIR2N<>w6uo$>i*+wTO{5ajtptW zeLekao`&Xq%Bd)LohU+(NM{&O?&ejZp35 zvHn;ek7~PXeQ6@P%NSLVV@zL9{!%cl_kct=UlT0mOOA9a8RSC;D3;(H_Egn zMaplnFqY!tZz*ELQI)~RTsBYFr{kK}gGRS_)HK0P2^$7LALCf|`mnQBO^RMY&Q3!A z0Ea>J&05p1C!Lw%YyvT`@sCsNYF?=qDE|N`?Mp+pLLh;}hn4|P0D7NV#?de1;zUdvd7F+$KjB33c{icePdHmNdYnEqMHp#E=T_kH zS}+ghkDx#Odh!;V#EJt$X)*RHO)BcXDus6aieLNS)^o{cB&=)wvrTHxXp6=Xk%YM0 z>Orr6)#po}3hDPy0MOo9TTX=cEQ;UzKj~i$!{Sd9206EOBdEoFq40i9L&krzH->yh zV^p3iKe612n{L*MM~q+`mia<-7~-X6^)Qj0pGwG0c^}*+Zjt@@o-j%9p@}eZZV&5yj-779fVT|$XUa|3ZZ5Kt<2B6+1<=1DC z4VZPEPdgbUbPS{r2*LZkGAo(WJT+^r;`hRT5By%aqS>aJ@f#?@g;kM= zUJnbw!!9_%$>Xhc&Eq{Y>}FYBClUgH22xLMGoQ=SM0w 
z;D?OmSwZXQeY;gT;@0fJAKFTYAuc0AyNTQ~GDiciQcif{zUonJ-J{|?9gRO3c!ti~ zQ$)!NAYkb6k3*73#sIDt;kStN-CM?*pNS^Cgv^o%Eke3*zc1bc(C!Dm6nYAe;}3^5 zpBVfi)Z0QgHaF2)%OH3nQ77Cv$0O!FaC-DP1cUKc!r%BPx5GaL>JsU{8~kgfcza)p z`7Jdh)EvZuOl=OKfWorjvN&9jaf;x@<6(xY7}R!5`mP+w<(f`@%T~8PPxQNpp|>7d zsE97y7V4)11oz;#=UonqtjQtsOsp7!rP=e_sp+0^&3GSyemZDcu7huUx{MM>JWC`} zMIy9rr=zLP(x-w5BoIO9L9Ltr0FV9#&@|hPOIYz%oo8=rZqr=Nd3zjD!x+hF9h`+l z7&&8rG1iII!%8Y{OH0DRVrkxN3R6v2-tdB;;zsCOC!BG|TFNyk%{NWm9(HAvSIgDwD>K-m zSQL`z81xxED)6*n0?z?Kh2(9?`c*fMXx}lj5;30OR;HJ0bt{O_g2ZG7{c(<9B;fP(;Qo28Q^Y!4Wf>xQ7!|>eV$N6S11SUBk?d>H zr5L(=uVc@tbyJ;gesK7|!CE)NJ!ixk4xiw=JsJZbdnJU;btjhOZZ9;ke)034l)L<( z5aXqJ&CTmv>QcCnJZkqBQs5Qn7BvIcJplEuvwv$X3Jc2zw6gH_v8Y~<3~}i?6Wp+M1H2Zw7JOY7im7TGf68G9n&*Ak;2BkKkuVix{kSVVPGRkq!K%#oHsy`GZXrO zn(FSn38luy)^Cb_8VBs8&bBLebs_T~92A3VbpH20t!7_%AL0CWQR(vduf$hoX_;ed zZwn*|`^QuyJhvah10TE5Ptv|k;Ooh4W8ID-lht4DD||81=F#*~tUM8Um)C!Ej&>w? zKTMKw^r-dAmVzkdp6liErzKBNz~>x(71U~%zYIJvrQ0RFp0_-3Z*cb-&AdP^4hGeQ zvw?s}IIc5P@h8KqG!Nk0spV@_i45A~oELCN`D0Pgj^>lY#yUAA_lLEUdPTDWdzo!6 z5X%}n2ZdP}WMvm|8T9E@wVTU$tm0|+7Hea)b_L{KPx>-Ir+K_bH`D|aXvBlo8e7fSgtJmA1%5-V@UkHMZbBpXPxRn`GC*Xp;Jj) zoqj*$Y0Uosm5;jA9^IvyV!O7=thE9-ta5d8Mw>Kz0JeV_?tEx9MEJh`dK(45A@@C!9i3o5_sc*f!Ll;t_5d7dvS5Pb(AV1 zl0j~ubOZ3K169+@3zl|v1cgNO?0@?I08`eM+9PgXBSJXQVQZOUULzX=oP6G&UYYvi z-k%<_BFL1kS8qX_@xc5CPsXxNxfGG@k`_`9a#tf5z{jsV{v_2|H7Mqk$!_dAsbkcS z{{USYEneeM<~Cx~o6V9*G6`-JV;HAeYEk{5tp5N$a!~i(#t+u84y(Adtinc7ftAQU z$JV8W?c|6_xO~mY=snF&vTUUUZ>{7oTy9817h~=@tt8Y!WFgz?i_jmfb2jtbNZ%kK zTZfPz<;`hWIwX6U^StK`#(z47oOWR1Z&^gzoNc&aw`87AA4=1T6^xC$M_>u${c8@w z{gOi-6*n&jJwN*OJeNVEGOfWO2*}-#O-C}mr1HC)3woIS^L^vBBOL*&D{T$Ldm#rs zSnmE+&sto`E*yZso)4vTz8saLgvR8AbReHkdS2sYILaupe`oz3V%u$F58m#{S z0Q6`^KFW=Q$P3T%uTp!^gL@#xdgrZHy1$J_B0tW)+1IM~K1-G;^CtKiqJ=x`=dVz{+fX+g?s2u8zeSmOppV}KD2Yb+XMQL1V;U>tnb*6Is?z?+~y zOyaetTM=vC!j(E*jygYuJ{^d&IgN4JI251H82szjJ{x!^S=0U(UfSupOWWJQcpe+g zQIguhEol5-;@0dt+qK6CX{oGDF9MtC(8s@@9d{gd$KhiT!j2&Rpv`2PURg$kQi zySPcDJd%144_t5uy;Ie#beT0NuOacm$9WV4vI%1zXB{z;e;_#TUoHF{_{pMp^G*KB zZ;Fu5WibZb?JZRSC@@gQcCIb+>NC|kaBjrCPBwscNNcGl^k_1XIFF4 z%W*iIvl6RN`JbBKvmcH;U+`<-Og{+xMW~r?ba8QQ0*(xEv$LdQp#gJ%I}QdZ-VXR@ z;y>FPQ5n>A}ZL zSI$c=ty2+BGk)xU73O~HLy2d>0H2rs+C}^C11y z#D&OV!43!0AoRhm_QUOZp#sM6Bw?~lQsZ|#g1O_^iuy%|g*sfYT&Vm%GtDtsjxp7* zHz($F$3wZ6LoJKM!#};;4+GoYoHV^&+TrDRl1ND94tV^l8Vw&`-@QCZENX*@k0A0m zIQFWxYvYYFSDN=;R+2>_WRX#_a&Slt4l$1X>y{Lx<+^_(k<^Ol&Wt{^-!obP5EuXu z>OP>1{uRx5t4G%_Co`WD*h3KG%Qo%l+^YgV4xW`$`&Y#ZB$Haz-~bY1duC#D{_A8n z&=K{oJNTjE3uvsR)Nj5o>Dpuk9a!IkXtFkNHnVZR+cnb)a*EXEoZ(VV$nf9WcR|*4 zUb6NEZ-7uJT+}?q-mZ3@PgbJeA$ou zBXL|Hc^E3N7dy)L>5eh)Ud{0r_J`BFKYo^a=Zt(eaT2M9%HvkF)*UW)k(M83jiHnQ z{86#!LFrs}tMS+2MzDt0#5Ugz?$qV*-X7HvxWcIn_D;a(xUYlG)f`4K@>b+)zTcVk zxlCtE45KPs$l$fVj~*zE_6-li4;Yck+ZX|UQ64e4o-yyiU<2 zd?XW3iIh8dQq5e+vw6b^!5Q911biSQIEjbDHiw%2O8+rW{a6$C;d#S2Yv-`9ya*1@TX6T zNp+tBn|5|*Ev9QSBVhpx#KKJT(0bQr<8OeM_xIyQ_(}28z+NJ~m*%#N_>$HZl&IQ) zQ6!j_8R&Lrjw`^vFaFAZwNHe@-gw{O{yUSHYu4LC>`Zsl4?SRa|V{{UCg zso7{=B@HH_EzP#O6lItl7VJ&51}x0c5VIgGah5t1tjX@C8wG}x?I%%G1ghnCBa z;~}xM4%w>>t;MDd@w$tJaplUAACqrQx#XPZn&GKdlohJMuT4J&dT_zisiiq5_?-^5 z;(=h&KvCA>MJmchJiM@GC;hB}*SB9o$=^=bF3&8??9G;Pcr~Q{Gr89gCyMRX{c(eK zBr)1^Vs_avqk9F$6f0vjnKrKF8R9@of;Se(K9x#qx!FSAIRhgKI#fl? 
zyWHuOD79l+JNv8HOgq6++1axPk5f!Z#iX8cADj#U{(Dw?>d9>o@)IoF2G2EK^8Wf# ze5MS{0RI3!)uNP})Q3%)*H^|!9#eCjqibMrDy_7#PQn;uV1Rtjw18@qE}?ea%5vP5 z$9k4cTHZ8UXuwqq^8wuYeQSSpM^U#Yudu>MHn|YsV zl0xKjTzK&mmn1o3@|^kuS`q6)OJ&0bW65H1SxV@&Nh79hM0~2uPd{~AlUJbCB91RD zUki>w>s)odrEm`MFDug(ZrbMJ;$JkrSPn}bD>=SZc2}~E&d%Rje3GOp1Nv3V9~RqN zN)6S>*xxA|xW#ey^Td&6=4Nq&lr7)Qv z2RIy7ZQKDS++`b2=}Rt|uSXNSRwZAOpsznnX0fLK0EC*_J@+wU&NP7BISuvKZhTsS-aG)C6FuKh}k$~VSs*_tc_~w;y?n8 z)08DB=ubLY7aY3NV$0O57}JGJb%1)vKqm(wtmsck$d#yo)$w!l@md*P-u(IrkN! zjHMoFvqvQrqBisd)pa?T>2`&LKQyz+fRTH#I8_}zPfFj@^3XYe7$!SgXoPUWJrxHXd-W%hNhYmnR$A7l zYMO+y#b;*DRc~&jDH9MrV4QB1heEN2E1p26t3~TgU(Jx|b6v^X$n;-{J~O`5^cTGF zq!w1z_tBSVV}JlKFhc>?2cCNq$9nnh;``$dkF|MxH*v3clTm`+a6kyXzy;WSwDsnL94p!YC5Nf<*?FT z*Ta4taACYBa{Z-Ol4Oxr%)5QqCM6zPk;E{AUQ`c*d@-rZs@nKiYRh2}n&(p2V@Vc4 zxCM#}Z<;K~#7DYcv|$OAQZ`%qVd3jf66l^G z)2FwRBeL$&buq{{6d=2^8Au@aDh_xZ2jY$PqvFqu9yvO%jdi<;$OdU&cG&G88&pRM z1GW_eq-d%Fg6h5UewF>3zi7Q5;T@%wuZgYWmK*mU*t%+h$H>o=lI%PrLt{r092p2{ z80?8wey2ZX+@g$VzI3Bev-JnUzAMs>qS|bJBt*28fQnBju^BvVZb-+lHR=8t@rH$E z;q&43)I|2)c`qxHWOf*4B$I^z4a8yC_dNjkXTyIVbW8g)EvBH1Bb^MwGnZy;l>nTB z_f9d?0^PG(Q-0Puji~bMY+qz-A%&zFTyET4laj}9Qh4mW`K@VYFr)EkcRa7_5z*ZG z=i;Y|blWWw=fql?!wgVmGx$|-x`+0h^2HNPEL)V4#K6Ds^-I9MF15YW+g6WINgq@fNBu?XGD z91a+?Y4deoGaQ0z+4OJQFTjSzEhEGUHLFOY$&>8t!AZdw$Eb*N!vYGCk-!Hd1dXMOp&00R&jDI9vqzwK)AoeZ zJUu*~8@SeQC5%P7D`wd&(gFM`#fv%g!xDWf=kJQY@K7&?9vIdyJUMf1tIQ;cqttEg z)?ipNjDZ}oEP#+m1!jwB!NVvu;=dYyYM%rASJkh)6XScUn@AB1c3QQv{`XBg)e6c8 z-z0e_A%nD7By8HvUM1s=Pse`^jG9l3Z|$|65UV^IZmQz$)Tkj$usC*-JRBpmE**F6 zS0T~Q$7Y;CSw*Zn(LGvQdY@I5WHe;cteyHR>Hh!@eFUGikA`(Q@1%#p?XCEBM`@-_ z7UNDbTaDRgp4U**?p%Z!R+ctc@>nbPsIC4be#^c*o>}xcZ1mfbvB_zsYJLrpG{{l{ zr21X@7E$-M`6aM;54Vq#>~)_AU)?UF;7<`tqiT@0_-!w5Fs{ILg6rimUFro=gQxYf$!*1nQ-79=`T+Y9JU9}~O7;Xf7V&vhVRS3>c!1(7q!7MjhyoWbCEvO(Pz`+z^#=2Be-Zai&kB6 zH%BR*0yW_rY;ZbEFn{0R5x536bPtG{@582fw4WVbX>H}5*7lcfp~v@GMnN9TE6J^k z!Bl^|rSp#K?>i{rF)@wlPVciYHU9t&YMNE^SnHZa#ht+!XfCaaF;o7@9S?6_^{1r# zK=Jp(`yp>><7>-kq)_i^G`?zW&)is~%4GWBR-FF;wQiMpU^IUi{65q5TNGfyqlqmY z4^mMh1|=l*aDmTf2b$(}yFUZ?%GzB^;ct%Gd%(H>0EBzQ8(_I0a26Su`EwukS(Uis zcR0tIq&@5vM*N#;{JJyHjutc18&8#Y-5uio)}AZZ^s}S*);KQU$Pp%~W}1uvIly$B zGpHHJXrxfRkk={VDgF>^kQ=>2!2bZ*DCGYD#K*%|&ep8Qvn-puq0cH)K9%IYAl0w@ zKdH^7YnE3Q_g3y!TT7^(H$$J|RsfG|fDdZRUx`a{vv2Lnbt*m-jLYucCH;KeC0uSzE zE0ffr);uYAB-A64+D!i71w8)%x^u|m)aT!~F?Hj#m0M8o#oJlW7$$o-op z{{XSuwnaAI_JY)Ht&NtZ$#rhvWNlK7$8{j9eMrxt7112FHRW|4R%0x5>8m+&CHJFp zZBEl$nEkBBXL7@Fn6462{?Pstk~#eA8(6)b)^?6*Hn=APcTwzpDuvgH{1xJ4GU+;! z>Gv+UXr*K2NPRKOFXqG8RyL#JmeEjH>-KJ8V~B1;0s;R3Wx8YAt!@34Sv^(!(F-)g zwWGJGu=nOGQXb?6Zot#6bwO;ZqE=@kpf$r@_|X<(QhmtWqn>j~YvZdMX$whkPDdni z2l!C3+Sd4W7xt9~{v*7TQF#~%0R(f6)hNA=Hq5(uCzH?lHRsa&M6->Lmdbq%Bm7Hc zQs!vD#{)dnEUyzUvTGdE3bndD0WTv#xl$lo7O*0QO~;@hb+0y?;)Stqn)Pry zGyeef>b(B|6*QgOe%LeH3^@98TSq6v{{ShRv&)BJ|)B2(fp5oZy9c2662`gZr}|q~FP(rZ=Ayd_|HZo=Hm&H)NZ(;Bq>jTEUOSIxd}Lkm|Y@h}ETx>_}PiIOsxy&0}8p zzd?c-Wbp2dsO~2$bqiTti0+}b3=W)N)J9NF{HLEd%}3%kF7>Ti>K8hnhb~M=-@BFE zbN)Rm16lDuh%Ve)*!Wp?1SH87z~Fk3o+~a zD~r!E;_BKcW6I8naHW-q<0O6GTwtGSZ2GKoTnps1y1J2$9^NZv*gcw0m5zM?_02#m zZ*`lYaC|24M2-S9p@PTIe5OGS+_hLKp8 zanq{0aS5%o*`rqp2?2!HDqHRmg0VIODGd8ifb{im8{R8O+V#|$)y&RJ z-ezQl3Gbe-+l~;9Fv6-h%yR}K=GECc=LvZZHi^}J7O4%gkv&ko^B*PcN z_zk-?zPMDcAiP^a{#pE~O0@*ii8G^GQM);B7~ATa+nrWNmD|hO4AJchpncUC0aSKT zg#&;90=(`*5%Doj;? z&r&n}2&_*P_&>thg|s&MuAdg7w$|n)k*%R}PnhkUzyf~%0Ihdn>c%Ze%Tvq5LRV{- zL&ts`crpn--)X5^Ngj}v-)-?F)w_9ewx0sIHmLno?ilR@JnRlZSeb?{g%^qZUE@2? 
zi`uO4K-TjIx09G)j^MH+5JGXBM5S3`OqGtHHsQ@YhkE*6qaC z;?6@Dl4Zb;jqC`G11RH_4EkZP+SX`{q58`exPH=b|6NNx< z%_ftLE!2^&9}!xLYn7j_Uk-Gi1KsKISz4?S$#ritTiY9aC&-T>kgJTL+8w}Qy!?T% zUklhl;O_{^Z(_F6p}C6Q?Uo3b0JjQek2^9rT&PuUG9*0?0Q~jvZ|$|>-w$ZQ;`hV4 zi`+#H&{E1%=Qozf#rP!eCv$htIq8inU-&4ej5LoGY5LvAhBbL@=Z@fAD6^Gzqq<|A zrwn|E-#xs;>CW-Q!<8h_w>^03u6^_2kApr4)~sD^v}QxR7hfzX#^nT@4KDY}xehj6MzMeh`4cYZs6bK+74IA;?0YsrANqIPK3$`M>t1 z_?M#iLqXK^Tg^yZ*|Md~GDh2;IZ(gMK_G%gG8l8VMavLD|8&g?gWp za_I5A&~4>L2;A6Fzz6-7;AKLn`S;>Cf_^o4vtGZr*Y&G8(*DZwJx1OOmNHM~#CFD@ zCfuV+?j>f!G_qj2;j1e6tUTS5v66-|$;n)!w$Ow1&sd(&3g#)=f-^VwIe-#Eh&92;q(d$?8>rJ9e64 z{2%zsr%vyo&EX3uq>;9ZT^2j3vAZA#nH=XMkDHFw;!YR}a&9w9=cO9B`mnV~Hh113 zyI&3Xo=q=Lnq5y%Xp-kf)a~PO1?`l8v=<6n18XQb+(A}XL-Qj6(AfNa@QgMv=-&~w zST1d?;|Xsir0UmJLnMH%cyt}&U{{vgu(4|M4;Oqg_~YZRhj!WrjQm*F7dmv;GHJ71 z-AeBiOkqAwJBKYRaxuO&04kOT&l~ni@vEx6--@F7`Y94*k6Z=LJ^E*kqP)7fc315$ z8oa6W-1I5qG43mxlau?8r95HrqSsK<;Y~lpDRyOz*TeeFiw0Iezx!Q7}hUGhfKu@}*BSy5H=;`nB`xHn&m;=1|kaaKAFM76g(82yAW# z)Sd-b)G`g+n>YIYe3hY1BD4O{cC+&>?4+Jp2F4GjbDUHU`$EBuk9ZYMNr^}c_=>%y z{>fTDIR`KV>fkLh66vCbyV4{{WsrKc8BOM~CSzcS@GH#J<&RBfr*XK*`JO#cf>g_k31-$#!vI?dMKA`5E zs(8Cw)h$1^bp1L&{$|^_yuOTci2xk|>Fh_i74CQc00;ah3&`)Gz$M-S7@OrI`@{~1 z+;^$8?*RB_+WIw%L9%Vp80JZM5`&!NvG*MMs6DHA=9zp|l{21Mc4a4bRQb`huNT}M z(dkmQP6rto`gE;5FT}qVt)n*9P&tA@hqabJGB+NDPC@>j)$eWa_ri9{MlG-#AjUV4 z2Sq$%KSNF)6!7XNn>LXog+>5q)dqTwjh^`F+NEbyV7=OC9#39w@~Y1ei^F~>@eQzr zbcyuod&v}KpkwQTa;NF}RcU?~d_#@)Pj<)%`B*Lk59?m{X{6eV-M0EQyazmol(K{C z&N!_%(KS04Up7@p*ko-8po7$d&u(ieXLC;KDI3Eii{4GM=X-C29wMDpu6#tFyy${X ze=JtTzwG6#-a?XGYib080D0Lb@CLn-PY)!~m${tCs^7a;aM;P@WAgm!<)4HS{bNY1 zLk!~)!~s8pWBJwTs%k1y-@u^o=(1Nk#p&%u$SaqS=8tJ%Gm*?sCm;kCx}A z_GX5qsQAlHx!-wrG%Ykhv`^;9-FjpW2joU-zx*V+wZvg=q?SV`1VeDltM9@3_NALt z()9@1TYXmFbA99%YR7M-53AehD$3W39^f$ERx-YrrD-_bhddFU()g!&M4sX`7$eSk z2Lq-FI3l#HHGdFV#<5Rp=dON9BWB42bR#$dsY$5GtQSp}pih9!95dvvH|@hUFw zGU15F%aRVA(VaFXYSIMS^eBO3C0Uh0@;O?>p zZga>3*wqbB#ZcQHvk2QEc~a+Z<*LP{SY^!qhc2caX^{?%H;hS|UCXOWn02?Hq0dK&7Xgywce6>%C}qAf2$I-G2RHCa`178f}`Uz^sg zcw@m{AU6`q`h1>K0;Dud0Uh}yiq+CQ6{!Fk?@7D1$sCJi&*nKb+33Cw*Q3+gXzy-B zBceU3=Dj+2S=#nF9aueXa~6IFnWR$I9$w*stgH$AxIc|_c0UYaoFuzufCUQTHOjd4 zIp@;1G))FuxpQ=91~M|w85_S^>a?u_%@nAPT$7c`=lRuB!cEAvI9qa$hWrI#Z4^m& zc(8)QY0eIL>OlI|wV#0Oqt!mt*ZYz%L1^wXo<=y$X~dTC1xU2W*p4#MX0Pc@Jk0jv zMTn_o&e(v-;AXdt1r=nCl_eHg^Rd<6YjF;g^}b(|6_kxZRFF8t$Wmr!{*VII7U1 zv}c@8;b@|Z?C|(@DC6CckI>-%06O#E9DFd6dzmk89#}krPQ+cuZ&87eFb-?!nEVrG zsDTC6pn-|uz~GNgwa)m@;cdM7ds>ZPhTY2u_Yyrml~r=6)Q$O1J{g3SwLW0K@J*`R z#F1RY0>!#kk_BO&Pw*Uf_3mpTUk&N;x?IlcLpjU+pC6A0IQ(ncJWKFcr0pcS#C~4i zmIQ^AAIS4uo~Pj0?Yzq?i-;ybFnrk975Zkp3{FoDX4*Y^8IpP?aMzX^Rn|la6k(Su z{*2(cQSwDf~^Ml!5MEizUZgQWQrr&rs zT$U2qExd3-SCd-+;_kmQ(xr@&jgvVB@Xmo4Xp>A17-w{F6;YpDwPora7}3KPbVZGR z@?#*Md>ZTy-h)3-gVrGE@ZXag7-n{nf?toh@+h_n$~$QRU%;)Ie~F!sF9Lz%eIw15K^k(Ef_ z8#6D>j(hX`{{RZFHJ^r-PdCfjSh^nQ?y+{R|vPrl9K|Z_+vULkaa4%%@LVz(N0m$jj(Z)LZoK{V?l|J2q5WGT&pdbVD(j;07v3+nw~oiEWePG@S}jUCpl$@z&UB7kYlDE zuG{D~FZ<6TUb#6uR82LiWkyLYO*>dDV{a}yrdHd@Gp$>K4JoDah3R4xg3UVP{A!J# zgRbWudPpNh=v}d&!n9YzmiD_(+RJB_81Ly==$@nJP1zQrip9dT7f!?=+(g45`eV|x zY_#|$kIze{LNmKEf^qp(Ya4rV;wTiH{{VQ_?v-h150xlVSak?9QsqyY$rB|jT=~|2 z2>9maU$Y3`Xvi!_@}-Z#em1v3ZnlA=Amp6nS6y-Ozr*2!haP4+l>&%_xuQf=+)5%1*3RtZ4;ib4}sDh!zAzB%pNp;wJ{SG+jT&aYN=@ zYM4{c&6;dq5N*8AA}Mz%7#RYz?EFUr*5C<%{^&lHjYlh8qqdB!OBR)?-a;B(U=G|b zIi$JM^>`3N9;z7!`ybAb_=?yr=3$Z36$^Nh#C+xR_*Xm>8#?Mrvs)6{!+LHNrMe-y z02hPxu8T+$nSh??o<}$tu5#1F781&&Ff2y^8tAm??aW?abGfT;aOIS$HK`|vNKD5S z?eB)SXVn&eT1<>!Z>y?KL?FNZC}bHtyoM?GxcAjpai!wZ>QgTWPMB zL-<+a4IM+vB*31#!Nxsn*QTmlRC$h6xvg`ewT|1#DGYmY#^&i)?UunU_DI3p4odf| 
z5#g^9sWF>|9^*A`%fs5VCoeX0#%hvkTOBm2rdhtaXvk|pAaCLo4~J*hc-~fc zQq`qt;j8&%z$LN40A{XeegxH;@qk7LcfNh8dnVdw^&wu=wLKrf=E~icS+AV@yrQu@ z;=8R6`yOR6Mv_R3KIo$lLpd>dth&isp+rvI3c>%M^@z@?~*2Bh1J0Crc zoE*10yA3J=s_{b-DCv=!HGL-9=EfnJ(D%F zki;G`bJNngxnC}*@#R&ySxCs%ye$a1GQI1O(mr|*TH*X_;Hzs{jOq)P9I*a%YhToU zC0eT6EG_bV<{;N6tZP3IuGyowMk+JcHRR&pn)pvc3stFWBc0atg|xb6jmY-RR~Hki zEN1wkr$u8~-YC2AR7}AlBZ5v_oYywZ;tAX}!@18qX1-4sO44W3P_24toqXDo+XE!G z`3IbOR%WB(t4RT0IFpQI8p65$)7h}2wnyF>;<9cnHCGsAan5>muQGHYb8l}Y=yX>; zFOLA2nSfFW2C}ZaVPO{GBuqVdu6I(>HI{wRT)KjK1B3Y1Mc$dO$TqakgXx;*ofuoW zy*jeq=UUz))246TH~~g^?^VXF1;Ys!0fFb`9M>N{pRJ$@t0x^#tyPlS#PK<3rIAK+ zjMgg+9_K`ACDiYl>eb?A@|CmGAaPl)7B$|uZ14dU#L0i+_?fmloF0Ly88wd;Z31B+ z8R`kBczHLt;WV^68RAJ;O3^3)k4md~J+PZ;h;{z$b24jQA~5XSU2!@60uWnfxFV1aW;z_W;M5L;~6R|0rjm?Pnlsk9*SwP>bK>?Nf9I* z^T4V04I^mOi)TY^e`E;)(42HTDrohqb$yR)RG#GqD-b4P|~6cJAv;@mMZfUG(U zW`{}^)rT~jxwQI?nvf#54fIS^nIIYbwA%|qM0PF>G+J2vQ)9z$FM_TCYEbjJ$x1jZ?bko+>V|8O4&x9hH zc^_+BG4$sctC9Vn5EfkHk(Q|7vI1NjjB;^T?d@%p?sLv~CcfTv_0ahxMPFB97~;I0 z33qMVxxuCTBN>QdY>lK|lH&ovO#9dq;jYm%g|_>3JXSzk$9=5L`O@p2 zDGSC%2VcsvM~c8~gPe2}=&H?ZV>bk}JFg9Cle~smW*9i?YtnohuUKeuqfAtsk8xfv zqIjxln=p<^9ZyQ?bi;joRJ54g*k-+KKh_}ANZ_oYC96K2@Xy9Q0xMA)BuA$w`Btu_ z@pHqs7W-B(f9sm(72qBk(yz8EnNK60Yid6YY7#c|oRE8N&3m+J*KpMyJxVx!YuxmO z@y3~TF=%msd#!0$>b7@Mf)D}M7_TyqPt+_|CPGemt*ry?zi3d>Kc)t2sxzIoGK6U< zd`G!>GfmU)V^AYgo}~4z^F#1;kdPRx)Dk+L_2yp;4a_d0Nf=}fwd~#(nio_h*pB4a zsX~_Hv|?M~zmmwsto>L*hFFWX2Rh^5EqLMS0G%;@3-pBm1YOSBz1sYZomG z1&C*-IW^|~G1o4wCT0v52R#U`TGgsq=oIL|%E_H(r{ZfiLSzU|4?Anl{9od0C7sM9 z6337QbNb(puh)mT~5*JYv-|enzq#*wG1>=jV)U0-%*@_9!Sqo zS<~5uLRtfX-#`6op>3$$MK;%zmg-0qq?azsj4}1EiOj0jjaJX9!qcZ3O2r8*fNzm8 zkyY+A)3+8j9+(_Z@7b87= zDbe1jU9uoMM;J6(WjVzyj5&N66MVmA9Muc&0>*x07jA)TtU7=Pb5$GdS0jZa(aAPA zR7WFq;K*Sc)iA!BYc}`8XuyPAA9b$2Ti1}0j-#QhoAWkFUnTB?*eT71P(IMJYb5`lp?p$l9EsSN*B6TBXPZ&Ow!Qu4|Ndp6^ zt!W~V9F_cqK`!t}T=ev;`DD!_S5k=5Em!8r$oHwCvA5lmE;H(Al^nMRzaUh)eXwap z;m5s8R(3j@ijB!(wTLp6Vrxdq)+9TbN6^&P_NdFT6cz(H6+NDtG>jP=KIC&sQMr<= zx+3+O!v6q#b@Z$7gAXf>y-6aWw9?%GEI=N(;;kfXgKG2i6+EqM$xa-_n<-@82bKrD zZ)q%K!Ew`}tX)1Sob5Zm5OMsgb5AW6(YSl#y$MR>)O78k@R#;I+4;hdNa!d!MeYkJ zCl%7hA$HoMkHdAF0_PNZ*r{22+W;i~Tv!nQaSqAAhA95=67BL3^#(5Rc z={9jBKxdA)$4cX^URKoZ!ohQDVx0;|*X2@u#aEw3a(WOw>#ulYWzPp4FeJhS z)okRHBe~}uDblW^mmsn2pTe=N0OH9q}rvDSe<1PAkg1DWph<2JCdjcREg$bkTrhIKc1Py(%+F1$tEW zN2F<`2W^lC8Qc^M{*}Xcui<=Jbc`85!8{IfYw1mMRI*gq z;Z$wpab8{HyUjhfx~L=)NjR??7eb1S(dprc|kRYCjX2 zp?4`Gabmr)JJmljO~f!gIOe>MV;i1=oSA-W&pWGNdJ3&Rp@Fe^BdN_*X?{`(JG1N3 ztIk*wbDn)`DN})QleWc%)C)HUAB`|w0rPk4dy1wOLfI-v$E`S_W$Co~Zm8ypMe?>- zX^RZ*IjZvz-!r#4rWKh70FmpOhTzJs#ez!#<)c_EVoR<3!HxsJk=() zW-jg78uqOU;Yk?HXEng)mz$9jTRaNjGQodBgfG+cBvpt)3(1I9mE+tRP&bA})cbj@+%_GCK%HFHkY zC5}LPJ)G+$p-=$8<0smtaZzS7l1RkY zfp*3_@@p>IKbwLHE%{caxQ@~ir-9z6S}H5>diSo1F5^EUQ$`{AfRGMw9D{;p@ zhO?y9q<7ChiQv_ZGX2D(z7II&wn;HU{Kug9PflqWQBN5e$gcZE(<4#^!sLB;t|P!x zh9e-6#d$=sL1yrKPMfugVAER+jm#h*;DTy*=s(6$E9J{#264iol4@ zN#NIfbUd2P%W}+~B%UNH=s@l}3eCUqC}7MO915wc$$a@mj2s_M)z9ipa6!uu4srm- zD>=#7!Z1!ne-wC}Iuk(qcQx`WcW%cC&19;#7XFnNm5bN!+Qcr zJx8@S>?k=%fW>P`Z4!@^{c+x_$r_+LQzz?Lb6m8HxomcWoM8HPrmd`CVE!4c0}IGD z`DdDrOCmaqW7e}$ZR%Ckii-pVPUHg}@l2A{TaPSb*j1o`PfB21oM6?Yh0NJP>c%!4 zFFt~(+}osTGCrcV$HD9v21Z9umTVX>*1P<&66DtC8Eq!sEXtw5@F++mOAho>e4{ zsxeLKWgB9t&T>6z0hS|>P66l*YR_#GAcWMR2D0|T1qtaVft z;Im+`=j&Ykq)E7O=~k5%G#fbPhZN3wj`lBV7b!5tcz;HfqO76qXPB#@u^0s;& zo{+3zAXnyyzpOg$*Y-4{O55RkJBE_lU9 zJ^T%lJv}QlPb)-8Z)&f3ETaTSdRNF}S2aW5p*IK`ZG446RyZ|UYt#oI{cAZSe(X=y zl6EDJJvvvI;*&Z(tePy%c^e(%fH9m3ttO!Yu^{BaF|3%{i~4Bq}H;=~ZJJ z*LxmwLk+($Jm(ZxZCILX`5AsfK>F5ZqjEnsySOY)bIntGr&h{>2dOoud2bn3 
zY!QM-6`OF2zh`bmZx)iZhLy+~?m~AL;10%+-pa#iIr`Ld#=DPRwGtpQ5J~A)l3?^@ z$ZuR@cjjwC-tJzCGm6iKBxV`)tM*W#9QLD@B5qARO$%$;8HNzxcdZ*MsTsgg(x_R+ zzcA;Zt*tuB+>!F0YWX5#%_|e>^00Bcrg=E&Te^MRoB%s|RVzC<3j?;dG|NbF_j(FW LDJue#wx|Eu-QcpD literal 0 HcmV?d00001 diff --git a/tests/data/dataset/3.jpeg b/tests/data/dataset/3.jpeg new file mode 100644 index 0000000..e69de29 diff --git a/tests/data/dataset/a/1.JPG b/tests/data/dataset/a/1.JPG new file mode 100644 index 0000000..e3f9cd1 --- /dev/null +++ b/tests/data/dataset/a/1.JPG @@ -0,0 +1 @@ +../../color.jpg \ No newline at end of file diff --git a/tests/data/dataset/ann.json b/tests/data/dataset/ann.json new file mode 100644 index 0000000..a555393 --- /dev/null +++ b/tests/data/dataset/ann.json @@ -0,0 +1,28 @@ +{ + "metainfo": { + "categories": [ + { + "category_name": "first", + "id": 0 + }, + { + "category_name": "second", + "id": 1 + } + ] + }, + "data_list": [ + { + "img_path": "a/1.JPG", + "gt_label": 0 + }, + { + "img_path": "b/2.jpeg", + "gt_label": 1 + }, + { + "img_path": "b/subb/2.jpeg", + "gt_label": 1 + } + ] +} diff --git a/tests/data/dataset/ann.txt b/tests/data/dataset/ann.txt new file mode 100644 index 0000000..f929e87 --- /dev/null +++ b/tests/data/dataset/ann.txt @@ -0,0 +1,3 @@ +a/1.JPG 0 +b/2.jpeg 1 +b/subb/3.jpg 1 diff --git a/tests/data/dataset/ann_without_labels.txt b/tests/data/dataset/ann_without_labels.txt new file mode 100644 index 0000000..ea467ca --- /dev/null +++ b/tests/data/dataset/ann_without_labels.txt @@ -0,0 +1,3 @@ +a/1.JPG +b/2.jpeg +b/subb/3.jpg diff --git a/tests/data/dataset/b/2.jpeg b/tests/data/dataset/b/2.jpeg new file mode 100644 index 0000000..e3f9cd1 --- /dev/null +++ b/tests/data/dataset/b/2.jpeg @@ -0,0 +1 @@ +../../color.jpg \ No newline at end of file diff --git a/tests/data/dataset/b/subb/3.jpg b/tests/data/dataset/b/subb/3.jpg new file mode 100644 index 0000000..f40a58d --- /dev/null +++ b/tests/data/dataset/b/subb/3.jpg @@ -0,0 +1 @@ +../../../color.jpg \ No newline at end of file diff --git a/tests/data/dataset/classes.txt b/tests/data/dataset/classes.txt new file mode 100644 index 0000000..c012a51 --- /dev/null +++ b/tests/data/dataset/classes.txt @@ -0,0 +1,2 @@ +bus +car diff --git a/tests/data/dataset/multi-task.json b/tests/data/dataset/multi-task.json new file mode 100644 index 0000000..bf96384 --- /dev/null +++ b/tests/data/dataset/multi-task.json @@ -0,0 +1,40 @@ +{ + "metainfo": { + "tasks": [ + "gender", + "wear" + ] + }, + "data_list": [ + { + "img_path": "a/1.JPG", + "gt_label": { + "gender": 0 + } + }, + { + "img_path": "b/2.jpeg", + "gt_label": { + "gender": 0, + "wear": [ + 1, + 0, + 1, + 0 + ] + } + }, + { + "img_path": "b/subb/3.jpg", + "gt_label": { + "gender": 1, + "wear": [ + 0, + 1, + 0, + 1 + ] + } + } + ] +} diff --git a/tests/data/dataset/multi_label_ann.json b/tests/data/dataset/multi_label_ann.json new file mode 100644 index 0000000..5cd8a84 --- /dev/null +++ b/tests/data/dataset/multi_label_ann.json @@ -0,0 +1,28 @@ +{ + "metainfo": { + "categories": [ + { + "category_name": "first", + "id": 0 + }, + { + "category_name": "second", + "id": 1 + } + ] + }, + "data_list": [ + { + "img_path": "a/1.JPG", + "gt_label": [0] + }, + { + "img_path": "b/2.jpeg", + "gt_label": [1] + }, + { + "img_path": "b/subb/2.jpeg", + "gt_label": [0, 1] + } + ] +} diff --git a/tests/data/gray.jpg b/tests/data/gray.jpg new file mode 100644 index 0000000000000000000000000000000000000000..94edd7326f2fdcf311c11f4ad6a8edc62a9ac2a5 GIT binary patch literal 39088 
zL=(=KJ#UK92+HSDq#TKYKOr9Z>)`@_e6D`5A2nvf8}M@Zpr#DoqFK+_e!b!BThK8@ zfmB;0LpkA59UTa(VA@CbIy8za!iA31vHRRBk$d7BQ@*;CeJ1Oy)g>9@+zJRIgU4St zg>*$hwygf}V(i}c?)kn4GKa_FevzP&^l{V_he`3HQm7i$gBHOzqswL`zP@GDak0&* z_uqKy4!fd?{{XF^&VungwsxL0&pwN9uoRio2>RQq$IRib^$tUyTNQyD z%59CC~mA!$kG(wuFe z!aCWZIj!#Kl+tha-9C9G&fg4Zh)x|7d8|U3zEwsm_qeCQH1>7lJ)l9Pca4Bu+3=un z+qqVy&j?1l{ys-k((d#Ijq<9v7TGdg`7o2e{e_dPh+yg7Roz#%%*ccyU_xZxKdIw~ zMNQ{T>lgzAwZFa{>0VOv*YA)ge&pek!R~-$(P^*yH+r}ZK_Rs(pU2(35=1JEGb#S814& z4Ze%1LQaJE!&(sVn1Q#`?4b%uf)&%8i-6fO^+A2mSogSwjJpa%t6=bh198XNe?C&S zk}_V)jBImBctrQ#-&32mexEKOHodQ}Y=!4^1(!X~DP06Vl0fvZLr>}$h1;TkOTv+M yhEGTpQkbb2@U2(u(;-uOW}x%_^xp2`PEql8B$oDUEaoGxbLi0h+;yD4d;brGvNf~- literal 0 HcmV?d00001 diff --git a/tests/data/meta.yml b/tests/data/meta.yml new file mode 100644 index 0000000..cd78630 --- /dev/null +++ b/tests/data/meta.yml @@ -0,0 +1,13 @@ +Models: + - Name: test_model + Metadata: + FLOPs: 319000000 + Parameters: 3500000 + Results: + - Dataset: ImageNet-1k + Metrics: + Top 1 Accuracy: 71.86 + Top 5 Accuracy: 90.42 + Task: Image Classification + Weights: test_weight.pth + Config: test_config.py diff --git a/tests/data/retinanet.py b/tests/data/retinanet.py new file mode 100644 index 0000000..e7e6ea0 --- /dev/null +++ b/tests/data/retinanet.py @@ -0,0 +1,83 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# small RetinaNet +num_classes = 3 + +# model settings +model = dict( + type='RetinaNet', + backbone=dict( + type='ResNet', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + start_level=1, + add_extra_convs='on_input', + num_outs=5), + bbox_head=dict( + type='RetinaHead', + num_classes=num_classes, + in_channels=256, + stacked_convs=1, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + octave_base_scale=4, + scales_per_octave=3, + ratios=[0.5, 1.0, 2.0], + strides=[8, 16, 32, 64, 128]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[.0, .0, .0, .0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='L1Loss', loss_weight=1.0)), + # model training and testing settings + train_cfg=dict( + assigner=dict( + type='MaxIoUAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.4, + min_pos_iou=0, + ignore_iof_thr=-1), + allowed_border=-1, + pos_weight=-1, + debug=False), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100)) + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1333, 800), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict(test=dict(pipeline=test_pipeline)) diff --git a/tests/data/vis_data.json b/tests/data/vis_data.json new file mode 100644 index 0000000..d10acaf --- /dev/null +++ b/tests/data/vis_data.json @@ -0,0 +1,21 @@ +{"lr": 0.1, "data_time": 0.0061125516891479496, "loss": 2.6531384229660033, "time": 
0.14429793357849122, "epoch": 1, "step": 10} +{"lr": 0.1, "data_time": 0.00030262470245361327, "loss": 2.9456406116485594, "time": 0.0219132661819458, "epoch": 1, "step": 20} +{"lr": 0.1, "data_time": 0.00022499561309814454, "loss": 3.1025198698043823, "time": 0.021793675422668458, "epoch": 1, "step": 30} +{"lr": 0.1, "data_time": 0.00023109912872314452, "loss": 2.5765398740768433, "time": 0.021819329261779784, "epoch": 1, "step": 40} +{"lr": 0.1, "data_time": 0.00023169517517089843, "loss": 2.671005058288574, "time": 0.02181088924407959, "epoch": 1, "step": 50} +{"lr": 0.1, "data_time": 0.00021798610687255858, "loss": 2.5273321866989136, "time": 0.021781444549560547, "epoch": 1, "step": 60} +{"accuracy/top1": 18.80000114440918, "step": 1} +{"lr": 0.1, "data_time": 0.0007575273513793946, "loss": 2.3254310727119445, "time": 0.02237672805786133, "epoch": 2, "step": 73} +{"lr": 0.1, "data_time": 0.0002459049224853516, "loss": 2.194095492362976, "time": 0.021792054176330566, "epoch": 2, "step": 83} +{"lr": 0.1, "data_time": 0.00027666091918945315, "loss": 2.207821953296661, "time": 0.021822547912597655, "epoch": 2, "step": 93} +{"lr": 0.1, "data_time": 0.00025298595428466795, "loss": 2.090667963027954, "time": 0.02178535461425781, "epoch": 2, "step": 103} +{"lr": 0.1, "data_time": 0.0002483367919921875, "loss": 2.18342148065567, "time": 0.021893739700317383, "epoch": 2, "step": 113} +{"lr": 0.1, "data_time": 0.00030078887939453123, "loss": 2.2274346113204957, "time": 0.022345948219299316, "epoch": 2, "step": 123} +{"accuracy/top1": 21.100000381469727, "step": 2} +{"lr": 0.1, "data_time": 0.0008128643035888672, "loss": 2.017984461784363, "time": 0.02267434597015381, "epoch": 3, "step": 136} +{"lr": 0.1, "data_time": 0.00023736953735351563, "loss": 2.0648953437805178, "time": 0.02174344062805176, "epoch": 3, "step": 146} +{"lr": 0.1, "data_time": 0.00024063587188720702, "loss": 2.0859395623207093, "time": 0.022107195854187012, "epoch": 3, "step": 156} +{"lr": 0.1, "data_time": 0.0002336740493774414, "loss": 2.1662048220634462, "time": 0.021825361251831054, "epoch": 3, "step": 166} +{"lr": 0.1, "data_time": 0.0002296924591064453, "loss": 2.1007142066955566, "time": 0.021821355819702147, "epoch": 3, "step": 176} +{"lr": 0.1, "data_time": 0.00023157596588134765, "loss": 2.0436240792274476, "time": 0.021722936630249025, "epoch": 3, "step": 186} +{"accuracy/top1": 25.600000381469727, "step": 3} diff --git a/tests/test_apis/test_inference.py b/tests/test_apis/test_inference.py new file mode 100644 index 0000000..72b20e5 --- /dev/null +++ b/tests/test_apis/test_inference.py @@ -0,0 +1,116 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp +from tempfile import TemporaryDirectory +from unittest import TestCase +from unittest.mock import ANY, MagicMock, patch + +from mmcv.image import imread + +from mmpretrain.apis import (ImageClassificationInferencer, ModelHub, + get_model, inference_model) +from mmpretrain.models import MobileNetV3 +from mmpretrain.structures import DataSample +from mmpretrain.visualization import UniversalVisualizer + +MODEL = 'mobilenet-v3-small-050_3rdparty_in1k' +WEIGHT = 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/mobilenet-v3-small-050_3rdparty_in1k_20221114-e0b86be1.pth' # noqa: E501 +CONFIG = ModelHub.get(MODEL).config + + +class TestImageClassificationInferencer(TestCase): + + def test_init(self): + # test input BaseModel + model = get_model(MODEL) + inferencer = ImageClassificationInferencer(model) + self.assertEqual(model._config, inferencer.config) + self.assertIsInstance(inferencer.model.backbone, MobileNetV3) + + # test input model name + with patch('mmengine.runner.load_checkpoint') as mock: + inferencer = ImageClassificationInferencer(MODEL) + self.assertIsInstance(inferencer.model.backbone, MobileNetV3) + mock.assert_called_once_with(ANY, WEIGHT, map_location='cpu') + + # test input config path + inferencer = ImageClassificationInferencer(CONFIG.filename) + self.assertIsInstance(inferencer.model.backbone, MobileNetV3) + + # test input config object + inferencer = ImageClassificationInferencer(CONFIG) + self.assertIsInstance(inferencer.model.backbone, MobileNetV3) + + # test specify weights + with patch('mmengine.runner.load_checkpoint') as mock: + ImageClassificationInferencer(MODEL, pretrained='custom.pth') + mock.assert_called_once_with(ANY, 'custom.pth', map_location='cpu') + + def test_call(self): + img_path = osp.join(osp.dirname(__file__), '../data/color.jpg') + img = imread(img_path) + + # test inference classification model + inferencer = ImageClassificationInferencer(MODEL) + results = inferencer(img_path)[0] + self.assertEqual( + results.keys(), + {'pred_score', 'pred_scores', 'pred_label', 'pred_class'}) + + # test return_datasample=True + results = inferencer(img, return_datasamples=True)[0] + self.assertIsInstance(results, DataSample) + + def test_visualize(self): + img_path = osp.join(osp.dirname(__file__), '../data/color.jpg') + img = imread(img_path) + + inferencer = ImageClassificationInferencer(MODEL) + self.assertIsNone(inferencer.visualizer) + + with TemporaryDirectory() as tmpdir: + inferencer(img, show_dir=tmpdir) + self.assertIsInstance(inferencer.visualizer, UniversalVisualizer) + self.assertTrue(osp.exists(osp.join(tmpdir, '0.png'))) + + inferencer.visualizer = MagicMock(wraps=inferencer.visualizer) + inferencer( + img_path, rescale_factor=2., draw_score=False, show_dir=tmpdir) + self.assertTrue(osp.exists(osp.join(tmpdir, 'color.png'))) + inferencer.visualizer.visualize_cls.assert_called_once_with( + ANY, + ANY, + classes=inferencer.classes, + resize=None, + show=False, + wait_time=0, + rescale_factor=2., + draw_gt=False, + draw_pred=True, + draw_score=False, + name='color', + out_file=osp.join(tmpdir, 'color.png')) + + +class TestInferenceAPIs(TestCase): + + def test_inference_model(self): + # test backward compatibility + img_path = osp.join(osp.dirname(__file__), '../data/color.jpg') + img = imread(img_path) + + model = get_model(MODEL, pretrained=True) + results = inference_model(model, img_path) + self.assertEqual( + results.keys(), + {'pred_score', 'pred_scores', 'pred_label', 'pred_class'}) + + results = 
inference_model(model, img) + self.assertEqual( + results.keys(), + {'pred_score', 'pred_scores', 'pred_label', 'pred_class'}) + + # test input model name + results = inference_model(MODEL, img) + self.assertEqual( + results.keys(), + {'pred_score', 'pred_scores', 'pred_label', 'pred_class'}) diff --git a/tests/test_apis/test_model.py b/tests/test_apis/test_model.py new file mode 100644 index 0000000..1295e03 --- /dev/null +++ b/tests/test_apis/test_model.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +from unittest import TestCase +from unittest.mock import patch + +from mmengine import Config + +from mmpretrain.apis import ModelHub, get_model, init_model, list_models +from mmpretrain.models import ImageClassifier, MobileNetV2 + + +class TestModelHub(TestCase): + + def test_mmpretrain_models(self): + self.assertIn('resnet18_8xb32_in1k', ModelHub._models_dict) + + def test_register_model_index(self): + model_index_path = osp.join(osp.dirname(__file__), '../data/meta.yml') + + ModelHub.register_model_index(model_index_path) + self.assertIn('test_model', ModelHub._models_dict) + self.assertEqual( + ModelHub._models_dict['test_model'].config, + osp.abspath( + osp.join(osp.dirname(model_index_path), 'test_config.py'))) + + with self.assertRaisesRegex(ValueError, 'meta.yml'): + # test name conflict + ModelHub.register_model_index(model_index_path) + + # test specify config prefix + del ModelHub._models_dict['test_model'] + ModelHub.register_model_index( + model_index_path, config_prefix='configs') + self.assertEqual(ModelHub._models_dict['test_model'].config, + osp.abspath(osp.join('configs', 'test_config.py'))) + + def test_get_model(self): + metainfo = ModelHub.get('resnet18_8xb32_in1k') + self.assertIsInstance(metainfo.weights, str) + self.assertIsInstance(metainfo.config, Config) + + +class TestHubAPIs(TestCase): + + def test_list_models(self): + models_names = list_models() + self.assertIsInstance(models_names, list) + + models_names = list_models(pattern='swin*in1k') + for model_name in models_names: + self.assertTrue( + model_name.startswith('swin') and 'in1k' in model_name) + + def test_get_model(self): + model = get_model('mobilenet-v2_8xb32_in1k') + self.assertIsInstance(model, ImageClassifier) + self.assertIsInstance(model.backbone, MobileNetV2) + + with patch('mmengine.runner.load_checkpoint') as mock: + model = get_model('mobilenet-v2_8xb32_in1k', pretrained=True) + model = get_model('mobilenet-v2_8xb32_in1k', pretrained='test.pth') + + weight = mock.call_args_list[0][0][1] + self.assertIn('https', weight) + weight = mock.call_args_list[1][0][1] + self.assertEqual('test.pth', weight) + + with self.assertRaisesRegex(ValueError, 'Failed to find'): + get_model('unknown-model') + + def test_init_model(self): + # test init from config object + cfg = ModelHub.get('mobilenet-v2_8xb32_in1k').config + model = init_model(cfg) + self.assertIsInstance(model, ImageClassifier) + self.assertIsInstance(model.backbone, MobileNetV2) + + # test init from config file + cfg = ModelHub._models_dict['mobilenet-v2_8xb32_in1k'].config + self.assertIsInstance(cfg, str) + model = init_model(cfg) + self.assertIsInstance(model, ImageClassifier) + self.assertIsInstance(model.backbone, MobileNetV2) + + # test modify configs of the model + model = init_model(cfg, head=dict(num_classes=10)) + self.assertEqual(model.head.num_classes, 10) diff --git a/tests/test_datasets/test_dataset_utils.py b/tests/test_datasets/test_dataset_utils.py new file mode 100644 index 
0000000..6e92424 --- /dev/null +++ b/tests/test_datasets/test_dataset_utils.py @@ -0,0 +1,39 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os.path as osp +import random +import string +from unittest.mock import patch + +import pytest + +from mmpretrain.datasets.utils import (check_integrity, + open_maybe_compressed_file, rm_suffix) + + +def test_dataset_utils(): + # test rm_suffix + assert rm_suffix('a.jpg') == 'a' + assert rm_suffix('a.bak.jpg') == 'a.bak' + assert rm_suffix('a.bak.jpg', suffix='.jpg') == 'a.bak' + assert rm_suffix('a.bak.jpg', suffix='.bak.jpg') == 'a' + + # test check_integrity + rand_file = ''.join(random.sample(string.ascii_letters, 10)) + assert not check_integrity(rand_file, md5=None) + assert not check_integrity(rand_file, md5=2333) + test_file = osp.join(osp.dirname(__file__), '../data/color.jpg') + assert check_integrity(test_file, md5='08252e5100cb321fe74e0e12a724ce14') + assert not check_integrity(test_file, md5=2333) + + +@pytest.mark.parametrize('method,path', [('gzip.open', 'abc.gz'), + ('lzma.open', 'abc.xz'), + ('builtins.open', 'abc.txt'), + (None, 1)]) +def test_open_maybe_compressed_file(method, path): + if method: + with patch(method) as mock: + open_maybe_compressed_file(path) + mock.assert_called() + else: + assert open_maybe_compressed_file(path) == path diff --git a/tests/test_datasets/test_datasets.py b/tests/test_datasets/test_datasets.py new file mode 100644 index 0000000..0a5c6e3 --- /dev/null +++ b/tests/test_datasets/test_datasets.py @@ -0,0 +1,2201 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import os.path as osp +import pickle +import sys +import tempfile +from unittest import TestCase +from unittest.mock import MagicMock, call, patch + +import mat4py +import numpy as np +from mmengine.logging import MMLogger + +from mmpretrain.registry import DATASETS, TRANSFORMS + +ASSETS_ROOT = osp.abspath(osp.join(osp.dirname(__file__), '../data/dataset')) + + +class TestBaseDataset(TestCase): + DATASET_TYPE = 'BaseDataset' + + DEFAULT_ARGS = dict(data_root=ASSETS_ROOT, ann_file='ann.json') + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test loading metainfo from ann_file + cfg = {**self.DEFAULT_ARGS, 'metainfo': None, 'classes': None} + dataset = dataset_class(**cfg) + self.assertEqual( + dataset.CLASSES, + dataset_class.METAINFO.get('classes', ('first', 'second'))) + self.assertFalse(dataset.test_mode) + + # Test overriding metainfo by `metainfo` argument + cfg = {**self.DEFAULT_ARGS, 'metainfo': {'classes': ('bus', 'car')}} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ('bus', 'car')) + + # Test overriding metainfo by `classes` argument + cfg = {**self.DEFAULT_ARGS, 'classes': ['bus', 'car']} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ('bus', 'car')) + + classes_file = osp.join(ASSETS_ROOT, 'classes.txt') + cfg = {**self.DEFAULT_ARGS, 'classes': classes_file} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ('bus', 'car')) + self.assertEqual(dataset.class_to_idx, {'bus': 0, 'car': 1}) + + # Test invalid classes + cfg = {**self.DEFAULT_ARGS, 'classes': dict(classes=1)} + with self.assertRaisesRegex(ValueError, "type "): + dataset_class(**cfg) + + def test_get_cat_ids(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + dataset = dataset_class(**self.DEFAULT_ARGS) + + cat_ids = dataset.get_cat_ids(0) + self.assertIsInstance(cat_ids, list) + self.assertEqual(len(cat_ids), 1) + 
self.assertIsInstance(cat_ids[0], int) + + def test_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS, 'lazy_init': True} + dataset = dataset_class(**cfg) + + head = 'Dataset ' + dataset.__class__.__name__ + self.assertIn(head, repr(dataset)) + + if dataset.CLASSES is not None: + num_classes = len(dataset.CLASSES) + self.assertIn(f'Number of categories: \t{num_classes}', + repr(dataset)) + + self.assertIn('Haven\'t been initialized', repr(dataset)) + dataset.full_init() + self.assertIn(f'Number of samples: \t{len(dataset)}', repr(dataset)) + + TRANSFORMS.register_module(name='test_mock', module=MagicMock) + cfg = {**self.DEFAULT_ARGS, 'pipeline': [dict(type='test_mock')]} + dataset = dataset_class(**cfg) + self.assertIn('With transforms', repr(dataset)) + del TRANSFORMS.module_dict['test_mock'] + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS, 'lazy_init': True} + dataset = dataset_class(**cfg) + + self.assertIn(f'Annotation file: \t{dataset.ann_file}', repr(dataset)) + self.assertIn(f'Prefix of images: \t{dataset.img_prefix}', + repr(dataset)) + + +class TestCustomDataset(TestBaseDataset): + DATASET_TYPE = 'CustomDataset' + + DEFAULT_ARGS = dict(data_root=ASSETS_ROOT, ann_file='ann.txt') + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test overriding metainfo by `metainfo` argument + cfg = {**self.DEFAULT_ARGS, 'metainfo': {'classes': ('bus', 'car')}} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ('bus', 'car')) + + # Test overriding metainfo by `classes` argument + cfg = {**self.DEFAULT_ARGS, 'classes': ['bus', 'car']} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ('bus', 'car')) + + classes_file = osp.join(ASSETS_ROOT, 'classes.txt') + cfg = {**self.DEFAULT_ARGS, 'classes': classes_file} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ('bus', 'car')) + self.assertEqual(dataset.class_to_idx, {'bus': 0, 'car': 1}) + + # Test invalid classes + cfg = {**self.DEFAULT_ARGS, 'classes': dict(classes=1)} + with self.assertRaisesRegex(ValueError, "type "): + dataset_class(**cfg) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # test load without ann_file + cfg = { + **self.DEFAULT_ARGS, + 'data_prefix': ASSETS_ROOT, + 'ann_file': '', + } + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + self.assertEqual(dataset.CLASSES, ('a', 'b')) # auto infer classes + self.assertGreaterEqual( + dataset.get_data_info(0).items(), { + 'img_path': osp.join(ASSETS_ROOT, 'a', '1.JPG'), + 'gt_label': 0 + }.items()) + self.assertGreaterEqual( + dataset.get_data_info(2).items(), { + 'img_path': osp.join(ASSETS_ROOT, 'b', 'subb', '3.jpg'), + 'gt_label': 1 + }.items()) + + # test load without ann_file and without labels + # (no specific folder structures) + cfg = { + **self.DEFAULT_ARGS, + 'data_prefix': ASSETS_ROOT, + 'ann_file': '', + 'with_label': False, + } + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 4) + self.assertIsNone(dataset.CLASSES, None) + self.assertGreaterEqual( + dataset.get_data_info(0).items(), { + 'img_path': osp.join(ASSETS_ROOT, '3.jpeg'), + }.items()) + self.assertGreaterEqual( + dataset.get_data_info(1).items(), { + 'img_path': osp.join(ASSETS_ROOT, 'a', '1.JPG'), + }.items()) + self.assertGreaterEqual( + dataset.get_data_info(3).items(), { + 'img_path': osp.join(ASSETS_ROOT, 'b', 'subb', '3.jpg'), + }.items()) + 
+ # test ann_file assertion + cfg = { + **self.DEFAULT_ARGS, + 'data_prefix': ASSETS_ROOT, + 'ann_file': ['ann_file.txt'], + } + with self.assertRaisesRegex(TypeError, 'expected str'): + dataset_class(**cfg) + + # test load with ann_file + cfg = { + **self.DEFAULT_ARGS, + 'data_root': ASSETS_ROOT, + 'ann_file': 'ann.txt', + } + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + # custom dataset won't infer CLASSES from ann_file + self.assertIsNone(dataset.CLASSES, None) + self.assertGreaterEqual( + dataset.get_data_info(0).items(), { + 'img_path': osp.join(ASSETS_ROOT, 'a/1.JPG'), + 'gt_label': 0, + }.items()) + self.assertGreaterEqual( + dataset.get_data_info(2).items(), { + 'img_path': osp.join(ASSETS_ROOT, 'b/subb/3.jpg'), + 'gt_label': 1 + }.items()) + np.testing.assert_equal(dataset.get_gt_labels(), np.array([0, 1, 1])) + + # test load with absolute ann_file + cfg = { + **self.DEFAULT_ARGS, + 'data_root': '', + 'data_prefix': '', + 'ann_file': osp.join(ASSETS_ROOT, 'ann.txt'), + } + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + # custom dataset won't infer CLASSES from ann_file + self.assertIsNone(dataset.CLASSES, None) + self.assertGreaterEqual( + dataset.get_data_info(0).items(), { + 'img_path': 'a/1.JPG', + 'gt_label': 0, + }.items()) + self.assertGreaterEqual( + dataset.get_data_info(2).items(), { + 'img_path': 'b/subb/3.jpg', + 'gt_label': 1 + }.items()) + + # test load with absolute ann_file and without label + cfg = { + **self.DEFAULT_ARGS, + 'data_root': '', + 'data_prefix': '', + 'ann_file': osp.join(ASSETS_ROOT, 'ann_without_labels.txt'), + 'with_label': False, + } + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + # custom dataset won't infer CLASSES from ann_file + self.assertIsNone(dataset.CLASSES, None) + self.assertGreaterEqual( + dataset.get_data_info(0).items(), { + 'img_path': 'a/1.JPG', + }.items()) + self.assertGreaterEqual( + dataset.get_data_info(2).items(), { + 'img_path': 'b/subb/3.jpg', + }.items()) + + # test extensions filter + cfg = { + **self.DEFAULT_ARGS, 'data_prefix': dict(img_path=ASSETS_ROOT), + 'ann_file': '', + 'extensions': ('.txt', ) + } + with self.assertRaisesRegex(RuntimeError, + 'Supported extensions are: .txt'): + dataset_class(**cfg) + + cfg = { + **self.DEFAULT_ARGS, 'data_prefix': ASSETS_ROOT, + 'ann_file': '', + 'extensions': ('.jpeg', ) + } + logger = MMLogger.get_current_instance() + with self.assertLogs(logger, 'WARN') as log: + dataset = dataset_class(**cfg) + self.assertIn('Supported extensions are: .jpeg', log.output[0]) + self.assertEqual(len(dataset), 1) + self.assertGreaterEqual( + dataset.get_data_info(0).items(), { + 'img_path': osp.join(ASSETS_ROOT, 'b', '2.jpeg'), + 'gt_label': 1 + }.items()) + + # test classes check + cfg = { + **self.DEFAULT_ARGS, + 'data_prefix': ASSETS_ROOT, + 'classes': ('apple', 'banana'), + 'ann_file': '', + } + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ('apple', 'banana')) + + cfg['classes'] = ['apple', 'banana', 'dog'] + with self.assertRaisesRegex(AssertionError, + r"\(2\) doesn't match .* classes \(3\)"): + dataset_class(**cfg) + + +class TestImageNet(TestCustomDataset): + DATASET_TYPE = 'ImageNet' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + cls.meta_folder = 'meta' + cls.train_file = 'train.txt' + cls.val_file = 'val.txt' + cls.test_file = 'test.txt' + cls.categories = ['cat', 'dog'] + + 
os.mkdir(osp.join(cls.root, cls.meta_folder)) + + cls.DEFAULT_ARGS = dict(data_root=cls.root, split='train') + + with open(osp.join(cls.root, cls.meta_folder, cls.train_file), + 'w') as f: + f.write('\n'.join([ + '1.jpg 0', + '2.jpg 1', + '3.jpg 1', + ])) + + with open(osp.join(cls.root, cls.meta_folder, cls.val_file), 'w') as f: + f.write('\n'.join([ + '11.jpg 0', + '22.jpg 1', + ])) + + with open(osp.join(cls.root, cls.meta_folder, cls.test_file), + 'w') as f: + f.write('\n'.join([ + 'aa.jpg', + 'bb.jpg', + ])) + + def test_initialize(self): + super().test_initialize() + + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test valid splits + splits = ['train', 'val'] + for split in splits: + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = split + cfg['classes'] = self.categories + dataset = dataset_class(**cfg) + self.assertEqual(dataset.data_root, self.root) + + # Test split="test" + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'test' + logger = MMLogger.get_current_instance() + with self.assertLogs(logger, 'INFO') as log: + dataset = dataset_class(**cfg) + self.assertFalse(dataset.with_label) + self.assertIn('Since the ImageNet1k test set', log.output[0]) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test default behavior + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 3) + + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + osp.join(self.root, 'train', '1.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + # Test split="val" + cfg = {**self.DEFAULT_ARGS, 'split': 'val'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 2) + + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + osp.join(self.root, 'val', '11.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + # Test split="test" + cfg = {**self.DEFAULT_ARGS, 'split': 'test'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 2) + + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + osp.join(self.root, 'test', 'aa.jpg')) + + # test override classes + cfg = { + **self.DEFAULT_ARGS, + 'classes': ['cat', 'dog'], + } + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + self.assertEqual(dataset.CLASSES, ('cat', 'dog')) + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + + self.assertIn(f'Root of dataset: \t{dataset.data_root}', repr(dataset)) + + +class TestImageNet21k(TestCustomDataset): + DATASET_TYPE = 'ImageNet21k' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + cls.meta_folder = 'meta' + cls.train_file = 'train.txt' + + os.mkdir(osp.join(cls.root, cls.meta_folder)) + + with open(osp.join(cls.root, cls.meta_folder, cls.train_file), + 'w') as f: + f.write('\n'.join([ + 'cat/a.jpg 0', + 'cat/b.jpg 0', + 'dog/a.jpg 1', + 'dog/b.jpg 1', + ])) + + cls.DEFAULT_ARGS = dict( + data_root=cls.root, + classes=['cat', 'dog'], + ann_file='meta/train.txt') + + def test_initialize(self): + super().test_initialize() + + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + 
cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test valid splits + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'train' + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, 'train') + self.assertEqual(dataset.data_root, self.root) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # The multi_label option is not implemented not. + cfg = {**self.DEFAULT_ARGS, 'multi_label': True} + with self.assertRaisesRegex(NotImplementedError, 'not supported'): + dataset_class(**cfg) + + # Warn about ann_file + cfg = {**self.DEFAULT_ARGS, 'ann_file': '', 'lazy_init': True} + ann_path = osp.join(self.root, self.meta_folder, self.train_file) + os.rename(ann_path, ann_path + 'copy') + logger = MMLogger.get_current_instance() + with self.assertLogs(logger, 'INFO') as log: + dataset_class(**cfg) + self.assertIn('specify the `ann_file`', log.output[0]) + os.rename(ann_path + 'copy', ann_path) + + # Warn about classes + cfg = {**self.DEFAULT_ARGS, 'classes': None} + with self.assertLogs(logger, 'WARN') as log: + dataset_class(**cfg) + self.assertIn('specify the `classes`', log.output[0]) + + # Test split='train' + cfg = {**self.DEFAULT_ARGS, 'split': 'train', 'classes': None} + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 4) + + +class TestPlaces205(TestCustomDataset): + DATASET_TYPE = 'Places205' + + DEFAULT_ARGS = dict(data_root=ASSETS_ROOT, ann_file='ann.txt') + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # test classes number + cfg = { + **self.DEFAULT_ARGS, + 'data_prefix': ASSETS_ROOT, + 'ann_file': '', + } + with self.assertRaisesRegex(AssertionError, + r"\(2\) doesn't match .* classes \(205\)"): + dataset_class(**cfg) + + # test override classes + cfg = { + **self.DEFAULT_ARGS, + 'data_prefix': ASSETS_ROOT, + 'classes': ['cat', 'dog'], + 'ann_file': '', + } + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + self.assertEqual(dataset.CLASSES, ('cat', 'dog')) + + +class TestCIFAR10(TestBaseDataset): + DATASET_TYPE = 'CIFAR10' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + cls.DEFAULT_ARGS = dict(data_root=cls.root, split='train') + + dataset_class = DATASETS.get(cls.DATASET_TYPE) + base_folder = osp.join(cls.root, dataset_class.base_folder) + os.mkdir(base_folder) + + cls.fake_imgs = np.random.randint( + 0, 255, size=(6, 3 * 32 * 32), dtype=np.uint8) + cls.fake_labels = np.random.randint(0, 10, size=(6, )) + cls.fake_classes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + + batch1 = dict( + data=cls.fake_imgs[:2], labels=cls.fake_labels[:2].tolist()) + with open(osp.join(base_folder, 'data_batch_1'), 'wb') as f: + f.write(pickle.dumps(batch1)) + + batch2 = dict( + data=cls.fake_imgs[2:4], labels=cls.fake_labels[2:4].tolist()) + with open(osp.join(base_folder, 'data_batch_2'), 'wb') as f: + f.write(pickle.dumps(batch2)) + + test_batch = dict( + data=cls.fake_imgs[4:], fine_labels=cls.fake_labels[4:].tolist()) + with open(osp.join(base_folder, 'test_batch'), 'wb') as f: + f.write(pickle.dumps(test_batch)) + + meta = {dataset_class.meta['key']: cls.fake_classes} + meta_filename = dataset_class.meta['filename'] + with open(osp.join(base_folder, meta_filename), 'wb') as f: + f.write(pickle.dumps(meta)) + + dataset_class.train_list = [['data_batch_1', None], + ['data_batch_2', None]] + dataset_class.test_list = [['test_batch', None]] + 
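+        # Note on the fixture above: the pickled files mimic the CIFAR
+        # "python version" batch format, i.e. a dict whose 'data' entry is a
+        # uint8 array of shape (N, 3 * 32 * 32) and whose labels live under
+        # 'labels' (CIFAR10-style batches) or 'fine_labels' (the test batch
+        # here); the assertions in test_load_data_list reshape each row to
+        # (3, 32, 32) and transpose it to HWC accordingly. train_list /
+        # test_list above and the md5 below are overridden so the loader
+        # reads these small fakes instead of the real archives.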
dataset_class.meta['md5'] = None + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test with invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test with valid split + splits = ['train', 'test'] + test_modes = [False, True] + + for split in splits: + for test_mode in test_modes: + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = split + cfg['test_mode'] = test_mode + + if split == 'train' and test_mode: + logger = MMLogger.get_current_instance() + with self.assertLogs(logger, 'WARN') as log: + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, split) + self.assertEqual(dataset.test_mode, test_mode) + self.assertEqual(dataset.data_root, self.root) + self.assertIn('training set will be used', log.output[0]) + else: + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, split) + self.assertEqual(dataset.test_mode, test_mode) + self.assertEqual(dataset.data_root, self.root) + + # Test without dataset path + with self.assertRaisesRegex(RuntimeError, 'specify the dataset path'): + dataset = dataset_class() + + # Test overriding metainfo by `metainfo` argument + cfg = {**self.DEFAULT_ARGS, 'metainfo': {'classes': ('bus', 'car')}} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ('bus', 'car')) + + # Test overriding metainfo by `classes` argument + cfg = {**self.DEFAULT_ARGS, 'classes': ['bus', 'car']} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ('bus', 'car')) + + classes_file = osp.join(ASSETS_ROOT, 'classes.txt') + cfg = {**self.DEFAULT_ARGS, 'classes': classes_file} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.CLASSES, ('bus', 'car')) + self.assertEqual(dataset.class_to_idx, {'bus': 0, 'car': 1}) + + # Test invalid classes + cfg = {**self.DEFAULT_ARGS, 'classes': dict(classes=1)} + with self.assertRaisesRegex(ValueError, "type "): + dataset_class(**cfg) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test default behavior + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 4) + self.assertEqual(dataset.CLASSES, dataset_class.METAINFO['classes']) + + data_info = dataset[0] + fake_img = self.fake_imgs[0].reshape(3, 32, 32).transpose(1, 2, 0) + np.testing.assert_equal(data_info['img'], fake_img) + np.testing.assert_equal(data_info['gt_label'], self.fake_labels[0]) + + # Test with split='test' + cfg = {**self.DEFAULT_ARGS, 'split': 'test'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 2) + + data_info = dataset[0] + fake_img = self.fake_imgs[4].reshape(3, 32, 32).transpose(1, 2, 0) + np.testing.assert_equal(data_info['img'], fake_img) + np.testing.assert_equal(data_info['gt_label'], self.fake_labels[4]) + + # Test load meta + cfg = {**self.DEFAULT_ARGS, 'lazy_init': True} + dataset = dataset_class(**cfg) + dataset._metainfo = {} + dataset.full_init() + self.assertEqual(dataset.CLASSES, self.fake_classes) + + cfg = {**self.DEFAULT_ARGS, 'lazy_init': True} + dataset = dataset_class(**cfg) + dataset._metainfo = {} + dataset.meta['filename'] = 'invalid' + with self.assertRaisesRegex(RuntimeError, 'not found or corrupted'): + dataset.full_init() + + # Test automatically download + with patch('mmpretrain.datasets.cifar.download_and_extract_archive' + ) as mock: + cfg = {**self.DEFAULT_ARGS, 'lazy_init': True, 'split': 'test'} + dataset = dataset_class(**cfg) + dataset.test_list = 
[['invalid_batch', None]]
+            with self.assertRaisesRegex(AssertionError, 'Download failed'):
+                dataset.full_init()
+            mock.assert_called_once_with(
+                dataset.url,
+                dataset.data_prefix['root'],
+                filename=dataset.filename,
+                md5=dataset.tgz_md5)
+
+        with self.assertRaisesRegex(RuntimeError, '`download=True`'):
+            cfg = {
+                **self.DEFAULT_ARGS, 'lazy_init': True,
+                'split': 'test',
+                'download': False
+            }
+            dataset = dataset_class(**cfg)
+            dataset.test_list = [['test_batch', 'invalid_md5']]
+            dataset.full_init()
+
+        # Test different backend
+        cfg = {
+            **self.DEFAULT_ARGS, 'lazy_init': True,
+            'data_prefix': 'http://openmmlab/cifar'
+        }
+        dataset = dataset_class(**cfg)
+        dataset._check_integrity = MagicMock(return_value=False)
+        with self.assertRaisesRegex(RuntimeError, 'http://openmmlab/cifar'):
+            dataset.full_init()
+
+    def test_extra_repr(self):
+        dataset_class = DATASETS.get(self.DATASET_TYPE)
+        cfg = {**self.DEFAULT_ARGS, 'lazy_init': True}
+        dataset = dataset_class(**cfg)
+
+        self.assertIn(f"Prefix of data: \t{dataset.data_prefix['root']}",
+                      repr(dataset))
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.tmpdir.cleanup()
+
+
+class TestCIFAR100(TestCIFAR10):
+    DATASET_TYPE = 'CIFAR100'
+
+
+class TestMultiLabelDataset(TestBaseDataset):
+    DATASET_TYPE = 'MultiLabelDataset'
+
+    DEFAULT_ARGS = dict(data_root=ASSETS_ROOT, ann_file='multi_label_ann.json')
+
+    def test_get_cat_ids(self):
+        dataset_class = DATASETS.get(self.DATASET_TYPE)
+        cfg = {**self.DEFAULT_ARGS}
+        dataset = dataset_class(**cfg)
+
+        cat_ids = dataset.get_cat_ids(0)
+        self.assertTrue(cat_ids, [0])
+
+        cat_ids = dataset.get_cat_ids(1)
+        self.assertTrue(cat_ids, [1])
+
+        cat_ids = dataset.get_cat_ids(1)
+        self.assertTrue(cat_ids, [0, 1])
+
+
+class TestVOC(TestBaseDataset):
+    DATASET_TYPE = 'VOC'
+
+    @classmethod
+    def setUpClass(cls) -> None:
+        super().setUpClass()
+
+        tmpdir = tempfile.TemporaryDirectory()
+        cls.tmpdir = tmpdir
+        data_root = tmpdir.name
+
+        cls.DEFAULT_ARGS = dict(data_root=data_root, split='trainval')
+
+        cls.image_folder = osp.join(data_root, 'JPEGImages')
+        cls.ann_folder = osp.join(data_root, 'Annotations')
+        cls.image_set_folder = osp.join(data_root, 'ImageSets', 'Main')
+        os.makedirs(cls.image_set_folder)
+        os.mkdir(cls.image_folder)
+        os.mkdir(cls.ann_folder)
+
+        cls.fake_img_paths = [f'{i}' for i in range(6)]
+        cls.fake_labels = [[
+            np.random.randint(10) for _ in range(np.random.randint(1, 4))
+        ] for _ in range(6)]
+        cls.fake_classes = [f'C_{i}' for i in range(10)]
+        train_list = [i for i in range(0, 4)]
+        test_list = [i for i in range(4, 6)]
+
+        with open(osp.join(cls.image_set_folder, 'trainval.txt'), 'w') as f:
+            for train_item in train_list:
+                f.write(str(train_item) + '\n')
+        with open(osp.join(cls.image_set_folder, 'test.txt'), 'w') as f:
+            for test_item in test_list:
+                f.write(str(test_item) + '\n')
+        with open(osp.join(cls.image_set_folder, 'full_path_test.txt'),
+                  'w') as f:
+            for test_item in test_list:
+                f.write(osp.join(cls.image_folder, str(test_item)) + '\n')
+
+        for train_item in train_list:
+            with open(osp.join(cls.ann_folder, f'{train_item}.xml'), 'w') as f:
+                temple = ('<object><name>C_{}</name>'
+                          '<difficult>{}</difficult></object>')
+                ann_data = ''.join([
+                    temple.format(label, '0')
+                    for label in cls.fake_labels[train_item]
+                ])
+                # add difficult label
+                ann_data += ''.join([
+                    temple.format(label, '1')
+                    for label in cls.fake_labels[train_item]
+                ])
+                xml_ann_data = f'<annotation>{ann_data}</annotation>'
+                f.write(xml_ann_data + '\n')
+
+        for test_item in test_list:
+            with open(osp.join(cls.ann_folder, f'{test_item}.xml'), 'w') as f:
+                temple = ('<object><name>C_{}</name>'
+                          '<difficult>{}</difficult></object>')
+                ann_data = ''.join([
+                    temple.format(label, '0')
+                    for label in cls.fake_labels[test_item]
+                ])
+                xml_ann_data = f'<annotation>{ann_data}</annotation>'
+                f.write(xml_ann_data + '\n')
+
+    def test_initialize(self):
+        dataset_class = DATASETS.get(self.DATASET_TYPE)
+
+        # Test overriding metainfo by `classes` argument
+        cfg = {**self.DEFAULT_ARGS, 'classes': ['bus', 'car']}
+        dataset = dataset_class(**cfg)
+        self.assertEqual(dataset.CLASSES, ('bus', 'car'))
+
+        # Test overriding CLASSES by classes file
+        classes_file = osp.join(ASSETS_ROOT, 'classes.txt')
+        cfg = {**self.DEFAULT_ARGS, 'classes': classes_file}
+        dataset = dataset_class(**cfg)
+        self.assertEqual(dataset.CLASSES, ('bus', 'car'))
+        self.assertEqual(dataset.class_to_idx, {'bus': 0, 'car': 1})
+
+        # Test invalid classes
+        cfg = {**self.DEFAULT_ARGS, 'classes': dict(classes=1)}
+        with self.assertRaisesRegex(ValueError, "type "):
+            dataset_class(**cfg)
+
+        # Test invalid split
+        with self.assertRaisesRegex(AssertionError, 'The split must be'):
+            cfg = {**self.DEFAULT_ARGS}
+            cfg['split'] = 'unknown'
+            dataset_class(**cfg)
+
+        # Test valid splits
+        splits = ['trainval', 'test']
+        for split in splits:
+            cfg = {**self.DEFAULT_ARGS}
+            cfg['split'] = split
+            dataset = dataset_class(**cfg)
+            self.assertEqual(dataset.split, split)
+
+        # Test split='trainval' and test_mode = True
+        logger = MMLogger.get_current_instance()
+        with self.assertLogs(logger, 'WARN') as log:
+            cfg = {**self.DEFAULT_ARGS}
+            cfg['split'] = 'trainval'
+            cfg['test_mode'] = True
+            dataset = dataset_class(**cfg)
+            self.assertEqual(dataset.split, 'trainval')
+            self.assertEqual(dataset.test_mode, True)
+            self.assertIn('The trainval set will be used', log.output[0])
+
+    def test_get_cat_ids(self):
+        dataset_class = DATASETS.get(self.DATASET_TYPE)
+        cfg = {'classes': self.fake_classes, **self.DEFAULT_ARGS}
+        dataset = dataset_class(**cfg)
+
+        cat_ids = dataset.get_cat_ids(0)
+        self.assertIsInstance(cat_ids, list)
+        self.assertIsInstance(cat_ids[0], int)
+
+    def test_load_data_list(self):
+        dataset_class = DATASETS.get(self.DATASET_TYPE)
+
+        # Test default behavior
+        dataset = dataset_class(**self.DEFAULT_ARGS)
+        self.assertEqual(len(dataset), 4)
+        self.assertEqual(len(dataset.CLASSES), 20)
+
+        cfg = {
+            'classes': self.fake_classes,
+            'lazy_init': True,
+            **self.DEFAULT_ARGS
+        }
+        dataset = dataset_class(**cfg)
+
+        self.assertIn('Haven\'t been initialized', repr(dataset))
+        dataset.full_init()
+        self.assertIn(f'Number of samples: \t{len(dataset)}', repr(dataset))
+
+        data_info = dataset[0]
+        fake_img_path = osp.join(self.image_folder, self.fake_img_paths[0])
+        self.assertEqual(data_info['img_path'], f'{fake_img_path}.jpg')
+        self.assertEqual(set(data_info['gt_label']), set(self.fake_labels[0]))
+
+        # Test with split='test'
+        cfg['split'] = 'test'
+        dataset = dataset_class(**cfg)
+        self.assertEqual(len(dataset), 2)
+
+        data_info = dataset[0]
+        fake_img_path = osp.join(self.image_folder, self.fake_img_paths[4])
+        self.assertEqual(data_info['img_path'], f'{fake_img_path}.jpg')
+        self.assertEqual(set(data_info['gt_label']), set(self.fake_labels[4]))
+
+        # Test with test_mode=True and ann_path = None
+        cfg['split'] = ''
+        cfg['image_set_path'] = 'ImageSets/Main/test.txt'
+        cfg['test_mode'] = True
+        cfg['data_prefix'] = 'JPEGImages'
+        dataset = dataset_class(**cfg)
+        self.assertEqual(len(dataset), 2)
+
+        data_info = dataset[0]
+        fake_img_path = osp.join(self.image_folder, self.fake_img_paths[4])
+        self.assertEqual(data_info['img_path'], f'{fake_img_path}.jpg')
+        self.assertEqual(data_info['gt_label'],
None) + + # Test different backend + cfg = { + **self.DEFAULT_ARGS, 'lazy_init': True, + 'data_root': 's3://openmmlab/voc' + } + petrel_mock = MagicMock() + sys.modules['petrel_client'] = petrel_mock + dataset = dataset_class(**cfg) + petrel_mock.client.Client.assert_called() + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + + self.assertIn(f'Path of image set: \t{dataset.image_set_path}', + repr(dataset)) + self.assertIn(f'Prefix of dataset: \t{dataset.data_root}', + repr(dataset)) + self.assertIn(f'Prefix of annotations: \t{dataset.ann_prefix}', + repr(dataset)) + self.assertIn(f'Prefix of images: \t{dataset.img_prefix}', + repr(dataset)) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class TestMNIST(TestBaseDataset): + DATASET_TYPE = 'MNIST' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + data_prefix = tmpdir.name + cls.DEFAULT_ARGS = dict(data_root=cls.root, split='train') + + dataset_class = DATASETS.get(cls.DATASET_TYPE) + + def rm_suffix(s): + return s[:s.rfind('.')] + + train_image_file = osp.join(data_prefix, + rm_suffix(dataset_class.train_list[0][0])) + train_label_file = osp.join(data_prefix, + rm_suffix(dataset_class.train_list[1][0])) + test_image_file = osp.join(data_prefix, + rm_suffix(dataset_class.test_list[0][0])) + test_label_file = osp.join(data_prefix, + rm_suffix(dataset_class.test_list[1][0])) + cls.fake_img = np.random.randint(0, 255, size=(28, 28), dtype=np.uint8) + cls.fake_label = np.random.randint(0, 10, size=(1, ), dtype=np.uint8) + + for file in [train_image_file, test_image_file]: + magic = b'\x00\x00\x08\x03' # num_dims = 3, type = uint8 + head = b'\x00\x00\x00\x01' + b'\x00\x00\x00\x1c' * 2 # (1, 28, 28) + data = magic + head + cls.fake_img.flatten().tobytes() + with open(file, 'wb') as f: + f.write(data) + + for file in [train_label_file, test_label_file]: + magic = b'\x00\x00\x08\x01' # num_dims = 3, type = uint8 + head = b'\x00\x00\x00\x01' # (1, ) + data = magic + head + cls.fake_label.tobytes() + with open(file, 'wb') as f: + f.write(data) + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test with invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test with valid split + splits = ['train', 'test'] + test_modes = [False, True] + + for split in splits: + for test_mode in test_modes: + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = split + cfg['test_mode'] = test_mode + + if split == 'train' and test_mode: + logger = MMLogger.get_current_instance() + with self.assertLogs(logger, 'WARN') as log: + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, split) + self.assertEqual(dataset.test_mode, test_mode) + self.assertEqual(dataset.data_root, self.root) + self.assertIn('training set will be used', log.output[0]) + else: + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, split) + self.assertEqual(dataset.test_mode, test_mode) + self.assertEqual(dataset.data_root, self.root) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test default behavior + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 1) + self.assertEqual(dataset.CLASSES, dataset_class.METAINFO['classes']) + + data_info 
= dataset[0] + np.testing.assert_equal(data_info['img'], self.fake_img) + np.testing.assert_equal(data_info['gt_label'], self.fake_label) + + # Test with split='test' + cfg = {**self.DEFAULT_ARGS, 'split': 'test'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 1) + + data_info = dataset[0] + np.testing.assert_equal(data_info['img'], self.fake_img) + np.testing.assert_equal(data_info['gt_label'], self.fake_label) + + # Test automatically download + with patch('mmpretrain.datasets.mnist.download_and_extract_archive' + ) as mock: + cfg = {**self.DEFAULT_ARGS, 'lazy_init': True, 'split': 'test'} + dataset = dataset_class(**cfg) + dataset.train_list = [['invalid_train_file', None]] + dataset.test_list = [['invalid_test_file', None]] + with self.assertRaisesRegex(AssertionError, 'Download failed'): + dataset.full_init() + calls = [ + call( + osp.join(dataset.url_prefix, dataset.train_list[0][0]), + download_root=dataset.data_prefix['root'], + filename=dataset.train_list[0][0], + md5=None), + call( + osp.join(dataset.url_prefix, dataset.test_list[0][0]), + download_root=dataset.data_prefix['root'], + filename=dataset.test_list[0][0], + md5=None) + ] + mock.assert_has_calls(calls) + + with self.assertRaisesRegex(RuntimeError, '`download=True`'): + cfg = { + **self.DEFAULT_ARGS, 'lazy_init': True, + 'split': 'test', + 'download': False + } + dataset = dataset_class(**cfg) + dataset._check_exists = MagicMock(return_value=False) + dataset.full_init() + + # Test different backend + cfg = { + **self.DEFAULT_ARGS, 'lazy_init': True, + 'data_prefix': 'http://openmmlab/mnist' + } + dataset = dataset_class(**cfg) + dataset._check_exists = MagicMock(return_value=False) + with self.assertRaisesRegex(RuntimeError, 'http://openmmlab/mnist'): + dataset.full_init() + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS, 'lazy_init': True} + dataset = dataset_class(**cfg) + + self.assertIn(f"Prefix of data: \t{dataset.data_prefix['root']}", + repr(dataset)) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class FashionMNIST(TestMNIST): + DATASET_TYPE = 'FashionMNIST' + + +class TestCUB(TestBaseDataset): + DATASET_TYPE = 'CUB' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + cls.ann_file = 'images.txt' + cls.image_folder = 'images' + cls.image_class_labels_file = 'image_class_labels.txt' + cls.train_test_split_file = 'train_test_split.txt' + + cls.DEFAULT_ARGS = dict( + data_root=cls.root, split='train', test_mode=False) + + with open(osp.join(cls.root, cls.ann_file), 'w') as f: + f.write('\n'.join([ + '1 1.txt', + '2 2.txt', + '3 3.txt', + ])) + + with open(osp.join(cls.root, cls.image_class_labels_file), 'w') as f: + f.write('\n'.join([ + '1 2', + '2 3', + '3 1', + ])) + + with open(osp.join(cls.root, cls.train_test_split_file), 'w') as f: + f.write('\n'.join([ + '1 0', + '2 1', + '3 1', + ])) + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test with invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test with valid split + splits = ['train', 'test'] + test_modes = [False, True] + + for split in splits: + for test_mode in test_modes: + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = split + cfg['test_mode'] = test_mode + + if split == 'train' and 
test_mode:
+                    logger = MMLogger.get_current_instance()
+                    with self.assertLogs(logger, 'WARN') as log:
+                        dataset = dataset_class(**cfg)
+                        self.assertEqual(dataset.split, split)
+                        self.assertEqual(dataset.test_mode, test_mode)
+                        self.assertEqual(dataset.data_root, self.root)
+                        self.assertEqual(dataset.ann_file,
+                                         osp.join(self.root, self.ann_file))
+                        self.assertIn('training set will be used',
+                                      log.output[0])
+                else:
+                    dataset = dataset_class(**cfg)
+                    self.assertEqual(dataset.split, split)
+                    self.assertEqual(dataset.test_mode, test_mode)
+                    self.assertEqual(dataset.data_root, self.root)
+                    self.assertEqual(dataset.ann_file,
+                                     osp.join(self.root, self.ann_file))
+
+    def test_load_data_list(self):
+        dataset_class = DATASETS.get(self.DATASET_TYPE)
+
+        # Test default behavior
+        dataset = dataset_class(**self.DEFAULT_ARGS)
+        self.assertEqual(len(dataset), 2)
+
+        data_info = dataset[0]
+        self.assertEqual(data_info['img_path'],
+                         osp.join(self.root, self.image_folder, '2.txt'))
+        self.assertEqual(data_info['gt_label'], 3 - 1)
+
+        # Test with split='test'
+        cfg = {**self.DEFAULT_ARGS, 'split': 'test'}
+        dataset = dataset_class(**cfg)
+        self.assertEqual(len(dataset), 1)
+
+        data_info = dataset[0]
+        self.assertEqual(data_info['img_path'],
+                         osp.join(self.root, self.image_folder, '1.txt'))
+        self.assertEqual(data_info['gt_label'], 2 - 1)
+
+    def test_extra_repr(self):
+        dataset_class = DATASETS.get(self.DATASET_TYPE)
+        cfg = {**self.DEFAULT_ARGS}
+        dataset = dataset_class(**cfg)
+
+        self.assertIn(f'Root of dataset: \t{dataset.data_root}', repr(dataset))
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.tmpdir.cleanup()
+
+
+class TestMultiTaskDataset(TestCase):
+    DATASET_TYPE = 'MultiTaskDataset'
+
+    DEFAULT_ARGS = dict(
+        data_root=ASSETS_ROOT,
+        ann_file=osp.join(ASSETS_ROOT, 'multi-task.json'),
+        pipeline=[])
+
+    def test_metainfo(self):
+        dataset_class = DATASETS.get(self.DATASET_TYPE)
+
+        # Test default behavior
+        dataset = dataset_class(**self.DEFAULT_ARGS)
+        metainfo = {'tasks': ['gender', 'wear']}
+        self.assertDictEqual(dataset.metainfo, metainfo)
+        self.assertFalse(dataset.test_mode)
+
+    def test_parse_data_info(self):
+        dataset_class = DATASETS.get(self.DATASET_TYPE)
+        dataset = dataset_class(**self.DEFAULT_ARGS)
+
+        data = dataset.parse_data_info({
+            'img_path': 'a.jpg',
+            'gt_label': {
+                'gender': 0
+            }
+        })
+        self.assertDictContainsSubset(
+            {
+                'img_path': os.path.join(ASSETS_ROOT, 'a.jpg'),
+                'gt_label': {
+                    'gender': 0
+                }
+            }, data)
+        np.testing.assert_equal(data['gt_label']['gender'], 0)
+
+        # Test missing path
+        with self.assertRaisesRegex(AssertionError, 'have `img_path` field'):
+            dataset.parse_data_info(
+                {'gt_label': {
+                    'gender': 0,
+                    'wear': [1, 0, 1, 0]
+                }})
+
+    def test_repr(self):
+        dataset_class = DATASETS.get(self.DATASET_TYPE)
+        dataset = dataset_class(**self.DEFAULT_ARGS)
+
+        task_doc = ('For 2 tasks\n gender \n wear ')
+        self.assertIn(task_doc, repr(dataset))
+
+    def test_load_data_list(self):
+        dataset_class = DATASETS.get(self.DATASET_TYPE)
+
+        # Test default behavior
+        dataset = dataset_class(**self.DEFAULT_ARGS)
+        data = dataset.load_data_list(self.DEFAULT_ARGS['ann_file'])
+        self.assertIsInstance(data, list)
+        np.testing.assert_equal(len(data), 3)
+        np.testing.assert_equal(data[0]['gt_label'], {'gender': 0})
+        np.testing.assert_equal(data[1]['gt_label'], {
+            'gender': 0,
+            'wear': [1, 0, 1, 0]
+        })
+
+
+class TestInShop(TestBaseDataset):
+    DATASET_TYPE = 'InShop'
+
+    @classmethod
+    def setUpClass(cls) -> None:
+        super().setUpClass()
+
+        tmpdir = 
tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + cls.list_eval_partition = 'Eval/list_eval_partition.txt' + cls.DEFAULT_ARGS = dict(data_root=cls.root, split='train') + cls.ann_file = osp.join(cls.root, cls.list_eval_partition) + os.makedirs(osp.join(cls.root, 'Eval')) + with open(cls.ann_file, 'w') as f: + f.write('\n'.join([ + '8', + 'image_name item_id evaluation_status', + f'{osp.join("img", "02_1_front.jpg")} id_00000002 train', + f'{osp.join("img", "02_2_side.jpg")} id_00000002 train', + f'{osp.join("img", "12_3_back.jpg")} id_00007982 gallery', + f'{osp.join("img", "12_7_addition.jpg")} id_00007982 gallery', + f'{osp.join("img", "13_1_front.jpg")} id_00007982 query', + f'{osp.join("img", "13_2_side.jpg")} id_00007983 gallery', + f'{osp.join("img", "13_3_back.jpg")} id_00007983 query ', + f'{osp.join("img", "13_7_additional.jpg")} id_00007983 query', + ])) + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test with mode=train + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, 'train') + self.assertEqual(dataset.data_root, self.root) + self.assertEqual(dataset.ann_file, self.ann_file) + + # Test with mode=query + cfg = {**self.DEFAULT_ARGS, 'split': 'query'} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, 'query') + self.assertEqual(dataset.data_root, self.root) + self.assertEqual(dataset.ann_file, self.ann_file) + + # Test with mode=gallery + cfg = {**self.DEFAULT_ARGS, 'split': 'gallery'} + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, 'gallery') + self.assertEqual(dataset.data_root, self.root) + self.assertEqual(dataset.ann_file, self.ann_file) + + # Test with mode=other + cfg = {**self.DEFAULT_ARGS, 'split': 'other'} + with self.assertRaisesRegex(AssertionError, "'split' of `InS"): + dataset_class(**cfg) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test with mode=train + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 2) + data_info = dataset[0] + self.assertEqual( + data_info['img_path'], + os.path.join(self.root, 'Img', 'img', '02_1_front.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + # Test with mode=query + cfg = {**self.DEFAULT_ARGS, 'split': 'query'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + data_info = dataset[0] + self.assertEqual( + data_info['img_path'], + os.path.join(self.root, 'Img', 'img', '13_1_front.jpg')) + self.assertEqual(data_info['gt_label'], [0, 1]) + + # Test with mode=gallery + cfg = {**self.DEFAULT_ARGS, 'split': 'gallery'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + data_info = dataset[0] + self.assertEqual( + data_info['img_path'], + os.path.join(self.root, 'Img', 'img', '12_3_back.jpg')) + self.assertEqual(data_info['sample_idx'], 0) + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + + self.assertIn(f'Root of dataset: \t{dataset.data_root}', repr(dataset)) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class TestFlowers102(TestBaseDataset): + DATASET_TYPE = 'Flowers102' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + cls.DEFAULT_ARGS = dict(data_root=cls.root, split='train') + cls.ann_file = osp.join(cls.root, 
'imagelabels.mat') + cls.train_test_split_file = osp.join(cls.root, 'setid.mat') + + mat4py.savemat(cls.ann_file, + {'labels': [1, 1, 2, 2, 2, 3, 3, 4, 4, 5]}) + mat4py.savemat(cls.train_test_split_file, { + 'trnid': [1, 3, 5], + 'valid': [7, 9], + 'tstid': [2, 4, 6, 8, 10], + }) + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test valid splits + splits = ['train', 'val', 'trainval', 'test'] + for split in splits: + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = split + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, split) + self.assertEqual(dataset.data_root, self.root) + self.assertEqual(dataset.ann_file, self.ann_file) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test with split="train" + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + os.path.join(self.root, 'jpg', 'image_00001.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + # Test with split="val" + cfg = {**self.DEFAULT_ARGS, 'split': 'val'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 2) + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + os.path.join(self.root, 'jpg', 'image_00007.jpg')) + self.assertEqual(data_info['gt_label'], 2) + + # Test with split="trainval" + cfg = {**self.DEFAULT_ARGS, 'split': 'trainval'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 5) + data_info = dataset[2] + self.assertEqual(data_info['img_path'], + os.path.join(self.root, 'jpg', 'image_00005.jpg')) + self.assertEqual(data_info['gt_label'], 1) + + # Test with split="test" + cfg = {**self.DEFAULT_ARGS, 'split': 'test'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 5) + data_info = dataset[2] + self.assertEqual(data_info['img_path'], + os.path.join(self.root, 'jpg', 'image_00006.jpg')) + self.assertEqual(data_info['gt_label'], 2) + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + + self.assertIn(f'Root of dataset: \t{dataset.data_root}', repr(dataset)) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class TestOxfordIIITPet(TestBaseDataset): + DATASET_TYPE = 'OxfordIIITPet' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + cls.trainval_file = 'trainval.txt' + cls.image_folder = 'images' + cls.meta_folder = 'annotations' + cls.test_file = 'test.txt' + + os.mkdir(osp.join(cls.root, cls.meta_folder)) + + cls.DEFAULT_ARGS = dict(data_root=cls.root, split='trainval') + + with open(osp.join(cls.root, cls.meta_folder, cls.trainval_file), + 'w') as f: + f.write('\n'.join([ + 'Abyssinian_100 1 1 1', + 'american_bulldog_100 2 2 1', + 'basset_hound_126 4 2 3', + ])) + + with open(osp.join(cls.root, cls.meta_folder, cls.test_file), + 'w') as f: + f.write('\n'.join([ + 'Abyssinian_204 1 1 1', + 'american_bulldog_208 2 2 1', + ])) + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + 
dataset_class(**cfg) + + # Test valid splits + splits = ['trainval', 'test'] + for split in splits: + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = split + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, split) + self.assertEqual(dataset.data_root, self.root) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test default behavior + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 3) + + data_info = dataset[0] + self.assertEqual( + data_info['img_path'], + osp.join(self.root, self.image_folder, 'Abyssinian_100.jpg')) + self.assertEqual(data_info['gt_label'], 1 - 1) + + # Test with split="test" + cfg = {**self.DEFAULT_ARGS, 'split': 'test'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 2) + + data_info = dataset[0] + self.assertEqual( + data_info['img_path'], + osp.join(self.root, self.image_folder, 'Abyssinian_204.jpg')) + self.assertEqual(data_info['gt_label'], 1 - 1) + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + + self.assertIn(f'Root of dataset: \t{dataset.data_root}', repr(dataset)) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class TestDTD(TestBaseDataset): + DATASET_TYPE = 'DTD' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + cls.DEFAULT_ARGS = dict(data_root=cls.root, split='train') + + cls.meta_folder = 'imdb' + + os.makedirs(osp.join(cls.root, cls.meta_folder)) + + cls.ann_file = osp.join(cls.root, cls.meta_folder, 'imdb.mat') + + mat4py.savemat( + cls.ann_file, { + 'images': { + 'name': [ + '1.jpg', '2.jpg', '3.jpg', '4.jpg', '5.jpg', '6.jpg', + '7.jpg', '8.jpg', '9.jpg', '10.jpg' + ], + 'class': [1, 1, 2, 2, 2, 3, 3, 4, 4, 5], + 'set': [1, 2, 3, 1, 2, 3, 1, 2, 3, 1] + } + }) + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test valid splits + splits = ['train', 'val', 'trainval', 'test'] + for split in splits: + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = split + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, split) + self.assertEqual(dataset.data_root, self.root) + self.assertEqual(dataset.ann_file, self.ann_file) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test with split="train" + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 4) + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + os.path.join(self.root, 'images', '1.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + # Test with split="val" + cfg = {**self.DEFAULT_ARGS, 'split': 'val'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + os.path.join(self.root, 'images', '2.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + # Test with split="trainval" + cfg = {**self.DEFAULT_ARGS, 'split': 'trainval'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 7) + data_info = dataset[2] + self.assertEqual(data_info['img_path'], + os.path.join(self.root, 'images', '4.jpg')) + self.assertEqual(data_info['gt_label'], 1) + + # Test with split="test" + cfg 
= {**self.DEFAULT_ARGS, 'split': 'test'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + os.path.join(self.root, 'images', '3.jpg')) + self.assertEqual(data_info['gt_label'], 1) + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + + self.assertIn(f'Root of dataset: \t{dataset.data_root}', repr(dataset)) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class TestFGVCAircraft(TestBaseDataset): + DATASET_TYPE = 'FGVCAircraft' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + + os.makedirs(osp.join(cls.root, 'data')) + + cls.train_file = osp.join('data', 'images_variant_train.txt') + cls.val_file = osp.join('data', 'images_variant_val.txt') + cls.trainval_file = osp.join('data', 'images_variant_trainval.txt') + cls.test_file = osp.join('data', 'images_variant_test.txt') + cls.image_folder = osp.join('data', 'images') + + cls.DEFAULT_ARGS = dict(data_root=cls.root, split='trainval') + + with open(osp.join(cls.root, cls.train_file), 'w') as f: + f.write('\n'.join([ + '1025794 707-320', + '1019011 727-200', + ])) + + with open(osp.join(cls.root, cls.val_file), 'w') as f: + f.write('\n'.join([ + '0209554 737-200', + ])) + + with open(osp.join(cls.root, cls.trainval_file), 'w') as f: + f.write('\n'.join([ + '1025794 707-320', + '1019011 727-200', + '0209554 737-200', + ])) + + with open(osp.join(cls.root, cls.test_file), 'w') as f: + f.write('\n'.join([ + '1514522 707-320', + '0116175 727-200', + '0713752 737-200', + '2126017 737-300', + ])) + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test valid splits + splits = ['train', 'val', 'trainval', 'test'] + ann_files = [ + self.train_file, self.val_file, self.trainval_file, self.test_file + ] + for i, split in enumerate(splits): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = split + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, split) + self.assertEqual(dataset.data_root, self.root) + self.assertEqual(dataset.ann_file, + osp.join(self.root, ann_files[i])) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test default behavior (split="trainval") + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 3) + + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + osp.join(self.root, self.image_folder, '1025794.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + # # Test with split="train" + cfg = {**self.DEFAULT_ARGS, 'split': 'train'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 2) + + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + osp.join(self.root, self.image_folder, '1025794.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + # Test with split="val" + cfg = {**self.DEFAULT_ARGS, 'split': 'val'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 1) + + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + osp.join(self.root, self.image_folder, '0209554.jpg')) + self.assertEqual(data_info['gt_label'], 2) + + # Test with split="test" + cfg = 
{**self.DEFAULT_ARGS, 'split': 'test'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 4) + + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + osp.join(self.root, self.image_folder, '1514522.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + + self.assertIn(f'Root of dataset: \t{dataset.data_root}', repr(dataset)) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class TestStanfordCars(TestBaseDataset): + DATASET_TYPE = 'StanfordCars' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + cls.ann_file = osp.join(cls.root, 'cars_annos.mat') + cls.meta_folder = 'devkit' + cls.train_ann_file = osp.join(cls.root, cls.meta_folder, + 'cars_train_annos.mat') + cls.test_ann_file = osp.join(cls.root, cls.meta_folder, + 'cars_test_annos_withlabels.mat') + cls.train_folder = 'cars_train' + cls.test_folder = 'cars_test' + + os.makedirs(osp.join(cls.root, cls.meta_folder)) + + cls.DEFAULT_ARGS = dict(data_root=cls.root, split='train') + + mat4py.savemat( + cls.ann_file, { + 'annotations': { + 'relative_im_path': + ['car_ims/001.jpg', 'car_ims/002.jpg', 'car_ims/003.jpg'], + 'class': [1, 2, 3], + 'test': [0, 0, 1] + } + }) + + mat4py.savemat( + cls.train_ann_file, { + 'annotations': { + 'fname': ['001.jpg', '002.jpg', '012.jpg'], + 'class': [10, 15, 150], + } + }) + + mat4py.savemat( + cls.test_ann_file, { + 'annotations': { + 'fname': ['025.jpg', '111.jpg', '222.jpg'], + 'class': [150, 1, 15], + } + }) + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test first way + # Test invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test valid splits + splits = ['train', 'test'] + for split in splits: + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = split + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, split) + self.assertEqual(dataset.data_root, self.root) + self.assertEqual(dataset.ann_file, self.ann_file) + + # Test second way + os.rename(self.ann_file, self.ann_file + 'copy') + # Test invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test valid splits + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'train' + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, 'train') + self.assertEqual(dataset.data_root, self.root) + self.assertEqual(dataset.ann_file, + osp.join(self.meta_folder, self.train_ann_file)) + + # Test valid splits + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'test' + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, 'test') + self.assertEqual(dataset.data_root, self.root) + self.assertEqual(dataset.ann_file, + osp.join(self.meta_folder, self.test_ann_file)) + + # wrong dataset organization + os.rename(self.train_ann_file, self.train_ann_file + 'copy') + os.rename(self.test_ann_file, self.test_ann_file + 'copy') + + with self.assertRaisesRegex(RuntimeError, + 'The dataset is incorrectly organized'): + cfg = {**self.DEFAULT_ARGS} + dataset_class(**cfg) + + with self.assertRaisesRegex(RuntimeError, + 'The dataset is incorrectly organized'): + cfg = {**self.DEFAULT_ARGS} + 
cfg['split'] = 'test' + dataset_class(**cfg) + + os.rename(self.train_ann_file + 'copy', self.train_ann_file) + os.rename(self.test_ann_file + 'copy', self.test_ann_file) + + os.rename(self.ann_file + 'copy', self.ann_file) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test first way + # Test default behavior + assert osp.exists(osp.join(self.root, 'cars_annos.mat')), osp.join( + self.root, 'cars_annos.mat') + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 2) + + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + osp.join(self.root, 'car_ims/001.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + # Test with split="test" + cfg = {**self.DEFAULT_ARGS, 'split': 'test'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 1) + + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + osp.join(self.root, 'car_ims/003.jpg')) + self.assertEqual(data_info['gt_label'], 2) + + # Test second way + os.rename(self.ann_file, self.ann_file + 'copy') + # Test with split="train" + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + osp.join(self.root, self.train_folder, '001.jpg')) + self.assertEqual(data_info['gt_label'], 9) + + # Test with split="test" + cfg = {**self.DEFAULT_ARGS, 'split': 'test'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + osp.join(self.root, self.test_folder, '025.jpg')) + self.assertEqual(data_info['gt_label'], 149) + + os.rename(self.ann_file + 'copy', self.ann_file) + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + + self.assertIn(f'Root of dataset: \t{dataset.data_root}', repr(dataset)) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class TestCaltech101(TestBaseDataset): + DATASET_TYPE = 'Caltech101' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + cls.image_folder = '101_ObjectCategories' + cls.meta_folder = 'meta' + cls.train_file = 'train.txt' + cls.test_file = 'test.txt' + + cls.DEFAULT_ARGS = dict(data_root=cls.root, split='train') + + os.makedirs(osp.join(cls.root, cls.meta_folder)) + + with open(osp.join(cls.root, cls.meta_folder, cls.train_file), + 'w') as f: + f.write('\n'.join([ + '1.jpg 0', + '2.jpg 1', + '3.jpg 2', + ])) + + with open(osp.join(cls.root, cls.meta_folder, cls.test_file), + 'w') as f: + f.write('\n'.join([ + '100.jpg 99', + '101.jpg 100', + '102.jpg 101', + '103.jpg 101', + ])) + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test valid splits + splits = ['train', 'test'] + for split in splits: + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = split + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, split) + self.assertEqual(dataset.data_root, self.root) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test default behavior + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 3) + + data_info = dataset[0] + 
self.assertEqual(data_info['img_path'], + osp.join(self.root, self.image_folder, '1.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + # Test with split="test" + cfg = {**self.DEFAULT_ARGS, 'split': 'test'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 4) + + data_info = dataset[0] + self.assertEqual(data_info['img_path'], + osp.join(self.root, self.image_folder, '100.jpg')) + self.assertEqual(data_info['gt_label'], 99) + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + + self.assertIn(f'Root of dataset: \t{dataset.data_root}', repr(dataset)) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class TestFood101(TestBaseDataset): + DATASET_TYPE = 'Food101' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + cls.image_folder = 'images' + cls.meta_folder = 'meta' + cls.train_file = 'train.txt' + cls.test_file = 'test.txt' + + os.makedirs(osp.join(cls.root, cls.meta_folder)) + + cls.DEFAULT_ARGS = dict(data_root=cls.root, split='train') + + with open(osp.join(cls.root, cls.meta_folder, cls.train_file), + 'w') as f: + f.write('\n'.join([ + 'apple_pie/0001', + 'baby_back_ribs/0002', + 'baklava/0003', + ])) + + with open(osp.join(cls.root, cls.meta_folder, cls.test_file), + 'w') as f: + f.write('\n'.join([ + 'beef_carpaccio/0004', + 'beef_tartare/0005', + 'beet_salad/0006', + ])) + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test valid splits + splits = ['train', 'test'] + for split in splits: + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = split + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, split) + self.assertEqual(dataset.data_root, self.root) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test default behavior + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 3) + + data_info = dataset[0] + self.assertEqual( + data_info['img_path'], + osp.join(self.root, self.image_folder, 'apple_pie', '0001.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + # Test with split="test" + cfg = {**self.DEFAULT_ARGS, 'split': 'test'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 3) + + data_info = dataset[0] + self.assertEqual( + data_info['img_path'], + osp.join(self.root, self.image_folder, 'beef_carpaccio', + '0004.jpg')) + self.assertEqual(data_info['gt_label'], 3) + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + + self.assertIn(f'Root of dataset: \t{dataset.data_root}', repr(dataset)) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() + + +class TestSUN397(TestBaseDataset): + DATASET_TYPE = 'SUN397' + + @classmethod + def setUpClass(cls) -> None: + super().setUpClass() + + tmpdir = tempfile.TemporaryDirectory() + cls.tmpdir = tmpdir + cls.root = tmpdir.name + cls.train_file = 'Training_01.txt' + cls.test_file = 'Testing_01.txt' + cls.data_prefix = 'SUN397' + cls.meta_folder = 'Partitions' + + os.makedirs(osp.join(cls.root, cls.meta_folder)) + + cls.DEFAULT_ARGS = dict(data_root=cls.root, split='train') + + with 
open(osp.join(cls.root, cls.meta_folder, cls.train_file), + 'w') as f: + f.write('\n'.join([ + '/a/abbey/sun_aqswjsnjlrfzzhiz.jpg', + '/a/airplane_cabin/sun_blczihbhbntqccux.jpg', + '/a/assembly_line/sun_ajckcfldgdrdjogj.jpg', + ])) + + with open(osp.join(cls.root, cls.meta_folder, cls.test_file), + 'w') as f: + f.write('\n'.join([ + '/a/abbey/sun_ajkqrqitspwywirx.jpg', + '/a/airplane_cabin/sun_aqylhacwdsqfjuuu.jpg', + '/a/auto_factory/sun_apfsprenzdnzbhmt.jpg', + '/b/baggage_claim/sun_avittiqqaiibgcau.jpg', + ])) + + def test_initialize(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test invalid split + with self.assertRaisesRegex(AssertionError, 'The split must be'): + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = 'unknown' + dataset_class(**cfg) + + # Test valid splits + splits = ['train', 'test'] + for split in splits: + cfg = {**self.DEFAULT_ARGS} + cfg['split'] = split + dataset = dataset_class(**cfg) + self.assertEqual(dataset.split, split) + self.assertEqual(dataset.data_root, self.root) + + def test_load_data_list(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + + # Test default behavior + dataset = dataset_class(**self.DEFAULT_ARGS) + self.assertEqual(len(dataset), 3) + data_info = dataset[0] + self.assertEqual( + data_info['img_path'], + osp.join(self.root, self.data_prefix, + 'a/abbey/sun_aqswjsnjlrfzzhiz.jpg')) + self.assertEqual(data_info['gt_label'], 0) + + # Test with split='test' + cfg = {**self.DEFAULT_ARGS, 'split': 'test'} + dataset = dataset_class(**cfg) + self.assertEqual(len(dataset), 4) + data_info = dataset[-1] + self.assertEqual( + data_info['img_path'], + osp.join(self.root, self.data_prefix, + 'b/baggage_claim/sun_avittiqqaiibgcau.jpg')) + self.assertEqual(data_info['gt_label'], 26) + + def test_extra_repr(self): + dataset_class = DATASETS.get(self.DATASET_TYPE) + cfg = {**self.DEFAULT_ARGS} + dataset = dataset_class(**cfg) + + self.assertIn(f'Root of dataset: \t{dataset.data_root}', repr(dataset)) + + @classmethod + def tearDownClass(cls): + cls.tmpdir.cleanup() diff --git a/tests/test_datasets/test_samplers/test_repeat_aug.py b/tests/test_datasets/test_samplers/test_repeat_aug.py new file mode 100644 index 0000000..01926e9 --- /dev/null +++ b/tests/test_datasets/test_samplers/test_repeat_aug.py @@ -0,0 +1,98 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +import math +from unittest import TestCase +from unittest.mock import patch + +import torch +from mmengine.logging import MMLogger + +from mmpretrain.datasets import RepeatAugSampler + +file = 'mmpretrain.datasets.samplers.repeat_aug.' 
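+
+
+# A minimal sketch of the index pattern asserted by the tests below, under
+# the assumption of no shuffling: every dataset index is repeated
+# `num_repeats` times, each rank then takes a strided slice of that sequence,
+# and only about len(dataset) / world_size entries are kept per rank. The
+# helper name is illustrative only and is not part of the sampler's API;
+# test_non_dist and test_dist build the same expected lists inline.
+def _expected_repeat_aug_indices(num_samples, num_repeats, rank, world_size):
+    # Repeat each index `num_repeats` times: 0, 0, 0, 1, 1, 1, ...
+    repeated = [i for i in range(num_samples) for _ in range(num_repeats)]
+    # Each rank takes every `world_size`-th entry, starting at its own rank.
+    per_rank = repeated[rank::world_size]
+    # Keep roughly one epoch worth of samples per rank (math imported above).
+    num_selected = math.ceil(num_samples / world_size)
+    return per_rank[:num_selected]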
+ + +class MockDist: + + def __init__(self, dist_info=(0, 1), seed=7): + self.dist_info = dist_info + self.seed = seed + + def get_dist_info(self): + return self.dist_info + + def sync_random_seed(self): + return self.seed + + def is_main_process(self): + return self.dist_info[0] == 0 + + +class TestRepeatAugSampler(TestCase): + + def setUp(self): + self.data_length = 100 + self.dataset = list(range(self.data_length)) + + @patch(file + 'get_dist_info', return_value=(0, 1)) + def test_non_dist(self, mock): + sampler = RepeatAugSampler(self.dataset, num_repeats=3, shuffle=False) + self.assertEqual(sampler.world_size, 1) + self.assertEqual(sampler.rank, 0) + self.assertEqual(sampler.total_size, self.data_length * 3) + self.assertEqual(sampler.num_samples, self.data_length * 3) + self.assertEqual(sampler.num_selected_samples, self.data_length) + self.assertEqual(len(sampler), sampler.num_selected_samples) + indices = [x for x in range(self.data_length) for _ in range(3)] + self.assertEqual(list(sampler), indices[:self.data_length]) + + logger = MMLogger.get_current_instance() + with self.assertLogs(logger, 'WARN') as log: + sampler = RepeatAugSampler(self.dataset, shuffle=False) + self.assertIn('always picks a fixed part', log.output[0]) + + @patch(file + 'get_dist_info', return_value=(2, 3)) + @patch(file + 'is_main_process', return_value=False) + def test_dist(self, mock1, mock2): + sampler = RepeatAugSampler(self.dataset, num_repeats=3, shuffle=False) + self.assertEqual(sampler.world_size, 3) + self.assertEqual(sampler.rank, 2) + self.assertEqual(sampler.num_samples, self.data_length) + self.assertEqual(sampler.total_size, self.data_length * 3) + self.assertEqual(sampler.num_selected_samples, + math.ceil(self.data_length / 3)) + self.assertEqual(len(sampler), sampler.num_selected_samples) + indices = [x for x in range(self.data_length) for _ in range(3)] + self.assertEqual( + list(sampler), indices[2::3][:sampler.num_selected_samples]) + + logger = MMLogger.get_current_instance() + with patch.object(logger, 'warning') as mock_log: + sampler = RepeatAugSampler(self.dataset, shuffle=False) + mock_log.assert_not_called() + + @patch(file + 'get_dist_info', return_value=(0, 1)) + @patch(file + 'sync_random_seed', return_value=7) + def test_shuffle(self, mock1, mock2): + # test seed=None + sampler = RepeatAugSampler(self.dataset, seed=None) + self.assertEqual(sampler.seed, 7) + + # test random seed + sampler = RepeatAugSampler(self.dataset, shuffle=True, seed=0) + sampler.set_epoch(10) + g = torch.Generator() + g.manual_seed(10) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + indices = [x for x in indices + for _ in range(3)][:sampler.num_selected_samples] + self.assertEqual(list(sampler), indices) + + sampler = RepeatAugSampler(self.dataset, shuffle=True, seed=42) + sampler.set_epoch(10) + g = torch.Generator() + g.manual_seed(42 + 10) + indices = torch.randperm(len(self.dataset), generator=g).tolist() + indices = [x for x in indices + for _ in range(3)][:sampler.num_selected_samples] + self.assertEqual(list(sampler), indices) diff --git a/tests/test_datasets/test_transforms/test_auto_augment.py b/tests/test_datasets/test_transforms/test_auto_augment.py new file mode 100644 index 0000000..d9f65c3 --- /dev/null +++ b/tests/test_datasets/test_transforms/test_auto_augment.py @@ -0,0 +1,1330 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy +import math +from unittest import TestCase +from unittest.mock import ANY, patch + +import numpy as np + +from mmpretrain.registry import TRANSFORMS + + +def construct_toy_data(): + img = np.random.randint(0, 256, (100, 200, 3), dtype=np.uint8) + results = dict() + # image + results['ori_img'] = img + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + return results + + +def construct_toy_data_photometric(): + img = np.array([[0, 128, 255], [1, 127, 254], [2, 129, 253]], + dtype=np.uint8) + img = np.stack([img, img, img], axis=-1) + results = dict() + # image + results['ori_img'] = img + results['img'] = img + results['img2'] = copy.deepcopy(img) + results['img_shape'] = img.shape + results['ori_shape'] = img.shape + results['img_fields'] = ['img', 'img2'] + return results + + +class TestAutoAugment(TestCase): + + def test_construct(self): + policies = [[ + dict(type='Posterize', bits=4, prob=0.4), + dict(type='Rotate', angle=30., prob=0.6) + ]] + + cfg = dict(type='AutoAugment', policies=policies) + transform = TRANSFORMS.build(cfg) + results = construct_toy_data() + with patch.object(transform.transforms[0], 'transform') as mock: + transform(results) + mock.assert_called_once() + + cfg = dict(type='AutoAugment', policies='imagenet') + transform = TRANSFORMS.build(cfg) + with patch.object(transform.transforms[5], 'transform') as mock: + with patch('numpy.random', np.random.RandomState(1)): + transform(results) + mock.assert_called() + + # test hparams + cfg = dict( + type='AutoAugment', + policies=policies, + hparams=dict(pad_val=[255, 255, 255])) + transform = TRANSFORMS.build(cfg) + self.assertEqual(transform.policies[0][1]['pad_val'], [255, 255, 255]) + self.assertNotIn('pad_val', transform.policies[0][0]) + + with self.assertRaisesRegex(AssertionError, 'choose from .*imagenet'): + cfg = dict(type='AutoAugment', policies='unknown') + transform = TRANSFORMS.build(cfg) + + def test_repr(self): + policies = [[ + dict(type='Posterize', bits=4, prob=0.4), + dict(type='Rotate', angle=30., prob=0.6) + ]] + + cfg = dict(type='AutoAugment', policies=policies) + transform = TRANSFORMS.build(cfg) + self.assertIn('Posterize, \tRotate', repr(transform)) + + +class TestRandAugment(TestCase): + DEFAULT_ARGS = dict( + type='RandAugment', + magnitude_level=7, + num_policies=1, + policies='timm_increasing') + + def test_construct(self): + policies = [ + dict(type='Posterize', magnitude_range=(4, 0)), + dict(type='Rotate', magnitude_range=(0, 30)) + ] + + cfg = {**self.DEFAULT_ARGS, 'policies': policies} + transform = TRANSFORMS.build(cfg) + self.assertEqual(len(list(transform)), 2) + results = construct_toy_data() + with patch.object(transform.transforms[1], 'transform') as mock: + with patch('numpy.random', np.random.RandomState(1)): + transform(results) + mock.assert_called_once() + + cfg = {**self.DEFAULT_ARGS, 'policies': 'timm_increasing'} + transform = TRANSFORMS.build(cfg) + with patch.object(transform.transforms[5], 'transform') as mock: + with patch('numpy.random', np.random.RandomState(1)): + transform(results) + mock.assert_called() + + # test hparams + cfg = { + **self.DEFAULT_ARGS, + 'policies': policies, + 'hparams': dict(pad_val=[255, 255, 255]), + } + transform = TRANSFORMS.build(cfg) + self.assertEqual(transform.policies[1]['pad_val'], [255, 255, 255]) + self.assertNotIn('pad_val', transform.policies[0]) + + # test magnitude related parameters + cfg = 
{ + **self.DEFAULT_ARGS, 'policies': [ + dict(type='Equalize'), + dict(type='Rotate', magnitude_range=(0, 30)) + ] + } + transform = TRANSFORMS.build(cfg) + self.assertNotIn('magnitude_range', transform.policies[0]) + self.assertNotIn('magnitude_level', transform.policies[0]) + self.assertNotIn('magnitude_range', transform.policies[0]) + self.assertNotIn('total_level', transform.policies[0]) + self.assertEqual(transform.policies[1]['magnitude_range'], (0, 30)) + self.assertEqual(transform.policies[1]['magnitude_level'], 7) + self.assertEqual(transform.policies[1]['magnitude_std'], 0.) + self.assertEqual(transform.policies[1]['total_level'], 10) + + # test invalid policies + with self.assertRaisesRegex(AssertionError, + 'choose from .*timm_increasing'): + cfg = {**self.DEFAULT_ARGS, 'policies': 'unknown'} + transform = TRANSFORMS.build(cfg) + + # test invalid magnitude_std + with self.assertRaisesRegex(AssertionError, 'got "unknown" instead'): + cfg = {**self.DEFAULT_ARGS, 'magnitude_std': 'unknown'} + transform = TRANSFORMS.build(cfg) + + def test_repr(self): + policies = [ + dict(type='Posterize', magnitude_range=(4, 0)), + dict(type='Equalize') + ] + + cfg = {**self.DEFAULT_ARGS, 'policies': policies} + transform = TRANSFORMS.build(cfg) + self.assertIn(' Posterize (4, 0)\n Equalize\n', repr(transform)) + + +class TestShear(TestCase): + DEFAULT_ARGS = dict(type='Shear') + + def test_initialize(self): + with self.assertRaisesRegex(AssertionError, 'only one of'): + TRANSFORMS.build(self.DEFAULT_ARGS) + + with self.assertRaisesRegex(AssertionError, 'only one of'): + cfg = { + **self.DEFAULT_ARGS, 'magnitude': 1, + 'magnitude_range': (1, 2) + } + TRANSFORMS.build(cfg) + + with self.assertRaisesRegex(AssertionError, 'got "unknown" instead'): + cfg = {**self.DEFAULT_ARGS, 'magnitude': 1, 'direction': 'unknown'} + TRANSFORMS.build(cfg) + + def test_transform(self): + # test params inputs + with patch('mmcv.imshear') as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.2, + 'random_negative_prob': 0., + 'prob': 1., + 'direction': 'horizontal', + 'pad_val': 255, + 'interpolation': 'nearest', + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, + 0.2, + direction='horizontal', + border_value=255, + interpolation='nearest') + + # test random_negative_prob + with patch('mmcv.imshear') as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.2, + 'random_negative_prob': 1., + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, -0.2, direction=ANY, border_value=ANY, interpolation=ANY) + + # test prob + with patch('mmcv.imshear') as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.2, + 'random_negative_prob': 0., + 'prob': 0., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test sequeue pad_val + with patch('mmcv.imshear') as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.2, + 'random_negative_prob': 0., + 'prob': 1., + 'direction': 'horizontal', + 'pad_val': (255, 255, 255), + 'interpolation': 'nearest', + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, + 0.2, + direction='horizontal', + border_value=(255, 255, 255), + interpolation='nearest') + + # test magnitude_range + with patch('mmcv.imshear') as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 0.3), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + 
ANY, 0.18, direction=ANY, border_value=ANY, interpolation=ANY) + + # test magnitude_std is positive + with patch('mmcv.imshear') as mock: + cfg = { + **self.DEFAULT_ARGS, 'random_negative_prob': 0., + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 0.3), + 'magnitude_std': 1 + } + with patch('numpy.random', np.random.RandomState(1)): + TRANSFORMS.build(cfg)(construct_toy_data()) + self.assertAlmostEqual(mock.call_args[0][1], 0.1811, places=4) + + # test magnitude_std = 'inf' + with patch('mmcv.imshear') as mock: + cfg = { + **self.DEFAULT_ARGS, 'random_negative_prob': 0., + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 0.3), + 'magnitude_std': 'inf' + } + with patch('numpy.random', np.random.RandomState(9)): + TRANSFORMS.build(cfg)(construct_toy_data()) + self.assertAlmostEqual(mock.call_args[0][1], 0.0882, places=4) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'magnitude': 0.1} + transform = TRANSFORMS.build(cfg) + self.assertIn('Shear(magnitude=0.1', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0, 0.3)} + transform = TRANSFORMS.build(cfg) + self.assertIn('Shear(magnitude=None', repr(transform)) + self.assertIn('magnitude_range=(0, 0.3)', repr(transform)) + + +class TestTranslate(TestCase): + DEFAULT_ARGS = dict(type='Translate') + + def test_initialize(self): + with self.assertRaisesRegex(AssertionError, 'only one of'): + TRANSFORMS.build(self.DEFAULT_ARGS) + + with self.assertRaisesRegex(AssertionError, 'only one of'): + cfg = { + **self.DEFAULT_ARGS, 'magnitude': 1, + 'magnitude_range': (1, 2) + } + TRANSFORMS.build(cfg) + + with self.assertRaisesRegex(AssertionError, 'got "unknown" instead'): + cfg = {**self.DEFAULT_ARGS, 'magnitude': 1, 'direction': 'unknown'} + TRANSFORMS.build(cfg) + + def test_transform(self): + transform_func = 'mmcv.imtranslate' + + # test params inputs + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.2, + 'random_negative_prob': 0., + 'prob': 1., + 'direction': 'horizontal', + 'pad_val': 255, + 'interpolation': 'nearest', + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, + 200 * 0.2, + direction='horizontal', + border_value=255, + interpolation='nearest') + + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.2, + 'random_negative_prob': 0., + 'prob': 1., + 'direction': 'vertical', + 'pad_val': 255, + 'interpolation': 'nearest', + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, + 100 * 0.2, + direction='vertical', + border_value=255, + interpolation='nearest') + + # test sequeue pad_val + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.2, + 'random_negative_prob': 0., + 'prob': 1., + 'direction': 'horizontal', + 'pad_val': [255, 255, 255], + 'interpolation': 'nearest', + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, + 200 * 0.2, + direction='horizontal', + border_value=(255, 255, 255), + interpolation='nearest') + + # test prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.2, + 'random_negative_prob': 0., + 'prob': 0., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test random_negative_prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.2, + 'random_negative_prob': 1., + 'prob': 1., + } + 
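For reference, the pixel offsets asserted around this point (200 * 0.2, 100 * 0.2, -0.2 * 200, 0.18 * 200) all come from scaling the translate magnitude by the toy image's width or height and optionally flipping its sign. A minimal sketch of that arithmetic, using an illustrative helper name rather than the library's own code:

import numpy as np

# Illustrative only: how a Translate magnitude maps to the pixel offset that
# mmcv.imtranslate is expected to receive in the nearby assertions.
def translate_offset(img, magnitude, direction='horizontal', negative=False):
    h, w = img.shape[:2]
    offset = magnitude * (w if direction == 'horizontal' else h)
    return -offset if negative else offset

toy = np.zeros((100, 200, 3), dtype=np.uint8)   # same shape as construct_toy_data()
assert translate_offset(toy, 0.2) == 200 * 0.2                        # horizontal
assert translate_offset(toy, 0.2, direction='vertical') == 100 * 0.2  # vertical
assert translate_offset(toy, 0.2, negative=True) == -0.2 * 200        # random_negative_prob=1.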
TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, + -0.2 * 200, + direction=ANY, + border_value=ANY, + interpolation=ANY) + + # test magnitude_range + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 0.3), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, + 0.18 * 200, + direction=ANY, + border_value=ANY, + interpolation=ANY) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'magnitude': 0.1} + transform = TRANSFORMS.build(cfg) + self.assertIn('Translate(magnitude=0.1', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0, 0.3)} + transform = TRANSFORMS.build(cfg) + self.assertIn('Translate(magnitude=None', repr(transform)) + self.assertIn('magnitude_range=(0, 0.3)', repr(transform)) + + +class TestRotate(TestCase): + DEFAULT_ARGS = dict(type='Rotate') + + def test_initialize(self): + with self.assertRaisesRegex(AssertionError, 'only one of'): + TRANSFORMS.build(self.DEFAULT_ARGS) + + with self.assertRaisesRegex(AssertionError, 'only one of'): + cfg = {**self.DEFAULT_ARGS, 'angle': 30, 'magnitude_range': (1, 2)} + TRANSFORMS.build(cfg) + + def test_transform(self): + transform_func = 'mmcv.imrotate' + + # test params inputs + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'angle': 30, + 'center': (10, 10), + 'random_negative_prob': 0., + 'prob': 1., + 'scale': 1.5, + 'pad_val': 255, + 'interpolation': 'bilinear', + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, + 30, + center=(10, 10), + scale=1.5, + border_value=255, + interpolation='bilinear') + + # test params inputs + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'angle': 30, + 'center': (10, 10), + 'random_negative_prob': 0., + 'prob': 1., + 'scale': 1.5, + 'pad_val': (255, 255, 255), + 'interpolation': 'bilinear', + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, + 30, + center=(10, 10), + scale=1.5, + border_value=(255, 255, 255), + interpolation='bilinear') + + # test prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'angle': 30, + 'random_negative_prob': 0., + 'prob': 0., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test random_negative_prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'angle': 30, + 'random_negative_prob': 1., + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, + -30, + center=ANY, + scale=ANY, + border_value=ANY, + interpolation=ANY) + + # test magnitude_range + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 30), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, + 18, + center=ANY, + scale=ANY, + border_value=ANY, + interpolation=ANY) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'angle': 30} + transform = TRANSFORMS.build(cfg) + self.assertIn('Rotate(angle=30', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0, 30)} + transform = TRANSFORMS.build(cfg) + self.assertIn('Rotate(angle=None', repr(transform)) + self.assertIn('magnitude_range=(0, 30)', 
repr(transform)) + + +class TestAutoContrast(TestCase): + DEFAULT_ARGS = dict(type='AutoContrast') + + def test_transform(self): + transform_func = 'mmcv.auto_contrast' + + # test prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 0., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test random_negative_prob + # No effect + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 1., + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY) + + # test magnitude_range + # No effect + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 30), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'prob': 0.5} + transform = TRANSFORMS.build(cfg) + self.assertIn('AutoContrast(prob=0.5)', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0, 30)} + transform = TRANSFORMS.build(cfg) + self.assertIn('AutoContrast(prob=', repr(transform)) + self.assertNotIn('magnitude_range=(0, 30)', repr(transform)) + + +class TestInvert(TestCase): + DEFAULT_ARGS = dict(type='Invert') + + def test_transform(self): + transform_func = 'mmcv.iminvert' + + # test prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 0., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test random_negative_prob + # No effect + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 1., + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY) + + # test magnitude_range + # No effect + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 30), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'prob': 0.5} + transform = TRANSFORMS.build(cfg) + self.assertIn('Invert(prob=0.5)', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0, 30)} + transform = TRANSFORMS.build(cfg) + self.assertIn('Invert(prob=', repr(transform)) + self.assertNotIn('magnitude_range=(0, 30)', repr(transform)) + + +class TestEqualize(TestCase): + DEFAULT_ARGS = dict(type='Equalize') + + def test_transform(self): + transform_func = 'mmcv.imequalize' + + # test prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 0., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test random_negative_prob + # No effect + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 1., + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY) + + # test magnitude_range + # No effect + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 30), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY) + + 
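The photometric tests above (AutoContrast, Invert, Equalize) never compare pixel values; they patch the underlying mmcv function and inspect the recorded call instead. A self-contained illustration of that unittest.mock pattern, using math.cos as a stand-in target purely for demonstration:

from unittest.mock import ANY, patch
import math

def wrapper(x):
    # Stand-in for "the transform forwards its arguments to mmcv.xxx".
    return math.cos(2 * x)

with patch('math.cos') as mock:           # math.cos is a MagicMock inside this block
    wrapper(0.5)
    mock.assert_called_once_with(1.0)     # verifies only the forwarded argument
    mock.assert_called_once_with(ANY)     # ANY matches any single positional argument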
def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'prob': 0.5} + transform = TRANSFORMS.build(cfg) + self.assertIn('Equalize(prob=0.5)', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0, 30)} + transform = TRANSFORMS.build(cfg) + self.assertIn('Equalize(prob=', repr(transform)) + self.assertNotIn('magnitude_range=(0, 30)', repr(transform)) + + +class TestSolarize(TestCase): + DEFAULT_ARGS = dict(type='Solarize') + + def test_initialize(self): + with self.assertRaisesRegex(AssertionError, 'only one of'): + TRANSFORMS.build(self.DEFAULT_ARGS) + + with self.assertRaisesRegex(AssertionError, 'only one of'): + cfg = {**self.DEFAULT_ARGS, 'thr': 1, 'magnitude_range': (1, 2)} + TRANSFORMS.build(cfg) + + def test_transform(self): + transform_func = 'mmcv.solarize' + + # test params inputs + with patch(transform_func) as mock: + cfg = {**self.DEFAULT_ARGS, 'thr': 128, 'prob': 1.} + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, thr=128) + + # test prob + with patch(transform_func) as mock: + cfg = {**self.DEFAULT_ARGS, 'thr': 128, 'prob': 0.} + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test random_negative_prob + # cannot accept `random_negative_prob` argument + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'thr': 128, + 'random_negative_prob': 1., + 'prob': 1., + } + with self.assertRaisesRegex(TypeError, 'multiple values'): + TRANSFORMS.build(cfg)(construct_toy_data()) + + # test magnitude_range + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (256, 0), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, thr=256 * 0.4) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'thr': 128} + transform = TRANSFORMS.build(cfg) + self.assertIn('Solarize(thr=128', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (256, 0)} + transform = TRANSFORMS.build(cfg) + self.assertIn('Solarize(thr=None', repr(transform)) + self.assertIn('magnitude_range=(256, 0)', repr(transform)) + + +class TestSolarizeAdd(TestCase): + DEFAULT_ARGS = dict(type='SolarizeAdd') + + def test_initialize(self): + with self.assertRaisesRegex(AssertionError, 'only one of'): + TRANSFORMS.build(self.DEFAULT_ARGS) + + with self.assertRaisesRegex(AssertionError, 'only one of'): + cfg = { + **self.DEFAULT_ARGS, 'magnitude': 1, + 'magnitude_range': (1, 2) + } + TRANSFORMS.build(cfg) + + with self.assertRaisesRegex(AssertionError, 'str'): + cfg = {**self.DEFAULT_ARGS, 'magnitude': 1, 'thr': 'hi'} + TRANSFORMS.build(cfg) + + def test_transform(self): + + # test params inputs + cfg = {**self.DEFAULT_ARGS, 'magnitude': 100, 'thr': 128, 'prob': 1.} + results = construct_toy_data_photometric() + expected = np.where(results['img'] < 128, + np.minimum(results['img'] + 100, 255), + results['img']) + TRANSFORMS.build(cfg)(results) + np.testing.assert_allclose(results['img'], expected) + + # test prob + cfg = {**self.DEFAULT_ARGS, 'magnitude': 100, 'thr': 128, 'prob': 0.} + results = construct_toy_data_photometric() + expected = copy.deepcopy(results['img']) + TRANSFORMS.build(cfg)(results) + np.testing.assert_allclose(results['img'], expected) + + # test random_negative_prob + # cannot accept `random_negative_prob` argument + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 100, + 'thr': 128, + 
'random_negative_prob': 1., + 'prob': 1., + } + with self.assertRaisesRegex(TypeError, 'multiple values'): + TRANSFORMS.build(cfg)(construct_toy_data()) + + # test magnitude_range + cfg = { + **self.DEFAULT_ARGS, + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 110), + } + results = construct_toy_data_photometric() + expected = np.where(results['img'] < 128, + np.minimum(results['img'] + 110 * 0.6, 255), + results['img']) + TRANSFORMS.build(cfg)(results) + np.testing.assert_allclose(results['img'], expected) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'magnitude': 100} + transform = TRANSFORMS.build(cfg) + self.assertIn('SolarizeAdd(magnitude=100', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0, 110)} + transform = TRANSFORMS.build(cfg) + self.assertIn('SolarizeAdd(magnitude=None', repr(transform)) + self.assertIn('magnitude_range=(0, 110)', repr(transform)) + + +class TestPosterize(TestCase): + DEFAULT_ARGS = dict(type='Posterize') + + def test_initialize(self): + with self.assertRaisesRegex(AssertionError, 'only one of'): + TRANSFORMS.build(self.DEFAULT_ARGS) + + with self.assertRaisesRegex(AssertionError, 'only one of'): + cfg = {**self.DEFAULT_ARGS, 'bits': 1, 'magnitude_range': (1, 2)} + TRANSFORMS.build(cfg) + + with self.assertRaisesRegex(AssertionError, 'got 100 instead'): + cfg = {**self.DEFAULT_ARGS, 'bits': 100} + TRANSFORMS.build(cfg) + + def test_transform(self): + transform_func = 'mmcv.posterize' + + # test params inputs + with patch(transform_func) as mock: + cfg = {**self.DEFAULT_ARGS, 'bits': 4, 'prob': 1.} + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, bits=4) + + # test prob + with patch(transform_func) as mock: + cfg = {**self.DEFAULT_ARGS, 'bits': 4, 'prob': 0.} + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test random_negative_prob + # cannot accept `random_negative_prob` argument + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'bits': 4, + 'random_negative_prob': 1., + 'prob': 1., + } + with self.assertRaisesRegex(TypeError, 'multiple values'): + TRANSFORMS.build(cfg)(construct_toy_data()) + + # test magnitude_range + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (4, 0), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, bits=math.ceil(4 * 0.4)) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'bits': 4} + transform = TRANSFORMS.build(cfg) + self.assertIn('Posterize(bits=4', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (4, 0)} + transform = TRANSFORMS.build(cfg) + self.assertIn('Posterize(bits=None', repr(transform)) + self.assertIn('magnitude_range=(4, 0)', repr(transform)) + + +class TestContrast(TestCase): + DEFAULT_ARGS = dict(type='Contrast') + + def test_initialize(self): + with self.assertRaisesRegex(AssertionError, 'only one of'): + TRANSFORMS.build(self.DEFAULT_ARGS) + + with self.assertRaisesRegex(AssertionError, 'only one of'): + cfg = { + **self.DEFAULT_ARGS, 'magnitude': 1, + 'magnitude_range': (1, 2) + } + TRANSFORMS.build(cfg) + + def test_transform(self): + transform_func = 'mmcv.adjust_contrast' + + # test params inputs + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.5, + 'random_negative_prob': 0., + 'prob': 1., + } + 
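The expected values in the magnitude_range cases throughout this file (0.18, 256 * 0.4, math.ceil(4 * 0.4), 110 * 0.6, 1 + 0.6 * 0.5, 40 * 0.6 + 1, ...) all appear to follow one linear interpolation of magnitude_level over each policy's magnitude_range. A sketch of that arithmetic with an illustrative function name, compared loosely to avoid float-equality pitfalls:

import math

# Illustrative only: magnitude_level / total_level interpolated over
# magnitude_range, which is what the expected call arguments encode.
def level_to_magnitude(level, magnitude_range, total_level=10):
    low, high = magnitude_range
    return level / total_level * (high - low) + low

assert math.isclose(level_to_magnitude(6, (0, 0.3)), 0.18)        # Shear
assert math.isclose(level_to_magnitude(6, (0, 30)), 18)           # Rotate
assert math.isclose(level_to_magnitude(6, (256, 0)), 256 * 0.4)   # Solarize, reversed range
assert math.isclose(level_to_magnitude(6, (0, 0.5)), 0.6 * 0.5)   # Contrast -> factor 1 + 0.3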
TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, factor=1 + 0.5) + + # test prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.5, + 'random_negative_prob': 0., + 'prob': 0., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test random_negative_prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.5, + 'random_negative_prob': 1., + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, factor=1 - 0.5) + + # test magnitude_range + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 0.5), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, factor=1 + 0.6 * 0.5) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'magnitude': 0.1} + transform = TRANSFORMS.build(cfg) + self.assertIn('Contrast(magnitude=0.1', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0, 0.3)} + transform = TRANSFORMS.build(cfg) + self.assertIn('Contrast(magnitude=None', repr(transform)) + self.assertIn('magnitude_range=(0, 0.3)', repr(transform)) + + +class TestColorTransform(TestCase): + DEFAULT_ARGS = dict(type='ColorTransform') + + def test_initialize(self): + with self.assertRaisesRegex(AssertionError, 'only one of'): + TRANSFORMS.build(self.DEFAULT_ARGS) + + with self.assertRaisesRegex(AssertionError, 'only one of'): + cfg = { + **self.DEFAULT_ARGS, 'magnitude': 1, + 'magnitude_range': (1, 2) + } + TRANSFORMS.build(cfg) + + def test_transform(self): + transform_func = 'mmcv.adjust_color' + + # test params inputs + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.5, + 'random_negative_prob': 0., + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, alpha=1 + 0.5) + + # test prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.5, + 'random_negative_prob': 0., + 'prob': 0., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test random_negative_prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.5, + 'random_negative_prob': 1., + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, alpha=1 - 0.5) + + # test magnitude_range + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 0.5), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, alpha=1 + 0.6 * 0.5) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'magnitude': 0.1} + transform = TRANSFORMS.build(cfg) + self.assertIn('ColorTransform(magnitude=0.1', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0, 0.3)} + transform = TRANSFORMS.build(cfg) + self.assertIn('ColorTransform(magnitude=None', repr(transform)) + self.assertIn('magnitude_range=(0, 0.3)', repr(transform)) + + +class TestBrightness(TestCase): + DEFAULT_ARGS = dict(type='Brightness') + + def test_initialize(self): + with self.assertRaisesRegex(AssertionError, 'only one of'): + TRANSFORMS.build(self.DEFAULT_ARGS) + + with 
self.assertRaisesRegex(AssertionError, 'only one of'): + cfg = { + **self.DEFAULT_ARGS, 'magnitude': 1, + 'magnitude_range': (1, 2) + } + TRANSFORMS.build(cfg) + + def test_transform(self): + transform_func = 'mmcv.adjust_brightness' + + # test params inputs + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.5, + 'random_negative_prob': 0., + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, factor=1 + 0.5) + + # test prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.5, + 'random_negative_prob': 0., + 'prob': 0., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test random_negative_prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.5, + 'random_negative_prob': 1., + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, factor=1 - 0.5) + + # test magnitude_range + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 0.5), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, factor=1 + 0.6 * 0.5) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'magnitude': 0.1} + transform = TRANSFORMS.build(cfg) + self.assertIn('Brightness(magnitude=0.1', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0, 0.3)} + transform = TRANSFORMS.build(cfg) + self.assertIn('Brightness(magnitude=None', repr(transform)) + self.assertIn('magnitude_range=(0, 0.3)', repr(transform)) + + +class TestSharpness(TestCase): + DEFAULT_ARGS = dict(type='Sharpness') + + def test_initialize(self): + with self.assertRaisesRegex(AssertionError, 'only one of'): + TRANSFORMS.build(self.DEFAULT_ARGS) + + with self.assertRaisesRegex(AssertionError, 'only one of'): + cfg = { + **self.DEFAULT_ARGS, 'magnitude': 1, + 'magnitude_range': (1, 2) + } + TRANSFORMS.build(cfg) + + def test_transform(self): + transform_func = 'mmcv.adjust_sharpness' + + # test params inputs + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.5, + 'random_negative_prob': 0., + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, factor=1 + 0.5) + + # test prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.5, + 'random_negative_prob': 0., + 'prob': 0., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test random_negative_prob + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude': 0.5, + 'random_negative_prob': 1., + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, factor=1 - 0.5) + + # test magnitude_range + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'random_negative_prob': 0., + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (0, 0.5), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, factor=1 + 0.6 * 0.5) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'magnitude': 0.1} + transform = TRANSFORMS.build(cfg) + self.assertIn('Sharpness(magnitude=0.1', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0, 0.3)} + transform = 
TRANSFORMS.build(cfg) + self.assertIn('Sharpness(magnitude=None', repr(transform)) + self.assertIn('magnitude_range=(0, 0.3)', repr(transform)) + + +class TestCutout(TestCase): + DEFAULT_ARGS = dict(type='Cutout') + + def test_initialize(self): + with self.assertRaisesRegex(AssertionError, 'only one of'): + TRANSFORMS.build(self.DEFAULT_ARGS) + + with self.assertRaisesRegex(AssertionError, 'only one of'): + cfg = { + **self.DEFAULT_ARGS, 'shape': 10, + 'magnitude_range': (10, 20) + } + TRANSFORMS.build(cfg) + + def test_transform(self): + transform_func = 'mmcv.cutout' + + # test params inputs + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'shape': (10, 15), + 'prob': 1., + 'pad_val': 255, + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, (10, 15), pad_val=255) + + # test prob + with patch(transform_func) as mock: + cfg = {**self.DEFAULT_ARGS, 'shape': 10, 'prob': 0.} + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test sequeue pad_val + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'shape': (10, 15), + 'prob': 1., + 'pad_val': [255, 255, 255], + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with( + ANY, (10, 15), pad_val=(255, 255, 255)) + + # test random_negative_prob + # cannot accept `random_negative_prob` argument + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'shape': 10, + 'random_negative_prob': 1., + 'prob': 1., + } + with self.assertRaisesRegex(TypeError, 'multiple values'): + TRANSFORMS.build(cfg)(construct_toy_data()) + + # test magnitude_range + with patch(transform_func) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'prob': 1., + 'magnitude_level': 6, + 'magnitude_range': (1, 41), + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(ANY, 40 * 0.6 + 1, pad_val=ANY) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'shape': 15} + transform = TRANSFORMS.build(cfg) + self.assertIn('Cutout(shape=15', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0, 41)} + transform = TRANSFORMS.build(cfg) + self.assertIn('Cutout(shape=None', repr(transform)) + self.assertIn('magnitude_range=(0, 41)', repr(transform)) + + +class TestGaussianBlur(TestCase): + DEFAULT_ARGS = dict(type='GaussianBlur') + + def test_initialize(self): + with self.assertRaisesRegex(AssertionError, 'only one of'): + TRANSFORMS.build(self.DEFAULT_ARGS) + + with self.assertRaisesRegex(AssertionError, 'only one of'): + cfg = {**self.DEFAULT_ARGS, 'radius': 1, 'magnitude_range': (1, 2)} + TRANSFORMS.build(cfg) + + def test_transform(self): + transform_func = 'PIL.ImageFilter.GaussianBlur' + from PIL.ImageFilter import GaussianBlur + + # test params inputs + with patch(transform_func, wraps=GaussianBlur) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'radius': 0.5, + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_called_once_with(radius=0.5) + + # test prob + with patch(transform_func, wraps=GaussianBlur) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'radius': 0.5, + 'prob': 0., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + mock.assert_not_called() + + # test magnitude_range + with patch(transform_func, wraps=GaussianBlur) as mock: + cfg = { + **self.DEFAULT_ARGS, + 'magnitude_range': (0.1, 2), + 'magnitude_std': 'inf', + 'prob': 1., + } + TRANSFORMS.build(cfg)(construct_toy_data()) + self.assertTrue(0.1 < 
mock.call_args[1]['radius'] < 2) + + def test_repr(self): + cfg = {**self.DEFAULT_ARGS, 'radius': 0.1} + transform = TRANSFORMS.build(cfg) + self.assertIn('GaussianBlur(radius=0.1, prob=0.5', repr(transform)) + self.assertNotIn('magnitude_range', repr(transform)) + + cfg = {**self.DEFAULT_ARGS, 'magnitude_range': (0.1, 2)} + transform = TRANSFORMS.build(cfg) + self.assertIn('GaussianBlur(radius=None, prob=0.5', repr(transform)) + self.assertIn('magnitude_range=(0.1, 2)', repr(transform)) diff --git a/tests/test_datasets/test_transforms/test_formatting.py b/tests/test_datasets/test_transforms/test_formatting.py new file mode 100644 index 0000000..e515c6d --- /dev/null +++ b/tests/test_datasets/test_transforms/test_formatting.py @@ -0,0 +1,219 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os.path as osp +import unittest + +import mmcv +import numpy as np +import torch +from PIL import Image + +from mmpretrain.registry import TRANSFORMS +from mmpretrain.structures import DataSample, MultiTaskDataSample + + +class TestPackInputs(unittest.TestCase): + + def test_transform(self): + img_path = osp.join(osp.dirname(__file__), '../../data/color.jpg') + data = { + 'sample_idx': 1, + 'img_path': img_path, + 'ori_shape': (300, 400), + 'img_shape': (300, 400), + 'scale_factor': 1.0, + 'flip': False, + 'img': mmcv.imread(img_path), + 'gt_label': 2, + 'custom_key': torch.tensor([1, 2, 3]) + } + + cfg = dict(type='PackInputs', algorithm_keys=['custom_key']) + transform = TRANSFORMS.build(cfg) + results = transform(copy.deepcopy(data)) + self.assertIn('inputs', results) + self.assertIsInstance(results['inputs'], torch.Tensor) + self.assertIn('data_samples', results) + self.assertIsInstance(results['data_samples'], DataSample) + self.assertIn('flip', results['data_samples'].metainfo_keys()) + self.assertIsInstance(results['data_samples'].gt_label, torch.Tensor) + self.assertIsInstance(results['data_samples'].custom_key, torch.Tensor) + + # Test grayscale image + data['img'] = data['img'].mean(-1) + results = transform(copy.deepcopy(data)) + self.assertIn('inputs', results) + self.assertIsInstance(results['inputs'], torch.Tensor) + self.assertEqual(results['inputs'].shape, (1, 300, 400)) + + # Test video input + data['img'] = np.random.randint( + 0, 256, (10, 3, 1, 224, 224), dtype=np.uint8) + results = transform(copy.deepcopy(data)) + self.assertIn('inputs', results) + self.assertIsInstance(results['inputs'], torch.Tensor) + self.assertEqual(results['inputs'].shape, (10, 3, 1, 224, 224)) + + # Test Pillow input + data['img'] = Image.open(img_path) + results = transform(copy.deepcopy(data)) + self.assertIn('inputs', results) + self.assertIsInstance(results['inputs'], torch.Tensor) + self.assertEqual(results['inputs'].shape, (3, 300, 400)) + + # Test without `img` and `gt_label` + del data['img'] + del data['gt_label'] + results = transform(copy.deepcopy(data)) + self.assertNotIn('gt_label', results['data_samples']) + + def test_repr(self): + cfg = dict(type='PackInputs', meta_keys=['flip', 'img_shape']) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), "PackInputs(input_key='img', algorithm_keys=(), " + "meta_keys=['flip', 'img_shape'])") + + +class TestTranspose(unittest.TestCase): + + def test_transform(self): + cfg = dict(type='Transpose', keys=['img'], order=[2, 0, 1]) + transform = TRANSFORMS.build(cfg) + + data = {'img': np.random.randint(0, 256, (224, 224, 3), dtype='uint8')} + + results = transform(copy.deepcopy(data)) + 
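The shape checks in TestPackInputs above, and the (3, 224, 224) check just below, boil down to channel-first packing of an HWC or grayscale array. A rough sketch of that conversion, assuming only what the asserted shapes imply rather than the library's actual packing code:

import numpy as np
import torch

# Rough sketch only: HWC / grayscale ndarray -> channel-first tensor.
def to_chw_tensor(img):
    if img.ndim == 2:                                   # grayscale -> (1, H, W)
        img = img[..., None]
    img = np.ascontiguousarray(img.transpose(2, 0, 1))  # HWC -> CHW
    return torch.from_numpy(img)

print(to_chw_tensor(np.zeros((300, 400, 3), np.uint8)).shape)   # torch.Size([3, 300, 400])
print(to_chw_tensor(np.zeros((300, 400), np.uint8)).shape)      # torch.Size([1, 300, 400])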
self.assertEqual(results['img'].shape, (3, 224, 224)) + + def test_repr(self): + cfg = dict(type='Transpose', keys=['img'], order=(2, 0, 1)) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), "Transpose(keys=['img'], order=(2, 0, 1))") + + +class TestToPIL(unittest.TestCase): + + def test_transform(self): + cfg = dict(type='ToPIL') + transform = TRANSFORMS.build(cfg) + + data = {'img': np.random.randint(0, 256, (224, 224, 3), dtype='uint8')} + + results = transform(copy.deepcopy(data)) + self.assertIsInstance(results['img'], Image.Image) + + cfg = dict(type='ToPIL', to_rgb=True) + transform = TRANSFORMS.build(cfg) + + data = {'img': np.random.randint(0, 256, (224, 224, 3), dtype='uint8')} + + results = transform(copy.deepcopy(data)) + self.assertIsInstance(results['img'], Image.Image) + np.equal(np.array(results['img']), data['img'][:, :, ::-1]) + + def test_repr(self): + cfg = dict(type='ToPIL', to_rgb=True) + transform = TRANSFORMS.build(cfg) + self.assertEqual(repr(transform), 'NumpyToPIL(to_rgb=True)') + + +class TestToNumpy(unittest.TestCase): + + def test_transform(self): + img_path = osp.join(osp.dirname(__file__), '../../data/color.jpg') + data = { + 'img': Image.open(img_path), + } + + cfg = dict(type='ToNumpy') + transform = TRANSFORMS.build(cfg) + results = transform(copy.deepcopy(data)) + self.assertIsInstance(results['img'], np.ndarray) + self.assertEqual(results['img'].dtype, 'uint8') + + cfg = dict(type='ToNumpy', to_bgr=True) + transform = TRANSFORMS.build(cfg) + results = transform(copy.deepcopy(data)) + self.assertIsInstance(results['img'], np.ndarray) + self.assertEqual(results['img'].dtype, 'uint8') + np.equal(results['img'], np.array(data['img'])[:, :, ::-1]) + + def test_repr(self): + cfg = dict(type='ToNumpy', to_bgr=True) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), 'PILToNumpy(to_bgr=True, dtype=None)') + + +class TestCollect(unittest.TestCase): + + def test_transform(self): + data = {'img': [1, 2, 3], 'gt_label': 1} + + cfg = dict(type='Collect', keys=['img']) + transform = TRANSFORMS.build(cfg) + results = transform(copy.deepcopy(data)) + self.assertIn('img', results) + self.assertNotIn('gt_label', results) + + def test_repr(self): + cfg = dict(type='Collect', keys=['img']) + transform = TRANSFORMS.build(cfg) + self.assertEqual(repr(transform), "Collect(keys=['img'])") + + +class TestPackMultiTaskInputs(unittest.TestCase): + + def test_transform(self): + img_path = osp.join(osp.dirname(__file__), '../../data/color.jpg') + data = { + 'sample_idx': 1, + 'img_path': img_path, + 'ori_shape': (300, 400), + 'img_shape': (300, 400), + 'scale_factor': 1.0, + 'flip': False, + 'img': mmcv.imread(img_path), + 'gt_label': { + 'task1': 1, + 'task3': 3 + }, + } + + cfg = dict(type='PackMultiTaskInputs', multi_task_fields=['gt_label']) + transform = TRANSFORMS.build(cfg) + results = transform(copy.deepcopy(data)) + self.assertIn('inputs', results) + self.assertIsInstance(results['inputs'], torch.Tensor) + self.assertIn('data_samples', results) + self.assertIsInstance(results['data_samples'], MultiTaskDataSample) + self.assertIn('flip', results['data_samples'].task1.metainfo_keys()) + self.assertIsInstance(results['data_samples'].task1.gt_label, + torch.Tensor) + + # Test grayscale image + data['img'] = data['img'].mean(-1) + results = transform(copy.deepcopy(data)) + self.assertIn('inputs', results) + self.assertIsInstance(results['inputs'], torch.Tensor) + self.assertEqual(results['inputs'].shape, (1, 300, 400)) + + # Test 
without `img` and `gt_label` + del data['img'] + del data['gt_label'] + results = transform(copy.deepcopy(data)) + self.assertNotIn('gt_label', results['data_samples']) + + def test_repr(self): + cfg = dict( + type='PackMultiTaskInputs', + multi_task_fields=['gt_label'], + task_handlers=dict(task1=dict(type='PackInputs')), + ) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), + "PackMultiTaskInputs(multi_task_fields=['gt_label'], " + "input_key='img', task_handlers={'task1': PackInputs})") diff --git a/tests/test_datasets/test_transforms/test_processing.py b/tests/test_datasets/test_transforms/test_processing.py new file mode 100644 index 0000000..3386568 --- /dev/null +++ b/tests/test_datasets/test_transforms/test_processing.py @@ -0,0 +1,959 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import math +import os.path as osp +import random +from unittest import TestCase +from unittest.mock import ANY, call, patch + +import mmengine +import numpy as np +import pytest +import torch +import torchvision +from mmcv.transforms import Compose +from mmengine.utils import digit_version +from PIL import Image +from torchvision import transforms + +from mmpretrain.datasets.transforms.processing import VISION_TRANSFORMS +from mmpretrain.registry import TRANSFORMS + +try: + import albumentations +except ImportError: + albumentations = None + + +def construct_toy_data(): + img = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], + dtype=np.uint8) + img = np.stack([img, img, img], axis=-1) + results = dict() + # image + results['ori_img'] = img + results['img'] = copy.deepcopy(img) + results['ori_shape'] = img.shape + results['img_shape'] = img.shape + return results + + +class TestRandomCrop(TestCase): + + def test_assertion(self): + with self.assertRaises(AssertionError): + cfg = dict(type='RandomCrop', crop_size=-1) + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = dict(type='RandomCrop', crop_size=(1, 2, 3)) + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = dict(type='RandomCrop', crop_size=(1, -2)) + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = dict(type='RandomCrop', crop_size=224, padding_mode='co') + TRANSFORMS.build(cfg) + + def test_transform(self): + results = dict(img=np.random.randint(0, 256, (256, 256, 3), np.uint8)) + + # test random crop by default. + cfg = dict(type='RandomCrop', crop_size=224) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test int padding and int pad_val. + cfg = dict( + type='RandomCrop', crop_size=(224, 224), padding=2, pad_val=1) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test int padding and sequence pad_val. + cfg = dict( + type='RandomCrop', crop_size=224, padding=2, pad_val=(0, 50, 0)) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test sequence padding. + cfg = dict(type='RandomCrop', crop_size=224, padding=(2, 3, 4, 5)) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test pad_if_needed. 
+ cfg = dict( + type='RandomCrop', + crop_size=300, + pad_if_needed=True, + padding_mode='edge') + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (300, 300, 3)) + + # test large crop size. + results = dict(img=np.random.randint(0, 256, (256, 256, 3), np.uint8)) + cfg = dict(type='RandomCrop', crop_size=300) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (256, 256, 3)) + + # test equal size. + results = dict(img=np.random.randint(0, 256, (256, 256, 3), np.uint8)) + cfg = dict(type='RandomCrop', crop_size=256) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (256, 256, 3)) + + def test_repr(self): + cfg = dict(type='RandomCrop', crop_size=224) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), 'RandomCrop(crop_size=(224, 224), padding=None, ' + 'pad_if_needed=False, pad_val=0, padding_mode=constant)') + + +class TestRandomResizedCrop(TestCase): + + def test_assertion(self): + with self.assertRaises(AssertionError): + cfg = dict(type='RandomResizedCrop', scale=-1) + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = dict(type='RandomResizedCrop', scale=(1, 2, 3)) + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = dict(type='RandomResizedCrop', scale=(1, -2)) + TRANSFORMS.build(cfg) + + with self.assertRaises(ValueError): + cfg = dict( + type='RandomResizedCrop', scale=224, crop_ratio_range=(1, 0.1)) + TRANSFORMS.build(cfg) + + with self.assertRaises(ValueError): + cfg = dict( + type='RandomResizedCrop', + scale=224, + aspect_ratio_range=(1, 0.1)) + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = dict(type='RandomResizedCrop', scale=224, max_attempts=-1) + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = dict(type='RandomResizedCrop', scale=224, interpolation='ne') + TRANSFORMS.build(cfg) + + def test_transform(self): + results = dict(img=np.random.randint(0, 256, (256, 256, 3), np.uint8)) + + # test random crop by default. + cfg = dict(type='RandomResizedCrop', scale=224) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test crop_ratio_range. + cfg = dict( + type='RandomResizedCrop', + scale=(224, 224), + crop_ratio_range=(0.5, 0.8)) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test aspect_ratio_range. + cfg = dict( + type='RandomResizedCrop', scale=224, aspect_ratio_range=(0.5, 0.8)) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test max_attempts. + cfg = dict(type='RandomResizedCrop', scale=224, max_attempts=0) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + # test fall back with extreme low in_ratio + results = dict(img=np.random.randint(0, 256, (10, 256, 3), np.uint8)) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + # test fall back with extreme low in_ratio + results = dict(img=np.random.randint(0, 256, (256, 10, 3), np.uint8)) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test large crop size. 
+ results = dict(img=np.random.randint(0, 256, (256, 256, 3), np.uint8)) + cfg = dict(type='RandomResizedCrop', scale=300) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (300, 300, 3)) + + def test_repr(self): + cfg = dict(type='RandomResizedCrop', scale=224) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), 'RandomResizedCrop(scale=(224, 224), ' + 'crop_ratio_range=(0.08, 1.0), aspect_ratio_range=(0.75, 1.3333), ' + 'max_attempts=10, interpolation=bilinear, backend=cv2)') + + +class TestEfficientNetRandomCrop(TestCase): + + def test_assertion(self): + with self.assertRaises(AssertionError): + cfg = dict(type='EfficientNetRandomCrop', scale=(1, 1)) + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = dict( + type='EfficientNetRandomCrop', scale=224, min_covered=-1) + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = dict( + type='EfficientNetRandomCrop', scale=224, crop_padding=-1) + TRANSFORMS.build(cfg) + + def test_transform(self): + results = dict(img=np.random.randint(0, 256, (256, 256, 3), np.uint8)) + + # test random crop by default. + cfg = dict(type='EfficientNetRandomCrop', scale=224) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test crop_ratio_range. + cfg = dict( + type='EfficientNetRandomCrop', + scale=224, + crop_ratio_range=(0.5, 0.8)) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test aspect_ratio_range. + cfg = dict( + type='EfficientNetRandomCrop', + scale=224, + aspect_ratio_range=(0.5, 0.8)) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test max_attempts. + cfg = dict(type='EfficientNetRandomCrop', scale=224, max_attempts=0) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test min_covered. + cfg = dict(type='EfficientNetRandomCrop', scale=224, min_covered=.9) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test crop_padding. + cfg = dict( + type='EfficientNetRandomCrop', + scale=224, + min_covered=0.9, + crop_padding=10) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test large crop size. + results = dict(img=np.random.randint(0, 256, (256, 256, 3), np.uint8)) + cfg = dict(type='EfficientNetRandomCrop', scale=300) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (300, 300, 3)) + + def test_repr(self): + cfg = dict(type='EfficientNetRandomCrop', scale=224) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), 'EfficientNetRandomCrop(scale=(224, 224), ' + 'crop_ratio_range=(0.08, 1.0), aspect_ratio_range=(0.75, 1.3333), ' + 'max_attempts=10, interpolation=bicubic, backend=cv2, ' + 'min_covered=0.1, crop_padding=32)') + + +class TestResizeEdge(TestCase): + + def test_transform(self): + results = dict(img=np.random.randint(0, 256, (128, 256, 3), np.uint8)) + + # test resize short edge by default. 
+ cfg = dict(type='ResizeEdge', scale=224) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 448, 3)) + + # test resize long edge. + cfg = dict(type='ResizeEdge', scale=224, edge='long') + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (112, 224, 3)) + + # test resize width. + cfg = dict(type='ResizeEdge', scale=224, edge='width') + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (112, 224, 3)) + + # test resize height. + cfg = dict(type='ResizeEdge', scale=224, edge='height') + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 448, 3)) + + # test invalid edge + with self.assertRaisesRegex(AssertionError, 'Invalid edge "hi"'): + cfg = dict(type='ResizeEdge', scale=224, edge='hi') + TRANSFORMS.build(cfg) + + def test_repr(self): + cfg = dict(type='ResizeEdge', scale=224, edge='height') + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), 'ResizeEdge(scale=224, edge=height, backend=cv2, ' + 'interpolation=bilinear)') + + +class TestEfficientNetCenterCrop(TestCase): + + def test_assertion(self): + with self.assertRaises(AssertionError): + cfg = dict(type='EfficientNetCenterCrop', crop_size=(1, 1)) + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = dict(type='EfficientNetCenterCrop', crop_size=-1) + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = dict( + type='EfficientNetCenterCrop', crop_size=224, crop_padding=-1) + TRANSFORMS.build(cfg) + + def test_transform(self): + results = dict(img=np.random.randint(0, 256, (256, 256, 3), np.uint8)) + + # test random crop by default. + cfg = dict(type='EfficientNetCenterCrop', crop_size=224) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test crop_padding. + cfg = dict( + type='EfficientNetCenterCrop', crop_size=224, crop_padding=10) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (224, 224, 3)) + + # test large crop size. + results = dict(img=np.random.randint(0, 256, (256, 256, 3), np.uint8)) + cfg = dict(type='EfficientNetCenterCrop', crop_size=300) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertTupleEqual(results['img'].shape, (300, 300, 3)) + + def test_repr(self): + cfg = dict(type='EfficientNetCenterCrop', crop_size=224) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), 'EfficientNetCenterCrop(crop_size=224, ' + 'crop_padding=32, interpolation=bicubic, backend=cv2)') + + +class TestRandomErasing(TestCase): + + def test_initialize(self): + # test erase_prob assertion + with self.assertRaises(AssertionError): + cfg = dict(type='RandomErasing', erase_prob=-1.) + TRANSFORMS.build(cfg) + with self.assertRaises(AssertionError): + cfg = dict(type='RandomErasing', erase_prob=1) + TRANSFORMS.build(cfg) + + # test area_ratio assertion + with self.assertRaises(AssertionError): + cfg = dict(type='RandomErasing', min_area_ratio=-1.) 
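The ResizeEdge shape checks above follow from rescaling the 128x256 input so that the selected edge matches scale. A small sketch of that arithmetic; the helper name is illustrative and not the transform's implementation:

# Illustrative only: pick the reference edge, derive a single ratio,
# and rescale both sides, as the asserted output shapes imply.
def resize_edge_shape(h, w, scale, edge='short'):
    ref = {'short': min(h, w), 'long': max(h, w), 'width': w, 'height': h}[edge]
    ratio = scale / ref
    return round(h * ratio), round(w * ratio)

print(resize_edge_shape(128, 256, 224, 'short'))    # (224, 448)
print(resize_edge_shape(128, 256, 224, 'long'))     # (112, 224)
print(resize_edge_shape(128, 256, 224, 'width'))    # (112, 224)
print(resize_edge_shape(128, 256, 224, 'height'))   # (224, 448)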
+ TRANSFORMS.build(cfg) + with self.assertRaises(AssertionError): + cfg = dict(type='RandomErasing', max_area_ratio=1) + TRANSFORMS.build(cfg) + with self.assertRaises(AssertionError): + # min_area_ratio should be smaller than max_area_ratio + cfg = dict( + type='RandomErasing', min_area_ratio=0.6, max_area_ratio=0.4) + TRANSFORMS.build(cfg) + + # test aspect_range assertion + with self.assertRaises(AssertionError): + cfg = dict(type='RandomErasing', aspect_range='str') + TRANSFORMS.build(cfg) + with self.assertRaises(AssertionError): + cfg = dict(type='RandomErasing', aspect_range=-1) + TRANSFORMS.build(cfg) + with self.assertRaises(AssertionError): + # In aspect_range (min, max), min should be smaller than max. + cfg = dict(type='RandomErasing', aspect_range=[1.6, 0.6]) + TRANSFORMS.build(cfg) + + # test mode assertion + with self.assertRaises(AssertionError): + cfg = dict(type='RandomErasing', mode='unknown') + TRANSFORMS.build(cfg) + + # test fill_std assertion + with self.assertRaises(AssertionError): + cfg = dict(type='RandomErasing', fill_std='unknown') + TRANSFORMS.build(cfg) + + # test implicit conversion of aspect_range + cfg = dict(type='RandomErasing', aspect_range=0.5) + random_erasing = TRANSFORMS.build(cfg) + assert random_erasing.aspect_range == (0.5, 2.) + + cfg = dict(type='RandomErasing', aspect_range=2.) + random_erasing = TRANSFORMS.build(cfg) + assert random_erasing.aspect_range == (0.5, 2.) + + # test implicit conversion of fill_color + cfg = dict(type='RandomErasing', fill_color=15) + random_erasing = TRANSFORMS.build(cfg) + assert random_erasing.fill_color == [15, 15, 15] + + # test implicit conversion of fill_std + cfg = dict(type='RandomErasing', fill_std=0.5) + random_erasing = TRANSFORMS.build(cfg) + assert random_erasing.fill_std == [0.5, 0.5, 0.5] + + def test_transform(self): + # test when erase_prob=0. 
+ results = construct_toy_data() + cfg = dict( + type='RandomErasing', + erase_prob=0., + mode='const', + fill_color=(255, 255, 255)) + random_erasing = TRANSFORMS.build(cfg) + results = random_erasing(results) + np.testing.assert_array_equal(results['img'], results['ori_img']) + + # test mode 'const' + results = construct_toy_data() + cfg = dict( + type='RandomErasing', + erase_prob=1., + mode='const', + fill_color=(255, 255, 255)) + with patch('numpy.random', np.random.RandomState(0)): + random_erasing = TRANSFORMS.build(cfg) + results = random_erasing(results) + expect_out = np.array( + [[1, 255, 3, 4], [5, 255, 7, 8], [9, 10, 11, 12]], + dtype=np.uint8) + expect_out = np.stack([expect_out] * 3, axis=-1) + np.testing.assert_array_equal(results['img'], expect_out) + + # test mode 'rand' with normal distribution + results = construct_toy_data() + cfg = dict(type='RandomErasing', erase_prob=1., mode='rand') + with patch('numpy.random', np.random.RandomState(0)): + random_erasing = TRANSFORMS.build(cfg) + results = random_erasing(results) + expect_out = results['ori_img'] + expect_out[:2, 1] = [[159, 98, 76], [14, 69, 122]] + np.testing.assert_array_equal(results['img'], expect_out) + + # test mode 'rand' with uniform distribution + results = construct_toy_data() + cfg = dict( + type='RandomErasing', + erase_prob=1., + mode='rand', + fill_std=(10, 255, 0)) + with patch('numpy.random', np.random.RandomState(0)): + random_erasing = TRANSFORMS.build(cfg) + results = random_erasing(results) + + expect_out = results['ori_img'] + expect_out[:2, 1] = [[113, 255, 128], [126, 83, 128]] + np.testing.assert_array_equal(results['img'], expect_out) + + def test_repr(self): + cfg = dict( + type='RandomErasing', + erase_prob=0.5, + mode='const', + aspect_range=(0.3, 1.3), + fill_color=(255, 255, 255)) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), + 'RandomErasing(erase_prob=0.5, min_area_ratio=0.02, ' + 'max_area_ratio=0.4, aspect_range=(0.3, 1.3), mode=const, ' + 'fill_color=(255, 255, 255), fill_std=None)') + + +class TestColorJitter(TestCase): + + DEFAULT_ARGS = dict( + type='ColorJitter', + brightness=0.5, + contrast=0.5, + saturation=0.5, + hue=0.2) + + def test_initialize(self): + cfg = dict( + type='ColorJitter', + brightness=(0.8, 1.2), + contrast=[0.5, 1.5], + saturation=0., + hue=0.2) + transform = TRANSFORMS.build(cfg) + self.assertEqual(transform.brightness, (0.8, 1.2)) + self.assertEqual(transform.contrast, (0.5, 1.5)) + self.assertIsNone(transform.saturation) + self.assertEqual(transform.hue, (-0.2, 0.2)) + + with self.assertRaisesRegex(ValueError, 'If hue is a single number'): + cfg = {**self.DEFAULT_ARGS, 'hue': -0.2} + TRANSFORMS.build(cfg) + + with self.assertRaisesRegex(TypeError, 'hue should be a single'): + cfg = {**self.DEFAULT_ARGS, 'hue': [0.5, 0.4, 0.2]} + TRANSFORMS.build(cfg) + + logger = mmengine.MMLogger.get_current_instance() + with self.assertLogs(logger, 'WARN') as log: + cfg = {**self.DEFAULT_ARGS, 'hue': [-1, 0.4]} + transform = TRANSFORMS.build(cfg) + self.assertIn('ColorJitter hue values', log.output[0]) + self.assertEqual(transform.hue, (-0.5, 0.4)) + + def test_transform(self): + ori_img = np.random.randint(0, 256, (256, 256, 3), np.uint8) + results = dict(img=copy.deepcopy(ori_img)) + + # test transform + cfg = copy.deepcopy(self.DEFAULT_ARGS) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertEqual(results['img'].dtype, ori_img.dtype) + assert not np.equal(results['img'], ori_img).all() + + # test call with 
brightness, contrast and saturation are all 0 + results = dict(img=copy.deepcopy(ori_img)) + cfg = dict( + type='ColorJitter', brightness=0., contrast=0., saturation=0.) + transform = TRANSFORMS.build(cfg) + results = transform(results) + self.assertEqual(results['img'].dtype, ori_img.dtype) + assert np.equal(results['img'], ori_img).all() + + # test call index + cfg = {**self.DEFAULT_ARGS, 'contrast': 0.} + transform = TRANSFORMS.build(cfg) + with patch('numpy.random', np.random.RandomState(0)): + mmcv_module = 'mmpretrain.datasets.transforms.processing.mmcv' + call_list = [ + call.adjust_color(ANY, alpha=ANY, backend='pillow'), + call.adjust_hue(ANY, ANY, backend='pillow'), + call.adjust_brightness(ANY, ANY, backend='pillow'), + ] + with patch(mmcv_module, autospec=True) as mock: + transform(results) + self.assertEqual(mock.mock_calls, call_list) + + def test_repr(self): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), 'ColorJitter(brightness=(0.5, 1.5), ' + 'contrast=(0.5, 1.5), saturation=(0.5, 1.5), hue=(-0.2, 0.2))') + + +class TestLighting(TestCase): + + def setUp(self): + EIGVAL = [0.2175, 0.0188, 0.0045] + EIGVEC = [ + [-0.5836, -0.6948, 0.4203], + [-0.5808, -0.0045, -0.814], + [-0.5675, 0.7192, 0.4009], + ] + self.DEFAULT_ARGS = dict( + type='Lighting', + eigval=EIGVAL, + eigvec=EIGVEC, + alphastd=25.5, + to_rgb=False) + + def test_assertion(self): + with self.assertRaises(AssertionError): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['eigval'] = -1 + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['eigvec'] = None + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['alphastd'] = 'Lighting' + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['eigvec'] = dict() + TRANSFORMS.build(cfg) + + with self.assertRaises(AssertionError): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['eigvec'] = [ + [-0.5836, -0.6948, 0.4203], + [-0.5808, -0.0045, -0.814], + [-0.5675, 0.7192, 0.4009, 0.10], + ] + TRANSFORMS.build(cfg) + + def test_transform(self): + ori_img = np.ones((256, 256, 3), np.uint8) * 127 + results = dict(img=copy.deepcopy(ori_img)) + + # Test transform with non-img-keyword result + with self.assertRaises(AssertionError): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + lightening_module = TRANSFORMS.build(cfg) + empty_results = dict() + lightening_module(empty_results) + + # test call + cfg = copy.deepcopy(self.DEFAULT_ARGS) + lightening_module = TRANSFORMS.build(cfg) + with patch('numpy.random', np.random.RandomState(0)): + results = lightening_module(results) + self.assertEqual(results['img'].dtype, ori_img.dtype) + assert not np.equal(results['img'], ori_img).all() + + # test call with alphastd == 0 + results = dict(img=copy.deepcopy(ori_img)) + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['alphastd'] = 0.0 + lightening_module = TRANSFORMS.build(cfg) + results = lightening_module(results) + self.assertEqual(results['img'].dtype, ori_img.dtype) + assert np.equal(results['img'], ori_img).all() + + def test_repr(self): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), 'Lighting(eigval=[0.2175, 0.0188, 0.0045], eigvec' + '=[[-0.5836, -0.6948, 0.4203], [-0.5808, -0.0045, -0.814], [' + '-0.5675, 0.7192, 0.4009]], alphastd=25.5, to_rgb=False)') + + +class 
TestAlbumentations(TestCase): + DEFAULT_ARGS = dict( + type='Albumentations', transforms=[dict(type='ChannelShuffle', p=1)]) + + @pytest.mark.skipif( + albumentations is None, reason='No Albumentations module.') + def test_assertion(self): + # Test with non-list transforms + with self.assertRaises(AssertionError): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['transforms'] = 1 + TRANSFORMS.build(cfg) + + # Test with non-dict transforms item. + with self.assertRaises(AssertionError): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['transforms'] = [dict(p=1)] + TRANSFORMS.build(cfg) + + # Test with dict transforms item without keyword 'type'. + with self.assertRaises(AssertionError): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['transforms'] = [[]] + TRANSFORMS.build(cfg) + + # Test with dict transforms item with wrong type. + with self.assertRaises(TypeError): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['transforms'] = [dict(type=[])] + TRANSFORMS.build(cfg) + + # Test with dict transforms item with wrong type. + with self.assertRaises(AssertionError): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['keymap'] = [] + TRANSFORMS.build(cfg) + + @pytest.mark.skipif( + albumentations is None, reason='No Albumentations module.') + def test_transform(self): + ori_img = np.random.randint(0, 256, (256, 256, 3), np.uint8) + results = dict(img=copy.deepcopy(ori_img)) + + # Test transform with non-img-keyword result + with self.assertRaises(AssertionError): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + albu_module = TRANSFORMS.build(cfg) + empty_results = dict() + albu_module(empty_results) + + # Test normal case + results = dict(img=copy.deepcopy(ori_img)) + cfg = copy.deepcopy(self.DEFAULT_ARGS) + albu_module = TRANSFORMS.build(cfg) + ablu_result = albu_module(results) + + # Test using 'Albu' + results = dict(img=copy.deepcopy(ori_img)) + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['type'] = 'Albu' + albu_module = TRANSFORMS.build(cfg) + ablu_result = albu_module(results) + + # Test with keymap + results = dict(img=copy.deepcopy(ori_img)) + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['keymap'] = dict(img='image') + albu_module = TRANSFORMS.build(cfg) + ablu_result = albu_module(results) + + # Test with nested transform + results = dict(img=copy.deepcopy(ori_img)) + cfg = copy.deepcopy(self.DEFAULT_ARGS) + nested_transform_cfg = [ + dict( + type='ShiftScaleRotate', + shift_limit=0.0625, + scale_limit=0.0, + rotate_limit=0, + interpolation=1, + p=0.5), + dict( + type='RandomBrightnessContrast', + brightness_limit=[0.1, 0.3], + contrast_limit=[0.1, 0.3], + p=0.2), + dict(type='ChannelShuffle', p=0.1), + dict( + type='OneOf', + transforms=[ + dict(type='Blur', blur_limit=3, p=1.0), + dict(type='MedianBlur', blur_limit=3, p=1.0) + ], + p=0.1), + ] + cfg['transforms'] = nested_transform_cfg + mmpretrain_module = TRANSFORMS.build(cfg) + mmpretrain_module(results) + + # test to be same with albumentations 3rd package + np.random.seed(0) + random.seed(0) + import albumentations as A + ablu_transform_3rd = A.Compose([ + A.RandomCrop(width=256, height=256), + A.HorizontalFlip(p=0.5), + A.RandomBrightnessContrast(p=0.2), + ]) + transformed_image_3rd = ablu_transform_3rd( + image=copy.deepcopy(ori_img))['image'] + + np.random.seed(0) + random.seed(0) + results = dict(img=copy.deepcopy(ori_img)) + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['transforms'] = [ + dict(type='RandomCrop', width=256, height=256), + dict(type='HorizontalFlip', p=0.5), + dict(type='RandomBrightnessContrast', p=0.2) + ] + 
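+        # Note: both `np.random` and `random` were re-seeded above with the
+        # same values used for the third-party Albumentations pipeline, so the
+        # wrapped transforms built below are expected to reproduce
+        # `transformed_image_3rd` exactly, which is what the element-wise
+        # comparison that follows relies on.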
mmpretrain_module = TRANSFORMS.build(cfg) + transformed_image_mmpretrain = mmpretrain_module(results)['img'] + assert np.equal(transformed_image_3rd, + transformed_image_mmpretrain).all() + + # Test class obj case + results = dict(img=np.random.randint(0, 256, (200, 300, 3), np.uint8)) + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['transforms'] = [ + dict(type=albumentations.SmallestMaxSize, max_size=400, p=1) + ] + albu_module = TRANSFORMS.build(cfg) + ablu_result = albu_module(results) + assert 'img' in ablu_result + assert min(ablu_result['img'].shape[:2]) == 400 + assert ablu_result['img_shape'] == (400, 600) + + @pytest.mark.skipif( + albumentations is None, reason='No Albumentations module.') + def test_repr(self): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), "Albumentations(transforms=[{'type': " + "'ChannelShuffle', 'p': 1}])") + + +class TestSimMIMMaskGenerator(TestCase): + DEFAULT_ARGS = dict( + type='SimMIMMaskGenerator', + input_size=192, + mask_patch_size=32, + model_patch_size=4, + mask_ratio=0.6) + + def test_transform(self): + img = np.random.randint(0, 256, (3, 192, 192), np.uint8) + results = {'img': img} + module = TRANSFORMS.build(self.DEFAULT_ARGS) + + results = module(results) + + self.assertTupleEqual(results['img'].shape, (3, 192, 192)) + self.assertTupleEqual(results['mask'].shape, (48, 48)) + + def test_repr(self): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + transform = TRANSFORMS.build(cfg) + self.assertEqual( + repr(transform), 'SimMIMMaskGenerator(input_size=192, ' + 'mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)') + + +class TestBEiTMaskGenerator(TestCase): + DEFAULT_ARGS = dict( + type='BEiTMaskGenerator', + input_size=(14, 14), + num_masking_patches=75, + max_num_patches=None, + min_num_patches=16) + + def test_transform(self): + module = TRANSFORMS.build(self.DEFAULT_ARGS) + + results = module({}) + + self.assertTupleEqual(results['mask'].shape, (14, 14)) + + def test_repr(self): + cfg = copy.deepcopy(self.DEFAULT_ARGS) + transform = TRANSFORMS.build(cfg) + + log_aspect_ratio = (math.log(0.3), math.log(1 / 0.3)) + self.assertEqual( + repr(transform), 'BEiTMaskGenerator(height=14, width=14, ' + 'num_patches=196, num_masking_patches=75, min_num_patches=16, ' + f'max_num_patches=75, log_aspect_ratio={log_aspect_ratio})') + + +class TestVisionTransformWrapper(TestCase): + + def test_register(self): + for t in VISION_TRANSFORMS: + self.assertIn('torchvision/', t) + self.assertIn(t, TRANSFORMS) + + def test_transform(self): + img_path = osp.join(osp.dirname(__file__), '../../data/color.jpg') + data = {'img': Image.open(img_path)} + + # test normal transform + vision_trans = transforms.RandomResizedCrop(224) + vision_transformed_img = vision_trans(data['img']) + mmcls_trans = TRANSFORMS.build( + dict(type='torchvision/RandomResizedCrop', size=224)) + mmcls_transformed_img = mmcls_trans(data)['img'] + np.equal( + np.array(vision_transformed_img), np.array(mmcls_transformed_img)) + + # test convert type dtype + data = {'img': torch.randn(3, 224, 224)} + vision_trans = transforms.ConvertImageDtype(torch.float) + vision_transformed_img = vision_trans(data['img']) + mmcls_trans = TRANSFORMS.build( + dict(type='torchvision/ConvertImageDtype', dtype='float')) + mmcls_transformed_img = mmcls_trans(data)['img'] + np.equal( + np.array(vision_transformed_img), np.array(mmcls_transformed_img)) + + # test transform with interpolation + data = {'img': Image.open(img_path)} + if 
digit_version(torchvision.__version__) > digit_version('0.8.0'): + from torchvision.transforms import InterpolationMode + interpolation_t = InterpolationMode.NEAREST + else: + interpolation_t = Image.NEAREST + vision_trans = transforms.Resize(224, interpolation_t) + vision_transformed_img = vision_trans(data['img']) + mmcls_trans = TRANSFORMS.build( + dict(type='torchvision/Resize', size=224, interpolation='nearest')) + mmcls_transformed_img = mmcls_trans(data)['img'] + np.equal( + np.array(vision_transformed_img), np.array(mmcls_transformed_img)) + + # test compose transforms + data = {'img': Image.open(img_path)} + vision_trans = transforms.Compose([ + transforms.Resize(176), + transforms.RandomHorizontalFlip(), + transforms.PILToTensor(), + transforms.ConvertImageDtype(torch.float), + transforms.Normalize( + mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + ]) + vision_transformed_img = vision_trans(data['img']) + + pipeline_cfg = [ + dict(type='LoadImageFromFile'), + dict(type='NumpyToPIL', to_rgb=True), + dict(type='torchvision/Resize', size=176), + dict(type='torchvision/RandomHorizontalFlip'), + dict(type='torchvision/PILToTensor'), + dict(type='torchvision/ConvertImageDtype', dtype='float'), + dict( + type='torchvision/Normalize', + mean=(0.485, 0.456, 0.406), + std=(0.229, 0.224, 0.225), + ) + ] + pipeline = [TRANSFORMS.build(t) for t in pipeline_cfg] + mmcls_trans = Compose(transforms=pipeline) + mmcls_data = {'img_path': img_path} + mmcls_transformed_img = mmcls_trans(mmcls_data)['img'] + np.equal( + np.array(vision_transformed_img), np.array(mmcls_transformed_img)) + + def test_repr(self): + vision_trans = transforms.RandomResizedCrop(224) + mmcls_trans = TRANSFORMS.build( + dict(type='torchvision/RandomResizedCrop', size=224)) + + self.assertEqual(f'TorchVision{repr(vision_trans)}', repr(mmcls_trans)) diff --git a/tests/test_datasets/test_transforms/test_wrappers.py b/tests/test_datasets/test_transforms/test_wrappers.py new file mode 100644 index 0000000..fc487ed --- /dev/null +++ b/tests/test_datasets/test_transforms/test_wrappers.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
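+# The assertions in `test_multi_view` below treat `MultiView` as a wrapper
+# that applies pipeline `i` exactly `num_views[i]` times (an integer is
+# treated like a one-element list) and collects the outputs, so
+# `results['img']` becomes a list of `sum(num_views)` views in pipeline
+# order. A rough sketch of that contract (illustrative only, not the real
+# implementation; `SketchMultiView` is a made-up name):
+#
+#     class SketchMultiView:
+#         def __init__(self, pipelines, num_views):
+#             if isinstance(num_views, int):
+#                 num_views = [num_views]
+#             self.pairs = list(zip(pipelines, num_views))
+#
+#         def __call__(self, results):
+#             views = []
+#             for pipeline, num in self.pairs:
+#                 for _ in range(num):
+#                     out = dict(results)  # shallow copy per view
+#                     for t in pipeline:
+#                         out = t(out)
+#                     views.append(out['img'])
+#             results['img'] = views
+#             return results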
+import numpy as np +from mmcv.transforms import Resize + +from mmpretrain.datasets import GaussianBlur, MultiView, Solarize + + +def test_multi_view(): + original_img = np.ones((4, 4, 3), dtype=np.uint8) + + # test 1 pipeline with 2 views + pipeline1 = [ + Resize(2), + GaussianBlur(magnitude_range=(0.1, 2), magnitude_std='inf') + ] + + transform = MultiView([pipeline1], 2) + results = dict(img=original_img) + results = transform(results) + assert len(results['img']) == 2 + assert results['img'][0].shape == (2, 2, 3) + + transform = MultiView([pipeline1], [2]) + results = dict(img=original_img) + results = transform(results) + assert len(results['img']) == 2 + assert results['img'][0].shape == (2, 2, 3) + + # test 2 pipeline with 3 views + pipeline2 = [ + Solarize(thr=128), + GaussianBlur(magnitude_range=(0.1, 2), magnitude_std='inf') + ] + transform = MultiView([pipeline1, pipeline2], [1, 2]) + + results = dict(img=original_img) + results = transform(results) + assert len(results['img']) == 3 + assert results['img'][0].shape == (2, 2, 3) + assert results['img'][1].shape == (4, 4, 3) + + # test repr + assert isinstance(str(transform), str) diff --git a/tests/test_engine/test_hooks/test_arcface_hooks.py b/tests/test_engine/test_hooks/test_arcface_hooks.py new file mode 100644 index 0000000..6f2831f --- /dev/null +++ b/tests/test_engine/test_hooks/test_arcface_hooks.py @@ -0,0 +1,102 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import tempfile +from unittest import TestCase + +import numpy as np +import torch +from mmengine.runner import Runner +from torch.utils.data import DataLoader, Dataset + + +class ExampleDataset(Dataset): + + def __init__(self): + self.index = 0 + self.metainfo = None + + def __getitem__(self, idx): + results = dict(imgs=torch.rand((224, 224, 3)).float(), ) + return results + + def get_gt_labels(self): + gt_labels = np.array([0, 1, 2, 4, 0, 4, 1, 2, 2, 1]) + return gt_labels + + def __len__(self): + return 10 + + +class TestSetAdaptiveMarginsHook(TestCase): + DEFAULT_HOOK_CFG = dict(type='SetAdaptiveMarginsHook') + DEFAULT_MODEL = dict( + type='ImageClassifier', + backbone=dict( + type='ResNet', + depth=34, + num_stages=4, + out_indices=(3, ), + style='pytorch'), + neck=dict(type='GlobalAveragePooling'), + head=dict(type='ArcFaceClsHead', in_channels=512, num_classes=5)) + + def test_before_train(self): + default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=None, + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='VisualizationHook', enable=False), + ) + tmpdir = tempfile.TemporaryDirectory() + loader = DataLoader(ExampleDataset(), batch_size=2) + self.runner = Runner( + model=self.DEFAULT_MODEL, + work_dir=tmpdir.name, + train_dataloader=loader, + train_cfg=dict(by_epoch=True, max_epochs=1), + log_level='WARNING', + optim_wrapper=dict( + optimizer=dict(type='SGD', lr=0.1, momentum=0.9)), + param_scheduler=dict( + type='MultiStepLR', milestones=[1, 2], gamma=0.1), + default_scope='mmpretrain', + default_hooks=default_hooks, + experiment_name='test_construct_with_arcface', + custom_hooks=[self.DEFAULT_HOOK_CFG]) + + default_margins = torch.tensor([0.5] * 5) + torch.allclose(self.runner.model.head.margins.cpu(), default_margins) + self.runner.call_hook('before_train') + # counts = [2 ,3 , 3, 0, 2] -> [2 ,3 , 3, 1, 2] at least occur once + # feqercy**-0.25 = [0.84089642, 0.75983569, 0.75983569, 1., 0.84089642] + # normized 
= [0.33752196, 0. , 0. , 1. , 0.33752196] + # margins = [0.20188488, 0.05, 0.05, 0.5, 0.20188488] + expert_margins = torch.tensor( + [0.20188488, 0.05, 0.05, 0.5, 0.20188488]) + torch.allclose(self.runner.model.head.margins.cpu(), expert_margins) + + model_cfg = {**self.DEFAULT_MODEL} + model_cfg['head'] = dict( + type='LinearClsHead', + num_classes=1000, + in_channels=512, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + topk=(1, 5), + ) + self.runner = Runner( + model=model_cfg, + work_dir=tmpdir.name, + train_dataloader=loader, + train_cfg=dict(by_epoch=True, max_epochs=1), + log_level='WARNING', + optim_wrapper=dict( + optimizer=dict(type='SGD', lr=0.1, momentum=0.9)), + param_scheduler=dict( + type='MultiStepLR', milestones=[1, 2], gamma=0.1), + default_scope='mmpretrain', + default_hooks=default_hooks, + experiment_name='test_construct_wo_arcface', + custom_hooks=[self.DEFAULT_HOOK_CFG]) + with self.assertRaises(ValueError): + self.runner.call_hook('before_train') diff --git a/tests/test_engine/test_hooks/test_class_num_check_hook.py b/tests/test_engine/test_hooks/test_class_num_check_hook.py new file mode 100644 index 0000000..5663c60 --- /dev/null +++ b/tests/test_engine/test_hooks/test_class_num_check_hook.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase +from unittest.mock import MagicMock, patch + +from mmpretrain.engine import ClassNumCheckHook + + +class TestClassNumCheckHook(TestCase): + + def setUp(self): + self.runner = MagicMock() + self.dataset = MagicMock() + self.hook = ClassNumCheckHook() + + def test_check_head(self): + # check sequence of string + with self.assertRaises(AssertionError): + self.hook._check_head(self.runner, self.dataset) + + # check no CLASSES + with patch.object(self.runner.logger, 'warning') as mock: + self.dataset.CLASSES = None + self.hook._check_head(self.runner, self.dataset) + mock.assert_called_once() + + # check no modules + self.dataset.CLASSES = ['str'] * 10 + self.hook._check_head(self.runner, self.dataset) + + # check number of classes not match + self.dataset.CLASSES = ['str'] * 10 + module1 = MagicMock(spec_set=True) + module2 = MagicMock(num_classes=5) + self.runner.model.named_modules.return_value = iter([(None, module1), + (None, module2)]) + with self.assertRaises(AssertionError): + self.hook._check_head(self.runner, self.dataset) + + def test_before_train(self): + with patch.object(self.hook, '_check_head') as mock: + self.hook.before_train(self.runner) + mock.assert_called_once() + + def test_before_val(self): + with patch.object(self.hook, '_check_head') as mock: + self.hook.before_val(self.runner) + mock.assert_called_once() + + def test_before_test(self): + with patch.object(self.hook, '_check_head') as mock: + self.hook.before_test(self.runner) + mock.assert_called_once() diff --git a/tests/test_engine/test_hooks/test_densecl_hook.py b/tests/test_engine/test_hooks/test_densecl_hook.py new file mode 100644 index 0000000..645d102 --- /dev/null +++ b/tests/test_engine/test_hooks/test_densecl_hook.py @@ -0,0 +1,113 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
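+# `DenseCLHook` is only checked here through its observable effect on the
+# model: before `start_iters` training iterations `loss_lambda` (which in
+# DenseCL weights the dense contrastive loss term) should be forced to 0.,
+# and once `runner.iter` reaches `start_iters` the original value (0.5 in
+# `ToyModel` below) should be restored, which is what the assertions at the
+# end of `test_densecl_hook` verify. Roughly (illustrative sketch, not the
+# real implementation):
+#
+#     def before_train(self, runner):
+#         self._loss_lambda = get_ori_model(runner.model).loss_lambda
+#
+#     def before_train_iter(self, runner, batch_idx, data_batch=None):
+#         model = get_ori_model(runner.model)
+#         if runner.iter < self.start_iters:
+#             model.loss_lambda = 0.
+#         else:
+#             model.loss_lambda = self._loss_lambda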
+import logging +import tempfile +from unittest import TestCase + +import torch +import torch.nn as nn +from mmengine.device import get_device +from mmengine.logging import MMLogger +from mmengine.model import BaseModule +from mmengine.optim import OptimWrapper +from mmengine.runner import Runner +from mmengine.structures import LabelData +from torch.utils.data import Dataset + +from mmpretrain.engine import DenseCLHook +from mmpretrain.models.selfsup import BaseSelfSupervisor +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from mmpretrain.utils import get_ori_model + + +class DummyDataset(Dataset): + METAINFO = dict() # type: ignore + data = torch.randn(12, 2) + label = torch.ones(12) + + @property + def metainfo(self): + return self.METAINFO + + def __len__(self): + return self.data.size(0) + + def __getitem__(self, index): + data_sample = DataSample() + gt_label = LabelData(value=self.label[index]) + setattr(data_sample, 'gt_label', gt_label) + return dict(inputs=[self.data[index]], data_samples=data_sample) + + +@MODELS.register_module() +class DenseCLDummyLayer(BaseModule): + + def __init__(self, init_cfg=None): + super().__init__(init_cfg) + self.linear = nn.Linear(2, 1) + + def forward(self, x): + return self.linear(x) + + +class ToyModel(BaseSelfSupervisor): + + def __init__(self): + super().__init__(backbone=dict(type='DenseCLDummyLayer')) + self.loss_lambda = 0.5 + + def loss(self, inputs, data_samples): + labels = [] + for x in data_samples: + labels.append(x.gt_label.value) + labels = torch.stack(labels) + outputs = self.backbone(inputs[0]) + loss = (labels - outputs).sum() + outputs = dict(loss=loss) + return outputs + + +class TestDenseCLHook(TestCase): + + def setUp(self): + self.temp_dir = tempfile.TemporaryDirectory() + + def tearDown(self): + # `FileHandler` should be closed in Windows, otherwise we cannot + # delete the temporary directory + logging.shutdown() + MMLogger._instance_dict.clear() + self.temp_dir.cleanup() + + def test_densecl_hook(self): + device = get_device() + dummy_dataset = DummyDataset() + toy_model = ToyModel().to(device) + densecl_hook = DenseCLHook(start_iters=1) + + # test DenseCLHook with model wrapper + runner = Runner( + model=toy_model, + work_dir=self.temp_dir.name, + train_dataloader=dict( + dataset=dummy_dataset, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + batch_size=1, + num_workers=0), + optim_wrapper=OptimWrapper( + torch.optim.Adam(toy_model.parameters())), + param_scheduler=dict(type='MultiStepLR', milestones=[1]), + train_cfg=dict(by_epoch=True, max_epochs=2), + custom_hooks=[densecl_hook], + default_hooks=dict(logger=None), + log_processor=dict(window_size=1), + experiment_name='test_densecl_hook', + default_scope='mmpretrain') + + runner.train() + + if runner.iter >= 1: + assert get_ori_model(runner.model).loss_lambda == 0.5 + else: + assert get_ori_model(runner.model).loss_lambda == 0. diff --git a/tests/test_engine/test_hooks/test_ema_hook.py b/tests/test_engine/test_hooks/test_ema_hook.py new file mode 100644 index 0000000..0520725 --- /dev/null +++ b/tests/test_engine/test_hooks/test_ema_hook.py @@ -0,0 +1,224 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
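+# Notes on the fixtures below: the checkpoint written in `setUp` mimics what
+# `EMAHook` stores, i.e. the saved `state_dict` holds the EMA (averaged)
+# parameters while `ema_state_dict` holds the original ones. Because
+# `SimpleModel.forward` simply returns `self.para` in predict mode, an
+# evaluator receiving `tensor([1.])` means the EMA weights were used and
+# `tensor([2.])` means the original weights were swapped in, which is how the
+# `evaluate_on_ema` / `evaluate_on_origin` combinations are told apart. The
+# averaged weights themselves are updated roughly as
+# `ema = (1 - momentum) * ema + momentum * param` with a small momentum, so
+# training with `lr=0.` in `test_load_state_dict` keeps the loaded EMA value
+# at 1.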
+import logging +import os.path as osp +import tempfile +from collections import OrderedDict +from unittest import TestCase +from unittest.mock import ANY, MagicMock, call + +import torch +import torch.nn as nn +from mmengine.device import get_device +from mmengine.evaluator import Evaluator +from mmengine.logging import MMLogger +from mmengine.model import BaseModel +from mmengine.optim import OptimWrapper +from mmengine.runner import Runner +from mmengine.testing import assert_allclose +from torch.utils.data import Dataset + +from mmpretrain.engine import EMAHook + + +class SimpleModel(BaseModel): + + def __init__(self): + super().__init__() + self.para = nn.Parameter(torch.zeros(1)) + + def forward(self, *args, mode='tensor', **kwargs): + if mode == 'predict': + return self.para.clone() + elif mode == 'loss': + return {'loss': self.para.mean()} + + +class DummyDataset(Dataset): + METAINFO = dict() # type: ignore + data = torch.randn(6, 2) + label = torch.ones(6) + + @property + def metainfo(self): + return self.METAINFO + + def __len__(self): + return self.data.size(0) + + def __getitem__(self, index): + return dict(inputs=self.data[index], data_sample=self.label[index]) + + +class TestEMAHook(TestCase): + + def setUp(self): + self.temp_dir = tempfile.TemporaryDirectory() + state_dict = OrderedDict( + meta=dict(epoch=1, iter=2), + # The actual ema para + state_dict={'para': torch.tensor([1.])}, + # The actual original para + ema_state_dict={'module.para': torch.tensor([2.])}, + ) + self.ckpt = osp.join(self.temp_dir.name, 'ema.pth') + torch.save(state_dict, self.ckpt) + + def tearDown(self): + # `FileHandler` should be closed in Windows, otherwise we cannot + # delete the temporary directory + logging.shutdown() + MMLogger._instance_dict.clear() + self.temp_dir.cleanup() + + def test_load_state_dict(self): + device = get_device() + model = SimpleModel().to(device) + ema_hook = EMAHook() + runner = Runner( + model=model, + train_dataloader=dict( + dataset=DummyDataset(), + sampler=dict(type='DefaultSampler', shuffle=False), + batch_size=3, + num_workers=0), + optim_wrapper=OptimWrapper( + optimizer=torch.optim.Adam(model.parameters(), lr=0.)), + train_cfg=dict(by_epoch=True, max_epochs=2), + work_dir=self.temp_dir.name, + resume=False, + load_from=self.ckpt, + default_hooks=dict(logger=None), + custom_hooks=[ema_hook], + default_scope='mmpretrain', + experiment_name='load_state_dict') + runner.train() + assert_allclose(runner.model.para, torch.tensor([1.], device=device)) + + def test_evaluate_on_ema(self): + + device = get_device() + model = SimpleModel().to(device) + + # Test validate on ema model + evaluator = Evaluator([MagicMock()]) + runner = Runner( + model=model, + val_dataloader=dict( + dataset=DummyDataset(), + sampler=dict(type='DefaultSampler', shuffle=False), + batch_size=3, + num_workers=0), + val_evaluator=evaluator, + val_cfg=dict(), + work_dir=self.temp_dir.name, + load_from=self.ckpt, + default_hooks=dict(logger=None), + custom_hooks=[dict(type='EMAHook')], + default_scope='mmpretrain', + experiment_name='validate_on_ema') + runner.val() + evaluator.metrics[0].process.assert_has_calls([ + call(ANY, [torch.tensor([1.]).to(device)]), + ]) + self.assertNotIn( + call(ANY, [torch.tensor([2.]).to(device)]), + evaluator.metrics[0].process.mock_calls) + + # Test test on ema model + evaluator = Evaluator([MagicMock()]) + runner = Runner( + model=model, + test_dataloader=dict( + dataset=DummyDataset(), + sampler=dict(type='DefaultSampler', shuffle=False), + batch_size=3, + 
num_workers=0), + test_evaluator=evaluator, + test_cfg=dict(), + work_dir=self.temp_dir.name, + load_from=self.ckpt, + default_hooks=dict(logger=None), + custom_hooks=[dict(type='EMAHook')], + default_scope='mmpretrain', + experiment_name='test_on_ema') + runner.test() + evaluator.metrics[0].process.assert_has_calls([ + call(ANY, [torch.tensor([1.]).to(device)]), + ]) + self.assertNotIn( + call(ANY, [torch.tensor([2.]).to(device)]), + evaluator.metrics[0].process.mock_calls) + + # Test validate on both models + evaluator = Evaluator([MagicMock()]) + runner = Runner( + model=model, + val_dataloader=dict( + dataset=DummyDataset(), + sampler=dict(type='DefaultSampler', shuffle=True), + batch_size=3, + num_workers=0), + val_evaluator=evaluator, + val_cfg=dict(), + work_dir=self.temp_dir.name, + load_from=self.ckpt, + default_hooks=dict(logger=None), + custom_hooks=[dict(type='EMAHook', evaluate_on_origin=True)], + default_scope='mmpretrain', + experiment_name='validate_on_ema_false', + ) + runner.val() + evaluator.metrics[0].process.assert_has_calls([ + call(ANY, [torch.tensor([1.]).to(device)]), + call(ANY, [torch.tensor([2.]).to(device)]), + ]) + + # Test test on both models + evaluator = Evaluator([MagicMock()]) + runner = Runner( + model=model, + test_dataloader=dict( + dataset=DummyDataset(), + sampler=dict(type='DefaultSampler', shuffle=True), + batch_size=3, + num_workers=0), + test_evaluator=evaluator, + test_cfg=dict(), + work_dir=self.temp_dir.name, + load_from=self.ckpt, + default_hooks=dict(logger=None), + custom_hooks=[dict(type='EMAHook', evaluate_on_origin=True)], + default_scope='mmpretrain', + experiment_name='test_on_ema_false', + ) + runner.test() + evaluator.metrics[0].process.assert_has_calls([ + call(ANY, [torch.tensor([1.]).to(device)]), + call(ANY, [torch.tensor([2.]).to(device)]), + ]) + + # Test evaluate_on_ema=False + evaluator = Evaluator([MagicMock()]) + with self.assertWarnsRegex(UserWarning, 'evaluate_on_origin'): + runner = Runner( + model=model, + test_dataloader=dict( + dataset=DummyDataset(), + sampler=dict(type='DefaultSampler', shuffle=False), + batch_size=3, + num_workers=0), + test_evaluator=evaluator, + test_cfg=dict(), + work_dir=self.temp_dir.name, + load_from=self.ckpt, + default_hooks=dict(logger=None), + custom_hooks=[dict(type='EMAHook', evaluate_on_ema=False)], + default_scope='mmpretrain', + experiment_name='not_test_on_ema') + runner.test() + evaluator.metrics[0].process.assert_has_calls([ + call(ANY, [torch.tensor([2.]).to(device)]), + ]) + self.assertNotIn( + call(ANY, [torch.tensor([1.]).to(device)]), + evaluator.metrics[0].process.mock_calls) diff --git a/tests/test_engine/test_hooks/test_precise_bn_hook.py b/tests/test_engine/test_hooks/test_precise_bn_hook.py new file mode 100644 index 0000000..f549b0d --- /dev/null +++ b/tests/test_engine/test_hooks/test_precise_bn_hook.py @@ -0,0 +1,232 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
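+# `PreciseBNHook` is expected to re-estimate the running statistics of every
+# `_BatchNorm` layer after training: it forwards roughly
+# `num_samples / batch_size` extra batches and replaces each layer's
+# `running_mean` / `running_var` with the average of the per-batch statistics
+# (e.g. `running_mean = mean_b(mu_b)` over those batches). GroupNorm layers
+# are left untouched (`GNExampleModel`), and a model without any BN layer
+# (`NoBNExampleModel`) should make the recomputation a no-op, which is what
+# the extra cases in `test_after_train_epoch` cover.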
+import copy +import logging +import tempfile +from unittest import TestCase +from unittest.mock import MagicMock, patch + +import pytest +import torch +import torch.nn as nn +from mmengine.logging import MMLogger +from mmengine.model import BaseModel +from mmengine.runner import Runner +from torch.utils.data import DataLoader, Dataset + +from mmpretrain.models.utils import ClsDataPreprocessor +from mmpretrain.registry import HOOKS +from mmpretrain.structures import DataSample + + +class ExampleDataset(Dataset): + + def __init__(self): + self.index = 0 + self.metainfo = None + + def __getitem__(self, idx): + results = dict(imgs=torch.tensor([1.0], dtype=torch.float32)) + return results + + def __len__(self): + return 10 + + +class MockDataPreprocessor(ClsDataPreprocessor): + """mock preprocessor that do nothing.""" + + def forward(self, data, training=False): + + return dict(inputs=data['imgs'], data_samples=DataSample()) + + +class ExampleModel(BaseModel): + + def __init__(self): + super(ExampleModel, self).__init__() + self.data_preprocessor = MockDataPreprocessor() + self.conv = nn.Linear(1, 1) + self.bn = nn.BatchNorm1d(1) + self.test_cfg = None + + def forward(self, inputs, data_samples, mode='tensor'): + inputs = inputs.to(next(self.parameters()).device) + return self.bn(self.conv(inputs)) + + def train_step(self, data, optim_wrapper): + outputs = {'loss': 0.5, 'num_samples': 1} + return outputs + + +class SingleBNModel(ExampleModel): + + def __init__(self): + super().__init__() + self.bn = nn.BatchNorm1d(1) + self.test_cfg = None + + def forward(self, inputs, data_samples, mode='tensor'): + return self.bn(inputs) + + +class GNExampleModel(ExampleModel): + + def __init__(self): + super().__init__() + self.gn = nn.GroupNorm(1, 1) + self.test_cfg = None + + +class NoBNExampleModel(ExampleModel): + + def __init__(self): + super().__init__() + self.conv = nn.Linear(1, 1) + delattr(self, 'bn') + self.test_cfg = None + + def forward(self, inputs, data_samples, mode='tensor'): + return self.conv(inputs) + + +class TestPreciseBNHookHook(TestCase): + DEFAULT_ARGS = dict(type='PreciseBNHook', num_samples=4, interval=1) + count = 0 + + def setUp(self) -> None: + # optimizer + self.optim_wrapper = dict( + optimizer=dict( + type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)) + # learning policy + self.epoch_param_scheduler = dict( + type='MultiStepLR', by_epoch=True, milestones=[1, 2], gamma=0.1) + self.iter_param_scheduler = dict( + type='MultiStepLR', by_epoch=False, milestones=[1, 2], gamma=0.1) + + self.default_hooks = dict( + timer=dict(type='IterTimerHook'), + logger=None, + param_scheduler=dict(type='ParamSchedulerHook'), + checkpoint=dict(type='CheckpointHook', interval=1), + sampler_seed=dict(type='DistSamplerSeedHook'), + visualization=dict(type='VisualizationHook', enable=False), + ) + self.epoch_train_cfg = dict(by_epoch=True, max_epochs=1) + self.iter_train_cfg = dict(by_epoch=False, max_iters=5) + self.tmpdir = tempfile.TemporaryDirectory() + self.preciseBN_cfg = copy.deepcopy(self.DEFAULT_ARGS) + + test_dataset = ExampleDataset() + self.loader = DataLoader(test_dataset, batch_size=2) + self.model = ExampleModel() + + def test_construct(self): + self.runner = Runner( + model=self.model, + work_dir=self.tmpdir.name, + train_dataloader=self.loader, + train_cfg=self.epoch_train_cfg, + log_level='WARNING', + optim_wrapper=self.optim_wrapper, + param_scheduler=self.epoch_param_scheduler, + default_scope='mmpretrain', + default_hooks=self.default_hooks, + 
experiment_name='test_construct', + custom_hooks=None) + + cfg = copy.deepcopy(self.DEFAULT_ARGS) + precise_bn = HOOKS.build(cfg) + self.assertEqual(precise_bn.num_samples, 4) + self.assertEqual(precise_bn.interval, 1) + + with pytest.raises(AssertionError): + # num_samples must be larger than 0 + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['num_samples'] = -1 + HOOKS.build(cfg) + + with pytest.raises(AssertionError): + # interval must be larger than 0 + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['interval'] = 0 + HOOKS.build(cfg) + + @patch('mmengine.dist.get_dist_info', MagicMock(return_value=(1, 2))) + @patch('torch.distributed.all_reduce', MagicMock()) + def test_after_train_epoch_multi_machines(self): + # Test with normal conv model in single machine + self.preciseBN_cfg['priority'] = 'ABOVE_NORMAL' + self.runner = Runner( + model=self.model, + work_dir=self.tmpdir.name, + train_dataloader=self.loader, + train_cfg=self.epoch_train_cfg, + log_level='WARNING', + optim_wrapper=self.optim_wrapper, + param_scheduler=self.epoch_param_scheduler, + default_scope='mmpretrain', + default_hooks=self.default_hooks, + experiment_name='test_after_train_epoch_multi_machines', + custom_hooks=[self.preciseBN_cfg]) + self.runner.train() + + def test_after_train_epoch(self): + self.preciseBN_cfg['priority'] = 'ABOVE_NORMAL' + self.runner = Runner( + model=self.model, + work_dir=self.tmpdir.name, + train_dataloader=self.loader, + train_cfg=self.epoch_train_cfg, + log_level='WARNING', + optim_wrapper=self.optim_wrapper, + param_scheduler=self.epoch_param_scheduler, + default_scope='mmpretrain', + default_hooks=self.default_hooks, + experiment_name='test_after_train_epoch', + custom_hooks=[self.preciseBN_cfg]) + + # Test with normal conv model in single machine + self.runner._train_loop = self.epoch_train_cfg + self.runner.train() + + # Test with only BN model + self.runner.model = SingleBNModel() + self.runner._train_loop = self.epoch_train_cfg + self.runner.train() + + # Test with GN model + self.runner.model = GNExampleModel() + self.runner._train_loop = self.epoch_train_cfg + self.runner.train() + + # Test with no BN model + self.runner.model = NoBNExampleModel() + self.runner._train_loop = self.epoch_train_cfg + self.runner.train() + + def test_after_train_iter(self): + # test precise bn hook in iter base loop + self.preciseBN_cfg['priority'] = 'ABOVE_NORMAL' + test_dataset = ExampleDataset() + self.loader = DataLoader(test_dataset, batch_size=2) + self.runner = Runner( + model=self.model, + work_dir=self.tmpdir.name, + train_dataloader=self.loader, + train_cfg=self.iter_train_cfg, + log_level='WARNING', + optim_wrapper=self.optim_wrapper, + param_scheduler=self.iter_param_scheduler, + default_scope='mmpretrain', + default_hooks=self.default_hooks, + experiment_name='test_after_train_iter', + custom_hooks=[self.preciseBN_cfg]) + self.runner.train() + + def tearDown(self) -> None: + # `FileHandler` should be closed in Windows, otherwise we cannot + # delete the temporary directory. + logging.shutdown() + MMLogger._instance_dict.clear() + self.tmpdir.cleanup() diff --git a/tests/test_engine/test_hooks/test_retrievers_hooks.py b/tests/test_engine/test_hooks/test_retrievers_hooks.py new file mode 100644 index 0000000..c14e70c --- /dev/null +++ b/tests/test_engine/test_hooks/test_retrievers_hooks.py @@ -0,0 +1,34 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
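+# `PrepareProtoBeforeValLoopHook` is expected to (re)build a retriever's
+# prototype before every validation loop, essentially
+#
+#     def before_val(self, runner):
+#         if isinstance(runner.model, BaseRetriever):
+#             runner.model.prepare_prototype()
+#
+# (an illustrative sketch, not the real implementation), so after `before_val`
+# the `ToyRetriever` below must have `prototype_vecs` populated and
+# `prototype_inited` set to True.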
+from unittest import TestCase +from unittest.mock import MagicMock + +import torch + +from mmpretrain.engine import PrepareProtoBeforeValLoopHook +from mmpretrain.models.retrievers import BaseRetriever + + +class ToyRetriever(BaseRetriever): + + def forward(self, inputs, data_samples=None, mode: str = 'loss'): + self.prototype_inited is False + + def prepare_prototype(self): + """Preprocessing the prototype before predict.""" + self.prototype_vecs = torch.tensor([0]) + self.prototype_inited = True + + +class TestPrepareProtBeforeValLoopHook(TestCase): + + def setUp(self): + self.hook = PrepareProtoBeforeValLoopHook + self.runner = MagicMock() + self.runner.model = ToyRetriever() + + def test_before_val(self): + self.runner.model.prepare_prototype() + self.assertTrue(self.runner.model.prototype_inited) + self.hook.before_val(self, self.runner) + self.assertIsNotNone(self.runner.model.prototype_vecs) + self.assertTrue(self.runner.model.prototype_inited) diff --git a/tests/test_engine/test_hooks/test_simsiam_hook.py b/tests/test_engine/test_hooks/test_simsiam_hook.py new file mode 100644 index 0000000..29bda93 --- /dev/null +++ b/tests/test_engine/test_hooks/test_simsiam_hook.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging +import tempfile +from unittest import TestCase + +import torch +import torch.nn as nn +from mmengine.device import get_device +from mmengine.logging import MMLogger +from mmengine.model import BaseModule +from mmengine.runner import Runner +from mmengine.structures import LabelData +from torch.utils.data import Dataset + +from mmpretrain.engine import SimSiamHook +from mmpretrain.models.selfsup import BaseSelfSupervisor +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample + + +class DummyDataset(Dataset): + METAINFO = dict() # type: ignore + data = torch.randn(12, 2) + label = torch.ones(12) + + @property + def metainfo(self): + return self.METAINFO + + def __len__(self): + return self.data.size(0) + + def __getitem__(self, index): + data_sample = DataSample() + gt_label = LabelData(value=self.label[index]) + setattr(data_sample, 'gt_label', gt_label) + return dict(inputs=[self.data[index]], data_samples=data_sample) + + +@MODELS.register_module() +class SimSiamDummyLayer(BaseModule): + + def __init__(self, init_cfg=None): + super().__init__(init_cfg) + self.predictor = nn.Linear(2, 1) + + def forward(self, x): + return self.predictor(x) + + +class ToyModel(BaseSelfSupervisor): + + def __init__(self): + super().__init__(backbone=dict(type='SimSiamDummyLayer')) + + def extract_feat(self): + pass + + def loss(self, inputs, data_samples): + labels = [] + for x in data_samples: + labels.append(x.gt_label.value) + labels = torch.stack(labels) + outputs = self.backbone(inputs[0]) + loss = (labels - outputs).sum() + outputs = dict(loss=loss) + return outputs + + +class TestSimSiamHook(TestCase): + + def setUp(self): + self.temp_dir = tempfile.TemporaryDirectory() + + def tearDown(self): + # `FileHandler` should be closed in Windows, otherwise we cannot + # delete the temporary directory + logging.shutdown() + MMLogger._instance_dict.clear() + self.temp_dir.cleanup() + + def test_simsiam_hook(self): + device = get_device() + dummy_dataset = DummyDataset() + toy_model = ToyModel().to(device) + simsiam_hook = SimSiamHook( + fix_pred_lr=True, lr=0.05, adjust_by_epoch=False) + + # test SimSiamHook + runner = Runner( + model=toy_model, + work_dir=self.temp_dir.name, + train_dataloader=dict( + dataset=dummy_dataset, 
+ sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + batch_size=1, + num_workers=0), + optim_wrapper=dict( + optimizer=dict(type='SGD', lr=0.05), + paramwise_cfg=dict( + custom_keys={'predictor': dict(fix_lr=True)})), + param_scheduler=dict(type='MultiStepLR', milestones=[1]), + train_cfg=dict(by_epoch=True, max_epochs=2), + custom_hooks=[simsiam_hook], + default_hooks=dict(logger=None), + log_processor=dict(window_size=1), + experiment_name='test_simsiam_hook', + default_scope='mmpretrain') + + runner.train() + + for param_group in runner.optim_wrapper.optimizer.param_groups: + if 'fix_lr' in param_group and param_group['fix_lr']: + assert param_group['lr'] == 0.05 + else: + assert param_group['lr'] != 0.05 diff --git a/tests/test_engine/test_hooks/test_swav_hook.py b/tests/test_engine/test_hooks/test_swav_hook.py new file mode 100644 index 0000000..2239ccb --- /dev/null +++ b/tests/test_engine/test_hooks/test_swav_hook.py @@ -0,0 +1,127 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging +import tempfile +from unittest import TestCase + +import torch +import torch.nn as nn +from mmengine.device import get_device +from mmengine.logging import MMLogger +from mmengine.model import BaseModule +from mmengine.optim import OptimWrapper +from mmengine.runner import Runner +from mmengine.structures import LabelData +from torch.utils.data import Dataset + +from mmpretrain.engine import SwAVHook +from mmpretrain.models.heads import SwAVHead +from mmpretrain.models.selfsup import BaseSelfSupervisor +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from mmpretrain.utils import get_ori_model + + +class DummyDataset(Dataset): + METAINFO = dict() # type: ignore + data = torch.randn(12, 2) + label = torch.ones(12) + + @property + def metainfo(self): + return self.METAINFO + + def __len__(self): + return self.data.size(0) + + def __getitem__(self, index): + data_sample = DataSample() + gt_label = LabelData(value=self.label[index]) + setattr(data_sample, 'gt_label', gt_label) + return dict(inputs=[self.data[index]], data_samples=data_sample) + + +@MODELS.register_module() +class SwAVDummyLayer(BaseModule): + + def __init__(self, init_cfg=None): + super().__init__(init_cfg) + self.linear = nn.Linear(2, 1) + + def forward(self, x): + return self.linear(x) + + +class ToyModel(BaseSelfSupervisor): + + def __init__(self): + super().__init__(backbone=dict(type='SwAVDummyLayer')) + self.prototypes_test = nn.Linear(1, 1) + self.head = SwAVHead( + loss=dict( + type='SwAVLoss', + feat_dim=2, + num_crops=[2, 6], + num_prototypes=3)) + + def loss(self, inputs, data_samples): + labels = [] + for x in data_samples: + labels.append(x.gt_label.value) + labels = torch.stack(labels) + outputs = self.backbone(inputs[0]) + loss = (labels - outputs).sum() + outputs = dict(loss=loss) + return outputs + + +class TestSwAVHook(TestCase): + + def setUp(self): + self.temp_dir = tempfile.TemporaryDirectory() + + def tearDown(self): + # `FileHandler` should be closed in Windows, otherwise we cannot + # delete the temporary directory + logging.shutdown() + MMLogger._instance_dict.clear() + self.temp_dir.cleanup() + + def test_swav_hook(self): + device = get_device() + dummy_dataset = DummyDataset() + toy_model = ToyModel().to(device) + swav_hook = SwAVHook( + batch_size=1, + epoch_queue_starts=15, + crops_for_assign=[0, 1], + feat_dim=128, + queue_length=300, + frozen_layers_cfg=dict(prototypes=2)) + + # test SwAVHook + runner = Runner( + 
model=toy_model, + work_dir=self.temp_dir.name, + train_dataloader=dict( + dataset=dummy_dataset, + sampler=dict(type='DefaultSampler', shuffle=True), + collate_fn=dict(type='default_collate'), + batch_size=1, + num_workers=0), + optim_wrapper=OptimWrapper( + torch.optim.Adam(toy_model.parameters())), + param_scheduler=dict(type='MultiStepLR', milestones=[1]), + train_cfg=dict(by_epoch=True, max_epochs=2), + custom_hooks=[swav_hook], + default_hooks=dict(logger=None), + log_processor=dict(window_size=1), + experiment_name='test_swav_hook', + default_scope='mmpretrain') + + runner.train() + + for hook in runner.hooks: + if isinstance(hook, SwAVHook): + assert hook.queue_length == 300 + + assert get_ori_model(runner.model).head.loss_module.use_queue is False diff --git a/tests/test_engine/test_hooks/test_switch_recipe_hook.py b/tests/test_engine/test_hooks/test_switch_recipe_hook.py new file mode 100644 index 0000000..c8c7b56 --- /dev/null +++ b/tests/test_engine/test_hooks/test_switch_recipe_hook.py @@ -0,0 +1,371 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import logging +import os.path as osp +import tempfile +from typing import List +from unittest import TestCase +from unittest.mock import MagicMock + +import torch +import torch.nn as nn +from mmcv.transforms import Compose +from mmengine.dataset import BaseDataset, ConcatDataset, RepeatDataset +from mmengine.device import get_device +from mmengine.logging import MMLogger +from mmengine.model import BaseDataPreprocessor, BaseModel +from mmengine.optim import OptimWrapper +from mmengine.runner import Runner + +from mmpretrain.engine import SwitchRecipeHook +from mmpretrain.models import CrossEntropyLoss +from mmpretrain.models.heads.cls_head import ClsHead +from mmpretrain.models.losses import LabelSmoothLoss +from mmpretrain.models.utils.batch_augments import RandomBatchAugment + + +class SimpleDataPreprocessor(BaseDataPreprocessor): + + def __init__(self): + super().__init__() + self.batch_augments = None + + def forward(self, data, training): + + data = self.cast_data(data) + assert 'inputs' in data, 'No `input` in data.' 
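+        # Batch augments (e.g. the Mixup recipe that SwitchRecipeHook attaches
+        # at its action_epoch) are only applied in training mode, mirroring
+        # how ClsDataPreprocessor handles its own batch_augments.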
+ inputs = data['inputs'] + labels = data['labels'] + + if self.batch_augments is not None and training: + inputs, labels = self.batch_augments(inputs, labels) + + return {'inputs': inputs, 'labels': labels} + + +class SimpleModel(BaseModel): + + def __init__(self): + super().__init__() + self.data_preprocessor = SimpleDataPreprocessor() + self.gap = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(3, 10) + self.loss_module = CrossEntropyLoss(use_soft=True) + + def forward(self, inputs, labels, mode='tensor'): + if mode == 'loss': + score = self.fc(self.gap(inputs).view(inputs.size(0), -1)) + loss = self.loss_module(score, labels) + return {'loss': loss} + else: + return None + + +class ExampleDataset(BaseDataset): + + def load_data_list(self) -> List[dict]: + return [{ + 'inputs': torch.rand(3, 12, 12), + 'labels': torch.rand(10), + } for _ in range(10)] + + +class EmptyTransform: + + def __call__(self, results): + return {} + + +class TestSwitchRecipeHook(TestCase): + + def setUp(self): + self.tmpdir = tempfile.TemporaryDirectory() + + def tearDown(self) -> None: + logging.shutdown() + MMLogger._instance_dict.clear() + self.tmpdir.cleanup() + + def test_init(self): + # test `action_epoch` is set + with self.assertRaisesRegex(AssertionError, 'Please set'): + SwitchRecipeHook([dict(batch_augments=None)]) + + # test `action_epoch` is not repeated + with self.assertRaisesRegex(AssertionError, 'is repeated'): + SwitchRecipeHook([dict(action_epoch=1), dict(action_epoch=1)]) + + # test recipe build + hook = SwitchRecipeHook([ + dict( + action_epoch=1, + train_pipeline=[dict(type='LoadImageFromFile')], + loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1), + batch_augments=dict(augments=dict(type='Mixup', alpha=0.8)), + ) + ]) + self.assertIn(1, hook.schedule) + self.assertIsInstance(hook.schedule[1]['train_pipeline'], Compose) + self.assertIsInstance(hook.schedule[1]['loss'], LabelSmoothLoss) + self.assertIsInstance(hook.schedule[1]['batch_augments'], + RandomBatchAugment) + + # test recipe build with instance + hook = SwitchRecipeHook([ + dict( + action_epoch=1, + train_pipeline=[MagicMock()], + loss=MagicMock(), + batch_augments=MagicMock(), + ) + ]) + self.assertIn(1, hook.schedule) + self.assertIsInstance(hook.schedule[1]['train_pipeline'], Compose) + self.assertIsInstance(hook.schedule[1]['loss'], MagicMock) + self.assertIsInstance(hook.schedule[1]['batch_augments'], MagicMock) + + # test empty pieline and train_augments + hook = SwitchRecipeHook( + [dict(action_epoch=1, train_pipeline=[], batch_augments=None)]) + self.assertIn(1, hook.schedule) + self.assertIsInstance(hook.schedule[1]['train_pipeline'], Compose) + self.assertIsNone(hook.schedule[1]['batch_augments']) + + def test_do_switch(self): + device = get_device() + model = SimpleModel().to(device) + + loss = CrossEntropyLoss(use_soft=True) + loss.forward = MagicMock( + side_effect=lambda x, y: CrossEntropyLoss.forward(loss, x, y)) + batch_augments = RandomBatchAugment(dict(type='Mixup', alpha=0.5)) + switch_hook = SwitchRecipeHook([ + dict( + action_epoch=2, + train_pipeline=[MagicMock(side_effect=lambda x: x)], + loss=loss, + batch_augments=MagicMock( + side_effect=lambda x, y: batch_augments(x, y)), + ) + ]) + + runner = Runner( + model=model, + train_dataloader=dict( + dataset=ExampleDataset(), + sampler=dict(type='DefaultSampler', shuffle=True), + batch_size=5, + num_workers=0, + collate_fn=dict(type='default_collate'), + ), + optim_wrapper=OptimWrapper( + optimizer=torch.optim.Adam(model.parameters(), lr=0.)), + 
train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=10), + work_dir=self.tmpdir.name, + default_hooks=dict(logger=None), + custom_hooks=[switch_hook], + default_scope='mmpretrain', + experiment_name='test_switch') + runner.train() + self.assertEqual(switch_hook.schedule[2]['batch_augments'].call_count, + 2) + self.assertEqual(switch_hook.schedule[2]['loss'].forward.call_count, 2) + self.assertEqual( + switch_hook.schedule[2]['train_pipeline'].transforms[0].call_count, + 10) + + # Due to the unknown error in Windows environment, the unit test for + # `num_workers>0` is disabled temporarily + + # switch_hook = SwitchRecipeHook( + # [dict( + # action_epoch=2, + # train_pipeline=[EmptyTransform()], + # )]) + + # runner = Runner( + # model=model, + # train_dataloader=dict( + # dataset=ExampleDataset(), + # sampler=dict(type='DefaultSampler', shuffle=True), + # batch_size=5, + # num_workers=1, + # persistent_workers=True, + # collate_fn=dict(type='default_collate'), + # ), + # optim_wrapper=OptimWrapper( + # optimizer=torch.optim.Adam(model.parameters(), lr=0.)), + # train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=10), + # work_dir=self.tmpdir.name, + # default_hooks=dict(logger=None), + # custom_hooks=[switch_hook], + # default_scope='mmpretrain', + # experiment_name='test_switch_multi_workers') + # with self.assertRaisesRegex(AssertionError, 'No `input` in data.'): + # # If the pipeline switch works, the data_preprocessor cannot + # # receive `inputs`. + # runner.train() + + def test_resume(self): + device = get_device() + model = SimpleModel().to(device) + + loss = CrossEntropyLoss(use_soft=True) + loss.forward = MagicMock( + side_effect=lambda x, y: CrossEntropyLoss.forward(loss, x, y)) + batch_augments = RandomBatchAugment(dict(type='Mixup', alpha=0.5)) + switch_hook = SwitchRecipeHook([ + dict( + action_epoch=1, + train_pipeline=[MagicMock(side_effect=lambda x: x)]), + dict(action_epoch=2, loss=loss), + dict( + action_epoch=4, + batch_augments=MagicMock( + side_effect=lambda x, y: batch_augments(x, y)), + ), + ]) + + runner = Runner( + model=model, + train_dataloader=dict( + dataset=ExampleDataset(), + sampler=dict(type='DefaultSampler', shuffle=True), + batch_size=5, + num_workers=0, + collate_fn=dict(type='default_collate'), + ), + optim_wrapper=OptimWrapper( + optimizer=torch.optim.Adam(model.parameters(), lr=0.)), + train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=10), + work_dir=self.tmpdir.name, + default_hooks=dict(logger=None), + custom_hooks=[switch_hook], + default_scope='mmpretrain', + experiment_name='test_resume1') + runner.train() + + runner = Runner( + model=model, + train_dataloader=dict( + dataset=ExampleDataset(), + sampler=dict(type='DefaultSampler', shuffle=True), + batch_size=5, + num_workers=0, + collate_fn=dict(type='default_collate'), + ), + optim_wrapper=OptimWrapper( + optimizer=torch.optim.Adam(model.parameters(), lr=0.)), + train_cfg=dict(by_epoch=True, max_epochs=4, val_interval=10), + resume=True, + load_from=osp.join(self.tmpdir.name, 'epoch_2.pth'), + work_dir=self.tmpdir.name, + default_hooks=dict(logger=None), + custom_hooks=[switch_hook], + default_scope='mmpretrain', + experiment_name='test_resume2') + + with self.assertLogs(runner.logger, 'INFO') as logs: + runner.train() + prefix = 'INFO:mmengine:' + self.assertIn( + prefix + 'Switch train pipeline (resume recipe of epoch 1).', + logs.output) + self.assertIn(prefix + 'Switch loss (resume recipe of epoch 2).', + logs.output) + self.assertIn(prefix + 'Switch batch augments at epoch 
4.', + logs.output) + + def test_switch_train_pipeline(self): + device = get_device() + model = SimpleModel().to(device) + + runner = Runner( + model=model, + train_dataloader=dict( + dataset=ConcatDataset([ExampleDataset(), + ExampleDataset()]), + sampler=dict(type='DefaultSampler', shuffle=True), + batch_size=5, + num_workers=0, + collate_fn=dict(type='default_collate'), + ), + optim_wrapper=OptimWrapper( + optimizer=torch.optim.Adam(model.parameters(), lr=0.)), + train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=10), + work_dir=self.tmpdir.name, + default_hooks=dict(logger=None), + default_scope='mmpretrain', + experiment_name='test_concat_dataset') + pipeline = MagicMock() + SwitchRecipeHook._switch_train_pipeline(runner, pipeline) + self.assertIs(runner.train_dataloader.dataset.datasets[0].pipeline, + pipeline) + self.assertIs(runner.train_dataloader.dataset.datasets[1].pipeline, + pipeline) + + runner = Runner( + model=model, + train_dataloader=dict( + dataset=RepeatDataset(ExampleDataset(), 3), + sampler=dict(type='DefaultSampler', shuffle=True), + batch_size=5, + num_workers=0, + collate_fn=dict(type='default_collate'), + ), + optim_wrapper=OptimWrapper( + optimizer=torch.optim.Adam(model.parameters(), lr=0.)), + train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=10), + work_dir=self.tmpdir.name, + default_hooks=dict(logger=None), + default_scope='mmpretrain', + experiment_name='test_repeat_dataset') + pipeline = MagicMock() + SwitchRecipeHook._switch_train_pipeline(runner, pipeline) + self.assertIs(runner.train_dataloader.dataset.dataset.pipeline, + pipeline) + + def test_switch_loss(self): + device = get_device() + model = SimpleModel().to(device) + + runner = Runner( + model=model, + train_dataloader=dict( + dataset=ExampleDataset(), + sampler=dict(type='DefaultSampler', shuffle=True), + batch_size=5, + num_workers=0, + collate_fn=dict(type='default_collate'), + ), + optim_wrapper=OptimWrapper( + optimizer=torch.optim.Adam(model.parameters(), lr=0.)), + train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=10), + work_dir=self.tmpdir.name, + default_hooks=dict(logger=None), + default_scope='mmpretrain', + experiment_name='test_model_loss') + loss = CrossEntropyLoss(use_soft=True) + SwitchRecipeHook._switch_loss(runner, loss) + self.assertIs(runner.model.loss_module, loss) + + model.add_module('head', ClsHead()) + del model.loss_module + runner = Runner( + model=model, + train_dataloader=dict( + dataset=ExampleDataset(), + sampler=dict(type='DefaultSampler', shuffle=True), + batch_size=5, + num_workers=0, + collate_fn=dict(type='default_collate'), + ), + optim_wrapper=OptimWrapper( + optimizer=torch.optim.Adam(model.parameters(), lr=0.)), + train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=10), + work_dir=self.tmpdir.name, + default_hooks=dict(logger=None), + default_scope='mmpretrain', + experiment_name='test_head_loss') + loss = CrossEntropyLoss(use_soft=True) + SwitchRecipeHook._switch_loss(runner, loss) + self.assertIs(runner.model.head.loss_module, loss) diff --git a/tests/test_engine/test_hooks/test_visualization_hook.py b/tests/test_engine/test_hooks/test_visualization_hook.py new file mode 100644 index 0000000..2fe0ae3 --- /dev/null +++ b/tests/test_engine/test_hooks/test_visualization_hook.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
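+# The expectations below encode how `VisualizationHook` picks samples and
+# names outputs: a sample is drawn whenever its global index
+# `batch_idx * batch_size + i` is a multiple of `interval` (so with
+# `interval=4`, `batch_idx=1` and a batch of 10, global indices 12 and 16 map
+# to `outputs[2]` and `outputs[6]`); the visualization `step` is the current
+# epoch for an `EpochBasedTrainLoop` and the current iteration for an
+# `IterBasedTrainLoop`; and with `out_dir` set, the drawn image is saved as
+# `<name>_<step>.png` under that directory.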
+import os.path as osp +import tempfile +from unittest import TestCase +from unittest.mock import ANY, MagicMock, patch + +import torch +from mmengine.runner import EpochBasedTrainLoop, IterBasedTrainLoop + +from mmpretrain.engine import VisualizationHook +from mmpretrain.registry import HOOKS +from mmpretrain.structures import DataSample +from mmpretrain.visualization import UniversalVisualizer + + +class TestVisualizationHook(TestCase): + + def setUp(self) -> None: + UniversalVisualizer.get_instance('visualizer') + + data_sample = DataSample().set_gt_label(1).set_pred_label(2) + data_sample.set_metainfo({'img_path': 'tests/data/color.jpg'}) + self.data_batch = { + 'inputs': torch.randint(0, 256, (10, 3, 224, 224)), + 'data_sample': [data_sample] * 10 + } + + self.outputs = [data_sample] * 10 + + self.tmpdir = tempfile.TemporaryDirectory() + + def test_draw_samples(self): + # test enable=False + cfg = dict(type='VisualizationHook', enable=False) + hook: VisualizationHook = HOOKS.build(cfg) + with patch.object(hook._visualizer, 'visualize_cls') as mock: + hook._draw_samples(1, self.data_batch, self.outputs, step=1) + mock.assert_not_called() + + # test enable=True + cfg = dict(type='VisualizationHook', enable=True, show=True) + hook: VisualizationHook = HOOKS.build(cfg) + with patch.object(hook._visualizer, 'visualize_cls') as mock: + hook._draw_samples(0, self.data_batch, self.outputs, step=0) + mock.assert_called_once_with( + image=ANY, + data_sample=self.outputs[0], + step=0, + show=True, + name='color.jpg') + + # test samples without path + cfg = dict(type='VisualizationHook', enable=True) + hook: VisualizationHook = HOOKS.build(cfg) + with patch.object(hook._visualizer, 'visualize_cls') as mock: + outputs = [DataSample()] * 10 + hook._draw_samples(0, self.data_batch, outputs, step=0) + mock.assert_called_once_with( + image=ANY, + data_sample=outputs[0], + step=0, + show=False, + name='0') + + # test out_dir + cfg = dict( + type='VisualizationHook', enable=True, out_dir=self.tmpdir.name) + hook: VisualizationHook = HOOKS.build(cfg) + with patch.object(hook._visualizer, 'visualize_cls') as mock: + hook._draw_samples(0, self.data_batch, self.outputs, step=0) + mock.assert_called_once_with( + image=ANY, + data_sample=self.outputs[0], + step=0, + show=False, + name='color.jpg', + out_file=osp.join(self.tmpdir.name, 'color.jpg_0.png')) + + # test sample idx + cfg = dict(type='VisualizationHook', enable=True, interval=4) + hook: VisualizationHook = HOOKS.build(cfg) + with patch.object(hook._visualizer, 'visualize_cls') as mock: + hook._draw_samples(1, self.data_batch, self.outputs, step=0) + mock.assert_called_with( + image=ANY, + data_sample=self.outputs[2], + step=0, + show=False, + name='color.jpg', + ) + mock.assert_called_with( + image=ANY, + data_sample=self.outputs[6], + step=0, + show=False, + name='color.jpg', + ) + + def test_after_val_iter(self): + runner = MagicMock() + + # test epoch-based + runner.train_loop = MagicMock(spec=EpochBasedTrainLoop) + runner.epoch = 5 + cfg = dict(type='VisualizationHook', enable=True) + hook = HOOKS.build(cfg) + with patch.object(hook._visualizer, 'visualize_cls') as mock: + hook.after_val_iter(runner, 0, self.data_batch, self.outputs) + mock.assert_called_once_with( + image=ANY, + data_sample=self.outputs[0], + step=5, + show=False, + name='color.jpg', + ) + + # test iter-based + runner.train_loop = MagicMock(spec=IterBasedTrainLoop) + runner.iter = 300 + cfg = dict(type='VisualizationHook', enable=True) + hook = HOOKS.build(cfg) + with 
patch.object(hook._visualizer, 'visualize_cls') as mock: + hook.after_val_iter(runner, 0, self.data_batch, self.outputs) + mock.assert_called_once_with( + image=ANY, + data_sample=self.outputs[0], + step=300, + show=False, + name='color.jpg', + ) + + def test_after_test_iter(self): + runner = MagicMock() + + cfg = dict(type='VisualizationHook', enable=True) + hook = HOOKS.build(cfg) + with patch.object(hook._visualizer, 'visualize_cls') as mock: + hook.after_test_iter(runner, 0, self.data_batch, self.outputs) + mock.assert_called_once_with( + image=ANY, + data_sample=self.outputs[0], + step=0, + show=False, + name='color.jpg', + ) + + def tearDown(self) -> None: + self.tmpdir.cleanup() diff --git a/tests/test_engine/test_optimizers/test_layer_decay_optim_wrapper_constructor.py b/tests/test_engine/test_optimizers/test_layer_decay_optim_wrapper_constructor.py new file mode 100644 index 0000000..d92b297 --- /dev/null +++ b/tests/test_engine/test_optimizers/test_layer_decay_optim_wrapper_constructor.py @@ -0,0 +1,107 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch +from torch import nn + +from mmpretrain.engine import LearningRateDecayOptimWrapperConstructor +from mmpretrain.models import ImageClassifier, VisionTransformer + + +class ToyViTBackbone(nn.Module): + + get_layer_depth = VisionTransformer.get_layer_depth + + def __init__(self, num_layers=2): + super().__init__() + self.cls_token = nn.Parameter(torch.ones(1)) + self.pos_embed = nn.Parameter(torch.ones(1)) + self.num_layers = num_layers + self.layers = nn.ModuleList() + for _ in range(num_layers): + layer = nn.Conv2d(3, 3, 1) + self.layers.append(layer) + + +class ToyViT(nn.Module): + get_layer_depth = ImageClassifier.get_layer_depth + + def __init__(self): + super().__init__() + # add some variables to meet unit test coverate rate + self.backbone = ToyViTBackbone() + self.head = nn.Linear(1, 1) + + +class TestLearningRateDecayOptimWrapperConstructor(TestCase): + base_lr = 1.0 + base_wd = 0.05 + + def test_add_params(self): + model = ToyViT() + optim_wrapper_cfg = dict( + type='OptimWrapper', + optimizer=dict( + type='AdamW', + lr=self.base_lr, + betas=(0.9, 0.999), + weight_decay=self.base_wd)) + paramwise_cfg = dict( + layer_decay_rate=2.0, + bias_decay_mult=0., + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0), + }) + + constructor = LearningRateDecayOptimWrapperConstructor( + optim_wrapper_cfg=optim_wrapper_cfg, + paramwise_cfg=paramwise_cfg, + ) + optimizer_wrapper = constructor(model) + + expected_groups = [{ + 'weight_decay': 0.0, + 'lr': 8 * self.base_lr, + 'param_name': 'backbone.cls_token', + }, { + 'weight_decay': 0.0, + 'lr': 8 * self.base_lr, + 'param_name': 'backbone.pos_embed', + }, { + 'weight_decay': self.base_wd, + 'lr': 4 * self.base_lr, + 'param_name': 'backbone.layers.0.weight', + }, { + 'weight_decay': 0.0, + 'lr': 4 * self.base_lr, + 'param_name': 'backbone.layers.0.bias', + }, { + 'weight_decay': self.base_wd, + 'lr': 2 * self.base_lr, + 'param_name': 'backbone.layers.1.weight', + }, { + 'weight_decay': 0.0, + 'lr': 2 * self.base_lr, + 'param_name': 'backbone.layers.1.bias', + }, { + 'weight_decay': self.base_wd, + 'lr': 1 * self.base_lr, + 'param_name': 'head.weight', + }, { + 'weight_decay': 0.0, + 'lr': 1 * self.base_lr, + 'param_name': 'head.bias', + }] + self.assertIsInstance(optimizer_wrapper.optimizer, torch.optim.AdamW) + self.assertEqual(optimizer_wrapper.optimizer.defaults['lr'], + self.base_lr) + 
self.assertEqual(optimizer_wrapper.optimizer.defaults['weight_decay'], + self.base_wd) + param_groups = optimizer_wrapper.optimizer.param_groups + self.assertEqual(len(param_groups), len(expected_groups)) + + for expect, param in zip(expected_groups, param_groups): + self.assertEqual(param['param_name'], expect['param_name']) + self.assertEqual(param['lr'], expect['lr']) + self.assertEqual(param['weight_decay'], expect['weight_decay']) diff --git a/tests/test_evaluation/test_metrics/test_gqa.py b/tests/test_evaluation/test_metrics/test_gqa.py new file mode 100644 index 0000000..abb9d14 --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_gqa.py @@ -0,0 +1,30 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.evaluator import Evaluator + +from mmpretrain.structures import DataSample + + +class TestScienceQAMetric: + + def test_evaluate(self): + meta_info = { + 'pred_answer': 'dog', + 'gt_answer': 'dog', + } + data_sample = DataSample(metainfo=meta_info) + data_samples = [data_sample for _ in range(10)] + evaluator = Evaluator(dict(type='mmpretrain.GQAAcc')) + evaluator.process(data_samples) + res = evaluator.evaluate(4) + assert res['GQA/acc'] == 1.0 + + meta_info = { + 'pred_answer': 'dog', + 'gt_answer': 'cat', + } + data_sample = DataSample(metainfo=meta_info) + data_samples = [data_sample for _ in range(10)] + evaluator = Evaluator(dict(type='mmpretrain.GQAAcc')) + evaluator.process(data_samples) + res = evaluator.evaluate(4) + assert res['GQA/acc'] == 0.0 diff --git a/tests/test_evaluation/test_metrics/test_metric_utils.py b/tests/test_evaluation/test_metrics/test_metric_utils.py new file mode 100644 index 0000000..3102ac5 --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_metric_utils.py @@ -0,0 +1,33 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmpretrain.models.losses.utils import convert_to_one_hot + + +def ori_convert_to_one_hot(targets: torch.Tensor, classes) -> torch.Tensor: + assert (torch.max(targets).item() < + classes), 'Class Index must be less than number of classes' + one_hot_targets = torch.zeros((targets.shape[0], classes), + dtype=torch.long, + device=targets.device) + one_hot_targets.scatter_(1, targets.long(), 1) + return one_hot_targets + + +def test_convert_to_one_hot(): + # label should smaller than classes + targets = torch.tensor([1, 2, 3, 8, 5]) + classes = 5 + with pytest.raises(AssertionError): + _ = convert_to_one_hot(targets, classes) + + # test with original impl + classes = 10 + targets = torch.randint(high=classes, size=(10, 1)) + ori_one_hot_targets = torch.zeros((targets.shape[0], classes), + dtype=torch.long, + device=targets.device) + ori_one_hot_targets.scatter_(1, targets.long(), 1) + one_hot_targets = convert_to_one_hot(targets, classes) + assert torch.equal(ori_one_hot_targets, one_hot_targets) diff --git a/tests/test_evaluation/test_metrics/test_multi_label.py b/tests/test_evaluation/test_metrics/test_multi_label.py new file mode 100644 index 0000000..cfd2590 --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_multi_label.py @@ -0,0 +1,388 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase + +import numpy as np +import sklearn.metrics +import torch +from mmengine.evaluator import Evaluator +from mmengine.registry import init_default_scope + +from mmpretrain.evaluation.metrics import AveragePrecision, MultiLabelMetric +from mmpretrain.structures import DataSample + +init_default_scope('mmpretrain') + + +class TestMultiLabel(TestCase): + + def test_calculate(self): + """Test using the metric from static method.""" + + y_true = [[0], [1, 3], [0, 1, 2], [3]] + y_pred = [[0, 3], [0, 2], [1, 2], [2, 3]] + y_true_binary = np.array([ + [1, 0, 0, 0], + [0, 1, 0, 1], + [1, 1, 1, 0], + [0, 0, 0, 1], + ]) + y_pred_binary = np.array([ + [1, 0, 0, 1], + [1, 0, 1, 0], + [0, 1, 1, 0], + [0, 0, 1, 1], + ]) + y_pred_score = np.array([ + [0.8, 0, 0, 0.6], + [0.2, 0, 0.6, 0], + [0, 0.9, 0.6, 0], + [0, 0, 0.2, 0.3], + ]) + + # Test with sequence of category indexes + res = MultiLabelMetric.calculate( + y_pred, + y_true, + pred_indices=True, + target_indices=True, + num_classes=4) + self.assertIsInstance(res, tuple) + precision, recall, f1_score, support = res + expect_precision = sklearn.metrics.precision_score( + y_true_binary, y_pred_binary, average='macro') * 100 + expect_recall = sklearn.metrics.recall_score( + y_true_binary, y_pred_binary, average='macro') * 100 + expect_f1 = sklearn.metrics.f1_score( + y_true_binary, y_pred_binary, average='macro') * 100 + self.assertTensorEqual(precision, expect_precision) + self.assertTensorEqual(recall, expect_recall) + self.assertTensorEqual(f1_score, expect_f1) + self.assertTensorEqual(support, 7) + + # Test with onehot input + res = MultiLabelMetric.calculate(y_pred_binary, + torch.from_numpy(y_true_binary)) + self.assertIsInstance(res, tuple) + precision, recall, f1_score, support = res + # Expected values come from sklearn + self.assertTensorEqual(precision, expect_precision) + self.assertTensorEqual(recall, expect_recall) + self.assertTensorEqual(f1_score, expect_f1) + self.assertTensorEqual(support, 7) + + # Test with topk argument + res = MultiLabelMetric.calculate( + y_pred_score, y_true, target_indices=True, topk=1, num_classes=4) + self.assertIsInstance(res, tuple) + precision, recall, f1_score, support = res + # Expected values come from sklearn + top1_y_pred = np.array([ + [1, 0, 0, 0], + [0, 0, 1, 0], + [0, 1, 0, 0], + [0, 0, 0, 1], + ]) + expect_precision = sklearn.metrics.precision_score( + y_true_binary, top1_y_pred, average='macro') * 100 + expect_recall = sklearn.metrics.recall_score( + y_true_binary, top1_y_pred, average='macro') * 100 + expect_f1 = sklearn.metrics.f1_score( + y_true_binary, top1_y_pred, average='macro') * 100 + self.assertTensorEqual(precision, expect_precision) + self.assertTensorEqual(recall, expect_recall) + self.assertTensorEqual(f1_score, expect_f1) + self.assertTensorEqual(support, 7) + + # Test with thr argument + res = MultiLabelMetric.calculate( + y_pred_score, y_true, target_indices=True, thr=0.25, num_classes=4) + self.assertIsInstance(res, tuple) + precision, recall, f1_score, support = res + # Expected values come from sklearn + thr_y_pred = np.array([ + [1, 0, 0, 1], + [0, 0, 1, 0], + [0, 1, 1, 0], + [0, 0, 0, 1], + ]) + expect_precision = sklearn.metrics.precision_score( + y_true_binary, thr_y_pred, average='macro') * 100 + expect_recall = sklearn.metrics.recall_score( + y_true_binary, thr_y_pred, average='macro') * 100 + expect_f1 = sklearn.metrics.f1_score( + y_true_binary, thr_y_pred, average='macro') * 100 + self.assertTensorEqual(precision, expect_precision) + 
self.assertTensorEqual(recall, expect_recall) + self.assertTensorEqual(f1_score, expect_f1) + self.assertTensorEqual(support, 7) + + # Test with invalid inputs + with self.assertRaisesRegex(TypeError, " is not"): + MultiLabelMetric.calculate(y_pred, 'hi', num_classes=10) + + # Test with invalid input + with self.assertRaisesRegex(AssertionError, + 'Invalid `average` argument,'): + MultiLabelMetric.calculate( + y_pred, y_true, average='m', num_classes=10) + + y_true_binary = np.array([[1, 0, 0, 0], [0, 1, 0, 1]]) + y_pred_binary = np.array([[1, 0, 0, 1], [1, 0, 1, 0], [0, 1, 1, 0]]) + # Test with invalid inputs + with self.assertRaisesRegex(AssertionError, 'The size of pred'): + MultiLabelMetric.calculate(y_pred_binary, y_true_binary) + + # Test with invalid inputs + with self.assertRaisesRegex(TypeError, 'The `pred` and `target` must'): + MultiLabelMetric.calculate(y_pred_binary, 5) + + def test_evaluate(self): + y_true = [[0], [1, 3], [0, 1, 2], [3]] + y_true_binary = torch.tensor([ + [1, 0, 0, 0], + [0, 1, 0, 1], + [1, 1, 1, 0], + [0, 0, 0, 1], + ]) + y_pred_score = torch.tensor([ + [0.8, 0, 0, 0.6], + [0.2, 0, 0.6, 0], + [0, 0.9, 0.6, 0], + [0, 0, 0.2, 0.3], + ]) + + pred = [ + DataSample(num_classes=4).set_pred_score(i).set_gt_label(j) + for i, j in zip(y_pred_score, y_true) + ] + + # Test with default argument + evaluator = Evaluator(dict(type='MultiLabelMetric')) + evaluator.process(pred) + res = evaluator.evaluate(4) + self.assertIsInstance(res, dict) + thr05_y_pred = np.array([ + [1, 0, 0, 1], + [0, 0, 1, 0], + [0, 1, 1, 0], + [0, 0, 0, 0], + ]) + expect_precision = sklearn.metrics.precision_score( + y_true_binary, thr05_y_pred, average='macro') * 100 + expect_recall = sklearn.metrics.recall_score( + y_true_binary, thr05_y_pred, average='macro') * 100 + expect_f1 = sklearn.metrics.f1_score( + y_true_binary, thr05_y_pred, average='macro') * 100 + self.assertEqual(res['multi-label/precision'], expect_precision) + self.assertEqual(res['multi-label/recall'], expect_recall) + self.assertEqual(res['multi-label/f1-score'], expect_f1) + + # Test with topk argument + evaluator = Evaluator(dict(type='MultiLabelMetric', topk=1)) + evaluator.process(pred) + res = evaluator.evaluate(4) + self.assertIsInstance(res, dict) + top1_y_pred = np.array([ + [1, 0, 0, 0], + [0, 0, 1, 0], + [0, 1, 0, 0], + [0, 0, 0, 1], + ]) + expect_precision = sklearn.metrics.precision_score( + y_true_binary, top1_y_pred, average='macro') * 100 + expect_recall = sklearn.metrics.recall_score( + y_true_binary, top1_y_pred, average='macro') * 100 + expect_f1 = sklearn.metrics.f1_score( + y_true_binary, top1_y_pred, average='macro') * 100 + self.assertEqual(res['multi-label/precision_top1'], expect_precision) + self.assertEqual(res['multi-label/recall_top1'], expect_recall) + self.assertEqual(res['multi-label/f1-score_top1'], expect_f1) + + # Test with both argument + evaluator = Evaluator(dict(type='MultiLabelMetric', thr=0.25, topk=1)) + evaluator.process(pred) + res = evaluator.evaluate(4) + self.assertIsInstance(res, dict) + # Expected values come from sklearn + thr_y_pred = np.array([ + [1, 0, 0, 1], + [0, 0, 1, 0], + [0, 1, 1, 0], + [0, 0, 0, 1], + ]) + expect_precision = sklearn.metrics.precision_score( + y_true_binary, thr_y_pred, average='macro') * 100 + expect_recall = sklearn.metrics.recall_score( + y_true_binary, thr_y_pred, average='macro') * 100 + expect_f1 = sklearn.metrics.f1_score( + y_true_binary, thr_y_pred, average='macro') * 100 + self.assertEqual(res['multi-label/precision_thr-0.25'], + 
expect_precision) + self.assertEqual(res['multi-label/recall_thr-0.25'], expect_recall) + self.assertEqual(res['multi-label/f1-score_thr-0.25'], expect_f1) + + # Test with average micro + evaluator = Evaluator(dict(type='MultiLabelMetric', average='micro')) + evaluator.process(pred) + res = evaluator.evaluate(4) + self.assertIsInstance(res, dict) + # Expected values come from sklearn + expect_precision = sklearn.metrics.precision_score( + y_true_binary, thr05_y_pred, average='micro') * 100 + expect_recall = sklearn.metrics.recall_score( + y_true_binary, thr05_y_pred, average='micro') * 100 + expect_f1 = sklearn.metrics.f1_score( + y_true_binary, thr05_y_pred, average='micro') * 100 + self.assertAlmostEqual( + res['multi-label/precision_micro'], expect_precision, places=4) + self.assertAlmostEqual( + res['multi-label/recall_micro'], expect_recall, places=4) + self.assertAlmostEqual( + res['multi-label/f1-score_micro'], expect_f1, places=4) + + # Test with average None + evaluator = Evaluator(dict(type='MultiLabelMetric', average=None)) + evaluator.process(pred) + res = evaluator.evaluate(4) + self.assertIsInstance(res, dict) + # Expected values come from sklearn + expect_precision = sklearn.metrics.precision_score( + y_true_binary, thr05_y_pred, average=None) * 100 + expect_recall = sklearn.metrics.recall_score( + y_true_binary, thr05_y_pred, average=None) * 100 + expect_f1 = sklearn.metrics.f1_score( + y_true_binary, thr05_y_pred, average=None) * 100 + np.testing.assert_allclose(res['multi-label/precision_classwise'], + expect_precision) + np.testing.assert_allclose(res['multi-label/recall_classwise'], + expect_recall) + np.testing.assert_allclose(res['multi-label/f1-score_classwise'], + expect_f1) + + # Test with gt_score + pred = [ + DataSample(num_classes=4).set_pred_score(i).set_gt_score(j) + for i, j in zip(y_pred_score, y_true_binary) + ] + + evaluator = Evaluator(dict(type='MultiLabelMetric', items=['support'])) + evaluator.process(pred) + res = evaluator.evaluate(4) + self.assertIsInstance(res, dict) + self.assertEqual(res['multi-label/support'], 7) + + def assertTensorEqual(self, + tensor: torch.Tensor, + value: float, + msg=None, + **kwarg): + tensor = tensor.to(torch.float32) + if tensor.dim() == 0: + tensor = tensor.unsqueeze(0) + value = torch.FloatTensor([value]) + try: + torch.testing.assert_allclose(tensor, value, **kwarg) + except AssertionError as e: + self.fail(self._formatMessage(msg, str(e) + str(tensor))) + + +class TestAveragePrecision(TestCase): + + def test_evaluate(self): + """Test using the metric in the same way as Evalutor.""" + y_pred = torch.tensor([ + [0.9, 0.8, 0.3, 0.2], + [0.1, 0.2, 0.2, 0.1], + [0.7, 0.5, 0.9, 0.3], + [0.8, 0.1, 0.1, 0.2], + ]) + y_true = torch.tensor([ + [1, 1, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [1, 0, 0, 0], + ]) + + pred = [ + DataSample(num_classes=4).set_pred_score(i).set_gt_score(j) + for i, j in zip(y_pred, y_true) + ] + + # Test with default macro avergae + evaluator = Evaluator(dict(type='AveragePrecision')) + evaluator.process(pred) + res = evaluator.evaluate(5) + self.assertIsInstance(res, dict) + self.assertAlmostEqual(res['multi-label/mAP'], 70.83333, places=4) + + # Test with average mode None + evaluator = Evaluator(dict(type='AveragePrecision', average=None)) + evaluator.process(pred) + res = evaluator.evaluate(5) + self.assertIsInstance(res, dict) + aps = res['multi-label/AP_classwise'] + self.assertAlmostEqual(aps[0], 100., places=4) + self.assertAlmostEqual(aps[1], 83.3333, places=4) + 
self.assertAlmostEqual(aps[2], 100, places=4) + self.assertAlmostEqual(aps[3], 0, places=4) + + # Test with gt_label without score + pred = [ + DataSample(num_classes=4).set_pred_score(i).set_gt_label(j) + for i, j in zip(y_pred, [[0, 1], [1], [2], [0]]) + ] + evaluator = Evaluator(dict(type='AveragePrecision')) + evaluator.process(pred) + res = evaluator.evaluate(5) + self.assertAlmostEqual(res['multi-label/mAP'], 70.83333, places=4) + + def test_calculate(self): + """Test using the metric from static method.""" + + y_true = np.array([ + [1, 0, 0, 0], + [0, 1, 0, 1], + [1, 1, 1, 0], + [0, 0, 0, 1], + ]) + y_pred = np.array([ + [0.9, 0.8, 0.3, 0.2], + [0.1, 0.2, 0.2, 0.1], + [0.7, 0.5, 0.9, 0.3], + [0.8, 0.1, 0.1, 0.2], + ]) + + ap_score = AveragePrecision.calculate(y_pred, y_true) + expect_ap = sklearn.metrics.average_precision_score(y_true, + y_pred) * 100 + self.assertTensorEqual(ap_score, expect_ap) + + # Test with invalid inputs + with self.assertRaisesRegex(AssertionError, + 'Invalid `average` argument,'): + AveragePrecision.calculate(y_pred, y_true, average='m') + + y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 1]]) + y_pred = np.array([[1, 0, 0, 1], [1, 0, 1, 0], [0, 1, 1, 0]]) + # Test with invalid inputs + with self.assertRaisesRegex(AssertionError, + 'Both `pred` and `target`'): + AveragePrecision.calculate(y_pred, y_true) + + # Test with invalid inputs + with self.assertRaisesRegex(TypeError, " is not an"): + AveragePrecision.calculate(y_pred, 5) + + def assertTensorEqual(self, + tensor: torch.Tensor, + value: float, + msg=None, + **kwarg): + tensor = tensor.to(torch.float32) + if tensor.dim() == 0: + tensor = tensor.unsqueeze(0) + value = torch.FloatTensor([value]) + try: + torch.testing.assert_allclose(tensor, value, **kwarg) + except AssertionError as e: + self.fail(self._formatMessage(msg, str(e) + str(tensor))) diff --git a/tests/test_evaluation/test_metrics/test_multi_task_metrics.py b/tests/test_evaluation/test_metrics/test_multi_task_metrics.py new file mode 100644 index 0000000..2502771 --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_multi_task_metrics.py @@ -0,0 +1,134 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase
+
+import torch
+
+from mmpretrain.evaluation.metrics import MultiTasksMetric
+from mmpretrain.structures import DataSample
+
+
+class MultiTaskMetric(TestCase):
+    data_pred = [
+        {
+            'task0': torch.tensor([0.7, 0.0, 0.3]),
+            'task1': torch.tensor([0.5, 0.2, 0.3])
+        },
+        {
+            'task0': torch.tensor([0.0, 0.0, 1.0]),
+            'task1': torch.tensor([0.0, 0.0, 1.0])
+        },
+    ]
+    data_gt = [{'task0': 0, 'task1': 2}, {'task1': 2}]
+
+    preds = []
+    for i, pred in enumerate(data_pred):
+        sample = {}
+        for task_name in pred:
+            task_sample = DataSample().set_pred_score(pred[task_name])
+            if task_name in data_gt[i]:
+                task_sample.set_gt_label(data_gt[i][task_name])
+                task_sample.set_field(True, 'eval_mask', field_type='metainfo')
+            else:
+                task_sample.set_field(
+                    False, 'eval_mask', field_type='metainfo')
+            sample[task_name] = task_sample.to_dict()
+
+        preds.append(sample)
+    data2 = zip([
+        {
+            'task0': torch.tensor([0.7, 0.0, 0.3]),
+            'task1': {
+                'task10': torch.tensor([0.5, 0.2, 0.3]),
+                'task11': torch.tensor([0.4, 0.3, 0.3])
+            }
+        },
+        {
+            'task0': torch.tensor([0.0, 0.0, 1.0]),
+            'task1': {
+                'task10': torch.tensor([0.1, 0.6, 0.3]),
+                'task11': torch.tensor([0.5, 0.2, 0.3])
+            }
+        },
+    ], [{
+        'task0': 0,
+        'task1': {
+            'task10': 2,
+            'task11': 0
+        }
+    }, {
+        'task0': 2,
+        'task1': {
+            'task10': 1,
+            'task11': 0
+        }
+    }])
+
+    pred2 = []
+    for score, label in data2:
+        sample = {}
+        for task_name in score:
+            if type(score[task_name]) != dict:
+                task_sample = DataSample().set_pred_score(score[task_name])
+                task_sample.set_gt_label(label[task_name])
+                sample[task_name] = task_sample.to_dict()
+                sample[task_name]['eval_mask'] = True
+            else:
+                sample[task_name] = {}
+                sample[task_name]['eval_mask'] = True
+                for task_name2 in score[task_name]:
+                    task_sample = DataSample().set_pred_score(
+                        score[task_name][task_name2])
+                    task_sample.set_gt_label(label[task_name][task_name2])
+                    sample[task_name][task_name2] = task_sample.to_dict()
+                    sample[task_name][task_name2]['eval_mask'] = True
+
+        pred2.append(sample)
+
+    pred3 = [{'task0': {'eval_mask': False}, 'task1': {'eval_mask': False}}]
+    task_metrics = {
+        'task0': [dict(type='Accuracy', topk=(1, ))],
+        'task1': [
+            dict(type='Accuracy', topk=(1, 3)),
+            dict(type='SingleLabelMetric', items=['precision', 'recall'])
+        ]
+    }
+    task_metrics2 = {
+        'task0': [dict(type='Accuracy', topk=(1, ))],
+        'task1': [
+            dict(
+                type='MultiTasksMetric',
+                task_metrics={
+                    'task10': [
+                        dict(type='Accuracy', topk=(1, 3)),
+                        dict(type='SingleLabelMetric', items=['precision'])
+                    ],
+                    'task11': [dict(type='Accuracy', topk=(1, ))]
+                })
+        ]
+    }
+
+    def test_evaluate(self):
+        """Test using the metric in the same way as Evaluator."""
+
+        # Test with score (use score instead of label if score exists)
+        metric = MultiTasksMetric(self.task_metrics)
+        metric.process(None, self.preds)
+        results = metric.evaluate(2)
+        self.assertIsInstance(results, dict)
+        self.assertAlmostEqual(results['task0_accuracy/top1'], 100)
+        self.assertGreater(results['task1_single-label/precision'], 0)
+
+        # Test nested
+        metric = MultiTasksMetric(self.task_metrics2)
+        metric.process(None, self.pred2)
+        results = metric.evaluate(2)
+        self.assertIsInstance(results, dict)
+        self.assertGreater(results['task1_task10_single-label/precision'], 0)
+        self.assertGreater(results['task1_task11_accuracy/top1'], 0)
+
+        # Test without any ground truth value
+        metric = MultiTasksMetric(self.task_metrics)
+        metric.process(None, self.pred3)
+        results = metric.evaluate(2)
+        self.assertIsInstance(results, dict)
+        self.assertEqual(results['task0_Accuracy'], 0)
diff --git a/tests/test_evaluation/test_metrics/test_retrieval.py b/tests/test_evaluation/test_metrics/test_retrieval.py
new file mode 100644
index 0000000..de49754
--- /dev/null
+++ b/tests/test_evaluation/test_metrics/test_retrieval.py
@@ -0,0 +1,227 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from unittest import TestCase
+
+import numpy as np
+import torch
+
+from mmpretrain.evaluation.metrics import (RetrievalAveragePrecision,
+                                           RetrievalRecall)
+from mmpretrain.registry import METRICS
+from mmpretrain.structures import DataSample
+
+
+class TestRetrievalRecall(TestCase):
+
+    def test_evaluate(self):
+        """Test using the metric in the same way as Evaluator."""
+        pred = [
+            DataSample().set_pred_score(i).set_gt_label(k).to_dict()
+            for i, k in zip([
+                torch.tensor([0.7, 0.0, 0.3]),
+                torch.tensor([0.5, 0.2, 0.3]),
+                torch.tensor([0.4, 0.5, 0.1]),
+                torch.tensor([0.0, 0.0, 1.0]),
+                torch.tensor([0.0, 0.0, 1.0]),
+                torch.tensor([0.0, 0.0, 1.0]),
+            ], [[0], [0, 1], [1], [2], [1, 2], [0, 1]])
+        ]
+
+        # Test with score (use score instead of label if score exists)
+        metric = METRICS.build(dict(type='RetrievalRecall', topk=1))
+        metric.process(None, pred)
+        recall = metric.evaluate(6)
+        self.assertIsInstance(recall, dict)
+        self.assertAlmostEqual(
+            recall['retrieval/Recall@1'], 5 / 6 * 100, places=4)
+
+        # Test with invalid topk
+        with self.assertRaisesRegex(RuntimeError, 'selected index k'):
+            metric = METRICS.build(dict(type='RetrievalRecall', topk=10))
+            metric.process(None, pred)
+            metric.evaluate(6)
+
+        with self.assertRaisesRegex(ValueError, '`topk` must be a'):
+            METRICS.build(dict(type='RetrievalRecall', topk=-1))
+
+        # Test initialization
+        metric = METRICS.build(dict(type='RetrievalRecall', topk=5))
+        self.assertEqual(metric.topk, (5, ))
+
+        # Test initialization
+        metric = METRICS.build(dict(type='RetrievalRecall', topk=(1, 2, 5)))
+        self.assertEqual(metric.topk, (1, 2, 5))
+
+    def test_calculate(self):
+        """Test using the metric from static method."""
+
+        # seq of indices format
+        y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
+        y_pred = [np.arange(10)] * 2
+
+        # test with average is 'macro'
+        recall_score = RetrievalRecall.calculate(
+            y_pred, y_true, topk=1, pred_indices=True, target_indices=True)
+        expect_recall = 50.
+        self.assertEqual(recall_score[0].item(), expect_recall)
+
+        # test with tensor input
+        y_true = torch.Tensor([[1, 0, 1, 0, 0, 1, 0, 0, 1, 1],
+                               [0, 1, 0, 0, 1, 0, 1, 0, 0, 0]])
+        y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2)
+        recall_score = RetrievalRecall.calculate(y_pred, y_true, topk=1)
+        expect_recall = 50.
+        self.assertEqual(recall_score[0].item(), expect_recall)
+
+        # test with topk=2
+        y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2)
+        recall_score = RetrievalRecall.calculate(y_pred, y_true, topk=2)
+        expect_recall = 100.
+        self.assertEqual(recall_score[0].item(), expect_recall)
+
+        # test with topk is (1, 5)
+        y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2)
+        recall_score = RetrievalRecall.calculate(y_pred, y_true, topk=(1, 5))
+        expect_recalls = [50., 100.]
+        self.assertEqual(len(recall_score), len(expect_recalls))
+        for i in range(len(expect_recalls)):
+            self.assertEqual(recall_score[i].item(), expect_recalls[i])
+
+        # Test with invalid pred
+        y_pred = dict()
+        y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
+        with self.assertRaisesRegex(AssertionError, '`pred` must be Seq'):
+            RetrievalRecall.calculate(y_pred, y_true, True, True)
+
+        # Test with invalid target
+        y_true = dict()
+        y_pred = [np.arange(10)] * 2
+        with self.assertRaisesRegex(AssertionError, '`target` must be Seq'):
+            RetrievalRecall.calculate(
+                y_pred, y_true, topk=1, pred_indices=True, target_indices=True)
+
+        # Test with different length `pred` with `target`
+        y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
+        y_pred = [np.arange(10)] * 3
+        with self.assertRaisesRegex(AssertionError, 'Length of `pred`'):
+            RetrievalRecall.calculate(
+                y_pred, y_true, topk=1, pred_indices=True, target_indices=True)
+
+        # Test with invalid target
+        y_true = [[0, 2, 5, 8, 9], dict()]
+        y_pred = [np.arange(10)] * 2
+        with self.assertRaisesRegex(AssertionError, '`target` should be'):
+            RetrievalRecall.calculate(
+                y_pred, y_true, topk=1, pred_indices=True, target_indices=True)
+
+        # Test with invalid pred
+        y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
+        y_pred = [np.arange(10), dict()]
+        with self.assertRaisesRegex(AssertionError, '`pred` should be'):
+            RetrievalRecall.calculate(
+                y_pred, y_true, topk=1, pred_indices=True, target_indices=True)
+
+
+class TestRetrievalAveragePrecision(TestCase):
+
+    def test_evaluate(self):
+        """Test using the metric in the same way as Evaluator."""
+        y_true = torch.tensor([[1, 0, 1, 0, 0, 1, 0, 0, 1, 1],
+                               [0, 1, 0, 0, 1, 0, 1, 0, 0, 0]])
+        y_pred = torch.tensor([np.linspace(0.95, 0.05, 10)] * 2)
+
+        pred = [
+            DataSample().set_pred_score(i).set_gt_score(j)
+            for i, j in zip(y_pred, y_true)
+        ]
+
+        # Test with default macro average
+        metric = METRICS.build(dict(type='RetrievalAveragePrecision', topk=10))
+        metric.process([], pred)
+        res = metric.evaluate(len(pred))
+        self.assertIsInstance(res, dict)
+        self.assertAlmostEqual(
+            res['retrieval/mAP@10'], 53.25396825396825, places=4)
+
+        # Test with invalid topk
+        with self.assertRaisesRegex(ValueError, '`topk` must be a'):
+            METRICS.build(dict(type='RetrievalAveragePrecision', topk=-1))
+
+        # Test with invalid mode
+        with self.assertRaisesRegex(AssertionError, 'Invalid `mode` '):
+            METRICS.build(
+                dict(type='RetrievalAveragePrecision', topk=5, mode='m'))
+
+    def test_calculate(self):
+        """Test using the metric from static method."""
+        # Test IR mode
+        # example from https://zhuanlan.zhihu.com/p/35983818
+        # or https://www.youtube.com/watch?v=pM6DJ0ZZee0
+
+        # seq of indices format
+        y_true = [[0, 2, 5, 8, 9], [1, 4, 6]]
+        y_pred = [np.arange(10)] * 2
+
+        # test with average is 'macro'
+        ap_score = RetrievalAveragePrecision.calculate(y_pred, y_true, 10,
+                                                       True, True)
+        expect_ap = 53.25396825396825
+        self.assertEqual(ap_score.item(), expect_ap)
+
+        # test with tensor input
+        y_true = torch.Tensor([[1, 0, 1, 0, 0, 1, 0, 0, 1, 1],
+                               [0, 1, 0, 0, 1, 0, 1, 0, 0, 0]])
+        y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2)
+        ap_score = RetrievalAveragePrecision.calculate(y_pred, y_true, 10)
+        expect_ap = 53.25396825396825
+        self.assertEqual(ap_score.item(), expect_ap)
+
+        # test with topk is 5
+        y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2)
+        ap_score = RetrievalAveragePrecision.calculate(y_pred, y_true, topk=5)
+        expect_ap = 31.666666666666664
+        self.assertEqual(ap_score.item(), expect_ap)
+
+        # Test with invalid mode
+        with
self.assertRaisesRegex(AssertionError, 'Invalid `mode` '): + RetrievalAveragePrecision.calculate( + y_pred, y_true, True, True, mode='m') + + # Test with invalid pred + y_pred = dict() + y_true = [[0, 2, 5, 8, 9], [1, 4, 6]] + with self.assertRaisesRegex(AssertionError, '`pred` must be Seq'): + RetrievalAveragePrecision.calculate(y_pred, y_true, 10, True, True) + + # Test with invalid target + y_true = dict() + y_pred = [np.arange(10)] * 2 + with self.assertRaisesRegex(AssertionError, '`target` must be Seq'): + RetrievalAveragePrecision.calculate(y_pred, y_true, 10, True, True) + + # Test with different length `pred` with `target` + y_true = [[0, 2, 5, 8, 9], [1, 4, 6]] + y_pred = [np.arange(10)] * 3 + with self.assertRaisesRegex(AssertionError, 'Length of `pred`'): + RetrievalAveragePrecision.calculate(y_pred, y_true, 10, True, True) + + # Test with invalid pred + y_true = [[0, 2, 5, 8, 9], dict()] + y_pred = [np.arange(10)] * 2 + with self.assertRaisesRegex(AssertionError, '`target` should be'): + RetrievalAveragePrecision.calculate(y_pred, y_true, 10, True, True) + + # Test with invalid target + y_true = [[0, 2, 5, 8, 9], [1, 4, 6]] + y_pred = [np.arange(10), dict()] + with self.assertRaisesRegex(AssertionError, '`pred` should be'): + RetrievalAveragePrecision.calculate(y_pred, y_true, 10, True, True) + + # Test with mode 'integrate' + y_true = torch.Tensor([[1, 0, 1, 0, 0, 1, 0, 0, 1, 1], + [0, 1, 0, 0, 1, 0, 1, 0, 0, 0]]) + y_pred = np.array([np.linspace(0.95, 0.05, 10)] * 2) + + ap_score = RetrievalAveragePrecision.calculate( + y_pred, y_true, topk=5, mode='integrate') + expect_ap = 25.416666666666664 + self.assertEqual(ap_score.item(), expect_ap) diff --git a/tests/test_evaluation/test_metrics/test_scienceqa.py b/tests/test_evaluation/test_metrics/test_scienceqa.py new file mode 100644 index 0000000..5bfc81b --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_scienceqa.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from mmengine.evaluator import Evaluator + +from mmpretrain.structures import DataSample + + +class TestScienceQAMetric: + + def test_evaluate(self): + meta_info = { + 'choices': ['A', 'B', 'C', 'D'], + 'pred_answer': 'A', + 'grade': 'grade1', + 'subject': 'language science', + 'gt_answer': 1, + 'hint': 'hint', + 'has_image': True + } + data_sample = DataSample(metainfo=meta_info) + data_samples = [data_sample for _ in range(10)] + evaluator = Evaluator(dict(type='mmpretrain.ScienceQAMetric')) + evaluator.process(data_samples) + res = evaluator.evaluate(4) + assert res['acc_grade_1_6'] == 0.0 + assert res['acc_language'] == 0.0 + assert res['all_acc'] == 0.0 + + meta_info = { + 'choices': ['A', 'B', 'C', 'D'], + 'pred_answer': 'A', + 'grade': 'grade1', + 'subject': 'language science', + 'gt_answer': 0, + 'hint': 'hint', + 'has_image': True + } + data_sample = DataSample(metainfo=meta_info) + data_samples = [data_sample for _ in range(10)] + evaluator = Evaluator(dict(type='mmpretrain.ScienceQAMetric')) + evaluator.process(data_samples) + res = evaluator.evaluate(4) + assert res['acc_grade_1_6'] == 1.0 + assert res['acc_language'] == 1.0 + assert res['all_acc'] == 1.0 diff --git a/tests/test_evaluation/test_metrics/test_shape_bias_metric.py b/tests/test_evaluation/test_metrics/test_shape_bias_metric.py new file mode 100644 index 0000000..d57ace8 --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_shape_bias_metric.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + +from mmpretrain.evaluation import ShapeBiasMetric + + +def test_shape_bias_metric(): + data_sample = dict() + data_sample['pred_score'] = torch.rand(1000, ) + data_sample['pred_label'] = torch.tensor(1) + data_sample['gt_label'] = torch.tensor(1) + data_sample['img_path'] = 'tests/airplane/test.JPEG' + evaluator = ShapeBiasMetric( + csv_dir='tests/data', dataset_name='test', model_name='test') + evaluator.process(None, [data_sample]) diff --git a/tests/test_evaluation/test_metrics/test_single_label.py b/tests/test_evaluation/test_metrics/test_single_label.py new file mode 100644 index 0000000..33264ec --- /dev/null +++ b/tests/test_evaluation/test_metrics/test_single_label.py @@ -0,0 +1,409 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +from unittest import TestCase + +import numpy as np +import torch + +from mmpretrain.evaluation.metrics import (Accuracy, ConfusionMatrix, + SingleLabelMetric) +from mmpretrain.registry import METRICS +from mmpretrain.structures import DataSample + + +class TestAccuracy(TestCase): + + def test_evaluate(self): + """Test using the metric in the same way as Evalutor.""" + pred = [ + DataSample().set_pred_score(i).set_pred_label(j).set_gt_label( + k).to_dict() for i, j, k in zip([ + torch.tensor([0.7, 0.0, 0.3]), + torch.tensor([0.5, 0.2, 0.3]), + torch.tensor([0.4, 0.5, 0.1]), + torch.tensor([0.0, 0.0, 1.0]), + torch.tensor([0.0, 0.0, 1.0]), + torch.tensor([0.0, 0.0, 1.0]), + ], [0, 0, 1, 2, 2, 2], [0, 0, 1, 2, 1, 0]) + ] + + # Test with score (use score instead of label if score exists) + metric = METRICS.build(dict(type='Accuracy', thrs=0.6)) + metric.process(None, pred) + acc = metric.evaluate(6) + self.assertIsInstance(acc, dict) + self.assertAlmostEqual(acc['accuracy/top1'], 2 / 6 * 100, places=4) + + # Test with multiple thrs + metric = METRICS.build(dict(type='Accuracy', thrs=(0., 0.6, None))) + metric.process(None, pred) + acc = metric.evaluate(6) + self.assertSetEqual( + set(acc.keys()), { + 'accuracy/top1_thr-0.00', 'accuracy/top1_thr-0.60', + 'accuracy/top1_no-thr' + }) + + # Test with invalid topk + with self.assertRaisesRegex(ValueError, 'check the `val_evaluator`'): + metric = METRICS.build(dict(type='Accuracy', topk=(1, 5))) + metric.process(None, pred) + metric.evaluate(6) + + # Test with label + for sample in pred: + del sample['pred_score'] + metric = METRICS.build(dict(type='Accuracy', thrs=(0., 0.6, None))) + metric.process(None, pred) + acc = metric.evaluate(6) + self.assertIsInstance(acc, dict) + self.assertAlmostEqual(acc['accuracy/top1'], 4 / 6 * 100, places=4) + + # Test initialization + metric = METRICS.build(dict(type='Accuracy', thrs=0.6)) + self.assertTupleEqual(metric.thrs, (0.6, )) + metric = METRICS.build(dict(type='Accuracy', thrs=[0.6])) + self.assertTupleEqual(metric.thrs, (0.6, )) + metric = METRICS.build(dict(type='Accuracy', topk=5)) + self.assertTupleEqual(metric.topk, (5, )) + metric = METRICS.build(dict(type='Accuracy', topk=[5])) + self.assertTupleEqual(metric.topk, (5, )) + + def test_calculate(self): + """Test using the metric from static method.""" + + # Test with score + y_true = np.array([0, 0, 1, 2, 1, 0]) + y_label = torch.tensor([0, 0, 1, 2, 2, 2]) + y_score = [ + [0.7, 0.0, 0.3], + [0.5, 0.2, 0.3], + [0.4, 0.5, 0.1], + [0.0, 0.0, 1.0], + [0.0, 0.0, 1.0], + [0.0, 0.0, 1.0], + ] + + # Test with score + acc = Accuracy.calculate(y_score, y_true, thrs=(0.6, )) + self.assertIsInstance(acc, list) + self.assertIsInstance(acc[0], list) + self.assertIsInstance(acc[0][0], torch.Tensor) 
+ self.assertTensorEqual(acc[0][0], 2 / 6 * 100) + + # Test with label + acc = Accuracy.calculate(y_label, y_true, thrs=(0.6, )) + self.assertIsInstance(acc, torch.Tensor) + # the thrs will be ignored + self.assertTensorEqual(acc, 4 / 6 * 100) + + # Test with invalid inputs + with self.assertRaisesRegex(TypeError, " is not"): + Accuracy.calculate(y_label, 'hi') + + # Test with invalid topk + with self.assertRaisesRegex(ValueError, 'Top-5 accuracy .* is 3'): + Accuracy.calculate(y_score, y_true, topk=(1, 5)) + + def assertTensorEqual(self, + tensor: torch.Tensor, + value: float, + msg=None, + **kwarg): + tensor = tensor.to(torch.float32) + value = torch.FloatTensor([value]) + try: + torch.testing.assert_allclose(tensor, value, **kwarg) + except AssertionError as e: + self.fail(self._formatMessage(msg, str(e))) + + +class TestSingleLabel(TestCase): + + def test_evaluate(self): + """Test using the metric in the same way as Evalutor.""" + pred = [ + DataSample().set_pred_score(i).set_pred_label(j).set_gt_label( + k).to_dict() for i, j, k in zip([ + torch.tensor([0.7, 0.0, 0.3]), + torch.tensor([0.5, 0.2, 0.3]), + torch.tensor([0.4, 0.5, 0.1]), + torch.tensor([0.0, 0.0, 1.0]), + torch.tensor([0.0, 0.0, 1.0]), + torch.tensor([0.0, 0.0, 1.0]), + ], [0, 0, 1, 2, 2, 2], [0, 0, 1, 2, 1, 0]) + ] + + # Test with score (use score instead of label if score exists) + metric = METRICS.build( + dict( + type='SingleLabelMetric', + thrs=0.6, + items=('precision', 'recall', 'f1-score', 'support'))) + metric.process(None, pred) + res = metric.evaluate(6) + self.assertIsInstance(res, dict) + self.assertAlmostEqual( + res['single-label/precision'], (1 + 0 + 1 / 3) / 3 * 100, places=4) + self.assertAlmostEqual( + res['single-label/recall'], (1 / 3 + 0 + 1) / 3 * 100, places=4) + self.assertAlmostEqual( + res['single-label/f1-score'], (1 / 2 + 0 + 1 / 2) / 3 * 100, + places=4) + self.assertEqual(res['single-label/support'], 6) + + # Test with multiple thrs + metric = METRICS.build( + dict(type='SingleLabelMetric', thrs=(0., 0.6, None))) + metric.process(None, pred) + res = metric.evaluate(6) + self.assertSetEqual( + set(res.keys()), { + 'single-label/precision_thr-0.00', + 'single-label/recall_thr-0.00', + 'single-label/f1-score_thr-0.00', + 'single-label/precision_thr-0.60', + 'single-label/recall_thr-0.60', + 'single-label/f1-score_thr-0.60', + 'single-label/precision_no-thr', 'single-label/recall_no-thr', + 'single-label/f1-score_no-thr' + }) + + # Test with average mode "micro" + metric = METRICS.build( + dict( + type='SingleLabelMetric', + average='micro', + items=('precision', 'recall', 'f1-score', 'support'))) + metric.process(None, pred) + res = metric.evaluate(6) + self.assertIsInstance(res, dict) + self.assertAlmostEqual( + res['single-label/precision_micro'], 66.666, places=2) + self.assertAlmostEqual( + res['single-label/recall_micro'], 66.666, places=2) + self.assertAlmostEqual( + res['single-label/f1-score_micro'], 66.666, places=2) + self.assertEqual(res['single-label/support_micro'], 6) + + # Test with average mode None + metric = METRICS.build( + dict( + type='SingleLabelMetric', + average=None, + items=('precision', 'recall', 'f1-score', 'support'))) + metric.process(None, pred) + res = metric.evaluate(6) + self.assertIsInstance(res, dict) + precision = res['single-label/precision_classwise'] + self.assertAlmostEqual(precision[0], 100., places=4) + self.assertAlmostEqual(precision[1], 100., places=4) + self.assertAlmostEqual(precision[2], 1 / 3 * 100, places=4) + recall = 
res['single-label/recall_classwise'] + self.assertAlmostEqual(recall[0], 2 / 3 * 100, places=4) + self.assertAlmostEqual(recall[1], 50., places=4) + self.assertAlmostEqual(recall[2], 100., places=4) + f1_score = res['single-label/f1-score_classwise'] + self.assertAlmostEqual(f1_score[0], 80., places=4) + self.assertAlmostEqual(f1_score[1], 2 / 3 * 100, places=4) + self.assertAlmostEqual(f1_score[2], 50., places=4) + self.assertEqual(res['single-label/support_classwise'], [3, 2, 1]) + + # Test with label, the thrs will be ignored + pred_no_score = copy.deepcopy(pred) + for sample in pred_no_score: + del sample['pred_score'] + del sample['num_classes'] + metric = METRICS.build( + dict(type='SingleLabelMetric', thrs=(0., 0.6), num_classes=3)) + metric.process(None, pred_no_score) + res = metric.evaluate(6) + self.assertIsInstance(res, dict) + # Expected values come from sklearn + self.assertAlmostEqual(res['single-label/precision'], 77.777, places=2) + self.assertAlmostEqual(res['single-label/recall'], 72.222, places=2) + self.assertAlmostEqual(res['single-label/f1-score'], 65.555, places=2) + + metric = METRICS.build(dict(type='SingleLabelMetric', thrs=(0., 0.6))) + with self.assertRaisesRegex(AssertionError, 'must be specified'): + metric.process(None, pred_no_score) + + # Test with empty items + metric = METRICS.build( + dict(type='SingleLabelMetric', items=tuple(), num_classes=3)) + metric.process(None, pred) + res = metric.evaluate(6) + self.assertIsInstance(res, dict) + self.assertEqual(len(res), 0) + + metric.process(None, pred_no_score) + res = metric.evaluate(6) + self.assertIsInstance(res, dict) + self.assertEqual(len(res), 0) + + # Test initialization + metric = METRICS.build(dict(type='SingleLabelMetric', thrs=0.6)) + self.assertTupleEqual(metric.thrs, (0.6, )) + metric = METRICS.build(dict(type='SingleLabelMetric', thrs=[0.6])) + self.assertTupleEqual(metric.thrs, (0.6, )) + + def test_calculate(self): + """Test using the metric from static method.""" + + # Test with score + y_true = np.array([0, 0, 1, 2, 1, 0]) + y_label = torch.tensor([0, 0, 1, 2, 2, 2]) + y_score = [ + [0.7, 0.0, 0.3], + [0.5, 0.2, 0.3], + [0.4, 0.5, 0.1], + [0.0, 0.0, 1.0], + [0.0, 0.0, 1.0], + [0.0, 0.0, 1.0], + ] + + # Test with score + res = SingleLabelMetric.calculate(y_score, y_true, thrs=(0.6, )) + self.assertIsInstance(res, list) + self.assertIsInstance(res[0], tuple) + precision, recall, f1_score, support = res[0] + self.assertTensorEqual(precision, (1 + 0 + 1 / 3) / 3 * 100) + self.assertTensorEqual(recall, (1 / 3 + 0 + 1) / 3 * 100) + self.assertTensorEqual(f1_score, (1 / 2 + 0 + 1 / 2) / 3 * 100) + self.assertTensorEqual(support, 6) + + # Test with label + res = SingleLabelMetric.calculate(y_label, y_true, num_classes=3) + self.assertIsInstance(res, tuple) + precision, recall, f1_score, support = res + # Expected values come from sklearn + self.assertTensorEqual(precision, 77.7777) + self.assertTensorEqual(recall, 72.2222) + self.assertTensorEqual(f1_score, 65.5555) + self.assertTensorEqual(support, 6) + + # Test with invalid inputs + with self.assertRaisesRegex(TypeError, " is not"): + SingleLabelMetric.calculate(y_label, 'hi') + + def assertTensorEqual(self, + tensor: torch.Tensor, + value: float, + msg=None, + **kwarg): + tensor = tensor.to(torch.float32) + value = torch.tensor(value).float() + try: + torch.testing.assert_allclose(tensor, value, **kwarg) + except AssertionError as e: + self.fail(self._formatMessage(msg, str(e))) + + +class TestConfusionMatrix(TestCase): + + def 
test_evaluate(self):
+        """Test using the metric in the same way as Evaluator."""
+        pred = [
+            DataSample().set_pred_score(i).set_pred_label(j).set_gt_label(
+                k).to_dict() for i, j, k in zip([
+                    torch.tensor([0.7, 0.0, 0.3]),
+                    torch.tensor([0.5, 0.2, 0.3]),
+                    torch.tensor([0.4, 0.5, 0.1]),
+                    torch.tensor([0.0, 0.0, 1.0]),
+                    torch.tensor([0.0, 0.0, 1.0]),
+                    torch.tensor([0.0, 0.0, 1.0]),
+                ], [0, 0, 1, 2, 2, 2], [0, 0, 1, 2, 1, 0])
+        ]
+
+        # Test with score (use score instead of label if score exists)
+        metric = METRICS.build(dict(type='ConfusionMatrix'))
+        metric.process(None, pred)
+        res = metric.evaluate(6)
+        self.assertIsInstance(res, dict)
+        self.assertTensorEqual(
+            res['confusion_matrix/result'],
+            torch.tensor([
+                [2, 0, 1],
+                [0, 1, 1],
+                [0, 0, 1],
+            ]))
+
+        # Test with label
+        for sample in pred:
+            del sample['pred_score']
+        metric = METRICS.build(dict(type='ConfusionMatrix'))
+        metric.process(None, pred)
+        with self.assertRaisesRegex(AssertionError,
+                                    'Please specify the `num_classes`'):
+            metric.evaluate(6)
+
+        metric = METRICS.build(dict(type='ConfusionMatrix', num_classes=3))
+        metric.process(None, pred)
+        res = metric.evaluate(6)
+        self.assertIsInstance(res, dict)
+        self.assertTensorEqual(
+            res['confusion_matrix/result'],
+            torch.tensor([
+                [2, 0, 1],
+                [0, 1, 1],
+                [0, 0, 1],
+            ]))
+
+    def test_calculate(self):
+        y_true = np.array([0, 0, 1, 2, 1, 0])
+        y_label = torch.tensor([0, 0, 1, 2, 2, 2])
+        y_score = [
+            [0.7, 0.0, 0.3],
+            [0.5, 0.2, 0.3],
+            [0.4, 0.5, 0.1],
+            [0.0, 0.0, 1.0],
+            [0.0, 0.0, 1.0],
+            [0.0, 0.0, 1.0],
+        ]
+
+        # Test with score
+        cm = ConfusionMatrix.calculate(y_score, y_true)
+        self.assertIsInstance(cm, torch.Tensor)
+        self.assertTensorEqual(
+            cm, torch.tensor([
+                [2, 0, 1],
+                [0, 1, 1],
+                [0, 0, 1],
+            ]))
+
+        # Test with label
+        with self.assertRaisesRegex(AssertionError,
+                                    'Please specify the `num_classes`'):
+            ConfusionMatrix.calculate(y_label, y_true)
+
+        cm = ConfusionMatrix.calculate(y_label, y_true, num_classes=3)
+        self.assertIsInstance(cm, torch.Tensor)
+        self.assertTensorEqual(
+            cm, torch.tensor([
+                [2, 0, 1],
+                [0, 1, 1],
+                [0, 0, 1],
+            ]))
+
+        # Test with invalid inputs
+        with self.assertRaisesRegex(TypeError, " is not"):
+            ConfusionMatrix.calculate(y_label, 'hi')
+
+    def test_plot(self):
+        import matplotlib.pyplot as plt
+
+        cm = torch.tensor([[2, 0, 1], [0, 1, 1], [0, 0, 1]])
+        fig = ConfusionMatrix.plot(cm, include_values=True, show=False)
+
+        self.assertIsInstance(fig, plt.Figure)
+
+    def assertTensorEqual(self,
+                          tensor: torch.Tensor,
+                          value: float,
+                          msg=None,
+                          **kwarg):
+        tensor = tensor.to(torch.float32)
+        value = torch.tensor(value).float()
+        try:
+            torch.testing.assert_allclose(tensor, value, **kwarg)
+        except AssertionError as e:
+            self.fail(self._formatMessage(msg, str(e)))
diff --git a/tests/test_evaluation/test_metrics/test_voc_metrics.py b/tests/test_evaluation/test_metrics/test_voc_metrics.py
new file mode 100644
index 0000000..5101cf8
--- /dev/null
+++ b/tests/test_evaluation/test_metrics/test_voc_metrics.py
@@ -0,0 +1,228 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from unittest import TestCase + +import numpy as np +import sklearn.metrics +import torch +from mmengine.evaluator import Evaluator +from mmengine.registry import init_default_scope + +from mmpretrain.structures import DataSample + +init_default_scope('mmpretrain') + + +class TestVOCMultiLabel(TestCase): + + def test_evaluate(self): + # prepare input data + y_true_label = [[0], [1, 3], [0, 1, 2], [3]] + y_true_difficult = [[0], [2], [1], []] + y_pred_score = torch.tensor([ + [0.8, 0, 0, 0.6], + [0.2, 0, 0.6, 0], + [0, 0.9, 0.6, 0], + [0, 0, 0.2, 0.3], + ]) + + # generate data samples + pred = [ + DataSample(num_classes=4).set_pred_score(i).set_gt_label(j) + for i, j in zip(y_pred_score, y_true_label) + ] + for sample, difficult_label in zip(pred, y_true_difficult): + sample.set_metainfo({'gt_label_difficult': difficult_label}) + + # 1. Test with default argument + evaluator = Evaluator(dict(type='VOCMultiLabelMetric')) + evaluator.process(pred) + res = evaluator.evaluate(4) + self.assertIsInstance(res, dict) + + # generate sklearn input + y_true = np.array([ + [1, 0, 0, 0], + [0, 1, -1, 1], + [1, 1, 1, 0], + [0, 0, 0, 1], + ]) + ignored_index = y_true == -1 + y_true[ignored_index] = 0 + thr05_y_pred = np.array([ + [1, 0, 0, 1], + [0, 0, 1, 0], + [0, 1, 1, 0], + [0, 0, 0, 0], + ]) + thr05_y_pred[ignored_index] = 0 + + expect_precision = sklearn.metrics.precision_score( + y_true, thr05_y_pred, average='macro') * 100 + expect_recall = sklearn.metrics.recall_score( + y_true, thr05_y_pred, average='macro') * 100 + expect_f1 = sklearn.metrics.f1_score( + y_true, thr05_y_pred, average='macro') * 100 + self.assertEqual(res['multi-label/precision'], expect_precision) + self.assertEqual(res['multi-label/recall'], expect_recall) + # precision is different between torch and sklearn + self.assertAlmostEqual(res['multi-label/f1-score'], expect_f1, 5) + + # 2. Test with `difficult_as_positive`=False argument + evaluator = Evaluator( + dict(type='VOCMultiLabelMetric', difficult_as_positive=False)) + evaluator.process(pred) + res = evaluator.evaluate(4) + self.assertIsInstance(res, dict) + + # generate sklearn input + y_true = np.array([ + [1, 0, 0, 0], + [0, 1, 0, 1], + [1, 1, 1, 0], + [0, 0, 0, 1], + ]) + thr05_y_pred = np.array([ + [1, 0, 0, 1], + [0, 0, 1, 0], + [0, 1, 1, 0], + [0, 0, 0, 0], + ]) + + expect_precision = sklearn.metrics.precision_score( + y_true, thr05_y_pred, average='macro') * 100 + expect_recall = sklearn.metrics.recall_score( + y_true, thr05_y_pred, average='macro') * 100 + expect_f1 = sklearn.metrics.f1_score( + y_true, thr05_y_pred, average='macro') * 100 + self.assertEqual(res['multi-label/precision'], expect_precision) + self.assertEqual(res['multi-label/recall'], expect_recall) + # precision is different between torch and sklearn + self.assertAlmostEqual(res['multi-label/f1-score'], expect_f1, 5) + + # 3. 
Test with `difficult_as_positive`=True argument + evaluator = Evaluator( + dict(type='VOCMultiLabelMetric', difficult_as_positive=True)) + evaluator.process(pred) + res = evaluator.evaluate(4) + self.assertIsInstance(res, dict) + + # generate sklearn input + y_true = np.array([ + [1, 0, 0, 0], + [0, 1, 1, 1], + [1, 1, 1, 0], + [0, 0, 0, 1], + ]) + thr05_y_pred = np.array([ + [1, 0, 0, 1], + [0, 0, 1, 0], + [0, 1, 1, 0], + [0, 0, 0, 0], + ]) + + expect_precision = sklearn.metrics.precision_score( + y_true, thr05_y_pred, average='macro') * 100 + expect_recall = sklearn.metrics.recall_score( + y_true, thr05_y_pred, average='macro') * 100 + expect_f1 = sklearn.metrics.f1_score( + y_true, thr05_y_pred, average='macro') * 100 + self.assertEqual(res['multi-label/precision'], expect_precision) + self.assertEqual(res['multi-label/recall'], expect_recall) + # precision is different between torch and sklearn + self.assertAlmostEqual(res['multi-label/f1-score'], expect_f1, 5) + + +class TestVOCAveragePrecision(TestCase): + + def test_evaluate(self): + """Test using the metric in the same way as Evalutor.""" + # prepare input data + y_true_difficult = [[0], [2], [1], []] + y_pred_score = torch.tensor([ + [0.8, 0.1, 0, 0.6], + [0.2, 0.2, 0.7, 0], + [0.1, 0.9, 0.6, 0.1], + [0, 0, 0.2, 0.3], + ]) + y_true_label = [[0], [1, 3], [0, 1, 2], [3]] + y_true = torch.tensor([ + [1, 0, 0, 0], + [0, 1, 0, 1], + [1, 1, 1, 0], + [0, 0, 0, 1], + ]) + y_true_difficult = [[0], [2], [1], []] + + # generate data samples + pred = [ + DataSample(num_classes=4).set_pred_score(i).set_gt_score( + j).set_gt_label(k) + for i, j, k in zip(y_pred_score, y_true, y_true_label) + ] + for sample, difficult_label in zip(pred, y_true_difficult): + sample.set_metainfo({'gt_label_difficult': difficult_label}) + + # 1. Test with default + evaluator = Evaluator(dict(type='VOCAveragePrecision')) + evaluator.process(pred) + res = evaluator.evaluate(4) + self.assertIsInstance(res, dict) + + # prepare inputs for sklearn for this case + y_pred_score = [[0.8, 0.2, 0.1, 0], [0.1, 0.2, 0.9, 0], [0, 0.6, 0.2], + [0.6, 0, 0.1, 0.3]] + y_true = [[1, 0, 1, 0], [0, 1, 1, 0], [0, 1, 0], [0, 1, 0, 1]] + expected_res = [] + for pred_per_class, gt_per_class in zip(y_pred_score, y_true): + expected_res.append( + sklearn.metrics.average_precision_score( + gt_per_class, pred_per_class)) + + self.assertAlmostEqual( + res['multi-label/mAP'], + sum(expected_res) * 100 / len(expected_res), + places=4) + + # 2. Test with `difficult_as_positive`=False argument + evaluator = Evaluator( + dict(type='VOCAveragePrecision', difficult_as_positive=False)) + evaluator.process(pred) + res = evaluator.evaluate(4) + self.assertIsInstance(res, dict) + + # prepare inputs for sklearn for this case + y_pred_score = [[0.8, 0.2, 0.1, 0], [0.1, 0.2, 0.9, 0], + [0, 0.7, 0.6, 0.2], [0.6, 0, 0.1, 0.3]] + y_true = [[1, 0, 1, 0], [0, 1, 1, 0], [0, 0, 1, 0], [0, 1, 0, 1]] + expected_res = [] + for pred_per_class, gt_per_class in zip(y_pred_score, y_true): + expected_res.append( + sklearn.metrics.average_precision_score( + gt_per_class, pred_per_class)) + + self.assertAlmostEqual( + res['multi-label/mAP'], + sum(expected_res) * 100 / len(expected_res), + places=4) + + # 3. 
Test with `difficult_as_positive`=True argument + evaluator = Evaluator( + dict(type='VOCAveragePrecision', difficult_as_positive=True)) + evaluator.process(pred) + res = evaluator.evaluate(4) + self.assertIsInstance(res, dict) + + # prepare inputs for sklearn for this case + y_pred_score = [[0.8, 0.2, 0.1, 0], [0.1, 0.2, 0.9, 0], + [0, 0.7, 0.6, 0.2], [0.6, 0, 0.1, 0.3]] + y_true = [[1, 0, 1, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 1, 0, 1]] + expected_res = [] + for pred_per_class, gt_per_class in zip(y_pred_score, y_true): + expected_res.append( + sklearn.metrics.average_precision_score( + gt_per_class, pred_per_class)) + + self.assertAlmostEqual( + res['multi-label/mAP'], + sum(expected_res) * 100 / len(expected_res), + places=4) diff --git a/tests/test_models/test_backbones/__init__.py b/tests/test_models/test_backbones/__init__.py new file mode 100644 index 0000000..ef101fe --- /dev/null +++ b/tests/test_models/test_backbones/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. diff --git a/tests/test_models/test_backbones/test_beit.py b/tests/test_models/test_backbones/test_beit.py new file mode 100644 index 0000000..eed2be5 --- /dev/null +++ b/tests/test_models/test_backbones/test_beit.py @@ -0,0 +1,122 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from unittest import TestCase + +import torch + +from mmpretrain.models.backbones import BEiTViT + + +class TestBEiT(TestCase): + + def setUp(self): + self.cfg = dict( + arch='b', img_size=224, patch_size=16, drop_path_rate=0.1) + + def test_structure(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + BEiTViT(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + } + BEiTViT(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 128, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 1024 + } + model = BEiTViT(**cfg) + self.assertEqual(model.embed_dims, 128) + self.assertEqual(model.num_layers, 24) + self.assertIsNone(model.pos_embed) + self.assertIsNone(model.rel_pos_bias) + for layer in model.layers: + self.assertEqual(layer.attn.num_heads, 16) + self.assertEqual(layer.ffn.feedforward_channels, 1024) + + # Test out_indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = {1: 1} + with self.assertRaisesRegex(AssertionError, "get "): + BEiTViT(**cfg) + cfg['out_indices'] = [0, 13] + with self.assertRaisesRegex(AssertionError, 'Invalid out_indices 13'): + BEiTViT(**cfg) + + # Test pos_embed + cfg = deepcopy(self.cfg) + cfg['use_abs_pos_emb'] = True + model = BEiTViT(**cfg) + self.assertEqual(model.pos_embed.shape, (1, 197, 768)) + + # Test model structure + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.1 + model = BEiTViT(**cfg) + self.assertEqual(len(model.layers), 12) + dpr_inc = 0.1 / (12 - 1) + dpr = 0 + for layer in model.layers: + self.assertEqual(layer.gamma_1.shape, (768, )) + self.assertEqual(layer.gamma_2.shape, (768, )) + self.assertEqual(layer.attn.embed_dims, 768) + self.assertEqual(layer.attn.num_heads, 12) + self.assertEqual(layer.ffn.feedforward_channels, 3072) + self.assertFalse(layer.ffn.add_identity) + self.assertAlmostEqual(layer.ffn.dropout_layer.drop_prob, dpr) + dpr += dpr_inc + + def test_forward(self): + imgs = torch.randn(1, 3, 
224, 224) + + cfg = deepcopy(self.cfg) + cfg['out_type'] = 'cls_token' + model = BEiTViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + cls_token = outs[-1] + self.assertEqual(cls_token.shape, (1, 768)) + + # test without output cls_token + cfg = deepcopy(self.cfg) + model = BEiTViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 768)) + + # test without average + cfg = deepcopy(self.cfg) + cfg['out_type'] = 'featmap' + model = BEiTViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 768, 14, 14)) + + # Test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = [-3, -2, -1] + model = BEiTViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 3) + for out in outs: + patch_token = out + self.assertEqual(patch_token.shape, (1, 768)) diff --git a/tests/test_models/test_backbones/test_conformer.py b/tests/test_models/test_backbones/test_conformer.py new file mode 100644 index 0000000..d28ad5a --- /dev/null +++ b/tests/test_models/test_backbones/test_conformer.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy + +import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import Conformer + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +@torch.no_grad() # To save memory +def test_conformer_backbone(): + + cfg_ori = dict( + arch='T', + drop_path_rate=0.1, + ) + + with pytest.raises(AssertionError): + # test invalid arch + cfg = deepcopy(cfg_ori) + cfg['arch'] = 'unknown' + Conformer(**cfg) + + with pytest.raises(AssertionError): + # test arch without essential keys + cfg = deepcopy(cfg_ori) + cfg['arch'] = {'embed_dims': 24, 'channel_ratio': 6, 'num_heads': 9} + Conformer(**cfg) + + # Test Conformer small model with patch size of 16 + model = Conformer(**cfg_ori) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(1, 3, 224, 224) + conv_feature, transformer_feature = model(imgs)[-1] + assert conv_feature.shape == (1, 64 * 1 * 4 + ) # base_channels * channel_ratio * 4 + assert transformer_feature.shape == (1, 384) + + # Test Conformer with irregular input size. 
+ model = Conformer(**cfg_ori) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(1, 3, 241, 241) + conv_feature, transformer_feature = model(imgs)[-1] + assert conv_feature.shape == (1, 64 * 1 * 4 + ) # base_channels * channel_ratio * 4 + assert transformer_feature.shape == (1, 384) + + imgs = torch.randn(1, 3, 321, 221) + conv_feature, transformer_feature = model(imgs)[-1] + assert conv_feature.shape == (1, 64 * 1 * 4 + ) # base_channels * channel_ratio * 4 + assert transformer_feature.shape == (1, 384) + + # Test custom arch Conformer without output cls token + cfg = deepcopy(cfg_ori) + cfg['arch'] = { + 'embed_dims': 128, + 'depths': 15, + 'num_heads': 16, + 'channel_ratio': 3, + } + cfg['with_cls_token'] = False + cfg['base_channels'] = 32 + model = Conformer(**cfg) + conv_feature, transformer_feature = model(imgs)[-1] + assert conv_feature.shape == (1, 32 * 3 * 4) + assert transformer_feature.shape == (1, 128) + + # Test Conformer with multi out indices + cfg = deepcopy(cfg_ori) + cfg['out_indices'] = [4, 8, 12] + model = Conformer(**cfg) + outs = model(imgs) + assert len(outs) == 3 + # stage 1 + conv_feature, transformer_feature = outs[0] + assert conv_feature.shape == (1, 64 * 1) + assert transformer_feature.shape == (1, 384) + # stage 2 + conv_feature, transformer_feature = outs[1] + assert conv_feature.shape == (1, 64 * 1 * 2) + assert transformer_feature.shape == (1, 384) + # stage 3 + conv_feature, transformer_feature = outs[2] + assert conv_feature.shape == (1, 64 * 1 * 4) + assert transformer_feature.shape == (1, 384) diff --git a/tests/test_models/test_backbones/test_convmixer.py b/tests/test_models/test_backbones/test_convmixer.py new file mode 100644 index 0000000..abe6c13 --- /dev/null +++ b/tests/test_models/test_backbones/test_convmixer.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmpretrain.models.backbones import ConvMixer + + +def test_assertion(): + with pytest.raises(AssertionError): + ConvMixer(arch='unknown') + + with pytest.raises(AssertionError): + # ConvMixer arch dict should include essential_keys, + ConvMixer(arch=dict(channels=[2, 3, 4, 5])) + + with pytest.raises(AssertionError): + # ConvMixer out_indices should be valid depth. 
+ ConvMixer(out_indices=-100) + + +@torch.no_grad() # To save memory +def test_convmixer(): + + # Test forward + model = ConvMixer(arch='768/32') + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 768, 32, 32]) + + # Test forward with multiple outputs + model = ConvMixer(arch='768/32', out_indices=range(32)) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 32 + for f in feat: + assert f.shape == torch.Size([1, 768, 32, 32]) + + # Test with custom arch + model = ConvMixer( + arch={ + 'embed_dims': 99, + 'depth': 5, + 'patch_size': 5, + 'kernel_size': 9 + }, + out_indices=range(5)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + for f in feat: + assert f.shape == torch.Size([1, 99, 44, 44]) + + # Test with even kernel size arch + model = ConvMixer(arch={ + 'embed_dims': 99, + 'depth': 5, + 'patch_size': 5, + 'kernel_size': 8 + }) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 99, 44, 44]) + + # Test frozen_stages + model = ConvMixer(arch='768/32', frozen_stages=10) + model.init_weights() + model.train() + + for i in range(10): + assert not model.stages[i].training + + for i in range(10, 32): + assert model.stages[i].training diff --git a/tests/test_models/test_backbones/test_convnext.py b/tests/test_models/test_backbones/test_convnext.py new file mode 100644 index 0000000..5f63795 --- /dev/null +++ b/tests/test_models/test_backbones/test_convnext.py @@ -0,0 +1,106 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmpretrain.models.backbones import ConvNeXt + + +def test_assertion(): + with pytest.raises(AssertionError): + ConvNeXt(arch='unknown') + + with pytest.raises(AssertionError): + # ConvNeXt arch dict should include 'embed_dims', + ConvNeXt(arch=dict(channels=[2, 3, 4, 5])) + + with pytest.raises(AssertionError): + # ConvNeXt arch dict should include 'embed_dims', + ConvNeXt(arch=dict(depths=[2, 3, 4], channels=[2, 3, 4, 5])) + + +def test_convnext(): + + # Test forward + model = ConvNeXt(arch='tiny', out_indices=-1) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 768]) + + # Test forward with multiple outputs + model = ConvNeXt(arch='small', out_indices=(0, 1, 2, 3)) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 96]) + assert feat[1].shape == torch.Size([1, 192]) + assert feat[2].shape == torch.Size([1, 384]) + assert feat[3].shape == torch.Size([1, 768]) + + # Test with custom arch + model = ConvNeXt( + arch={ + 'depths': [2, 3, 4, 5, 6], + 'channels': [16, 32, 64, 128, 256] + }, + out_indices=(0, 1, 2, 3, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size([1, 16]) + assert feat[1].shape == torch.Size([1, 32]) + assert feat[2].shape == torch.Size([1, 64]) + assert feat[3].shape == torch.Size([1, 128]) + assert feat[4].shape == torch.Size([1, 256]) + + # Test without gap before final norm + model = ConvNeXt( + arch='small', out_indices=(0, 1, 2, 3), gap_before_final_norm=False) + + imgs = torch.randn(1, 3, 
224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 96, 56, 56]) + assert feat[1].shape == torch.Size([1, 192, 28, 28]) + assert feat[2].shape == torch.Size([1, 384, 14, 14]) + assert feat[3].shape == torch.Size([1, 768, 7, 7]) + + # Test frozen_stages + model = ConvNeXt(arch='small', out_indices=(0, 1, 2, 3), frozen_stages=2) + model.init_weights() + model.train() + + for i in range(2): + assert not model.downsample_layers[i].training + assert not model.stages[i].training + + for i in range(2, 4): + assert model.downsample_layers[i].training + assert model.stages[i].training + + # Test Activation Checkpointing + model = ConvNeXt(arch='tiny', out_indices=-1, with_cp=True) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 768]) + + # Test linear_pw_conv=False + model = ConvNeXt(arch='tiny', out_indices=-1, linear_pw_conv=False) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 768]) diff --git a/tests/test_models/test_backbones/test_cspnet.py b/tests/test_models/test_backbones/test_cspnet.py new file mode 100644 index 0000000..9063e2f --- /dev/null +++ b/tests/test_models/test_backbones/test_cspnet.py @@ -0,0 +1,147 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from functools import partial +from unittest import TestCase + +import torch +from mmcv.cnn import ConvModule +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.models.backbones import CSPDarkNet, CSPResNet, CSPResNeXt +from mmpretrain.models.backbones.cspnet import (CSPNet, DarknetBottleneck, + ResNetBottleneck, + ResNeXtBottleneck) + + +class TestCSPNet(TestCase): + + def setUp(self): + self.arch = dict( + block_fn=(DarknetBottleneck, ResNetBottleneck, ResNeXtBottleneck), + in_channels=(32, 64, 128), + out_channels=(64, 128, 256), + num_blocks=(1, 2, 8), + expand_ratio=(2, 1, 1), + bottle_ratio=(3, 1, 1), + has_downsampler=True, + down_growth=True, + block_args=({}, {}, dict(base_channels=32))) + self.stem_fn = partial(torch.nn.Conv2d, out_channels=32, kernel_size=3) + + def test_structure(self): + # Test with attribute arch_setting. 
+ model = CSPNet(arch=self.arch, stem_fn=self.stem_fn, out_indices=[-1]) + self.assertEqual(len(model.stages), 3) + self.assertEqual(type(model.stages[0].blocks[0]), DarknetBottleneck) + self.assertEqual(type(model.stages[1].blocks[0]), ResNetBottleneck) + self.assertEqual(type(model.stages[2].blocks[0]), ResNeXtBottleneck) + + +class TestCSPDarkNet(TestCase): + + def setUp(self): + self.class_name = CSPDarkNet + self.cfg = dict(depth=53) + self.out_channels = [64, 128, 256, 512, 1024] + self.all_out_indices = [0, 1, 2, 3, 4] + self.frozen_stages = 2 + self.stem_down = (1, 1) + self.num_stages = 5 + + def test_structure(self): + # Test invalid default depths + with self.assertRaisesRegex(AssertionError, 'depth must be one of'): + cfg = deepcopy(self.cfg) + cfg['depth'] = 'unknown' + self.class_name(**cfg) + + # Test out_indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = {1: 1} + with self.assertRaisesRegex(AssertionError, "get "): + self.class_name(**cfg) + cfg['out_indices'] = [0, 13] + with self.assertRaisesRegex(AssertionError, 'Invalid out_indices 13'): + self.class_name(**cfg) + + # Test model structure + cfg = deepcopy(self.cfg) + model = self.class_name(**cfg) + self.assertEqual(len(model.stages), self.num_stages) + + def test_forward(self): + imgs = torch.randn(3, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = self.class_name(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + self.assertEqual(outs[-1].size(), (3, self.out_channels[-1], 7, 7)) + + # Test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = self.all_out_indices + model = self.class_name(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), len(self.all_out_indices)) + w, h = 224 / self.stem_down[0], 224 / self.stem_down[1] + for i, out in enumerate(outs): + self.assertEqual( + out.size(), + (3, self.out_channels[i], w // 2**(i + 1), h // 2**(i + 1))) + + # Test frozen stages + cfg = deepcopy(self.cfg) + cfg['frozen_stages'] = self.frozen_stages + model = self.class_name(**cfg) + model.init_weights() + model.train() + assert model.stem.training is False + for param in model.stem.parameters(): + assert param.requires_grad is False + for i in range(self.frozen_stages + 1): + stage = model.stages[i] + for mod in stage.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False, i + for param in stage.parameters(): + assert param.requires_grad is False + + +class TestCSPResNet(TestCSPDarkNet): + + def setUp(self): + self.class_name = CSPResNet + self.cfg = dict(depth=50) + self.out_channels = [128, 256, 512, 1024] + self.all_out_indices = [0, 1, 2, 3] + self.frozen_stages = 2 + self.stem_down = (2, 2) + self.num_stages = 4 + + def test_deep_stem(self, ): + cfg = deepcopy(self.cfg) + cfg['deep_stem'] = True + model = self.class_name(**cfg) + self.assertEqual(len(model.stem), 3) + for i in range(3): + self.assertEqual(type(model.stem[i]), ConvModule) + + +class TestCSPResNeXt(TestCSPDarkNet): + + def setUp(self): + self.class_name = CSPResNeXt + self.cfg = dict(depth=50) + self.out_channels = [256, 512, 1024, 2048] + self.all_out_indices = [0, 1, 2, 3] + self.frozen_stages = 2 + self.stem_down = (2, 2) + self.num_stages = 4 + + +if __name__ == '__main__': + import unittest + unittest.main() diff --git a/tests/test_models/test_backbones/test_davit.py b/tests/test_models/test_backbones/test_davit.py new file mode 100644 index 0000000..726db74 --- /dev/null +++ 
b/tests/test_models/test_backbones/test_davit.py @@ -0,0 +1,110 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from unittest import TestCase + +import torch + +from mmpretrain.models.backbones import DaViT +from mmpretrain.models.backbones.davit import SpatialBlock + + +class TestDaViT(TestCase): + + def setUp(self): + self.cfg = dict(arch='t', patch_size=4, drop_path_rate=0.1) + + def test_structure(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + DaViT(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + } + DaViT(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 64, + 'num_heads': [3, 3, 3, 3], + 'depths': [1, 1, 2, 1] + } + model = DaViT(**cfg) + self.assertEqual(model.embed_dims, 64) + self.assertEqual(model.num_layers, 4) + for layer in model.stages: + self.assertEqual( + layer.blocks[0].spatial_block.attn.w_msa.num_heads, 3) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = DaViT(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = DaViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + self.assertEqual(outs[0].shape, (1, 768, 7, 7)) + + # Test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = [2, 3] + model = DaViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 2) + self.assertEqual(outs[0].shape, (1, 384, 14, 14)) + self.assertEqual(outs[1].shape, (1, 768, 7, 7)) + + # test with checkpoint forward + cfg = deepcopy(self.cfg) + cfg['with_cp'] = True + model = DaViT(**cfg) + for m in model.modules(): + if isinstance(m, SpatialBlock): + self.assertTrue(m.with_cp) + model.init_weights() + model.train() + + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + self.assertEqual(outs[0].shape, (1, 768, 7, 7)) + + # Test forward with dynamic input size + imgs1 = torch.randn(1, 3, 224, 224) + imgs2 = torch.randn(1, 3, 256, 256) + imgs3 = torch.randn(1, 3, 256, 309) + cfg = deepcopy(self.cfg) + model = DaViT(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + expect_feat_shape = (imgs.shape[2] // 32, imgs.shape[3] // 32) + self.assertEqual(outs[0].shape, (1, 768, *expect_feat_shape)) diff --git a/tests/test_models/test_backbones/test_deit.py b/tests/test_models/test_backbones/test_deit.py new file mode 100644 index 0000000..b2d096d --- /dev/null +++ b/tests/test_models/test_backbones/test_deit.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
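The dynamic-input checks in the DaViT test above boil down to one piece of arithmetic: with a stride-4 patch embedding followed by three 2x downsampling stages the overall stride is 32, so the final feature map is the input size floor-divided by 32. A minimal sketch of that expectation (the stride decomposition is my reading of the test, not something it states):

    def expected_featmap_hw(height, width, overall_stride=32):
        # Final feature-map size for a hierarchical backbone whose stem
        # and stages multiply out to `overall_stride` (assumed 4 * 2 * 2 * 2).
        return height // overall_stride, width // overall_stride

    for h, w in [(224, 224), (256, 256), (256, 309)]:
        print((h, w), '->', expected_featmap_hw(h, w))
    # (224, 224) -> (7, 7), (256, 256) -> (8, 8), (256, 309) -> (8, 9)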
+import math +import os +import tempfile +from copy import deepcopy +from unittest import TestCase + +import torch +from mmengine.runner import load_checkpoint, save_checkpoint + +from mmpretrain.models.backbones import DistilledVisionTransformer +from .utils import timm_resize_pos_embed + + +class TestDeiT(TestCase): + + def setUp(self): + self.cfg = dict( + arch='deit-tiny', img_size=224, patch_size=16, drop_rate=0.1) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = DistilledVisionTransformer(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue(torch.allclose(model.dist_token, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse(torch.allclose(model.dist_token, torch.tensor(0.))) + + # test load checkpoint + pretrain_pos_embed = model.pos_embed.clone().detach() + tmpdir = tempfile.gettempdir() + checkpoint = os.path.join(tmpdir, 'test.pth') + save_checkpoint(model.state_dict(), checkpoint) + cfg = deepcopy(self.cfg) + model = DistilledVisionTransformer(**cfg) + load_checkpoint(model, checkpoint, strict=True) + self.assertTrue(torch.allclose(model.pos_embed, pretrain_pos_embed)) + + # test load checkpoint with different img_size + cfg = deepcopy(self.cfg) + cfg['img_size'] = 384 + model = DistilledVisionTransformer(**cfg) + load_checkpoint(model, checkpoint, strict=True) + resized_pos_embed = timm_resize_pos_embed( + pretrain_pos_embed, model.pos_embed, num_tokens=2) + self.assertTrue(torch.allclose(model.pos_embed, resized_pos_embed)) + + os.remove(checkpoint) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + # test with output cls_token + cfg = deepcopy(self.cfg) + model = DistilledVisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + cls_token, dist_token = outs[-1] + self.assertEqual(cls_token.shape, (1, 192)) + self.assertEqual(dist_token.shape, (1, 192)) + + # test without output cls_token + cfg = deepcopy(self.cfg) + cfg['out_type'] = 'featmap' + model = DistilledVisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 192, 14, 14)) + + # Test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = [-3, -2, -1] + model = DistilledVisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 3) + for out in outs: + cls_token, dist_token = out + self.assertEqual(cls_token.shape, (1, 192)) + self.assertEqual(dist_token.shape, (1, 192)) + + # Test forward with dynamic input size + imgs1 = torch.randn(1, 3, 224, 224) + imgs2 = torch.randn(1, 3, 256, 256) + imgs3 = torch.randn(1, 3, 256, 309) + cfg = deepcopy(self.cfg) + cfg['out_type'] = 'featmap' + model = DistilledVisionTransformer(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + featmap = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 16), + math.ceil(imgs.shape[3] / 16)) + self.assertEqual(featmap.shape, (1, 192, *expect_feat_shape)) diff --git 
a/tests/test_models/test_backbones/test_deit3.py b/tests/test_models/test_backbones/test_deit3.py new file mode 100644 index 0000000..7acb507 --- /dev/null +++ b/tests/test_models/test_backbones/test_deit3.py @@ -0,0 +1,167 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import os +import tempfile +from copy import deepcopy +from unittest import TestCase + +import torch +from mmengine.runner import load_checkpoint, save_checkpoint + +from mmpretrain.models.backbones import DeiT3 + + +class TestDeiT3(TestCase): + + def setUp(self): + self.cfg = dict( + arch='b', img_size=224, patch_size=16, drop_path_rate=0.1) + + def test_structure(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + DeiT3(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + } + DeiT3(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 128, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 1024 + } + model = DeiT3(**cfg) + self.assertEqual(model.embed_dims, 128) + self.assertEqual(model.num_layers, 24) + for layer in model.layers: + self.assertEqual(layer.attn.num_heads, 16) + self.assertEqual(layer.ffn.feedforward_channels, 1024) + + # Test out_indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = {1: 1} + with self.assertRaisesRegex(AssertionError, "get "): + DeiT3(**cfg) + cfg['out_indices'] = [0, 13] + with self.assertRaisesRegex(AssertionError, 'Invalid out_indices 13'): + DeiT3(**cfg) + + # Test model structure + cfg = deepcopy(self.cfg) + model = DeiT3(**cfg) + self.assertEqual(len(model.layers), 12) + dpr_inc = 0.1 / (12 - 1) + dpr = 0 + for layer in model.layers: + self.assertEqual(layer.attn.embed_dims, 768) + self.assertEqual(layer.attn.num_heads, 12) + self.assertEqual(layer.ffn.feedforward_channels, 3072) + self.assertAlmostEqual(layer.attn.out_drop.drop_prob, dpr) + self.assertAlmostEqual(layer.ffn.dropout_layer.drop_prob, dpr) + dpr += dpr_inc + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = DeiT3(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue(torch.allclose(model.pos_embed, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse(torch.allclose(model.pos_embed, torch.tensor(0.))) + + # test load checkpoint + pretrain_pos_embed = model.pos_embed.clone().detach() + tmpdir = tempfile.gettempdir() + checkpoint = os.path.join(tmpdir, 'test.pth') + save_checkpoint(model.state_dict(), checkpoint) + cfg = deepcopy(self.cfg) + model = DeiT3(**cfg) + load_checkpoint(model, checkpoint, strict=True) + self.assertTrue(torch.allclose(model.pos_embed, pretrain_pos_embed)) + + # test load checkpoint with different img_size + cfg = deepcopy(self.cfg) + cfg['img_size'] = 384 + model = DeiT3(**cfg) + load_checkpoint(model, checkpoint, strict=True) + + os.remove(checkpoint) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + # test with_cls_token=False + cfg = deepcopy(self.cfg) + 
cfg['with_cls_token'] = False + cfg['out_type'] = 'cls_token' + with self.assertRaisesRegex(ValueError, 'must be True'): + DeiT3(**cfg) + + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['out_type'] = 'featmap' + model = DeiT3(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 768, 14, 14)) + + # test with output cls_token + cfg = deepcopy(self.cfg) + model = DeiT3(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + cls_token = outs[-1] + self.assertEqual(cls_token.shape, (1, 768)) + + # Test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = [-3, -2, -1] + model = DeiT3(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 3) + for out in outs: + cls_token = out + self.assertEqual(cls_token.shape, (1, 768)) + + # Test forward with dynamic input size + imgs1 = torch.randn(1, 3, 224, 224) + imgs2 = torch.randn(1, 3, 256, 256) + imgs3 = torch.randn(1, 3, 256, 309) + cfg = deepcopy(self.cfg) + cfg['out_type'] = 'featmap' + model = DeiT3(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + featmap = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 16), + math.ceil(imgs.shape[3] / 16)) + self.assertEqual(featmap.shape, (1, 768, *expect_feat_shape)) diff --git a/tests/test_models/test_backbones/test_densenet.py b/tests/test_models/test_backbones/test_densenet.py new file mode 100644 index 0000000..6b02bd1 --- /dev/null +++ b/tests/test_models/test_backbones/test_densenet.py @@ -0,0 +1,95 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmpretrain.models.backbones import DenseNet + + +def test_assertion(): + with pytest.raises(AssertionError): + DenseNet(arch='unknown') + + with pytest.raises(AssertionError): + # DenseNet arch dict should include essential_keys, + DenseNet(arch=dict(channels=[2, 3, 4, 5])) + + with pytest.raises(AssertionError): + # DenseNet out_indices should be valid depth. 
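The checkpoint tests for DeiT and DeiT3 above load weights saved at img_size=224 into a model built at img_size=384, which only works because the position embedding is resized to the new patch grid. A rough sketch of that resize, assuming the usual timm-style recipe (split off the prefix tokens, bicubic-interpolate the grid part, concatenate back); the helper name and interpolation settings are illustrative, not mmpretrain's implementation:

    import torch
    import torch.nn.functional as F

    def resize_pos_embed(pos_embed, src_hw, dst_hw, num_prefix=1):
        # Keep the cls (and, for DeiT, dist) token embeddings untouched,
        # interpolate only the patch-grid part.
        prefix, grid = pos_embed[:, :num_prefix], pos_embed[:, num_prefix:]
        dim = grid.shape[-1]
        grid = grid.reshape(1, *src_hw, dim).permute(0, 3, 1, 2)
        grid = F.interpolate(grid, size=dst_hw, mode='bicubic',
                             align_corners=False)
        grid = grid.permute(0, 2, 3, 1).reshape(1, -1, dim)
        return torch.cat([prefix, grid], dim=1)

    pe = torch.randn(1, 2 + 14 * 14, 192)   # DeiT-tiny grid at 224 / 16
    print(resize_pos_embed(pe, (14, 14), (24, 24), num_prefix=2).shape)
    # torch.Size([1, 578, 192]) -- 2 prefix tokens + 24 * 24 patches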
+ DenseNet(out_indices=-100) + + +def test_DenseNet(): + + # Test forward + model = DenseNet(arch='121') + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 1024, 7, 7]) + + # Test memory efficient option + model = DenseNet(arch='121', memory_efficient=True) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 1024, 7, 7]) + + # Test drop rate + model = DenseNet(arch='121', drop_rate=0.05) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 1024, 7, 7]) + + # Test forward with multiple outputs + model = DenseNet(arch='121', out_indices=(0, 1, 2, 3)) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 128, 28, 28]) + assert feat[1].shape == torch.Size([1, 256, 14, 14]) + assert feat[2].shape == torch.Size([1, 512, 7, 7]) + assert feat[3].shape == torch.Size([1, 1024, 7, 7]) + + # Test with custom arch + model = DenseNet( + arch={ + 'growth_rate': 20, + 'depths': [4, 8, 12, 16, 20], + 'init_channels': 40, + }, + out_indices=(0, 1, 2, 3, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size([1, 60, 28, 28]) + assert feat[1].shape == torch.Size([1, 110, 14, 14]) + assert feat[2].shape == torch.Size([1, 175, 7, 7]) + assert feat[3].shape == torch.Size([1, 247, 3, 3]) + assert feat[4].shape == torch.Size([1, 647, 3, 3]) + + # Test frozen_stages + model = DenseNet(arch='121', out_indices=(0, 1, 2, 3), frozen_stages=2) + model.init_weights() + model.train() + + for i in range(2): + assert not model.stages[i].training + assert not model.transitions[i].training + + for i in range(2, 4): + assert model.stages[i].training + assert model.transitions[i].training diff --git a/tests/test_models/test_backbones/test_edgenext.py b/tests/test_models/test_backbones/test_edgenext.py new file mode 100644 index 0000000..93b48a4 --- /dev/null +++ b/tests/test_models/test_backbones/test_edgenext.py @@ -0,0 +1,84 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
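The 60/110/175/247/647 widths asserted for the custom DenseNet arch above follow from simple bookkeeping: each stage adds depth * growth_rate channels to what it receives, and every transition layer except the one after the last stage halves the count with floor division. A quick sanity check of that arithmetic, under the assumption that this compression rule is what the backbone implements:

    def densenet_channels(init_channels, depths, growth_rate):
        channels, out = init_channels, []
        for i, depth in enumerate(depths):
            channels += depth * growth_rate      # dense blocks concatenate features
            if i != len(depths) - 1:
                channels //= 2                   # transition layer compresses by half
            out.append(channels)
        return out

    print(densenet_channels(40, [4, 8, 12, 16, 20], 20))
    # [60, 110, 175, 247, 647] -- matches the asserted feature widths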
+import pytest +import torch + +from mmpretrain.models.backbones import EdgeNeXt + + +def test_assertion(): + with pytest.raises(AssertionError): + EdgeNeXt(arch='unknown') + + with pytest.raises(AssertionError): + # EdgeNeXt arch dict should include 'embed_dims', + EdgeNeXt(arch=dict(channels=[24, 48, 88, 168])) + + with pytest.raises(AssertionError): + # EdgeNeXt arch dict should include 'embed_dims', + EdgeNeXt(arch=dict(depths=[2, 2, 6, 2], channels=[24, 48, 88, 168])) + + +def test_edgenext(): + + # Test forward + model = EdgeNeXt(arch='xxsmall', out_indices=-1) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 168]) + + # Test forward with multiple outputs + model = EdgeNeXt(arch='xxsmall', out_indices=(0, 1, 2, 3)) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 24]) + assert feat[1].shape == torch.Size([1, 48]) + assert feat[2].shape == torch.Size([1, 88]) + assert feat[3].shape == torch.Size([1, 168]) + + # Test with custom arch + model = EdgeNeXt( + arch={ + 'depths': [2, 3, 4, 5], + 'channels': [20, 40, 80, 160], + 'num_heads': [4, 4, 4, 4] + }, + out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 20]) + assert feat[1].shape == torch.Size([1, 40]) + assert feat[2].shape == torch.Size([1, 80]) + assert feat[3].shape == torch.Size([1, 160]) + + # Test without gap before final norm + model = EdgeNeXt( + arch='small', out_indices=(0, 1, 2, 3), gap_before_final_norm=False) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 48, 56, 56]) + assert feat[1].shape == torch.Size([1, 96, 28, 28]) + assert feat[2].shape == torch.Size([1, 160, 14, 14]) + assert feat[3].shape == torch.Size([1, 304, 7, 7]) + + # Test frozen_stages + model = EdgeNeXt(arch='small', out_indices=(0, 1, 2, 3), frozen_stages=2) + model.init_weights() + model.train() + + for i in range(2): + assert not model.downsample_layers[i].training + assert not model.stages[i].training + + for i in range(2, 4): + assert model.downsample_layers[i].training + assert model.stages[i].training diff --git a/tests/test_models/test_backbones/test_efficientformer.py b/tests/test_models/test_backbones/test_efficientformer.py new file mode 100644 index 0000000..36876dc --- /dev/null +++ b/tests/test_models/test_backbones/test_efficientformer.py @@ -0,0 +1,199 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
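The frozen_stages blocks in the ConvNeXt, EdgeNeXt and DenseNet tests above all assert the same invariant: a frozen stage stays in eval mode after model.train(), and (for the backbones where the tests also check it, such as EfficientNet and CSPNet) its parameters stop requiring gradients. A hedged condensation of that pattern; assert_stage_state is a hypothetical helper, not part of mmpretrain, and the requires_grad check may not apply to every backbone:

    import torch.nn as nn

    def assert_stage_state(stage: nn.Module, frozen: bool) -> None:
        # A frozen stage is kept in eval mode even after model.train() ...
        assert stage.training is (not frozen)
        # ... and, for backbones that also freeze parameters, its weights
        # are excluded from gradient updates.
        for param in stage.parameters():
            assert param.requires_grad is (not frozen)

    # Trivial usage on a fresh (unfrozen) dummy stage:
    stage = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)).train()
    assert_stage_state(stage, frozen=False)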
+from copy import deepcopy +from unittest import TestCase + +import torch +from mmcv.cnn import ConvModule +from torch import nn + +from mmpretrain.models.backbones import EfficientFormer +from mmpretrain.models.backbones.efficientformer import (AttentionWithBias, + Flat, Meta3D, Meta4D) +from mmpretrain.models.backbones.poolformer import Pooling + + +class TestEfficientFormer(TestCase): + + def setUp(self): + self.cfg = dict(arch='l1', drop_path_rate=0.1) + self.arch = EfficientFormer.arch_settings['l1'] + self.custom_arch = { + 'layers': [1, 1, 1, 4], + 'embed_dims': [48, 96, 224, 448], + 'downsamples': [False, True, True, True], + 'vit_num': 2, + } + self.custom_cfg = dict(arch=self.custom_arch) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'Unavailable arch'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + EfficientFormer(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'must have'): + cfg = deepcopy(self.custom_cfg) + cfg['arch'].pop('layers') + EfficientFormer(**cfg) + + # Test vit_num < 0 + with self.assertRaisesRegex(AssertionError, "'vit_num' must"): + cfg = deepcopy(self.custom_cfg) + cfg['arch']['vit_num'] = -1 + EfficientFormer(**cfg) + + # Test vit_num > last stage layers + with self.assertRaisesRegex(AssertionError, "'vit_num' must"): + cfg = deepcopy(self.custom_cfg) + cfg['arch']['vit_num'] = 10 + EfficientFormer(**cfg) + + # Test out_ind + with self.assertRaisesRegex(AssertionError, '"out_indices" must'): + cfg = deepcopy(self.custom_cfg) + cfg['out_indices'] = dict + EfficientFormer(**cfg) + + # Test custom arch + cfg = deepcopy(self.custom_cfg) + model = EfficientFormer(**cfg) + self.assertEqual(len(model.patch_embed), 2) + layers = self.custom_arch['layers'] + downsamples = self.custom_arch['downsamples'] + vit_num = self.custom_arch['vit_num'] + + for i, stage in enumerate(model.network): + if downsamples[i]: + self.assertIsInstance(stage[0], ConvModule) + self.assertEqual(stage[0].conv.stride, (2, 2)) + self.assertTrue(hasattr(stage[0].conv, 'bias')) + self.assertTrue(isinstance(stage[0].bn, nn.BatchNorm2d)) + + if i < len(model.network) - 1: + self.assertIsInstance(stage[-1], Meta4D) + self.assertIsInstance(stage[-1].token_mixer, Pooling) + self.assertEqual(len(stage) - downsamples[i], layers[i]) + elif vit_num > 0: + self.assertIsInstance(stage[-1], Meta3D) + self.assertIsInstance(stage[-1].token_mixer, AttentionWithBias) + self.assertEqual(len(stage) - downsamples[i] - 1, layers[i]) + flat_layer_idx = len(stage) - vit_num - downsamples[i] + self.assertIsInstance(stage[flat_layer_idx], Flat) + count = 0 + for layer in stage: + if isinstance(layer, Meta3D): + count += 1 + self.assertEqual(count, vit_num) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear'), + dict(type='Constant', layer=['LayerScale'], val=1e-4) + ] + model = EfficientFormer(**cfg) + ori_weight = model.patch_embed[0].conv.weight.clone().detach() + ori_ls_weight = model.network[0][-1].ls1.weight.clone().detach() + + model.init_weights() + initialized_weight = model.patch_embed[0].conv.weight + initialized_ls_weight = model.network[0][-1].ls1.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse(torch.allclose(ori_ls_weight, initialized_ls_weight)) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + # test last 
stage output + cfg = deepcopy(self.cfg) + model = EfficientFormer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 448, 49)) + assert hasattr(model, 'norm3') + assert isinstance(getattr(model, 'norm3'), nn.LayerNorm) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 1, 2, 3) + cfg['reshape_last_feat'] = True + model = EfficientFormer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + # Test out features shape + for dim, stride, out in zip(self.arch['embed_dims'], [1, 2, 4, 8], + outs): + self.assertEqual(out.shape, (1, dim, 56 // stride, 56 // stride)) + + # Test norm layer + for i in range(4): + assert hasattr(model, f'norm{i}') + stage_norm = getattr(model, f'norm{i}') + assert isinstance(stage_norm, nn.GroupNorm) + assert stage_norm.num_groups == 1 + + # Test vit_num == 0 + cfg = deepcopy(self.custom_cfg) + cfg['arch']['vit_num'] = 0 + cfg['out_indices'] = (0, 1, 2, 3) + model = EfficientFormer(**cfg) + for i in range(4): + assert hasattr(model, f'norm{i}') + stage_norm = getattr(model, f'norm{i}') + assert isinstance(stage_norm, nn.GroupNorm) + assert stage_norm.num_groups == 1 + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = EfficientFormer(**cfg) + layers = self.arch['layers'] + for i, block in enumerate(model.network): + expect_prob = 0.2 / (sum(layers) - 1) * i + if hasattr(block, 'drop_path'): + if expect_prob == 0: + self.assertIsInstance(block.drop_path, torch.nn.Identity) + else: + self.assertAlmostEqual(block.drop_path.drop_prob, + expect_prob) + + # test with first stage frozen. + cfg = deepcopy(self.cfg) + frozen_stages = 1 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 1, 2, 3) + model = EfficientFormer(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. + self.assertFalse(model.patch_embed.training) + for param in model.patch_embed.parameters(): + self.assertFalse(param.requires_grad) + for i in range(frozen_stages): + module = model.network[i] + for param in module.parameters(): + self.assertFalse(param.requires_grad) + for param in model.norm0.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 4): + module = model.network[i] + for param in module.parameters(): + self.assertTrue(param.requires_grad) + if hasattr(model, f'norm{i}'): + norm = getattr(model, f'norm{i}') + for param in norm.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_efficientnet.py b/tests/test_models/test_backbones/test_efficientnet.py new file mode 100644 index 0000000..37551ff --- /dev/null +++ b/tests/test_models/test_backbones/test_efficientnet.py @@ -0,0 +1,144 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
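Several of the tests above (BEiT, DeiT3, EfficientFormer, and HorNet further down) verify a linear stochastic-depth schedule: the drop probability rises from 0 at the first block to drop_path_rate at the last. A minimal sketch of that schedule, which is what the dpr_inc accumulators and torch.linspace comparisons encode:

    import torch

    def drop_path_schedule(drop_path_rate, num_blocks):
        # Linearly spaced drop probabilities, one per block.
        return [x.item() for x in torch.linspace(0, drop_path_rate, num_blocks)]

    print(drop_path_schedule(0.1, 12))
    # 0.0, ~0.009, ~0.018, ..., 0.1 -- matches dpr_inc = 0.1 / (12 - 1)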
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import EfficientNet + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_efficientnet_backbone(): + archs = ['b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b7', 'b8', 'es', 'em', 'el'] + with pytest.raises(TypeError): + # pretrained must be a string path + model = EfficientNet() + model.init_weights(pretrained=0) + + with pytest.raises(AssertionError): + # arch must in arc_settings + EfficientNet(arch='others') + + for arch in archs: + with pytest.raises(ValueError): + # frozen_stages must less than 7 + EfficientNet(arch=arch, frozen_stages=12) + + # Test EfficientNet + model = EfficientNet() + model.init_weights() + model.train() + + # Test EfficientNet with first stage frozen + frozen_stages = 7 + model = EfficientNet(arch='b0', frozen_stages=frozen_stages) + model.init_weights() + model.train() + for i in range(frozen_stages): + layer = model.layers[i] + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test EfficientNet with norm eval + model = EfficientNet(norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test EfficientNet forward with 'b0' arch + out_channels = [32, 16, 24, 40, 112, 320, 1280] + model = EfficientNet(arch='b0', out_indices=(0, 1, 2, 3, 4, 5, 6)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size([1, out_channels[0], 112, 112]) + assert feat[1].shape == torch.Size([1, out_channels[1], 112, 112]) + assert feat[2].shape == torch.Size([1, out_channels[2], 56, 56]) + assert feat[3].shape == torch.Size([1, out_channels[3], 28, 28]) + assert feat[4].shape == torch.Size([1, out_channels[4], 14, 14]) + assert feat[5].shape == torch.Size([1, out_channels[5], 7, 7]) + assert feat[6].shape == torch.Size([1, out_channels[6], 7, 7]) + + # Test EfficientNet forward with 'b0' arch and GroupNorm + out_channels = [32, 16, 24, 40, 112, 320, 1280] + model = EfficientNet( + arch='b0', + out_indices=(0, 1, 2, 3, 4, 5, 6), + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size([1, out_channels[0], 112, 112]) + assert feat[1].shape == torch.Size([1, out_channels[1], 112, 112]) + assert feat[2].shape == torch.Size([1, out_channels[2], 56, 56]) + assert feat[3].shape == torch.Size([1, out_channels[3], 28, 28]) + assert feat[4].shape == torch.Size([1, out_channels[4], 14, 14]) + assert feat[5].shape == torch.Size([1, out_channels[5], 7, 7]) + assert feat[6].shape == torch.Size([1, out_channels[6], 7, 7]) + + # Test EfficientNet forward with 'es' arch + out_channels = [32, 24, 32, 48, 144, 192, 1280] + model = EfficientNet(arch='es', out_indices=(0, 1, 2, 3, 4, 5, 6)) + model.init_weights() + 
model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size([1, out_channels[0], 112, 112]) + assert feat[1].shape == torch.Size([1, out_channels[1], 112, 112]) + assert feat[2].shape == torch.Size([1, out_channels[2], 56, 56]) + assert feat[3].shape == torch.Size([1, out_channels[3], 28, 28]) + assert feat[4].shape == torch.Size([1, out_channels[4], 14, 14]) + assert feat[5].shape == torch.Size([1, out_channels[5], 7, 7]) + assert feat[6].shape == torch.Size([1, out_channels[6], 7, 7]) + + # Test EfficientNet forward with 'es' arch and GroupNorm + out_channels = [32, 24, 32, 48, 144, 192, 1280] + model = EfficientNet( + arch='es', + out_indices=(0, 1, 2, 3, 4, 5, 6), + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size([1, out_channels[0], 112, 112]) + assert feat[1].shape == torch.Size([1, out_channels[1], 112, 112]) + assert feat[2].shape == torch.Size([1, out_channels[2], 56, 56]) + assert feat[3].shape == torch.Size([1, out_channels[3], 28, 28]) + assert feat[4].shape == torch.Size([1, out_channels[4], 14, 14]) + assert feat[5].shape == torch.Size([1, out_channels[5], 7, 7]) + assert feat[6].shape == torch.Size([1, out_channels[6], 7, 7]) diff --git a/tests/test_models/test_backbones/test_efficientnet_v2.py b/tests/test_models/test_backbones/test_efficientnet_v2.py new file mode 100644 index 0000000..ca5c9b0 --- /dev/null +++ b/tests/test_models/test_backbones/test_efficientnet_v2.py @@ -0,0 +1,150 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
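The GroupNorm variants above work because OpenMMLab backbones build their normalization layers from a norm_cfg dict rather than hard-coding nn.BatchNorm2d, so swapping the config swaps every norm in the network. A small sketch of the underlying helper as I understand it (mmcv.cnn.build_norm_layer returning a (name, module) pair); treat the exact return values as illustrative:

    import torch
    from mmcv.cnn import build_norm_layer

    # Build a GroupNorm layer for 32 channels from a config dict, the same
    # dict passed as norm_cfg in the tests above.
    name, norm = build_norm_layer(
        dict(type='GN', num_groups=2, requires_grad=True), num_features=32)
    print(name, type(norm).__name__)                # e.g. gn GroupNorm
    print(norm(torch.randn(1, 32, 7, 7)).shape)     # torch.Size([1, 32, 7, 7])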
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import EfficientNetV2 + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_efficientnet_v2_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = EfficientNetV2() + model.init_weights(pretrained=0) + + with pytest.raises(AssertionError): + # arch must in arc_settings + EfficientNetV2(arch='others') + + with pytest.raises(ValueError): + # frozen_stages must less than 8 + EfficientNetV2(arch='b1', frozen_stages=12) + + # Test EfficientNetV2 + model = EfficientNetV2() + model.init_weights() + model.train() + x = torch.rand((1, 3, 224, 224)) + model(x) + + # Test EfficientNetV2 with first stage frozen + frozen_stages = 7 + model = EfficientNetV2(arch='b0', frozen_stages=frozen_stages) + model.init_weights() + model.train() + for i in range(frozen_stages): + layer = model.layers[i] + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test EfficientNetV2 with norm eval + model = EfficientNetV2(norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test EfficientNetV2 forward with 'b0' arch + out_channels = [32, 16, 32, 48, 96, 112, 192, 1280] + model = EfficientNetV2(arch='b0', out_indices=(0, 1, 2, 3, 4, 5, 6, 7)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 8 + assert feat[0].shape == torch.Size([1, out_channels[0], 112, 112]) + assert feat[1].shape == torch.Size([1, out_channels[1], 112, 112]) + assert feat[2].shape == torch.Size([1, out_channels[2], 56, 56]) + assert feat[3].shape == torch.Size([1, out_channels[3], 28, 28]) + assert feat[4].shape == torch.Size([1, out_channels[4], 14, 14]) + assert feat[5].shape == torch.Size([1, out_channels[5], 14, 14]) + assert feat[6].shape == torch.Size([1, out_channels[6], 7, 7]) + assert feat[6].shape == torch.Size([1, out_channels[6], 7, 7]) + + # Test EfficientNetV2 forward with 'b0' arch and GroupNorm + out_channels = [32, 16, 32, 48, 96, 112, 192, 1280] + model = EfficientNetV2( + arch='b0', + out_indices=(0, 1, 2, 3, 4, 5, 6, 7), + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 8 + assert feat[0].shape == torch.Size([1, out_channels[0], 32, 32]) + assert feat[1].shape == torch.Size([1, out_channels[1], 32, 32]) + assert feat[2].shape == torch.Size([1, out_channels[2], 16, 16]) + assert feat[3].shape == torch.Size([1, out_channels[3], 8, 8]) + assert feat[4].shape == torch.Size([1, out_channels[4], 4, 4]) + assert feat[5].shape == torch.Size([1, out_channels[5], 4, 4]) + assert feat[6].shape == torch.Size([1, out_channels[6], 2, 2]) + assert feat[7].shape == torch.Size([1, out_channels[7], 2, 2]) + + # Test EfficientNetV2 forward with 'm' arch + out_channels = [24, 24, 48, 80, 
160, 176, 304, 512, 1280] + model = EfficientNetV2(arch='m', out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 9 + assert feat[0].shape == torch.Size([1, out_channels[0], 32, 32]) + assert feat[1].shape == torch.Size([1, out_channels[1], 32, 32]) + assert feat[2].shape == torch.Size([1, out_channels[2], 16, 16]) + assert feat[3].shape == torch.Size([1, out_channels[3], 8, 8]) + assert feat[4].shape == torch.Size([1, out_channels[4], 4, 4]) + assert feat[5].shape == torch.Size([1, out_channels[5], 4, 4]) + assert feat[6].shape == torch.Size([1, out_channels[6], 2, 2]) + assert feat[7].shape == torch.Size([1, out_channels[7], 2, 2]) + assert feat[8].shape == torch.Size([1, out_channels[8], 2, 2]) + + # Test EfficientNetV2 forward with 'm' arch and GroupNorm + out_channels = [24, 24, 48, 80, 160, 176, 304, 512, 1280] + model = EfficientNetV2( + arch='m', + out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8), + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert len(feat) == 9 + assert feat[0].shape == torch.Size([1, out_channels[0], 32, 32]) + assert feat[1].shape == torch.Size([1, out_channels[1], 32, 32]) + assert feat[2].shape == torch.Size([1, out_channels[2], 16, 16]) + assert feat[3].shape == torch.Size([1, out_channels[3], 8, 8]) + assert feat[4].shape == torch.Size([1, out_channels[4], 4, 4]) + assert feat[5].shape == torch.Size([1, out_channels[5], 4, 4]) + assert feat[6].shape == torch.Size([1, out_channels[6], 2, 2]) + assert feat[7].shape == torch.Size([1, out_channels[7], 2, 2]) + assert feat[8].shape == torch.Size([1, out_channels[8], 2, 2]) diff --git a/tests/test_models/test_backbones/test_eva02.py b/tests/test_models/test_backbones/test_eva02.py new file mode 100644 index 0000000..0672754 --- /dev/null +++ b/tests/test_models/test_backbones/test_eva02.py @@ -0,0 +1,143 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
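The long runs of per-index shape assertions in the EfficientNet and EfficientNetV2 tests above all follow one rule: output i has a known channel count and a known overall stride relative to the input. A hedged sketch condensing the pattern; check_multi_scale and the (channels, stride) pairs below are illustrative, not an exact arch table:

    import torch

    def check_multi_scale(feats, in_hw, expected):
        # `expected` is a list of (channels, overall_stride) per output.
        assert len(feats) == len(expected)
        for feat, (channels, stride) in zip(feats, expected):
            assert feat.shape == (feat.shape[0], channels,
                                  in_hw[0] // stride, in_hw[1] // stride)

    feats = (torch.zeros(1, 32, 32, 32), torch.zeros(1, 192, 2, 2))
    check_multi_scale(feats, (64, 64), [(32, 2), (192, 32)])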
+from copy import deepcopy +from unittest import TestCase + +import torch + +from mmpretrain.models.backbones import ViTEVA02 + + +class TestEVA02(TestCase): + + def setUp(self): + self.cfg = dict( + arch='t', + img_size=336, + patch_size=14, + drop_path_rate=0.1, + drop_rate=0.1, + attn_drop_rate=0.2, + proj_drop_rate=0.3, + ) + + def test_structure(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + ViTEVA02(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': int(24 * 4 * 2 / 3) + } + ViTEVA02(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 128, + 'num_layers': 6, + 'num_heads': 16, + 'feedforward_channels': int(128 * 4 * 2 / 3) + } + model = ViTEVA02(**cfg) + self.assertEqual(model.embed_dims, 128) + self.assertEqual(model.num_layers, 6) + for layer in model.layers: + self.assertEqual(layer.attn.num_heads, 16) + + # Test out_indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = {1: 1} + with self.assertRaisesRegex(AssertionError, "get "): + ViTEVA02(**cfg) + cfg['out_indices'] = [0, 13] + with self.assertRaisesRegex(AssertionError, 'Invalid out_indices 13'): + ViTEVA02(**cfg) + + # Test model structure + cfg = deepcopy(self.cfg) + model = ViTEVA02(**cfg) + self.assertEqual(len(model.layers), 12) + self.assertEqual(model.cls_token.shape, (1, 1, 192)) + self.assertEqual(model.pos_embed.shape, (1, 577, 192)) + dpr_inc = 0.1 / (12 - 1) + dpr = 0 + for layer in model.layers: + self.assertEqual(layer.attn.embed_dims, 192) + self.assertEqual(layer.attn.num_heads, 3) + self.assertAlmostEqual(layer.drop_path.drop_prob, dpr) + self.assertAlmostEqual(layer.mlp.dropout_layer.p, 0.1) + self.assertAlmostEqual(layer.attn.attn_drop.p, 0.2) + self.assertAlmostEqual(layer.attn.proj_drop.p, 0.3) + dpr += dpr_inc + + # Test model structure: final_norm + cfg = deepcopy(self.cfg) + cfg['final_norm'] = True + model = ViTEVA02(**cfg) + self.assertNotEqual(model.norm1.__class__, torch.nn.Identity) + + def test_forward(self): + imgs = torch.randn(1, 3, 336, 336) + + # test with_cls_token=False + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['out_type'] = 'cls_token' + with self.assertRaisesRegex(ValueError, 'must be True'): + ViTEVA02(**cfg) + + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['out_type'] = 'raw' + model = ViTEVA02(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 24 * 24, 192)) + + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['out_type'] = 'featmap' + model = ViTEVA02(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 192, 24, 24)) + + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['out_type'] = 'avg_featmap' + model = ViTEVA02(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 192)) + + # test with output cls_token + cfg = deepcopy(self.cfg) + model = ViTEVA02(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + cls_token = 
outs[-1] + self.assertEqual(cls_token.shape, (1, 192)) + + # Test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = [-3, -2, -1] + model = ViTEVA02(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 3) + for out in outs: + self.assertEqual(out.shape, (1, 192)) diff --git a/tests/test_models/test_backbones/test_hornet.py b/tests/test_models/test_backbones/test_hornet.py new file mode 100644 index 0000000..8031d1b --- /dev/null +++ b/tests/test_models/test_backbones/test_hornet.py @@ -0,0 +1,174 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from copy import deepcopy +from itertools import chain +from unittest import TestCase + +import pytest +import torch +from mmengine.utils import digit_version +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm +from torch import nn + +from mmpretrain.models.backbones import HorNet + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +@pytest.mark.skipif( + digit_version(torch.__version__) < digit_version('1.7.0'), + reason='torch.fft is not available before 1.7.0') +class TestHorNet(TestCase): + + def setUp(self): + self.cfg = dict( + arch='t', drop_path_rate=0.1, gap_before_final_norm=False) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + HorNet(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'depths': [1, 1, 1, 1], + 'orders': [1, 1, 1, 1], + } + HorNet(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + base_dim = 64 + depths = [2, 3, 18, 2] + embed_dims = [base_dim, base_dim * 2, base_dim * 4, base_dim * 8] + cfg['arch'] = { + 'base_dim': + base_dim, + 'depths': + depths, + 'orders': [2, 3, 4, 5], + 'dw_cfg': [ + dict(type='DW', kernel_size=7), + dict(type='DW', kernel_size=7), + dict(type='GF', h=14, w=8), + dict(type='GF', h=7, w=4) + ], + } + model = HorNet(**cfg) + + for i in range(len(depths)): + stage = model.stages[i] + self.assertEqual(stage[-1].out_channels, embed_dims[i]) + self.assertEqual(len(stage), depths[i]) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = HorNet(**cfg) + ori_weight = model.downsample_layers[0][0].weight.clone().detach() + + model.init_weights() + initialized_weight = model.downsample_layers[0][0].weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + def test_forward(self): + imgs = torch.randn(3, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = HorNet(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (3, 512, 7, 7)) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 1, 2, 3) + model = HorNet(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for emb_size, stride, out in zip([64, 128, 256, 512], [1, 2, 4, 8], + outs): + self.assertEqual(out.shape, + (3, emb_size, 56 // stride, 56 // stride)) + + # test with dynamic input shape + imgs1 = 
torch.randn(3, 3, 224, 224) + imgs2 = torch.randn(3, 3, 256, 256) + imgs3 = torch.randn(3, 3, 256, 309) + cfg = deepcopy(self.cfg) + model = HorNet(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + expect_feat_shape = (math.floor(imgs.shape[2] / 32), + math.floor(imgs.shape[3] / 32)) + self.assertEqual(feat.shape, (3, 512, *expect_feat_shape)) + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = HorNet(**cfg) + depths = model.arch_settings['depths'] + stages = model.stages + blocks = chain(*[stage for stage in stages]) + total_depth = sum(depths) + dpr = [ + x.item() + for x in torch.linspace(0, cfg['drop_path_rate'], total_depth) + ] + for i, (block, expect_prob) in enumerate(zip(blocks, dpr)): + if expect_prob == 0: + assert isinstance(block.drop_path, nn.Identity) + else: + self.assertAlmostEqual(block.drop_path.drop_prob, expect_prob) + + # test VAN with first stage frozen. + cfg = deepcopy(self.cfg) + frozen_stages = 0 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 1, 2, 3) + model = HorNet(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. + for i in range(frozen_stages + 1): + down = model.downsample_layers[i] + for param in down.parameters(): + self.assertFalse(param.requires_grad) + blocks = model.stages[i] + for param in blocks.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 4): + down = model.downsample_layers[i] + for param in down.parameters(): + self.assertTrue(param.requires_grad) + blocks = model.stages[i] + for param in blocks.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_hrnet.py b/tests/test_models/test_backbones/test_hrnet.py new file mode 100644 index 0000000..96fec46 --- /dev/null +++ b/tests/test_models/test_backbones/test_hrnet.py @@ -0,0 +1,93 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
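The custom HorNet arch above configures its global-filter ('GF') stages with h=14, w=8 and h=7, w=4. Those numbers look like the half-spectrum sizes of a real 2-D FFT over the stage-3 and stage-4 feature maps of a 224 input (14x14 and 7x7), where the last frequency axis keeps only W // 2 + 1 columns; that reading is my inference, not something the test states. A tiny check of the arithmetic:

    import torch

    x = torch.randn(1, 64, 14, 14)
    freq = torch.fft.rfft2(x, dim=(2, 3), norm='ortho')
    print(freq.shape)   # torch.Size([1, 64, 14, 8]) -- 14 // 2 + 1 == 8
    # Likewise a 7x7 map gives a half-spectrum width of 7 // 2 + 1 == 4.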
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import HRNet + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +@pytest.mark.parametrize('base_channels', [18, 30, 32, 40, 44, 48, 64]) +def test_hrnet_arch_zoo(base_channels): + + cfg_ori = dict(arch=f'w{base_channels}') + + # Test HRNet model with input size of 224 + model = HRNet(**cfg_ori) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(3, 3, 224, 224) + outs = model(imgs) + out_channels = base_channels + out_size = 56 + assert isinstance(outs, tuple) + for out in outs: + assert out.shape == (3, out_channels, out_size, out_size) + out_channels = out_channels * 2 + out_size = out_size // 2 + + +def test_hrnet_custom_arch(): + + cfg_ori = dict( + extra=dict( + stage1=dict( + num_modules=1, + num_branches=1, + block='BOTTLENECK', + num_blocks=(4, ), + num_channels=(64, )), + stage2=dict( + num_modules=1, + num_branches=2, + block='BASIC', + num_blocks=(4, 4), + num_channels=(32, 64)), + stage3=dict( + num_modules=4, + num_branches=3, + block='BOTTLENECK', + num_blocks=(4, 4, 2), + num_channels=(32, 64, 128)), + stage4=dict( + num_modules=3, + num_branches=4, + block='BASIC', + num_blocks=(4, 3, 4, 4), + num_channels=(32, 64, 152, 256)), + ), ) + + # Test HRNet model with input size of 224 + model = HRNet(**cfg_ori) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(3, 3, 224, 224) + outs = model(imgs) + out_channels = (32, 64, 152, 256) + out_size = 56 + assert isinstance(outs, tuple) + for out, out_channel in zip(outs, out_channels): + assert out.shape == (3, out_channel, out_size, out_size) + out_size = out_size // 2 diff --git a/tests/test_models/test_backbones/test_inception_v3.py b/tests/test_models/test_backbones/test_inception_v3.py new file mode 100644 index 0000000..4450dd2 --- /dev/null +++ b/tests/test_models/test_backbones/test_inception_v3.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from types import MethodType +from unittest import TestCase + +import torch + +from mmpretrain.models import InceptionV3 +from mmpretrain.models.backbones.inception_v3 import InceptionAux + + +class TestInceptionV3(TestCase): + DEFAULT_ARGS = dict(num_classes=10, aux_logits=False, dropout=0.) + + def test_structure(self): + # Test without auxiliary branch. + model = InceptionV3(**self.DEFAULT_ARGS) + self.assertIsNone(model.AuxLogits) + + # Test with auxiliary branch. 
+ cfg = {**self.DEFAULT_ARGS, 'aux_logits': True} + model = InceptionV3(**cfg) + self.assertIsInstance(model.AuxLogits, InceptionAux) + + def test_init_weights(self): + cfg = {**self.DEFAULT_ARGS, 'aux_logits': True} + model = InceptionV3(**cfg) + + init_info = {} + + def get_init_info(self, *args): + for name, param in self.named_parameters(): + init_info[name] = ''.join( + self._params_init_info[param]['init_info']) + + model._dump_init_info = MethodType(get_init_info, model) + model.init_weights() + self.assertIn('TruncNormalInit: a=-2, b=2, mean=0, std=0.1, bias=0', + init_info['Conv2d_1a_3x3.conv.weight']) + self.assertIn('TruncNormalInit: a=-2, b=2, mean=0, std=0.01, bias=0', + init_info['AuxLogits.conv0.conv.weight']) + self.assertIn('TruncNormalInit: a=-2, b=2, mean=0, std=0.001, bias=0', + init_info['AuxLogits.fc.weight']) + + def test_forward(self): + inputs = torch.rand(2, 3, 299, 299) + + model = InceptionV3(**self.DEFAULT_ARGS) + aux_out, out = model(inputs) + self.assertIsNone(aux_out) + self.assertEqual(out.shape, (2, 10)) + + cfg = {**self.DEFAULT_ARGS, 'aux_logits': True} + model = InceptionV3(**cfg) + aux_out, out = model(inputs) + self.assertEqual(aux_out.shape, (2, 10)) + self.assertEqual(out.shape, (2, 10)) diff --git a/tests/test_models/test_backbones/test_levit.py b/tests/test_models/test_backbones/test_levit.py new file mode 100644 index 0000000..af274f1 --- /dev/null +++ b/tests/test_models/test_backbones/test_levit.py @@ -0,0 +1,169 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import tempfile + +import pytest +import torch +from mmengine.runner import load_checkpoint, save_checkpoint +from torch import nn +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import levit +from mmpretrain.models.backbones.levit import (Attention, AttentionSubsample, + LeViT) + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def is_levit_block(modules): + if isinstance(modules, (AttentionSubsample, Attention)): + return True + return False + + +def test_levit_attention(): + block = Attention(128, 16, 4, 2, act_cfg=dict(type='HSwish')) + block.eval() + x = torch.randn(1, 196, 128) + y = block(x) + assert y.shape == x.shape + assert hasattr(block, 'ab') + assert block.key_dim == 16 + assert block.attn_ratio == 2 + assert block.num_heads == 4 + assert block.qkv.linear.in_features == 128 + + +def test_levit(): + with pytest.raises(TypeError): + # arch must be str or dict + LeViT(arch=[4, 6, 16, 1]) + + with pytest.raises(AssertionError): + # arch must in arch_settings + LeViT(arch='512') + + with pytest.raises(AssertionError): + arch = dict(num_blocks=[2, 4, 14, 1]) + LeViT(arch=arch) + + # Test out_indices not type of int or Sequence + with pytest.raises(TypeError): + LeViT('128s', out_indices=dict()) + + # Test max(out_indices) < len(arch['num_blocks']) + with pytest.raises(AssertionError): + LeViT('128s', out_indices=(3, )) + + model = LeViT('128s', out_indices=(-1, )) + assert model.out_indices == [2] + + model = LeViT(arch='256', drop_path_rate=0.1) + model.eval() + assert model.key_dims == [32, 32, 32] + assert model.embed_dims == [256, 384, 512] + assert model.num_heads == 
[4, 6, 8] + assert model.depths == [4, 4, 4] + assert model.drop_path_rate == 0.1 + assert isinstance(model.stages[0][0].block.qkv, levit.LinearBatchNorm) + assert isinstance(model.patch_embed.patch_embed[0], + levit.ConvolutionBatchNorm) + + model = LeViT( + arch='128s', + hybrid_backbone=lambda embed_dims: nn.Conv2d( + embed_dims, embed_dims, kernel_size=2)) + model.eval() + assert isinstance(model.patch_embed, nn.Conv2d) + + # Test eval of "train" mode and "deploy" mode + model = LeViT(arch='128s', deploy=True) + model.eval() + assert not isinstance(model.stages[0][0].block.qkv, levit.LinearBatchNorm) + assert not isinstance(model.patch_embed.patch_embed[0], + levit.ConvolutionBatchNorm) + assert isinstance(model.stages[0][0].block.qkv, nn.Linear) + assert isinstance(model.patch_embed.patch_embed[0], nn.Conv2d) + + # Test LeViT forward with layer 2 forward + model = LeViT('128s', out_indices=(2, )) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert isinstance(feat, tuple) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 384, 4, 4)) + + # Test LeViT forward + arch_settings = { + '128s': dict(out_channels=[128, 256, 384]), + '128': dict(out_channels=[128, 256, 384]), + '192': dict(out_channels=[192, 288, 384]), + '256': dict(out_channels=[256, 384, 512]), + '384': dict(out_channels=[384, 512, 768]) + } + + choose_models = ['128s', '192', '256', '384'] + # Test LeViT model forward + for model_name, model_arch in arch_settings.items(): + if model_name not in choose_models: + continue + model = LeViT(model_name, out_indices=(0, 1, 2)) + model.init_weights() + + # Test Norm + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + model.train() + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[0].shape == torch.Size( + (1, model_arch['out_channels'][0], 14, 14)) + assert feat[1].shape == torch.Size( + (1, model_arch['out_channels'][1], 7, 7)) + assert feat[2].shape == torch.Size( + (1, model_arch['out_channels'][2], 4, 4)) + + +def test_load_deploy_LeViT(): + # Test output before and load from deploy checkpoint + model = LeViT('128s', out_indices=(0, 1, 2)) + inputs = torch.randn((1, 3, 224, 224)) + tmpdir = tempfile.gettempdir() + ckpt_path = os.path.join(tmpdir, 'ckpt.pth') + model.switch_to_deploy() + model.eval() + outputs = model(inputs) + + model_deploy = LeViT('128s', out_indices=(0, 1, 2), deploy=True) + save_checkpoint(model.state_dict(), ckpt_path) + load_checkpoint(model_deploy, ckpt_path) + + outputs_load = model_deploy(inputs) + for feat, feat_load in zip(outputs, outputs_load): + assert torch.allclose(feat, feat_load) + os.remove(ckpt_path) diff --git a/tests/test_models/test_backbones/test_mixmim.py b/tests/test_models/test_backbones/test_mixmim.py new file mode 100644 index 0000000..8d34963 --- /dev/null +++ b/tests/test_models/test_backbones/test_mixmim.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
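+# Unit tests for the MixMIMTransformer backbone: structure checks for the 'b' arch and a forward pass that returns the averaged token.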
+from copy import deepcopy +from unittest import TestCase + +import torch + +from mmpretrain.models.backbones import MixMIMTransformer + + +class TestMixMIM(TestCase): + + def setUp(self): + self.cfg = dict(arch='b', drop_rate=0.0, drop_path_rate=0.1) + + def test_structure(self): + + # Test custom arch + cfg = deepcopy(self.cfg) + + model = MixMIMTransformer(**cfg) + self.assertEqual(model.embed_dims, 128) + self.assertEqual(sum(model.depths), 24) + self.assertIsNotNone(model.absolute_pos_embed) + + num_heads = [4, 8, 16, 32] + for i, layer in enumerate(model.layers): + self.assertEqual(layer.blocks[0].num_heads, num_heads[i]) + self.assertEqual(layer.blocks[0].ffn.feedforward_channels, + 128 * (2**i) * 4) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = MixMIMTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + averaged_token = outs[-1] + self.assertEqual(averaged_token.shape, (1, 1024)) diff --git a/tests/test_models/test_backbones/test_mlp_mixer.py b/tests/test_models/test_backbones/test_mlp_mixer.py new file mode 100644 index 0000000..8a4f176 --- /dev/null +++ b/tests/test_models/test_backbones/test_mlp_mixer.py @@ -0,0 +1,119 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from unittest import TestCase + +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import MlpMixer + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +class TestMLPMixer(TestCase): + + def setUp(self): + self.cfg = dict( + arch='b', + img_size=224, + patch_size=16, + drop_rate=0.1, + init_cfg=[ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ]) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + MlpMixer(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 24, + 'num_layers': 16, + 'tokens_mlp_dims': 4096 + } + MlpMixer(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 128, + 'num_layers': 6, + 'tokens_mlp_dims': 256, + 'channels_mlp_dims': 1024 + } + model = MlpMixer(**cfg) + self.assertEqual(model.embed_dims, 128) + self.assertEqual(model.num_layers, 6) + for layer in model.layers: + self.assertEqual(layer.token_mix.feedforward_channels, 256) + self.assertEqual(layer.channel_mix.feedforward_channels, 1024) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = MlpMixer(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + # test forward with single out indices + cfg 
= deepcopy(self.cfg) + model = MlpMixer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 768, 196)) + + # test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = [-3, -2, -1] + model = MlpMixer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 3) + for feat in outs: + self.assertEqual(feat.shape, (1, 768, 196)) + + # test with invalid input shape + imgs2 = torch.randn(1, 3, 256, 256) + cfg = deepcopy(self.cfg) + model = MlpMixer(**cfg) + with self.assertRaisesRegex(AssertionError, 'dynamic input shape.'): + model(imgs2) diff --git a/tests/test_models/test_backbones/test_mobilenet_v2.py b/tests/test_models/test_backbones/test_mobilenet_v2.py new file mode 100644 index 0000000..ffe43ff --- /dev/null +++ b/tests/test_models/test_backbones/test_mobilenet_v2.py @@ -0,0 +1,259 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import MobileNetV2 +from mmpretrain.models.backbones.mobilenet_v2 import InvertedResidual + + +def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (InvertedResidual, )): + return True + return False + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_mobilenetv2_invertedresidual(): + + with pytest.raises(AssertionError): + # stride must be in [1, 2] + InvertedResidual(16, 24, stride=3, expand_ratio=6) + + # Test InvertedResidual with checkpoint forward, stride=1 + block = InvertedResidual(16, 24, stride=1, expand_ratio=6) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 56, 56)) + + # Test InvertedResidual with expand_ratio=1 + block = InvertedResidual(16, 16, stride=1, expand_ratio=1) + assert len(block.conv) == 2 + + # Test InvertedResidual with use_res_connect + block = InvertedResidual(16, 16, stride=1, expand_ratio=6) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert block.use_res_connect is True + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual with checkpoint forward, stride=2 + block = InvertedResidual(16, 24, stride=2, expand_ratio=6) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 28, 28)) + + # Test InvertedResidual with checkpoint forward + block = InvertedResidual(16, 24, stride=1, expand_ratio=6, with_cp=True) + assert block.with_cp + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 56, 56)) + + # Test InvertedResidual with act_cfg=dict(type='ReLU') + block = InvertedResidual( + 16, 24, stride=1, expand_ratio=6, act_cfg=dict(type='ReLU')) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 56, 56)) + + +def test_mobilenetv2_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = MobileNetV2() + model.init_weights(pretrained=0) + + with pytest.raises(ValueError): + # frozen_stages must in range(-1, 8) + 
MobileNetV2(frozen_stages=8) + + with pytest.raises(ValueError): + # out_indices in range(0, 8) + MobileNetV2(out_indices=[8]) + + # Test MobileNetV2 with first stage frozen + frozen_stages = 1 + model = MobileNetV2(frozen_stages=frozen_stages) + model.init_weights() + model.train() + + for mod in model.conv1.modules(): + for param in mod.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test MobileNetV2 with norm_eval=True + model = MobileNetV2(norm_eval=True) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), False) + + # Test MobileNetV2 forward with widen_factor=1.0 + model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8)) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 8 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) + assert feat[7].shape == torch.Size((1, 1280, 7, 7)) + + # Test MobileNetV2 forward with widen_factor=0.5 + model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 8, 112, 112)) + assert feat[1].shape == torch.Size((1, 16, 56, 56)) + assert feat[2].shape == torch.Size((1, 16, 28, 28)) + assert feat[3].shape == torch.Size((1, 32, 14, 14)) + assert feat[4].shape == torch.Size((1, 48, 14, 14)) + assert feat[5].shape == torch.Size((1, 80, 7, 7)) + assert feat[6].shape == torch.Size((1, 160, 7, 7)) + + # Test MobileNetV2 forward with widen_factor=2.0 + model = MobileNetV2(widen_factor=2.0) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 2560, 7, 7)) + + # Test MobileNetV2 forward with out_indices=None + model = MobileNetV2(widen_factor=1.0) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 1280, 7, 7)) + + # Test MobileNetV2 forward with dict(type='ReLU') + model = MobileNetV2( + widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) + + # Test MobileNetV2 with BatchNorm forward + model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + 
model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) + + # Test MobileNetV2 with GroupNorm forward + model = MobileNetV2( + widen_factor=1.0, + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), + out_indices=range(0, 7)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) + + # Test MobileNetV2 with layers 1, 3, 5 out forward + model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 32, 28, 28)) + assert feat[2].shape == torch.Size((1, 96, 14, 14)) + + # Test MobileNetV2 with checkpoint forward + model = MobileNetV2( + widen_factor=1.0, with_cp=True, out_indices=range(0, 7)) + for m in model.modules(): + if is_block(m): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 7 + assert feat[0].shape == torch.Size((1, 16, 112, 112)) + assert feat[1].shape == torch.Size((1, 24, 56, 56)) + assert feat[2].shape == torch.Size((1, 32, 28, 28)) + assert feat[3].shape == torch.Size((1, 64, 14, 14)) + assert feat[4].shape == torch.Size((1, 96, 14, 14)) + assert feat[5].shape == torch.Size((1, 160, 7, 7)) + assert feat[6].shape == torch.Size((1, 320, 7, 7)) diff --git a/tests/test_models/test_backbones/test_mobilenet_v3.py b/tests/test_models/test_backbones/test_mobilenet_v3.py new file mode 100644 index 0000000..560b948 --- /dev/null +++ b/tests/test_models/test_backbones/test_mobilenet_v3.py @@ -0,0 +1,175 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
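+# Unit tests for the MobileNetV3 backbone: invalid-config errors, frozen stages, norm_eval, and per-stage output shapes for the small and large archs.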
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import MobileNetV3 +from mmpretrain.models.utils import InvertedResidual + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_mobilenetv3_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = MobileNetV3() + model.init_weights(pretrained=0) + + with pytest.raises(AssertionError): + # arch must in [small, large] + MobileNetV3(arch='others') + + with pytest.raises(ValueError): + # frozen_stages must less than 13 when arch is small + MobileNetV3(arch='small', frozen_stages=13) + + with pytest.raises(ValueError): + # frozen_stages must less than 17 when arch is large + MobileNetV3(arch='large', frozen_stages=17) + + with pytest.raises(ValueError): + # max out_indices must less than 13 when arch is small + MobileNetV3(arch='small', out_indices=(13, )) + + with pytest.raises(ValueError): + # max out_indices must less than 17 when arch is large + MobileNetV3(arch='large', out_indices=(17, )) + + # Test MobileNetV3 + model = MobileNetV3() + model.init_weights() + model.train() + + # Test MobileNetV3 with first stage frozen + frozen_stages = 1 + model = MobileNetV3(frozen_stages=frozen_stages) + model.init_weights() + model.train() + for i in range(0, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test MobileNetV3 with norm eval + model = MobileNetV3(norm_eval=True, out_indices=range(0, 12)) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test MobileNetV3 forward with small arch + model = MobileNetV3(out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 13 + assert feat[0].shape == torch.Size([1, 16, 112, 112]) + assert feat[1].shape == torch.Size([1, 16, 56, 56]) + assert feat[2].shape == torch.Size([1, 24, 28, 28]) + assert feat[3].shape == torch.Size([1, 24, 28, 28]) + assert feat[4].shape == torch.Size([1, 40, 14, 14]) + assert feat[5].shape == torch.Size([1, 40, 14, 14]) + assert feat[6].shape == torch.Size([1, 40, 14, 14]) + assert feat[7].shape == torch.Size([1, 48, 14, 14]) + assert feat[8].shape == torch.Size([1, 48, 14, 14]) + assert feat[9].shape == torch.Size([1, 96, 7, 7]) + assert feat[10].shape == torch.Size([1, 96, 7, 7]) + assert feat[11].shape == torch.Size([1, 96, 7, 7]) + assert feat[12].shape == torch.Size([1, 576, 7, 7]) + + # Test MobileNetV3 forward with small arch and GroupNorm + model = MobileNetV3( + out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True)) + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 13 + assert feat[0].shape == torch.Size([1, 16, 112, 112]) + assert feat[1].shape == torch.Size([1, 
16, 56, 56]) + assert feat[2].shape == torch.Size([1, 24, 28, 28]) + assert feat[3].shape == torch.Size([1, 24, 28, 28]) + assert feat[4].shape == torch.Size([1, 40, 14, 14]) + assert feat[5].shape == torch.Size([1, 40, 14, 14]) + assert feat[6].shape == torch.Size([1, 40, 14, 14]) + assert feat[7].shape == torch.Size([1, 48, 14, 14]) + assert feat[8].shape == torch.Size([1, 48, 14, 14]) + assert feat[9].shape == torch.Size([1, 96, 7, 7]) + assert feat[10].shape == torch.Size([1, 96, 7, 7]) + assert feat[11].shape == torch.Size([1, 96, 7, 7]) + assert feat[12].shape == torch.Size([1, 576, 7, 7]) + + # Test MobileNetV3 forward with large arch + model = MobileNetV3( + arch='large', + out_indices=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 17 + assert feat[0].shape == torch.Size([1, 16, 112, 112]) + assert feat[1].shape == torch.Size([1, 16, 112, 112]) + assert feat[2].shape == torch.Size([1, 24, 56, 56]) + assert feat[3].shape == torch.Size([1, 24, 56, 56]) + assert feat[4].shape == torch.Size([1, 40, 28, 28]) + assert feat[5].shape == torch.Size([1, 40, 28, 28]) + assert feat[6].shape == torch.Size([1, 40, 28, 28]) + assert feat[7].shape == torch.Size([1, 80, 14, 14]) + assert feat[8].shape == torch.Size([1, 80, 14, 14]) + assert feat[9].shape == torch.Size([1, 80, 14, 14]) + assert feat[10].shape == torch.Size([1, 80, 14, 14]) + assert feat[11].shape == torch.Size([1, 112, 14, 14]) + assert feat[12].shape == torch.Size([1, 112, 14, 14]) + assert feat[13].shape == torch.Size([1, 160, 7, 7]) + assert feat[14].shape == torch.Size([1, 160, 7, 7]) + assert feat[15].shape == torch.Size([1, 160, 7, 7]) + assert feat[16].shape == torch.Size([1, 960, 7, 7]) + + # Test MobileNetV3 forward with large arch + model = MobileNetV3(arch='large', out_indices=(0, )) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 16, 112, 112]) + + # Test MobileNetV3 with checkpoint forward + model = MobileNetV3(with_cp=True) + for m in model.modules(): + if isinstance(m, InvertedResidual): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 576, 7, 7]) diff --git a/tests/test_models/test_backbones/test_mobileone.py b/tests/test_models/test_backbones/test_mobileone.py new file mode 100644 index 0000000..93a13f1 --- /dev/null +++ b/tests/test_models/test_backbones/test_mobileone.py @@ -0,0 +1,337 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
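+# Unit tests for the MobileOne backbone: MobileOneBlock re-parameterization (switch_to_deploy) equivalence and backbone forward output shapes.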
+import os +import tempfile + +import pytest +import torch +from mmengine.runner import load_checkpoint, save_checkpoint +from torch import nn +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import MobileOne +from mmpretrain.models.backbones.mobileone import MobileOneBlock +from mmpretrain.models.utils import SELayer + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def is_mobileone_block(modules): + if isinstance(modules, MobileOneBlock): + return True + return False + + +def test_mobileoneblock(): + # Test MobileOneBlock with kernel_size 3 + block = MobileOneBlock(5, 10, 3, 1, stride=1, groups=5) + block.eval() + x = torch.randn(1, 5, 16, 16) + y = block(x) + assert block.branch_norm is None + assert not hasattr(block, 'branch_reparam') + assert hasattr(block, 'branch_scale') + assert hasattr(block, 'branch_conv_list') + assert hasattr(block, 'branch_norm') + assert block.branch_conv_list[0].conv.kernel_size == (3, 3) + assert block.branch_conv_list[0].conv.groups == 5 + assert block.se_cfg is None + assert y.shape == torch.Size((1, 10, 16, 16)) + block.switch_to_deploy() + assert hasattr(block, 'branch_reparam') + assert block.branch_reparam.kernel_size == (3, 3) + assert block.branch_reparam.groups == 5 + assert block.deploy is True + y_deploy = block(x) + assert y_deploy.shape == torch.Size((1, 10, 16, 16)) + assert torch.allclose(y, y_deploy, atol=1e-5, rtol=1e-4) + + # Test MobileOneBlock with num_con = 4 + block = MobileOneBlock(5, 10, 3, 4, stride=1, groups=5) + block.eval() + x = torch.randn(1, 5, 16, 16) + y = block(x) + assert block.branch_norm is None + assert not hasattr(block, 'branch_reparam') + assert hasattr(block, 'branch_scale') + assert hasattr(block, 'branch_conv_list') + assert hasattr(block, 'branch_norm') + assert block.branch_conv_list[0].conv.kernel_size == (3, 3) + assert block.branch_conv_list[0].conv.groups == 5 + assert len(block.branch_conv_list) == 4 + assert block.se_cfg is None + assert y.shape == torch.Size((1, 10, 16, 16)) + block.switch_to_deploy() + assert hasattr(block, 'branch_reparam') + assert block.branch_reparam.kernel_size == (3, 3) + assert block.branch_reparam.groups == 5 + assert block.deploy is True + y_deploy = block(x) + assert y_deploy.shape == torch.Size((1, 10, 16, 16)) + assert torch.allclose(y, y_deploy, atol=1e-5, rtol=1e-4) + + # Test MobileOneBlock with kernel_size 1 + block = MobileOneBlock(5, 10, 1, 1, stride=1, padding=0) + block.eval() + x = torch.randn(1, 5, 16, 16) + y = block(x) + assert block.branch_norm is None + assert not hasattr(block, 'branch_reparam') + assert hasattr(block, 'branch_scale') + assert hasattr(block, 'branch_conv_list') + assert hasattr(block, 'branch_norm') + assert block.branch_conv_list[0].conv.kernel_size == (1, 1) + assert block.branch_conv_list[0].conv.groups == 1 + assert len(block.branch_conv_list) == 1 + assert block.se_cfg is None + assert y.shape == torch.Size((1, 10, 16, 16)) + block.switch_to_deploy() + assert hasattr(block, 'branch_reparam') + assert block.branch_reparam.kernel_size == (1, 1) + assert block.branch_reparam.groups == 1 + assert block.deploy is True + y_deploy = block(x) + assert 
y_deploy.shape == torch.Size((1, 10, 16, 16)) + assert torch.allclose(y, y_deploy, atol=1e-5, rtol=1e-4) + + # Test MobileOneBlock with stride = 2 + block = MobileOneBlock(10, 10, 3, 4, stride=2, groups=10) + x = torch.randn(1, 10, 16, 16) + block.eval() + y = block(x) + assert block.branch_norm is None + assert not hasattr(block, 'branch_reparam') + assert hasattr(block, 'branch_scale') + assert hasattr(block, 'branch_conv_list') + assert hasattr(block, 'branch_norm') + assert block.branch_conv_list[0].conv.kernel_size == (3, 3) + assert block.branch_conv_list[0].conv.groups == 10 + assert len(block.branch_conv_list) == 4 + assert block.se_cfg is None + assert y.shape == torch.Size((1, 10, 8, 8)) + block.switch_to_deploy() + assert hasattr(block, 'branch_reparam') + assert block.branch_reparam.kernel_size == (3, 3) + assert block.branch_reparam.groups == 10 + assert block.deploy is True + y_deploy = block(x) + assert y_deploy.shape == torch.Size((1, 10, 8, 8)) + assert torch.allclose(y, y_deploy, atol=1e-5, rtol=1e-4) + + # # Test MobileOneBlock with padding == dilation == 2 + block = MobileOneBlock( + 10, 10, 3, 4, stride=1, groups=10, padding=2, dilation=2) + x = torch.randn(1, 10, 16, 16) + block.eval() + y = block(x) + assert not hasattr(block, 'branch_reparam') + assert hasattr(block, 'branch_scale') + assert hasattr(block, 'branch_conv_list') + assert hasattr(block, 'branch_norm') + assert block.branch_conv_list[0].conv.kernel_size == (3, 3) + assert block.branch_conv_list[0].conv.groups == 10 + assert len(block.branch_conv_list) == 4 + assert block.se_cfg is None + assert y.shape == torch.Size((1, 10, 16, 16)) + block.switch_to_deploy() + assert hasattr(block, 'branch_reparam') + assert block.branch_reparam.kernel_size == (3, 3) + assert block.branch_reparam.groups == 10 + assert block.deploy is True + y_deploy = block(x) + assert y_deploy.shape == torch.Size((1, 10, 16, 16)) + assert torch.allclose(y, y_deploy, atol=1e-5, rtol=1e-4) + + # Test MobileOneBlock with se + se_cfg = dict(ratio=4, divisor=1) + block = MobileOneBlock(32, 32, 3, 4, stride=1, se_cfg=se_cfg, groups=32) + x = torch.randn(1, 32, 16, 16) + block.eval() + y = block(x) + assert not hasattr(block, 'branch_reparam') + assert hasattr(block, 'branch_scale') + assert hasattr(block, 'branch_conv_list') + assert hasattr(block, 'branch_norm') + assert block.branch_conv_list[0].conv.kernel_size == (3, 3) + assert block.branch_conv_list[0].conv.groups == 32 + assert len(block.branch_conv_list) == 4 + assert isinstance(block.se, SELayer) + assert y.shape == torch.Size((1, 32, 16, 16)) + block.switch_to_deploy() + assert hasattr(block, 'branch_reparam') + assert block.branch_reparam.kernel_size == (3, 3) + assert block.branch_reparam.groups == 32 + assert block.deploy is True + y_deploy = block(x) + assert y_deploy.shape == torch.Size((1, 32, 16, 16)) + assert torch.allclose(y, y_deploy, atol=1e-5, rtol=1e-4) + + # Test MobileOneBlock with deploy == True + se_cfg = dict(ratio=4, divisor=1) + block = MobileOneBlock( + 32, 32, 3, 4, stride=1, se_cfg=se_cfg, groups=32, deploy=True) + x = torch.randn(1, 32, 16, 16) + block.eval() + assert hasattr(block, 'branch_reparam') + assert block.branch_reparam.kernel_size == (3, 3) + assert block.branch_reparam.groups == 32 + assert isinstance(block.se, SELayer) + assert block.deploy is True + y = block(x) + assert y.shape == torch.Size((1, 32, 16, 16)) + + +def test_mobileone_backbone(): + with pytest.raises(TypeError): + # arch must be str or dict + MobileOne(arch=[4, 6, 16, 1]) + + 
with pytest.raises(AssertionError): + # arch must be in arch_settings + MobileOne(arch='S3') + + with pytest.raises(KeyError): + arch = dict(num_blocks=[2, 4, 14, 1]) + MobileOne(arch=arch) + + # Test len(arch['num_blocks']) == len(arch['width_factor']) + with pytest.raises(AssertionError): + arch = dict( + num_blocks=[2, 4, 14, 1], + width_factor=[0.75, 0.75, 0.75], + num_conv_branches=[1, 1, 1, 1], + num_se_blocks=[0, 0, 5, 1]) + MobileOne(arch=arch) + + # Test out_indices not type of int or Sequence + with pytest.raises(AssertionError): + MobileOne('s0', out_indices=dict()) + + # Test max(out_indices) < len(arch['num_blocks']) + with pytest.raises(AssertionError): + MobileOne('s0', out_indices=(5, )) + + # Test MobileOne norm state + model = MobileOne('s0') + model.train() + assert check_norm_state(model.modules(), True) + + # Test MobileOne with first stage frozen + frozen_stages = 1 + model = MobileOne('s0', frozen_stages=frozen_stages) + model.train() + for param in model.stage0.parameters(): + assert param.requires_grad is False + for i in range(0, frozen_stages): + stage_name = model.stages[i] + stage = model.__getattr__(stage_name) + for mod in stage: + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in stage.parameters(): + assert param.requires_grad is False + + # Test MobileOne with norm_eval + model = MobileOne('s0', norm_eval=True) + model.train() + assert check_norm_state(model.modules(), False) + + # Test MobileOne forward with layer 3 output + model = MobileOne('s0', out_indices=(3, )) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert isinstance(feat, tuple) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 1024, 7, 7)) + + # Test MobileOne forward + arch_settings = { + 's0': dict(out_channels=[48, 128, 256, 1024], ), + 's1': dict(out_channels=[96, 192, 512, 1280]), + 's2': dict(out_channels=[96, 256, 640, 2048]), + 's3': dict(out_channels=[128, 320, 768, 2048], ), + 's4': dict(out_channels=[192, 448, 896, 2048], ) + } + + choose_models = ['s0', 's1', 's4'] + # Test MobileOne model forward + for model_name, model_arch in arch_settings.items(): + if model_name not in choose_models: + continue + model = MobileOne(model_name, out_indices=(0, 1, 2, 3)) + model.init_weights() + + # Test Norm + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + model.train() + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[0].shape == torch.Size( + (1, model_arch['out_channels'][0], 56, 56)) + assert feat[1].shape == torch.Size( + (1, model_arch['out_channels'][1], 28, 28)) + assert feat[2].shape == torch.Size( + (1, model_arch['out_channels'][2], 14, 14)) + assert feat[3].shape == torch.Size( + (1, model_arch['out_channels'][3], 7, 7)) + + # Test eval of "train" mode and "deploy" mode + gap = nn.AdaptiveAvgPool2d(output_size=(1)) + fc = nn.Linear(model_arch['out_channels'][3], 10) + model.eval() + feat = model(imgs) + pred = fc(gap(feat[3]).flatten(1)) + model.switch_to_deploy() + for m in model.modules(): + if isinstance(m, MobileOneBlock): + assert m.deploy is True + feat_deploy = model(imgs) + pred_deploy = fc(gap(feat_deploy[3]).flatten(1)) + for i in range(4): + assert torch.allclose(feat[i], feat_deploy[i]) + assert torch.allclose(pred, pred_deploy) + + +def test_load_deploy_mobileone(): + # Test output before and load from deploy
checkpoint + model = MobileOne('s0', out_indices=(0, 1, 2, 3)) + inputs = torch.randn((1, 3, 224, 224)) + tmpdir = tempfile.gettempdir() + ckpt_path = os.path.join(tmpdir, 'ckpt.pth') + model.switch_to_deploy() + model.eval() + outputs = model(inputs) + + model_deploy = MobileOne('s0', out_indices=(0, 1, 2, 3), deploy=True) + save_checkpoint(model.state_dict(), ckpt_path) + load_checkpoint(model_deploy, ckpt_path) + + outputs_load = model_deploy(inputs) + for feat, feat_load in zip(outputs, outputs_load): + assert torch.allclose(feat, feat_load) + os.remove(ckpt_path) diff --git a/tests/test_models/test_backbones/test_mobilevit.py b/tests/test_models/test_backbones/test_mobilevit.py new file mode 100644 index 0000000..2b7d8d9 --- /dev/null +++ b/tests/test_models/test_backbones/test_mobilevit.py @@ -0,0 +1,86 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmpretrain.models.backbones import MobileViT + + +def test_assertion(): + with pytest.raises(AssertionError): + MobileViT(arch='unknown') + + with pytest.raises(AssertionError): + # MobileViT out_indices should be valid depth. + MobileViT(out_indices=-100) + + +def test_mobilevit(): + + # Test forward + model = MobileViT(arch='small') + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 256, 256) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 640, 8, 8]) + + # Test custom arch + model = MobileViT(arch=[ + ['mobilenetv2', 16, 1, 1, 2], + ['mobilenetv2', 24, 2, 3, 2], + ['mobilevit', 48, 2, 64, 128, 2, 2], + ['mobilevit', 64, 2, 80, 160, 4, 2], + ['mobilevit', 80, 2, 96, 192, 3, 2], + ]) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 256, 256) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 320, 8, 8]) + + # Test last_exp_factor + model = MobileViT(arch='small', last_exp_factor=8) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 256, 256) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 1280, 8, 8]) + + # Test stem_channels + model = MobileViT(arch='small', stem_channels=32) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 256, 256) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 640, 8, 8]) + + # Test forward with multiple outputs + model = MobileViT(arch='small', out_indices=range(5)) + + imgs = torch.randn(1, 3, 256, 256) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == torch.Size([1, 32, 128, 128]) + assert feat[1].shape == torch.Size([1, 64, 64, 64]) + assert feat[2].shape == torch.Size([1, 96, 32, 32]) + assert feat[3].shape == torch.Size([1, 128, 16, 16]) + assert feat[4].shape == torch.Size([1, 640, 8, 8]) + + # Test frozen_stages + model = MobileViT(arch='small', frozen_stages=2) + model.init_weights() + model.train() + + for i in range(2): + assert not model.layers[i].training + + for i in range(2, 5): + assert model.layers[i].training diff --git a/tests/test_models/test_backbones/test_mvit.py b/tests/test_models/test_backbones/test_mvit.py new file mode 100644 index 0000000..0a5e126 --- /dev/null +++ b/tests/test_models/test_backbones/test_mvit.py @@ -0,0 +1,130 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
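+# Unit tests for the MViT backbone: arch validation, weight initialization, and multi-scale forward outputs, including dynamic input sizes.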
+import math +from copy import deepcopy +from unittest import TestCase + +import torch + +from mmpretrain.models import MViT + + +class TestMViT(TestCase): + + def setUp(self): + self.cfg = dict(arch='tiny', drop_path_rate=0.1) + + def test_structure(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + MViT(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + } + MViT(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 96, + 'num_layers': 10, + 'num_heads': 1, + 'downscale_indices': [2, 5, 8] + } + stage_indices = [0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3] + model = MViT(**cfg) + self.assertEqual(model.embed_dims, 96) + self.assertEqual(model.num_layers, 10) + for i, block in enumerate(model.blocks): + stage = stage_indices[i] + self.assertEqual(block.out_dims, 96 * 2**(stage)) + + # Test out_indices + cfg = deepcopy(self.cfg) + cfg['out_scales'] = {1: 1} + with self.assertRaisesRegex(AssertionError, "get "): + MViT(**cfg) + cfg['out_scales'] = [0, 13] + with self.assertRaisesRegex(AssertionError, 'Invalid out_scales 13'): + MViT(**cfg) + + # Test model structure + cfg = deepcopy(self.cfg) + model = MViT(**cfg) + stage_indices = [0, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3] + self.assertEqual(len(model.blocks), 10) + dpr_inc = 0.1 / (10 - 1) + dpr = 0 + for i, block in enumerate(model.blocks): + stage = stage_indices[i] + print(i, stage) + self.assertEqual(block.attn.num_heads, 2**stage) + if dpr > 0: + self.assertAlmostEqual(block.drop_path.drop_prob, dpr) + dpr += dpr_inc + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + cfg['use_abs_pos_embed'] = True + model = MViT(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue(torch.allclose(model.pos_embed, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse(torch.allclose(model.pos_embed, torch.tensor(0.))) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = MViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 768, 7, 7)) + + # Test forward with multi out scales + cfg = deepcopy(self.cfg) + cfg['out_scales'] = (0, 1, 2, 3) + model = MViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for stage, out in enumerate(outs): + stride = 2**stage + self.assertEqual(out.shape, + (1, 96 * stride, 56 // stride, 56 // stride)) + + # Test forward with dynamic input size + imgs1 = torch.randn(1, 3, 224, 224) + imgs2 = torch.randn(1, 3, 256, 256) + imgs3 = torch.randn(1, 3, 256, 309) + cfg = deepcopy(self.cfg) + model = MViT(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 32), + math.ceil(imgs.shape[3] / 32)) + 
self.assertEqual(patch_token.shape, (1, 768, *expect_feat_shape)) diff --git a/tests/test_models/test_backbones/test_poolformer.py b/tests/test_models/test_backbones/test_poolformer.py new file mode 100644 index 0000000..f61b304 --- /dev/null +++ b/tests/test_models/test_backbones/test_poolformer.py @@ -0,0 +1,143 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from unittest import TestCase + +import torch + +from mmpretrain.models.backbones import PoolFormer +from mmpretrain.models.backbones.poolformer import PoolFormerBlock + + +class TestPoolFormer(TestCase): + + def setUp(self): + arch = 's12' + self.cfg = dict(arch=arch, drop_path_rate=0.1) + self.arch = PoolFormer.arch_settings[arch] + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'Unavailable arch'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + PoolFormer(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'must have "layers"'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 96, + 'num_heads': [3, 6, 12, 16], + } + PoolFormer(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + layers = [2, 2, 4, 2] + embed_dims = [6, 12, 6, 12] + mlp_ratios = [2, 3, 4, 4] + layer_scale_init_value = 1e-4 + cfg['arch'] = dict( + layers=layers, + embed_dims=embed_dims, + mlp_ratios=mlp_ratios, + layer_scale_init_value=layer_scale_init_value, + ) + model = PoolFormer(**cfg) + for i, stage in enumerate(model.network): + if not isinstance(stage, PoolFormerBlock): + continue + self.assertEqual(len(stage), layers[i]) + self.assertEqual(stage[0].mlp.fc1.in_channels, embed_dims[i]) + self.assertEqual(stage[0].mlp.fc1.out_channels, + embed_dims[i] * mlp_ratios[i]) + self.assertTrue( + torch.allclose(stage[0].layer_scale_1, + torch.tensor(layer_scale_init_value))) + self.assertTrue( + torch.allclose(stage[0].layer_scale_2, + torch.tensor(layer_scale_init_value))) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = PoolFormer(**cfg) + ori_weight = model.patch_embed.proj.weight.clone().detach() + + model.init_weights() + initialized_weight = model.patch_embed.proj.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = PoolFormer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 512, 7, 7)) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 2, 4, 6) + model = PoolFormer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for dim, stride, out in zip(self.arch['embed_dims'], [1, 2, 4, 8], + outs): + self.assertEqual(out.shape, (1, dim, 56 // stride, 56 // stride)) + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = PoolFormer(**cfg) + layers = self.arch['layers'] + for i, block in enumerate(model.network): + expect_prob = 0.2 / (sum(layers) - 1) * i + if hasattr(block, 'drop_path'): + if expect_prob == 0: + self.assertIsInstance(block.drop_path, torch.nn.Identity) + else: + self.assertAlmostEqual(block.drop_path.drop_prob, + expect_prob) + + # test with first stage frozen. 
+ cfg = deepcopy(self.cfg) + frozen_stages = 1 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 2, 4, 6) + model = PoolFormer(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. + self.assertFalse(model.patch_embed.training) + for param in model.patch_embed.parameters(): + self.assertFalse(param.requires_grad) + for i in range(frozen_stages): + module = model.network[i] + for param in module.parameters(): + self.assertFalse(param.requires_grad) + for param in model.norm0.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 7): + module = model.network[i] + for param in module.parameters(): + self.assertTrue(param.requires_grad) + if hasattr(model, f'norm{i}'): + norm = getattr(model, f'norm{i}') + for param in norm.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_regnet.py b/tests/test_models/test_backbones/test_regnet.py new file mode 100644 index 0000000..bed26fe --- /dev/null +++ b/tests/test_models/test_backbones/test_regnet.py @@ -0,0 +1,94 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmpretrain.models.backbones import RegNet + +regnet_test_data = [ + ('regnetx_400mf', + dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, + bot_mul=1.0), [32, 64, 160, 384]), + ('regnetx_800mf', + dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, + bot_mul=1.0), [64, 128, 288, 672]), + ('regnetx_1.6gf', + dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, + bot_mul=1.0), [72, 168, 408, 912]), + ('regnetx_3.2gf', + dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, + bot_mul=1.0), [96, 192, 432, 1008]), + ('regnetx_4.0gf', + dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, + bot_mul=1.0), [80, 240, 560, 1360]), + ('regnetx_6.4gf', + dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, + bot_mul=1.0), [168, 392, 784, 1624]), + ('regnetx_8.0gf', + dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, + bot_mul=1.0), [80, 240, 720, 1920]), + ('regnetx_12gf', + dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, + bot_mul=1.0), [224, 448, 896, 2240]), +] + + +@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data) +def test_regnet_backbone(arch_name, arch, out_channels): + with pytest.raises(AssertionError): + # ResNeXt depth should be in [50, 101, 152] + RegNet(arch_name + '233') + + # output the last feature map + model = RegNet(arch_name) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == (1, out_channels[-1], 7, 7) + + # output feature map of all stages + model = RegNet(arch_name, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, out_channels[0], 56, 56) + assert feat[1].shape == (1, out_channels[1], 28, 28) + assert feat[2].shape == (1, out_channels[2], 14, 14) + assert feat[3].shape == (1, out_channels[3], 7, 7) + + +@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data) +def test_custom_arch(arch_name, arch, out_channels): + # output the last feature map + model = RegNet(arch) + model.init_weights() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert 
feat[0].shape == (1, out_channels[-1], 7, 7) + + # output feature map of all stages + model = RegNet(arch, out_indices=(0, 1, 2, 3)) + model.init_weights() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, out_channels[0], 56, 56) + assert feat[1].shape == (1, out_channels[1], 28, 28) + assert feat[2].shape == (1, out_channels[2], 14, 14) + assert feat[3].shape == (1, out_channels[3], 7, 7) + + +def test_exception(): + # arch must be a str or dict + with pytest.raises(TypeError): + _ = RegNet(50) diff --git a/tests/test_models/test_backbones/test_replknet.py b/tests/test_models/test_backbones/test_replknet.py new file mode 100644 index 0000000..ed9305c --- /dev/null +++ b/tests/test_models/test_backbones/test_replknet.py @@ -0,0 +1,304 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import tempfile + +import pytest +import torch +from mmengine.runner import load_checkpoint, save_checkpoint +from torch import nn +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import RepLKNet +from mmpretrain.models.backbones.replknet import ReparamLargeKernelConv + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def is_replk_block(modules): + if isinstance(modules, ReparamLargeKernelConv): + return True + return False + + +def test_replknet_replkblock(): + # Test ReparamLargeKernelConv with in_channels != out_channels, + # kernel_size = 31, stride = 1, groups=in_channels, small_kernel = 5 + block = ReparamLargeKernelConv( + 5, 10, kernel_size=31, stride=1, groups=5, small_kernel=5) + block.eval() + x = torch.randn(1, 5, 64, 64) + x_out_not_deploy = block(x) + assert block.small_kernel <= block.kernel_size + assert not hasattr(block, 'lkb_reparam') + assert hasattr(block, 'lkb_origin') + assert hasattr(block, 'small_conv') + assert x_out_not_deploy.shape == torch.Size((1, 10, 64, 64)) + block.merge_kernel() + assert block.small_kernel_merged is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 10, 64, 64)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test ReparamLargeKernelConv with in_channels == out_channels, + # kernel_size = 31, stride = 1, groups=in_channels, small_kernel = 5 + block = ReparamLargeKernelConv( + 12, 12, kernel_size=31, stride=1, groups=12, small_kernel=5) + block.eval() + x = torch.randn(1, 12, 64, 64) + x_out_not_deploy = block(x) + assert block.small_kernel <= block.kernel_size + assert not hasattr(block, 'lkb_reparam') + assert hasattr(block, 'lkb_origin') + assert hasattr(block, 'small_conv') + assert x_out_not_deploy.shape == torch.Size((1, 12, 64, 64)) + block.merge_kernel() + assert block.small_kernel_merged is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 12, 64, 64)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test ReparamLargeKernelConv with in_channels == out_channels, + # kernel_size = 31, stride = 2, groups=in_channels, small_kernel = 5 + block = ReparamLargeKernelConv( + 16, 16, kernel_size=31, stride=2, groups=16, small_kernel=5) + block.eval() + x = torch.randn(1, 
16, 64, 64) + x_out_not_deploy = block(x) + assert block.small_kernel <= block.kernel_size + assert not hasattr(block, 'lkb_reparam') + assert hasattr(block, 'lkb_origin') + assert hasattr(block, 'small_conv') + assert x_out_not_deploy.shape == torch.Size((1, 16, 32, 32)) + block.merge_kernel() + assert block.small_kernel_merged is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 16, 32, 32)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test ReparamLargeKernelConv with in_channels == out_channels, + # kernel_size = 27, stride = 1, groups=in_channels, small_kernel = 5 + block = ReparamLargeKernelConv( + 12, 12, kernel_size=27, stride=1, groups=12, small_kernel=5) + block.eval() + x = torch.randn(1, 12, 48, 48) + x_out_not_deploy = block(x) + assert block.small_kernel <= block.kernel_size + assert not hasattr(block, 'lkb_reparam') + assert hasattr(block, 'lkb_origin') + assert hasattr(block, 'small_conv') + assert x_out_not_deploy.shape == torch.Size((1, 12, 48, 48)) + block.merge_kernel() + assert block.small_kernel_merged is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 12, 48, 48)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test ReparamLargeKernelConv with in_channels == out_channels, + # kernel_size = 31, stride = 1, groups=in_channels, small_kernel = 7 + block = ReparamLargeKernelConv( + 12, 12, kernel_size=31, stride=1, groups=12, small_kernel=7) + block.eval() + x = torch.randn(1, 12, 64, 64) + x_out_not_deploy = block(x) + assert block.small_kernel <= block.kernel_size + assert not hasattr(block, 'lkb_reparam') + assert hasattr(block, 'lkb_origin') + assert hasattr(block, 'small_conv') + assert x_out_not_deploy.shape == torch.Size((1, 12, 64, 64)) + block.merge_kernel() + assert block.small_kernel_merged is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 12, 64, 64)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test ReparamLargeKernelConv with deploy == True + block = ReparamLargeKernelConv( + 8, + 8, + kernel_size=31, + stride=1, + groups=8, + small_kernel=5, + small_kernel_merged=True) + assert isinstance(block.lkb_reparam, nn.Conv2d) + assert not hasattr(block, 'lkb_origin') + assert not hasattr(block, 'small_conv') + x = torch.randn(1, 8, 48, 48) + x_out = block(x) + assert x_out.shape == torch.Size((1, 8, 48, 48)) + + +def test_replknet_backbone(): + with pytest.raises(TypeError): + # arch must be str or dict + RepLKNet(arch=[4, 6, 16, 1]) + + with pytest.raises(AssertionError): + # arch must in arch_settings + RepLKNet(arch='31C') + + with pytest.raises(KeyError): + # arch must have num_blocks and width_factor + arch = dict(large_kernel_sizes=[31, 29, 27, 13]) + RepLKNet(arch=arch) + + with pytest.raises(KeyError): + # arch must have num_blocks and width_factor + arch = dict(large_kernel_sizes=[31, 29, 27, 13], layers=[2, 2, 18, 2]) + RepLKNet(arch=arch) + + with pytest.raises(KeyError): + # arch must have num_blocks and width_factor + arch = dict( + large_kernel_sizes=[31, 29, 27, 13], + layers=[2, 2, 18, 2], + channels=[128, 256, 512, 1024]) + RepLKNet(arch=arch) + + # len(arch['large_kernel_sizes']) == arch['layers']) + # == len(arch['channels']) + # == len(strides) == len(dilations) + with pytest.raises(AssertionError): + arch = dict( + large_kernel_sizes=[31, 29, 27, 13], + layers=[2, 2, 18, 2], + channels=[128, 256, 1024], + small_kernel=5, + dw_ratio=1) + 
RepLKNet(arch=arch) + + # len(strides) must equal to 4 + with pytest.raises(AssertionError): + RepLKNet('31B', strides=(2, 2, 2)) + + # len(dilations) must equal to 4 + with pytest.raises(AssertionError): + RepLKNet('31B', strides=(2, 2, 2, 2), dilations=(1, 1, 1)) + + # max(out_indices) < len(arch['num_blocks']) + with pytest.raises(AssertionError): + RepLKNet('31B', out_indices=(5, )) + + # Test RepLKNet norm state + model = RepLKNet('31B') + model.train() + assert check_norm_state(model.modules(), True) + + # Test RepLKNet with first stage frozen + frozen_stages = 1 + model = RepLKNet('31B', frozen_stages=frozen_stages) + model.train() + for param in model.stem.parameters(): + assert param.requires_grad is False + for i in range(0, frozen_stages): + stage = model.stages[i] + for mod in stage.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in stage.parameters(): + assert param.requires_grad is False + + # Test RepLKNet with norm_eval + model = RepLKNet('31B', norm_eval=True) + model.train() + assert check_norm_state(model.modules(), False) + + # Test RepLKNet forward with layer 3 forward + model = RepLKNet('31B', out_indices=(3, )) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert isinstance(feat, tuple) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 1024, 7, 7)) + + # Test RepLKNet forward + model_test_settings = [ + dict(model_name='31B', out_sizes=(128, 256, 512, 1024)), + # dict(model_name='31L', out_sizes=(192, 384, 768, 1536)), + # dict(model_name='XL', out_sizes=(256, 512, 1024, 2048)) + ] + + choose_models = ['31B'] + # Test RepLKNet model forward + for model_test_setting in model_test_settings: + if model_test_setting['model_name'] not in choose_models: + continue + model = RepLKNet( + model_test_setting['model_name'], out_indices=(0, 1, 2, 3)) + model.init_weights() + + # Test Norm + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + model.train() + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert feat[0].shape == torch.Size( + (1, model_test_setting['out_sizes'][0], 56, 56)) + assert feat[1].shape == torch.Size( + (1, model_test_setting['out_sizes'][1], 28, 28)) + assert feat[2].shape == torch.Size( + (1, model_test_setting['out_sizes'][2], 14, 14)) + assert feat[3].shape == torch.Size( + (1, model_test_setting['out_sizes'][3], 7, 7)) + + # Test eval of "train" mode and "deploy" mode + gap = nn.AdaptiveAvgPool2d(output_size=(1)) + fc = nn.Linear(model_test_setting['out_sizes'][3], 10) + model.eval() + feat = model(imgs) + pred = fc(gap(feat[3]).flatten(1)) + model.switch_to_deploy() + for m in model.modules(): + if isinstance(m, ReparamLargeKernelConv): + assert m.small_kernel_merged is True + feat_deploy = model(imgs) + pred_deploy = fc(gap(feat_deploy[3]).flatten(1)) + for i in range(4): + torch.allclose(feat[i], feat_deploy[i]) + torch.allclose(pred, pred_deploy) + + +def test_replknet_load(): + # Test output before and load from deploy checkpoint + model = RepLKNet('31B', out_indices=(0, 1, 2, 3)) + inputs = torch.randn((1, 3, 224, 224)) + ckpt_path = os.path.join(tempfile.gettempdir(), 'ckpt.pth') + model.switch_to_deploy() + model.eval() + outputs = model(inputs) + + model_deploy = RepLKNet( + '31B', out_indices=(0, 1, 2, 3), small_kernel_merged=True) + model_deploy.eval() + 
save_checkpoint(model.state_dict(), ckpt_path) + load_checkpoint(model_deploy, ckpt_path, strict=True) + + outputs_load = model_deploy(inputs) + for feat, feat_load in zip(outputs, outputs_load): + assert torch.allclose(feat, feat_load) diff --git a/tests/test_models/test_backbones/test_repmlp.py b/tests/test_models/test_backbones/test_repmlp.py new file mode 100644 index 0000000..f03fce4 --- /dev/null +++ b/tests/test_models/test_backbones/test_repmlp.py @@ -0,0 +1,173 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import tempfile +from copy import deepcopy +from unittest import TestCase + +import torch +from mmengine.runner import load_checkpoint, save_checkpoint + +from mmpretrain.models.backbones import RepMLPNet + + +class TestRepMLP(TestCase): + + def setUp(self): + # default model setting + self.cfg = dict( + arch='b', + img_size=224, + out_indices=(3, ), + reparam_conv_kernels=(1, 3), + final_norm=True) + + # default model setting and output stage channels + self.model_forward_settings = [ + dict(model_name='B', out_sizes=(96, 192, 384, 768)), + ] + + # temp ckpt path + self.ckpt_path = os.path.join(tempfile.gettempdir(), 'ckpt.pth') + + def test_arch(self): + # Test invalid arch data type + with self.assertRaisesRegex(AssertionError, 'arch needs a dict'): + cfg = deepcopy(self.cfg) + cfg['arch'] = [96, 192, 384, 768] + RepMLPNet(**cfg) + + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'A' + RepMLPNet(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'channels': [96, 192, 384, 768], + 'depths': [2, 2, 12, 2] + } + RepMLPNet(**cfg) + + # test len(arch['depths']) equals to len(arch['channels']) + # equals to len(arch['sharesets_nums']) + with self.assertRaisesRegex(AssertionError, 'Length of setting'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'channels': [96, 192, 384, 768], + 'depths': [2, 2, 12, 2], + 'sharesets_nums': [1, 4, 32] + } + RepMLPNet(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + channels = [96, 192, 384, 768] + depths = [2, 2, 12, 2] + sharesets_nums = [1, 4, 32, 128] + cfg['arch'] = { + 'channels': channels, + 'depths': depths, + 'sharesets_nums': sharesets_nums + } + cfg['out_indices'] = (0, 1, 2, 3) + model = RepMLPNet(**cfg) + for i, stage in enumerate(model.stages): + self.assertEqual(len(stage), depths[i]) + self.assertEqual(stage[0].repmlp_block.channels, channels[i]) + self.assertEqual(stage[0].repmlp_block.deploy, False) + self.assertEqual(stage[0].repmlp_block.num_sharesets, + sharesets_nums[i]) + + def test_init(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = RepMLPNet(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + cfg = deepcopy(self.cfg) + model = RepMLPNet(**cfg) + feat = model(imgs) + self.assertTrue(isinstance(feat, tuple)) + self.assertEqual(len(feat), 1) + self.assertTrue(isinstance(feat[0], torch.Tensor)) + self.assertEqual(feat[0].shape, torch.Size((1, 768, 7, 7))) + + imgs = torch.randn(1, 3, 256, 256) + with self.assertRaisesRegex(AssertionError, 
"doesn't support dynamic"): + model(imgs) + + # Test RepMLPNet model forward + for model_test_setting in self.model_forward_settings: + model = RepMLPNet( + model_test_setting['model_name'], + out_indices=(0, 1, 2, 3), + final_norm=False) + model.init_weights() + + model.train() + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + self.assertEqual( + feat[0].shape, + torch.Size((1, model_test_setting['out_sizes'][1], 28, 28))) + self.assertEqual( + feat[1].shape, + torch.Size((1, model_test_setting['out_sizes'][2], 14, 14))) + self.assertEqual( + feat[2].shape, + torch.Size((1, model_test_setting['out_sizes'][3], 7, 7))) + self.assertEqual( + feat[3].shape, + torch.Size((1, model_test_setting['out_sizes'][3], 7, 7))) + + def test_deploy_(self): + # Test output before and load from deploy checkpoint + imgs = torch.randn((1, 3, 224, 224)) + cfg = dict( + arch='b', out_indices=( + 1, + 3, + ), reparam_conv_kernels=(1, 3, 5)) + model = RepMLPNet(**cfg) + + model.eval() + feats = model(imgs) + model.switch_to_deploy() + for m in model.modules(): + if hasattr(m, 'deploy'): + self.assertTrue(m.deploy) + model.eval() + feats_ = model(imgs) + assert len(feats) == len(feats_) + for i in range(len(feats)): + self.assertTrue( + torch.allclose( + feats[i].sum(), feats_[i].sum(), rtol=0.1, atol=0.1)) + + cfg['deploy'] = True + model_deploy = RepMLPNet(**cfg) + model_deploy.eval() + save_checkpoint(model.state_dict(), self.ckpt_path) + load_checkpoint(model_deploy, self.ckpt_path, strict=True) + feats__ = model_deploy(imgs) + + assert len(feats_) == len(feats__) + for i in range(len(feats)): + self.assertTrue( + torch.allclose(feats__[i], feats_[i], rtol=0.01, atol=0.01)) diff --git a/tests/test_models/test_backbones/test_repvgg.py b/tests/test_models/test_backbones/test_repvgg.py new file mode 100644 index 0000000..a558dbc --- /dev/null +++ b/tests/test_models/test_backbones/test_repvgg.py @@ -0,0 +1,351 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os +import tempfile + +import pytest +import torch +from mmengine.runner import load_checkpoint, save_checkpoint +from torch import nn +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import RepVGG +from mmpretrain.models.backbones.repvgg import RepVGGBlock +from mmpretrain.models.utils import SELayer + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def is_repvgg_block(modules): + if isinstance(modules, RepVGGBlock): + return True + return False + + +def test_repvgg_repvggblock(): + # Test RepVGGBlock with in_channels != out_channels, stride = 1 + block = RepVGGBlock(5, 10, stride=1) + block.eval() + x = torch.randn(1, 5, 16, 16) + x_out_not_deploy = block(x) + assert block.branch_norm is None + assert not hasattr(block, 'branch_reparam') + assert hasattr(block, 'branch_1x1') + assert hasattr(block, 'branch_3x3') + assert hasattr(block, 'branch_norm') + assert block.se_cfg is None + assert x_out_not_deploy.shape == torch.Size((1, 10, 16, 16)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 10, 16, 16)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with in_channels == out_channels, stride = 1 + block = RepVGGBlock(12, 12, stride=1) + block.eval() + x = torch.randn(1, 12, 8, 8) + x_out_not_deploy = block(x) + assert isinstance(block.branch_norm, nn.BatchNorm2d) + assert not hasattr(block, 'branch_reparam') + assert x_out_not_deploy.shape == torch.Size((1, 12, 8, 8)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 12, 8, 8)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with in_channels == out_channels, stride = 2 + block = RepVGGBlock(16, 16, stride=2) + block.eval() + x = torch.randn(1, 16, 8, 8) + x_out_not_deploy = block(x) + assert block.branch_norm is None + assert x_out_not_deploy.shape == torch.Size((1, 16, 4, 4)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 16, 4, 4)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with padding == dilation == 2 + block = RepVGGBlock(14, 14, stride=1, padding=2, dilation=2) + block.eval() + x = torch.randn(1, 14, 16, 16) + x_out_not_deploy = block(x) + assert isinstance(block.branch_norm, nn.BatchNorm2d) + assert x_out_not_deploy.shape == torch.Size((1, 14, 16, 16)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 14, 16, 16)) + assert torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with groups = 2 + block = RepVGGBlock(4, 4, stride=1, groups=2) + block.eval() + x = torch.randn(1, 4, 5, 6) + x_out_not_deploy = block(x) + assert x_out_not_deploy.shape == torch.Size((1, 4, 5, 6)) + block.switch_to_deploy() + assert block.deploy is True + x_out_deploy = block(x) + assert x_out_deploy.shape == torch.Size((1, 4, 5, 6)) + assert 
torch.allclose(x_out_not_deploy, x_out_deploy, atol=1e-5, rtol=1e-4) + + # Test RepVGGBlock with se + se_cfg = dict(ratio=4, divisor=1) + block = RepVGGBlock(18, 18, stride=1, se_cfg=se_cfg) + block.train() + x = torch.randn(1, 18, 5, 5) + x_out_not_deploy = block(x) + assert isinstance(block.se_layer, SELayer) + assert x_out_not_deploy.shape == torch.Size((1, 18, 5, 5)) + + # Test RepVGGBlock with checkpoint forward + block = RepVGGBlock(24, 24, stride=1, with_cp=True) + assert block.with_cp + x = torch.randn(1, 24, 7, 7) + x_out = block(x) + assert x_out.shape == torch.Size((1, 24, 7, 7)) + + # Test RepVGGBlock with deploy == True + block = RepVGGBlock(8, 8, stride=1, deploy=True) + assert isinstance(block.branch_reparam, nn.Conv2d) + assert not hasattr(block, 'branch_3x3') + assert not hasattr(block, 'branch_1x1') + assert not hasattr(block, 'branch_norm') + x = torch.randn(1, 8, 16, 16) + x_out = block(x) + assert x_out.shape == torch.Size((1, 8, 16, 16)) + + +def test_repvgg_backbone(): + with pytest.raises(TypeError): + # arch must be str or dict + RepVGG(arch=[4, 6, 16, 1]) + + with pytest.raises(AssertionError): + # arch must in arch_settings + RepVGG(arch='A3') + + with pytest.raises(KeyError): + # arch must have num_blocks and width_factor + arch = dict(num_blocks=[2, 4, 14, 1]) + RepVGG(arch=arch) + + # len(arch['num_blocks']) == len(arch['width_factor']) + # == len(strides) == len(dilations) + with pytest.raises(AssertionError): + arch = dict(num_blocks=[2, 4, 14, 1], width_factor=[0.75, 0.75, 0.75]) + RepVGG(arch=arch) + + # len(strides) must equal to 4 + with pytest.raises(AssertionError): + RepVGG('A0', strides=(1, 1, 1)) + + # len(dilations) must equal to 4 + with pytest.raises(AssertionError): + RepVGG('A0', strides=(1, 1, 1, 1), dilations=(1, 1, 2)) + + # max(out_indices) < len(arch['num_blocks']) + with pytest.raises(AssertionError): + RepVGG('A0', out_indices=(5, )) + + # max(arch['group_idx'].keys()) <= sum(arch['num_blocks']) + with pytest.raises(AssertionError): + arch = dict( + num_blocks=[2, 4, 14, 1], + width_factor=[0.75, 0.75, 0.75], + group_idx={22: 2}) + RepVGG(arch=arch) + + # Test RepVGG norm state + model = RepVGG('A0') + model.train() + assert check_norm_state(model.modules(), True) + + # Test RepVGG with first stage frozen + frozen_stages = 1 + model = RepVGG('A0', frozen_stages=frozen_stages) + model.train() + for param in model.stem.parameters(): + assert param.requires_grad is False + for i in range(0, frozen_stages): + stage_name = model.stages[i] + stage = model.__getattr__(stage_name) + for mod in stage: + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in stage.parameters(): + assert param.requires_grad is False + + # Test RepVGG with norm_eval + model = RepVGG('A0', norm_eval=True) + model.train() + assert check_norm_state(model.modules(), False) + + # Test RepVGG forward with layer 3 forward + model = RepVGG('A0', out_indices=(3, )) + model.init_weights() + model.eval() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 32, 32) + feat = model(imgs) + assert isinstance(feat, tuple) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 1280, 1, 1)) + + # Test with custom arch + cfg = dict( + num_blocks=[3, 5, 7, 3], + width_factor=[1, 1, 1, 1], + group_layer_map=None, + se_cfg=None, + stem_channels=16) + model = RepVGG(arch=cfg, out_indices=(3, )) + model.eval() + assert model.stem.out_channels == min(16, 64 * 
1) + + imgs = torch.randn(1, 3, 32, 32) + feat = model(imgs) + assert isinstance(feat, tuple) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 512, 1, 1)) + + # Test RepVGG forward + model_test_settings = [ + dict(model_name='A0', out_sizes=(48, 96, 192, 1280)), + dict(model_name='A1', out_sizes=(64, 128, 256, 1280)), + dict(model_name='A2', out_sizes=(96, 192, 384, 1408)), + dict(model_name='B0', out_sizes=(64, 128, 256, 1280)), + dict(model_name='B1', out_sizes=(128, 256, 512, 2048)), + dict(model_name='B1g2', out_sizes=(128, 256, 512, 2048)), + dict(model_name='B1g4', out_sizes=(128, 256, 512, 2048)), + dict(model_name='B2', out_sizes=(160, 320, 640, 2560)), + dict(model_name='B2g2', out_sizes=(160, 320, 640, 2560)), + dict(model_name='B2g4', out_sizes=(160, 320, 640, 2560)), + dict(model_name='B3', out_sizes=(192, 384, 768, 2560)), + dict(model_name='B3g2', out_sizes=(192, 384, 768, 2560)), + dict(model_name='B3g4', out_sizes=(192, 384, 768, 2560)), + dict(model_name='D2se', out_sizes=(160, 320, 640, 2560)) + ] + + choose_models = ['A0', 'B1', 'B1g2'] + # Test RepVGG model forward + for model_test_setting in model_test_settings: + if model_test_setting['model_name'] not in choose_models: + continue + model = RepVGG( + model_test_setting['model_name'], out_indices=(0, 1, 2, 3)) + model.init_weights() + model.eval() + + # Test Norm + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 32, 32) + feat = model(imgs) + assert feat[0].shape == torch.Size( + (1, model_test_setting['out_sizes'][0], 8, 8)) + assert feat[1].shape == torch.Size( + (1, model_test_setting['out_sizes'][1], 4, 4)) + assert feat[2].shape == torch.Size( + (1, model_test_setting['out_sizes'][2], 2, 2)) + assert feat[3].shape == torch.Size( + (1, model_test_setting['out_sizes'][3], 1, 1)) + + # Test eval of "train" mode and "deploy" mode + gap = nn.AdaptiveAvgPool2d(output_size=(1)) + fc = nn.Linear(model_test_setting['out_sizes'][3], 10) + model.eval() + feat = model(imgs) + pred = fc(gap(feat[3]).flatten(1)) + model.switch_to_deploy() + for m in model.modules(): + if isinstance(m, RepVGGBlock): + assert m.deploy is True + feat_deploy = model(imgs) + pred_deploy = fc(gap(feat_deploy[3]).flatten(1)) + for i in range(4): + torch.allclose(feat[i], feat_deploy[i]) + torch.allclose(pred, pred_deploy) + + # Test RepVGG forward with add_ppf + model = RepVGG('A0', out_indices=(3, ), add_ppf=True) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert isinstance(feat, tuple) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 1280, 2, 2)) + + # Test RepVGG forward with 'stem_channels' not in arch + arch = dict( + num_blocks=[2, 4, 14, 1], + width_factor=[0.75, 0.75, 0.75, 2.5], + group_layer_map=None, + se_cfg=None) + model = RepVGG(arch, add_ppf=True) + model.stem.in_channels = min(64, 64 * 0.75) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 64, 64) + feat = model(imgs) + assert isinstance(feat, tuple) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 1280, 2, 2)) + + +def test_repvgg_load(): + # Test output before and load from deploy checkpoint + model = RepVGG('A1', 
out_indices=(0, 1, 2, 3)) + inputs = torch.randn((1, 3, 32, 32)) + ckpt_path = os.path.join(tempfile.gettempdir(), 'ckpt.pth') + model.switch_to_deploy() + model.eval() + outputs = model(inputs) + + model_deploy = RepVGG('A1', out_indices=(0, 1, 2, 3), deploy=True) + model_deploy.eval() + save_checkpoint(model.state_dict(), ckpt_path) + load_checkpoint(model_deploy, ckpt_path, strict=True) + + outputs_load = model_deploy(inputs) + for feat, feat_load in zip(outputs, outputs_load): + assert torch.allclose(feat, feat_load) diff --git a/tests/test_models/test_backbones/test_res2net.py b/tests/test_models/test_backbones/test_res2net.py new file mode 100644 index 0000000..365f5f1 --- /dev/null +++ b/tests/test_models/test_backbones/test_res2net.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.models.backbones import Res2Net + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_resnet_cifar(): + # Only support depth 50, 101 and 152 + with pytest.raises(KeyError): + Res2Net(depth=18) + + # test the feature map size when depth is 50 + # and deep_stem=True, avg_down=True + model = Res2Net( + depth=50, out_indices=(0, 1, 2, 3), deep_stem=True, avg_down=True) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model.stem(imgs) + assert feat.shape == (1, 64, 112, 112) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # test the feature map size when depth is 101 + # and deep_stem=False, avg_down=False + model = Res2Net( + depth=101, out_indices=(0, 1, 2, 3), deep_stem=False, avg_down=False) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model.conv1(imgs) + assert feat.shape == (1, 64, 112, 112) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test Res2Net with first stage frozen + frozen_stages = 1 + model = Res2Net(depth=50, frozen_stages=frozen_stages, deep_stem=False) + model.init_weights() + model.train() + assert check_norm_state([model.norm1], False) + for param in model.conv1.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False diff --git a/tests/test_models/test_backbones/test_resnest.py b/tests/test_models/test_backbones/test_resnest.py new file mode 100644 index 0000000..7c265cb --- /dev/null +++ b/tests/test_models/test_backbones/test_resnest.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
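+# Tests for the ResNeSt split-attention Bottleneck (radix / reduction_factor
+# settings) and the output feature-map shapes of the ResNeSt backbone.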
+import pytest +import torch + +from mmpretrain.models.backbones import ResNeSt +from mmpretrain.models.backbones.resnest import Bottleneck as BottleneckS + + +def test_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow') + + # Test ResNeSt Bottleneck structure + block = BottleneckS( + 64, 256, radix=2, reduction_factor=4, stride=2, style='pytorch') + assert block.avd_layer.stride == 2 + assert block.conv2.channels == 64 + + # Test ResNeSt Bottleneck forward + block = BottleneckS(64, 64, radix=2, reduction_factor=4) + x = torch.randn(2, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([2, 64, 56, 56]) + + +def test_resnest(): + with pytest.raises(KeyError): + # ResNeSt depth should be in [50, 101, 152, 200] + ResNeSt(depth=18) + + # Test ResNeSt with radix 2, reduction_factor 4 + model = ResNeSt( + depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(2, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([2, 256, 56, 56]) + assert feat[1].shape == torch.Size([2, 512, 28, 28]) + assert feat[2].shape == torch.Size([2, 1024, 14, 14]) + assert feat[3].shape == torch.Size([2, 2048, 7, 7]) diff --git a/tests/test_models/test_backbones/test_resnet.py b/tests/test_models/test_backbones/test_resnet.py new file mode 100644 index 0000000..bf2900d --- /dev/null +++ b/tests/test_models/test_backbones/test_resnet.py @@ -0,0 +1,618 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.models.backbones import ResNet, ResNetV1c, ResNetV1d +from mmpretrain.models.backbones.resnet import (BasicBlock, Bottleneck, + ResLayer, get_expansion) + + +def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (BasicBlock, Bottleneck)): + return True + return False + + +def all_zeros(modules): + """Check if the weight(and bias) is all zero.""" + weight_zero = torch.equal(modules.weight.data, + torch.zeros_like(modules.weight.data)) + if hasattr(modules, 'bias'): + bias_zero = torch.equal(modules.bias.data, + torch.zeros_like(modules.bias.data)) + else: + bias_zero = True + + return weight_zero and bias_zero + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_get_expansion(): + assert get_expansion(Bottleneck, 2) == 2 + assert get_expansion(BasicBlock) == 1 + assert get_expansion(Bottleneck) == 4 + + class MyResBlock(nn.Module): + + expansion = 8 + + assert get_expansion(MyResBlock) == 8 + + # expansion must be an integer or None + with pytest.raises(TypeError): + get_expansion(Bottleneck, '0') + + # expansion is not specified and cannot be inferred + with pytest.raises(TypeError): + + class SomeModule(nn.Module): + pass + + get_expansion(SomeModule) + + +def test_basic_block(): + # expansion must be 1 + with pytest.raises(AssertionError): + BasicBlock(64, 64, expansion=2) + + # BasicBlock with stride 1, out_channels == in_channels + block = BasicBlock(64, 64) + assert block.in_channels == 64 + assert block.mid_channels == 64 + assert block.out_channels == 64 + assert block.conv1.in_channels == 64 + 
assert block.conv1.out_channels == 64 + assert block.conv1.kernel_size == (3, 3) + assert block.conv1.stride == (1, 1) + assert block.conv2.in_channels == 64 + assert block.conv2.out_channels == 64 + assert block.conv2.kernel_size == (3, 3) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # BasicBlock with stride 1 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, bias=False), nn.BatchNorm2d(128)) + block = BasicBlock(64, 128, downsample=downsample) + assert block.in_channels == 64 + assert block.mid_channels == 128 + assert block.out_channels == 128 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 128 + assert block.conv1.kernel_size == (3, 3) + assert block.conv1.stride == (1, 1) + assert block.conv2.in_channels == 128 + assert block.conv2.out_channels == 128 + assert block.conv2.kernel_size == (3, 3) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 128, 56, 56]) + + # BasicBlock with stride 2 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False), + nn.BatchNorm2d(128)) + block = BasicBlock(64, 128, stride=2, downsample=downsample) + assert block.in_channels == 64 + assert block.mid_channels == 128 + assert block.out_channels == 128 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 128 + assert block.conv1.kernel_size == (3, 3) + assert block.conv1.stride == (2, 2) + assert block.conv2.in_channels == 128 + assert block.conv2.out_channels == 128 + assert block.conv2.kernel_size == (3, 3) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 128, 28, 28]) + + # forward with checkpointing + block = BasicBlock(64, 64, with_cp=True) + assert block.with_cp + x = torch.randn(1, 64, 56, 56, requires_grad=True) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_bottleneck(): + # style must be in ['pytorch', 'caffe'] + with pytest.raises(AssertionError): + Bottleneck(64, 64, style='tensorflow') + + # expansion must be divisible by out_channels + with pytest.raises(AssertionError): + Bottleneck(64, 64, expansion=3) + + # Test Bottleneck style + block = Bottleneck(64, 64, stride=2, style='pytorch') + assert block.conv1.stride == (1, 1) + assert block.conv2.stride == (2, 2) + block = Bottleneck(64, 64, stride=2, style='caffe') + assert block.conv1.stride == (2, 2) + assert block.conv2.stride == (1, 1) + + # Bottleneck with stride 1 + block = Bottleneck(64, 64, style='pytorch') + assert block.in_channels == 64 + assert block.mid_channels == 16 + assert block.out_channels == 64 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 16 + assert block.conv1.kernel_size == (1, 1) + assert block.conv2.in_channels == 16 + assert block.conv2.out_channels == 16 + assert block.conv2.kernel_size == (3, 3) + assert block.conv3.in_channels == 16 + assert block.conv3.out_channels == 64 + assert block.conv3.kernel_size == (1, 1) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == (1, 64, 56, 56) + + # Bottleneck with stride 1 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1), nn.BatchNorm2d(128)) + block = Bottleneck(64, 128, style='pytorch', downsample=downsample) + assert block.in_channels == 64 + assert block.mid_channels == 32 + assert block.out_channels == 128 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 
32 + assert block.conv1.kernel_size == (1, 1) + assert block.conv2.in_channels == 32 + assert block.conv2.out_channels == 32 + assert block.conv2.kernel_size == (3, 3) + assert block.conv3.in_channels == 32 + assert block.conv3.out_channels == 128 + assert block.conv3.kernel_size == (1, 1) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == (1, 128, 56, 56) + + # Bottleneck with stride 2 and downsample + downsample = nn.Sequential( + nn.Conv2d(64, 128, kernel_size=1, stride=2), nn.BatchNorm2d(128)) + block = Bottleneck( + 64, 128, stride=2, style='pytorch', downsample=downsample) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == (1, 128, 28, 28) + + # Bottleneck with expansion 2 + block = Bottleneck(64, 64, style='pytorch', expansion=2) + assert block.in_channels == 64 + assert block.mid_channels == 32 + assert block.out_channels == 64 + assert block.conv1.in_channels == 64 + assert block.conv1.out_channels == 32 + assert block.conv1.kernel_size == (1, 1) + assert block.conv2.in_channels == 32 + assert block.conv2.out_channels == 32 + assert block.conv2.kernel_size == (3, 3) + assert block.conv3.in_channels == 32 + assert block.conv3.out_channels == 64 + assert block.conv3.kernel_size == (1, 1) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == (1, 64, 56, 56) + + # Test Bottleneck with checkpointing + block = Bottleneck(64, 64, with_cp=True) + block.train() + assert block.with_cp + x = torch.randn(1, 64, 56, 56, requires_grad=True) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_basicblock_reslayer(): + # 3 BasicBlock w/o downsample + layer = ResLayer(BasicBlock, 3, 32, 32) + assert len(layer) == 3 + for i in range(3): + assert layer[i].in_channels == 32 + assert layer[i].out_channels == 32 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 32, 56, 56) + + # 3 BasicBlock w/ stride 1 and downsample + layer = ResLayer(BasicBlock, 3, 32, 64) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].downsample is not None and len(layer[0].downsample) == 2 + assert isinstance(layer[0].downsample[0], nn.Conv2d) + assert layer[0].downsample[0].stride == (1, 1) + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 56, 56) + + # 3 BasicBlock w/ stride 2 and downsample + layer = ResLayer(BasicBlock, 3, 32, 64, stride=2) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 2 + assert layer[0].downsample is not None and len(layer[0].downsample) == 2 + assert isinstance(layer[0].downsample[0], nn.Conv2d) + assert layer[0].downsample[0].stride == (2, 2) + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 28, 28) + + # 3 BasicBlock w/ stride 2 and downsample with avg pool + layer = ResLayer(BasicBlock, 3, 32, 64, stride=2, avg_down=True) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 2 + assert layer[0].downsample is not None and len(layer[0].downsample) == 3 + assert 
isinstance(layer[0].downsample[0], nn.AvgPool2d) + assert layer[0].downsample[0].stride == 2 + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 28, 28) + + +def test_bottleneck_reslayer(): + # 3 Bottleneck w/o downsample + layer = ResLayer(Bottleneck, 3, 32, 32) + assert len(layer) == 3 + for i in range(3): + assert layer[i].in_channels == 32 + assert layer[i].out_channels == 32 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 32, 56, 56) + + # 3 Bottleneck w/ stride 1 and downsample + layer = ResLayer(Bottleneck, 3, 32, 64) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 1 + assert layer[0].conv1.out_channels == 16 + assert layer[0].downsample is not None and len(layer[0].downsample) == 2 + assert isinstance(layer[0].downsample[0], nn.Conv2d) + assert layer[0].downsample[0].stride == (1, 1) + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].conv1.out_channels == 16 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 56, 56) + + # 3 Bottleneck w/ stride 2 and downsample + layer = ResLayer(Bottleneck, 3, 32, 64, stride=2) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 2 + assert layer[0].conv1.out_channels == 16 + assert layer[0].downsample is not None and len(layer[0].downsample) == 2 + assert isinstance(layer[0].downsample[0], nn.Conv2d) + assert layer[0].downsample[0].stride == (2, 2) + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].conv1.out_channels == 16 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 28, 28) + + # 3 Bottleneck w/ stride 2 and downsample with avg pool + layer = ResLayer(Bottleneck, 3, 32, 64, stride=2, avg_down=True) + assert len(layer) == 3 + assert layer[0].in_channels == 32 + assert layer[0].out_channels == 64 + assert layer[0].stride == 2 + assert layer[0].conv1.out_channels == 16 + assert layer[0].downsample is not None and len(layer[0].downsample) == 3 + assert isinstance(layer[0].downsample[0], nn.AvgPool2d) + assert layer[0].downsample[0].stride == 2 + for i in range(1, 3): + assert layer[i].in_channels == 64 + assert layer[i].out_channels == 64 + assert layer[i].conv1.out_channels == 16 + assert layer[i].stride == 1 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 64, 28, 28) + + # 3 Bottleneck with custom expansion + layer = ResLayer(Bottleneck, 3, 32, 32, expansion=2) + assert len(layer) == 3 + for i in range(3): + assert layer[i].in_channels == 32 + assert layer[i].out_channels == 32 + assert layer[i].stride == 1 + assert layer[i].conv1.out_channels == 16 + assert layer[i].downsample is None + x = torch.randn(1, 32, 56, 56) + x_out = layer(x) + assert x_out.shape == (1, 32, 56, 56) + + +def test_resnet(): + """Test resnet backbone.""" + with pytest.raises(KeyError): + # ResNet depth should be in [18, 34, 50, 101, 152] + ResNet(20) + + with 
pytest.raises(AssertionError): + # In ResNet: 1 <= num_stages <= 4 + ResNet(50, num_stages=0) + + with pytest.raises(AssertionError): + # In ResNet: 1 <= num_stages <= 4 + ResNet(50, num_stages=5) + + with pytest.raises(AssertionError): + # len(strides) == len(dilations) == num_stages + ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) + + with pytest.raises(TypeError): + # pretrained must be a string path + model = ResNet(50) + model.init_weights(pretrained=0) + + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + ResNet(50, style='tensorflow') + + # Test ResNet50 norm_eval=True + model = ResNet(50, norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test ResNet50 with torchvision pretrained weight + model = ResNet( + depth=50, + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test ResNet50 with first stage frozen + frozen_stages = 1 + model = ResNet(50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + assert model.norm1.training is False + for layer in [model.conv1, model.norm1]: + for param in layer.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test ResNet18 forward + model = ResNet(18, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 64, 56, 56) + assert feat[1].shape == (1, 128, 28, 28) + assert feat[2].shape == (1, 256, 14, 14) + assert feat[3].shape == (1, 512, 7, 7) + + # Test ResNet50 with BatchNorm forward + model = ResNet(50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test ResNet50 with DropPath forward + model = ResNet(50, out_indices=(0, 1, 2, 3), drop_path_rate=0.5) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test ResNet50 with layers 1, 2, 3 out forward + model = ResNet(50, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + + # Test ResNet50 with layers 3 (top feature maps) out forward + model = ResNet(50, out_indices=(3, )) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == (1, 2048, 7, 7) + + # Test ResNet50 with checkpoint forward + model = ResNet(50, out_indices=(0, 1, 2, 3), with_cp=True) + for m in model.modules(): + if is_block(m): + assert m.with_cp + model.init_weights() + model.train() + 
+ imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # zero initialization of residual blocks + model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + assert all_zeros(m.norm3) + elif isinstance(m, BasicBlock): + assert all_zeros(m.norm2) + + # non-zero initialization of residual blocks + model = ResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=False) + model.init_weights() + for m in model.modules(): + if isinstance(m, Bottleneck): + assert not all_zeros(m.norm3) + elif isinstance(m, BasicBlock): + assert not all_zeros(m.norm2) + + +def test_resnet_v1c(): + model = ResNetV1c(depth=50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + assert len(model.stem) == 3 + for i in range(3): + assert isinstance(model.stem[i], ConvModule) + + imgs = torch.randn(1, 3, 224, 224) + feat = model.stem(imgs) + assert feat.shape == (1, 64, 112, 112) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test ResNet50V1d with first stage frozen + frozen_stages = 1 + model = ResNetV1d(depth=50, frozen_stages=frozen_stages) + assert len(model.stem) == 3 + for i in range(3): + assert isinstance(model.stem[i], ConvModule) + model.init_weights() + model.train() + check_norm_state(model.stem, False) + for param in model.stem.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + +def test_resnet_v1d(): + model = ResNetV1d(depth=50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + assert len(model.stem) == 3 + for i in range(3): + assert isinstance(model.stem[i], ConvModule) + + imgs = torch.randn(1, 3, 224, 224) + feat = model.stem(imgs) + assert feat.shape == (1, 64, 112, 112) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 56, 56) + assert feat[1].shape == (1, 512, 28, 28) + assert feat[2].shape == (1, 1024, 14, 14) + assert feat[3].shape == (1, 2048, 7, 7) + + # Test ResNet50V1d with first stage frozen + frozen_stages = 1 + model = ResNetV1d(depth=50, frozen_stages=frozen_stages) + assert len(model.stem) == 3 + for i in range(3): + assert isinstance(model.stem[i], ConvModule) + model.init_weights() + model.train() + check_norm_state(model.stem, False) + for param in model.stem.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + +def test_resnet_half_channel(): + model = ResNet(50, base_channels=32, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 128, 56, 56) + assert feat[1].shape == (1, 256, 28, 28) + assert feat[2].shape == (1, 512, 14, 14) + assert 
feat[3].shape == (1, 1024, 7, 7) diff --git a/tests/test_models/test_backbones/test_resnet_cifar.py b/tests/test_models/test_backbones/test_resnet_cifar.py new file mode 100644 index 0000000..4586566 --- /dev/null +++ b/tests/test_models/test_backbones/test_resnet_cifar.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.models.backbones import ResNet_CIFAR + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_resnet_cifar(): + # deep_stem must be False + with pytest.raises(AssertionError): + ResNet_CIFAR(depth=18, deep_stem=True) + + # test the feature map size when depth is 18 + model = ResNet_CIFAR(depth=18, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 32, 32) + feat = model.conv1(imgs) + assert feat.shape == (1, 64, 32, 32) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 64, 32, 32) + assert feat[1].shape == (1, 128, 16, 16) + assert feat[2].shape == (1, 256, 8, 8) + assert feat[3].shape == (1, 512, 4, 4) + + # test the feature map size when depth is 50 + model = ResNet_CIFAR(depth=50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 32, 32) + feat = model.conv1(imgs) + assert feat.shape == (1, 64, 32, 32) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == (1, 256, 32, 32) + assert feat[1].shape == (1, 512, 16, 16) + assert feat[2].shape == (1, 1024, 8, 8) + assert feat[3].shape == (1, 2048, 4, 4) + + # Test ResNet_CIFAR with first stage frozen + frozen_stages = 1 + model = ResNet_CIFAR(depth=50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + check_norm_state([model.norm1], False) + for param in model.conv1.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False diff --git a/tests/test_models/test_backbones/test_resnext.py b/tests/test_models/test_backbones/test_resnext.py new file mode 100644 index 0000000..5c33f9a --- /dev/null +++ b/tests/test_models/test_backbones/test_resnext.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
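+# Tests for the ResNeXt grouped-convolution Bottleneck (groups /
+# width_per_group settings) and the output shapes of the ResNeXt backbone.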
+import pytest +import torch + +from mmpretrain.models.backbones import ResNeXt +from mmpretrain.models.backbones.resnext import Bottleneck as BottleneckX + + +def test_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + BottleneckX(64, 64, groups=32, width_per_group=4, style='tensorflow') + + # Test ResNeXt Bottleneck structure + block = BottleneckX( + 64, 256, groups=32, width_per_group=4, stride=2, style='pytorch') + assert block.conv2.stride == (2, 2) + assert block.conv2.groups == 32 + assert block.conv2.out_channels == 128 + + # Test ResNeXt Bottleneck forward + block = BottleneckX(64, 64, base_channels=16, groups=32, width_per_group=4) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_resnext(): + with pytest.raises(KeyError): + # ResNeXt depth should be in [50, 101, 152] + ResNeXt(depth=18) + + # Test ResNeXt with group 32, width_per_group 4 + model = ResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(0, 1, 2, 3)) + for m in model.modules(): + if isinstance(m, BottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test ResNeXt with group 32, width_per_group 4 and layers 3 out forward + model = ResNeXt(depth=50, groups=32, width_per_group=4, out_indices=(3, )) + for m in model.modules(): + if isinstance(m, BottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 2048, 7, 7]) diff --git a/tests/test_models/test_backbones/test_revvit.py b/tests/test_models/test_backbones/test_revvit.py new file mode 100644 index 0000000..f18ca78 --- /dev/null +++ b/tests/test_models/test_backbones/test_revvit.py @@ -0,0 +1,131 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
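+# Tests for RevVisionTransformer: arch settings, weight/pos_embed init,
+# checkpoint loading with resized position embeddings, and forward shapes
+# (the expected feature width is 768 * 2, presumably because the reversible
+# design keeps two residual streams that are concatenated at the output).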
+import os +import tempfile +from copy import deepcopy +from unittest import TestCase + +import torch +from mmengine.runner import load_checkpoint, save_checkpoint + +from mmpretrain.models.backbones import RevVisionTransformer +from .utils import timm_resize_pos_embed + + +class TestRevVisionTransformer(TestCase): + + def setUp(self): + self.cfg = dict( + arch='b', img_size=224, patch_size=16, drop_path_rate=0.1) + + def test_structure(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + RevVisionTransformer(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + } + RevVisionTransformer(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 128, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 1024 + } + model = RevVisionTransformer(**cfg) + self.assertEqual(model.embed_dims, 128) + self.assertEqual(model.num_layers, 24) + for layer in model.layers: + self.assertEqual(layer.attn.num_heads, 16) + self.assertEqual(layer.ffn.feedforward_channels, 1024) + + # Test model structure + cfg = deepcopy(self.cfg) + model = RevVisionTransformer(**cfg) + self.assertEqual(len(model.layers), 12) + dpr_inc = 0.1 / (12 - 1) + dpr = 0 + for layer in model.layers: + self.assertEqual(layer.attn.embed_dims, 768) + self.assertEqual(layer.attn.num_heads, 12) + self.assertEqual(layer.ffn.feedforward_channels, 3072) + # self.assertAlmostEqual(layer.attn.out_drop.drop_prob, dpr) + # self.assertAlmostEqual(layer.ffn.dropout_layer.drop_prob, dpr) + dpr += dpr_inc + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = RevVisionTransformer(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue(torch.allclose(model.pos_embed, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse(torch.allclose(model.pos_embed, torch.tensor(0.))) + + # test load checkpoint + pretrain_pos_embed = model.pos_embed.clone().detach() + tmpdir = tempfile.gettempdir() + checkpoint = os.path.join(tmpdir, 'test.pth') + save_checkpoint(model.state_dict(), checkpoint) + cfg = deepcopy(self.cfg) + model = RevVisionTransformer(**cfg) + load_checkpoint(model, checkpoint, strict=True) + self.assertTrue(torch.allclose(model.pos_embed, pretrain_pos_embed)) + + # test load checkpoint with different img_size + cfg = deepcopy(self.cfg) + cfg['img_size'] = 384 + model = RevVisionTransformer(**cfg) + load_checkpoint(model, checkpoint, strict=True) + resized_pos_embed = timm_resize_pos_embed( + pretrain_pos_embed, model.pos_embed, num_tokens=0) + self.assertTrue(torch.allclose(model.pos_embed, resized_pos_embed)) + + os.remove(checkpoint) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['out_type'] = 'avg_featmap' + model = RevVisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + 
self.assertEqual(patch_token.shape, (1, 768 * 2)) + + # Test forward with dynamic input size + imgs1 = torch.randn(1, 3, 224, 224) + imgs2 = torch.randn(1, 3, 256, 256) + imgs3 = torch.randn(1, 3, 256, 309) + cfg = deepcopy(self.cfg) + model = RevVisionTransformer(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + avg_featmap = outs[-1] + self.assertEqual(avg_featmap.shape, (1, 768 * 2)) diff --git a/tests/test_models/test_backbones/test_riformer.py b/tests/test_models/test_backbones/test_riformer.py new file mode 100644 index 0000000..86847ee --- /dev/null +++ b/tests/test_models/test_backbones/test_riformer.py @@ -0,0 +1,168 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from unittest import TestCase + +import torch +import torch.nn as nn + +from mmpretrain.models.backbones import RIFormer +from mmpretrain.models.backbones.riformer import RIFormerBlock + + +class TestRIFormer(TestCase): + + def setUp(self): + arch = 's12' + self.cfg = dict(arch=arch, drop_path_rate=0.1) + self.arch = RIFormer.arch_settings[arch] + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'Unavailable arch'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + RIFormer(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'must have "layers"'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 96, + 'num_heads': [3, 6, 12, 16], + } + RIFormer(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + layers = [2, 2, 4, 2] + embed_dims = [6, 12, 6, 12] + mlp_ratios = [2, 3, 4, 4] + layer_scale_init_value = 1e-4 + cfg['arch'] = dict( + layers=layers, + embed_dims=embed_dims, + mlp_ratios=mlp_ratios, + layer_scale_init_value=layer_scale_init_value, + ) + model = RIFormer(**cfg) + for i, stage in enumerate(model.network): + if not isinstance(stage, RIFormerBlock): + continue + self.assertEqual(len(stage), layers[i]) + self.assertEqual(stage[0].mlp.fc1.in_channels, embed_dims[i]) + self.assertEqual(stage[0].mlp.fc1.out_channels, + embed_dims[i] * mlp_ratios[i]) + self.assertTrue( + torch.allclose(stage[0].layer_scale_1, + torch.tensor(layer_scale_init_value))) + self.assertTrue( + torch.allclose(stage[0].layer_scale_2, + torch.tensor(layer_scale_init_value))) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = RIFormer(**cfg) + ori_weight = model.patch_embed.proj.weight.clone().detach() + + model.init_weights() + initialized_weight = model.patch_embed.proj.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = RIFormer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 512, 7, 7)) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 2, 4, 6) + model = RIFormer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for dim, stride, out in zip(self.arch['embed_dims'], [1, 2, 4, 8], + outs): + self.assertEqual(out.shape, (1, dim, 56 // stride, 56 // stride)) + + def test_repameterization(self): + # Test eval of "train" mode and "deploy" mode + imgs = 
torch.randn(1, 3, 224, 224) + gap = nn.AdaptiveAvgPool2d(output_size=(1)) + fc = nn.Linear(self.arch['embed_dims'][3], 10) + + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 2, 4, 6) + model = RIFormer(**cfg) + model.eval() + feats = model(imgs) + self.assertIsInstance(feats, tuple) + feat = feats[-1] + pred = fc(gap(feat).flatten(1)) + model.switch_to_deploy() + for m in model.modules(): + if isinstance(m, RIFormerBlock): + assert m.deploy is True + feats_deploy = model(imgs) + pred_deploy = fc(gap(feats_deploy[-1]).flatten(1)) + for i in range(4): + torch.allclose(feats[i], feats_deploy[i]) + torch.allclose(pred, pred_deploy) + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = RIFormer(**cfg) + layers = self.arch['layers'] + for i, block in enumerate(model.network): + expect_prob = 0.2 / (sum(layers) - 1) * i + if hasattr(block, 'drop_path'): + if expect_prob == 0: + self.assertIsInstance(block.drop_path, torch.nn.Identity) + else: + self.assertAlmostEqual(block.drop_path.drop_prob, + expect_prob) + + # test with first stage frozen. + cfg = deepcopy(self.cfg) + frozen_stages = 1 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 2, 4, 6) + model = RIFormer(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. + self.assertFalse(model.patch_embed.training) + for param in model.patch_embed.parameters(): + self.assertFalse(param.requires_grad) + for i in range(frozen_stages): + module = model.network[i] + for param in module.parameters(): + self.assertFalse(param.requires_grad) + for param in model.norm0.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 7): + module = model.network[i] + for param in module.parameters(): + self.assertTrue(param.requires_grad) + if hasattr(model, f'norm{i}'): + norm = getattr(model, f'norm{i}') + for param in norm.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_seresnet.py b/tests/test_models/test_backbones/test_seresnet.py new file mode 100644 index 0000000..d7f9dff --- /dev/null +++ b/tests/test_models/test_backbones/test_seresnet.py @@ -0,0 +1,247 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
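+# Tests for SELayer, SEBottleneck and SE-equipped ResLayer behaviour
+# (se_ratio, stride, avg_down), plus SEResNet backbone output shapes,
+# frozen stages and norm_eval handling.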
+import pytest +import torch +from torch.nn.modules import AvgPool2d +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import SEResNet +from mmpretrain.models.backbones.resnet import ResLayer +from mmpretrain.models.backbones.seresnet import SEBottleneck, SELayer + + +def all_zeros(modules): + """Check if the weight(and bias) is all zero.""" + weight_zero = torch.equal(modules.weight.data, + torch.zeros_like(modules.weight.data)) + if hasattr(modules, 'bias'): + bias_zero = torch.equal(modules.bias.data, + torch.zeros_like(modules.bias.data)) + else: + bias_zero = True + + return weight_zero and bias_zero + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_selayer(): + # Test selayer forward + layer = SELayer(64) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test selayer forward with different ratio + layer = SELayer(64, ratio=8) + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_bottleneck(): + + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + SEBottleneck(64, 64, style='tensorflow') + + # Test SEBottleneck with checkpoint forward + block = SEBottleneck(64, 64, with_cp=True) + assert block.with_cp + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test Bottleneck style + block = SEBottleneck(64, 256, stride=2, style='pytorch') + assert block.conv1.stride == (1, 1) + assert block.conv2.stride == (2, 2) + block = SEBottleneck(64, 256, stride=2, style='caffe') + assert block.conv1.stride == (2, 2) + assert block.conv2.stride == (1, 1) + + # Test Bottleneck forward + block = SEBottleneck(64, 64) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_res_layer(): + # Test ResLayer of 3 Bottleneck w\o downsample + layer = ResLayer(SEBottleneck, 3, 64, 64, se_ratio=16) + assert len(layer) == 3 + assert layer[0].conv1.in_channels == 64 + assert layer[0].conv1.out_channels == 16 + for i in range(1, len(layer)): + assert layer[i].conv1.in_channels == 64 + assert layer[i].conv1.out_channels == 16 + for i in range(len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + # Test ResLayer of 3 SEBottleneck with downsample + layer = ResLayer(SEBottleneck, 3, 64, 256, se_ratio=16) + assert layer[0].downsample[0].out_channels == 256 + for i in range(1, len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 256, 56, 56]) + + # Test ResLayer of 3 SEBottleneck with stride=2 + layer = ResLayer(SEBottleneck, 3, 64, 256, stride=2, se_ratio=8) + assert layer[0].downsample[0].out_channels == 256 + assert layer[0].downsample[0].stride == (2, 2) + for i in range(1, len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 256, 28, 28]) + + # Test ResLayer of 3 SEBottleneck with stride=2 and average downsample + layer = ResLayer( + SEBottleneck, 3, 64, 256, stride=2, avg_down=True, se_ratio=8) + assert isinstance(layer[0].downsample[0], AvgPool2d) + 
assert layer[0].downsample[1].out_channels == 256 + assert layer[0].downsample[1].stride == (1, 1) + for i in range(1, len(layer)): + assert layer[i].downsample is None + x = torch.randn(1, 64, 56, 56) + x_out = layer(x) + assert x_out.shape == torch.Size([1, 256, 28, 28]) + + +def test_seresnet(): + """Test resnet backbone.""" + with pytest.raises(KeyError): + # SEResNet depth should be in [50, 101, 152] + SEResNet(20) + + with pytest.raises(AssertionError): + # In SEResNet: 1 <= num_stages <= 4 + SEResNet(50, num_stages=0) + + with pytest.raises(AssertionError): + # In SEResNet: 1 <= num_stages <= 4 + SEResNet(50, num_stages=5) + + with pytest.raises(AssertionError): + # len(strides) == len(dilations) == num_stages + SEResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3) + + with pytest.raises(TypeError): + # pretrained must be a string path + model = SEResNet(50) + model.init_weights(pretrained=0) + + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + SEResNet(50, style='tensorflow') + + # Test SEResNet50 norm_eval=True + model = SEResNet(50, norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test SEResNet50 with torchvision pretrained weight + model = SEResNet( + depth=50, + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test SEResNet50 with first stage frozen + frozen_stages = 1 + model = SEResNet(50, frozen_stages=frozen_stages) + model.init_weights() + model.train() + assert model.norm1.training is False + for layer in [model.conv1, model.norm1]: + for param in layer.parameters(): + assert param.requires_grad is False + for i in range(1, frozen_stages + 1): + layer = getattr(model, f'layer{i}') + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test SEResNet50 with BatchNorm forward + model = SEResNet(50, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test SEResNet50 with layers 1, 2, 3 out forward + model = SEResNet(50, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + + # Test SEResNet50 with layers 3 (top feature maps) out forward + model = SEResNet(50, out_indices=(3, )) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 2048, 7, 7]) + + # Test SEResNet50 with checkpoint forward + model = SEResNet(50, out_indices=(0, 1, 2, 3), with_cp=True) + for m in model.modules(): + if isinstance(m, SEBottleneck): + assert m.with_cp + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 
28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test SEResNet50 zero initialization of residual + model = SEResNet(50, out_indices=(0, 1, 2, 3), zero_init_residual=True) + model.init_weights() + for m in model.modules(): + if isinstance(m, SEBottleneck): + assert all_zeros(m.norm3) + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) diff --git a/tests/test_models/test_backbones/test_seresnext.py b/tests/test_models/test_backbones/test_seresnext.py new file mode 100644 index 0000000..7b84f84 --- /dev/null +++ b/tests/test_models/test_backbones/test_seresnext.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch + +from mmpretrain.models.backbones import SEResNeXt +from mmpretrain.models.backbones.seresnext import SEBottleneck as SEBottleneckX + + +def test_bottleneck(): + with pytest.raises(AssertionError): + # Style must be in ['pytorch', 'caffe'] + SEBottleneckX(64, 64, groups=32, width_per_group=4, style='tensorflow') + + # Test SEResNeXt Bottleneck structure + block = SEBottleneckX( + 64, 256, groups=32, width_per_group=4, stride=2, style='pytorch') + assert block.width_per_group == 4 + assert block.conv2.stride == (2, 2) + assert block.conv2.groups == 32 + assert block.conv2.out_channels == 128 + assert block.conv2.out_channels == block.mid_channels + + # Test SEResNeXt Bottleneck structure (groups=1) + block = SEBottleneckX( + 64, 256, groups=1, width_per_group=4, stride=2, style='pytorch') + assert block.conv2.stride == (2, 2) + assert block.conv2.groups == 1 + assert block.conv2.out_channels == 64 + assert block.mid_channels == 64 + assert block.conv2.out_channels == block.mid_channels + + # Test SEResNeXt Bottleneck forward + block = SEBottleneckX( + 64, 64, base_channels=16, groups=32, width_per_group=4) + x = torch.randn(1, 64, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size([1, 64, 56, 56]) + + +def test_seresnext(): + with pytest.raises(KeyError): + # SEResNeXt depth should be in [50, 101, 152] + SEResNeXt(depth=18) + + # Test SEResNeXt with group 32, width_per_group 4 + model = SEResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(0, 1, 2, 3)) + for m in model.modules(): + if isinstance(m, SEBottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size([1, 256, 56, 56]) + assert feat[1].shape == torch.Size([1, 512, 28, 28]) + assert feat[2].shape == torch.Size([1, 1024, 14, 14]) + assert feat[3].shape == torch.Size([1, 2048, 7, 7]) + + # Test SEResNeXt with group 32, width_per_group 4 and layers 3 out forward + model = SEResNeXt( + depth=50, groups=32, width_per_group=4, out_indices=(3, )) + for m in model.modules(): + if isinstance(m, SEBottleneckX): + assert m.conv2.groups == 32 + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size([1, 2048, 7, 7]) diff --git a/tests/test_models/test_backbones/test_shufflenet_v1.py b/tests/test_models/test_backbones/test_shufflenet_v1.py new file mode 100644 index 0000000..3a55acf --- 
/dev/null
+++ b/tests/test_models/test_backbones/test_shufflenet_v1.py
@@ -0,0 +1,246 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import pytest
+import torch
+from torch.nn.modules import GroupNorm
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from mmpretrain.models.backbones import ShuffleNetV1
+from mmpretrain.models.backbones.shufflenet_v1 import ShuffleUnit
+
+
+def is_block(modules):
+    """Check if is a ShuffleNetV1 building block."""
+    if isinstance(modules, (ShuffleUnit, )):
+        return True
+    return False
+
+
+def is_norm(modules):
+    """Check if is one of the norms."""
+    if isinstance(modules, (GroupNorm, _BatchNorm)):
+        return True
+    return False
+
+
+def check_norm_state(modules, train_state):
+    """Check if norm layer is in correct train state."""
+    for mod in modules:
+        if isinstance(mod, _BatchNorm):
+            if mod.training != train_state:
+                return False
+    return True
+
+
+def test_shufflenetv1_shuffleunit():
+
+    with pytest.raises(ValueError):
+        # combine must be in ['add', 'concat']
+        ShuffleUnit(24, 16, groups=3, first_block=True, combine='test')
+
+    with pytest.raises(AssertionError):
+        # in_channels must be equal to out_channels when combine='add'
+        ShuffleUnit(64, 24, groups=4, first_block=True, combine='add')
+
+    # Test ShuffleUnit with combine='add'
+    block = ShuffleUnit(24, 24, groups=3, first_block=True, combine='add')
+    x = torch.randn(1, 24, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == torch.Size((1, 24, 56, 56))
+
+    # Test ShuffleUnit with combine='concat'
+    block = ShuffleUnit(24, 240, groups=3, first_block=True, combine='concat')
+    x = torch.randn(1, 24, 56, 56)
+    x_out = block(x)
+    assert x_out.shape == torch.Size((1, 240, 28, 28))
+
+    # Test ShuffleUnit with checkpoint forward
+    block = ShuffleUnit(
+        24, 24, groups=3, first_block=True, combine='add', with_cp=True)
+    assert block.with_cp
+    x = torch.randn(1, 24, 56, 56)
+    x.requires_grad = True
+    x_out = block(x)
+    assert x_out.shape == torch.Size((1, 24, 56, 56))
+
+
+def test_shufflenetv1_backbone():
+
+    with pytest.raises(ValueError):
+        # frozen_stages must be in range(-1, 4)
+        ShuffleNetV1(frozen_stages=10)
+
+    with pytest.raises(ValueError):
+        # the item in out_indices must be in range(0, 4)
+        ShuffleNetV1(out_indices=[5])
+
+    with pytest.raises(ValueError):
+        # groups must be in [1, 2, 3, 4, 8]
+        ShuffleNetV1(groups=10)
+
+    with pytest.raises(TypeError):
+        # pretrained must be str or None
+        model = ShuffleNetV1()
+        model.init_weights(pretrained=1)
+
+    # Test ShuffleNetV1 norm state
+    model = ShuffleNetV1()
+    model.init_weights()
+    model.train()
+    assert check_norm_state(model.modules(), True)
+
+    # Test ShuffleNetV1 with first stage frozen
+    frozen_stages = 1
+    model = ShuffleNetV1(frozen_stages=frozen_stages, out_indices=(0, 1, 2))
+    model.init_weights()
+    model.train()
+    for param in model.conv1.parameters():
+        assert param.requires_grad is False
+    for i in range(frozen_stages):
+        layer = model.layers[i]
+        for mod in layer.modules():
+            if isinstance(mod, _BatchNorm):
+                assert mod.training is False
+        for param in layer.parameters():
+            assert param.requires_grad is False
+
+    # Test ShuffleNetV1 forward with groups=1
+    model = ShuffleNetV1(groups=1, out_indices=(0, 1, 2))
+    model.init_weights()
+    model.train()
+
+    for m in model.modules():
+        if is_norm(m):
+            assert isinstance(m, _BatchNorm)
+
+    imgs = torch.randn(1, 3, 224, 224)
+    feat = model(imgs)
+    assert len(feat) == 3
+    assert feat[0].shape == torch.Size((1, 144, 28, 28))
+    assert feat[1].shape == torch.Size((1, 288, 14, 14))
+    assert
feat[2].shape == torch.Size((1, 576, 7, 7)) + + # Test ShuffleNetV1 forward with groups=2 + model = ShuffleNetV1(groups=2, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 200, 28, 28)) + assert feat[1].shape == torch.Size((1, 400, 14, 14)) + assert feat[2].shape == torch.Size((1, 800, 7, 7)) + + # Test ShuffleNetV1 forward with groups=3 + model = ShuffleNetV1(groups=3, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 240, 28, 28)) + assert feat[1].shape == torch.Size((1, 480, 14, 14)) + assert feat[2].shape == torch.Size((1, 960, 7, 7)) + + # Test ShuffleNetV1 forward with groups=4 + model = ShuffleNetV1(groups=4, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 272, 28, 28)) + assert feat[1].shape == torch.Size((1, 544, 14, 14)) + assert feat[2].shape == torch.Size((1, 1088, 7, 7)) + + # Test ShuffleNetV1 forward with groups=8 + model = ShuffleNetV1(groups=8, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 384, 28, 28)) + assert feat[1].shape == torch.Size((1, 768, 14, 14)) + assert feat[2].shape == torch.Size((1, 1536, 7, 7)) + + # Test ShuffleNetV1 forward with GroupNorm forward + model = ShuffleNetV1( + groups=3, + norm_cfg=dict(type='GN', num_groups=2, requires_grad=True), + out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == torch.Size((1, 240, 28, 28)) + assert feat[1].shape == torch.Size((1, 480, 14, 14)) + assert feat[2].shape == torch.Size((1, 960, 7, 7)) + + # Test ShuffleNetV1 forward with layers 1, 2 forward + model = ShuffleNetV1(groups=3, out_indices=(1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 2 + assert feat[0].shape == torch.Size((1, 480, 14, 14)) + assert feat[1].shape == torch.Size((1, 960, 7, 7)) + + # Test ShuffleNetV1 forward with layers 2 forward + model = ShuffleNetV1(groups=3, out_indices=(2, )) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 960, 7, 7)) + + # Test ShuffleNetV1 forward with checkpoint forward + model = ShuffleNetV1(groups=3, with_cp=True) + for m in model.modules(): + if is_block(m): + assert m.with_cp + + # Test ShuffleNetV1 with norm_eval + model = ShuffleNetV1(norm_eval=True) + 
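+    # norm_eval=True keeps every BatchNorm layer in eval mode even after
+    # model.train(), which check_norm_state(...) verifies right below.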
model.init_weights() + model.train() + + assert check_norm_state(model.modules(), False) diff --git a/tests/test_models/test_backbones/test_shufflenet_v2.py b/tests/test_models/test_backbones/test_shufflenet_v2.py new file mode 100644 index 0000000..84bcec1 --- /dev/null +++ b/tests/test_models/test_backbones/test_shufflenet_v2.py @@ -0,0 +1,205 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import ShuffleNetV2 +from mmpretrain.models.backbones.shufflenet_v2 import InvertedResidual + + +def is_block(modules): + """Check if is ResNet building block.""" + if isinstance(modules, (InvertedResidual, )): + return True + return False + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_shufflenetv2_invertedresidual(): + + with pytest.raises(AssertionError): + # when stride==1, in_channels should be equal to out_channels // 2 * 2 + InvertedResidual(24, 32, stride=1) + + with pytest.raises(AssertionError): + # when in_channels != out_channels // 2 * 2, stride should not be + # equal to 1. + InvertedResidual(24, 32, stride=1) + + # Test InvertedResidual forward + block = InvertedResidual(24, 48, stride=2) + x = torch.randn(1, 24, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 48, 28, 28)) + + # Test InvertedResidual with checkpoint forward + block = InvertedResidual(48, 48, stride=1, with_cp=True) + assert block.with_cp + x = torch.randn(1, 48, 56, 56) + x.requires_grad = True + x_out = block(x) + assert x_out.shape == torch.Size((1, 48, 56, 56)) + + +def test_shufflenetv2_backbone(): + + with pytest.raises(ValueError): + # groups must be in 0.5, 1.0, 1.5, 2.0] + ShuffleNetV2(widen_factor=3.0) + + with pytest.raises(ValueError): + # frozen_stages must be in [0, 1, 2, 3] + ShuffleNetV2(widen_factor=1.0, frozen_stages=4) + + with pytest.raises(ValueError): + # out_indices must be in [0, 1, 2, 3] + ShuffleNetV2(widen_factor=1.0, out_indices=(4, )) + + with pytest.raises(TypeError): + # pretrained must be str or None + model = ShuffleNetV2() + model.init_weights(pretrained=1) + + # Test ShuffleNetV2 norm state + model = ShuffleNetV2() + model.init_weights() + model.train() + assert check_norm_state(model.modules(), True) + + # Test ShuffleNetV2 with first stage frozen + frozen_stages = 1 + model = ShuffleNetV2(frozen_stages=frozen_stages) + model.init_weights() + model.train() + for param in model.conv1.parameters(): + assert param.requires_grad is False + for i in range(0, frozen_stages): + layer = model.layers[i] + for mod in layer.modules(): + if isinstance(mod, _BatchNorm): + assert mod.training is False + for param in layer.parameters(): + assert param.requires_grad is False + + # Test ShuffleNetV2 with norm_eval + model = ShuffleNetV2(norm_eval=True) + model.init_weights() + model.train() + + assert check_norm_state(model.modules(), False) + + # Test ShuffleNetV2 forward with widen_factor=0.5 + model = ShuffleNetV2(widen_factor=0.5, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = 
torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size((1, 48, 28, 28)) + assert feat[1].shape == torch.Size((1, 96, 14, 14)) + assert feat[2].shape == torch.Size((1, 192, 7, 7)) + + # Test ShuffleNetV2 forward with widen_factor=1.0 + model = ShuffleNetV2(widen_factor=1.0, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size((1, 116, 28, 28)) + assert feat[1].shape == torch.Size((1, 232, 14, 14)) + assert feat[2].shape == torch.Size((1, 464, 7, 7)) + + # Test ShuffleNetV2 forward with widen_factor=1.5 + model = ShuffleNetV2(widen_factor=1.5, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size((1, 176, 28, 28)) + assert feat[1].shape == torch.Size((1, 352, 14, 14)) + assert feat[2].shape == torch.Size((1, 704, 7, 7)) + + # Test ShuffleNetV2 forward with widen_factor=2.0 + model = ShuffleNetV2(widen_factor=2.0, out_indices=(0, 1, 2, 3)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 4 + assert feat[0].shape == torch.Size((1, 244, 28, 28)) + assert feat[1].shape == torch.Size((1, 488, 14, 14)) + assert feat[2].shape == torch.Size((1, 976, 7, 7)) + + # Test ShuffleNetV2 forward with layers 3 forward + model = ShuffleNetV2(widen_factor=1.0, out_indices=(2, )) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert isinstance(feat[0], torch.Tensor) + assert feat[0].shape == torch.Size((1, 464, 7, 7)) + + # Test ShuffleNetV2 forward with layers 1 2 forward + model = ShuffleNetV2(widen_factor=1.0, out_indices=(1, 2)) + model.init_weights() + model.train() + + for m in model.modules(): + if is_norm(m): + assert isinstance(m, _BatchNorm) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 2 + assert feat[0].shape == torch.Size((1, 232, 14, 14)) + assert feat[1].shape == torch.Size((1, 464, 7, 7)) + + # Test ShuffleNetV2 forward with checkpoint forward + model = ShuffleNetV2(widen_factor=1.0, with_cp=True) + for m in model.modules(): + if is_block(m): + assert m.with_cp diff --git a/tests/test_models/test_backbones/test_swin_transformer.py b/tests/test_models/test_backbones/test_swin_transformer.py new file mode 100644 index 0000000..1437dac --- /dev/null +++ b/tests/test_models/test_backbones/test_swin_transformer.py @@ -0,0 +1,255 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
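Before the Swin tests, a minimal forward sketch assembled from the dynamic-input assertions below; the 'tiny' arch and the ceil(H/32) output rule are exactly what the test class checks.

import math
import torch
from mmpretrain.models.backbones import SwinTransformer

model = SwinTransformer(arch='tiny', img_size=224)
for imgs in [torch.randn(1, 3, 224, 224), torch.randn(1, 3, 256, 309)]:
    # The backbone returns a tuple; the last stage has 768 channels and
    # downsamples by a factor of 32 in each spatial dimension.
    feat = model(imgs)[-1]
    assert feat.shape == (1, 768, math.ceil(imgs.shape[2] / 32),
                          math.ceil(imgs.shape[3] / 32))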
+import math +import os +import tempfile +from copy import deepcopy +from itertools import chain +from unittest import TestCase + +import torch +from mmengine.runner import load_checkpoint, save_checkpoint +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.models.backbones import SwinTransformer +from mmpretrain.models.backbones.swin_transformer import SwinBlock +from .utils import timm_resize_pos_embed + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +class TestSwinTransformer(TestCase): + + def setUp(self): + self.cfg = dict( + arch='tiny', img_size=224, patch_size=4, drop_path_rate=0.1) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + SwinTransformer(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 96, + 'num_heads': [3, 6, 12, 16], + } + SwinTransformer(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + depths = [2, 2, 4, 2] + num_heads = [6, 12, 6, 12] + cfg['arch'] = { + 'embed_dims': 256, + 'depths': depths, + 'num_heads': num_heads + } + model = SwinTransformer(**cfg) + for i, stage in enumerate(model.stages): + self.assertEqual(stage.embed_dims, 256 * (2**i)) + self.assertEqual(len(stage.blocks), depths[i]) + self.assertEqual(stage.blocks[0].attn.w_msa.num_heads, + num_heads[i]) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['use_abs_pos_embed'] = True + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = SwinTransformer(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue( + torch.allclose(model.absolute_pos_embed, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse( + torch.allclose(model.absolute_pos_embed, torch.tensor(0.))) + + pretrain_pos_embed = model.absolute_pos_embed.clone().detach() + + tmpdir = tempfile.gettempdir() + # Save v3 checkpoints + checkpoint_v2 = os.path.join(tmpdir, 'v3.pth') + save_checkpoint(model.state_dict(), checkpoint_v2) + # Save v1 checkpoints + setattr(model, 'norm', model.norm3) + setattr(model.stages[0].blocks[1].attn, 'attn_mask', + torch.zeros(64, 49, 49)) + model._version = 1 + del model.norm3 + checkpoint_v1 = os.path.join(tmpdir, 'v1.pth') + save_checkpoint(model.state_dict(), checkpoint_v1) + + # test load v1 checkpoint + cfg = deepcopy(self.cfg) + cfg['use_abs_pos_embed'] = True + model = SwinTransformer(**cfg) + load_checkpoint(model, checkpoint_v1, strict=True) + + # test load v3 checkpoint + cfg = deepcopy(self.cfg) + cfg['use_abs_pos_embed'] = True + model = SwinTransformer(**cfg) + load_checkpoint(model, checkpoint_v2, strict=True) + + # test load v3 checkpoint with different img_size + cfg = deepcopy(self.cfg) + cfg['img_size'] = 384 + cfg['use_abs_pos_embed'] = True + model = SwinTransformer(**cfg) + load_checkpoint(model, checkpoint_v2, strict=True) + resized_pos_embed = timm_resize_pos_embed( + pretrain_pos_embed, model.absolute_pos_embed, 
num_tokens=0) + self.assertTrue( + torch.allclose(model.absolute_pos_embed, resized_pos_embed)) + + os.remove(checkpoint_v1) + os.remove(checkpoint_v2) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = SwinTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 768, 7, 7)) + + # test with window_size=12 + cfg = deepcopy(self.cfg) + cfg['window_size'] = 12 + model = SwinTransformer(**cfg) + outs = model(torch.randn(1, 3, 384, 384)) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 768, 12, 12)) + with self.assertRaisesRegex(AssertionError, r'the window size \(12\)'): + model(torch.randn(1, 3, 224, 224)) + + # test with pad_small_map=True + cfg = deepcopy(self.cfg) + cfg['window_size'] = 12 + cfg['pad_small_map'] = True + model = SwinTransformer(**cfg) + outs = model(torch.randn(1, 3, 224, 224)) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 768, 7, 7)) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 1, 2, 3) + model = SwinTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for stride, out in zip([1, 2, 4, 8], outs): + self.assertEqual(out.shape, + (1, 96 * stride, 56 // stride, 56 // stride)) + + # test with checkpoint forward + cfg = deepcopy(self.cfg) + cfg['with_cp'] = True + model = SwinTransformer(**cfg) + for m in model.modules(): + if isinstance(m, SwinBlock): + self.assertTrue(m.with_cp) + model.init_weights() + model.train() + + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 768, 7, 7)) + + # test with dynamic input shape + imgs1 = torch.randn(1, 3, 224, 224) + imgs2 = torch.randn(1, 3, 256, 256) + imgs3 = torch.randn(1, 3, 256, 309) + cfg = deepcopy(self.cfg) + model = SwinTransformer(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 32), + math.ceil(imgs.shape[3] / 32)) + self.assertEqual(feat.shape, (1, 768, *expect_feat_shape)) + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = SwinTransformer(**cfg) + depths = model.arch_settings['depths'] + blocks = chain(*[stage.blocks for stage in model.stages]) + for i, block in enumerate(blocks): + expect_prob = 0.2 / (sum(depths) - 1) * i + self.assertAlmostEqual(block.ffn.dropout_layer.drop_prob, + expect_prob) + self.assertAlmostEqual(block.attn.drop.drop_prob, expect_prob) + + # test Swin-Transformer with norm_eval=True + cfg = deepcopy(self.cfg) + cfg['norm_eval'] = True + cfg['norm_cfg'] = dict(type='BN') + cfg['stage_cfgs'] = dict(block_cfgs=dict(norm_cfg=dict(type='BN'))) + model = SwinTransformer(**cfg) + model.init_weights() + model.train() + self.assertTrue(check_norm_state(model.modules(), False)) + + # test Swin-Transformer with first stage frozen. + cfg = deepcopy(self.cfg) + frozen_stages = 0 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 1, 2, 3) + model = SwinTransformer(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. 
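+        # With frozen_stages=0, this covers stage 0 plus its output norm
+        # (norm0), which exists because index 0 is in out_indices.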
+ self.assertFalse(model.patch_embed.training) + for param in model.patch_embed.parameters(): + self.assertFalse(param.requires_grad) + for i in range(frozen_stages + 1): + stage = model.stages[i] + for param in stage.parameters(): + self.assertFalse(param.requires_grad) + for param in model.norm0.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 4): + stage = model.stages[i] + for param in stage.parameters(): + self.assertTrue(param.requires_grad) + norm = getattr(model, f'norm{i}') + for param in norm.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_swin_transformer_v2.py b/tests/test_models/test_backbones/test_swin_transformer_v2.py new file mode 100644 index 0000000..02e238c --- /dev/null +++ b/tests/test_models/test_backbones/test_swin_transformer_v2.py @@ -0,0 +1,243 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import os +import tempfile +from copy import deepcopy +from itertools import chain +from unittest import TestCase + +import torch +from mmengine.runner import load_checkpoint, save_checkpoint +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.models.backbones import SwinTransformerV2 +from mmpretrain.models.backbones.swin_transformer import SwinBlock +from .utils import timm_resize_pos_embed + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +class TestSwinTransformerV2(TestCase): + + def setUp(self): + self.cfg = dict( + arch='b', img_size=256, patch_size=4, drop_path_rate=0.1) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + SwinTransformerV2(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 96, + 'num_heads': [3, 6, 12, 16], + } + SwinTransformerV2(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + depths = [2, 2, 6, 2] + num_heads = [6, 12, 6, 12] + cfg['arch'] = { + 'embed_dims': 256, + 'depths': depths, + 'num_heads': num_heads, + 'extra_norm_every_n_blocks': 2 + } + model = SwinTransformerV2(**cfg) + for i, stage in enumerate(model.stages): + self.assertEqual(stage.out_channels, 256 * (2**i)) + self.assertEqual(len(stage.blocks), depths[i]) + self.assertEqual(stage.blocks[0].attn.w_msa.num_heads, + num_heads[i]) + self.assertIsInstance(model.stages[2].blocks[5], torch.nn.Module) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['use_abs_pos_embed'] = True + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = SwinTransformerV2(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue( + torch.allclose(model.absolute_pos_embed, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse( + torch.allclose(model.absolute_pos_embed, torch.tensor(0.))) + + pretrain_pos_embed = model.absolute_pos_embed.clone().detach() + + tmpdir = 
tempfile.TemporaryDirectory() + # Save checkpoints + checkpoint = os.path.join(tmpdir.name, 'checkpoint.pth') + save_checkpoint(model.state_dict(), checkpoint) + + # test load checkpoint + cfg = deepcopy(self.cfg) + cfg['use_abs_pos_embed'] = True + model = SwinTransformerV2(**cfg) + load_checkpoint(model, checkpoint, strict=False) + + # test load checkpoint with different img_size + cfg = deepcopy(self.cfg) + cfg['img_size'] = 384 + cfg['use_abs_pos_embed'] = True + model = SwinTransformerV2(**cfg) + load_checkpoint(model, checkpoint, strict=False) + resized_pos_embed = timm_resize_pos_embed( + pretrain_pos_embed, model.absolute_pos_embed, num_tokens=0) + self.assertTrue( + torch.allclose(model.absolute_pos_embed, resized_pos_embed)) + + tmpdir.cleanup() + + def test_forward(self): + imgs = torch.randn(1, 3, 256, 256) + + cfg = deepcopy(self.cfg) + model = SwinTransformerV2(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 1024, 8, 8)) + + # test with window_size=12 + cfg = deepcopy(self.cfg) + cfg['window_size'] = 12 + model = SwinTransformerV2(**cfg) + outs = model(torch.randn(1, 3, 384, 384)) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 1024, 12, 12)) + with self.assertRaisesRegex(AssertionError, r'the window size \(12\)'): + model(torch.randn(1, 3, 256, 256)) + + # test with pad_small_map=True + cfg = deepcopy(self.cfg) + cfg['window_size'] = 12 + cfg['pad_small_map'] = True + model = SwinTransformerV2(**cfg) + outs = model(torch.randn(1, 3, 256, 256)) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 1024, 8, 8)) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 1, 2, 3) + model = SwinTransformerV2(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for stride, out in zip([1, 2, 4, 8], outs): + self.assertEqual(out.shape, + (1, 128 * stride, 64 // stride, 64 // stride)) + + # test with checkpoint forward + cfg = deepcopy(self.cfg) + cfg['with_cp'] = True + model = SwinTransformerV2(**cfg) + for m in model.modules(): + if isinstance(m, SwinBlock): + self.assertTrue(m.with_cp) + model.init_weights() + model.train() + + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (1, 1024, 8, 8)) + + # test with dynamic input shape + imgs1 = torch.randn(1, 3, 224, 224) + imgs2 = torch.randn(1, 3, 256, 256) + imgs3 = torch.randn(1, 3, 256, 309) + cfg = deepcopy(self.cfg) + cfg['pad_small_map'] = True + model = SwinTransformerV2(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 32), + math.ceil(imgs.shape[3] / 32)) + self.assertEqual(feat.shape, (1, 1024, *expect_feat_shape)) + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = SwinTransformerV2(**cfg) + depths = model.arch_settings['depths'] + blocks = chain(*[stage.blocks for stage in model.stages]) + for i, block in enumerate(blocks): + expect_prob = 0.2 / (sum(depths) - 1) * i + self.assertAlmostEqual(block.ffn.dropout_layer.drop_prob, + expect_prob) + self.assertAlmostEqual(block.attn.drop.drop_prob, 
expect_prob) + + # test Swin-Transformer V2 with norm_eval=True + cfg = deepcopy(self.cfg) + cfg['norm_eval'] = True + cfg['norm_cfg'] = dict(type='BN') + cfg['stage_cfgs'] = dict(block_cfgs=dict(norm_cfg=dict(type='BN'))) + model = SwinTransformerV2(**cfg) + model.init_weights() + model.train() + self.assertTrue(check_norm_state(model.modules(), False)) + + # test Swin-Transformer V2 with first stage frozen. + cfg = deepcopy(self.cfg) + frozen_stages = 0 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 1, 2, 3) + model = SwinTransformerV2(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. + self.assertFalse(model.patch_embed.training) + for param in model.patch_embed.parameters(): + self.assertFalse(param.requires_grad) + for i in range(frozen_stages + 1): + stage = model.stages[i] + for param in stage.parameters(): + self.assertFalse(param.requires_grad) + for param in model.norm0.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 4): + stage = model.stages[i] + for param in stage.parameters(): + self.assertTrue(param.requires_grad) + norm = getattr(model, f'norm{i}') + for param in norm.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_t2t_vit.py b/tests/test_models/test_backbones/test_t2t_vit.py new file mode 100644 index 0000000..76bfe9c --- /dev/null +++ b/tests/test_models/test_backbones/test_t2t_vit.py @@ -0,0 +1,144 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import os +import tempfile +from copy import deepcopy +from unittest import TestCase + +import torch +from mmengine.runner import load_checkpoint, save_checkpoint + +from mmpretrain.models.backbones import T2T_ViT +from .utils import timm_resize_pos_embed + + +class TestT2TViT(TestCase): + + def setUp(self): + self.cfg = dict( + img_size=224, + in_channels=3, + embed_dims=384, + t2t_cfg=dict( + token_dims=64, + use_performer=False, + ), + num_layers=14, + drop_path_rate=0.1) + + def test_structure(self): + # The performer hasn't been implemented + cfg = deepcopy(self.cfg) + cfg['t2t_cfg']['use_performer'] = True + with self.assertRaises(NotImplementedError): + T2T_ViT(**cfg) + + # Test out_indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = {1: 1} + with self.assertRaisesRegex(AssertionError, "get "): + T2T_ViT(**cfg) + cfg['out_indices'] = [0, 15] + with self.assertRaisesRegex(AssertionError, 'Invalid out_indices 15'): + T2T_ViT(**cfg) + + # Test model structure + cfg = deepcopy(self.cfg) + model = T2T_ViT(**cfg) + self.assertEqual(len(model.encoder), 14) + dpr_inc = 0.1 / (14 - 1) + dpr = 0 + for layer in model.encoder: + self.assertEqual(layer.attn.embed_dims, 384) + # The default mlp_ratio is 3 + self.assertEqual(layer.ffn.feedforward_channels, 384 * 3) + self.assertAlmostEqual(layer.attn.out_drop.drop_prob, dpr) + self.assertAlmostEqual(layer.ffn.dropout_layer.drop_prob, dpr) + dpr += dpr_inc + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [dict(type='TruncNormal', layer='Linear', std=.02)] + model = T2T_ViT(**cfg) + ori_weight = model.tokens_to_token.project.weight.clone().detach() + + model.init_weights() + initialized_weight = model.tokens_to_token.project.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + # test load checkpoint + pretrain_pos_embed = model.pos_embed.clone().detach() + tmpdir = 
tempfile.gettempdir() + checkpoint = os.path.join(tmpdir, 'test.pth') + save_checkpoint(model.state_dict(), checkpoint) + cfg = deepcopy(self.cfg) + model = T2T_ViT(**cfg) + load_checkpoint(model, checkpoint, strict=True) + self.assertTrue(torch.allclose(model.pos_embed, pretrain_pos_embed)) + + # test load checkpoint with different img_size + cfg = deepcopy(self.cfg) + cfg['img_size'] = 384 + model = T2T_ViT(**cfg) + load_checkpoint(model, checkpoint, strict=True) + resized_pos_embed = timm_resize_pos_embed(pretrain_pos_embed, + model.pos_embed) + self.assertTrue(torch.allclose(model.pos_embed, resized_pos_embed)) + + os.remove(checkpoint) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + # test with_cls_token=False + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['out_type'] = 'cls_token' + with self.assertRaisesRegex(ValueError, 'must be True'): + T2T_ViT(**cfg) + + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['out_type'] = 'featmap' + model = T2T_ViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 384, 14, 14)) + + # test with output cls_token + cfg = deepcopy(self.cfg) + model = T2T_ViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + cls_token = outs[-1] + self.assertEqual(cls_token.shape, (1, 384)) + + # Test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = [-3, -2, -1] + model = T2T_ViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 3) + for out in outs: + self.assertEqual(out.shape, (1, 384)) + + # Test forward with dynamic input size + imgs1 = torch.randn(1, 3, 224, 224) + imgs2 = torch.randn(1, 3, 256, 256) + imgs3 = torch.randn(1, 3, 256, 309) + cfg = deepcopy(self.cfg) + cfg['out_type'] = 'featmap' + model = T2T_ViT(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 16), + math.ceil(imgs.shape[3] / 16)) + self.assertEqual(patch_token.shape, (1, 384, *expect_feat_shape)) diff --git a/tests/test_models/test_backbones/test_timm_backbone.py b/tests/test_models/test_backbones/test_timm_backbone.py new file mode 100644 index 0000000..cfc659b --- /dev/null +++ b/tests/test_models/test_backbones/test_timm_backbone.py @@ -0,0 +1,216 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
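A minimal sketch of the TIMMBackbone wrapper exercised below, assuming `timm` is installed (the tests themselves are skipped when it is not); the resnet18 shape follows the first assertion in this file.

import torch
from mmpretrain.models.backbones import TIMMBackbone

# Wrap a timm model as an mmpretrain backbone; the classification head
# is replaced with nn.Identity, so the output is the final feature map.
backbone = TIMMBackbone(model_name='resnet18', pretrained=False)
feats = backbone(torch.randn(1, 3, 224, 224))
assert feats[0].shape == (1, 512, 7, 7)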
+import unittest + +import pytest +import torch +from torch import nn +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import TIMMBackbone + + +def has_timm() -> bool: + try: + import timm # noqa: F401 + return True + except ImportError: + return False + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +@unittest.skipIf(not has_timm(), 'timm is not installed') +def test_timm_backbone(): + """Test timm backbones, features_only=False (default).""" + with pytest.raises(TypeError): + # TIMMBackbone has 1 required positional argument: 'model_name' + model = TIMMBackbone(pretrained=True) + + with pytest.raises(TypeError): + # pretrained must be bool + model = TIMMBackbone(model_name='resnet18', pretrained='model.pth') + + # Test resnet18 from timm + model = TIMMBackbone(model_name='resnet18') + model.init_weights() + model.train() + assert check_norm_state(model.modules(), True) + assert isinstance(model.timm_model.global_pool.pool, nn.Identity) + assert isinstance(model.timm_model.fc, nn.Identity) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 512, 7, 7)) + + # Test efficientnet_b1 with pretrained weights + model = TIMMBackbone(model_name='efficientnet_b1', pretrained=True) + model.init_weights() + model.train() + assert isinstance(model.timm_model.global_pool.pool, nn.Identity) + assert isinstance(model.timm_model.classifier, nn.Identity) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 1280, 7, 7)) + + # Test vit_tiny_patch16_224 with pretrained weights + model = TIMMBackbone(model_name='vit_tiny_patch16_224', pretrained=True) + model.init_weights() + model.train() + assert isinstance(model.timm_model.head, nn.Identity) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + # Disable the test since TIMM's behavior changes between 0.5.4 and 0.5.5 + # assert feat[0].shape == torch.Size((1, 197, 192)) + + +@unittest.skipIf(not has_timm(), 'timm is not installed') +def test_timm_backbone_features_only(): + """Test timm backbones, features_only=True.""" + # Test different norm_layer, can be: 'SyncBN', 'BN2d', 'GN', 'LN', 'IN' + # Test resnet18 from timm, norm_layer='BN2d' + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=32, + norm_layer='BN2d') + + # Test resnet18 from timm, norm_layer='SyncBN' + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=32, + norm_layer='SyncBN') + + # Test resnet18 from timm, output_stride=32 + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=32) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(1, 3, 224, 224) + feats = model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 64, 112, 112)) + assert feats[1].shape == torch.Size((1, 64, 56, 56)) + assert feats[2].shape == torch.Size((1, 128, 28, 28)) + assert feats[3].shape == torch.Size((1, 256, 14, 14)) + assert feats[4].shape == torch.Size((1, 512, 7, 7)) + + # Test resnet18 from timm, output_stride=32, out_indices=(1, 2, 3) + model = TIMMBackbone( + model_name='resnet18', + 
features_only=True, + pretrained=False, + output_stride=32, + out_indices=(1, 2, 3)) + imgs = torch.randn(1, 3, 224, 224) + feats = model(imgs) + assert len(feats) == 3 + assert feats[0].shape == torch.Size((1, 64, 56, 56)) + assert feats[1].shape == torch.Size((1, 128, 28, 28)) + assert feats[2].shape == torch.Size((1, 256, 14, 14)) + + # Test resnet18 from timm, output_stride=16 + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=16) + imgs = torch.randn(1, 3, 224, 224) + feats = model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 64, 112, 112)) + assert feats[1].shape == torch.Size((1, 64, 56, 56)) + assert feats[2].shape == torch.Size((1, 128, 28, 28)) + assert feats[3].shape == torch.Size((1, 256, 14, 14)) + assert feats[4].shape == torch.Size((1, 512, 14, 14)) + + # Test resnet18 from timm, output_stride=8 + model = TIMMBackbone( + model_name='resnet18', + features_only=True, + pretrained=False, + output_stride=8) + imgs = torch.randn(1, 3, 224, 224) + feats = model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 64, 112, 112)) + assert feats[1].shape == torch.Size((1, 64, 56, 56)) + assert feats[2].shape == torch.Size((1, 128, 28, 28)) + assert feats[3].shape == torch.Size((1, 256, 28, 28)) + assert feats[4].shape == torch.Size((1, 512, 28, 28)) + + # Test efficientnet_b1 with pretrained weights + model = TIMMBackbone( + model_name='efficientnet_b1', features_only=True, pretrained=True) + imgs = torch.randn(1, 3, 64, 64) + feats = model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 16, 32, 32)) + assert feats[1].shape == torch.Size((1, 24, 16, 16)) + assert feats[2].shape == torch.Size((1, 40, 8, 8)) + assert feats[3].shape == torch.Size((1, 112, 4, 4)) + assert feats[4].shape == torch.Size((1, 320, 2, 2)) + + # Test resnetv2_50x1_bitm from timm, output_stride=8 + model = TIMMBackbone( + model_name='resnetv2_50x1_bitm', + features_only=True, + pretrained=False, + output_stride=8) + imgs = torch.randn(1, 3, 8, 8) + feats = model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 64, 4, 4)) + assert feats[1].shape == torch.Size((1, 256, 2, 2)) + assert feats[2].shape == torch.Size((1, 512, 1, 1)) + assert feats[3].shape == torch.Size((1, 1024, 1, 1)) + assert feats[4].shape == torch.Size((1, 2048, 1, 1)) + + # Test resnetv2_50x3_bitm from timm, output_stride=8 + model = TIMMBackbone( + model_name='resnetv2_50x3_bitm', + features_only=True, + pretrained=False, + output_stride=8) + imgs = torch.randn(1, 3, 8, 8) + feats = model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 192, 4, 4)) + assert feats[1].shape == torch.Size((1, 768, 2, 2)) + assert feats[2].shape == torch.Size((1, 1536, 1, 1)) + assert feats[3].shape == torch.Size((1, 3072, 1, 1)) + assert feats[4].shape == torch.Size((1, 6144, 1, 1)) + + # Test resnetv2_101x1_bitm from timm, output_stride=8 + model = TIMMBackbone( + model_name='resnetv2_101x1_bitm', + features_only=True, + pretrained=False, + output_stride=8) + imgs = torch.randn(1, 3, 8, 8) + feats = model(imgs) + assert len(feats) == 5 + assert feats[0].shape == torch.Size((1, 64, 4, 4)) + assert feats[1].shape == torch.Size((1, 256, 2, 2)) + assert feats[2].shape == torch.Size((1, 512, 1, 1)) + assert feats[3].shape == torch.Size((1, 1024, 1, 1)) + assert feats[4].shape == torch.Size((1, 2048, 1, 1)) diff --git a/tests/test_models/test_backbones/test_tinyvit.py 
b/tests/test_models/test_backbones/test_tinyvit.py
new file mode 100644
index 0000000..9747b76
--- /dev/null
+++ b/tests/test_models/test_backbones/test_tinyvit.py
@@ -0,0 +1,80 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import pytest
+import torch
+
+from mmpretrain.models.backbones import TinyViT
+
+
+def test_assertion():
+    with pytest.raises(AssertionError):
+        TinyViT(arch='unknown')
+
+    with pytest.raises(AssertionError):
+        # TinyViT out_indices should be a valid stage index.
+        TinyViT(out_indices=-100)
+
+
+def test_tinyvit():
+
+    # Test forward
+    model = TinyViT(arch='5m')
+    model.init_weights()
+    model.train()
+
+    imgs = torch.randn(1, 3, 224, 224)
+    feat = model(imgs)
+    assert len(feat) == 1
+    assert feat[0].shape == torch.Size([1, 320])
+
+    # Test forward with multiple outputs
+    model = TinyViT(arch='5m', out_indices=(0, 1, 2, 3))
+    feat = model(imgs)
+    assert len(feat) == 4
+    assert feat[0].shape == torch.Size([1, 128])
+    assert feat[1].shape == torch.Size([1, 160])
+    assert feat[2].shape == torch.Size([1, 320])
+    assert feat[3].shape == torch.Size([1, 320])
+
+    # Test with custom arch
+    model = TinyViT(
+        arch={
+            'depths': [2, 3, 4, 5],
+            'channels': [64, 128, 256, 448],
+            'num_heads': [4, 4, 4, 4]
+        },
+        out_indices=(0, 1, 2, 3))
+    model.init_weights()
+    model.train()
+
+    imgs = torch.randn(1, 3, 224, 224)
+    feat = model(imgs)
+    assert len(feat) == 4
+    assert feat[0].shape == torch.Size([1, 128])
+    assert feat[1].shape == torch.Size([1, 256])
+    assert feat[2].shape == torch.Size([1, 448])
+    assert feat[3].shape == torch.Size([1, 448])
+
+    # Test without gap before final norm
+    model = TinyViT(
+        arch='21m', out_indices=(0, 1, 2, 3), gap_before_final_norm=False)
+
+    imgs = torch.randn(1, 3, 224, 224)
+    feat = model(imgs)
+    assert len(feat) == 4
+
+    assert feat[0].shape == torch.Size([1, 192, 28, 28])
+    assert feat[1].shape == torch.Size([1, 384, 14, 14])
+    assert feat[2].shape == torch.Size([1, 576, 7, 7])
+    assert feat[3].shape == torch.Size([1, 576, 7, 7])
+
+    # Test frozen_stages
+    model = TinyViT(arch='11m', out_indices=(0, 1, 2, 3), frozen_stages=2)
+    model.init_weights()
+    model.train()
+
+    for i in range(2):
+        assert not model.stages[i].training
+
+    for i in range(2, 4):
+        assert model.stages[i].training
diff --git a/tests/test_models/test_backbones/test_tnt.py b/tests/test_models/test_backbones/test_tnt.py
new file mode 100644
index 0000000..83b997d
--- /dev/null
+++ b/tests/test_models/test_backbones/test_tnt.py
@@ -0,0 +1,50 @@
+# Copyright (c) OpenMMLab. All rights reserved.
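A minimal usage sketch of the TNT backbone tested below; the default arch and the custom arch dict mirror the two cases in this file, and the output widths follow its assertions.

import torch
from mmpretrain.models.backbones import TNT

# The default arch used by these tests produces a 640-d output token.
model = TNT()
model.init_weights()
feat = model(torch.randn(1, 3, 224, 224))
assert feat[0].shape == (1, 640)

# A custom arch is passed as a dict of outer/inner transformer settings.
arch = dict(embed_dims_outer=768, embed_dims_inner=48, num_layers=12,
            num_heads_outer=6, num_heads_inner=4)
model = TNT(arch=arch)
assert model(torch.randn(1, 3, 224, 224))[0].shape == (1, 768)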
+import pytest +import torch +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.backbones import TNT + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_tnt_backbone(): + with pytest.raises(TypeError): + # pretrained must be a string path + model = TNT() + model.init_weights(pretrained=0) + + # Test tnt_base_patch16_224 + model = TNT() + model.init_weights() + model.train() + assert check_norm_state(model.modules(), True) + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 640)) + + # Test tnt with embed_dims=768 + arch = { + 'embed_dims_outer': 768, + 'embed_dims_inner': 48, + 'num_layers': 12, + 'num_heads_outer': 6, + 'num_heads_inner': 4 + } + model = TNT(arch=arch) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == torch.Size((1, 768)) diff --git a/tests/test_models/test_backbones/test_twins.py b/tests/test_models/test_backbones/test_twins.py new file mode 100644 index 0000000..e7ca43e --- /dev/null +++ b/tests/test_models/test_backbones/test_twins.py @@ -0,0 +1,243 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import pytest +import torch +import torch.nn as nn + +from mmpretrain.models.backbones.twins import (PCPVT, SVT, + GlobalSubsampledAttention, + LocallyGroupedSelfAttention) + + +def test_LSA_module(): + lsa = LocallyGroupedSelfAttention(embed_dims=32, window_size=3) + outs = lsa(torch.randn(1, 3136, 32), (56, 56)) + assert outs.shape == torch.Size([1, 3136, 32]) + + +def test_GSA_module(): + gsa = GlobalSubsampledAttention(embed_dims=32, num_heads=8) + outs = gsa(torch.randn(1, 3136, 32), (56, 56)) + assert outs.shape == torch.Size([1, 3136, 32]) + + +def test_pcpvt(): + # test init + path = 'PATH_THAT_DO_NOT_EXIST' + + # init_cfg loads pretrain from an non-existent file + model = PCPVT('s', init_cfg=dict(type='Pretrained', checkpoint=path)) + assert model.init_cfg == dict(type='Pretrained', checkpoint=path) + + # Test loading a checkpoint from an non-existent file + with pytest.raises(OSError): + model.init_weights() + + # init_cfg=123, whose type is unsupported + model = PCPVT('s', init_cfg=123) + with pytest.raises(TypeError): + model.init_weights() + + H, W = (64, 64) + temp = torch.randn((1, 3, H, W)) + + # test output last feat + model = PCPVT('small') + model.init_weights() + outs = model(temp) + assert len(outs) == 1 + assert outs[-1].shape == (1, 512, H // 32, W // 32) + + # test with multi outputs + model = PCPVT('small', out_indices=(0, 1, 2, 3)) + model.init_weights() + outs = model(temp) + assert len(outs) == 4 + assert outs[0].shape == (1, 64, H // 4, W // 4) + assert outs[1].shape == (1, 128, H // 8, W // 8) + assert outs[2].shape == (1, 320, H // 16, W // 16) + assert outs[3].shape == (1, 512, H // 32, W // 32) + + # test with arch of dict + arch = { + 'embed_dims': [64, 128, 320, 512], + 'depths': [3, 4, 18, 3], + 'num_heads': [1, 2, 5, 8], + 'patch_sizes': [4, 2, 2, 2], + 'strides': [4, 2, 2, 2], + 'mlp_ratios': [8, 8, 4, 4], + 'sr_ratios': [8, 4, 2, 1] + } + + pcpvt_arch = copy.deepcopy(arch) + model = PCPVT(pcpvt_arch, out_indices=(0, 1, 2, 3)) + model.init_weights() + outs = model(temp) + assert len(outs) == 4 + assert outs[0].shape == (1, 64, H // 4, W // 4) + 
assert outs[1].shape == (1, 128, H // 8, W // 8)
+    assert outs[2].shape == (1, 320, H // 16, W // 16)
+    assert outs[3].shape == (1, 512, H // 32, W // 32)
+
+    # assert length of arch value not equal
+    pcpvt_arch = copy.deepcopy(arch)
+    pcpvt_arch['sr_ratios'] = [8, 4, 2]
+    with pytest.raises(AssertionError):
+        model = PCPVT(pcpvt_arch, out_indices=(0, 1, 2, 3))
+
+    # assert lack arch essential_keys
+    pcpvt_arch = copy.deepcopy(arch)
+    del pcpvt_arch['sr_ratios']
+    with pytest.raises(AssertionError):
+        model = PCPVT(pcpvt_arch, out_indices=(0, 1, 2, 3))
+
+    # assert arch value not list
+    pcpvt_arch = copy.deepcopy(arch)
+    pcpvt_arch['sr_ratios'] = 1
+    with pytest.raises(AssertionError):
+        model = PCPVT(pcpvt_arch, out_indices=(0, 1, 2, 3))
+
+    pcpvt_arch = copy.deepcopy(arch)
+    pcpvt_arch['sr_ratios'] = '1, 2, 3, 4'
+    with pytest.raises(AssertionError):
+        model = PCPVT(pcpvt_arch, out_indices=(0, 1, 2, 3))
+
+    # test norm_after_stage is bool True
+    model = PCPVT('small', norm_after_stage=True, norm_cfg=dict(type='LN'))
+    for i in range(model.num_stage):
+        assert hasattr(model, f'norm_after_stage{i}')
+        assert isinstance(getattr(model, f'norm_after_stage{i}'), nn.LayerNorm)
+
+    # test norm_after_stage is bool False
+    model = PCPVT('small', norm_after_stage=False)
+    for i in range(model.num_stage):
+        assert hasattr(model, f'norm_after_stage{i}')
+        assert isinstance(getattr(model, f'norm_after_stage{i}'), nn.Identity)
+
+    # test norm_after_stage is bool list
+    norm_after_stage = [False, True, False, True]
+    model = PCPVT('small', norm_after_stage=norm_after_stage)
+    assert len(norm_after_stage) == model.num_stage
+    for i in range(model.num_stage):
+        assert hasattr(model, f'norm_after_stage{i}')
+        norm_layer = getattr(model, f'norm_after_stage{i}')
+        if norm_after_stage[i]:
+            assert isinstance(norm_layer, nn.LayerNorm)
+        else:
+            assert isinstance(norm_layer, nn.Identity)
+
+    # test norm_after_stage is not bool list
+    norm_after_stage = [False, 'True', False, True]
+    with pytest.raises(AssertionError):
+        model = PCPVT('small', norm_after_stage=norm_after_stage)
+
+
+def test_svt():
+    # test init
+    path = 'PATH_THAT_DO_NOT_EXIST'
+
+    # init_cfg loads pretrain from a non-existent file
+    model = SVT('s', init_cfg=dict(type='Pretrained', checkpoint=path))
+    assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
+
+    # Test loading a checkpoint from a non-existent file
+    with pytest.raises(OSError):
+        model.init_weights()
+
+    # init_cfg=123, whose type is unsupported
+    model = SVT('s', init_cfg=123)
+    with pytest.raises(TypeError):
+        model.init_weights()
+
+    # Test feature map output
+    H, W = (64, 64)
+    temp = torch.randn((1, 3, H, W))
+
+    model = SVT('s')
+    model.init_weights()
+    outs = model(temp)
+    assert len(outs) == 1
+    assert outs[-1].shape == (1, 512, H // 32, W // 32)
+
+    # test with multi outputs
+    model = SVT('small', out_indices=(0, 1, 2, 3))
+    model.init_weights()
+    outs = model(temp)
+    assert len(outs) == 4
+    assert outs[0].shape == (1, 64, H // 4, W // 4)
+    assert outs[1].shape == (1, 128, H // 8, W // 8)
+    assert outs[2].shape == (1, 256, H // 16, W // 16)
+    assert outs[3].shape == (1, 512, H // 32, W // 32)
+
+    # test with arch of dict
+    arch = {
+        'embed_dims': [96, 192, 384, 768],
+        'depths': [2, 2, 18, 2],
+        'num_heads': [3, 6, 12, 24],
+        'patch_sizes': [4, 2, 2, 2],
+        'strides': [4, 2, 2, 2],
+        'mlp_ratios': [4, 4, 4, 4],
+        'sr_ratios': [8, 4, 2, 1],
+        'window_sizes': [7, 7, 7, 7]
+    }
+    model = SVT(arch, out_indices=(0, 1, 2, 3))
+    model.init_weights()
+    outs = model(temp)
+    assert len(outs) == 4
+    assert outs[0].shape == (1, 96, H // 4, W // 4)
+    assert outs[1].shape == (1, 192, H // 8, W // 8)
+    assert outs[2].shape == (1, 384, H // 16, W // 16)
+    assert outs[3].shape == (1, 768, H // 32, W // 32)
+
+    # assert length of arch value not equal
+    svt_arch = copy.deepcopy(arch)
+    svt_arch['sr_ratios'] = [8, 4, 2]
+    with pytest.raises(AssertionError):
+        model = SVT(svt_arch, out_indices=(0, 1, 2, 3))
+
+    # assert lack arch essential_keys
+    svt_arch = copy.deepcopy(arch)
+    del svt_arch['window_sizes']
+    with pytest.raises(AssertionError):
+        model = SVT(svt_arch, out_indices=(0, 1, 2, 3))
+
+    # assert arch value not list
+    svt_arch = copy.deepcopy(arch)
+    svt_arch['sr_ratios'] = 1
+    with pytest.raises(AssertionError):
+        model = SVT(svt_arch, out_indices=(0, 1, 2, 3))
+
+    svt_arch = copy.deepcopy(arch)
+    svt_arch['sr_ratios'] = '1, 2, 3, 4'
+    with pytest.raises(AssertionError):
+        model = SVT(svt_arch, out_indices=(0, 1, 2, 3))
+
+    # test norm_after_stage is bool True
+    model = SVT('small', norm_after_stage=True, norm_cfg=dict(type='LN'))
+    for i in range(model.num_stage):
+        assert hasattr(model, f'norm_after_stage{i}')
+        assert isinstance(getattr(model, f'norm_after_stage{i}'), nn.LayerNorm)
+
+    # test norm_after_stage is bool False
+    model = SVT('small', norm_after_stage=False)
+    for i in range(model.num_stage):
+        assert hasattr(model, f'norm_after_stage{i}')
+        assert isinstance(getattr(model, f'norm_after_stage{i}'), nn.Identity)
+
+    # test norm_after_stage is bool list
+    norm_after_stage = [False, True, False, True]
+    model = SVT('small', norm_after_stage=norm_after_stage)
+    assert len(norm_after_stage) == model.num_stage
+    for i in range(model.num_stage):
+        assert hasattr(model, f'norm_after_stage{i}')
+        norm_layer = getattr(model, f'norm_after_stage{i}')
+        if norm_after_stage[i]:
+            assert isinstance(norm_layer, nn.LayerNorm)
+        else:
+            assert isinstance(norm_layer, nn.Identity)
+
+    # test norm_after_stage is not bool list
+    norm_after_stage = [False, 'True', False, True]
+    with pytest.raises(AssertionError):
+        model = SVT('small', norm_after_stage=norm_after_stage)
diff --git a/tests/test_models/test_backbones/test_van.py b/tests/test_models/test_backbones/test_van.py
new file mode 100644
index 0000000..fed9e3e
--- /dev/null
+++ b/tests/test_models/test_backbones/test_van.py
@@ -0,0 +1,188 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import math +from copy import deepcopy +from itertools import chain +from unittest import TestCase + +import torch +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm +from torch import nn + +from mmpretrain.models.backbones import VAN + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +class TestVAN(TestCase): + + def setUp(self): + self.cfg = dict(arch='t', drop_path_rate=0.1) + + def test_arch(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + VAN(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': [32, 64, 160, 256], + 'ffn_ratios': [8, 8, 4, 4], + } + VAN(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + embed_dims = [32, 64, 160, 256] + depths = [3, 3, 5, 2] + ffn_ratios = [8, 8, 4, 4] + cfg['arch'] = { + 'embed_dims': embed_dims, + 'depths': depths, + 'ffn_ratios': ffn_ratios + } + model = VAN(**cfg) + + for i in range(len(depths)): + stage = getattr(model, f'blocks{i + 1}') + self.assertEqual(stage[-1].out_channels, embed_dims[i]) + self.assertEqual(len(stage), depths[i]) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = VAN(**cfg) + ori_weight = model.patch_embed1.projection.weight.clone().detach() + + model.init_weights() + initialized_weight = model.patch_embed1.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + + def test_forward(self): + imgs = torch.randn(3, 3, 224, 224) + + cfg = deepcopy(self.cfg) + model = VAN(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (3, 256, 7, 7)) + + # test with patch_sizes + cfg = deepcopy(self.cfg) + cfg['patch_sizes'] = [7, 5, 5, 5] + model = VAN(**cfg) + outs = model(torch.randn(3, 3, 224, 224)) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + self.assertEqual(feat.shape, (3, 256, 3, 3)) + + # test multiple output indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = (0, 1, 2, 3) + model = VAN(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for emb_size, stride, out in zip([32, 64, 160, 256], [1, 2, 4, 8], + outs): + self.assertEqual(out.shape, + (3, emb_size, 56 // stride, 56 // stride)) + + # test with dynamic input shape + imgs1 = torch.randn(3, 3, 224, 224) + imgs2 = torch.randn(3, 3, 256, 256) + imgs3 = torch.randn(3, 3, 256, 309) + cfg = deepcopy(self.cfg) + model = VAN(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + feat = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 32), + math.ceil(imgs.shape[3] / 32)) + self.assertEqual(feat.shape, (3, 256, *expect_feat_shape)) + + def test_structure(self): + # test drop_path_rate decay + cfg = deepcopy(self.cfg) + cfg['drop_path_rate'] = 0.2 + model = VAN(**cfg) + depths = model.arch_settings['depths'] + stages = [model.blocks1, model.blocks2, model.blocks3, model.blocks4] + blocks = chain(*[stage 
for stage in stages]) + total_depth = sum(depths) + dpr = [ + x.item() + for x in torch.linspace(0, cfg['drop_path_rate'], total_depth) + ] + for i, (block, expect_prob) in enumerate(zip(blocks, dpr)): + if expect_prob == 0: + assert isinstance(block.drop_path, nn.Identity) + else: + self.assertAlmostEqual(block.drop_path.drop_prob, expect_prob) + + # test VAN with norm_eval=True + cfg = deepcopy(self.cfg) + cfg['norm_eval'] = True + cfg['norm_cfg'] = dict(type='BN') + model = VAN(**cfg) + model.init_weights() + model.train() + self.assertTrue(check_norm_state(model.modules(), False)) + + # test VAN with first stage frozen. + cfg = deepcopy(self.cfg) + frozen_stages = 0 + cfg['frozen_stages'] = frozen_stages + cfg['out_indices'] = (0, 1, 2, 3) + model = VAN(**cfg) + model.init_weights() + model.train() + + # the patch_embed and first stage should not require grad. + self.assertFalse(model.patch_embed1.training) + for param in model.patch_embed1.parameters(): + self.assertFalse(param.requires_grad) + for i in range(frozen_stages + 1): + patch = getattr(model, f'patch_embed{i+1}') + for param in patch.parameters(): + self.assertFalse(param.requires_grad) + blocks = getattr(model, f'blocks{i + 1}') + for param in blocks.parameters(): + self.assertFalse(param.requires_grad) + norm = getattr(model, f'norm{i + 1}') + for param in norm.parameters(): + self.assertFalse(param.requires_grad) + + # the second stage should require grad. + for i in range(frozen_stages + 1, 4): + patch = getattr(model, f'patch_embed{i + 1}') + for param in patch.parameters(): + self.assertTrue(param.requires_grad) + blocks = getattr(model, f'blocks{i+1}') + for param in blocks.parameters(): + self.assertTrue(param.requires_grad) + norm = getattr(model, f'norm{i + 1}') + for param in norm.parameters(): + self.assertTrue(param.requires_grad) diff --git a/tests/test_models/test_backbones/test_vgg.py b/tests/test_models/test_backbones/test_vgg.py new file mode 100644 index 0000000..dd3910f --- /dev/null +++ b/tests/test_models/test_backbones/test_vgg.py @@ -0,0 +1,139 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch +from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm + +from mmpretrain.models.backbones import VGG + + +def check_norm_state(modules, train_state): + """Check if norm layer is in correct train state.""" + for mod in modules: + if isinstance(mod, _BatchNorm): + if mod.training != train_state: + return False + return True + + +def test_vgg(): + """Test VGG backbone.""" + with pytest.raises(KeyError): + # VGG depth should be in [11, 13, 16, 19] + VGG(18) + + with pytest.raises(AssertionError): + # In VGG: 1 <= num_stages <= 5 + VGG(11, num_stages=0) + + with pytest.raises(AssertionError): + # In VGG: 1 <= num_stages <= 5 + VGG(11, num_stages=6) + + with pytest.raises(AssertionError): + # len(dilations) == num_stages + VGG(11, dilations=(1, 1), num_stages=3) + + with pytest.raises(TypeError): + # pretrained must be a string path + model = VGG(11) + model.init_weights(pretrained=0) + + # Test VGG11 norm_eval=True + model = VGG(11, norm_eval=True) + model.init_weights() + model.train() + assert check_norm_state(model.modules(), False) + + # Test VGG11 forward without classifiers + model = VGG(11, out_indices=(0, 1, 2, 3, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + assert feat[3].shape == (1, 512, 14, 14) + assert feat[4].shape == (1, 512, 7, 7) + + # Test VGG11 forward with classifiers + model = VGG(11, num_classes=10, out_indices=(0, 1, 2, 3, 4, 5)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 6 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + assert feat[3].shape == (1, 512, 14, 14) + assert feat[4].shape == (1, 512, 7, 7) + assert feat[5].shape == (1, 10) + + # Test VGG11BN forward + model = VGG(11, norm_cfg=dict(type='BN'), out_indices=(0, 1, 2, 3, 4)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 5 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + assert feat[3].shape == (1, 512, 14, 14) + assert feat[4].shape == (1, 512, 7, 7) + + # Test VGG11BN forward with classifiers + model = VGG( + 11, + num_classes=10, + norm_cfg=dict(type='BN'), + out_indices=(0, 1, 2, 3, 4, 5)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 6 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + assert feat[3].shape == (1, 512, 14, 14) + assert feat[4].shape == (1, 512, 7, 7) + assert feat[5].shape == (1, 10) + + # Test VGG13 with layers 1, 2, 3 out forward + model = VGG(13, out_indices=(0, 1, 2)) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 3 + assert feat[0].shape == (1, 64, 112, 112) + assert feat[1].shape == (1, 128, 56, 56) + assert feat[2].shape == (1, 256, 28, 28) + + # Test VGG16 with top feature maps out forward + model = VGG(16) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == (1, 512, 7, 7) + + # Test VGG19 with 
classification score out forward + model = VGG(19, num_classes=10) + model.init_weights() + model.train() + + imgs = torch.randn(1, 3, 224, 224) + feat = model(imgs) + assert len(feat) == 1 + assert feat[0].shape == (1, 10) diff --git a/tests/test_models/test_backbones/test_vision_transformer.py b/tests/test_models/test_backbones/test_vision_transformer.py new file mode 100644 index 0000000..d6638ae --- /dev/null +++ b/tests/test_models/test_backbones/test_vision_transformer.py @@ -0,0 +1,176 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import os +import tempfile +from copy import deepcopy +from unittest import TestCase + +import torch +from mmengine.runner import load_checkpoint, save_checkpoint + +from mmpretrain.models.backbones import VisionTransformer +from .utils import timm_resize_pos_embed + + +class TestVisionTransformer(TestCase): + + def setUp(self): + self.cfg = dict( + arch='b', img_size=224, patch_size=16, drop_path_rate=0.1) + + def test_structure(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + VisionTransformer(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + } + VisionTransformer(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 128, + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 1024 + } + model = VisionTransformer(**cfg) + self.assertEqual(model.embed_dims, 128) + self.assertEqual(model.num_layers, 24) + for layer in model.layers: + self.assertEqual(layer.attn.num_heads, 16) + self.assertEqual(layer.ffn.feedforward_channels, 1024) + + # Test out_indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = {1: 1} + with self.assertRaisesRegex(AssertionError, "get "): + VisionTransformer(**cfg) + cfg['out_indices'] = [0, 13] + with self.assertRaisesRegex(AssertionError, 'Invalid out_indices 13'): + VisionTransformer(**cfg) + + # Test model structure + cfg = deepcopy(self.cfg) + model = VisionTransformer(**cfg) + self.assertEqual(len(model.layers), 12) + dpr_inc = 0.1 / (12 - 1) + dpr = 0 + for layer in model.layers: + self.assertEqual(layer.attn.embed_dims, 768) + self.assertEqual(layer.attn.num_heads, 12) + self.assertEqual(layer.ffn.feedforward_channels, 3072) + self.assertAlmostEqual(layer.attn.out_drop.drop_prob, dpr) + self.assertAlmostEqual(layer.ffn.dropout_layer.drop_prob, dpr) + dpr += dpr_inc + + # Test model structure: prenorm + cfg = deepcopy(self.cfg) + cfg['pre_norm'] = True + model = VisionTransformer(**cfg) + self.assertNotEqual(model.pre_norm.__class__, torch.nn.Identity) + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv2d', + mode='fan_in', + nonlinearity='linear') + ] + model = VisionTransformer(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue(torch.allclose(model.pos_embed, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse(torch.allclose(model.pos_embed, torch.tensor(0.))) + + # test load checkpoint + pretrain_pos_embed = model.pos_embed.clone().detach() + tmpdir = 
tempfile.gettempdir() + checkpoint = os.path.join(tmpdir, 'test.pth') + save_checkpoint(model.state_dict(), checkpoint) + cfg = deepcopy(self.cfg) + model = VisionTransformer(**cfg) + load_checkpoint(model, checkpoint, strict=True) + self.assertTrue(torch.allclose(model.pos_embed, pretrain_pos_embed)) + + # test load checkpoint with different img_size + cfg = deepcopy(self.cfg) + cfg['img_size'] = 384 + model = VisionTransformer(**cfg) + load_checkpoint(model, checkpoint, strict=True) + resized_pos_embed = timm_resize_pos_embed(pretrain_pos_embed, + model.pos_embed) + self.assertTrue(torch.allclose(model.pos_embed, resized_pos_embed)) + + os.remove(checkpoint) + + def test_forward(self): + imgs = torch.randn(1, 3, 224, 224) + + # test with_cls_token=False + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['out_type'] = 'cls_token' + with self.assertRaisesRegex(ValueError, 'must be True'): + VisionTransformer(**cfg) + + cfg = deepcopy(self.cfg) + cfg['with_cls_token'] = False + cfg['out_type'] = 'featmap' + model = VisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 768, 14, 14)) + + # test with output cls_token + cfg = deepcopy(self.cfg) + model = VisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + cls_token = outs[-1] + self.assertEqual(cls_token.shape, (1, 768)) + + # Test forward with multi out indices + cfg = deepcopy(self.cfg) + cfg['out_indices'] = [-3, -2, -1] + model = VisionTransformer(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 3) + for out in outs: + self.assertEqual(out.shape, (1, 768)) + + # Test forward with dynamic input size + imgs1 = torch.randn(1, 3, 224, 224) + imgs2 = torch.randn(1, 3, 256, 256) + imgs3 = torch.randn(1, 3, 256, 309) + cfg = deepcopy(self.cfg) + cfg['out_type'] = 'featmap' + model = VisionTransformer(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 16), + math.ceil(imgs.shape[3] / 16)) + self.assertEqual(patch_token.shape, (1, 768, *expect_feat_shape)) diff --git a/tests/test_models/test_backbones/test_xcit.py b/tests/test_models/test_backbones/test_xcit.py new file mode 100644 index 0000000..95a8cfd --- /dev/null +++ b/tests/test_models/test_backbones/test_xcit.py @@ -0,0 +1,41 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+# The basic forward/backward tests are in ../test_models.py +import torch + +from mmpretrain.apis import get_model + + +def test_out_type(): + inputs = torch.rand(1, 3, 224, 224) + + model = get_model( + 'xcit-nano-12-p16_3rdparty_in1k', + backbone=dict(out_type='raw'), + neck=None, + head=None) + outputs = model(inputs)[0] + assert outputs.shape == (1, 197, 128) + + model = get_model( + 'xcit-nano-12-p16_3rdparty_in1k', + backbone=dict(out_type='featmap'), + neck=None, + head=None) + outputs = model(inputs)[0] + assert outputs.shape == (1, 128, 14, 14) + + model = get_model( + 'xcit-nano-12-p16_3rdparty_in1k', + backbone=dict(out_type='cls_token'), + neck=None, + head=None) + outputs = model(inputs)[0] + assert outputs.shape == (1, 128) + + model = get_model( + 'xcit-nano-12-p16_3rdparty_in1k', + backbone=dict(out_type='avg_featmap'), + neck=None, + head=None) + outputs = model(inputs)[0] + assert outputs.shape == (1, 128) diff --git a/tests/test_models/test_backbones/utils.py b/tests/test_models/test_backbones/utils.py new file mode 100644 index 0000000..aba9caf --- /dev/null +++ b/tests/test_models/test_backbones/utils.py @@ -0,0 +1,31 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn.functional as F + + +def timm_resize_pos_embed(posemb, posemb_new, num_tokens=1, gs_new=()): + """Timm version pos embed resize function. + + copied from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py + """ # noqa:E501 + ntok_new = posemb_new.shape[1] + if num_tokens: + posemb_tok, posemb_grid = posemb[:, :num_tokens], posemb[0, + num_tokens:] + ntok_new -= num_tokens + else: + posemb_tok, posemb_grid = posemb[:, :0], posemb[0] + gs_old = int(math.sqrt(len(posemb_grid))) + if not len(gs_new): # backwards compatibility + gs_new = [int(math.sqrt(ntok_new))] * 2 + assert len(gs_new) >= 2 + posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, + -1).permute(0, 3, 1, 2) + posemb_grid = F.interpolate( + posemb_grid, size=gs_new, mode='bicubic', align_corners=False) + posemb_grid = posemb_grid.permute(0, 2, 3, + 1).reshape(1, gs_new[0] * gs_new[1], -1) + posemb = torch.cat([posemb_tok, posemb_grid], dim=1) + return posemb diff --git a/tests/test_models/test_classifiers.py b/tests/test_models/test_classifiers.py new file mode 100644 index 0000000..9ee75e2 --- /dev/null +++ b/tests/test_models/test_classifiers.py @@ -0,0 +1,471 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import unittest +from unittest import TestCase +from unittest.mock import MagicMock + +import torch +import torch.nn as nn +from mmengine import ConfigDict + +from mmpretrain.models import ImageClassifier +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample + + +def has_timm() -> bool: + try: + import timm # noqa: F401 + return True + except ImportError: + return False + + +def has_huggingface() -> bool: + try: + import transformers # noqa: F401 + return True + except ImportError: + return False + + +class TestImageClassifier(TestCase): + DEFAULT_ARGS = dict( + type='ImageClassifier', + backbone=dict(type='ResNet', depth=18), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=512, + loss=dict(type='CrossEntropyLoss'))) + + def test_initialize(self): + model = MODELS.build(self.DEFAULT_ARGS) + self.assertTrue(model.with_neck) + self.assertTrue(model.with_head) + + cfg = {**self.DEFAULT_ARGS, 'pretrained': 'checkpoint'} + model = MODELS.build(cfg) + self.assertDictEqual(model.init_cfg, + dict(type='Pretrained', checkpoint='checkpoint')) + + cfg = ConfigDict(self.DEFAULT_ARGS) + cfg.pop('neck') + model = MODELS.build(cfg) + self.assertFalse(model.with_neck) + + cfg = ConfigDict(self.DEFAULT_ARGS) + cfg.pop('head') + model = MODELS.build(cfg) + self.assertFalse(model.with_head) + + # test set batch augmentation from train_cfg + cfg = { + **self.DEFAULT_ARGS, 'train_cfg': + dict(augments=dict(type='Mixup', alpha=1.)) + } + model: ImageClassifier = MODELS.build(cfg) + self.assertIsNotNone(model.data_preprocessor.batch_augments) + + cfg = {**self.DEFAULT_ARGS, 'train_cfg': dict()} + model: ImageClassifier = MODELS.build(cfg) + self.assertIsNone(model.data_preprocessor.batch_augments) + + def test_extract_feat(self): + inputs = torch.rand(1, 3, 224, 224) + cfg = ConfigDict(self.DEFAULT_ARGS) + cfg.backbone.out_indices = (0, 1, 2, 3) + model: ImageClassifier = MODELS.build(cfg) + + # test backbone output + feats = model.extract_feat(inputs, stage='backbone') + self.assertEqual(len(feats), 4) + self.assertEqual(feats[0].shape, (1, 64, 56, 56)) + self.assertEqual(feats[1].shape, (1, 128, 28, 28)) + self.assertEqual(feats[2].shape, (1, 256, 14, 14)) + self.assertEqual(feats[3].shape, (1, 512, 7, 7)) + + # test neck output + feats = model.extract_feat(inputs, stage='neck') + self.assertEqual(len(feats), 4) + self.assertEqual(feats[0].shape, (1, 64)) + self.assertEqual(feats[1].shape, (1, 128)) + self.assertEqual(feats[2].shape, (1, 256)) + self.assertEqual(feats[3].shape, (1, 512)) + + # test pre_logits output + feats = model.extract_feat(inputs, stage='pre_logits') + self.assertEqual(feats.shape, (1, 512)) + + # TODO: test transformer style feature extraction + + # test extract_feats + multi_feats = model.extract_feats([inputs, inputs], stage='backbone') + self.assertEqual(len(multi_feats), 2) + for feats in multi_feats: + self.assertEqual(len(feats), 4) + self.assertEqual(feats[0].shape, (1, 64, 56, 56)) + self.assertEqual(feats[1].shape, (1, 128, 28, 28)) + self.assertEqual(feats[2].shape, (1, 256, 14, 14)) + self.assertEqual(feats[3].shape, (1, 512, 7, 7)) + + # Without neck, return backbone + cfg = ConfigDict(self.DEFAULT_ARGS) + cfg.backbone.out_indices = (0, 1, 2, 3) + cfg.pop('neck') + model: ImageClassifier = MODELS.build(cfg) + feats = model.extract_feat(inputs, stage='neck') + self.assertEqual(len(feats), 4) + self.assertEqual(feats[0].shape, (1, 64, 56, 56)) + self.assertEqual(feats[1].shape, (1, 
128, 28, 28)) + self.assertEqual(feats[2].shape, (1, 256, 14, 14)) + self.assertEqual(feats[3].shape, (1, 512, 7, 7)) + + # Without head, raise error + cfg = ConfigDict(self.DEFAULT_ARGS) + cfg.backbone.out_indices = (0, 1, 2, 3) + cfg.pop('head') + model: ImageClassifier = MODELS.build(cfg) + with self.assertRaisesRegex(AssertionError, 'No head or the head'): + model.extract_feat(inputs, stage='pre_logits') + + with self.assertRaisesRegex(AssertionError, 'use `extract_feat`'): + model.extract_feats(inputs) + + def test_loss(self): + inputs = torch.rand(1, 3, 224, 224) + data_samples = [DataSample().set_gt_label(1)] + + model: ImageClassifier = MODELS.build(self.DEFAULT_ARGS) + losses = model.loss(inputs, data_samples) + self.assertGreater(losses['loss'].item(), 0) + + def test_predict(self): + inputs = torch.rand(1, 3, 224, 224) + data_samples = [DataSample().set_gt_label(1)] + + model: ImageClassifier = MODELS.build(self.DEFAULT_ARGS) + predictions = model.predict(inputs) + self.assertEqual(predictions[0].pred_score.shape, (10, )) + + predictions = model.predict(inputs, data_samples) + self.assertEqual(predictions[0].pred_score.shape, (10, )) + self.assertEqual(data_samples[0].pred_score.shape, (10, )) + torch.testing.assert_allclose(data_samples[0].pred_score, + predictions[0].pred_score) + + def test_forward(self): + inputs = torch.rand(1, 3, 224, 224) + data_samples = [DataSample().set_gt_label(1)] + model: ImageClassifier = MODELS.build(self.DEFAULT_ARGS) + + # test pure forward + outs = model(inputs) + self.assertIsInstance(outs, torch.Tensor) + + # test forward train + losses = model(inputs, data_samples, mode='loss') + self.assertGreater(losses['loss'].item(), 0) + + # test forward test + predictions = model(inputs, mode='predict') + self.assertEqual(predictions[0].pred_score.shape, (10, )) + + predictions = model(inputs, data_samples, mode='predict') + self.assertEqual(predictions[0].pred_score.shape, (10, )) + self.assertEqual(data_samples[0].pred_score.shape, (10, )) + torch.testing.assert_allclose(data_samples[0].pred_score, + predictions[0].pred_score) + + # test forward with invalid mode + with self.assertRaisesRegex(RuntimeError, 'Invalid mode "unknown"'): + model(inputs, mode='unknown') + + def test_train_step(self): + cfg = { + **self.DEFAULT_ARGS, 'data_preprocessor': + dict(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + } + model: ImageClassifier = MODELS.build(cfg) + + data = { + 'inputs': torch.randint(0, 256, (1, 3, 224, 224)), + 'data_samples': [DataSample().set_gt_label(1)] + } + + optim_wrapper = MagicMock() + log_vars = model.train_step(data, optim_wrapper) + self.assertIn('loss', log_vars) + optim_wrapper.update_params.assert_called_once() + + def test_val_step(self): + cfg = { + **self.DEFAULT_ARGS, 'data_preprocessor': + dict(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + } + model: ImageClassifier = MODELS.build(cfg) + + data = { + 'inputs': torch.randint(0, 256, (1, 3, 224, 224)), + 'data_samples': [DataSample().set_gt_label(1)] + } + + predictions = model.val_step(data) + self.assertEqual(predictions[0].pred_score.shape, (10, )) + + def test_test_step(self): + cfg = { + **self.DEFAULT_ARGS, 'data_preprocessor': + dict(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + } + model: ImageClassifier = MODELS.build(cfg) + + data = { + 'inputs': torch.randint(0, 256, (1, 3, 224, 224)), + 'data_samples': [DataSample().set_gt_label(1)] + } + + predictions = model.test_step(data) + self.assertEqual(predictions[0].pred_score.shape, (10, )) + 
+ +@unittest.skipIf(not has_timm(), 'timm is not installed.') +class TestTimmClassifier(TestCase): + DEFAULT_ARGS = dict( + type='TimmClassifier', + model_name='resnet18', + loss=dict(type='CrossEntropyLoss'), + ) + + def test_initialize(self): + model = MODELS.build(self.DEFAULT_ARGS) + assert isinstance(model.model, nn.Module) + + # test set batch augmentation from train_cfg + cfg = { + **self.DEFAULT_ARGS, 'train_cfg': + dict(augments=dict(type='Mixup', alpha=1.)) + } + model: ImageClassifier = MODELS.build(cfg) + self.assertIsNotNone(model.data_preprocessor.batch_augments) + + cfg = {**self.DEFAULT_ARGS, 'train_cfg': dict()} + model: ImageClassifier = MODELS.build(cfg) + self.assertIsNone(model.data_preprocessor.batch_augments) + + def test_loss(self): + inputs = torch.rand(1, 3, 224, 224) + data_samples = [DataSample().set_gt_label(1)] + + model: ImageClassifier = MODELS.build(self.DEFAULT_ARGS) + losses = model.loss(inputs, data_samples) + self.assertGreater(losses['loss'].item(), 0) + + def test_predict(self): + inputs = torch.rand(1, 3, 224, 224) + data_samples = [DataSample().set_gt_label(1)] + + model: ImageClassifier = MODELS.build(self.DEFAULT_ARGS) + predictions = model.predict(inputs) + self.assertEqual(predictions[0].pred_score.shape, (1000, )) + + predictions = model.predict(inputs, data_samples) + self.assertEqual(predictions[0].pred_score.shape, (1000, )) + self.assertEqual(data_samples[0].pred_score.shape, (1000, )) + torch.testing.assert_allclose(data_samples[0].pred_score, + predictions[0].pred_score) + + def test_forward(self): + inputs = torch.rand(1, 3, 224, 224) + data_samples = [DataSample().set_gt_label(1)] + model: ImageClassifier = MODELS.build(self.DEFAULT_ARGS) + + # test pure forward + outs = model(inputs) + self.assertIsInstance(outs, torch.Tensor) + + # test forward train + losses = model(inputs, data_samples, mode='loss') + self.assertGreater(losses['loss'].item(), 0) + + # test forward test + predictions = model(inputs, mode='predict') + self.assertEqual(predictions[0].pred_score.shape, (1000, )) + + predictions = model(inputs, data_samples, mode='predict') + self.assertEqual(predictions[0].pred_score.shape, (1000, )) + self.assertEqual(data_samples[0].pred_score.shape, (1000, )) + torch.testing.assert_allclose(data_samples[0].pred_score, + predictions[0].pred_score) + + # test forward with invalid mode + with self.assertRaisesRegex(RuntimeError, 'Invalid mode "unknown"'): + model(inputs, mode='unknown') + + def test_train_step(self): + cfg = { + **self.DEFAULT_ARGS, 'data_preprocessor': + dict(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + } + model: ImageClassifier = MODELS.build(cfg) + + data = { + 'inputs': torch.randint(0, 256, (1, 3, 224, 224)), + 'data_samples': [DataSample().set_gt_label(1)] + } + + optim_wrapper = MagicMock() + log_vars = model.train_step(data, optim_wrapper) + self.assertIn('loss', log_vars) + optim_wrapper.update_params.assert_called_once() + + def test_val_step(self): + cfg = { + **self.DEFAULT_ARGS, 'data_preprocessor': + dict(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + } + model: ImageClassifier = MODELS.build(cfg) + + data = { + 'inputs': torch.randint(0, 256, (1, 3, 224, 224)), + 'data_samples': [DataSample().set_gt_label(1)] + } + + predictions = model.val_step(data) + self.assertEqual(predictions[0].pred_score.shape, (1000, )) + + def test_test_step(self): + cfg = { + **self.DEFAULT_ARGS, 'data_preprocessor': + dict(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + } + model: 
ImageClassifier = MODELS.build(cfg) + + data = { + 'inputs': torch.randint(0, 256, (1, 3, 224, 224)), + 'data_samples': [DataSample().set_gt_label(1)] + } + + predictions = model.test_step(data) + self.assertEqual(predictions[0].pred_score.shape, (1000, )) + + +@unittest.skipIf(not has_huggingface(), 'huggingface is not installed.') +class TestHuggingFaceClassifier(TestCase): + DEFAULT_ARGS = dict( + type='HuggingFaceClassifier', + model_name='microsoft/resnet-18', + loss=dict(type='CrossEntropyLoss'), + ) + + def test_initialize(self): + model = MODELS.build(self.DEFAULT_ARGS) + assert isinstance(model.model, nn.Module) + + # test set batch augmentation from train_cfg + cfg = { + **self.DEFAULT_ARGS, 'train_cfg': + dict(augments=dict(type='Mixup', alpha=1.)) + } + model: ImageClassifier = MODELS.build(cfg) + self.assertIsNotNone(model.data_preprocessor.batch_augments) + + cfg = {**self.DEFAULT_ARGS, 'train_cfg': dict()} + model: ImageClassifier = MODELS.build(cfg) + self.assertIsNone(model.data_preprocessor.batch_augments) + + def test_loss(self): + inputs = torch.rand(1, 3, 224, 224) + data_samples = [DataSample().set_gt_label(1)] + + model: ImageClassifier = MODELS.build(self.DEFAULT_ARGS) + losses = model.loss(inputs, data_samples) + self.assertGreater(losses['loss'].item(), 0) + + def test_predict(self): + inputs = torch.rand(1, 3, 224, 224) + data_samples = [DataSample().set_gt_label(1)] + + model: ImageClassifier = MODELS.build(self.DEFAULT_ARGS) + predictions = model.predict(inputs) + self.assertEqual(predictions[0].pred_score.shape, (1000, )) + + predictions = model.predict(inputs, data_samples) + self.assertEqual(predictions[0].pred_score.shape, (1000, )) + self.assertEqual(data_samples[0].pred_score.shape, (1000, )) + torch.testing.assert_allclose(data_samples[0].pred_score, + predictions[0].pred_score) + + def test_forward(self): + inputs = torch.rand(1, 3, 224, 224) + data_samples = [DataSample().set_gt_label(1)] + model: ImageClassifier = MODELS.build(self.DEFAULT_ARGS) + + # test pure forward + outs = model(inputs) + self.assertIsInstance(outs, torch.Tensor) + + # test forward train + losses = model(inputs, data_samples, mode='loss') + self.assertGreater(losses['loss'].item(), 0) + + # test forward test + predictions = model(inputs, mode='predict') + self.assertEqual(predictions[0].pred_score.shape, (1000, )) + + predictions = model(inputs, data_samples, mode='predict') + self.assertEqual(predictions[0].pred_score.shape, (1000, )) + self.assertEqual(data_samples[0].pred_score.shape, (1000, )) + torch.testing.assert_allclose(data_samples[0].pred_score, + predictions[0].pred_score) + + # test forward with invalid mode + with self.assertRaisesRegex(RuntimeError, 'Invalid mode "unknown"'): + model(inputs, mode='unknown') + + def test_train_step(self): + cfg = { + **self.DEFAULT_ARGS, 'data_preprocessor': + dict(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + } + model: ImageClassifier = MODELS.build(cfg) + + data = { + 'inputs': torch.randint(0, 256, (1, 3, 224, 224)), + 'data_samples': [DataSample().set_gt_label(1)] + } + + optim_wrapper = MagicMock() + log_vars = model.train_step(data, optim_wrapper) + self.assertIn('loss', log_vars) + optim_wrapper.update_params.assert_called_once() + + def test_val_step(self): + cfg = { + **self.DEFAULT_ARGS, 'data_preprocessor': + dict(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + } + model: ImageClassifier = MODELS.build(cfg) + + data = { + 'inputs': torch.randint(0, 256, (1, 3, 224, 224)), + 'data_samples': 
[DataSample().set_gt_label(1)] + } + + predictions = model.val_step(data) + self.assertEqual(predictions[0].pred_score.shape, (1000, )) + + def test_test_step(self): + cfg = { + **self.DEFAULT_ARGS, 'data_preprocessor': + dict(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + } + model: ImageClassifier = MODELS.build(cfg) + + data = { + 'inputs': torch.randint(0, 256, (1, 3, 224, 224)), + 'data_samples': [DataSample().set_gt_label(1)] + } + + predictions = model.test_step(data) + self.assertEqual(predictions[0].pred_score.shape, (1000, )) diff --git a/tests/test_models/test_heads.py b/tests/test_models/test_heads.py new file mode 100644 index 0000000..a4ddf49 --- /dev/null +++ b/tests/test_models/test_heads.py @@ -0,0 +1,736 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import os +import random +import tempfile +from unittest import TestCase + +import numpy as np +import torch +from mmengine import is_seq_of + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample, MultiTaskDataSample + + +def setup_seed(seed): + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + np.random.seed(seed) + random.seed(seed) + torch.backends.cudnn.deterministic = True + + +class TestClsHead(TestCase): + DEFAULT_ARGS = dict(type='ClsHead') + FAKE_FEATS = (torch.rand(4, 10), ) + + def test_pre_logits(self): + head = MODELS.build(self.DEFAULT_ARGS) + + # return the last item + feats = (torch.rand(4, 10), torch.rand(4, 10)) + pre_logits = head.pre_logits(feats) + self.assertIs(pre_logits, feats[-1]) + + def test_forward(self): + head = MODELS.build(self.DEFAULT_ARGS) + + # return the last item (same as pre_logits) + feats = (torch.rand(4, 10), torch.rand(4, 10)) + outs = head(feats) + self.assertIs(outs, feats[-1]) + + def test_loss(self): + feats = self.FAKE_FEATS + data_samples = [DataSample().set_gt_label(1) for _ in range(4)] + + # with cal_acc = False + head = MODELS.build(self.DEFAULT_ARGS) + + losses = head.loss(feats, data_samples) + self.assertEqual(losses.keys(), {'loss'}) + self.assertGreater(losses['loss'].item(), 0) + + # with cal_acc = True + cfg = {**self.DEFAULT_ARGS, 'topk': (1, 2), 'cal_acc': True} + head = MODELS.build(cfg) + + losses = head.loss(feats, data_samples) + self.assertEqual(losses.keys(), + {'loss', 'accuracy_top-1', 'accuracy_top-2'}) + self.assertGreater(losses['loss'].item(), 0) + + # test assertion when cal_acc but data is batch agumented. 
+        data_samples = [
+            sample.set_gt_score(torch.rand(10)) for sample in data_samples
+        ]
+        cfg = {
+            **self.DEFAULT_ARGS, 'cal_acc': True,
+            'loss': dict(type='CrossEntropyLoss', use_soft=True)
+        }
+        head = MODELS.build(cfg)
+        with self.assertRaisesRegex(AssertionError, 'batch augmentation'):
+            head.loss(feats, data_samples)
+
+    def test_predict(self):
+        feats = (torch.rand(4, 10), )
+        data_samples = [DataSample().set_gt_label(1) for _ in range(4)]
+        head = MODELS.build(self.DEFAULT_ARGS)
+
+        # without data_samples
+        predictions = head.predict(feats)
+        self.assertTrue(is_seq_of(predictions, DataSample))
+        for pred in predictions:
+            self.assertIn('pred_label', pred)
+            self.assertIn('pred_score', pred)
+
+        # with data_samples
+        predictions = head.predict(feats, data_samples)
+        self.assertTrue(is_seq_of(predictions, DataSample))
+        for sample, pred in zip(data_samples, predictions):
+            self.assertIs(sample, pred)
+            self.assertIn('pred_label', pred)
+            self.assertIn('pred_score', pred)
+
+
+class TestLinearClsHead(TestCase):
+    DEFAULT_ARGS = dict(type='LinearClsHead', in_channels=10, num_classes=5)
+    FAKE_FEATS = (torch.rand(4, 10), )
+
+    def test_initialize(self):
+        with self.assertRaisesRegex(ValueError, 'num_classes=-5 must be'):
+            MODELS.build({**self.DEFAULT_ARGS, 'num_classes': -5})
+
+    def test_pre_logits(self):
+        head = MODELS.build(self.DEFAULT_ARGS)
+
+        # return the last item
+        feats = (torch.rand(4, 10), torch.rand(4, 10))
+        pre_logits = head.pre_logits(feats)
+        self.assertIs(pre_logits, feats[-1])
+
+    def test_forward(self):
+        head = MODELS.build(self.DEFAULT_ARGS)
+
+        feats = (torch.rand(4, 10), torch.rand(4, 10))
+        outs = head(feats)
+        self.assertEqual(outs.shape, (4, 5))
+
+
+class TestVisionTransformerClsHead(TestCase):
+    DEFAULT_ARGS = dict(
+        type='VisionTransformerClsHead', in_channels=10, num_classes=5)
+    fake_feats = ([torch.rand(4, 7, 7, 16), torch.rand(4, 10)], )
+
+    def test_initialize(self):
+        with self.assertRaisesRegex(ValueError, 'num_classes=-5 must be'):
+            MODELS.build({**self.DEFAULT_ARGS, 'num_classes': -5})
+
+        # test vit head default
+        head = MODELS.build(self.DEFAULT_ARGS)
+        assert not hasattr(head.layers, 'pre_logits')
+        assert not hasattr(head.layers, 'act')
+
+        # test vit head hidden_dim
+        head = MODELS.build({**self.DEFAULT_ARGS, 'hidden_dim': 30})
+        assert hasattr(head.layers, 'pre_logits')
+        assert hasattr(head.layers, 'act')
+
+        # test vit head init_weights
+        head = MODELS.build(self.DEFAULT_ARGS)
+        head.init_weights()
+
+        # test vit head init_weights with hidden_dim
+        head = MODELS.build({**self.DEFAULT_ARGS, 'hidden_dim': 30})
+        head.init_weights()
+        assert abs(head.layers.pre_logits.weight).sum() > 0
+
+    def test_pre_logits(self):
+        # test default
+        head = MODELS.build(self.DEFAULT_ARGS)
+        pre_logits = head.pre_logits(self.fake_feats)
+        self.assertIs(pre_logits, self.fake_feats[-1][1])
+
+        # test hidden_dim
+        head = MODELS.build({**self.DEFAULT_ARGS, 'hidden_dim': 30})
+        pre_logits = head.pre_logits(self.fake_feats)
+        self.assertEqual(pre_logits.shape, (4, 30))
+
+    def test_forward(self):
+        # test default
+        head = MODELS.build(self.DEFAULT_ARGS)
+        outs = head(self.fake_feats)
+        self.assertEqual(outs.shape, (4, 5))
+
+        # test hidden_dim
+        head = MODELS.build({**self.DEFAULT_ARGS, 'hidden_dim': 30})
+        outs = head(self.fake_feats)
+        self.assertEqual(outs.shape, (4, 5))
+
+
+class TestDeiTClsHead(TestVisionTransformerClsHead):
+    DEFAULT_ARGS = dict(type='DeiTClsHead', in_channels=10, num_classes=5)
+    fake_feats = ([
+        torch.rand(4, 7, 7, 16),
+
torch.rand(4, 10), + torch.rand(4, 10) + ], ) + + def test_pre_logits(self): + # test default + head = MODELS.build(self.DEFAULT_ARGS) + cls_token, dist_token = head.pre_logits(self.fake_feats) + self.assertIs(cls_token, self.fake_feats[-1][1]) + self.assertIs(dist_token, self.fake_feats[-1][2]) + + # test hidden_dim + head = MODELS.build({**self.DEFAULT_ARGS, 'hidden_dim': 30}) + cls_token, dist_token = head.pre_logits(self.fake_feats) + self.assertEqual(cls_token.shape, (4, 30)) + self.assertEqual(dist_token.shape, (4, 30)) + + +class TestConformerHead(TestCase): + DEFAULT_ARGS = dict( + type='ConformerHead', in_channels=[64, 96], num_classes=5) + fake_feats = ([torch.rand(4, 64), torch.rand(4, 96)], ) + + def test_initialize(self): + with self.assertRaisesRegex(ValueError, 'num_classes=-5 must be'): + MODELS.build({**self.DEFAULT_ARGS, 'num_classes': -5}) + + # test default + head = MODELS.build(self.DEFAULT_ARGS) + assert hasattr(head, 'conv_cls_head') + assert hasattr(head, 'trans_cls_head') + + # test init_weights + head = MODELS.build(self.DEFAULT_ARGS) + head.init_weights() + assert abs(head.conv_cls_head.weight).sum() > 0 + assert abs(head.trans_cls_head.weight).sum() > 0 + + def test_pre_logits(self): + # test default + head = MODELS.build(self.DEFAULT_ARGS) + pre_logits = head.pre_logits(self.fake_feats) + self.assertIs(pre_logits, self.fake_feats[-1]) + + def test_forward(self): + head = MODELS.build(self.DEFAULT_ARGS) + outs = head(self.fake_feats) + self.assertEqual(outs[0].shape, (4, 5)) + self.assertEqual(outs[1].shape, (4, 5)) + + def test_loss(self): + data_samples = [DataSample().set_gt_label(1) for _ in range(4)] + + # with cal_acc = False + head = MODELS.build(self.DEFAULT_ARGS) + + losses = head.loss(self.fake_feats, data_samples) + self.assertEqual(losses.keys(), {'loss'}) + self.assertGreater(losses['loss'].item(), 0) + + # with cal_acc = True + cfg = {**self.DEFAULT_ARGS, 'topk': (1, 2), 'cal_acc': True} + head = MODELS.build(cfg) + + losses = head.loss(self.fake_feats, data_samples) + self.assertEqual(losses.keys(), + {'loss', 'accuracy_top-1', 'accuracy_top-2'}) + self.assertGreater(losses['loss'].item(), 0) + + # test assertion when cal_acc but data is batch agumented. 
+ data_samples = [ + sample.set_gt_score(torch.rand(5)) for sample in data_samples + ] + cfg = { + **self.DEFAULT_ARGS, 'cal_acc': True, + 'loss': dict(type='CrossEntropyLoss', use_soft=True) + } + head = MODELS.build(cfg) + with self.assertRaisesRegex(AssertionError, 'batch augmentation'): + head.loss(self.fake_feats, data_samples) + + def test_predict(self): + data_samples = [DataSample().set_gt_label(1) for _ in range(4)] + head = MODELS.build(self.DEFAULT_ARGS) + + # with without data_samples + predictions = head.predict(self.fake_feats) + self.assertTrue(is_seq_of(predictions, DataSample)) + for pred in predictions: + self.assertIn('pred_label', pred) + self.assertIn('pred_score', pred) + + # with with data_samples + predictions = head.predict(self.fake_feats, data_samples) + self.assertTrue(is_seq_of(predictions, DataSample)) + for sample, pred in zip(data_samples, predictions): + self.assertIs(sample, pred) + self.assertIn('pred_label', pred) + self.assertIn('pred_score', pred) + + +class TestStackedLinearClsHead(TestCase): + DEFAULT_ARGS = dict( + type='StackedLinearClsHead', in_channels=10, num_classes=5) + fake_feats = (torch.rand(4, 10), ) + + def test_initialize(self): + with self.assertRaisesRegex(ValueError, 'num_classes=-5 must be'): + MODELS.build({ + **self.DEFAULT_ARGS, 'num_classes': -5, + 'mid_channels': 10 + }) + + # test mid_channels + with self.assertRaisesRegex(AssertionError, 'should be a sequence'): + MODELS.build({**self.DEFAULT_ARGS, 'mid_channels': 10}) + + # test default + head = MODELS.build({**self.DEFAULT_ARGS, 'mid_channels': [20]}) + assert len(head.layers) == 2 + head.init_weights() + + def test_pre_logits(self): + # test default + head = MODELS.build({**self.DEFAULT_ARGS, 'mid_channels': [20, 30]}) + pre_logits = head.pre_logits(self.fake_feats) + self.assertEqual(pre_logits.shape, (4, 30)) + + def test_forward(self): + # test default + head = MODELS.build({**self.DEFAULT_ARGS, 'mid_channels': [20, 30]}) + outs = head(self.fake_feats) + self.assertEqual(outs.shape, (4, 5)) + + head = MODELS.build({ + **self.DEFAULT_ARGS, 'mid_channels': [8, 10], + 'dropout_rate': 0.2, + 'norm_cfg': dict(type='BN1d'), + 'act_cfg': dict(type='HSwish') + }) + outs = head(self.fake_feats) + self.assertEqual(outs.shape, (4, 5)) + + +class TestMultiLabelClsHead(TestCase): + DEFAULT_ARGS = dict(type='MultiLabelClsHead') + + def test_pre_logits(self): + head = MODELS.build(self.DEFAULT_ARGS) + + # return the last item + feats = (torch.rand(4, 10), torch.rand(4, 10)) + pre_logits = head.pre_logits(feats) + self.assertIs(pre_logits, feats[-1]) + + def test_forward(self): + head = MODELS.build(self.DEFAULT_ARGS) + + # return the last item (same as pre_logits) + feats = (torch.rand(4, 10), torch.rand(4, 10)) + outs = head(feats) + self.assertIs(outs, feats[-1]) + + def test_loss(self): + feats = (torch.rand(4, 10), ) + data_samples = [DataSample().set_gt_label([0, 3]) for _ in range(4)] + + # Test with thr and topk are all None + head = MODELS.build(self.DEFAULT_ARGS) + losses = head.loss(feats, data_samples) + self.assertEqual(head.thr, 0.5) + self.assertEqual(head.topk, None) + self.assertEqual(losses.keys(), {'loss'}) + self.assertGreater(losses['loss'].item(), 0) + + # Test with topk + cfg = copy.deepcopy(self.DEFAULT_ARGS) + cfg['topk'] = 2 + head = MODELS.build(cfg) + losses = head.loss(feats, data_samples) + self.assertEqual(head.thr, None, cfg) + self.assertEqual(head.topk, 2) + self.assertEqual(losses.keys(), {'loss'}) + self.assertGreater(losses['loss'].item(), 0) + + # 
Test with thr
+        setup_seed(0)
+        cfg = copy.deepcopy(self.DEFAULT_ARGS)
+        cfg['thr'] = 0.1
+        head = MODELS.build(cfg)
+        thr_losses = head.loss(feats, data_samples)
+        self.assertEqual(head.thr, 0.1)
+        self.assertEqual(head.topk, None)
+        self.assertEqual(thr_losses.keys(), {'loss'})
+        self.assertGreater(thr_losses['loss'].item(), 0)
+
+        # Test with thr and topk are all not None
+        setup_seed(0)
+        cfg = copy.deepcopy(self.DEFAULT_ARGS)
+        cfg['thr'] = 0.1
+        cfg['topk'] = 2
+        head = MODELS.build(cfg)
+        thr_topk_losses = head.loss(feats, data_samples)
+        self.assertEqual(head.thr, 0.1)
+        self.assertEqual(head.topk, 2)
+        self.assertEqual(thr_topk_losses.keys(), {'loss'})
+        self.assertGreater(thr_topk_losses['loss'].item(), 0)
+
+        # Test with gt_label with score
+        data_samples = [
+            DataSample().set_gt_score(torch.rand((10, ))) for _ in range(4)
+        ]
+
+        head = MODELS.build(self.DEFAULT_ARGS)
+        losses = head.loss(feats, data_samples)
+        self.assertEqual(head.thr, 0.5)
+        self.assertEqual(head.topk, None)
+        self.assertEqual(losses.keys(), {'loss'})
+        self.assertGreater(losses['loss'].item(), 0)
+
+    def test_predict(self):
+        feats = (torch.rand(4, 10), )
+        data_samples = [DataSample().set_gt_label([1, 2]) for _ in range(4)]
+        head = MODELS.build(self.DEFAULT_ARGS)
+
+        # without data_samples
+        predictions = head.predict(feats)
+        self.assertTrue(is_seq_of(predictions, DataSample))
+        for pred in predictions:
+            self.assertIn('pred_label', pred)
+            self.assertIn('pred_score', pred)
+
+        # with data_samples
+        predictions = head.predict(feats, data_samples)
+        self.assertTrue(is_seq_of(predictions, DataSample))
+        for sample, pred in zip(data_samples, predictions):
+            self.assertIs(sample, pred)
+            self.assertIn('pred_label', pred)
+            self.assertIn('pred_score', pred)
+
+        # Test with topk
+        cfg = copy.deepcopy(self.DEFAULT_ARGS)
+        cfg['topk'] = 2
+        head = MODELS.build(cfg)
+        predictions = head.predict(feats, data_samples)
+        self.assertEqual(head.thr, None)
+        self.assertTrue(is_seq_of(predictions, DataSample))
+        for sample, pred in zip(data_samples, predictions):
+            self.assertIs(sample, pred)
+            self.assertIn('pred_label', pred)
+            self.assertIn('pred_score', pred)
+
+
+class EfficientFormerClsHead(TestClsHead):
+    DEFAULT_ARGS = dict(
+        type='EfficientFormerClsHead',
+        in_channels=10,
+        num_classes=10,
+        distillation=False)
+    FAKE_FEATS = (torch.rand(4, 10), )
+
+    def test_forward(self):
+        # test with distillation head
+        cfg = copy.deepcopy(self.DEFAULT_ARGS)
+        cfg['distillation'] = True
+        head = MODELS.build(cfg)
+        self.assertTrue(hasattr(head, 'dist_head'))
+        feats = (torch.rand(4, 10), torch.rand(4, 10))
+        outs = head(feats)
+        self.assertEqual(outs.shape, (4, 10))
+
+        # test without distillation head
+        cfg = copy.deepcopy(self.DEFAULT_ARGS)
+        head = MODELS.build(cfg)
+        self.assertFalse(hasattr(head, 'dist_head'))
+        feats = (torch.rand(4, 10), torch.rand(4, 10))
+        outs = head(feats)
+        self.assertEqual(outs.shape, (4, 10))
+
+    def test_loss(self):
+        feats = (torch.rand(4, 10), )
+        data_samples = [DataSample().set_gt_label(1) for _ in range(4)]
+
+        # test with distillation head
+        cfg = copy.deepcopy(self.DEFAULT_ARGS)
+        cfg['distillation'] = True
+        head = MODELS.build(cfg)
+        with self.assertRaisesRegex(NotImplementedError, 'MMPretrain '):
+            head.loss(feats, data_samples)
+
+        # test without distillation head
+        super().test_loss()
+
+
+class TestMultiLabelLinearClsHead(TestMultiLabelClsHead):
+    DEFAULT_ARGS = dict(
+        type='MultiLabelLinearClsHead', num_classes=10, in_channels=10)
+
+    def test_forward(self):
+        head 
= MODELS.build(self.DEFAULT_ARGS) + self.assertTrue(hasattr(head, 'fc')) + self.assertTrue(isinstance(head.fc, torch.nn.Linear)) + + # return the last item (same as pre_logits) + feats = (torch.rand(4, 10), torch.rand(4, 10)) + head(feats) + + +class TestMultiTaskHead(TestCase): + DEFAULT_ARGS = dict( + type='MultiTaskHead', # <- Head config, depends on #675 + task_heads={ + 'task0': dict(type='LinearClsHead', num_classes=3), + 'task1': dict(type='LinearClsHead', num_classes=6), + }, + in_channels=10, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ) + + DEFAULT_ARGS2 = dict( + type='MultiTaskHead', # <- Head config, depends on #675 + task_heads={ + 'task0': + dict( + type='MultiTaskHead', + task_heads={ + 'task00': dict(type='LinearClsHead', num_classes=3), + 'task01': dict(type='LinearClsHead', num_classes=6), + }), + 'task1': + dict(type='LinearClsHead', num_classes=6) + }, + in_channels=10, + loss=dict(type='CrossEntropyLoss', loss_weight=1.0), + ) + + def test_forward(self): + head = MODELS.build(self.DEFAULT_ARGS) + # return the last item (same as pre_logits) + feats = (torch.rand(4, 10), ) + outs = head(feats) + self.assertEqual(outs['task0'].shape, (4, 3)) + self.assertEqual(outs['task1'].shape, (4, 6)) + self.assertTrue(isinstance(outs, dict)) + + def test_loss(self): + feats = (torch.rand(4, 10), ) + data_samples = [] + + for _ in range(4): + data_sample = MultiTaskDataSample() + for task_name in self.DEFAULT_ARGS['task_heads']: + task_sample = DataSample().set_gt_label(1) + data_sample.set_field(task_sample, task_name) + data_samples.append(data_sample) + # with cal_acc = False + head = MODELS.build(self.DEFAULT_ARGS) + + losses = head.loss(feats, data_samples) + self.assertEqual( + losses.keys(), + {'task0_loss', 'task0_mask_size', 'task1_loss', 'task1_mask_size'}) + self.assertGreater(losses['task0_loss'].item(), 0) + self.assertGreater(losses['task1_loss'].item(), 0) + + def test_predict(self): + feats = (torch.rand(4, 10), ) + data_samples = [] + + for _ in range(4): + data_sample = MultiTaskDataSample() + for task_name in self.DEFAULT_ARGS['task_heads']: + task_sample = DataSample().set_gt_label(1) + data_sample.set_field(task_sample, task_name) + data_samples.append(data_sample) + head = MODELS.build(self.DEFAULT_ARGS) + # without data_samples + predictions = head.predict(feats) + self.assertTrue(is_seq_of(predictions, MultiTaskDataSample)) + for pred in predictions: + self.assertIn('task0', pred) + task0_sample = predictions[0].task0 + self.assertTrue(type(task0_sample.pred_score), 'torch.tensor') + + # with with data_samples + predictions = head.predict(feats, data_samples) + self.assertTrue(is_seq_of(predictions, MultiTaskDataSample)) + for sample, pred in zip(data_samples, predictions): + self.assertIs(sample, pred) + self.assertIn('task0', pred) + + # with data samples and nested + head_nested = MODELS.build(self.DEFAULT_ARGS2) + # adding a None data sample at the beginning + data_samples_nested = [None] + for _ in range(3): + data_sample_nested = MultiTaskDataSample() + data_sample_nested0 = MultiTaskDataSample() + data_sample_nested0.set_field(DataSample().set_gt_label(1), + 'task00') + data_sample_nested0.set_field(DataSample().set_gt_label(1), + 'task01') + data_sample_nested.set_field(data_sample_nested0, 'task0') + data_sample_nested.set_field(DataSample().set_gt_label(1), 'task1') + data_samples_nested.append(data_sample_nested) + + predictions = head_nested.predict(feats, data_samples_nested) + self.assertTrue(is_seq_of(predictions, 
MultiTaskDataSample)) + for i in range(3): + sample = data_samples_nested[i + 1] + pred = predictions[i + 1] + self.assertIn('task0', pred) + self.assertIn('task1', pred) + self.assertIn('task01', pred.get('task0')) + self.assertEqual( + sample.get('task0').get('task01').gt_label.numpy()[0], 1) + + def test_loss_empty_data_sample(self): + feats = (torch.rand(4, 10), ) + data_samples = [] + + for _ in range(4): + data_sample = MultiTaskDataSample() + data_samples.append(data_sample) + # with cal_acc = False + head = MODELS.build(self.DEFAULT_ARGS) + losses = head.loss(feats, data_samples) + self.assertEqual( + losses.keys(), + {'task0_loss', 'task0_mask_size', 'task1_loss', 'task1_mask_size'}) + self.assertEqual(losses['task0_loss'].item(), 0) + self.assertEqual(losses['task1_loss'].item(), 0) + + def test_nested_multi_task_loss(self): + + head = MODELS.build(self.DEFAULT_ARGS2) + # return the last item (same as pre_logits) + feats = (torch.rand(4, 10), ) + outs = head(feats) + self.assertEqual(outs['task0']['task01'].shape, (4, 6)) + self.assertTrue(isinstance(outs, dict)) + self.assertTrue(isinstance(outs['task0'], dict)) + + def test_nested_invalid_sample(self): + feats = (torch.rand(4, 10), ) + gt_label = {'task0': 1, 'task1': 1} + head = MODELS.build(self.DEFAULT_ARGS2) + data_sample = MultiTaskDataSample() + for task_name in gt_label: + task_sample = DataSample().set_gt_label(gt_label[task_name]) + data_sample.set_field(task_sample, task_name) + with self.assertRaises(Exception): + head.loss(feats, data_sample) + + def test_nested_invalid_sample2(self): + feats = (torch.rand(4, 10), ) + gt_label = {'task0': {'task00': 1, 'task01': 1}, 'task1': 1} + head = MODELS.build(self.DEFAULT_ARGS) + data_sample = MultiTaskDataSample() + task_sample = DataSample().set_gt_label(gt_label['task1']) + data_sample.set_field(task_sample, 'task1') + data_sample.set_field(MultiTaskDataSample(), 'task0') + for task_name in gt_label['task0']: + task_sample = DataSample().set_gt_label( + gt_label['task0'][task_name]) + data_sample.task0.set_field(task_sample, task_name) + with self.assertRaises(Exception): + head.loss(feats, data_sample) + + +class TestArcFaceClsHead(TestCase): + DEFAULT_ARGS = dict(type='ArcFaceClsHead', in_channels=10, num_classes=5) + + def test_initialize(self): + with self.assertRaises(AssertionError): + MODELS.build({**self.DEFAULT_ARGS, 'num_classes': -5}) + + with self.assertRaises(AssertionError): + MODELS.build({**self.DEFAULT_ARGS, 'num_subcenters': 0}) + + # Test margins + with self.assertRaises(AssertionError): + MODELS.build({**self.DEFAULT_ARGS, 'margins': dict()}) + + with self.assertRaises(AssertionError): + MODELS.build({**self.DEFAULT_ARGS, 'margins': [0.1] * 4}) + + with self.assertRaises(AssertionError): + MODELS.build({**self.DEFAULT_ARGS, 'margins': [0.1] * 4 + ['0.1']}) + + arcface = MODELS.build(self.DEFAULT_ARGS) + torch.allclose(arcface.margins, torch.tensor([0.5] * 5)) + + arcface = MODELS.build({**self.DEFAULT_ARGS, 'margins': [0.1] * 5}) + torch.allclose(arcface.margins, torch.tensor([0.1] * 5)) + + margins = [0.1, 0.2, 0.3, 0.4, 5] + with tempfile.TemporaryDirectory() as tmpdirname: + tmp_path = os.path.join(tmpdirname, 'margins.txt') + with open(tmp_path, 'w') as tmp_file: + for m in margins: + tmp_file.write(f'{m}\n') + arcface = MODELS.build({**self.DEFAULT_ARGS, 'margins': tmp_path}) + torch.allclose(arcface.margins, torch.tensor(margins)) + + def test_pre_logits(self): + head = MODELS.build(self.DEFAULT_ARGS) + + # return the last item + feats = 
(torch.rand(4, 10), torch.rand(4, 10)) + pre_logits = head.pre_logits(feats) + self.assertIs(pre_logits, feats[-1]) + + # Test with SubCenterArcFace + head = MODELS.build({**self.DEFAULT_ARGS, 'num_subcenters': 3}) + feats = (torch.rand(4, 10), torch.rand(4, 10)) + pre_logits = head.pre_logits(feats) + self.assertIs(pre_logits, feats[-1]) + + def test_forward(self): + head = MODELS.build(self.DEFAULT_ARGS) + # target is not None + feats = (torch.rand(4, 10), torch.rand(4, 10)) + target = torch.zeros(4).long() + outs = head(feats, target) + self.assertEqual(outs.shape, (4, 5)) + + # target is None + feats = (torch.rand(4, 10), torch.rand(4, 10)) + outs = head(feats) + self.assertEqual(outs.shape, (4, 5)) + + # Test with SubCenterArcFace + head = MODELS.build({**self.DEFAULT_ARGS, 'num_subcenters': 3}) + # target is not None + feats = (torch.rand(4, 10), torch.rand(4, 10)) + target = torch.zeros(4) + outs = head(feats, target) + self.assertEqual(outs.shape, (4, 5)) + + # target is None + feats = (torch.rand(4, 10), torch.rand(4, 10)) + outs = head(feats) + self.assertEqual(outs.shape, (4, 5)) + + def test_loss(self): + feats = (torch.rand(4, 10), ) + data_samples = [DataSample().set_gt_label(1) for _ in range(4)] + + # test loss with used='before' + head = MODELS.build(self.DEFAULT_ARGS) + losses = head.loss(feats, data_samples) + self.assertEqual(losses.keys(), {'loss'}) + self.assertGreater(losses['loss'].item(), 0) + + # Test with SubCenterArcFace + head = MODELS.build({**self.DEFAULT_ARGS, 'num_subcenters': 3}) + # test loss with used='before' + losses = head.loss(feats, data_samples) + self.assertEqual(losses.keys(), {'loss'}) + self.assertGreater(losses['loss'].item(), 0) diff --git a/tests/test_models/test_losses.py b/tests/test_models/test_losses.py new file mode 100644 index 0000000..a9bd09e --- /dev/null +++ b/tests/test_models/test_losses.py @@ -0,0 +1,403 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
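The ArcFace tests above exercise three ways of supplying per-class margins: the 0.5 default, an explicit list, and a plain-text file with one value per line. A minimal stand-alone sketch of the file-based path, reusing the same config keys as DEFAULT_ARGS (the file name and margin values here are illustrative):

# Sketch only: mirrors the margins-from-file branch tested above.
import os
import tempfile

import torch

from mmpretrain.registry import MODELS

margins = [0.1, 0.2, 0.3, 0.4, 0.5]  # one margin per class, illustrative values
with tempfile.TemporaryDirectory() as tmpdir:
    margin_file = os.path.join(tmpdir, 'margins.txt')
    with open(margin_file, 'w') as f:
        for m in margins:
            f.write(f'{m}\n')
    head = MODELS.build(
        dict(type='ArcFaceClsHead', in_channels=10, num_classes=5,
             margins=margin_file))
    # the parsed margins should match what was written to the file
    assert torch.allclose(head.margins, torch.tensor(margins))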
+import pytest +import torch + +from mmpretrain.models import build_loss + + +def test_asymmetric_loss(): + # test asymmetric_loss + cls_score = torch.Tensor([[5, -5, 0], [5, -5, 0]]) + label = torch.Tensor([[1, 0, 1], [0, 1, 0]]) + weight = torch.tensor([0.5, 0.5]) + + loss_cfg = dict( + type='AsymmetricLoss', + gamma_pos=1.0, + gamma_neg=4.0, + clip=0.05, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(3.80845 / 3)) + + # test asymmetric_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(3.80845 / 6)) + + # test asymmetric_loss without clip + loss_cfg = dict( + type='AsymmetricLoss', + gamma_pos=1.0, + gamma_neg=4.0, + clip=None, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(5.1186 / 3)) + + # test asymmetric_loss with softmax for single label task + cls_score = torch.Tensor([[5, -5, 0], [5, -5, 0]]) + label = torch.Tensor([0, 1]) + weight = torch.tensor([0.5, 0.5]) + loss_cfg = dict( + type='AsymmetricLoss', + gamma_pos=0.0, + gamma_neg=0.0, + clip=None, + reduction='mean', + loss_weight=1.0, + use_sigmoid=False, + eps=1e-8) + loss = build_loss(loss_cfg) + # test asymmetric_loss for single label task without weight + assert torch.allclose(loss(cls_score, label), torch.tensor(2.5045)) + # test asymmetric_loss for single label task with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(2.5045 * 0.5)) + + # test soft asymmetric_loss with softmax + cls_score = torch.Tensor([[5, -5, 0], [5, -5, 0]]) + label = torch.Tensor([[1, 0, 0], [0, 1, 0]]) + weight = torch.tensor([0.5, 0.5]) + loss_cfg = dict( + type='AsymmetricLoss', + gamma_pos=0.0, + gamma_neg=0.0, + clip=None, + reduction='mean', + loss_weight=1.0, + use_sigmoid=False, + eps=1e-8) + loss = build_loss(loss_cfg) + # test soft asymmetric_loss with softmax without weight + assert torch.allclose(loss(cls_score, label), torch.tensor(2.5045)) + # test soft asymmetric_loss with softmax with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(2.5045 * 0.5)) + + +def test_cross_entropy_loss(): + with pytest.raises(AssertionError): + # use_sigmoid and use_soft could not be set simultaneously + loss_cfg = dict( + type='CrossEntropyLoss', use_sigmoid=True, use_soft=True) + loss = build_loss(loss_cfg) + + # test ce_loss + cls_score = torch.Tensor([[-1000, 1000], [100, -100]]) + label = torch.Tensor([0, 1]).long() + class_weight = [0.3, 0.7] # class 0 : 0.3, class 1 : 0.7 + weight = torch.tensor([0.6, 0.4]) + + # test ce_loss without class weight + loss_cfg = dict(type='CrossEntropyLoss', reduction='mean', loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(1100.)) + # test ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(640.)) + + # test ce_loss with class weight + loss_cfg = dict( + type='CrossEntropyLoss', + reduction='mean', + loss_weight=1.0, + class_weight=class_weight) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(370.)) + # test ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(208.)) + + # test bce_loss + cls_score = torch.Tensor([[-200, 100], [500, -1000], [300, -300]]) + label = torch.Tensor([[1, 0], [0, 1], [1, 0]]) + weight = torch.Tensor([0.6, 0.4, 0.5]) + 
class_weight = [0.1, 0.9] # class 0: 0.1, class 1: 0.9 + pos_weight = [0.1, 0.2] + + # test bce_loss without class weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(300.)) + # test ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(130.)) + + # test bce_loss with class weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0, + class_weight=class_weight) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(176.667)) + # test bce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(74.333)) + + # test bce loss with pos_weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0, + pos_weight=pos_weight) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(136.6667)) + + # test soft_ce_loss + cls_score = torch.Tensor([[-1000, 1000], [100, -100]]) + label = torch.Tensor([[1.0, 0.0], [0.0, 1.0]]) + class_weight = [0.3, 0.7] # class 0 : 0.3, class 1 : 0.7 + weight = torch.tensor([0.6, 0.4]) + + # test soft_ce_loss without class weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_soft=True, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(1100.)) + # test soft_ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(640.)) + + # test soft_ce_loss with class weight + loss_cfg = dict( + type='CrossEntropyLoss', + use_soft=True, + reduction='mean', + loss_weight=1.0, + class_weight=class_weight) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(370.)) + # test soft_ce_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(208.)) + + +def test_focal_loss(): + # test focal_loss + cls_score = torch.Tensor([[5, -5, 0], [5, -5, 0]]) + label = torch.Tensor([[1, 0, 1], [0, 1, 0]]) + weight = torch.tensor([0.5, 0.5]) + + loss_cfg = dict( + type='FocalLoss', + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose(loss(cls_score, label), torch.tensor(0.8522)) + # test focal_loss with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(0.8522 / 2)) + # test focal loss for single label task + cls_score = torch.Tensor([[5, -5, 0], [5, -5, 0]]) + label = torch.Tensor([0, 1]) + weight = torch.tensor([0.5, 0.5]) + assert torch.allclose(loss(cls_score, label), torch.tensor(0.86664125)) + # test focal_loss single label with weight + assert torch.allclose( + loss(cls_score, label, weight=weight), torch.tensor(0.86664125 / 2)) + + +def test_label_smooth_loss(): + # test label_smooth_val assertion + with pytest.raises(AssertionError): + loss_cfg = dict(type='LabelSmoothLoss', label_smooth_val=1.0) + build_loss(loss_cfg) + + with pytest.raises(AssertionError): + loss_cfg = dict(type='LabelSmoothLoss', label_smooth_val='str') + build_loss(loss_cfg) + + # test reduction assertion + with pytest.raises(AssertionError): + loss_cfg = dict( + type='LabelSmoothLoss', label_smooth_val=0.1, reduction='unknown') + build_loss(loss_cfg) + + # test mode assertion + with pytest.raises(AssertionError): + loss_cfg = 
dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='unknown') + build_loss(loss_cfg) + + # test original mode label smooth loss + cls_score = torch.tensor([[1., -1.]]) + label = torch.tensor([0]) + + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + correct = 0.2269 # from timm + assert loss(cls_score, label) - correct <= 0.0001 + + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + use_sigmoid=True, + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + correct = 0.3633 # from timm + assert loss(cls_score, label) - correct <= 0.0001 + + # test classy_vision mode label smooth loss + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='classy_vision', + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + correct = 0.2178 # from ClassyVision + assert loss(cls_score, label) - correct <= 0.0001 + + # test multi_label mode label smooth loss + cls_score = torch.tensor([[1., -1., 1]]) + label = torch.tensor([[1, 0, 1]]) + + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='multi_label', + reduction='mean', + loss_weight=1.0) + loss = build_loss(loss_cfg) + smooth_label = torch.tensor([[0.9, 0.1, 0.9]]) + correct = torch.binary_cross_entropy_with_logits(cls_score, + smooth_label).mean() + assert torch.allclose(loss(cls_score, label), correct) + + # test label linear combination smooth loss + cls_score = torch.tensor([[1., -1., 0.]]) + label1 = torch.tensor([[1., 0., 0.]]) + label2 = torch.tensor([[0., 0., 1.]]) + label_mix = label1 * 0.6 + label2 * 0.4 + + loss_cfg = dict( + type='LabelSmoothLoss', + label_smooth_val=0.1, + mode='original', + reduction='mean', + num_classes=3, + loss_weight=1.0) + loss = build_loss(loss_cfg) + smooth_label1 = loss.original_smooth_label(label1) + smooth_label2 = loss.original_smooth_label(label2) + label_smooth_mix = smooth_label1 * 0.6 + smooth_label2 * 0.4 + correct = (-torch.log_softmax(cls_score, -1) * label_smooth_mix).sum() + + assert loss(cls_score, label_mix) - correct <= 0.0001 + + # test label smooth loss with weight + cls_score = torch.tensor([[1., -1.], [1., -1.]]) + label = torch.tensor([0, 1]) + weight = torch.tensor([0.5, 0.5]) + + loss_cfg = dict( + type='LabelSmoothLoss', + reduction='mean', + label_smooth_val=0.1, + loss_weight=1.0) + loss = build_loss(loss_cfg) + assert torch.allclose( + loss(cls_score, label, weight=weight), + loss(cls_score, label) / 2) + + +# migrate from mmdetection with modifications +def test_seesaw_loss(): + # only softmax version of Seesaw Loss is implemented + with pytest.raises(AssertionError): + loss_cfg = dict(type='SeesawLoss', use_sigmoid=True, loss_weight=1.0) + build_loss(loss_cfg) + + # test that cls_score.size(-1) == num_classes + loss_cls_cfg = dict( + type='SeesawLoss', p=0.0, q=0.0, loss_weight=1.0, num_classes=2) + loss_cls = build_loss(loss_cls_cfg) + # the length of fake_pred should be num_classe = 4 + with pytest.raises(AssertionError): + fake_pred = torch.Tensor([[-100, 100, -100]]) + fake_label = torch.Tensor([1]).long() + loss_cls(fake_pred, fake_label) + # the length of fake_pred should be num_classes + 2 = 4 + with pytest.raises(AssertionError): + fake_pred = torch.Tensor([[-100, 100, -100, 100]]) + fake_label = torch.Tensor([1]).long() + loss_cls(fake_pred, fake_label) + + # test the calculation without p and q + loss_cls_cfg = dict( + type='SeesawLoss', p=0.0, q=0.0, 
loss_weight=1.0, num_classes=2) + loss_cls = build_loss(loss_cls_cfg) + fake_pred = torch.Tensor([[-100, 100]]) + fake_label = torch.Tensor([1]).long() + loss = loss_cls(fake_pred, fake_label) + assert torch.allclose(loss, torch.tensor(0.)) + + # test the calculation with p and without q + loss_cls_cfg = dict( + type='SeesawLoss', p=1.0, q=0.0, loss_weight=1.0, num_classes=2) + loss_cls = build_loss(loss_cls_cfg) + fake_pred = torch.Tensor([[-100, 100]]) + fake_label = torch.Tensor([0]).long() + loss_cls.cum_samples[0] = torch.exp(torch.Tensor([20])) + loss = loss_cls(fake_pred, fake_label) + assert torch.allclose(loss, torch.tensor(180.)) + + # test the calculation with q and without p + loss_cls_cfg = dict( + type='SeesawLoss', p=0.0, q=1.0, loss_weight=1.0, num_classes=2) + loss_cls = build_loss(loss_cls_cfg) + fake_pred = torch.Tensor([[-100, 100]]) + fake_label = torch.Tensor([0]).long() + loss = loss_cls(fake_pred, fake_label) + assert torch.allclose(loss, torch.tensor(200.) + torch.tensor(100.).log()) + + +def test_reconstruction_loss(): + + # test L2 loss + loss_config = dict(type='PixelReconstructionLoss', criterion='L2') + loss = build_loss(loss_config) + + fake_pred = torch.rand((2, 196, 768)) + fake_target = torch.rand((2, 196, 768)) + fake_mask = torch.ones((2, 196)) + loss_value = loss(fake_pred, fake_target, fake_mask) + + assert isinstance(loss_value.item(), float) + + # test L1 loss + loss_config = dict( + type='PixelReconstructionLoss', criterion='L1', channel=3) + loss = build_loss(loss_config) + + fake_pred = torch.rand((2, 3, 192, 192)) + fake_target = torch.rand((2, 3, 192, 192)) + fake_mask = torch.ones((2, 1, 192, 192)) + loss_value = loss(fake_pred, fake_target, fake_mask) + + assert isinstance(loss_value.item(), float) + + with pytest.raises(NotImplementedError): + loss_config = dict(type='PixelReconstructionLoss', criterion='L3') + loss = build_loss(loss_config) diff --git a/tests/test_models/test_models.py b/tests/test_models/test_models.py new file mode 100644 index 0000000..29e0877 --- /dev/null +++ b/tests/test_models/test_models.py @@ -0,0 +1,95 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
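As a sanity check on the first figures asserted in test_cross_entropy_loss above, the unweighted value 1100. can be reproduced with plain PyTorch; treating build_loss('CrossEntropyLoss') as a thin wrapper around the standard cross entropy is an assumption, but it is consistent with that assertion:

import torch
import torch.nn.functional as F

cls_score = torch.tensor([[-1000., 1000.], [100., -100.]])
label = torch.tensor([0, 1])
# sample 0: -log softmax at class 0 ~= 2000; sample 1: ~= 200; mean = 1100
assert torch.allclose(F.cross_entropy(cls_score, label), torch.tensor(1100.))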
+from dataclasses import dataclass + +import pytest +import torch + +import mmpretrain.models +from mmpretrain.apis import ModelHub, get_model + + +@dataclass +class Cfg: + name: str + backbone: type + num_classes: int = 1000 + build: bool = True + forward: bool = True + backward: bool = True + extract_feat: bool = True + input_shape: tuple = (1, 3, 224, 224) + + +test_list = [ + Cfg(name='xcit-small-12-p16_3rdparty_in1k', + backbone=mmpretrain.models.XCiT), + Cfg(name='xcit-nano-12-p8_3rdparty-dist_in1k-384px', + backbone=mmpretrain.models.XCiT, + input_shape=(1, 3, 384, 384)), + Cfg(name='vit-base-p16_sam-pre_3rdparty_sa1b-1024px', + backbone=mmpretrain.models.ViTSAM, + forward=False, + backward=False), + Cfg(name='vit-base-p14_dinov2-pre_3rdparty', + backbone=mmpretrain.models.VisionTransformer, + forward=False, + backward=False), + Cfg(name='hivit-tiny-p16_16xb64_in1k', backbone=mmpretrain.models.HiViT), +] + + +@pytest.mark.parametrize('cfg', test_list) +def test_build(cfg: Cfg): + if not cfg.build: + return + + model_name = cfg.name + ModelHub._register_mmpretrain_models() + assert ModelHub.has(model_name) + + model = get_model(model_name) + backbone_class = cfg.backbone + assert isinstance(model.backbone, backbone_class) + + +@pytest.mark.parametrize('cfg', test_list) +def test_forward(cfg: Cfg): + if not cfg.forward: + return + + model = get_model(cfg.name) + inputs = torch.rand(*cfg.input_shape) + outputs = model(inputs) + assert outputs.shape == (1, cfg.num_classes) + + +@pytest.mark.parametrize('cfg', test_list) +def test_extract_feat(cfg: Cfg): + if not cfg.extract_feat: + return + + model = get_model(cfg.name) + inputs = torch.rand(*cfg.input_shape) + feats = model.extract_feat(inputs) + assert isinstance(feats, tuple) + assert len(feats) == 1 + + +@pytest.mark.parametrize('cfg', test_list) +def test_backward(cfg: Cfg): + if not cfg.backward: + return + + model = get_model(cfg.name) + inputs = torch.rand(*cfg.input_shape) + outputs = model(inputs) + outputs.mean().backward() + + for n, x in model.named_parameters(): + assert x.grad is not None, f'No gradient for {n}' + num_grad = sum( + [x.grad.numel() for x in model.parameters() if x.grad is not None]) + assert outputs.shape[-1] == cfg.num_classes + num_params = sum([x.numel() for x in model.parameters()]) + assert num_params == num_grad, 'Some parameters are missing gradients' + assert not torch.isnan(outputs).any(), 'Output included NaNs' diff --git a/tests/test_models/test_necks.py b/tests/test_models/test_necks.py new file mode 100644 index 0000000..f245c4a --- /dev/null +++ b/tests/test_models/test_necks.py @@ -0,0 +1,179 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
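The parametrized tests above resolve models by their metafile name. A minimal usage sketch of that flow; the name is one entry from test_list, and no checkpoint is downloaded here (get_model is assumed to build the architecture only unless pretrained weights are explicitly requested):

import torch

from mmpretrain.apis import ModelHub, get_model

name = 'hivit-tiny-p16_16xb64_in1k'   # taken from test_list above
ModelHub._register_mmpretrain_models()
assert ModelHub.has(name)

model = get_model(name)               # architecture only, random init
feats = model.extract_feat(torch.rand(1, 3, 224, 224))
assert isinstance(feats, tuple) and len(feats) == 1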
+import pytest +import torch + +from mmpretrain.models.necks import (GeneralizedMeanPooling, + GlobalAveragePooling, HRFuseScales, + LinearNeck) + + +def test_gap_neck(): + + # test 1d gap_neck + neck = GlobalAveragePooling(dim=1) + # batch_size, num_features, feature_size + fake_input = torch.rand(1, 16, 24) + + output = neck(fake_input) + # batch_size, num_features + assert output.shape == (1, 16) + + # test 2d gap_neck + neck = GlobalAveragePooling(dim=2) + # batch_size, num_features, feature_size(2) + fake_input = torch.rand(1, 16, 24, 24) + + output = neck(fake_input) + # batch_size, num_features + assert output.shape == (1, 16) + + # test 3d gap_neck + neck = GlobalAveragePooling(dim=3) + # batch_size, num_features, feature_size(3) + fake_input = torch.rand(1, 16, 24, 24, 5) + + output = neck(fake_input) + # batch_size, num_features + assert output.shape == (1, 16) + + with pytest.raises(AssertionError): + # dim must be in [1, 2, 3] + GlobalAveragePooling(dim='other') + + +def test_gem_neck(): + + # test gem_neck + neck = GeneralizedMeanPooling() + + # default p is trainable + assert neck.p.requires_grad + + # batch_size, num_features, feature_size(2) + fake_input = torch.rand(1, 16, 24, 24) + + output = neck(fake_input) + # batch_size, num_features + assert output.shape == (1, 16) + + # test tuple input gem_neck + neck = GeneralizedMeanPooling() + # batch_size, num_features, feature_size(2) + fake_input = (torch.rand(1, 8, 24, 24), torch.rand(1, 16, 24, 24)) + + output = neck(fake_input) + # batch_size, num_features + assert output[0].shape == (1, 8) + assert output[1].shape == (1, 16) + + # test gem_neck with p_trainable=False + neck = GeneralizedMeanPooling(p_trainable=False) + + # p is not trainable + assert not neck.p.requires_grad + + # batch_size, num_features, feature_size(2) + fake_input = torch.rand(1, 16, 24, 24) + + output = neck(fake_input) + # batch_size, num_features + assert output.shape == (1, 16) + + with pytest.raises(AssertionError): + # p must be a value greater than 1 + GeneralizedMeanPooling(p=0.5) + + +def test_hr_fuse_scales(): + + in_channels = (18, 32, 64, 128) + neck = HRFuseScales(in_channels=in_channels, out_channels=1024) + + feat_size = 56 + inputs = [] + for in_channel in in_channels: + input_tensor = torch.rand(3, in_channel, feat_size, feat_size) + inputs.append(input_tensor) + feat_size = feat_size // 2 + + with pytest.raises(AssertionError): + neck(inputs) + + outs = neck(tuple(inputs)) + assert isinstance(outs, tuple) + assert len(outs) == 1 + assert outs[0].shape == (3, 1024, 7, 7) + + +def test_linear_reduction(): + # test linear_reduction without `act_cfg` and `norm_cfg` + neck = LinearNeck(10, 5, 0, None, None) + neck.eval() + assert isinstance(neck.gap, torch.nn.Identity) + assert isinstance(neck.act, torch.nn.Identity) + assert isinstance(neck.norm, torch.nn.Identity) + + # batch_size, in_channels, out_channels + fake_input = torch.rand(1, 10) + output = neck(fake_input) + # batch_size, out_features + assert output[-1].shape == (1, 5) + + # batch_size, in_features, feature_size(2) + fake_input = (torch.rand(1, 20), torch.rand(1, 10)) + + output = neck(fake_input) + # batch_size, out_features + assert output[-1].shape == (1, 5) + + # batch_size, in_channels, out_channels, gap_dim + neck = LinearNeck(10, 5, 1, None, None) + fake_input = torch.rand(1, 10, 10) + output = neck(fake_input) + # batch_size, out_features + assert output[-1].shape == (1, 5) + + # batch_size, in_channels, out_channels, gap_dim + neck = LinearNeck(10, 5, 2, None, None) +
fake_input = torch.rand(1, 10, 10, 10) + output = neck(fake_input) + # batch_size, out_features + assert output[-1].shape == (1, 5) + + # batch_size, in_channels, out_channels, gap_dim + neck = LinearNeck(10, 5, 3, None, None) + fake_input = torch.rand(1, 10, 10, 10, 10) + output = neck(fake_input) + # batch_size, out_features + assert output[-1].shape == (1, 5) + + # batch_size, in_channels, out_channels, gap_dim + with pytest.raises(AssertionError): + neck = LinearNeck(10, 5, None, None, None) + + # test linear_reduction with `init_cfg` + neck = LinearNeck(10, 5, init_cfg=dict(type='Xavier', layer=['Linear'])) + + # test linear_reduction with `act_cfg` and `norm_cfg` + neck = LinearNeck( + 10, 5, act_cfg=dict(type='ReLU'), norm_cfg=dict(type='BN1d')) + neck.eval() + + assert isinstance(neck.act, torch.nn.ReLU) + assert isinstance(neck.norm, torch.nn.BatchNorm1d) + + # batch_size, in_channels, out_channels + fake_input = torch.rand(1, 10) + output = neck(fake_input) + # batch_size, out_features + assert output[-1].shape == (1, 5) + # + # # batch_size, in_features, feature_size(2) + fake_input = (torch.rand(1, 20), torch.rand(1, 10)) + + output = neck(fake_input) + # batch_size, out_features + assert output[-1].shape == (1, 5) + + with pytest.raises(AssertionError): + neck([]) diff --git a/tests/test_models/test_peft/test_lora.py b/tests/test_models/test_peft/test_lora.py new file mode 100644 index 0000000..d148538 --- /dev/null +++ b/tests/test_models/test_peft/test_lora.py @@ -0,0 +1,122 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import re + +import pytest +from mmengine.utils import digit_version +from mmengine.utils.dl_utils import TORCH_VERSION + +from mmpretrain.models.peft import LoRAModel + + +@pytest.mark.skipif( + digit_version(TORCH_VERSION) < digit_version('1.9.0'), + reason='get_submodule requires torch >= 1.9.0') +def test_lora_backbone(): + module = dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_path_rate=0.1, + out_type='avg_featmap', + final_norm=False) + + lora_cfg = dict( + module=module, + alpha=1, + rank=4, + drop_rate=0.1, + targets=[ + dict(type='qkv'), + dict(type='.*proj', alpha=2, rank=2, drop_rate=0.2), + ]) + + lora_model = LoRAModel(**lora_cfg) + + # test replace module + for name, module in lora_model.named_modules(): + if name.endswith('qkv'): + assert module.scaling == 0.25 + if re.fullmatch('.*proj', name): + assert module.scaling == 1 + + # test freeze module + for name, param in lora_model.named_parameters(): + if 'lora_' in name: + assert param.requires_grad + else: + assert not param.requires_grad + + # test get state dict + state_dict = lora_model.state_dict() + assert len(state_dict) != 0 + for name, param in state_dict.items(): + assert 'lora_' in name + + # test load state dict + incompatible_keys = lora_model.load_state_dict(state_dict, strict=True) + assert str(incompatible_keys) == '' + + +@pytest.mark.skipif( + digit_version(TORCH_VERSION) < digit_version('1.9.0'), + reason='get_submodule requires torch >= 1.9.0') +def test_lora_model(): + module = dict( + type='MAE', + backbone=dict(type='MAEViT', arch='b', patch_size=16, mask_ratio=0.75), + neck=dict( + type='MAEPretrainDecoder', + patch_size=16, + in_chans=3, + embed_dim=768, + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=16, + mlp_ratio=4., + ), + head=dict( + type='MAEPretrainHead', + norm_pix=True, + patch_size=16, + loss=dict(type='PixelReconstructionLoss', criterion='L2')), + init_cfg=[ + dict(type='Xavier', 
layer='Linear', distribution='uniform'), + dict(type='Constant', layer='LayerNorm', val=1.0, bias=0.0) + ]) + + lora_cfg = dict( + module=module, + alpha=1, + rank=4, + drop_rate=0.1, + targets=[ + dict(type='qkv'), + dict(type='.*proj', alpha=2, rank=2, drop_rate=0.2), + ]) + + lora_model = LoRAModel(**lora_cfg) + + # test replace module + for name, module in lora_model.named_modules(): + if name.endswith('qkv'): + assert module.scaling == 0.25 + if re.fullmatch('.*proj', name): + assert module.scaling == 1 + + # test freeze module + for name, param in lora_model.named_parameters(): + if 'lora_' in name: + assert param.requires_grad + else: + assert not param.requires_grad + + # test get state dict + state_dict = lora_model.state_dict() + assert len(state_dict) != 0 + for name, param in state_dict.items(): + assert 'lora_' in name + + # test load state dict + incompatible_keys = lora_model.load_state_dict(state_dict, strict=True) + assert str(incompatible_keys) == '' diff --git a/tests/test_models/test_retrievers.py b/tests/test_models/test_retrievers.py new file mode 100644 index 0000000..0e7d9dc --- /dev/null +++ b/tests/test_models/test_retrievers.py @@ -0,0 +1,273 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import os +import tempfile +from typing import Callable +from unittest import TestCase +from unittest.mock import MagicMock + +import numpy as np +import torch +from mmengine import ConfigDict +from mmengine.dataset.utils import default_collate +from torch.utils.data import DataLoader, Dataset + +from mmpretrain.datasets.transforms import PackInputs +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample + + +class ExampleDataset(Dataset): + + def __init__(self): + self.metainfo = None + self.pipe = PackInputs() + + def __getitem__(self, idx): + results = dict( + img=np.random.random((64, 64, 3)), meta=dict(sampleidx=idx)) + + return self.pipe(results) + + def __len__(self): + return 10 + + +class TestImageToImageRetriever(TestCase): + DEFAULT_ARGS = dict( + type='ImageToImageRetriever', + image_encoder=[ + dict(type='ResNet', depth=18, out_indices=(3, )), + dict(type='GlobalAveragePooling'), + ], + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=512, + loss=dict(type='CrossEntropyLoss')), + prototype=torch.rand((10, 512)), + ) + + def test_initialize(self): + # test error prototype type + cfg = {**self.DEFAULT_ARGS, 'prototype': 5} + with self.assertRaises(AssertionError): + model = MODELS.build(cfg) + + # test prototype is tensor + model = MODELS.build(self.DEFAULT_ARGS) + self.assertEqual(type(model.prototype), torch.Tensor) + self.assertFalse(model.prototype_inited) + self.assertIsInstance(model.similarity_fn, Callable) + self.assertEqual(model.topk, -1) + + # test prototype is str + cfg = {**self.DEFAULT_ARGS, 'prototype': './proto.pth'} + model = MODELS.build(cfg) + self.assertEqual(type(model.prototype), str) + + # test prototype is dict + lodaer = DataLoader(ExampleDataset()) + cfg = {**self.DEFAULT_ARGS, 'prototype': lodaer} + model = MODELS.build(cfg) + self.assertEqual(type(model.prototype), DataLoader) + + # test prototype is dataloader + loader_cfg = dict( + batch_size=16, + num_workers=2, + dataset=dict( + type='CIFAR100', + data_prefix='data/cifar100', + test_mode=False, + pipeline=[]), + sampler=dict(type='DefaultSampler', shuffle=True), + persistent_workers=True) + cfg = {**self.DEFAULT_ARGS, 'prototype': loader_cfg} + model = MODELS.build(cfg) + self.assertEqual(type(model.prototype), dict) + + # test 
similarity function + self.assertEqual(model.similarity, 'cosine_similarity') + + def fn(a, b): + return a * b + + cfg = {**self.DEFAULT_ARGS, 'similarity_fn': fn} + model = MODELS.build(cfg) + self.assertEqual(model.similarity, fn) + self.assertIsInstance(model.similarity_fn, Callable) + + # test set batch augmentation from train_cfg + cfg = { + **self.DEFAULT_ARGS, 'train_cfg': + dict(augments=dict( + type='Mixup', + alpha=1., + )) + } + model = MODELS.build(cfg) + + self.assertIsNotNone(model.data_preprocessor.batch_augments) + + cfg = {**self.DEFAULT_ARGS, 'train_cfg': dict()} + model = MODELS.build(cfg) + self.assertIsNone(model.data_preprocessor.batch_augments) + + def test_extract_feat(self): + inputs = torch.rand(1, 3, 64, 64) + cfg = ConfigDict(self.DEFAULT_ARGS) + model = MODELS.build(cfg) + + # test extract_feat + feats = model.extract_feat(inputs) + self.assertEqual(len(feats), 1) + self.assertEqual(feats[0].shape, (1, 512)) + + def test_loss(self): + inputs = torch.rand(1, 3, 64, 64) + data_samples = [DataSample().set_gt_label(1)] + + model = MODELS.build(self.DEFAULT_ARGS) + losses = model.loss(inputs, data_samples) + self.assertGreater(losses['loss'].item(), 0) + + def test_prepare_prototype(self): + tmpdir = tempfile.TemporaryDirectory() + # tensor + cfg = {**self.DEFAULT_ARGS} + model = MODELS.build(cfg) + model.prepare_prototype() + self.assertEqual(type(model.prototype_vecs), torch.Tensor) + self.assertEqual(model.prototype_vecs.shape, (10, 512)) + self.assertTrue(model.prototype_inited) + + # test dump prototype + ori_proto_vecs = model.prototype_vecs + save_path = os.path.join(tmpdir.name, 'proto.pth') + model.dump_prototype(save_path) + + # Check whether the saved feature exists + feat = torch.load(save_path) + self.assertEqual(feat.shape, (10, 512)) + + # str + cfg = {**self.DEFAULT_ARGS, 'prototype': save_path} + model = MODELS.build(cfg) + model.prepare_prototype() + self.assertEqual(type(model.prototype_vecs), torch.Tensor) + self.assertEqual(model.prototype_vecs.shape, (10, 512)) + self.assertTrue(model.prototype_inited) + torch.allclose(ori_proto_vecs, model.prototype_vecs) + + # dict + lodaer = DataLoader(ExampleDataset(), collate_fn=default_collate) + cfg = {**self.DEFAULT_ARGS, 'prototype': lodaer} + model = MODELS.build(cfg) + model.prepare_prototype() + self.assertEqual(type(model.prototype_vecs), torch.Tensor) + self.assertEqual(model.prototype_vecs.shape, (10, 512)) + self.assertTrue(model.prototype_inited) + + tmpdir.cleanup() + + def test_predict(self): + inputs = torch.rand(1, 3, 64, 64) + data_samples = [DataSample().set_gt_label([1, 2, 6])] + # default + model = MODELS.build(self.DEFAULT_ARGS) + predictions = model.predict(inputs) + self.assertEqual(predictions[0].pred_score.shape, (10, )) + + predictions = model.predict(inputs, data_samples) + self.assertEqual(predictions[0].pred_score.shape, (10, )) + self.assertEqual(data_samples[0].pred_score.shape, (10, )) + torch.testing.assert_allclose(data_samples[0].pred_score, + predictions[0].pred_score) + + # k is not -1 + cfg = {**self.DEFAULT_ARGS, 'topk': 2} + model = MODELS.build(cfg) + + predictions = model.predict(inputs) + self.assertEqual(predictions[0].pred_score.shape, (10, )) + + predictions = model.predict(inputs, data_samples) + assert predictions is data_samples + self.assertEqual(data_samples[0].pred_score.shape, (10, )) + + def test_forward(self): + inputs = torch.rand(1, 3, 64, 64) + data_samples = [DataSample().set_gt_label(1)] + model = MODELS.build(self.DEFAULT_ARGS) + + # test pure 
forward + outs = model(inputs) + # assert False, type(outs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + self.assertIsInstance(outs[0], torch.Tensor) + + # test forward train + losses = model(inputs, data_samples, mode='loss') + self.assertGreater(losses['loss'].item(), 0) + + # test forward test + predictions = model(inputs, mode='predict') + self.assertEqual(predictions[0].pred_score.shape, (10, )) + + predictions = model(inputs, data_samples, mode='predict') + self.assertEqual(predictions[0].pred_score.shape, (10, )) + self.assertEqual(data_samples[0].pred_score.shape, (10, )) + torch.testing.assert_allclose(data_samples[0].pred_score, + predictions[0].pred_score) + + # test forward with invalid mode + with self.assertRaisesRegex(RuntimeError, 'Invalid mode "unknown"'): + model(inputs, mode='unknown') + + def test_train_step(self): + cfg = { + **self.DEFAULT_ARGS, 'data_preprocessor': + dict(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + } + model = MODELS.build(cfg) + + data = { + 'inputs': torch.randint(0, 256, (1, 3, 64, 64)), + 'data_samples': [DataSample().set_gt_label(1)] + } + + optim_wrapper = MagicMock() + log_vars = model.train_step(data, optim_wrapper) + self.assertIn('loss', log_vars) + optim_wrapper.update_params.assert_called_once() + + def test_val_step(self): + cfg = { + **self.DEFAULT_ARGS, 'data_preprocessor': + dict(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + } + model = MODELS.build(cfg) + + data = { + 'inputs': torch.randint(0, 256, (1, 3, 64, 64)), + 'data_samples': [DataSample().set_gt_label(1)] + } + + predictions = model.val_step(data) + self.assertEqual(predictions[0].pred_score.shape, (10, )) + + def test_test_step(self): + cfg = { + **self.DEFAULT_ARGS, 'data_preprocessor': + dict(mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + } + model = MODELS.build(cfg) + + data = { + 'inputs': torch.randint(0, 256, (1, 3, 64, 64)), + 'data_samples': [DataSample().set_gt_label(1)] + } + + predictions = model.test_step(data) + self.assertEqual(predictions[0].pred_score.shape, (10, )) diff --git a/tests/test_models/test_selfsup/test_barlowtwins.py b/tests/test_models/test_selfsup/test_barlowtwins.py new file mode 100644 index 0000000..72502af --- /dev/null +++ b/tests/test_models/test_selfsup/test_barlowtwins.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
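TestImageToImageRetriever above covers the full prototype lifecycle (tensor, checkpoint path, dataloader). A condensed end-to-end sketch using the tensor prototype from DEFAULT_ARGS; the scores are meaningless on random data, but the shapes and the default cosine-similarity matching are the ones asserted in the tests:

import torch

from mmpretrain.registry import MODELS

retriever = MODELS.build(dict(
    type='ImageToImageRetriever',
    image_encoder=[
        dict(type='ResNet', depth=18, out_indices=(3, )),
        dict(type='GlobalAveragePooling'),
    ],
    head=dict(
        type='LinearClsHead',
        num_classes=10,
        in_channels=512,
        loss=dict(type='CrossEntropyLoss')),
    prototype=torch.rand((10, 512)),   # 10 gallery items, 512-d features
))
retriever.prepare_prototype()
preds = retriever.predict(torch.rand(1, 3, 64, 64))
assert preds[0].pred_score.shape == (10, )   # one similarity per gallery item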
+import platform + +import pytest +import torch + +from mmpretrain.models import BarlowTwins +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_barlowtwins(): + data_preprocessor = { + 'mean': (123.675, 116.28, 103.53), + 'std': (58.395, 57.12, 57.375), + 'to_rgb': True + } + backbone = dict(type='ResNet', depth=18, norm_cfg=dict(type='BN')) + neck = dict( + type='NonLinearNeck', + in_channels=512, + hid_channels=2, + out_channels=2, + num_layers=3, + with_last_bn=False, + with_last_bn_affine=False, + with_avg_pool=True, + norm_cfg=dict(type='BN1d')) + head = dict( + type='LatentCrossCorrelationHead', + in_channels=2, + loss=dict(type='CrossCorrelationLoss')) + + alg = BarlowTwins( + backbone=backbone, + neck=neck, + head=head, + data_preprocessor=data_preprocessor) + + fake_data = { + 'inputs': + [torch.randn((2, 3, 224, 224)), + torch.randn((2, 3, 224, 224))], + 'data_sample': [DataSample() for _ in range(2)] + } + + fake_inputs = alg.data_preprocessor(fake_data) + fake_loss = alg(**fake_inputs, mode='loss') + assert isinstance(fake_loss['loss'].item(), float) diff --git a/tests/test_models/test_selfsup/test_beit.py b/tests/test_models/test_selfsup/test_beit.py new file mode 100644 index 0000000..7fbd5a7 --- /dev/null +++ b/tests/test_models/test_selfsup/test_beit.py @@ -0,0 +1,169 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import platform +from unittest import TestCase + +import pytest +import torch + +from mmpretrain.models import BEiT, BEiTPretrainViT +from mmpretrain.structures import DataSample + + +class TestBEiT(TestCase): + + @pytest.mark.skipif( + platform.system() == 'Windows', reason='Windows mem limit') + def test_beit_pretrain_vit(self): + backbone = dict( + arch='base', + patch_size=16, + drop_path_rate=0.1, + final_norm=True, + layer_scale_init_value=0.1, + ) + + beit_backbone = BEiTPretrainViT(**backbone) + beit_backbone.init_weights() + + fake_inputs = torch.randn((2, 3, 224, 224)) + fake_mask = torch.zeros((2, 196)) + fake_mask[:, 75:150] = 1 + + # test with mask + fake_outputs = beit_backbone(fake_inputs, fake_mask) + assert fake_outputs[0].shape == torch.Size([2, 197, 768]) + + # test without mask + fake_outputs = beit_backbone(fake_inputs, None) + assert fake_outputs[0].shape == torch.Size([2, 197, 768]) + + @pytest.mark.skipif( + platform.system() == 'Windows', reason='Windows mem limit') + def test_beitv1(self): + data_preprocessor = dict( + type='TwoNormDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + second_mean=[-31.875, -31.875, -31.875], + second_std=[318.75, 318.75, 318.75], + to_rgb=True) + + # model settings + backbone = dict( + type='BEiTPretrainViT', + arch='base', + patch_size=16, + drop_path_rate=0.1, + final_norm=True, + layer_scale_init_value=0.1) + neck = None + head = dict( + type='BEiTV1Head', + embed_dims=768, + num_embed=8192, + loss=dict(type='CrossEntropyLoss')) + target_generator = dict(type='DALL-E') + + # build model + model = BEiT( + backbone=backbone, + neck=neck, + head=head, + target_generator=target_generator, + data_preprocessor=data_preprocessor) + + fake_img = torch.rand((1, 3, 224, 224)) + fake_target_img = torch.rand((1, 3, 112, 112)) + fake_mask = torch.zeros((196)).bool() + fake_mask[75:150] = 1 + fake_data_sample = DataSample() + fake_data_sample.set_mask(fake_mask) + fake_data = { + 'inputs': [fake_img, fake_target_img], + 'data_samples': [fake_data_sample] + } + + fake_inputs = 
model.data_preprocessor(fake_data) + fake_outputs = model(**fake_inputs, mode='loss') + assert isinstance(fake_outputs['loss'].item(), float) + + @pytest.mark.skipif( + platform.system() == 'Windows', reason='Windows mem limit') + def test_beitv2(self): + data_preprocessor = dict( + type='TwoNormDataPreprocessor', + mean=(123.675, 116.28, 103.53), + std=(58.395, 57.12, 57.375), + second_mean=(127.5, 127.5, 127.5), + second_std=(127.5, 127.5, 127.5), + to_rgb=True) + + # model settings + vqkd_encoder = dict( + arch='base', + img_size=224, + patch_size=16, + in_channels=3, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + out_type='featmap', + with_cls_token=True, + frozen_stages=-1, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + use_shared_rel_pos_bias=False, + layer_scale_init_value=0., + interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None) + + layer_scale_init_value = 0.1 + drop_path_rate = 0. # 0. for 300 epochs and 0.1 for 1600 epochs. + backbone = dict( + type='BEiTPretrainViT', + arch='base', + patch_size=16, + out_indices=[-4, -1], + drop_path_rate=drop_path_rate, + final_norm=False, + layer_scale_init_value=layer_scale_init_value) + neck = dict( + type='BEiTV2Neck', + num_layers=1, + early_layers=9, + backbone_arch='base', + drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value) + head = dict( + type='BEiTV2Head', + embed_dims=768, + num_embed=8192, + loss=dict(type='CrossEntropyLoss')) + target_generator = dict(type='VQKD', encoder_config=vqkd_encoder) + + model = BEiT( + backbone=backbone, + neck=neck, + head=head, + target_generator=target_generator, + data_preprocessor=data_preprocessor) + + fake_img = torch.rand((1, 3, 224, 224)) + fake_target_img = torch.rand((1, 3, 224, 224)) + fake_mask = torch.zeros((196)).bool() + fake_mask[75:150] = 1 + fake_data_sample = DataSample() + fake_data_sample.set_mask(fake_mask) + fake_data = { + 'inputs': [fake_img, fake_target_img], + 'data_samples': [fake_data_sample] + } + + fake_inputs = model.data_preprocessor(fake_data) + fake_outputs = model(**fake_inputs, mode='loss') + assert isinstance(fake_outputs['loss_1'].item(), float) + assert isinstance(fake_outputs['loss_2'].item(), float) diff --git a/tests/test_models/test_selfsup/test_byol.py b/tests/test_models/test_selfsup/test_byol.py new file mode 100644 index 0000000..59d9d87 --- /dev/null +++ b/tests/test_models/test_selfsup/test_byol.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
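The BEiT v1/v2 tests above (and several masked-image tests that follow) share one input contract: two image views packed as a list under 'inputs', plus a boolean patch mask carried on the DataSample. A minimal sketch of just that packing step, with the shapes used in the tests (196 patches = 14 x 14 for a 224px image with 16px patches; the 112px second view is the DALL-E tokenizer input in BEiT v1):

import torch

from mmpretrain.structures import DataSample

fake_img = torch.rand((1, 3, 224, 224))
fake_target_img = torch.rand((1, 3, 112, 112))
fake_mask = torch.zeros((196)).bool()
fake_mask[75:150] = True                 # mask a contiguous block of patches
fake_data_sample = DataSample()
fake_data_sample.set_mask(fake_mask)
fake_data = {
    'inputs': [fake_img, fake_target_img],
    'data_samples': [fake_data_sample],
}
# then: model(**model.data_preprocessor(fake_data), mode='loss'), as above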
+import platform + +import pytest +import torch + +from mmpretrain.models import BYOL +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_byol(): + data_preprocessor = dict( + mean=(123.675, 116.28, 103.53), + std=(58.395, 57.12, 57.375), + to_rgb=True) + backbone = dict(type='ResNet', depth=18, norm_cfg=dict(type='BN')) + neck = dict( + type='NonLinearNeck', + in_channels=512, + hid_channels=2, + out_channels=2, + with_bias=True, + with_last_bn=False, + with_avg_pool=True, + norm_cfg=dict(type='BN1d')) + head = dict( + type='LatentPredictHead', + loss=dict(type='CosineSimilarityLoss'), + predictor=dict( + type='NonLinearNeck', + in_channels=2, + hid_channels=2, + out_channels=2, + with_bias=True, + with_last_bn=False, + with_avg_pool=False, + norm_cfg=dict(type='BN1d'))) + + alg = BYOL( + backbone=backbone, + neck=neck, + head=head, + data_preprocessor=data_preprocessor) + + fake_data = { + 'inputs': + [torch.randn((2, 3, 224, 224)), + torch.randn((2, 3, 224, 224))], + 'data_samples': [DataSample() for _ in range(2)] + } + fake_inputs = alg.data_preprocessor(fake_data) + + fake_loss = alg(**fake_inputs, mode='loss') + assert isinstance(fake_loss['loss'].item(), float) + assert fake_loss['loss'].item() > -4 + + fake_feats = alg(fake_inputs['inputs'][0], mode='tensor') + assert list(fake_feats[0].shape) == [2, 512, 7, 7] diff --git a/tests/test_models/test_selfsup/test_cae.py b/tests/test_models/test_selfsup/test_cae.py new file mode 100644 index 0000000..3c9127d --- /dev/null +++ b/tests/test_models/test_selfsup/test_cae.py @@ -0,0 +1,78 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import platform + +import pytest +import torch + +from mmpretrain.models import CAE, CAEPretrainViT +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_cae_vit(): + backbone = dict( + arch='deit-tiny', patch_size=16, layer_scale_init_value=0.1) + + cae_backbone = CAEPretrainViT(**backbone) + cae_backbone.init_weights() + fake_inputs = torch.randn((1, 3, 224, 224)) + fake_mask = torch.zeros((1, 196)).bool() + fake_mask[:, 75:150] = 1 + + # test with mask + fake_outputs = cae_backbone(fake_inputs, fake_mask) + assert list(fake_outputs.shape) == [1, 122, 192] + + # test without mask + fake_outputs = cae_backbone(fake_inputs, None) + assert fake_outputs[0].shape == torch.Size([1, 197, 192]) + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_cae(): + data_preprocessor = dict( + type='TwoNormDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + second_mean=[-31.875, -31.875, -31.875], + second_std=[318.75, 318.75, 318.75], + to_rgb=True) + + # model settings + backbone = dict( + type='CAEPretrainViT', + arch='deit-tiny', + patch_size=16, + layer_scale_init_value=0.1) + neck = dict( + type='CAENeck', + embed_dims=192, + num_heads=12, + regressor_depth=4, + decoder_depth=4, + mlp_ratio=4, + layer_scale_init_value=0.1) + head = dict(type='CAEHead', loss=dict(type='CAELoss', lambd=2)) + target_generator = dict(type='DALL-E') + + model = CAE( + backbone=backbone, + neck=neck, + head=head, + target_generator=target_generator, + data_preprocessor=data_preprocessor) + + fake_img = torch.rand((1, 3, 224, 224)) + fake_target_img = torch.rand((1, 3, 112, 112)) + fake_mask = torch.zeros((196)).bool() + fake_mask[75:150] = 1 + fake_data_sample = DataSample() + 
fake_data_sample.set_mask(fake_mask) + fake_data = { + 'inputs': [fake_img, fake_target_img], + 'data_samples': [fake_data_sample] + } + + fake_inputs = model.data_preprocessor(fake_data) + fake_outputs = model(**fake_inputs, mode='loss') + assert isinstance(fake_outputs['loss'].item(), float) diff --git a/tests/test_models/test_selfsup/test_densecl.py b/tests/test_models/test_selfsup/test_densecl.py new file mode 100644 index 0000000..2ae807f --- /dev/null +++ b/tests/test_models/test_selfsup/test_densecl.py @@ -0,0 +1,62 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import platform + +import pytest +import torch + +from mmpretrain.models import DenseCL +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_densecl(): + data_preprocessor = { + 'mean': (123.675, 116.28, 103.53), + 'std': (58.395, 57.12, 57.375), + 'to_rgb': True + } + queue_len = 32 + feat_dim = 2 + momentum = 0.001 + loss_lambda = 0.5 + backbone = dict(type='ResNet', depth=18, norm_cfg=dict(type='BN')) + neck = dict( + type='DenseCLNeck', + in_channels=512, + hid_channels=2, + out_channels=2, + num_grid=None) + head = dict( + type='ContrastiveHead', + loss=dict(type='CrossEntropyLoss'), + temperature=0.2) + + alg = DenseCL( + backbone=backbone, + neck=neck, + head=head, + queue_len=queue_len, + feat_dim=feat_dim, + momentum=momentum, + loss_lambda=loss_lambda, + data_preprocessor=data_preprocessor) + + # test init + assert alg.queue.size() == torch.Size([feat_dim, queue_len]) + assert alg.queue2.size() == torch.Size([feat_dim, queue_len]) + + # test loss + fake_data = { + 'inputs': + [torch.randn((2, 3, 224, 224)), + torch.randn((2, 3, 224, 224))], + 'data_samples': [DataSample() for _ in range(2)] + } + fake_inputs = alg.data_preprocessor(fake_data) + fake_loss = alg(**fake_inputs, mode='loss') + assert isinstance(fake_loss['loss_single'].item(), float) + assert isinstance(fake_loss['loss_dense'].item(), float) + assert fake_loss['loss_single'].item() > 0 + assert fake_loss['loss_dense'].item() > 0 + assert alg.queue_ptr.item() == 2 + assert alg.queue2_ptr.item() == 2 diff --git a/tests/test_models/test_selfsup/test_eva.py b/tests/test_models/test_selfsup/test_eva.py new file mode 100644 index 0000000..896ffc4 --- /dev/null +++ b/tests/test_models/test_selfsup/test_eva.py @@ -0,0 +1,51 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import platform +from unittest.mock import MagicMock + +import pytest +import torch + +from mmpretrain.models import EVA +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_eva(): + data_preprocessor = { + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'to_rgb': True + } + backbone = dict(type='MAEViT', arch='b', patch_size=16, mask_ratio=0.75) + neck = dict( + type='MAEPretrainDecoder', + patch_size=16, + in_chans=3, + embed_dim=768, + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=16, + predict_feature_dim=512, + mlp_ratio=4.) 
+ head = dict( + type='MIMHead', + loss=dict( + type='CosineSimilarityLoss', shift_factor=1.0, scale_factor=1.0)) + + alg = EVA( + backbone=backbone, + neck=neck, + head=head, + data_preprocessor=data_preprocessor) + + target_generator = MagicMock( + return_value=(torch.ones(2, 197, 512), torch.ones(2, 197, 197))) + alg.target_generator = target_generator + + fake_data = { + 'inputs': torch.randn((2, 3, 224, 224)), + 'data_samples': [DataSample() for _ in range(2)] + } + fake_inputs = alg.data_preprocessor(fake_data) + fake_outputs = alg(**fake_inputs, mode='loss') + assert isinstance(fake_outputs['loss'].item(), float) diff --git a/tests/test_models/test_selfsup/test_itpn.py b/tests/test_models/test_selfsup/test_itpn.py new file mode 100644 index 0000000..a22b9c8 --- /dev/null +++ b/tests/test_models/test_selfsup/test_itpn.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import platform + +import pytest +import torch + +from mmpretrain.models import iTPN +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_itpn(): + data_preprocessor = { + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'to_rgb': True + } + backbone = dict( + type='iTPNHiViT', + arch='base', + reconstruction_type='pixel', + mask_ratio=0.75) + neck = dict( + type='iTPNPretrainDecoder', + num_patches=196, + patch_size=16, + in_chans=3, + embed_dim=512, + decoder_embed_dim=512, + decoder_depth=6, + decoder_num_heads=16, + mlp_ratio=4., + reconstruction_type='pixel', + # transformer pyramid + fpn_dim=256, + fpn_depth=2, + num_outs=3, + ) + head = dict( + type='MAEPretrainHead', + norm_pix=True, + patch_size=16, + loss=dict(type='PixelReconstructionLoss', criterion='L2')) + + alg = iTPN( + backbone=backbone, + neck=neck, + head=head, + data_preprocessor=data_preprocessor) + + fake_data = { + 'inputs': torch.randn((2, 3, 224, 224)), + 'data_samples': [DataSample() for _ in range(2)] + } + fake_inputs = alg.data_preprocessor(fake_data) + fake_outputs = alg(**fake_inputs, mode='loss') + assert isinstance(fake_outputs['loss'].item(), float) diff --git a/tests/test_models/test_selfsup/test_mae.py b/tests/test_models/test_selfsup/test_mae.py new file mode 100644 index 0000000..48fb88c --- /dev/null +++ b/tests/test_models/test_selfsup/test_mae.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
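A note on the MagicMock used in the EVA test above (and again for MILAN below): the real CLIP-style target generator is replaced by a stub that only needs to return a (features, attention) pair with the shapes the MIM head consumes, which keeps the test lightweight. A sketch of that stubbing pattern; the shapes simply mirror the tests:

from unittest.mock import MagicMock

import torch

fake_clip_feats = torch.ones(2, 197, 512)   # [batch, 1 + 14*14 tokens, feat dim]
fake_attention = torch.ones(2, 197, 197)    # token-to-token attention weights
stub_target_generator = MagicMock(return_value=(fake_clip_feats, fake_attention))
# alg.target_generator = stub_target_generator   # as done for EVA / MILAN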
+import platform + +import pytest +import torch + +from mmpretrain.models import MAE, MAEViT +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_mae_vit(): + backbone = dict(arch='b', patch_size=16, mask_ratio=0.75) + mae_backbone = MAEViT(**backbone) + mae_backbone.init_weights() + fake_inputs = torch.randn((2, 3, 224, 224)) + + # test with mask + fake_outputs = mae_backbone(fake_inputs)[0] + assert list(fake_outputs.shape) == [2, 50, 768] + + # test without mask + fake_outputs = mae_backbone(fake_inputs, None) + assert fake_outputs[0].shape == torch.Size([2, 197, 768]) + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_mae(): + data_preprocessor = { + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'to_rgb': True + } + backbone = dict(type='MAEViT', arch='b', patch_size=16, mask_ratio=0.75) + neck = dict( + type='MAEPretrainDecoder', + patch_size=16, + in_chans=3, + embed_dim=768, + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=16, + mlp_ratio=4., + ) + loss = dict(type='PixelReconstructionLoss', criterion='L2') + head = dict( + type='MAEPretrainHead', norm_pix=False, patch_size=16, loss=loss) + + alg = MAE( + backbone=backbone, + neck=neck, + head=head, + data_preprocessor=data_preprocessor) + + fake_data = { + 'inputs': torch.randn((2, 3, 224, 224)), + 'data_samples': [DataSample() for _ in range(2)] + } + fake_inputs = alg.data_preprocessor(fake_data) + fake_outputs = alg(**fake_inputs, mode='loss') + assert isinstance(fake_outputs['loss'].item(), float) diff --git a/tests/test_models/test_selfsup/test_maskfeat.py b/tests/test_models/test_selfsup/test_maskfeat.py new file mode 100644 index 0000000..75909c1 --- /dev/null +++ b/tests/test_models/test_selfsup/test_maskfeat.py @@ -0,0 +1,66 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
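The shapes asserted in the MAE test above follow from simple bookkeeping rather than from the API; a quick arithmetic check of the 196 / 50 / 197 token counts:

img_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (img_size // patch_size) ** 2                # 14 * 14 = 196
visible_tokens = int(num_patches * (1 - mask_ratio)) + 1   # 49 kept + cls = 50
full_tokens = num_patches + 1                              # 197 when mask is None
assert (num_patches, visible_tokens, full_tokens) == (196, 50, 197)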
+import platform + +import pytest +import torch +from mmengine.utils import digit_version + +from mmpretrain.models import MaskFeat, MaskFeatViT +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_maskfeat_vit(): + maskfeat_backbone = MaskFeatViT() + maskfeat_backbone.init_weights() + fake_inputs = torch.randn((2, 3, 224, 224)) + fake_mask = torch.randn((2, 14, 14)).flatten(1).bool() + + # test with mask + fake_outputs = maskfeat_backbone(fake_inputs, fake_mask) + assert list(fake_outputs.shape) == [2, 197, 768] + + # test without mask + fake_outputs = maskfeat_backbone(fake_inputs, None) + assert fake_outputs[0].shape == torch.Size([2, 197, 768]) + + +@pytest.mark.skipif( + digit_version(torch.__version__) < digit_version('1.7.0'), + reason='torch version') +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_maskfeat(): + data_preprocessor = { + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'to_rgb': True + } + + backbone = dict(type='MaskFeatViT', arch='b', patch_size=16) + neck = dict( + type='LinearNeck', in_channels=768, out_channels=108, gap_dim=0) + head = dict( + type='MIMHead', + loss=dict(type='PixelReconstructionLoss', criterion='L2')) + target_generator = dict( + type='HOGGenerator', nbins=9, pool=8, gaussian_window=16) + + alg = MaskFeat( + backbone=backbone, + neck=neck, + head=head, + target_generator=target_generator, + data_preprocessor=data_preprocessor) + + # test forward_train + fake_data_sample = DataSample() + fake_mask = torch.rand((14, 14)).bool() + fake_data_sample.set_mask(fake_mask) + fake_data = { + 'inputs': torch.randn((1, 3, 224, 224)), + 'data_samples': [fake_data_sample] + } + + fake_input = alg.data_preprocessor(fake_data) + fake_outputs = alg(**fake_input, mode='loss') + assert isinstance(fake_outputs['loss'].item(), float) diff --git a/tests/test_models/test_selfsup/test_mff.py b/tests/test_models/test_selfsup/test_mff.py new file mode 100644 index 0000000..3ad0295 --- /dev/null +++ b/tests/test_models/test_selfsup/test_mff.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
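In the MaskFeat test above, the LinearNeck projects 768-d ViT tokens down to 108 dims, which is the per-patch HOG target size implied by the generator settings. The breakdown below is inferred from those settings (nbins=9, pool=8, 16px patches, 3 colour channels), not read from the config itself:

nbins, pool, patch_size, channels = 9, 8, 16, 3
cells_per_patch = (patch_size // pool) ** 2      # 2 * 2 = 4 HOG cells per patch
hog_dim = nbins * channels * cells_per_patch     # 9 * 3 * 4 = 108
assert hog_dim == 108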
+import platform + +import pytest +import torch + +from mmpretrain.models import MFF, MFFViT +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_mae_vit(): + backbone = dict( + arch='b', patch_size=16, mask_ratio=0.75, out_indices=[1, 11]) + mae_backbone = MFFViT(**backbone) + mae_backbone.init_weights() + fake_inputs = torch.randn((2, 3, 224, 224)) + + # test with mask + fake_outputs = mae_backbone(fake_inputs)[0] + assert list(fake_outputs.shape) == [2, 50, 768] + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_mae(): + data_preprocessor = { + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'to_rgb': True + } + backbone = dict( + type='MFFViT', + arch='b', + patch_size=16, + mask_ratio=0.75, + out_indices=[1, 11]) + neck = dict( + type='MAEPretrainDecoder', + patch_size=16, + in_chans=3, + embed_dim=768, + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=16, + mlp_ratio=4., + ) + loss = dict(type='PixelReconstructionLoss', criterion='L2') + head = dict( + type='MAEPretrainHead', norm_pix=False, patch_size=16, loss=loss) + + alg = MFF( + backbone=backbone, + neck=neck, + head=head, + data_preprocessor=data_preprocessor) + + fake_data = { + 'inputs': torch.randn((2, 3, 224, 224)), + 'data_samples': [DataSample() for _ in range(2)] + } + fake_inputs = alg.data_preprocessor(fake_data) + fake_outputs = alg(**fake_inputs, mode='loss') + assert isinstance(fake_outputs['loss'].item(), float) diff --git a/tests/test_models/test_selfsup/test_milan.py b/tests/test_models/test_selfsup/test_milan.py new file mode 100644 index 0000000..f45f766 --- /dev/null +++ b/tests/test_models/test_selfsup/test_milan.py @@ -0,0 +1,69 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy +import platform +from unittest.mock import MagicMock + +import pytest +import torch + +from mmpretrain.models import MILAN, MILANViT +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_milan_vit(): + backbone = dict(arch='b', patch_size=16, mask_ratio=0.75) + milan_backbone = MILANViT(**backbone) + milan_backbone.init_weights() + fake_inputs = torch.randn((2, 3, 224, 224)) + + # test with mask + fake_outputs = milan_backbone(fake_inputs, + torch.ones(2, 197, 197)[:, 0, 1:])[0] + assert list(fake_outputs.shape) == [2, 50, 768] + + # test without mask + fake_outputs = milan_backbone(fake_inputs, None) + assert fake_outputs[0].shape == torch.Size([2, 197, 768]) + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_milan(): + data_preprocessor = { + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'to_rgb': True + } + + backbone = dict(type='MILANViT', arch='b', patch_size=16, mask_ratio=0.75) + neck = dict( + type='MILANPretrainDecoder', + patch_size=16, + in_chans=3, + embed_dim=768, + decoder_embed_dim=512, + decoder_depth=8, + decoder_num_heads=16, + mlp_ratio=4.) 
+    head = dict(
+        type='MIMHead',
+        loss=dict(
+            type='CosineSimilarityLoss', shift_factor=2.0, scale_factor=2.0))
+
+    alg = MILAN(
+        backbone=backbone,
+        neck=neck,
+        head=head,
+        data_preprocessor=copy.deepcopy(data_preprocessor))
+
+    target_generator = MagicMock(
+        return_value=(torch.ones(2, 197, 512), torch.ones(2, 197, 197)))
+    alg.target_generator = target_generator
+
+    fake_data = {
+        'inputs': torch.randn((2, 3, 224, 224)),
+        'data_samples': [DataSample() for _ in range(2)]
+    }
+    fake_inputs = alg.data_preprocessor(fake_data)
+    fake_outputs = alg(**fake_inputs, mode='loss')
+    assert isinstance(fake_outputs['loss'].item(), float)
diff --git a/tests/test_models/test_selfsup/test_mixmim.py b/tests/test_models/test_selfsup/test_mixmim.py
new file mode 100644
index 0000000..9e8e923
--- /dev/null
+++ b/tests/test_models/test_selfsup/test_mixmim.py
@@ -0,0 +1,71 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import platform
+
+import pytest
+import torch
+
+from mmpretrain.models import MixMIM, MixMIMPretrainTransformer
+from mmpretrain.structures import DataSample
+
+
+@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
+def test_mixmim_backbone():
+    mixmim_backbone = MixMIMPretrainTransformer(
+        arch=dict(embed_dims=128, depths=[2, 2, 4, 2], num_heads=[4, 4, 4, 4]))
+    mixmim_backbone.init_weights()
+    fake_inputs = torch.randn((1, 3, 224, 224))
+
+    # test with mask
+    fake_outputs, fake_mask_s4 = mixmim_backbone(fake_inputs)
+    assert fake_outputs.shape == torch.Size([1, 49, 1024])
+    assert fake_mask_s4.shape == torch.Size([1, 49, 1])
+
+    # test without mask
+    fake_outputs = mixmim_backbone(fake_inputs, None)
+    assert len(fake_outputs) == 1
+    assert fake_outputs[0].shape == torch.Size([1, 1024])
+
+
+@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit')
+def test_mixmim():
+    data_preprocessor = {
+        'mean': [0.5, 0.5, 0.5],
+        'std': [0.5, 0.5, 0.5],
+        'to_rgb': True
+    }
+
+    # model config
+    backbone = dict(
+        type='MixMIMPretrainTransformer',
+        arch='B',
+        drop_rate=0.0,
+        drop_path_rate=0.0)
+    neck = dict(
+        type='MixMIMPretrainDecoder',
+        num_patches=49,
+        encoder_stride=32,
+        embed_dim=1024,
+        decoder_embed_dim=512,
+        decoder_depth=8,
+        decoder_num_heads=16)
+    head = dict(
+        type='MixMIMPretrainHead',
+        norm_pix=True,
+        loss=dict(type='PixelReconstructionLoss', criterion='L2'))
+
+    model = MixMIM(
+        backbone=backbone,
+        neck=neck,
+        head=head,
+        data_preprocessor=data_preprocessor)
+
+    # test forward_train
+    fake_data_sample = DataSample()
+    fake_data = {
+        'inputs': torch.randn((2, 3, 224, 224)),
+        'data_samples': [fake_data_sample for _ in range(2)]
+    }
+
+    fake_inputs = model.data_preprocessor(fake_data)
+    fake_outputs = model(**fake_inputs, mode='loss')
+    assert isinstance(fake_outputs['loss'].item(), float)
diff --git a/tests/test_models/test_selfsup/test_moco.py b/tests/test_models/test_selfsup/test_moco.py
new file mode 100644
index 0000000..7db50ec
--- /dev/null
+++ b/tests/test_models/test_selfsup/test_moco.py
@@ -0,0 +1,58 @@
+# Copyright (c) OpenMMLab. All rights reserved.
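+# The config below is intentionally tiny for unit testing: queue_len=32 and
+# feat_dim=2 stand in for the usual MoCo v2 values (65536 and 128). The test
+# checks that the queue is allocated as (feat_dim, queue_len) and that the
+# queue pointer advances by the batch size (2) after one training step.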
+import platform + +import pytest +import torch + +from mmpretrain.models import MoCo +from mmpretrain.structures import DataSample + +queue_len = 32 +feat_dim = 2 +momentum = 0.001 +backbone = dict(type='ResNet', depth=18, norm_cfg=dict(type='BN')) +neck = dict( + type='MoCoV2Neck', + in_channels=512, + hid_channels=2, + out_channels=2, + with_avg_pool=True) +head = dict( + type='ContrastiveHead', + loss=dict(type='CrossEntropyLoss'), + temperature=0.2) + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_moco(): + data_preprocessor = { + 'mean': (123.675, 116.28, 103.53), + 'std': (58.395, 57.12, 57.375), + 'to_rgb': True + } + + alg = MoCo( + backbone=backbone, + neck=neck, + head=head, + queue_len=queue_len, + feat_dim=feat_dim, + momentum=momentum, + data_preprocessor=data_preprocessor) + assert alg.queue.size() == torch.Size([feat_dim, queue_len]) + + fake_data = { + 'inputs': + [torch.randn((2, 3, 224, 224)), + torch.randn((2, 3, 224, 224))], + 'data_samples': [DataSample() for _ in range(2)] + } + + fake_inputs = alg.data_preprocessor(fake_data) + fake_loss = alg(**fake_inputs, mode='loss') + assert fake_loss['loss'] > 0 + assert alg.queue_ptr.item() == 2 + + # test extract + fake_feats = alg(fake_inputs['inputs'][0], mode='tensor') + assert fake_feats[0].size() == torch.Size([2, 512, 7, 7]) diff --git a/tests/test_models/test_selfsup/test_mocov3.py b/tests/test_models/test_selfsup/test_mocov3.py new file mode 100644 index 0000000..b9d89a9 --- /dev/null +++ b/tests/test_models/test_selfsup/test_mocov3.py @@ -0,0 +1,91 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import platform +from unittest import TestCase + +import pytest +import torch + +from mmpretrain.models import MoCoV3, MoCoV3ViT +from mmpretrain.structures import DataSample + + +class TestMoCoV3(TestCase): + + backbone = dict( + type='MoCoV3ViT', + arch='mocov3-small', # embed_dim = 384 + patch_size=16, + frozen_stages=12, + stop_grad_conv1=True, + norm_eval=True) + neck = dict( + type='NonLinearNeck', + in_channels=384, + hid_channels=2, + out_channels=2, + num_layers=2, + with_bias=False, + with_last_bn=True, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=False, + norm_cfg=dict(type='BN1d')) + head = dict( + type='MoCoV3Head', + predictor=dict( + type='NonLinearNeck', + in_channels=2, + hid_channels=2, + out_channels=2, + num_layers=2, + with_bias=False, + with_last_bn=True, + with_last_bn_affine=False, + with_last_bias=False, + with_avg_pool=False, + norm_cfg=dict(type='BN1d')), + loss=dict(type='CrossEntropyLoss', loss_weight=2 * 0.2), + temperature=0.2) + + @pytest.mark.skipif( + platform.system() == 'Windows', reason='Windows mem limit') + def test_vit(self): + vit = MoCoV3ViT( + arch='mocov3-small', + patch_size=16, + frozen_stages=12, + stop_grad_conv1=True, + norm_eval=True) + vit.init_weights() + vit.train() + + for p in vit.parameters(): + assert p.requires_grad is False + + @pytest.mark.skipif( + platform.system() == 'Windows', reason='Windows mem limit') + def test_mocov3(self): + data_preprocessor = dict( + mean=(123.675, 116.28, 103.53), + std=(58.395, 57.12, 57.375), + to_rgb=True) + alg = MoCoV3( + backbone=self.backbone, + neck=self.neck, + head=self.head, + data_preprocessor=data_preprocessor) + + fake_data = { + 'inputs': + [torch.randn((2, 3, 224, 224)), + torch.randn((2, 3, 224, 224))], + 'data_samples': [DataSample() for _ in range(2)] + } + + fake_inputs = alg.data_preprocessor(fake_data) + fake_loss = alg(**fake_inputs, 
mode='loss') + self.assertGreater(fake_loss['loss'], 0) + + # test extract + fake_feats = alg(fake_inputs['inputs'][0], mode='tensor') + self.assertEqual(fake_feats[0].size(), torch.Size([2, 384])) diff --git a/tests/test_models/test_selfsup/test_simclr.py b/tests/test_models/test_selfsup/test_simclr.py new file mode 100644 index 0000000..24981ff --- /dev/null +++ b/tests/test_models/test_selfsup/test_simclr.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import platform + +import pytest +import torch + +from mmpretrain.models import SimCLR +from mmpretrain.structures import DataSample + +backbone = dict(type='ResNet', depth=18, norm_cfg=dict(type='BN')) +neck = dict( + type='NonLinearNeck', # SimCLR non-linear neck + in_channels=512, + hid_channels=2, + out_channels=2, + num_layers=2, + with_avg_pool=True, + norm_cfg=dict(type='BN1d')) +head = dict( + type='ContrastiveHead', + loss=dict(type='CrossEntropyLoss'), + temperature=0.1) + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_simclr(): + data_preprocessor = { + 'mean': (123.675, 116.28, 103.53), + 'std': (58.395, 57.12, 57.375), + 'to_rgb': True, + } + + alg = SimCLR( + backbone=backbone, + neck=neck, + head=head, + data_preprocessor=data_preprocessor) + + fake_data = { + 'inputs': + [torch.randn((2, 3, 224, 224)), + torch.randn((2, 3, 224, 224))], + 'data_samples': [DataSample() for _ in range(2)] + } + + fake_inputs = alg.data_preprocessor(fake_data) + fake_loss = alg(**fake_inputs, mode='loss') + assert isinstance(fake_loss['loss'].item(), float) + + # test extract + fake_feat = alg(fake_inputs['inputs'][0], mode='tensor') + assert fake_feat[0].size() == torch.Size([2, 512, 7, 7]) diff --git a/tests/test_models/test_selfsup/test_simmim.py b/tests/test_models/test_selfsup/test_simmim.py new file mode 100644 index 0000000..0440365 --- /dev/null +++ b/tests/test_models/test_selfsup/test_simmim.py @@ -0,0 +1,70 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
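+# SimMIM keeps a dense feature map instead of dropping masked tokens: a
+# 192x192 input through Swin-B (total downsampling 32, final width
+# 128 * 2**3 = 1024) yields the (2, 1024, 6, 6) outputs asserted below,
+# while the random mask lives at the 4-pixel patch level, hence its
+# 192 / 4 = 48x48 resolution.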
+import platform + +import pytest +import torch + +from mmpretrain.models import SimMIM, SimMIMSwinTransformer +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_simmim_swin(): + backbone = dict( + arch='B', + img_size=192, + stage_cfgs=dict(block_cfgs=dict(window_size=6))) + simmim_backbone = SimMIMSwinTransformer(**backbone) + simmim_backbone.init_weights() + fake_inputs = torch.randn((2, 3, 192, 192)) + fake_mask = torch.rand((2, 48, 48)) + + # test with mask + fake_outputs = simmim_backbone(fake_inputs, fake_mask)[0] + assert fake_outputs.shape == torch.Size([2, 1024, 6, 6]) + + # test without mask + fake_outputs = simmim_backbone(fake_inputs, None) + assert len(fake_outputs) == 1 + assert fake_outputs[0].shape == torch.Size([2, 1024, 6, 6]) + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_simmim(): + data_preprocessor = { + 'mean': [0.5, 0.5, 0.5], + 'std': [0.5, 0.5, 0.5], + 'to_rgb': True + } + + # model config + backbone = dict( + type='SimMIMSwinTransformer', + arch='B', + img_size=192, + stage_cfgs=dict(block_cfgs=dict(window_size=6))) + neck = dict( + type='SimMIMLinearDecoder', in_channels=128 * 2**3, encoder_stride=32) + head = dict( + type='SimMIMHead', + patch_size=4, + loss=dict(type='PixelReconstructionLoss', criterion='L1', channel=3)) + + model = SimMIM( + backbone=backbone, + neck=neck, + head=head, + data_preprocessor=data_preprocessor) + + # test forward_train + fake_data_sample = DataSample() + fake_mask = torch.rand((48, 48)) + fake_data_sample.set_mask(fake_mask) + fake_data = { + 'inputs': torch.randn((2, 3, 192, 192)), + 'data_samples': [fake_data_sample for _ in range(2)] + } + + fake_inputs = model.data_preprocessor(fake_data) + fake_outputs = model(**fake_inputs, mode='loss') + assert isinstance(fake_outputs['loss'].item(), float) diff --git a/tests/test_models/test_selfsup/test_simsiam.py b/tests/test_models/test_selfsup/test_simsiam.py new file mode 100644 index 0000000..abbf754 --- /dev/null +++ b/tests/test_models/test_selfsup/test_simsiam.py @@ -0,0 +1,64 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import copy +import platform + +import pytest +import torch + +from mmpretrain.models import SimSiam +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_simsiam(): + data_preprocessor = { + 'mean': (123.675, 116.28, 103.53), + 'std': (58.395, 57.12, 57.375), + 'to_rgb': True, + } + backbone = dict( + type='ResNet', + depth=18, + norm_cfg=dict(type='BN'), + zero_init_residual=True) + neck = dict( + type='NonLinearNeck', + in_channels=512, + hid_channels=2, + out_channels=2, + num_layers=3, + with_last_bn_affine=False, + with_avg_pool=True, + norm_cfg=dict(type='BN1d')) + head = dict( + type='LatentPredictHead', + loss=dict(type='CosineSimilarityLoss'), + predictor=dict( + type='NonLinearNeck', + in_channels=2, + hid_channels=2, + out_channels=2, + with_avg_pool=False, + with_last_bn=False, + with_last_bias=True, + norm_cfg=dict(type='BN1d'))) + + alg = SimSiam( + backbone=backbone, + neck=neck, + head=head, + data_preprocessor=copy.deepcopy(data_preprocessor)) + + fake_data = { + 'inputs': + [torch.randn((2, 3, 224, 224)), + torch.randn((2, 3, 224, 224))], + 'data_samples': [DataSample() for _ in range(2)] + } + fake_inputs = alg.data_preprocessor(fake_data) + fake_loss = alg(**fake_inputs, mode='loss') + assert fake_loss['loss'] > -1 + + # test extract + fake_feat = alg(fake_inputs['inputs'][0], mode='tensor') + assert fake_feat[0].size() == torch.Size([2, 512, 7, 7]) diff --git a/tests/test_models/test_selfsup/test_spark.py b/tests/test_models/test_selfsup/test_spark.py new file mode 100644 index 0000000..cb4fe3d --- /dev/null +++ b/tests/test_models/test_selfsup/test_spark.py @@ -0,0 +1,51 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import platform + +import pytest +import torch + +from mmpretrain.models import SparK +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_spark(): + data_preprocessor = { + 'mean': (123.675, 116.28, 103.53), + 'std': (58.395, 57.12, 57.375), + 'to_rgb': True + } + + backbone = dict( + type='SparseResNet', + depth=50, + out_indices=(0, 1, 2, 3), + drop_path_rate=0.05, + norm_cfg=dict(type='BN')) + neck = dict( + type='SparKLightDecoder', + feature_dim=512, + upsample_ratio=32, # equal to downsample_raito + mid_channels=0, + norm_cfg=dict(type='BN'), + last_act=False) + head = dict( + type='SparKPretrainHead', + loss=dict(type='PixelReconstructionLoss', criterion='L2')) + + alg = SparK( + backbone=backbone, + neck=neck, + head=head, + data_preprocessor=data_preprocessor, + enc_dec_norm_cfg=dict(type='BN'), + ) + + fake_data = { + 'inputs': torch.randn((2, 3, 224, 224)), + 'data_sample': [DataSample() for _ in range(2)] + } + + fake_inputs = alg.data_preprocessor(fake_data) + fake_loss = alg(**fake_inputs, mode='loss') + assert isinstance(fake_loss['loss'].item(), float) diff --git a/tests/test_models/test_selfsup/test_swav.py b/tests/test_models/test_selfsup/test_swav.py new file mode 100644 index 0000000..4bfe9cc --- /dev/null +++ b/tests/test_models/test_selfsup/test_swav.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import platform + +import pytest +import torch + +from mmpretrain.models import SwAV +from mmpretrain.structures import DataSample + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_swav(): + data_preprocessor = { + 'mean': (123.675, 116.28, 103.53), + 'std': (58.395, 57.12, 57.375), + 'to_rgb': True + } + backbone = dict( + type='ResNet', + depth=18, + norm_cfg=dict(type='BN'), + zero_init_residual=True) + neck = dict( + type='SwAVNeck', + in_channels=512, + hid_channels=2, + out_channels=2, + norm_cfg=dict(type='BN1d'), + with_avg_pool=True) + head = dict( + type='SwAVHead', + loss=dict( + type='SwAVLoss', + feat_dim=2, # equal to neck['out_channels'] + epsilon=0.05, + temperature=0.1, + num_crops=[2, 6])) + + alg = SwAV( + backbone=backbone, + neck=neck, + head=head, + data_preprocessor=data_preprocessor) + + fake_data = { + 'inputs': [ + torch.randn((2, 3, 224, 224)), + torch.randn((2, 3, 224, 224)), + torch.randn((2, 3, 96, 96)), + torch.randn((2, 3, 96, 96)), + torch.randn((2, 3, 96, 96)), + torch.randn((2, 3, 96, 96)), + torch.randn((2, 3, 96, 96)), + torch.randn((2, 3, 96, 96)) + ], + 'data_samples': [DataSample() for _ in range(2)] + } + + fake_inputs = alg.data_preprocessor(fake_data) + fake_outputs = alg(**fake_inputs, mode='loss') + assert isinstance(fake_outputs['loss'].item(), float) diff --git a/tests/test_models/test_selfsup/test_target_generators.py b/tests/test_models/test_selfsup/test_target_generators.py new file mode 100644 index 0000000..f53530b --- /dev/null +++ b/tests/test_models/test_selfsup/test_target_generators.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import platform +from unittest import TestCase + +import pytest +import torch + +from mmpretrain.models import VQKD, DALLEEncoder, HOGGenerator + + +class TestDALLE(TestCase): + + @pytest.mark.skipif( + platform.system() == 'Windows', reason='Windows mem limit') + def test_dalle(self): + model = DALLEEncoder() + fake_inputs = torch.rand((2, 3, 112, 112)) + fake_outputs = model(fake_inputs) + + assert list(fake_outputs.shape) == [2, 8192, 14, 14] + + +class TestHOGGenerator(TestCase): + + def test_hog_generator(self): + hog_generator = HOGGenerator() + + fake_input = torch.randn((2, 3, 224, 224)) + fake_output = hog_generator(fake_input) + assert list(fake_output.shape) == [2, 196, 108] + + fake_hog_out = hog_generator.out[0].unsqueeze(0) + fake_hog_img = hog_generator.generate_hog_image(fake_hog_out) + assert fake_hog_img.shape == (224, 224) + + with pytest.raises(AssertionError): + fake_hog_img = hog_generator.generate_hog_image( + hog_generator.out[0]) + + +class TestVQKD(TestCase): + + ENCODER_CFG = dict( + arch='base', + img_size=224, + patch_size=16, + in_channels=3, + out_indices=-1, + drop_rate=0., + drop_path_rate=0., + norm_cfg=dict(type='LN', eps=1e-6), + final_norm=True, + out_type='featmap', + with_cls_token=True, + frozen_stages=-1, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + use_shared_rel_pos_bias=False, + layer_scale_init_value=0., + interpolate_mode='bicubic', + patch_cfg=dict(), + layer_cfgs=dict(), + init_cfg=None) + + @pytest.mark.skipif( + platform.system() == 'Windows', reason='Windows mem limit') + def test_vqkd(self): + model = VQKD(encoder_config=self.ENCODER_CFG) + fake_inputs = torch.rand((2, 3, 224, 224)) + fake_outputs = model(fake_inputs) + + assert list(fake_outputs.shape) == [2, 196] diff --git a/tests/test_models/test_tta.py b/tests/test_models/test_tta.py new file mode 100644 index 0000000..36d7663 
--- /dev/null +++ b/tests/test_models/test_tta.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from copy import deepcopy +from unittest import TestCase + +import torch +from mmengine import ConfigDict +from mmengine.registry import init_default_scope + +from mmpretrain.models import AverageClsScoreTTA, ImageClassifier +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample + +init_default_scope('mmpretrain') + + +class TestAverageClsScoreTTA(TestCase): + DEFAULT_ARGS = dict( + type='AverageClsScoreTTA', + module=dict( + type='ImageClassifier', + backbone=dict(type='ResNet', depth=18), + neck=dict(type='GlobalAveragePooling'), + head=dict( + type='LinearClsHead', + num_classes=10, + in_channels=512, + loss=dict(type='CrossEntropyLoss')))) + + def test_initialize(self): + model: AverageClsScoreTTA = MODELS.build(self.DEFAULT_ARGS) + self.assertIsInstance(model.module, ImageClassifier) + + def test_forward(self): + inputs = torch.rand(1, 3, 224, 224) + model: AverageClsScoreTTA = MODELS.build(self.DEFAULT_ARGS) + + # The forward of TTA model should not be called. + with self.assertRaisesRegex(NotImplementedError, 'will not be called'): + model(inputs) + + def test_test_step(self): + cfg = ConfigDict(deepcopy(self.DEFAULT_ARGS)) + cfg.module.data_preprocessor = dict( + mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5]) + model: AverageClsScoreTTA = MODELS.build(cfg) + + img1 = torch.randint(0, 256, (1, 3, 224, 224)) + img2 = torch.randint(0, 256, (1, 3, 224, 224)) + data1 = { + 'inputs': img1, + 'data_samples': [DataSample().set_gt_label(1)] + } + data2 = { + 'inputs': img2, + 'data_samples': [DataSample().set_gt_label(1)] + } + data_tta = { + 'inputs': [img1, img2], + 'data_samples': [[DataSample().set_gt_label(1)], + [DataSample().set_gt_label(1)]] + } + + score1 = model.module.test_step(data1)[0].pred_score + score2 = model.module.test_step(data2)[0].pred_score + score_tta = model.test_step(data_tta)[0].pred_score + + torch.testing.assert_allclose(score_tta, (score1 + score2) / 2) diff --git a/tests/test_models/test_utils/test_attention.py b/tests/test_models/test_utils/test_attention.py new file mode 100644 index 0000000..27c0e09 --- /dev/null +++ b/tests/test_models/test_utils/test_attention.py @@ -0,0 +1,189 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
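+# WindowMSA learns one relative position bias per head for every possible
+# offset within a window, so the bias table for a (Wh, Ww) window has
+# (2 * Wh - 1) * (2 * Ww - 1) rows, e.g. 13 * 15 = 195 for the (7, 8) window
+# checked below. get_relative_position_index() re-implements the original
+# Swin-Transformer indexing as an independent reference.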
+from unittest import TestCase
+from unittest.mock import ANY, MagicMock
+
+import pytest
+import torch
+
+from mmpretrain.models.utils.attention import (ShiftWindowMSA, WindowMSA,
+                                               torch_meshgrid)
+
+
+def get_relative_position_index(window_size):
+    """Method from original code of Swin-Transformer."""
+    coords_h = torch.arange(window_size[0])
+    coords_w = torch.arange(window_size[1])
+    coords = torch.stack(torch_meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
+    coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
+    # 2, Wh*Ww, Wh*Ww
+    relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
+    # Wh*Ww, Wh*Ww, 2
+    relative_coords = relative_coords.permute(1, 2, 0).contiguous()
+    relative_coords[:, :, 0] += window_size[0] - 1  # shift to start from 0
+    relative_coords[:, :, 1] += window_size[1] - 1
+    relative_coords[:, :, 0] *= 2 * window_size[1] - 1
+    relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
+    return relative_position_index
+
+
+class TestWindowMSA(TestCase):
+
+    def test_forward(self):
+        attn = WindowMSA(embed_dims=96, window_size=(7, 7), num_heads=4)
+        inputs = torch.rand((16, 7 * 7, 96))
+        output = attn(inputs)
+        self.assertEqual(output.shape, inputs.shape)
+
+        # test non-square window_size
+        attn = WindowMSA(embed_dims=96, window_size=(6, 7), num_heads=4)
+        inputs = torch.rand((16, 6 * 7, 96))
+        output = attn(inputs)
+        self.assertEqual(output.shape, inputs.shape)
+
+    def test_relative_pos_embed(self):
+        attn = WindowMSA(embed_dims=96, window_size=(7, 8), num_heads=4)
+        self.assertEqual(attn.relative_position_bias_table.shape,
+                         ((2 * 7 - 1) * (2 * 8 - 1), 4))
+        # test relative_position_index
+        expected_rel_pos_index = get_relative_position_index((7, 8))
+        self.assertTrue(
+            torch.allclose(attn.relative_position_index,
+                           expected_rel_pos_index))
+
+        # test default init
+        self.assertTrue(
+            torch.allclose(attn.relative_position_bias_table,
+                           torch.tensor(0.)))
+        attn.init_weights()
+        self.assertFalse(
+            torch.allclose(attn.relative_position_bias_table,
+                           torch.tensor(0.)))
+
+    def test_qkv_bias(self):
+        # test qkv_bias=True
+        attn = WindowMSA(
+            embed_dims=96, window_size=(7, 7), num_heads=4, qkv_bias=True)
+        self.assertEqual(attn.qkv.bias.shape, (96 * 3, ))
+
+        # test qkv_bias=False
+        attn = WindowMSA(
+            embed_dims=96, window_size=(7, 7), num_heads=4, qkv_bias=False)
+        self.assertIsNone(attn.qkv.bias)
+
+    def test_qk_scale(self):
+        # test default qk_scale
+        attn = WindowMSA(
+            embed_dims=96, window_size=(7, 7), num_heads=4, qk_scale=None)
+        head_dims = 96 // 4
+        self.assertAlmostEqual(attn.scale, head_dims**-0.5)
+
+        # test specified qk_scale
+        attn = WindowMSA(
+            embed_dims=96, window_size=(7, 7), num_heads=4, qk_scale=0.3)
+        self.assertEqual(attn.scale, 0.3)
+
+    def test_attn_drop(self):
+        inputs = torch.rand(16, 7 * 7, 96)
+        attn = WindowMSA(
+            embed_dims=96, window_size=(7, 7), num_heads=4, attn_drop=1.0)
+        # drop all attn output, output should be equal to proj.bias
+        self.assertTrue(torch.allclose(attn(inputs), attn.proj.bias))
+
+    def test_proj_drop(self):
+        inputs = torch.rand(16, 7 * 7, 96)
+        attn = WindowMSA(
+            embed_dims=96, window_size=(7, 7), num_heads=4, proj_drop=1.0)
+        self.assertTrue(torch.allclose(attn(inputs), torch.tensor(0.)))
+
+    def test_mask(self):
+        inputs = torch.rand(16, 7 * 7, 96)
+        attn = WindowMSA(embed_dims=96, window_size=(7, 7), num_heads=4)
+        mask = torch.zeros((4, 49, 49))
+        # Mask the first column
+        mask[:, 0, :] = -100
+        mask[:, :, 0] = -100
+        outs = attn(inputs, mask=mask)
+        inputs[:, 0, :].normal_()
+
outs_with_mask = attn(inputs, mask=mask) + torch.testing.assert_allclose(outs[:, 1:, :], outs_with_mask[:, 1:, :]) + + +class TestShiftWindowMSA(TestCase): + + def test_forward(self): + inputs = torch.rand((1, 14 * 14, 96)) + attn = ShiftWindowMSA(embed_dims=96, window_size=7, num_heads=4) + output = attn(inputs, (14, 14)) + self.assertEqual(output.shape, inputs.shape) + self.assertEqual(attn.w_msa.relative_position_bias_table.shape, + ((2 * 7 - 1)**2, 4)) + + # test forward with shift_size + attn = ShiftWindowMSA( + embed_dims=96, window_size=7, num_heads=4, shift_size=3) + output = attn(inputs, (14, 14)) + assert output.shape == (inputs.shape) + + # test irregular input shape + input_resolution = (19, 18) + attn = ShiftWindowMSA(embed_dims=96, num_heads=4, window_size=7) + inputs = torch.rand((1, 19 * 18, 96)) + output = attn(inputs, input_resolution) + assert output.shape == (inputs.shape) + + # test wrong input_resolution + input_resolution = (14, 14) + attn = ShiftWindowMSA(embed_dims=96, num_heads=4, window_size=7) + inputs = torch.rand((1, 14 * 14, 96)) + with pytest.raises(AssertionError): + attn(inputs, (14, 15)) + + def test_pad_small_map(self): + # test pad_small_map=True + inputs = torch.rand((1, 6 * 7, 96)) + attn = ShiftWindowMSA( + embed_dims=96, + window_size=7, + num_heads=4, + shift_size=3, + pad_small_map=True) + attn.get_attn_mask = MagicMock(wraps=attn.get_attn_mask) + output = attn(inputs, (6, 7)) + self.assertEqual(output.shape, inputs.shape) + attn.get_attn_mask.assert_called_once_with((7, 7), + window_size=7, + shift_size=3, + device=ANY) + + # test pad_small_map=False + inputs = torch.rand((1, 6 * 7, 96)) + attn = ShiftWindowMSA( + embed_dims=96, + window_size=7, + num_heads=4, + shift_size=3, + pad_small_map=False) + with self.assertRaisesRegex(AssertionError, r'the window size \(7\)'): + attn(inputs, (6, 7)) + + # test pad_small_map=False, and the input size equals to window size + inputs = torch.rand((1, 7 * 7, 96)) + attn.get_attn_mask = MagicMock(wraps=attn.get_attn_mask) + output = attn(inputs, (7, 7)) + self.assertEqual(output.shape, inputs.shape) + attn.get_attn_mask.assert_called_once_with((7, 7), + window_size=7, + shift_size=0, + device=ANY) + + def test_drop_layer(self): + inputs = torch.rand((1, 14 * 14, 96)) + attn = ShiftWindowMSA( + embed_dims=96, + window_size=7, + num_heads=4, + dropout_layer=dict(type='Dropout', drop_prob=1.0)) + attn.init_weights() + # drop all attn output, output shuold be equal to proj.bias + self.assertTrue( + torch.allclose(attn(inputs, (14, 14)), torch.tensor(0.))) diff --git a/tests/test_models/test_utils/test_batch_augments.py b/tests/test_models/test_utils/test_batch_augments.py new file mode 100644 index 0000000..b4ba377 --- /dev/null +++ b/tests/test_models/test_utils/test_batch_augments.py @@ -0,0 +1,166 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase +from unittest.mock import MagicMock, patch + +import numpy as np +import torch + +from mmpretrain.models import Mixup, RandomBatchAugment +from mmpretrain.registry import BATCH_AUGMENTS + + +class TestRandomBatchAugment(TestCase): + + def test_initialize(self): + # test single augmentation + augments = dict(type='Mixup', alpha=1.) + batch_augments = RandomBatchAugment(augments) + self.assertIsInstance(batch_augments.augments, list) + self.assertEqual(len(batch_augments.augments), 1) + + # test specify augments with object + augments = Mixup(alpha=1.) 
+ batch_augments = RandomBatchAugment(augments) + self.assertIsInstance(batch_augments.augments, list) + self.assertEqual(len(batch_augments.augments), 1) + + # test multiple augmentation + augments = [ + dict(type='Mixup', alpha=1.), + dict(type='CutMix', alpha=0.8), + ] + batch_augments = RandomBatchAugment(augments) + # mixup, cutmix + self.assertEqual(len(batch_augments.augments), 2) + self.assertIsNone(batch_augments.probs) + + # test specify probs + augments = [ + dict(type='Mixup', alpha=1.), + dict(type='CutMix', alpha=0.8), + ] + batch_augments = RandomBatchAugment(augments, probs=[0.5, 0.3]) + # mixup, cutmix and None + self.assertEqual(len(batch_augments.augments), 3) + self.assertAlmostEqual(batch_augments.probs[-1], 0.2) + + # test assertion + with self.assertRaisesRegex(AssertionError, 'Got 2 vs 1'): + RandomBatchAugment(augments, probs=0.5) + + with self.assertRaisesRegex(AssertionError, 'exceeds 1.'): + RandomBatchAugment(augments, probs=[0.5, 0.6]) + + def test_call(self): + inputs = torch.rand(2, 3, 224, 224) + scores = torch.rand(2, 10) + + augments = [ + dict(type='Mixup', alpha=1.), + dict(type='CutMix', alpha=0.8), + ] + batch_augments = RandomBatchAugment(augments, probs=[0.5, 0.3]) + + with patch('numpy.random', np.random.RandomState(0)): + batch_augments.augments[1] = MagicMock() + batch_augments(inputs, scores) + batch_augments.augments[1].assert_called_once_with(inputs, scores) + + augments = [ + dict(type='Mixup', alpha=1.), + dict(type='CutMix', alpha=0.8), + ] + batch_augments = RandomBatchAugment(augments, probs=[0.0, 0.0]) + mixed_inputs, mixed_samples = batch_augments(inputs, scores) + self.assertIs(mixed_inputs, inputs) + self.assertIs(mixed_samples, scores) + + +class TestMixup(TestCase): + DEFAULT_ARGS = dict(type='Mixup', alpha=1.) + + def test_initialize(self): + with self.assertRaises(AssertionError): + cfg = {**self.DEFAULT_ARGS, 'alpha': 'unknown'} + BATCH_AUGMENTS.build(cfg) + + def test_call(self): + inputs = torch.rand(2, 3, 224, 224) + scores = torch.rand(2, 10) + + mixup = BATCH_AUGMENTS.build(self.DEFAULT_ARGS) + mixed_inputs, mixed_scores = mixup(inputs, scores) + self.assertEqual(mixed_inputs.shape, (2, 3, 224, 224)) + self.assertEqual(mixed_scores.shape, (2, 10)) + + # test binary classification + scores = torch.rand(2, 1) + + mixed_inputs, mixed_scores = mixup(inputs, scores) + self.assertEqual(mixed_inputs.shape, (2, 3, 224, 224)) + self.assertEqual(mixed_scores.shape, (2, 1)) + + +class TestCutMix(TestCase): + DEFAULT_ARGS = dict(type='CutMix', alpha=1.) 
+ + def test_initialize(self): + with self.assertRaises(AssertionError): + cfg = {**self.DEFAULT_ARGS, 'alpha': 'unknown'} + BATCH_AUGMENTS.build(cfg) + + def test_call(self): + inputs = torch.rand(2, 3, 224, 224) + scores = torch.rand(2, 10) + + # test with cutmix_minmax + cfg = {**self.DEFAULT_ARGS, 'cutmix_minmax': (0.1, 0.2)} + cutmix = BATCH_AUGMENTS.build(cfg) + mixed_inputs, mixed_scores = cutmix(inputs, scores) + self.assertEqual(mixed_inputs.shape, (2, 3, 224, 224)) + self.assertEqual(mixed_scores.shape, (2, 10)) + + # test without correct_lam + cfg = {**self.DEFAULT_ARGS, 'correct_lam': False} + cutmix = BATCH_AUGMENTS.build(cfg) + mixed_inputs, mixed_scores = cutmix(inputs, scores) + self.assertEqual(mixed_inputs.shape, (2, 3, 224, 224)) + self.assertEqual(mixed_scores.shape, (2, 10)) + + # test default settings + cutmix = BATCH_AUGMENTS.build(self.DEFAULT_ARGS) + mixed_inputs, mixed_scores = cutmix(inputs, scores) + self.assertEqual(mixed_inputs.shape, (2, 3, 224, 224)) + self.assertEqual(mixed_scores.shape, (2, 10)) + + # test binary classification + scores = torch.rand(2, 1) + + mixed_inputs, mixed_scores = cutmix(inputs, scores) + self.assertEqual(mixed_inputs.shape, (2, 3, 224, 224)) + self.assertEqual(mixed_scores.shape, (2, 1)) + + +class TestResizeMix(TestCase): + DEFAULT_ARGS = dict(type='ResizeMix', alpha=1.) + + def test_initialize(self): + with self.assertRaises(AssertionError): + cfg = {**self.DEFAULT_ARGS, 'alpha': 'unknown'} + BATCH_AUGMENTS.build(cfg) + + def test_call(self): + inputs = torch.rand(2, 3, 224, 224) + scores = torch.rand(2, 10) + + mixup = BATCH_AUGMENTS.build(self.DEFAULT_ARGS) + mixed_inputs, mixed_scores = mixup(inputs, scores) + self.assertEqual(mixed_inputs.shape, (2, 3, 224, 224)) + self.assertEqual(mixed_scores.shape, (2, 10)) + + # test binary classification + scores = torch.rand(2, 1) + + mixed_inputs, mixed_scores = mixup(inputs, scores) + self.assertEqual(mixed_inputs.shape, (2, 3, 224, 224)) + self.assertEqual(mixed_scores.shape, (2, 1)) diff --git a/tests/test_models/test_utils/test_data_preprocessor.py b/tests/test_models/test_utils/test_data_preprocessor.py new file mode 100644 index 0000000..2ae23bf --- /dev/null +++ b/tests/test_models/test_utils/test_data_preprocessor.py @@ -0,0 +1,248 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
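+# ClsDataPreprocessor pads images so H and W are multiples of
+# pad_size_divisor (255 -> 256 for divisor 16) and, with mean=std=127.5,
+# maps uint8 pixels from [0, 255] into [-1, 1], which the normalization test
+# below asserts. to_rgb=True flips the channel order, hence the comparisons
+# against .flip() of the raw inputs.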
+from unittest import TestCase + +import pytest +import torch + +from mmpretrain.models import (ClsDataPreprocessor, RandomBatchAugment, + SelfSupDataPreprocessor, + TwoNormDataPreprocessor, VideoDataPreprocessor) +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample + + +class TestClsDataPreprocessor(TestCase): + + def test_stack_batch(self): + cfg = dict(type='ClsDataPreprocessor') + processor: ClsDataPreprocessor = MODELS.build(cfg) + + data = { + 'inputs': [torch.randint(0, 256, (3, 224, 224))], + 'data_samples': [DataSample().set_gt_label(1)] + } + processed_data = processor(data) + inputs = processed_data['inputs'] + data_samples = processed_data['data_samples'] + self.assertEqual(inputs.shape, (1, 3, 224, 224)) + self.assertEqual(len(data_samples), 1) + self.assertTrue((data_samples[0].gt_label == torch.tensor([1])).all()) + + def test_padding(self): + cfg = dict(type='ClsDataPreprocessor', pad_size_divisor=16) + processor: ClsDataPreprocessor = MODELS.build(cfg) + + data = { + 'inputs': [ + torch.randint(0, 256, (3, 255, 255)), + torch.randint(0, 256, (3, 224, 224)) + ] + } + inputs = processor(data)['inputs'] + self.assertEqual(inputs.shape, (2, 3, 256, 256)) + + data = {'inputs': torch.randint(0, 256, (2, 3, 255, 255))} + inputs = processor(data)['inputs'] + self.assertEqual(inputs.shape, (2, 3, 256, 256)) + + def test_to_rgb(self): + cfg = dict(type='ClsDataPreprocessor', to_rgb=True) + processor: ClsDataPreprocessor = MODELS.build(cfg) + + data = {'inputs': [torch.randint(0, 256, (3, 224, 224))]} + inputs = processor(data)['inputs'] + torch.testing.assert_allclose(data['inputs'][0].flip(0).float(), + inputs[0]) + + data = {'inputs': torch.randint(0, 256, (1, 3, 224, 224))} + inputs = processor(data)['inputs'] + torch.testing.assert_allclose(data['inputs'].flip(1).float(), inputs) + + def test_normalization(self): + cfg = dict( + type='ClsDataPreprocessor', + mean=[127.5, 127.5, 127.5], + std=[127.5, 127.5, 127.5]) + processor: ClsDataPreprocessor = MODELS.build(cfg) + + data = {'inputs': [torch.randint(0, 256, (3, 224, 224))]} + processed_data = processor(data) + inputs = processed_data['inputs'] + self.assertTrue((inputs >= -1).all()) + self.assertTrue((inputs <= 1).all()) + self.assertIsNone(processed_data['data_samples']) + + data = {'inputs': torch.randint(0, 256, (1, 3, 224, 224))} + inputs = processor(data)['inputs'] + self.assertTrue((inputs >= -1).all()) + self.assertTrue((inputs <= 1).all()) + + def test_batch_augmentation(self): + cfg = dict( + type='ClsDataPreprocessor', + num_classes=10, + batch_augments=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.) 
+ ])) + processor: ClsDataPreprocessor = MODELS.build(cfg) + self.assertIsInstance(processor.batch_augments, RandomBatchAugment) + data = { + 'inputs': [torch.randint(0, 256, (3, 224, 224))], + 'data_samples': [DataSample().set_gt_label(1)] + } + processed_data = processor(data, training=True) + self.assertIn('inputs', processed_data) + self.assertIn('data_samples', processed_data) + + cfg['batch_augments'] = None + processor: ClsDataPreprocessor = MODELS.build(cfg) + self.assertIsNone(processor.batch_augments) + data = {'inputs': [torch.randint(0, 256, (3, 224, 224))]} + processed_data = processor(data, training=True) + self.assertIn('inputs', processed_data) + self.assertIsNone(processed_data['data_samples']) + + +class TestSelfSupDataPreprocessor(TestCase): + + def test_to_rgb(self): + cfg = dict(type='SelfSupDataPreprocessor', to_rgb=True) + processor: SelfSupDataPreprocessor = MODELS.build(cfg) + self.assertTrue(processor._channel_conversion) + + fake_data = { + 'inputs': + [torch.randn((2, 3, 224, 224)), + torch.randn((2, 3, 224, 224))], + 'data_samples': [DataSample(), DataSample()] + } + inputs = processor(fake_data)['inputs'] + torch.testing.assert_allclose(fake_data['inputs'][0].flip(1).float(), + inputs[0]) + torch.testing.assert_allclose(fake_data['inputs'][1].flip(1).float(), + inputs[1]) + + def test_forward(self): + data_preprocessor = SelfSupDataPreprocessor( + to_rgb=True, mean=[124, 117, 104], std=[59, 58, 58]) + + # test list inputs + fake_data = { + 'inputs': [torch.randn((2, 3, 224, 224))], + 'data_samples': [DataSample(), DataSample()] + } + fake_output = data_preprocessor(fake_data) + self.assertEqual(len(fake_output['inputs']), 1) + self.assertEqual(len(fake_output['data_samples']), 2) + + # test torch.Tensor inputs + fake_data = { + 'inputs': torch.randn((2, 3, 224, 224)), + 'data_samples': [DataSample(), DataSample()] + } + fake_output = data_preprocessor(fake_data) + self.assertEqual(fake_output['inputs'].shape, + torch.Size((2, 3, 224, 224))) + self.assertEqual(len(fake_output['data_samples']), 2) + + +class TestTwoNormDataPreprocessor(TestCase): + + def test_assertion(self): + with pytest.raises(AssertionError): + _ = TwoNormDataPreprocessor( + to_rgb=True, + mean=(123.675, 116.28, 103.53), + std=(58.395, 57.12, 57.375), + ) + with pytest.raises(AssertionError): + _ = TwoNormDataPreprocessor( + to_rgb=True, + mean=(123.675, 116.28, 103.53), + std=(58.395, 57.12, 57.375), + second_mean=(127.5, 127.5), + second_std=(127.5, 127.5, 127.5), + ) + with pytest.raises(AssertionError): + _ = TwoNormDataPreprocessor( + to_rgb=True, + mean=(123.675, 116.28, 103.53), + std=(58.395, 57.12, 57.375), + second_mean=(127.5, 127.5, 127.5), + second_std=(127.5, 127.5), + ) + + def test_forward(self): + data_preprocessor = dict( + mean=(123.675, 116.28, 103.53), + std=(58.395, 57.12, 57.375), + second_mean=(127.5, 127.5, 127.5), + second_std=(127.5, 127.5, 127.5), + to_rgb=True) + + data_preprocessor = TwoNormDataPreprocessor(**data_preprocessor) + fake_data = { + 'inputs': + [torch.randn((2, 3, 224, 224)), + torch.randn((2, 3, 224, 224))], + 'data_sample': [DataSample(), DataSample()] + } + fake_output = data_preprocessor(fake_data) + self.assertEqual(len(fake_output['inputs']), 2) + self.assertEqual(len(fake_output['data_samples']), 2) + + +class TestVideoDataPreprocessor(TestCase): + + def test_NCTHW_format(self): + data_preprocessor = VideoDataPreprocessor( + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + to_rgb=True, + format_shape='NCTHW') + + # test list 
inputs + fake_data = { + 'inputs': [torch.randn((2, 3, 4, 224, 224))], + 'data_sample': [DataSample(), DataSample()] + } + fake_output = data_preprocessor(fake_data) + self.assertEqual(len(fake_output['inputs']), 1) + self.assertEqual(len(fake_output['data_samples']), 2) + + # test torch.Tensor inputs + fake_data = { + 'inputs': torch.randn((2, 3, 4, 224, 224)), + 'data_sample': [DataSample(), DataSample()] + } + fake_output = data_preprocessor(fake_data) + self.assertEqual(fake_output['inputs'].shape, + torch.Size((2, 3, 4, 224, 224))) + self.assertEqual(len(fake_output['data_samples']), 2) + + def test_NCHW_format(self): + data_preprocessor = VideoDataPreprocessor( + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + to_rgb=True, + format_shape='NCHW') + + # test list inputs + fake_data = { + 'inputs': [torch.randn((2, 3, 224, 224))], + 'data_sample': [DataSample(), DataSample()] + } + fake_output = data_preprocessor(fake_data) + self.assertEqual(len(fake_output['inputs']), 1) + self.assertEqual(len(fake_output['data_samples']), 2) + + # test torch.Tensor inputs + fake_data = { + 'inputs': torch.randn((2, 3, 224, 224)), + 'data_sample': [DataSample(), DataSample()] + } + fake_output = data_preprocessor(fake_data) + self.assertEqual(fake_output['inputs'].shape, + torch.Size((2, 3, 224, 224))) + self.assertEqual(len(fake_output['data_samples']), 2) diff --git a/tests/test_models/test_utils/test_ema.py b/tests/test_models/test_utils/test_ema.py new file mode 100644 index 0000000..7ffb6ec --- /dev/null +++ b/tests/test_models/test_utils/test_ema.py @@ -0,0 +1,48 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from unittest import TestCase + +import torch +import torch.nn as nn +from mmengine.logging import MessageHub +from mmengine.testing import assert_allclose + +from mmpretrain.models.utils import CosineEMA + + +class TestEMA(TestCase): + + def test_cosine_ema(self): + model = nn.Sequential(nn.Conv2d(1, 5, kernel_size=3), nn.Linear(5, 10)) + + # init message hub + max_iters = 5 + test = dict(name='ema_test') + message_hub = MessageHub.get_instance(**test) + message_hub.update_info('max_iters', max_iters) + + # test EMA + momentum = 0.996 + end_momentum = 1. + + ema_model = CosineEMA(model, momentum=1 - momentum) + averaged_params = [ + torch.zeros_like(param) for param in model.parameters() + ] + + for i in range(max_iters): + updated_averaged_params = [] + for p, p_avg in zip(model.parameters(), averaged_params): + p.detach().add_(torch.randn_like(p)) + if i == 0: + updated_averaged_params.append(p.clone()) + else: + m = end_momentum - (end_momentum - momentum) * ( + math.cos(math.pi * i / float(max_iters)) + 1) / 2 + updated_averaged_params.append( + (p_avg * m + p * (1 - m)).clone()) + ema_model.update_parameters(model) + averaged_params = updated_averaged_params + + for p_target, p_ema in zip(averaged_params, ema_model.parameters()): + assert_allclose(p_target, p_ema) diff --git a/tests/test_models/test_utils/test_embed.py b/tests/test_models/test_utils/test_embed.py new file mode 100644 index 0000000..cb28200 --- /dev/null +++ b/tests/test_models/test_utils/test_embed.py @@ -0,0 +1,88 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
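+# The expected patch counts follow the usual convolution output-size formula
+# (see cal_unfold_dim below): the default PatchEmbed splits a 224x224 image
+# into 14 x 14 = 196 patches, while kernel_size=16 with stride=8 gives
+# (224 - 16) / 8 + 1 = 27 per side, i.e. 729 tokens.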
+import pytest +import torch + +from mmpretrain.models.backbones import VGG +from mmpretrain.models.utils import HybridEmbed, PatchEmbed, PatchMerging + + +def cal_unfold_dim(dim, kernel_size, stride, padding=0, dilation=1): + return (dim + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1 + + +def test_patch_embed(): + # Test PatchEmbed + patch_embed = PatchEmbed() + img = torch.randn(1, 3, 224, 224) + img = patch_embed(img) + assert img.shape == torch.Size((1, 196, 768)) + + # Test PatchEmbed with stride = 8 + conv_cfg = dict(kernel_size=16, stride=8) + patch_embed = PatchEmbed(conv_cfg=conv_cfg) + img = torch.randn(1, 3, 224, 224) + img = patch_embed(img) + assert img.shape == torch.Size((1, 729, 768)) + + +def test_hybrid_embed(): + # Test VGG11 HybridEmbed + backbone = VGG(11, norm_eval=True) + backbone.init_weights() + patch_embed = HybridEmbed(backbone) + img = torch.randn(1, 3, 224, 224) + img = patch_embed(img) + assert img.shape == torch.Size((1, 49, 768)) + + +def test_patch_merging(): + settings = dict(in_channels=16, out_channels=32, padding=0) + downsample = PatchMerging(**settings) + + # test forward with wrong dims + with pytest.raises(AssertionError): + inputs = torch.rand((1, 16, 56 * 56)) + downsample(inputs, input_size=(56, 56)) + + # test patch merging forward + inputs = torch.rand((1, 56 * 56, 16)) + out, output_size = downsample(inputs, input_size=(56, 56)) + assert output_size == (28, 28) + assert out.shape == (1, 28 * 28, 32) + + # test different kernel_size in each direction + downsample = PatchMerging(kernel_size=(2, 3), **settings) + out, output_size = downsample(inputs, input_size=(56, 56)) + expected_dim = cal_unfold_dim(56, 2, 2) * cal_unfold_dim(56, 3, 3) + assert downsample.sampler.kernel_size == (2, 3) + assert output_size == (cal_unfold_dim(56, 2, 2), cal_unfold_dim(56, 3, 3)) + assert out.shape == (1, expected_dim, 32) + + # test default stride + downsample = PatchMerging(kernel_size=6, **settings) + assert downsample.sampler.stride == (6, 6) + + # test stride=3 + downsample = PatchMerging(kernel_size=6, stride=3, **settings) + out, output_size = downsample(inputs, input_size=(56, 56)) + assert downsample.sampler.stride == (3, 3) + assert out.shape == (1, cal_unfold_dim(56, 6, stride=3)**2, 32) + + # test padding + downsample = PatchMerging( + in_channels=16, out_channels=32, kernel_size=6, padding=2) + out, output_size = downsample(inputs, input_size=(56, 56)) + assert downsample.sampler.padding == (2, 2) + assert out.shape == (1, cal_unfold_dim(56, 6, 6, padding=2)**2, 32) + + # test str padding + downsample = PatchMerging(in_channels=16, out_channels=32, kernel_size=6) + out, output_size = downsample(inputs, input_size=(56, 56)) + assert downsample.sampler.padding == (0, 0) + assert out.shape == (1, cal_unfold_dim(56, 6, 6, padding=2)**2, 32) + + # test dilation + downsample = PatchMerging(kernel_size=6, dilation=2, **settings) + out, output_size = downsample(inputs, input_size=(56, 56)) + assert downsample.sampler.dilation == (2, 2) + assert out.shape == (1, cal_unfold_dim(56, 6, 6, dilation=2)**2, 32) diff --git a/tests/test_models/test_utils/test_inverted_residual.py b/tests/test_models/test_utils/test_inverted_residual.py new file mode 100644 index 0000000..e61ceb1 --- /dev/null +++ b/tests/test_models/test_utils/test_inverted_residual.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.utils import InvertedResidual, SELayer + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def test_inverted_residual(): + + with pytest.raises(AssertionError): + # stride must be in [1, 2] + InvertedResidual(16, 16, 32, stride=3) + + with pytest.raises(AssertionError): + # se_cfg must be None or dict + InvertedResidual(16, 16, 32, se_cfg=list()) + + # Add expand conv if in_channels and mid_channels is not the same + assert InvertedResidual(32, 16, 32).with_expand_conv is False + assert InvertedResidual(16, 16, 32).with_expand_conv is True + + # Test InvertedResidual forward, stride=1 + block = InvertedResidual(16, 16, 32, stride=1) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert getattr(block, 'se', None) is None + assert block.with_res_shortcut + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward, stride=2 + block = InvertedResidual(16, 16, 32, stride=2) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert not block.with_res_shortcut + assert x_out.shape == torch.Size((1, 16, 28, 28)) + + # Test InvertedResidual forward with se layer + se_cfg = dict(channels=32) + block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert isinstance(block.se, SELayer) + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward without expand conv + block = InvertedResidual(32, 16, 32) + x = torch.randn(1, 32, 56, 56) + x_out = block(x) + assert getattr(block, 'expand_conv', None) is None + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward with GroupNorm + block = InvertedResidual( + 16, 16, 32, norm_cfg=dict(type='GN', num_groups=2)) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + for m in block.modules(): + if is_norm(m): + assert isinstance(m, GroupNorm) + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward with HSigmoid + block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid')) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert x_out.shape == torch.Size((1, 16, 56, 56)) + + # Test InvertedResidual forward with checkpoint + block = InvertedResidual(16, 16, 32, with_cp=True) + x = torch.randn(1, 16, 56, 56) + x_out = block(x) + assert block.with_cp + assert x_out.shape == torch.Size((1, 16, 56, 56)) diff --git a/tests/test_models/test_utils/test_layer_scale.py b/tests/test_models/test_utils/test_layer_scale.py new file mode 100644 index 0000000..54b6b60 --- /dev/null +++ b/tests/test_models/test_utils/test_layer_scale.py @@ -0,0 +1,47 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from unittest import TestCase
+
+import torch
+
+from mmpretrain.models.utils import LayerScale
+
+
+class TestLayerScale(TestCase):
+
+    def test_init(self):
+        with self.assertRaisesRegex(AssertionError, "'data_format' could"):
+            cfg = dict(
+                dim=10,
+                data_format='BNC',
+            )
+            LayerScale(**cfg)
+
+        cfg = dict(dim=10)
+        ls = LayerScale(**cfg)
+        assert torch.equal(ls.weight,
+                           torch.ones(10, requires_grad=True) * 1e-5)
+
+    def test_forward(self):
+        # Test channels_last
+        cfg = dict(dim=256, inplace=False, data_format='channels_last')
+        ls_channels_last = LayerScale(**cfg)
+        x = torch.randn((4, 49, 256))
+        out = ls_channels_last(x)
+        self.assertEqual(tuple(out.size()), (4, 49, 256))
+        assert torch.equal(x * 1e-5, out)
+
+        # Test channels_first
+        cfg = dict(dim=256, inplace=False, data_format='channels_first')
+        ls_channels_first = LayerScale(**cfg)
+        x = torch.randn((4, 256, 7, 7))
+        out = ls_channels_first(x)
+        self.assertEqual(tuple(out.size()), (4, 256, 7, 7))
+        assert torch.equal(x * 1e-5, out)
+
+        # Test inplace True
+        cfg = dict(dim=256, inplace=True, data_format='channels_first')
+        ls_channels_first = LayerScale(**cfg)
+        x = torch.randn((4, 256, 7, 7))
+        out = ls_channels_first(x)
+        self.assertEqual(tuple(out.size()), (4, 256, 7, 7))
+        self.assertIs(x, out)
diff --git a/tests/test_models/test_utils/test_misc.py b/tests/test_models/test_utils/test_misc.py
new file mode 100644
index 0000000..49d233e
--- /dev/null
+++ b/tests/test_models/test_utils/test_misc.py
@@ -0,0 +1,59 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import pytest
+import torch
+from mmengine.utils import digit_version
+
+from mmpretrain.models.utils import channel_shuffle, is_tracing, make_divisible
+
+
+def test_make_divisible():
+    # test min_value is None
+    result = make_divisible(34, 8, None)
+    assert result == 32
+
+    # test when new_value < min_ratio * value
+    result = make_divisible(10, 8, min_ratio=0.9)
+    assert result == 16
+
+    # test min_ratio = 0.8
+    result = make_divisible(33, 8, min_ratio=0.8)
+    assert result == 32
+
+
+def test_channel_shuffle():
+    x = torch.randn(1, 24, 56, 56)
+    with pytest.raises(AssertionError):
+        # num_channels should be divisible by groups
+        channel_shuffle(x, 7)
+
+    groups = 3
+    batch_size, num_channels, height, width = x.size()
+    channels_per_group = num_channels // groups
+    out = channel_shuffle(x, groups)
+    # test the output value when groups = 3
+    for b in range(batch_size):
+        for c in range(num_channels):
+            c_out = c % channels_per_group * groups + c // channels_per_group
+            for i in range(height):
+                for j in range(width):
+                    assert x[b, c, i, j] == out[b, c_out, i, j]
+
+
+@pytest.mark.skipif(
+    digit_version(torch.__version__) < digit_version('1.6.0'),
+    reason='torch.jit.is_tracing is not available before 1.6.0')
+def test_is_tracing():
+
+    def foo(x):
+        if is_tracing():
+            return x
+        else:
+            return x.tolist()
+
+    x = torch.rand(3)
+    # test without trace
+    assert isinstance(foo(x), list)
+
+    # test with trace
+    traced_foo = torch.jit.trace(foo, (torch.rand(1), ))
+    assert isinstance(traced_foo(x), torch.Tensor)
diff --git a/tests/test_models/test_utils/test_norm.py b/tests/test_models/test_utils/test_norm.py
new file mode 100644
index 0000000..a4d3a8b
--- /dev/null
+++ b/tests/test_models/test_utils/test_norm.py
@@ -0,0 +1,60 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from unittest import TestCase + +import torch +import torch.nn.functional as F + +from mmpretrain.models.utils import GRN, LayerNorm2d + + +class TestGRN(TestCase): + + def test_init(self): + module = GRN(in_channels=32, eps=1e-3) + self.assertEqual(module.in_channels, 32) + self.assertEqual(module.eps, 1e-3) + self.assertTrue(module.gamma.requires_grad) + self.assertTrue(module.beta.requires_grad) + self.assertEqual(module.gamma.shape, (32, )) + self.assertTrue(module.beta.shape, (32, )) + + def test_forward(self): + module = GRN(in_channels=32, eps=1e-3) + input_ = torch.rand(1, 28, 28, 32) + gx = torch.norm(input_, p=2, dim=(1, 2), keepdim=True) + nx = gx / (gx.mean(dim=3, keepdim=True) + 1e-3) + expected_out = module.gamma * input_ * nx + module.beta + input_ + + torch.testing.assert_allclose( + module(input_, data_format='channel_last'), expected_out) + + input_ = input_.permute([0, 3, 1, 2]) + expected_out = expected_out.permute([0, 3, 1, 2]) + torch.testing.assert_allclose( + module(input_, data_format='channel_first'), expected_out) + + +class TestLayerNorm2d(TestCase): + + def test_init(self): + module = LayerNorm2d(num_channels=32, eps=1e-3) + self.assertEqual(module.num_channels, 32) + self.assertEqual(module.eps, 1e-3) + self.assertTrue(module.weight.requires_grad) + self.assertTrue(module.bias.requires_grad) + self.assertEqual(module.weight.shape, (32, )) + self.assertTrue(module.bias.shape, (32, )) + + def test_forward(self): + module = LayerNorm2d(num_channels=32, eps=1e-3) + input_ = torch.rand(1, 28, 28, 32) + expected_out = F.layer_norm(input_, module.normalized_shape, + module.weight, module.bias, 1e-3) + + torch.testing.assert_allclose( + module(input_, data_format='channel_last'), expected_out) + + input_ = input_.permute([0, 3, 1, 2]) + expected_out = expected_out.permute([0, 3, 1, 2]) + torch.testing.assert_allclose( + module(input_, data_format='channel_first'), expected_out) diff --git a/tests/test_models/test_utils/test_position_encoding.py b/tests/test_models/test_utils/test_position_encoding.py new file mode 100644 index 0000000..7d80023 --- /dev/null +++ b/tests/test_models/test_utils/test_position_encoding.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmpretrain.models.utils import (ConditionalPositionEncoding, + RotaryEmbeddingFast) + + +def test_conditional_position_encoding_module(): + CPE = ConditionalPositionEncoding(in_channels=32, embed_dims=32, stride=2) + outs = CPE(torch.randn(1, 3136, 32), (56, 56)) + assert outs.shape == torch.Size([1, 784, 32]) + + +def test_rotary_embedding_fast_module(): + RoPE = RotaryEmbeddingFast(embed_dims=64, patch_resolution=24) + outs = RoPE(torch.randn(1, 2, 24 * 24, 64), (24, 24)) + assert outs.shape == torch.Size([1, 2, 24 * 24, 64]) + + RoPE = RotaryEmbeddingFast(embed_dims=64, patch_resolution=(14, 20)) + outs = RoPE(torch.randn(1, 2, 14 * 20, 64), (14, 20)) + assert outs.shape == torch.Size([1, 2, 14 * 20, 64]) diff --git a/tests/test_models/test_utils/test_se.py b/tests/test_models/test_utils/test_se.py new file mode 100644 index 0000000..447eb08 --- /dev/null +++ b/tests/test_models/test_utils/test_se.py @@ -0,0 +1,95 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
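+# When squeeze_channels is not given, SELayer appears to derive it as
+# make_divisible(channels // ratio, divisor), which explains the expected
+# widths below: 64 with the default ratio 16 gives 8, 128 with ratio 4 gives
+# 32, and 54 with ratio 4 gives 16 (divisor 8) or 14 (divisor 2). An explicit
+# squeeze_channels overrides ratio and divisor entirely.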
+import pytest +import torch +from torch.nn.modules import GroupNorm +from torch.nn.modules.batchnorm import _BatchNorm + +from mmpretrain.models.utils import SELayer + + +def is_norm(modules): + """Check if is one of the norms.""" + if isinstance(modules, (GroupNorm, _BatchNorm)): + return True + return False + + +def test_se(): + with pytest.raises(AssertionError): + # base_channels must be a number + SELayer(16, squeeze_channels='32') + + with pytest.raises(AssertionError): + # base_channels must be None or a number larger than 0 + SELayer(16, squeeze_channels=-1) + + with pytest.raises(AssertionError): + # act_cfg must be two dict tuple + SELayer( + 16, + act_cfg=(dict(type='ReLU'), dict(type='Sigmoid'), + dict(type='ReLU'))) + + # Test SELayer forward, channels=64 + input = torch.randn((4, 64, 112, 112)) + se = SELayer(64) + output = se(input) + assert se.conv1.out_channels == 8 + assert se.conv2.in_channels == 8 + assert output.shape == torch.Size((4, 64, 112, 112)) + + # Test SELayer forward, ratio=4 + input = torch.randn((4, 128, 112, 112)) + se = SELayer(128, ratio=4) + output = se(input) + assert se.conv1.out_channels == 32 + assert se.conv2.in_channels == 32 + assert output.shape == torch.Size((4, 128, 112, 112)) + + # Test SELayer forward, channels=54, ratio=4 + # channels cannot be divisible by ratio + input = torch.randn((1, 54, 76, 103)) + se = SELayer(54, ratio=4) + output = se(input) + assert se.conv1.out_channels == 16 + assert se.conv2.in_channels == 16 + assert output.shape == torch.Size((1, 54, 76, 103)) + + # Test SELayer forward, divisor=2 + se = SELayer(54, ratio=4, divisor=2) + output = se(input) + assert se.conv1.out_channels == 14 + assert se.conv2.in_channels == 14 + assert output.shape == torch.Size((1, 54, 76, 103)) + + # Test SELayer forward, squeeze_channels=25 + input = torch.randn((1, 128, 56, 56)) + se = SELayer(128, squeeze_channels=25) + output = se(input) + assert se.conv1.out_channels == 25 + assert se.conv2.in_channels == 25 + assert output.shape == torch.Size((1, 128, 56, 56)) + + # Test SELayer forward, not used ratio and divisor + input = torch.randn((1, 128, 56, 56)) + se = SELayer( + 128, + squeeze_channels=13, + ratio=4, + divisor=8, + ) + output = se(input) + assert se.conv1.out_channels == 13 + assert se.conv2.in_channels == 13 + assert output.shape == torch.Size((1, 128, 56, 56)) + + # Test SELayer with HSigmoid activate layer + input = torch.randn((4, 128, 56, 56)) + se = SELayer( + 128, + squeeze_channels=25, + act_cfg=(dict(type='ReLU'), dict(type='HSigmoid'))) + output = se(input) + assert se.conv1.out_channels == 25 + assert se.conv2.in_channels == 25 + assert output.shape == torch.Size((4, 128, 56, 56)) diff --git a/tests/test_models/test_utils/test_swiglu_ffn.py b/tests/test_models/test_utils/test_swiglu_ffn.py new file mode 100644 index 0000000..1ae8d76 --- /dev/null +++ b/tests/test_models/test_utils/test_swiglu_ffn.py @@ -0,0 +1,53 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
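+# Note: the weight-shape assertions below follow the usual SwiGLU layout,
+# where w12 maps embed_dims to twice the hidden width (later split into a
+# gate half and a value half) and w3 maps the hidden width back to out_dims.
+# The hidden widths themselves are simply read off the asserted shapes.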
+from unittest import TestCase + +import torch +import torch.nn as nn + +from mmpretrain.models.utils import LayerScale, SwiGLUFFN, SwiGLUFFNFused + + +class TestSwiGLUFFN(TestCase): + + def test_init(self): + swiglu = SwiGLUFFN(embed_dims=4) + assert swiglu.w12.weight.shape == torch.ones((8, 4)).shape + assert swiglu.w3.weight.shape == torch.ones((4, 4)).shape + assert isinstance(swiglu.gamma2, nn.Identity) + + swiglu = SwiGLUFFN(embed_dims=4, layer_scale_init_value=0.1) + assert isinstance(swiglu.gamma2, LayerScale) + + def test_forward(self): + swiglu = SwiGLUFFN(embed_dims=4) + x = torch.randn((1, 8, 4)) + out = swiglu(x) + self.assertEqual(out.size(), x.size()) + + swiglu = SwiGLUFFN(embed_dims=4, out_dims=12) + x = torch.randn((1, 8, 4)) + out = swiglu(x) + self.assertEqual(tuple(out.size()), (1, 8, 12)) + + +class TestSwiGLUFFNFused(TestCase): + + def test_init(self): + swiglu = SwiGLUFFNFused(embed_dims=4) + assert swiglu.w12.weight.shape == torch.ones((16, 4)).shape + assert swiglu.w3.weight.shape == torch.ones((4, 8)).shape + assert isinstance(swiglu.gamma2, nn.Identity) + + swiglu = SwiGLUFFNFused(embed_dims=4, layer_scale_init_value=0.1) + assert isinstance(swiglu.gamma2, LayerScale) + + def test_forward(self): + swiglu = SwiGLUFFNFused(embed_dims=4) + x = torch.randn((1, 8, 4)) + out = swiglu(x) + self.assertEqual(out.size(), x.size()) + + swiglu = SwiGLUFFNFused(embed_dims=4, out_dims=12) + x = torch.randn((1, 8, 4)) + out = swiglu(x) + self.assertEqual(tuple(out.size()), (1, 8, 12)) diff --git a/tests/test_structures/test_datasample.py b/tests/test_structures/test_datasample.py new file mode 100644 index 0000000..088d316 --- /dev/null +++ b/tests/test_structures/test_datasample.py @@ -0,0 +1,108 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
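+# Summary of the behaviour exercised below: set_gt_label/set_pred_label
+# accept ints, sequences, numpy arrays and tensors and store them as integer
+# label tensors, while set_gt_score/set_pred_score require a 1-D score vector
+# whose length matches the number of classes.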
+from unittest import TestCase + +import numpy as np +import torch + +from mmpretrain.structures import DataSample, MultiTaskDataSample + + +class TestDataSample(TestCase): + + def _test_set_label(self, key): + data_sample = DataSample() + method = getattr(data_sample, 'set_' + key) + # Test number + method(1) + self.assertIn(key, data_sample) + label = getattr(data_sample, key) + self.assertIsInstance(label, torch.LongTensor) + + # Test tensor with single number + method(torch.tensor(2)) + self.assertIn(key, data_sample) + label = getattr(data_sample, key) + self.assertIsInstance(label, torch.LongTensor) + + # Test array with single number + method(np.array(3)) + self.assertIn(key, data_sample) + label = getattr(data_sample, key) + self.assertIsInstance(label, torch.LongTensor) + + # Test tensor + method(torch.tensor([1, 2, 3])) + self.assertIn(key, data_sample) + label = getattr(data_sample, key) + self.assertIsInstance(label, torch.Tensor) + self.assertTrue((label == torch.tensor([1, 2, 3])).all()) + + # Test array + method(np.array([1, 2, 3])) + self.assertIn(key, data_sample) + label = getattr(data_sample, key) + self.assertTrue((label == torch.tensor([1, 2, 3])).all()) + + # Test Sequence + method([1, 2, 3]) + self.assertIn(key, data_sample) + label = getattr(data_sample, key) + self.assertTrue((label == torch.tensor([1, 2, 3])).all()) + + # Test unavailable type + with self.assertRaisesRegex(TypeError, " is not"): + method('hi') + + def test_set_gt_label(self): + self._test_set_label('gt_label') + + def test_set_pred_label(self): + self._test_set_label('pred_label') + + def test_set_gt_score(self): + data_sample = DataSample() + data_sample.set_gt_score(torch.tensor([0.1, 0.1, 0.6, 0.1, 0.1])) + self.assertIn('gt_score', data_sample) + torch.testing.assert_allclose(data_sample.gt_score, + [0.1, 0.1, 0.6, 0.1, 0.1]) + + # Test invalid length + with self.assertRaisesRegex(AssertionError, 'should be equal to'): + data_sample.set_gt_score([1, 2]) + + # Test invalid dims + with self.assertRaisesRegex(AssertionError, 'but got 2'): + data_sample.set_gt_score(torch.tensor([[0.1, 0.1, 0.6, 0.1, 0.1]])) + + def test_set_pred_score(self): + data_sample = DataSample() + data_sample.set_pred_score(torch.tensor([0.1, 0.1, 0.6, 0.1, 0.1])) + self.assertIn('pred_score', data_sample) + torch.testing.assert_allclose(data_sample.pred_score, + [0.1, 0.1, 0.6, 0.1, 0.1]) + + # Test invalid length + with self.assertRaisesRegex(AssertionError, 'should be equal to'): + data_sample.set_gt_score([1, 2]) + + # Test invalid dims + with self.assertRaisesRegex(AssertionError, 'but got 2'): + data_sample.set_pred_score( + torch.tensor([[0.1, 0.1, 0.6, 0.1, 0.1]])) + + +class TestMultiTaskDataSample(TestCase): + + def test_multi_task_data_sample(self): + gt_label = {'task0': {'task00': 1, 'task01': 1}, 'task1': 1} + data_sample = MultiTaskDataSample() + task_sample = DataSample().set_gt_label(gt_label['task1']) + data_sample.set_field(task_sample, 'task1') + data_sample.set_field(MultiTaskDataSample(), 'task0') + for task_name in gt_label['task0']: + task_sample = DataSample().set_gt_label( + gt_label['task0'][task_name]) + data_sample.task0.set_field(task_sample, task_name) + self.assertIsInstance(data_sample.task0, MultiTaskDataSample) + self.assertIsInstance(data_sample.task1, DataSample) + self.assertIsInstance(data_sample.task0.task00, DataSample) diff --git a/tests/test_structures/test_utils.py b/tests/test_structures/test_utils.py new file mode 100644 index 0000000..b0ba439 --- /dev/null +++ 
b/tests/test_structures/test_utils.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch + +from mmpretrain.structures import (batch_label_to_onehot, cat_batch_labels, + tensor_split) + + +class TestStructureUtils(TestCase): + + def test_tensor_split(self): + tensor = torch.tensor([0, 1, 2, 3, 4, 5, 6]) + split_indices = [0, 2, 6, 6] + outs = tensor_split(tensor, split_indices) + self.assertEqual(len(outs), len(split_indices) + 1) + self.assertEqual(outs[0].size(0), 0) + self.assertEqual(outs[1].size(0), 2) + self.assertEqual(outs[2].size(0), 4) + self.assertEqual(outs[3].size(0), 0) + self.assertEqual(outs[4].size(0), 1) + + tensor = torch.tensor([]) + split_indices = [0, 0, 0, 0] + outs = tensor_split(tensor, split_indices) + self.assertEqual(len(outs), len(split_indices) + 1) + + def test_cat_batch_labels(self): + labels = [ + torch.tensor([1]), + torch.tensor([3, 2]), + torch.tensor([0, 1, 4]), + torch.tensor([], dtype=torch.int64), + torch.tensor([], dtype=torch.int64), + ] + + batch_label, split_indices = cat_batch_labels(labels) + self.assertEqual(split_indices, [1, 3, 6, 6]) + self.assertEqual(len(batch_label), 6) + labels = tensor_split(batch_label, split_indices) + self.assertEqual(labels[0].tolist(), [1]) + self.assertEqual(labels[1].tolist(), [3, 2]) + self.assertEqual(labels[2].tolist(), [0, 1, 4]) + self.assertEqual(labels[3].tolist(), []) + self.assertEqual(labels[4].tolist(), []) + + def test_batch_label_to_onehot(self): + labels = [ + torch.tensor([1]), + torch.tensor([3, 2]), + torch.tensor([0, 1, 4]), + torch.tensor([], dtype=torch.int64), + torch.tensor([], dtype=torch.int64), + ] + + batch_label, split_indices = cat_batch_labels(labels) + batch_score = batch_label_to_onehot( + batch_label, split_indices, num_classes=5) + self.assertEqual(batch_score[0].tolist(), [0, 1, 0, 0, 0]) + self.assertEqual(batch_score[1].tolist(), [0, 0, 1, 1, 0]) + self.assertEqual(batch_score[2].tolist(), [1, 1, 0, 0, 1]) + self.assertEqual(batch_score[3].tolist(), [0, 0, 0, 0, 0]) + self.assertEqual(batch_score[4].tolist(), [0, 0, 0, 0, 0]) diff --git a/tests/test_tools.py b/tests/test_tools.py new file mode 100644 index 0000000..013584d --- /dev/null +++ b/tests/test_tools.py @@ -0,0 +1,418 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
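+# These tests drive the command-line tools end to end through subprocess
+# (Popen) from the repository root, so they assume a working `python` on
+# PATH, the demo image under demo/ and the fixtures under tests/data/, and,
+# for the ModelHub/get_model based cases, that pretrained checkpoints can be
+# fetched.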
+import re +import tempfile +from collections import OrderedDict +from pathlib import Path +from subprocess import PIPE, Popen +from unittest import TestCase + +import mmengine +import torch +from mmengine.config import Config + +from mmpretrain import ModelHub, get_model +from mmpretrain.structures import DataSample + +MMPRE_ROOT = Path(__file__).parent.parent +ASSETS_ROOT = Path(__file__).parent / 'data' + + +class TestImageDemo(TestCase): + + def setUp(self): + self.tmpdir = tempfile.TemporaryDirectory() + self.dir = Path(self.tmpdir.name) + + def tearDown(self): + self.tmpdir.cleanup() + + def test_run(self): + command = [ + 'python', + 'demo/image_demo.py', + 'demo/demo.JPEG', + 'mobilevit-xxsmall_3rdparty_in1k', + '--device', + 'cpu', + ] + p = Popen(command, cwd=MMPRE_ROOT, stdout=PIPE) + out, _ = p.communicate() + self.assertIn('sea snake', out.decode()) + + +class TestAnalyzeLogs(TestCase): + + def setUp(self): + self.log_file = ASSETS_ROOT / 'vis_data.json' + self.tmpdir = tempfile.TemporaryDirectory() + self.out_file = Path(self.tmpdir.name) / 'out.png' + + def tearDown(self): + self.tmpdir.cleanup() + + def test_run(self): + command = [ + 'python', + 'tools/analysis_tools/analyze_logs.py', + 'cal_train_time', + str(self.log_file), + ] + p = Popen(command, cwd=MMPRE_ROOT, stdout=PIPE) + out, _ = p.communicate() + self.assertIn('slowest epoch 2, average time is 0.0219', out.decode()) + + command = [ + 'python', + 'tools/analysis_tools/analyze_logs.py', + 'plot_curve', + str(self.log_file), + '--keys', + 'accuracy/top1', + '--out', + str(self.out_file), + ] + p = Popen(command, cwd=MMPRE_ROOT, stdout=PIPE) + out, _ = p.communicate() + self.assertIn(str(self.log_file), out.decode()) + self.assertIn(str(self.out_file), out.decode()) + self.assertTrue(self.out_file.exists()) + + +class TestAnalyzeResults(TestCase): + + def setUp(self): + self.tmpdir = tempfile.TemporaryDirectory() + self.dir = Path(self.tmpdir.name) + + dataset_cfg = dict( + type='CustomDataset', + data_root=str(ASSETS_ROOT / 'dataset'), + ) + config = Config(dict(test_dataloader=dict(dataset=dataset_cfg))) + self.config_file = self.dir / 'config.py' + config.dump(self.config_file) + + results = [{ + 'gt_label': 1, + 'pred_label': 0, + 'pred_score': [0.9, 0.1], + 'sample_idx': 0, + }, { + 'gt_label': 0, + 'pred_label': 0, + 'pred_score': [0.9, 0.1], + 'sample_idx': 1, + }] + self.result_file = self.dir / 'results.pkl' + mmengine.dump(results, self.result_file) + + def tearDown(self): + self.tmpdir.cleanup() + + def test_run(self): + command = [ + 'python', + 'tools/analysis_tools/analyze_results.py', + str(self.config_file), + str(self.result_file), + '--out-dir', + str(self.tmpdir.name), + ] + p = Popen(command, cwd=MMPRE_ROOT, stdout=PIPE) + p.communicate() + self.assertTrue((self.dir / 'success/2.jpeg.png').exists()) + self.assertTrue((self.dir / 'fail/1.JPG.png').exists()) + + +class TestPrintConfig(TestCase): + + def setUp(self): + self.tmpdir = tempfile.TemporaryDirectory() + self.config_file = MMPRE_ROOT / 'configs/resnet/resnet18_8xb32_in1k.py' + + def tearDown(self): + self.tmpdir.cleanup() + + def test_run(self): + command = [ + 'python', + 'tools/misc/print_config.py', + str(self.config_file), + ] + p = Popen(command, cwd=MMPRE_ROOT, stdout=PIPE) + out, _ = p.communicate() + out = out.decode().strip().replace('\r\n', '\n') + self.assertEqual(out, + Config.fromfile(self.config_file).pretty_text.strip()) + + +class TestVerifyDataset(TestCase): + + def setUp(self): + self.tmpdir = 
tempfile.TemporaryDirectory() + self.dir = Path(self.tmpdir.name) + dataset_cfg = dict( + type='CustomDataset', + ann_file=str(self.dir / 'ann.txt'), + pipeline=[dict(type='LoadImageFromFile')], + data_root=str(ASSETS_ROOT / 'dataset'), + ) + ann_file = '\n'.join(['a/2.JPG 0', 'b/2.jpeg 1', 'b/subb/3.jpg 1']) + (self.dir / 'ann.txt').write_text(ann_file) + config = Config(dict(train_dataloader=dict(dataset=dataset_cfg))) + self.config_file = Path(self.tmpdir.name) / 'config.py' + config.dump(self.config_file) + + def tearDown(self): + self.tmpdir.cleanup() + + def test_run(self): + command = [ + 'python', + 'tools/misc/verify_dataset.py', + str(self.config_file), + '--out-path', + str(self.dir / 'log.log'), + ] + p = Popen(command, cwd=MMPRE_ROOT, stdout=PIPE) + out, _ = p.communicate() + self.assertIn( + f"{ASSETS_ROOT/'dataset/a/2.JPG'} cannot be read correctly", + out.decode().strip()) + self.assertTrue((self.dir / 'log.log').exists()) + + +class TestEvalMetric(TestCase): + + def setUp(self): + self.tmpdir = tempfile.TemporaryDirectory() + self.dir = Path(self.tmpdir.name) + + results = [ + DataSample().set_gt_label(1).set_pred_label(0).set_pred_score( + [0.6, 0.3, 0.1]).to_dict(), + DataSample().set_gt_label(0).set_pred_label(0).set_pred_score( + [0.6, 0.3, 0.1]).to_dict(), + ] + self.result_file = self.dir / 'results.pkl' + mmengine.dump(results, self.result_file) + + def tearDown(self): + self.tmpdir.cleanup() + + def test_run(self): + command = [ + 'python', + 'tools/analysis_tools/eval_metric.py', + str(self.result_file), + '--metric', + 'type=Accuracy', + 'topk=1,2', + ] + p = Popen(command, cwd=MMPRE_ROOT, stdout=PIPE) + out, _ = p.communicate() + self.assertIn('accuracy/top1', out.decode()) + self.assertIn('accuracy/top2', out.decode()) + + +class TestVisScheduler(TestCase): + + def setUp(self): + self.tmpdir = tempfile.TemporaryDirectory() + self.dir = Path(self.tmpdir.name) + + config = Config.fromfile(MMPRE_ROOT / + 'configs/resnet/resnet18_8xb32_in1k.py') + config.param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.01, + by_epoch=True, + end=1, + convert_to_iter_based=True), + dict(type='CosineAnnealingLR', by_epoch=True, begin=1), + ] + config.work_dir = str(self.dir) + config.train_cfg.max_epochs = 2 + self.config_file = Path(self.tmpdir.name) / 'config.py' + config.dump(self.config_file) + + def tearDown(self): + self.tmpdir.cleanup() + + def test_run(self): + command = [ + 'python', + 'tools/visualization/vis_scheduler.py', + str(self.config_file), + '--dataset-size', + '100', + '--not-show', + '--save-path', + str(self.dir / 'out.png'), + ] + p = Popen(command, cwd=MMPRE_ROOT, stdout=PIPE) + p.communicate() + self.assertTrue((self.dir / 'out.png').exists()) + + +class TestPublishModel(TestCase): + + def setUp(self): + self.tmpdir = tempfile.TemporaryDirectory() + self.dir = Path(self.tmpdir.name) + + ckpt = dict( + state_dict=OrderedDict({ + 'a': torch.tensor(1.), + }), + ema_state_dict=OrderedDict({ + 'step': 1, + 'module.a': torch.tensor(2.), + })) + self.ckpt_file = self.dir / 'ckpt.pth' + torch.save(ckpt, self.ckpt_file) + + def tearDown(self): + self.tmpdir.cleanup() + + def test_run(self): + command = [ + 'python', + 'tools/model_converters/publish_model.py', + str(self.ckpt_file), + str(self.ckpt_file), + '--dataset-type', + 'ImageNet', + '--no-ema', + ] + p = Popen(command, cwd=MMPRE_ROOT, stdout=PIPE) + out, _ = p.communicate() + self.assertIn('and drop the EMA weights.', out.decode()) + self.assertIn('Successfully generated', out.decode()) + 
output_ckpt = re.findall(r'ckpt_\d{8}-\w{8}.pth', out.decode()) + self.assertGreater(len(output_ckpt), 0) + output_ckpt = output_ckpt[0] + self.assertTrue((self.dir / output_ckpt).exists()) + # The input file won't be overridden. + self.assertTrue(self.ckpt_file.exists()) + + +class TestVisCam(TestCase): + + def setUp(self): + self.tmpdir = tempfile.TemporaryDirectory() + self.dir = Path(self.tmpdir.name) + + model = get_model('mobilevit-xxsmall_3rdparty_in1k') + self.config_file = self.dir / 'config.py' + model._config.dump(self.config_file) + + self.ckpt_file = self.dir / 'ckpt.pth' + torch.save(model.state_dict(), self.ckpt_file) + + def tearDown(self): + self.tmpdir.cleanup() + + def test_run(self): + command = [ + 'python', + 'tools/visualization/vis_cam.py', + str(ASSETS_ROOT / 'color.jpg'), + str(self.config_file), + str(self.ckpt_file), + '--save-path', + str(self.dir / 'cam.jpg'), + ] + p = Popen(command, cwd=MMPRE_ROOT, stdout=PIPE) + out, _ = p.communicate() + self.assertIn('backbone.conv_1x1_exp.bn', out.decode()) + self.assertTrue((self.dir / 'cam.jpg').exists()) + + +class TestConfusionMatrix(TestCase): + + def setUp(self): + self.tmpdir = tempfile.TemporaryDirectory() + self.dir = Path(self.tmpdir.name) + + self.config_file = MMPRE_ROOT / 'configs/resnet/resnet18_8xb32_in1k.py' + + results = [ + DataSample().set_gt_label(1).set_pred_label(0).set_pred_score( + [0.6, 0.3, 0.1]).to_dict(), + DataSample().set_gt_label(0).set_pred_label(0).set_pred_score( + [0.6, 0.3, 0.1]).to_dict(), + ] + self.result_file = self.dir / 'results.pkl' + mmengine.dump(results, self.result_file) + + def tearDown(self): + self.tmpdir.cleanup() + + def test_run(self): + command = [ + 'python', + 'tools/analysis_tools/confusion_matrix.py', + str(self.config_file), + str(self.result_file), + '--out', + str(self.dir / 'result.pkl'), + ] + Popen(command, cwd=MMPRE_ROOT, stdout=PIPE).wait() + result = mmengine.load(self.dir / 'result.pkl') + torch.testing.assert_allclose( + result, torch.tensor([ + [1, 0, 0], + [1, 0, 0], + [0, 0, 0], + ])) + + +class TestVisTsne(TestCase): + + def setUp(self): + self.tmpdir = tempfile.TemporaryDirectory() + self.dir = Path(self.tmpdir.name) + + config = ModelHub.get('mobilevit-xxsmall_3rdparty_in1k').config + test_dataloader = dict( + batch_size=1, + dataset=dict( + type='CustomDataset', + data_root=str(ASSETS_ROOT / 'dataset'), + pipeline=config.test_dataloader.dataset.pipeline, + ), + sampler=dict(type='DefaultSampler', shuffle=False), + ) + config.test_dataloader = mmengine.ConfigDict(test_dataloader) + self.config_file = self.dir / 'config.py' + config.dump(self.config_file) + + def tearDown(self): + self.tmpdir.cleanup() + + def test_run(self): + command = [ + 'python', + 'tools/visualization/vis_tsne.py', + str(self.config_file), + '--work-dir', + str(self.dir), + '--perplexity', + '2', + ] + Popen(command, cwd=MMPRE_ROOT, stdout=PIPE).wait() + self.assertTrue(len(list(self.dir.glob('tsne_*/feat_*.png'))) > 0) + + +class TestGetFlops(TestCase): + + def test_run(self): + command = [ + 'python', + 'tools/analysis_tools/get_flops.py', + 'mobilevit-xxsmall_3rdparty_in1k', + ] + ret_code = Popen(command, cwd=MMPRE_ROOT).wait() + self.assertEqual(ret_code, 0) diff --git a/tests/test_utils/test_analyze.py b/tests/test_utils/test_analyze.py new file mode 100644 index 0000000..d1bb2c4 --- /dev/null +++ b/tests/test_utils/test_analyze.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import os.path as osp +import tempfile + +from mmpretrain.utils import load_json_log + + +def test_load_json_log(): + demo_log = """\ +{"lr": 0.0001, "data_time": 0.003, "loss": 2.29, "time": 0.010, "epoch": 1, "step": 150} +{"lr": 0.0001, "data_time": 0.002, "loss": 2.28, "time": 0.007, "epoch": 1, "step": 300} +{"lr": 0.0001, "data_time": 0.001, "loss": 2.27, "time": 0.008, "epoch": 1, "step": 450} +{"accuracy/top1": 23.98, "accuracy/top5": 66.05, "step": 1} +{"lr": 0.0001, "data_time": 0.001, "loss": 2.25, "time": 0.014, "epoch": 2, "step": 619} +{"lr": 0.0001, "data_time": 0.000, "loss": 2.24, "time": 0.012, "epoch": 2, "step": 769} +{"lr": 0.0001, "data_time": 0.003, "loss": 2.23, "time": 0.009, "epoch": 2, "step": 919} +{"accuracy/top1": 41.82, "accuracy/top5": 81.26, "step": 2} +{"lr": 0.0001, "data_time": 0.002, "loss": 2.21, "time": 0.007, "epoch": 3, "step": 1088} +{"lr": 0.0001, "data_time": 0.005, "loss": 2.18, "time": 0.009, "epoch": 3, "step": 1238} +{"lr": 0.0001, "data_time": 0.002, "loss": 2.16, "time": 0.008, "epoch": 3, "step": 1388} +{"accuracy/top1": 54.07, "accuracy/top5": 89.80, "step": 3} +""" # noqa: E501 + with tempfile.TemporaryDirectory() as tmpdir: + json_log = osp.join(tmpdir, 'scalars.json') + with open(json_log, 'w') as f: + f.write(demo_log) + + log_dict = load_json_log(json_log) + + assert log_dict.keys() == {'train', 'val'} + assert log_dict['train'][3] == { + 'lr': 0.0001, + 'data_time': 0.001, + 'loss': 2.25, + 'time': 0.014, + 'epoch': 2, + 'step': 619 + } + assert log_dict['val'][2] == { + 'accuracy/top1': 54.07, + 'accuracy/top5': 89.80, + 'step': 3 + } diff --git a/tests/test_utils/test_setup_env.py b/tests/test_utils/test_setup_env.py new file mode 100644 index 0000000..4f8adee --- /dev/null +++ b/tests/test_utils/test_setup_env.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import datetime +import sys +from unittest import TestCase + +from mmengine import DefaultScope + +from mmpretrain.utils import register_all_modules + + +class TestSetupEnv(TestCase): + + def test_register_all_modules(self): + from mmpretrain.registry import DATASETS + + # not init default scope + sys.modules.pop('mmpretrain.datasets', None) + sys.modules.pop('mmpretrain.datasets.custom', None) + DATASETS._module_dict.pop('CustomDataset', None) + self.assertFalse('CustomDataset' in DATASETS.module_dict) + register_all_modules(init_default_scope=False) + self.assertTrue('CustomDataset' in DATASETS.module_dict) + + # init default scope + sys.modules.pop('mmpretrain.datasets') + sys.modules.pop('mmpretrain.datasets.custom') + DATASETS._module_dict.pop('CustomDataset', None) + self.assertFalse('CustomDataset' in DATASETS.module_dict) + register_all_modules(init_default_scope=True) + self.assertTrue('CustomDataset' in DATASETS.module_dict) + self.assertEqual(DefaultScope.get_current_instance().scope_name, + 'mmpretrain') + + # init default scope when another scope is init + name = f'test-{datetime.datetime.now()}' + DefaultScope.get_instance(name, scope_name='test') + with self.assertWarnsRegex( + Warning, + 'The current default scope "test" is not "mmpretrain"'): + register_all_modules(init_default_scope=True) diff --git a/tests/test_utils/test_version_utils.py b/tests/test_utils/test_version_utils.py new file mode 100644 index 0000000..07105e0 --- /dev/null +++ b/tests/test_utils/test_version_utils.py @@ -0,0 +1,21 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from mmpretrain import digit_version + + +def test_digit_version(): + assert digit_version('0.2.16') == (0, 2, 16, 0, 0, 0) + assert digit_version('1.2.3') == (1, 2, 3, 0, 0, 0) + assert digit_version('1.2.3rc0') == (1, 2, 3, 0, -1, 0) + assert digit_version('1.2.3rc1') == (1, 2, 3, 0, -1, 1) + assert digit_version('1.0rc0') == (1, 0, 0, 0, -1, 0) + assert digit_version('1.0') == digit_version('1.0.0') + assert digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5') + assert digit_version('1.0.0dev') < digit_version('1.0.0a') + assert digit_version('1.0.0a') < digit_version('1.0.0a1') + assert digit_version('1.0.0a') < digit_version('1.0.0b') + assert digit_version('1.0.0b') < digit_version('1.0.0rc') + assert digit_version('1.0.0rc1') < digit_version('1.0.0') + assert digit_version('1.0.0') < digit_version('1.0.0post') + assert digit_version('1.0.0post') < digit_version('1.0.0post1') + assert digit_version('v1') == (1, 0, 0, 0, 0, 0) + assert digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0) diff --git a/tests/test_visualization/test_visualizer.py b/tests/test_visualization/test_visualizer.py new file mode 100644 index 0000000..900e495 --- /dev/null +++ b/tests/test_visualization/test_visualizer.py @@ -0,0 +1,200 @@ +# Copyright (c) Open-MMLab. All rights reserved. +import os.path as osp +import tempfile +from unittest import TestCase +from unittest.mock import patch + +import numpy as np +import torch + +from mmpretrain.structures import DataSample +from mmpretrain.visualization import UniversalVisualizer + + +class TestUniversalVisualizer(TestCase): + + def setUp(self) -> None: + super().setUp() + tmpdir = tempfile.TemporaryDirectory() + self.tmpdir = tmpdir + self.vis = UniversalVisualizer( + save_dir=tmpdir.name, + vis_backends=[dict(type='LocalVisBackend')], + ) + + def test_visualize_cls(self): + image = np.ones((10, 10, 3), np.uint8) + data_sample = DataSample().set_gt_label(1).set_pred_label(1).\ + set_pred_score(torch.tensor([0.1, 0.8, 0.1])) + + # Test show + def mock_show(drawn_img, win_name, wait_time): + self.assertFalse((image == drawn_img).all()) + self.assertEqual(win_name, 'test_cls') + self.assertEqual(wait_time, 0) + + with patch.object(self.vis, 'show', mock_show): + self.vis.visualize_cls( + image=image, + data_sample=data_sample, + show=True, + name='test_cls', + step=1) + + # Test storage backend. 
+ save_file = osp.join(self.tmpdir.name, + 'vis_data/vis_image/test_cls_1.png') + self.assertTrue(osp.exists(save_file)) + + # Test out_file + out_file = osp.join(self.tmpdir.name, 'results.png') + self.vis.visualize_cls( + image=image, data_sample=data_sample, out_file=out_file) + self.assertTrue(osp.exists(out_file)) + + # Test with dataset_meta + self.vis.dataset_meta = {'classes': ['cat', 'bird', 'dog']} + + def patch_texts(text, *_, **__): + self.assertEqual( + text, '\n'.join([ + 'Ground truth: 1 (bird)', + 'Prediction: 1, 0.80 (bird)', + ])) + + with patch.object(self.vis, 'draw_texts', patch_texts): + self.vis.visualize_cls(image, data_sample) + + # Test without pred_label + def patch_texts(text, *_, **__): + self.assertEqual(text, '\n'.join([ + 'Ground truth: 1 (bird)', + ])) + + with patch.object(self.vis, 'draw_texts', patch_texts): + self.vis.visualize_cls(image, data_sample, draw_pred=False) + + # Test without gt_label + def patch_texts(text, *_, **__): + self.assertEqual(text, '\n'.join([ + 'Prediction: 1, 0.80 (bird)', + ])) + + with patch.object(self.vis, 'draw_texts', patch_texts): + self.vis.visualize_cls(image, data_sample, draw_gt=False) + + # Test without score + del data_sample.pred_score + + def patch_texts(text, *_, **__): + self.assertEqual( + text, '\n'.join([ + 'Ground truth: 1 (bird)', + 'Prediction: 1 (bird)', + ])) + + with patch.object(self.vis, 'draw_texts', patch_texts): + self.vis.visualize_cls(image, data_sample) + + # Test adaptive font size + def assert_font_size(target_size): + + def draw_texts(text, font_sizes, *_, **__): + self.assertEqual(font_sizes, target_size) + + return draw_texts + + with patch.object(self.vis, 'draw_texts', assert_font_size(7)): + self.vis.visualize_cls( + np.ones((224, 384, 3), np.uint8), data_sample) + + with patch.object(self.vis, 'draw_texts', assert_font_size(2)): + self.vis.visualize_cls( + np.ones((10, 384, 3), np.uint8), data_sample) + + with patch.object(self.vis, 'draw_texts', assert_font_size(21)): + self.vis.visualize_cls( + np.ones((1000, 1000, 3), np.uint8), data_sample) + + # Test rescale image + with patch.object(self.vis, 'draw_texts', assert_font_size(14)): + self.vis.visualize_cls( + np.ones((224, 384, 3), np.uint8), + data_sample, + rescale_factor=2.) + + def test_visualize_image_retrieval(self): + image = np.ones((10, 10, 3), np.uint8) + data_sample = DataSample().set_pred_score([0.1, 0.8, 0.1]) + + class ToyPrototype: + + def get_data_info(self, idx): + img_path = osp.join(osp.dirname(__file__), '../data/color.jpg') + return {'img_path': img_path, 'sample_idx': idx} + + prototype_dataset = ToyPrototype() + + # Test show + def mock_show(drawn_img, win_name, wait_time): + if image.shape == drawn_img.shape: + self.assertFalse((image == drawn_img).all()) + self.assertEqual(win_name, 'test_retrieval') + self.assertEqual(wait_time, 0) + + with patch.object(self.vis, 'show', mock_show): + self.vis.visualize_image_retrieval( + image, + data_sample, + prototype_dataset, + show=True, + name='test_retrieval', + step=1) + + # Test storage backend. 
+ save_file = osp.join(self.tmpdir.name, + 'vis_data/vis_image/test_retrieval_1.png') + self.assertTrue(osp.exists(save_file)) + + # Test out_file + out_file = osp.join(self.tmpdir.name, 'results.png') + self.vis.visualize_image_retrieval( + image, + data_sample, + prototype_dataset, + out_file=out_file, + ) + self.assertTrue(osp.exists(out_file)) + + def test_visualize_masked_image(self): + image = np.ones((10, 10, 3), np.uint8) + data_sample = DataSample().set_mask( + torch.tensor([ + [0, 0, 1, 1], + [0, 1, 1, 0], + [1, 1, 0, 0], + [1, 0, 0, 1], + ])) + + # Test show + def mock_show(drawn_img, win_name, wait_time): + self.assertTupleEqual(drawn_img.shape, (224, 224, 3)) + self.assertEqual(win_name, 'test_mask') + self.assertEqual(wait_time, 0) + + with patch.object(self.vis, 'show', mock_show): + self.vis.visualize_masked_image( + image, data_sample, show=True, name='test_mask', step=1) + + # Test storage backend. + save_file = osp.join(self.tmpdir.name, + 'vis_data/vis_image/test_mask_1.png') + self.assertTrue(osp.exists(save_file)) + + # Test out_file + out_file = osp.join(self.tmpdir.name, 'results.png') + self.vis.visualize_masked_image(image, data_sample, out_file=out_file) + self.assertTrue(osp.exists(out_file)) + + def tearDown(self): + self.tmpdir.cleanup() diff --git a/tools/analysis_tools/analyze_logs.py b/tools/analysis_tools/analyze_logs.py new file mode 100644 index 0000000..e12f034 --- /dev/null +++ b/tools/analysis_tools/analyze_logs.py @@ -0,0 +1,218 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import re +from itertools import groupby + +import matplotlib.pyplot as plt +import numpy as np + +from mmpretrain.utils import load_json_log + + +def cal_train_time(log_dicts, args): + """Compute the average time per training iteration.""" + for i, log_dict in enumerate(log_dicts): + print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}') + train_logs = log_dict['train'] + + if 'epoch' in train_logs[0]: + epoch_ave_times = [] + for _, logs in groupby(train_logs, lambda log: log['epoch']): + if args.include_outliers: + all_time = np.array([log['time'] for log in logs]) + else: + all_time = np.array([log['time'] for log in logs])[1:] + epoch_ave_times.append(all_time.mean()) + epoch_ave_times = np.array(epoch_ave_times) + slowest_epoch = epoch_ave_times.argmax() + fastest_epoch = epoch_ave_times.argmin() + std_over_epoch = epoch_ave_times.std() + print(f'slowest epoch {slowest_epoch + 1}, ' + f'average time is {epoch_ave_times[slowest_epoch]:.4f}') + print(f'fastest epoch {fastest_epoch + 1}, ' + f'average time is {epoch_ave_times[fastest_epoch]:.4f}') + print(f'time std over epochs is {std_over_epoch:.4f}') + + avg_iter_time = np.array([log['time'] for log in train_logs]).mean() + print(f'average iter time: {avg_iter_time:.4f} s/iter') + print() + + +def get_legends(args): + """if legend is None, use {filename}_{key} as legend.""" + legend = args.legend + if legend is None: + legend = [] + for json_log in args.json_logs: + for metric in args.keys: + # remove '.json' in the end of log names + basename = os.path.basename(json_log)[:-5] + if basename.endswith('.log'): + basename = basename[:-4] + legend.append(f'{basename}_{metric}') + assert len(legend) == (len(args.json_logs) * len(args.keys)) + return legend + + +def plot_phase_train(metric, train_logs, curve_label): + """plot phase of train curve.""" + xs = np.array([log['step'] for log in train_logs]) + ys = np.array([log[metric] for log in train_logs]) + + if 'epoch' in 
train_logs[0]: + scale_factor = train_logs[-1]['step'] / train_logs[-1]['epoch'] + xs = xs / scale_factor + plt.xlabel('Epochs') + else: + plt.xlabel('Iters') + + plt.plot(xs, ys, label=curve_label, linewidth=0.75) + + +def plot_phase_val(metric, val_logs, curve_label): + """plot phase of val curve.""" + xs = np.array([log['step'] for log in val_logs]) + ys = np.array([log[metric] for log in val_logs]) + + plt.xlabel('Steps') + plt.plot(xs, ys, label=curve_label, linewidth=0.75) + + +def plot_curve_helper(log_dicts, metrics, args, legend): + """plot curves from log_dicts by metrics.""" + num_metrics = len(metrics) + for i, log_dict in enumerate(log_dicts): + for j, key in enumerate(metrics): + json_log = args.json_logs[i] + print(f'plot curve of {json_log}, metric is {key}') + curve_label = legend[i * num_metrics + j] + + train_keys = {} if len(log_dict['train']) == 0 else set( + log_dict['train'][0].keys()) - {'step', 'epoch'} + val_keys = {} if len(log_dict['val']) == 0 else set( + log_dict['val'][0].keys()) - {'step'} + + if key in val_keys: + plot_phase_val(key, log_dict['val'], curve_label) + elif key in train_keys: + plot_phase_train(key, log_dict['train'], curve_label) + else: + raise ValueError( + f'Invalid key "{key}", please choose from ' + f'{set.union(set(train_keys), set(val_keys))}.') + plt.legend() + + +def plot_curve(log_dicts, args): + """Plot train metric-iter graph.""" + # set style + try: + import seaborn as sns + sns.set_style(args.style) + except ImportError: + pass + + # set plot window size + wind_w, wind_h = args.window_size.split('*') + wind_w, wind_h = int(wind_w), int(wind_h) + plt.figure(figsize=(wind_w, wind_h)) + + # get legends and metrics + legends = get_legends(args) + metrics = args.keys + + # plot curves from log_dicts by metrics + plot_curve_helper(log_dicts, metrics, args, legends) + + # set title and show or save + if args.title is not None: + plt.title(args.title) + if args.out is None: + plt.show() + else: + print(f'save curve to: {args.out}') + plt.savefig(args.out) + plt.cla() + + +def add_plot_parser(subparsers): + parser_plt = subparsers.add_parser( + 'plot_curve', help='parser for plotting curves') + parser_plt.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_plt.add_argument( + '--keys', + type=str, + nargs='+', + default=['loss'], + help='the metric that you want to plot') + parser_plt.add_argument('--title', type=str, help='title of figure') + parser_plt.add_argument( + '--legend', + type=str, + nargs='+', + default=None, + help='legend of each plot') + parser_plt.add_argument( + '--style', + type=str, + default='whitegrid', + help='style of the figure, need `seaborn` package.') + parser_plt.add_argument('--out', type=str, default=None) + parser_plt.add_argument( + '--window-size', + default='12*7', + help='size of the window to display images, in format of "$W*$H".') + + +def add_time_parser(subparsers): + parser_time = subparsers.add_parser( + 'cal_train_time', + help='parser for computing the average time per training iteration') + parser_time.add_argument( + 'json_logs', + type=str, + nargs='+', + help='path of train log in json format') + parser_time.add_argument( + '--include-outliers', + action='store_true', + help='include the first value of every epoch when computing ' + 'the average time') + + +def parse_args(): + parser = argparse.ArgumentParser(description='Analyze Json Log') + # currently only support plot curve and calculate average train time + subparsers = 
parser.add_subparsers(dest='task', help='task parser') + add_plot_parser(subparsers) + add_time_parser(subparsers) + args = parser.parse_args() + + if hasattr(args, 'window_size') and args.window_size != '': + assert re.match(r'\d+\*\d+', args.window_size), \ + "'window-size' must be in format 'W*H'." + return args + + +def main(): + args = parse_args() + + json_logs = args.json_logs + for json_log in json_logs: + assert json_log.endswith('.json') + + log_dicts = [load_json_log(json_log) for json_log in json_logs] + + if args.task == 'cal_train_time': + cal_train_time(log_dicts, args) + elif args.task == 'plot_curve': + plot_curve(log_dicts, args) + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/analyze_results.py b/tools/analysis_tools/analyze_results.py new file mode 100644 index 0000000..5f2feb3 --- /dev/null +++ b/tools/analysis_tools/analyze_results.py @@ -0,0 +1,121 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from pathlib import Path + +import mmcv +import mmengine +import torch +from mmengine import DictAction + +from mmpretrain.datasets import build_dataset +from mmpretrain.structures import DataSample +from mmpretrain.visualization import UniversalVisualizer + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMPreTrain evaluate prediction success/fail') + parser.add_argument('config', help='test config file path') + parser.add_argument('result', help='test result json/pkl file') + parser.add_argument( + '--out-dir', required=True, help='dir to store output files') + parser.add_argument( + '--topk', + default=20, + type=int, + help='Number of images to select for success/fail') + parser.add_argument( + '--rescale-factor', + '-r', + type=float, + help='image rescale factor, which is useful if the output is too ' + 'large or too small.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + + return args + + +def save_imgs(result_dir, folder_name, results, dataset, rescale_factor=None): + full_dir = osp.join(result_dir, folder_name) + vis = UniversalVisualizer() + vis.dataset_meta = {'classes': dataset.CLASSES} + + # save imgs + dump_infos = [] + for data_sample in results: + data_info = dataset.get_data_info(data_sample.sample_idx) + if 'img' in data_info: + img = data_info['img'] + name = str(data_sample.sample_idx) + elif 'img_path' in data_info: + img = mmcv.imread(data_info['img_path'], channel_order='rgb') + name = Path(data_info['img_path']).name + else: + raise ValueError('Cannot load images from the dataset infos.') + if rescale_factor is not None: + img = mmcv.imrescale(img, rescale_factor) + vis.visualize_cls( + img, data_sample, out_file=osp.join(full_dir, name + '.png')) + + dump = dict() + for k, v in data_sample.items(): + if isinstance(v, torch.Tensor): + dump[k] = v.tolist() + else: + dump[k] = v + dump_infos.append(dump) + + mmengine.dump(dump_infos, osp.join(full_dir, folder_name + '.json')) + + +def main(): + args = parse_args() + + cfg = mmengine.Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # build the dataloader + cfg.test_dataloader.dataset.pipeline = [] + dataset = build_dataset(cfg.test_dataloader.dataset) + + results = list() + for result in mmengine.load(args.result): + data_sample = DataSample() + data_sample.set_metainfo({'sample_idx': result['sample_idx']}) + data_sample.set_gt_label(result['gt_label']) + data_sample.set_pred_label(result['pred_label']) + data_sample.set_pred_score(result['pred_score']) + results.append(data_sample) + + # sort result + results = sorted(results, key=lambda x: torch.max(x.pred_score)) + + success = list() + fail = list() + for data_sample in results: + if (data_sample.pred_label == data_sample.gt_label).all(): + success.append(data_sample) + else: + fail.append(data_sample) + + success = success[:args.topk] + fail = fail[:args.topk] + + save_imgs(args.out_dir, 'success', success, dataset, args.rescale_factor) + save_imgs(args.out_dir, 'fail', fail, dataset, args.rescale_factor) + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/confusion_matrix.py b/tools/analysis_tools/confusion_matrix.py new file mode 100644 index 0000000..0e6382c --- /dev/null +++ b/tools/analysis_tools/confusion_matrix.py @@ -0,0 +1,108 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +import tempfile + +import mmengine +from mmengine.config import Config, DictAction +from mmengine.evaluator import Evaluator +from mmengine.runner import Runner + +from mmpretrain.evaluation import ConfusionMatrix +from mmpretrain.registry import DATASETS +from mmpretrain.utils import register_all_modules + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Eval a checkpoint and draw the confusion matrix.') + parser.add_argument('config', help='test config file path') + parser.add_argument( + 'ckpt_or_result', + type=str, + help='The checkpoint file (.pth) or ' + 'dumpped predictions pickle file (.pkl).') + parser.add_argument('--out', help='the file to save the confusion matrix.') + parser.add_argument( + '--show', + action='store_true', + help='whether to display the metric result by matplotlib if supports.') + parser.add_argument( + '--show-path', type=str, help='Path to save the visualization image.') + parser.add_argument( + '--include-values', + action='store_true', + help='To draw the values in the figure.') + parser.add_argument( + '--cmap', + type=str, + default='viridis', + help='The color map to use. Defaults to "viridis".') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + # register all modules in mmpretrain into the registries + # do not init the default scope here because it will be init in the runner + register_all_modules(init_default_scope=False) + + # load config + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + if args.ckpt_or_result.endswith('.pth'): + # Set confusion matrix as the metric. + cfg.test_evaluator = dict(type='ConfusionMatrix') + + cfg.load_from = str(args.ckpt_or_result) + + with tempfile.TemporaryDirectory() as tmpdir: + cfg.work_dir = tmpdir + runner = Runner.from_cfg(cfg) + classes = runner.test_loop.dataloader.dataset.metainfo.get( + 'classes') + cm = runner.test()['confusion_matrix/result'] + else: + predictions = mmengine.load(args.ckpt_or_result) + evaluator = Evaluator(ConfusionMatrix()) + metrics = evaluator.offline_evaluate(predictions, None) + cm = metrics['confusion_matrix/result'] + try: + # Try to build the dataset. + dataset = DATASETS.build({ + **cfg.test_dataloader.dataset, 'pipeline': [] + }) + classes = dataset.metainfo.get('classes') + except Exception: + classes = None + + if args.out is not None: + mmengine.dump(cm, args.out) + + if args.show or args.show_path is not None: + fig = ConfusionMatrix.plot( + cm, + show=args.show, + classes=classes, + include_values=args.include_values, + cmap=args.cmap) + if args.show_path is not None: + fig.savefig(args.show_path) + print(f'The confusion matrix is saved at {args.show_path}.') + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/eval_metric.py b/tools/analysis_tools/eval_metric.py new file mode 100644 index 0000000..4b2fec1 --- /dev/null +++ b/tools/analysis_tools/eval_metric.py @@ -0,0 +1,62 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
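+# Example invocation (the result file path is a placeholder):
+#     python tools/analysis_tools/eval_metric.py results.pkl \
+#         --metric type=Accuracy topk=1,5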
+import argparse + +import mmengine +import rich +from mmengine import DictAction +from mmengine.evaluator import Evaluator + +from mmpretrain.registry import METRICS + +HELP_URL = ( + 'https://mmpretrain.readthedocs.io/en/latest/useful_tools/' + 'log_result_analysis.html#how-to-conduct-offline-metric-evaluation') + +prog_description = f"""\ +Evaluate metric of the results saved in pkl format. + +The detailed usage can be found in {HELP_URL} +""" + + +def parse_args(): + parser = argparse.ArgumentParser(description=prog_description) + parser.add_argument('pkl_results', help='Results in pickle format') + parser.add_argument( + '--metric', + nargs='+', + action='append', + dest='metric_options', + help='The metric config, the key-value pair in xxx=yyy format will be ' + 'parsed as the metric config items. You can specify multiple metrics ' + 'by use multiple `--metric`. For list type value, you can use ' + '"key=[a,b]" or "key=a,b", and it also allows nested list/tuple ' + 'values, e.g. "key=[(a,b),(c,d)]".') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + if args.metric_options is None: + raise ValueError('Please speicfy at least one `--metric`. ' + f'The detailed usage can be found in {HELP_URL}') + + test_metrics = [] + for metric_option in args.metric_options: + metric_cfg = {} + for kv in metric_option: + k, v = kv.split('=', maxsplit=1) + metric_cfg[k] = DictAction._parse_iterable(v) + test_metrics.append(METRICS.build(metric_cfg)) + + predictions = mmengine.load(args.pkl_results) + + evaluator = Evaluator(test_metrics) + eval_results = evaluator.offline_evaluate(predictions, None) + rich.print(eval_results) + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/get_flops.py b/tools/analysis_tools/get_flops.py new file mode 100644 index 0000000..c705f6e --- /dev/null +++ b/tools/analysis_tools/get_flops.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +from mmengine.analysis import get_model_complexity_info + +from mmpretrain import get_model + + +def parse_args(): + parser = argparse.ArgumentParser(description='Get model flops and params') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[224, 224], + help='input image size') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + if len(args.shape) == 1: + input_shape = (3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = (3, ) + tuple(args.shape) + else: + raise ValueError('invalid input shape') + + model = get_model(args.config) + model.eval() + if hasattr(model, 'extract_feat'): + model.forward = model.extract_feat + else: + raise NotImplementedError( + 'FLOPs counter is currently not currently supported with {}'. + format(model.__class__.__name__)) + analysis_results = get_model_complexity_info( + model, + input_shape, + ) + flops = analysis_results['flops_str'] + params = analysis_results['params_str'] + activations = analysis_results['activations_str'] + out_table = analysis_results['out_table'] + out_arch = analysis_results['out_arch'] + print(out_arch) + print(out_table) + split_line = '=' * 30 + print(f'{split_line}\nInput shape: {input_shape}\n' + f'Flops: {flops}\nParams: {params}\n' + f'Activation: {activations}\n{split_line}') + print('!!!Only the backbone network is counted in FLOPs analysis.') + print('!!!Please be cautious if you use the results in papers. 
' + 'You may need to check if all ops are supported and verify that the ' + 'flops computation is correct.') + + +if __name__ == '__main__': + main() diff --git a/tools/analysis_tools/shape_bias.py b/tools/analysis_tools/shape_bias.py new file mode 100644 index 0000000..52e9fe6 --- /dev/null +++ b/tools/analysis_tools/shape_bias.py @@ -0,0 +1,284 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from https://github.com/bethgelab/model-vs-human +import argparse +import os +import os.path as osp + +import matplotlib as mpl +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +from mmengine.logging import MMLogger +from utils import FormatStrFormatter, ShapeBias + +# global default boundary settings for thin gray transparent +# boundaries to avoid not being able to see the difference +# between two partially overlapping datapoints of the same color: +PLOTTING_EDGE_COLOR = (0.3, 0.3, 0.3, 0.3) +PLOTTING_EDGE_WIDTH = 0.02 +ICONS_DIR = osp.join( + osp.dirname(__file__), '..', '..', 'resources', 'shape_bias_icons') + +parser = argparse.ArgumentParser() +parser.add_argument('--csv-dir', type=str, help='directory of csv files') +parser.add_argument( + '--result-dir', type=str, help='directory to save plotting results') +parser.add_argument('--model-names', nargs='+', default=[], help='model name') +parser.add_argument( + '--colors', + nargs='+', + type=float, + default=[], + help= # noqa + 'the colors for the plots of each model, and they should be in the same order as model_names' # noqa: E501 +) +parser.add_argument( + '--markers', + nargs='+', + type=str, + default=[], + help= # noqa + 'the markers for the plots of each model, and they should be in the same order as model_names' # noqa: E501 +) +parser.add_argument( + '--plotting-names', + nargs='+', + default=[], + help= # noqa + 'the plotting names for the plots of each model, and they should be in the same order as model_names' # noqa: E501 +) +parser.add_argument( + '--delete-icons', + action='store_true', + help='whether to delete the icons after plotting') + +humans = [ + 'subject-01', 'subject-02', 'subject-03', 'subject-04', 'subject-05', + 'subject-06', 'subject-07', 'subject-08', 'subject-09', 'subject-10' +] + +icon_names = [ + 'airplane.png', 'response_icons_vertical_reverse.png', 'bottle.png', + 'car.png', 'oven.png', 'elephant.png', 'dog.png', 'boat.png', 'clock.png', + 'chair.png', 'keyboard.png', 'bird.png', 'bicycle.png', + 'response_icons_horizontal.png', 'cat.png', 'bear.png', 'colorbar.pdf', + 'knife.png', 'response_icons_vertical.png', 'truck.png' +] + + +def read_csvs(csv_dir: str) -> pd.DataFrame: + """Reads all csv files in a directory and returns a single dataframe. + + Args: + csv_dir (str): directory of csv files. + + Returns: + pd.DataFrame: dataframe containing all csv files + """ + df = pd.DataFrame() + for csv in os.listdir(csv_dir): + if csv.endswith('.csv'): + cur_df = pd.read_csv(osp.join(csv_dir, csv)) + cur_df.columns = [c.lower() for c in cur_df.columns] + df = df.append(cur_df) + df.condition = df.condition.astype(str) + return df + + +def plot_shape_bias_matrixplot(args, analysis=ShapeBias()) -> None: + """Plots a matrixplot of shape bias. + + Args: + args (argparse.Namespace): arguments. + analysis (ShapeBias): shape bias analysis. Defaults to ShapeBias(). 
+ """ + mpl.rcParams['font.family'] = ['serif'] + mpl.rcParams['font.serif'] = ['Times New Roman'] + + plt.figure(figsize=(9, 7)) + df = read_csvs(args.csv_dir) + + fontsize = 15 + ticklength = 10 + markersize = 250 + label_size = 20 + + classes = df['category'].unique() + num_classes = len(classes) + + # plot setup + fig = plt.figure(1, figsize=(12, 12), dpi=300.) + ax = plt.gca() + + ax.set_xlim([0, 1]) + ax.set_ylim([-.5, num_classes - 0.5]) + + # secondary reversed x axis + ax_top = ax.secondary_xaxis( + 'top', functions=(lambda x: 1 - x, lambda x: 1 - x)) + + # labels, ticks + plt.tick_params( + axis='y', which='both', left=False, right=False, labelleft=False) + ax.set_ylabel('Shape categories', labelpad=60, fontsize=label_size) + ax.set_xlabel( + "Fraction of 'texture' decisions", fontsize=label_size, labelpad=25) + ax_top.set_xlabel( + "Fraction of 'shape' decisions", fontsize=label_size, labelpad=25) + ax.xaxis.set_major_formatter(FormatStrFormatter('%g')) + ax_top.xaxis.set_major_formatter(FormatStrFormatter('%g')) + ax.get_xaxis().set_ticks(np.arange(0, 1.1, 0.1)) + ax_top.set_ticks(np.arange(0, 1.1, 0.1)) + ax.tick_params( + axis='both', which='major', labelsize=fontsize, length=ticklength) + ax_top.tick_params( + axis='both', which='major', labelsize=fontsize, length=ticklength) + + # arrows on x axes + plt.arrow( + x=0, + y=-1.75, + dx=1, + dy=0, + fc='black', + head_width=0.4, + head_length=0.03, + clip_on=False, + length_includes_head=True, + overhang=0.5) + plt.arrow( + x=1, + y=num_classes + 0.75, + dx=-1, + dy=0, + fc='black', + head_width=0.4, + head_length=0.03, + clip_on=False, + length_includes_head=True, + overhang=0.5) + + # icons besides y axis + # determine order of icons + df_selection = df.loc[(df['subj'].isin(humans))] + class_avgs = [] + for cl in classes: + df_class_selection = df_selection.query("category == '{}'".format(cl)) + class_avgs.append(1 - analysis.analysis( + df=df_class_selection)['shape-bias']) + sorted_indices = np.argsort(class_avgs) + classes = classes[sorted_indices] + + # icon placement is calculated in axis coordinates + WIDTH = 1 / num_classes + # placement left of yaxis (-WIDTH) plus some spacing (-.25*WIDTH) + XPOS = -1.25 * WIDTH + YPOS = -0.5 + HEIGHT = 1 + MARGINX = 1 / 10 * WIDTH # vertical whitespace between icons + MARGINY = 1 / 10 * HEIGHT # horizontal whitespace between icons + + left = XPOS + MARGINX + right = XPOS + WIDTH - MARGINX + + for i in range(num_classes): + bottom = i + MARGINY + YPOS + top = (i + 1) - MARGINY + YPOS + iconpath = osp.join(ICONS_DIR, '{}.png'.format(classes[i])) + plt.imshow( + plt.imread(iconpath), + extent=[left, right, bottom, top], + aspect='auto', + clip_on=False) + + # plot horizontal intersection lines + for i in range(num_classes - 1): + plt.plot([0, 1], [i + .5, i + .5], + c='gray', + linestyle='dotted', + alpha=0.4) + + # plot average shapebias + scatter points + for i in range(len(args.model_names)): + df_selection = df.loc[(df['subj'].isin(args.model_names[i]))] + result_df = analysis.analysis(df=df_selection) + avg = 1 - result_df['shape-bias'] + ax.plot([avg, avg], [-1, num_classes], color=args.colors[i]) + class_avgs = [] + for cl in classes: + df_class_selection = df_selection.query( + "category == '{}'".format(cl)) + class_avgs.append(1 - analysis.analysis( + df=df_class_selection)['shape-bias']) + + ax.scatter( + class_avgs, + classes, + color=args.colors[i], + marker=args.markers[i], + label=args.plotting_names[i], + s=markersize, + clip_on=False, + edgecolors=PLOTTING_EDGE_COLOR, + 
linewidths=PLOTTING_EDGE_WIDTH, + zorder=3) + plt.legend(frameon=True, labelspacing=1, loc=9) + + figure_path = osp.join(args.result_dir, + 'cue-conflict_shape-bias_matrixplot.pdf') + fig.savefig(figure_path, bbox_inches='tight') + plt.close() + + +def check_icons() -> bool: + """Check if icons are present, if not download them.""" + if not osp.exists(ICONS_DIR): + return False + for icon_name in icon_names: + if not osp.exists(osp.join(ICONS_DIR, icon_name)): + return False + return True + + +if __name__ == '__main__': + + if not check_icons(): + root_url = 'https://github.com/bethgelab/model-vs-human/raw/master/assets/icons' # noqa: E501 + os.makedirs(ICONS_DIR, exist_ok=True) + MMLogger.get_current_instance().info( + f'Downloading icons to {ICONS_DIR}') + for icon_name in icon_names: + url = osp.join(root_url, icon_name) + os.system('wget -O {} {}'.format( + osp.join(ICONS_DIR, icon_name), url)) + + args = parser.parse_args() + assert len(args.model_names) * 3 == len(args.colors), 'Number of colors \ + must be 3 times the number of models. Every three colors are the RGB \ + values for one model.' + + # preprocess colors + args.colors = [c / 255. for c in args.colors] + colors = [] + for i in range(len(args.model_names)): + colors.append(args.colors[3 * i:3 * i + 3]) + args.colors = colors + args.colors.append([165 / 255., 30 / 255., 55 / 255.]) # human color + + # if plotting names are not specified, use model names + if len(args.plotting_names) == 0: + args.plotting_names = args.model_names + + # preprocess markers + args.markers.append('D') # human marker + + # preprocess model names + args.model_names = [[m] for m in args.model_names] + args.model_names.append(humans) + + # preprocess plotting names + args.plotting_names.append('Humans') + + plot_shape_bias_matrixplot(args) + if args.delete_icons: + os.system('rm -rf {}'.format(ICONS_DIR)) diff --git a/tools/analysis_tools/utils.py b/tools/analysis_tools/utils.py new file mode 100644 index 0000000..184cb32 --- /dev/null +++ b/tools/analysis_tools/utils.py @@ -0,0 +1,277 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from https://github.com/bethgelab/model-vs-human +from typing import Any, Dict, List, Optional + +import matplotlib as mpl +import pandas as pd +from matplotlib import _api +from matplotlib import transforms as mtransforms + + +class _DummyAxis: + """Define the minimal interface for a dummy axis. + + Args: + minpos (float): The minimum positive value for the axis. Defaults to 0. + """ + __name__ = 'dummy' + + # Once the deprecation elapses, replace dataLim and viewLim by plain + # _view_interval and _data_interval private tuples. 
+ dataLim = _api.deprecate_privatize_attribute( + '3.6', alternative='get_data_interval() and set_data_interval()') + viewLim = _api.deprecate_privatize_attribute( + '3.6', alternative='get_view_interval() and set_view_interval()') + + def __init__(self, minpos: float = 0) -> None: + self._dataLim = mtransforms.Bbox.unit() + self._viewLim = mtransforms.Bbox.unit() + self._minpos = minpos + + def get_view_interval(self) -> Dict: + """Return the view interval as a tuple (*vmin*, *vmax*).""" + return self._viewLim.intervalx + + def set_view_interval(self, vmin: float, vmax: float) -> None: + """Set the view interval to (*vmin*, *vmax*).""" + self._viewLim.intervalx = vmin, vmax + + def get_minpos(self) -> float: + """Return the minimum positive value for the axis.""" + return self._minpos + + def get_data_interval(self) -> Dict: + """Return the data interval as a tuple (*vmin*, *vmax*).""" + return self._dataLim.intervalx + + def set_data_interval(self, vmin: float, vmax: float) -> None: + """Set the data interval to (*vmin*, *vmax*).""" + self._dataLim.intervalx = vmin, vmax + + def get_tick_space(self) -> int: + """Return the number of ticks to use.""" + # Just use the long-standing default of nbins==9 + return 9 + + +class TickHelper: + """A helper class for ticks and tick labels.""" + axis = None + + def set_axis(self, axis: Any) -> None: + """Set the axis instance.""" + self.axis = axis + + def create_dummy_axis(self, **kwargs) -> None: + """Create a dummy axis if no axis is set.""" + if self.axis is None: + self.axis = _DummyAxis(**kwargs) + + @_api.deprecated('3.5', alternative='`.Axis.set_view_interval`') + def set_view_interval(self, vmin: float, vmax: float) -> None: + """Set the view interval to (*vmin*, *vmax*).""" + self.axis.set_view_interval(vmin, vmax) + + @_api.deprecated('3.5', alternative='`.Axis.set_data_interval`') + def set_data_interval(self, vmin: float, vmax: float) -> None: + """Set the data interval to (*vmin*, *vmax*).""" + self.axis.set_data_interval(vmin, vmax) + + @_api.deprecated( + '3.5', + alternative='`.Axis.set_view_interval` and `.Axis.set_data_interval`') + def set_bounds(self, vmin: float, vmax: float) -> None: + """Set the view and data interval to (*vmin*, *vmax*).""" + self.set_view_interval(vmin, vmax) + self.set_data_interval(vmin, vmax) + + +class Formatter(TickHelper): + """Create a string based on a tick value and location.""" + # some classes want to see all the locs to help format + # individual ones + locs = [] + + def __call__(self, x: str, pos: Optional[Any] = None) -> str: + """Return the format for tick value *x* at position pos. + + ``pos=None`` indicates an unspecified location. + + This method must be overridden in the derived class. + + Args: + x (str): The tick value. + pos (Optional[Any]): The tick position. Defaults to None. + """ + raise NotImplementedError('Derived must override') + + def format_ticks(self, values: pd.Series) -> List[str]: + """Return the tick labels for all the ticks at once. + + Args: + values (pd.Series): The tick values. + + Returns: + List[str]: The tick labels. + """ + self.set_locs(values) + return [self(value, i) for i, value in enumerate(values)] + + def format_data(self, value: Any) -> str: + """Return the full string representation of the value with the position + unspecified. + + Args: + value (Any): The tick value. + + Returns: + str: The full string representation of the value. 
+ """ + return self.__call__(value) + + def format_data_short(self, value: Any) -> str: + """Return a short string version of the tick value. + + Defaults to the position-independent long value. + + Args: + value (Any): The tick value. + + Returns: + str: The short string representation of the value. + """ + return self.format_data(value) + + def get_offset(self) -> str: + """Return the offset string.""" + return '' + + def set_locs(self, locs: List[Any]) -> None: + """Set the locations of the ticks. + + This method is called before computing the tick labels because some + formatters need to know all tick locations to do so. + """ + self.locs = locs + + @staticmethod + def fix_minus(s: str) -> str: + """Some classes may want to replace a hyphen for minus with the proper + Unicode symbol (U+2212) for typographical correctness. + + This is a + helper method to perform such a replacement when it is enabled via + :rc:`axes.unicode_minus`. + + Args: + s (str): The string to replace the hyphen with the Unicode symbol. + """ + return (s.replace('-', '\N{MINUS SIGN}') + if mpl.rcParams['axes.unicode_minus'] else s) + + def _set_locator(self, locator: Any) -> None: + """Subclasses may want to override this to set a locator.""" + pass + + +class FormatStrFormatter(Formatter): + """Use an old-style ('%' operator) format string to format the tick. + + The format string should have a single variable format (%) in it. + It will be applied to the value (not the position) of the tick. + + Negative numeric values will use a dash, not a Unicode minus; use mathtext + to get a Unicode minus by wrapping the format specifier with $ (e.g. + "$%g$"). + + Args: + fmt (str): Format string. + """ + + def __init__(self, fmt: str) -> None: + self.fmt = fmt + + def __call__(self, x: str, pos: Optional[Any]) -> str: + """Return the formatted label string. + + Only the value *x* is formatted. The position is ignored. + + Args: + x (str): The value to format. + pos (Any): The position of the tick. Ignored. + """ + return self.fmt % x + + +class ShapeBias: + """Compute the shape bias of a model. + + Reference: `ImageNet-trained CNNs are biased towards texture; + increasing shape bias improves accuracy and robustness + `_. + """ + num_input_models = 1 + + def __init__(self) -> None: + super().__init__() + self.plotting_name = 'shape-bias' + + @staticmethod + def _check_dataframe(df: pd.DataFrame) -> None: + """Check that the dataframe is valid.""" + assert len(df) > 0, 'empty dataframe' + + def analysis(self, df: pd.DataFrame) -> Dict[str, float]: + """Compute the shape bias of a model. + + Args: + df (pd.DataFrame): The dataframe containing the data. + + Returns: + Dict[str, float]: The shape bias. + """ + self._check_dataframe(df) + + df = df.copy() + df['correct_texture'] = df['imagename'].apply( + self.get_texture_category) + df['correct_shape'] = df['category'] + + # remove those rows where shape = texture, i.e. 
no cue conflict present + df2 = df.loc[df.correct_shape != df.correct_texture] + fraction_correct_shape = len( + df2.loc[df2.object_response == df2.correct_shape]) / len(df) + fraction_correct_texture = len( + df2.loc[df2.object_response == df2.correct_texture]) / len(df) + shape_bias = fraction_correct_shape / ( + fraction_correct_shape + fraction_correct_texture) + + result_dict = { + 'fraction-correct-shape': fraction_correct_shape, + 'fraction-correct-texture': fraction_correct_texture, + 'shape-bias': shape_bias + } + return result_dict + + def get_texture_category(self, imagename: str) -> str: + """Return texture category from imagename. + + e.g. 'XXX_dog10-bird2.png' -> 'bird ' + + Args: + imagename (str): Name of the image. + + Returns: + str: Texture category. + """ + assert type(imagename) is str + + # remove unnecessary words + a = imagename.split('_')[-1] + # remove .png etc. + b = a.split('.')[0] + # get texture category (last word) + c = b.split('-')[-1] + # remove number, e.g. 'bird2' -> 'bird' + d = ''.join([i for i in c if not i.isdigit()]) + return d diff --git a/tools/benchmarks/mmdetection/mim_dist_test.sh b/tools/benchmarks/mmdetection/mim_dist_test.sh new file mode 100644 index 0000000..1009d72 --- /dev/null +++ b/tools/benchmarks/mmdetection/mim_dist_test.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -x + +CFG=$1 +CHECKPOINT=$2 +GPUS=$3 +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +mim test mmdet \ + $CFG \ + --checkpoint $CHECKPOINT \ + --launcher pytorch \ + -G $GPUS \ + $PY_ARGS diff --git a/tools/benchmarks/mmdetection/mim_dist_train_c4.sh b/tools/benchmarks/mmdetection/mim_dist_train_c4.sh new file mode 100644 index 0000000..e28d573 --- /dev/null +++ b/tools/benchmarks/mmdetection/mim_dist_train_c4.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -x + +CFG=$1 +PRETRAIN=$2 # pretrained model +GPUS=$3 +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +mim train mmdet $CFG \ + --launcher pytorch -G $GPUS \ + --cfg-options model.backbone.init_cfg.type=Pretrained \ + model.backbone.init_cfg.checkpoint=$PRETRAIN \ + model.backbone.init_cfg.prefix="backbone." \ + model.roi_head.shared_head.init_cfg.type=Pretrained \ + model.roi_head.shared_head.init_cfg.checkpoint=$PRETRAIN \ + model.roi_head.shared_head.init_cfg.prefix="backbone." \ + $PY_ARGS diff --git a/tools/benchmarks/mmdetection/mim_dist_train_fpn.sh b/tools/benchmarks/mmdetection/mim_dist_train_fpn.sh new file mode 100644 index 0000000..9641851 --- /dev/null +++ b/tools/benchmarks/mmdetection/mim_dist_train_fpn.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -x + +CFG=$1 +PRETRAIN=$2 # pretrained model +GPUS=$3 +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +mim train mmdet $CFG \ + --launcher pytorch -G $GPUS \ + --cfg-options model.backbone.init_cfg.type=Pretrained \ + model.backbone.init_cfg.checkpoint=$PRETRAIN \ + model.backbone.init_cfg.prefix="backbone." 
\ + $PY_ARGS diff --git a/tools/benchmarks/mmdetection/mim_slurm_test.sh b/tools/benchmarks/mmdetection/mim_slurm_test.sh new file mode 100644 index 0000000..7209e92 --- /dev/null +++ b/tools/benchmarks/mmdetection/mim_slurm_test.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +CFG=$2 +CHECKPOINT=$3 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +mim test mmdet \ + $CFG \ + --checkpoint $CHECKPOINT \ + --launcher slurm -G $GPUS \ + --gpus-per-node $GPUS_PER_NODE \ + --cpus-per-task $CPUS_PER_TASK \ + --partition $PARTITION \ + --srun-args "$SRUN_ARGS" \ + $PY_ARGS diff --git a/tools/benchmarks/mmdetection/mim_slurm_train_c4.sh b/tools/benchmarks/mmdetection/mim_slurm_train_c4.sh new file mode 100644 index 0000000..5ababa9 --- /dev/null +++ b/tools/benchmarks/mmdetection/mim_slurm_train_c4.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +CFG=$2 +PRETRAIN=$3 # pretrained model +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +mim train mmdet $CFG \ + --launcher slurm -G $GPUS \ + --gpus-per-node $GPUS_PER_NODE \ + --cpus-per-task $CPUS_PER_TASK \ + --partition $PARTITION \ + --srun-args "$SRUN_ARGS" \ + --cfg-options model.backbone.init_cfg.type=Pretrained \ + model.backbone.init_cfg.checkpoint=$PRETRAIN \ + model.backbone.init_cfg.prefix="backbone." \ + model.roi_head.shared_head.init_cfg.type=Pretrained \ + model.roi_head.shared_head.init_cfg.checkpoint=$PRETRAIN \ + model.roi_head.shared_head.init_cfg.prefix="backbone." \ + $PY_ARGS diff --git a/tools/benchmarks/mmdetection/mim_slurm_train_fpn.sh b/tools/benchmarks/mmdetection/mim_slurm_train_fpn.sh new file mode 100644 index 0000000..514e036 --- /dev/null +++ b/tools/benchmarks/mmdetection/mim_slurm_train_fpn.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +CFG=$2 +PRETRAIN=$3 # pretrained model +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +mim train mmdet $CFG \ + --launcher slurm -G $GPUS \ + --gpus-per-node $GPUS_PER_NODE \ + --cpus-per-task $CPUS_PER_TASK \ + --partition $PARTITION \ + --srun-args "$SRUN_ARGS" \ + --cfg-options model.backbone.init_cfg.type=Pretrained \ + model.backbone.init_cfg.checkpoint=$PRETRAIN \ + model.backbone.init_cfg.prefix="backbone." 
\ + $PY_ARGS diff --git a/tools/benchmarks/mmsegmentation/mim_dist_test.sh b/tools/benchmarks/mmsegmentation/mim_dist_test.sh new file mode 100644 index 0000000..9ebb1a7 --- /dev/null +++ b/tools/benchmarks/mmsegmentation/mim_dist_test.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -x + +CFG=$1 +CHECKPOINT=$2 +GPUS=$3 +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +mim test mmseg \ + $CFG \ + --checkpoint $CHECKPOINT \ + --launcher pytorch \ + -G $GPUS \ + $PY_ARGS diff --git a/tools/benchmarks/mmsegmentation/mim_dist_train.sh b/tools/benchmarks/mmsegmentation/mim_dist_train.sh new file mode 100644 index 0000000..d44da21 --- /dev/null +++ b/tools/benchmarks/mmsegmentation/mim_dist_train.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +set -x + +CFG=$1 +PRETRAIN=$2 # pretrained model +GPUS=$3 +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +mim train mmseg $CFG \ + --launcher pytorch -G $GPUS \ + --cfg-options model.backbone.init_cfg.type=Pretrained \ + model.backbone.init_cfg.checkpoint=$PRETRAIN \ + model.backbone.init_cfg.prefix="backbone." \ + model.pretrained=None \ + $PY_ARGS diff --git a/tools/benchmarks/mmsegmentation/mim_slurm_test.sh b/tools/benchmarks/mmsegmentation/mim_slurm_test.sh new file mode 100644 index 0000000..7d25dea --- /dev/null +++ b/tools/benchmarks/mmsegmentation/mim_slurm_test.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +CFG=$2 +CHECKPOINT=$3 +GPUS=${GPUS:-4} +GPUS_PER_NODE=${GPUS_PER_NODE:-4} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +mim test mmseg \ + $CFG \ + --checkpoint $CHECKPOINT \ + --launcher slurm -G $GPUS \ + --gpus-per-node $GPUS_PER_NODE \ + --cpus-per-task $CPUS_PER_TASK \ + --partition $PARTITION \ + --srun-args "$SRUN_ARGS" \ + $PY_ARGS diff --git a/tools/benchmarks/mmsegmentation/mim_slurm_train.sh b/tools/benchmarks/mmsegmentation/mim_slurm_train.sh new file mode 100644 index 0000000..b5870bf --- /dev/null +++ b/tools/benchmarks/mmsegmentation/mim_slurm_train.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +CFG=$2 +PRETRAIN=$3 # pretrained model +GPUS=${GPUS:-4} +GPUS_PER_NODE=${GPUS_PER_NODE:-4} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:4} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +mim train mmseg $CFG \ + --launcher slurm -G $GPUS \ + --gpus-per-node $GPUS_PER_NODE \ + --cpus-per-task $CPUS_PER_TASK \ + --partition $PARTITION \ + --srun-args "$SRUN_ARGS" \ + --cfg-options model.backbone.init_cfg.type=Pretrained \ + model.backbone.init_cfg.checkpoint=$PRETRAIN \ + model.backbone.init_cfg.prefix="backbone." \ + model.pretrained=None \ + $PY_ARGS diff --git a/tools/dataset_converters/convert_flickr30k_ann.py b/tools/dataset_converters/convert_flickr30k_ann.py new file mode 100644 index 0000000..eebd079 --- /dev/null +++ b/tools/dataset_converters/convert_flickr30k_ann.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Create COCO-Style GT annotations based on raw annotation of Flickr30k. + +GT annotations are used for evaluation in image caption task. 
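+
+Usage note (inferred from the code below): the script expects a file named
+``dataset_flickr30k.json`` in the current working directory and writes
+``flickr30k_train_gt.json``, ``flickr30k_val_gt.json`` and
+``flickr30k_test_gt.json`` next to it.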
+""" + +import json + + +def main(): + with open('dataset_flickr30k.json', 'r') as f: + annotations = json.load(f) + ann_list = [] + img_list = [] + splits = ['train', 'val', 'test'] + for split in splits: + for img in annotations['images']: + + # img_example={ + # "sentids": [0, 1, 2], + # "imgid": 0, + # "sentences": [ + # {"raw": "Two men in green shirts standing in a yard.", + # "imgid": 0, "sentid": 0}, + # {"raw": "A man in a blue shirt standing in a garden.", + # "imgid": 0, "sentid": 1}, + # {"raw": "Two friends enjoy time spent together.", + # "imgid": 0, "sentid": 2} + # ], + # "split": "train", + # "filename": "1000092795.jpg" + # }, + + if img['split'] != split: + continue + + img_list.append({'id': img['imgid']}) + + for sentence in img['sentences']: + ann_info = { + 'image_id': img['imgid'], + 'id': sentence['sentid'], + 'caption': sentence['raw'] + } + ann_list.append(ann_info) + + json_file = {'annotations': ann_list, 'images': img_list} + + # generate flickr30k_train_gt.json, flickr30k_val_gt.json + # and flickr30k_test_gt.json + with open(f'flickr30k_{split}_gt.json', 'w') as f: + json.dump(json_file, f) + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/convert_imagenet_subsets.py b/tools/dataset_converters/convert_imagenet_subsets.py new file mode 100644 index 0000000..784002e --- /dev/null +++ b/tools/dataset_converters/convert_imagenet_subsets.py @@ -0,0 +1,48 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""SimCLR provides list files for semi-supervised benchmarks +https://github.com/google-research/simclr/tree/master/imagenet_subsets/""" +import argparse + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert ImageNet subset lists provided by SimCLR into ' + 'the required format in MMPretrain.') + parser.add_argument( + 'input', help='Input list file, downloaded from SimCLR github repo.') + parser.add_argument( + 'output', help='Output list file with the required format.') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + # create dict with full imagenet annotation file + with open('data/imagenet/meta/train.txt', 'r') as f: + lines = f.readlines() + keys = [line.split('/')[0] for line in lines] + labels = [line.strip().split()[1] for line in lines] + mapping = {} + for k, l in zip(keys, labels): + if k not in mapping: + mapping[k] = l + else: + assert mapping[k] == l + + # convert + with open(args.input, 'r') as f: + lines = f.readlines() + fns = [line.strip() for line in lines] + sample_keys = [line.split('_')[0] for line in lines] + sample_labels = [mapping[k] for k in sample_keys] + output_lines = [ + f'{k}/{fn} {l}\n' for k, fn, l in zip(sample_keys, fns, sample_labels) + ] + with open(args.output, 'w+') as f: + f.writelines(output_lines) + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/convert_inaturalist.py b/tools/dataset_converters/convert_inaturalist.py new file mode 100644 index 0000000..8020c06 --- /dev/null +++ b/tools/dataset_converters/convert_inaturalist.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +import mmcv + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Convert iNaturalist2018 annotations to MMPretrain format.' 
+ ) + parser.add_argument('input', type=str, help='Input annotation json file.') + parser.add_argument('output', type=str, help='Output list file.') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + data = mmcv.load(args.input) + output_lines = [] + for img_item in data['images']: + for ann_item in data['annotations']: + if ann_item['image_id'] == img_item['id']: + output_lines.append( + f"{img_item['file_name']} {ann_item['category_id']}\n") + assert len(output_lines) == len(data['images']) + with open(args.output, 'w') as f: + f.writelines(output_lines) + + +if __name__ == '__main__': + main() diff --git a/tools/dataset_converters/odl_cub_preprocess.sh b/tools/dataset_converters/odl_cub_preprocess.sh new file mode 100644 index 0000000..6053d0e --- /dev/null +++ b/tools/dataset_converters/odl_cub_preprocess.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -x + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +# unzip all of data +cat $DOWNLOAD_DIR/CUB-200-2011/raw/*.tar.gz | tar -xvz -C $DOWNLOAD_DIR + +# move data into DATA_ROOT +mv -f $DOWNLOAD_DIR/CUB-200-2011/CUB-200-2011/* $DATA_ROOT/ + +# remove useless data file +rm -R $DOWNLOAD_DIR/CUB-200-2011/ diff --git a/tools/dataset_converters/odl_imagenet1k_preprocess.sh b/tools/dataset_converters/odl_imagenet1k_preprocess.sh new file mode 100644 index 0000000..e73ba37 --- /dev/null +++ b/tools/dataset_converters/odl_imagenet1k_preprocess.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +set -x + +DOWNLOAD_DIR=$1 +DATA_ROOT=$2 + +# unzip all of data +cat $DOWNLOAD_DIR/ImageNet-1K/raw/*.tar.gz.* | tar -xvz -C $DOWNLOAD_DIR + +# move images into data/imagenet +mv $DOWNLOAD_DIR/ImageNet-1K/{train,val,test} $DATA_ROOT + +# download the mate ann_files file +wget -P $DATA_ROOT https://download.openmmlab.com/mmclassification/datasets/imagenet/meta/caffe_ilsvrc12.tar.gz + +# unzip mate ann_files file and put it into 'meta' folder +mkdir $DATA_ROOT/meta +tar -xzvf $DATA_ROOT/caffe_ilsvrc12.tar.gz -C $DATA_ROOT/meta + +# remove useless data files +rm -R $DOWNLOAD_DIR/ImageNet-1K diff --git a/tools/dist_test.sh b/tools/dist_test.sh new file mode 100644 index 0000000..dea131b --- /dev/null +++ b/tools/dist_test.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +CONFIG=$1 +CHECKPOINT=$2 +GPUS=$3 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/test.py \ + $CONFIG \ + $CHECKPOINT \ + --launcher pytorch \ + ${@:4} diff --git a/tools/dist_train.sh b/tools/dist_train.sh new file mode 100644 index 0000000..3fca764 --- /dev/null +++ b/tools/dist_train.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +CONFIG=$1 +GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} +PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +python -m torch.distributed.launch \ + --nnodes=$NNODES \ + --node_rank=$NODE_RANK \ + --master_addr=$MASTER_ADDR \ + --nproc_per_node=$GPUS \ + --master_port=$PORT \ + $(dirname "$0")/train.py \ + $CONFIG \ + --launcher pytorch ${@:3} diff --git a/tools/kfold-cross-valid.py b/tools/kfold-cross-valid.py new file mode 100644 index 0000000..3591254 --- /dev/null +++ b/tools/kfold-cross-valid.py @@ -0,0 +1,254 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +import copy +import os +import os.path as osp + +from mmengine.config import Config, ConfigDict, DictAction +from mmengine.dist import sync_random_seed +from mmengine.fileio import dump, load +from mmengine.hooks import Hook +from mmengine.runner import Runner, find_latest_checkpoint +from mmengine.utils import digit_version +from mmengine.utils.dl_utils import TORCH_VERSION + +EXP_INFO_FILE = 'kfold_exp.json' + +prog_description = """K-Fold cross-validation. + +To start a 5-fold cross-validation experiment: + python tools/kfold-cross-valid.py $CONFIG --num-splits 5 + +To resume a 5-fold cross-validation from an interrupted experiment: + python tools/kfold-cross-valid.py $CONFIG --num-splits 5 --resume +""" # noqa: E501 + + +def parse_args(): + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description=prog_description) + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--num-splits', + type=int, + help='The number of all folds.', + required=True) + parser.add_argument( + '--fold', + type=int, + help='The fold used to do validation. ' + 'If specify, only do an experiment of the specified fold.') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument('--seed', type=int, default=None, help='random seed') + parser.add_argument( + '--resume', + action='store_true', + help='Resume the previous experiment.') + parser.add_argument( + '--amp', + action='store_true', + help='enable automatic-mixed-precision training') + parser.add_argument( + '--no-validate', + action='store_true', + help='whether not to evaluate the checkpoint during training') + parser.add_argument( + '--auto-scale-lr', + action='store_true', + help='whether to auto scale the learning rate according to the ' + 'actual batch size and the original batch size.') + parser.add_argument( + '--no-pin-memory', + action='store_true', + help='whether to disable the pin_memory option in dataloaders.') + parser.add_argument( + '--no-persistent-workers', + action='store_true', + help='whether to disable the persistent_workers option in dataloaders.' + ) + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + parser.add_argument('--local_rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + return args + + +def merge_args(cfg, args): + """Merge CLI arguments to config.""" + if args.no_validate: + cfg.val_cfg = None + cfg.val_dataloader = None + cfg.val_evaluator = None + + cfg.launcher = args.launcher + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + # enable automatic-mixed-precision training + if args.amp is True: + optim_wrapper = cfg.optim_wrapper.get('type', 'OptimWrapper') + assert optim_wrapper in ['OptimWrapper', 'AmpOptimWrapper'], \ + '`--amp` is not supported custom optimizer wrapper type ' \ + f'`{optim_wrapper}.' + cfg.optim_wrapper.type = 'AmpOptimWrapper' + cfg.optim_wrapper.setdefault('loss_scale', 'dynamic') + + # enable auto scale learning rate + if args.auto_scale_lr: + cfg.auto_scale_lr.enable = True + + # set dataloader args + default_dataloader_cfg = ConfigDict( + pin_memory=True, + persistent_workers=True, + collate_fn=dict(type='default_collate'), + ) + if digit_version(TORCH_VERSION) < digit_version('1.8.0'): + default_dataloader_cfg.persistent_workers = False + + def set_default_dataloader_cfg(cfg, field): + if cfg.get(field, None) is None: + return + dataloader_cfg = copy.deepcopy(default_dataloader_cfg) + dataloader_cfg.update(cfg[field]) + cfg[field] = dataloader_cfg + if args.no_pin_memory: + cfg[field]['pin_memory'] = False + if args.no_persistent_workers: + cfg[field]['persistent_workers'] = False + + set_default_dataloader_cfg(cfg, 'train_dataloader') + set_default_dataloader_cfg(cfg, 'val_dataloader') + set_default_dataloader_cfg(cfg, 'test_dataloader') + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + return cfg + + +def train_single_fold(cfg, num_splits, fold, resume_ckpt=None): + root_dir = cfg.work_dir + cfg.work_dir = osp.join(root_dir, f'fold{fold}') + if resume_ckpt is not None: + cfg.resume = True + cfg.load_from = resume_ckpt + dataset = cfg.train_dataloader.dataset + + # wrap the dataset cfg + def wrap_dataset(dataset, test_mode): + return dict( + type='KFoldDataset', + dataset=dataset, + fold=fold, + num_splits=num_splits, + seed=cfg.kfold_split_seed, + test_mode=test_mode, + ) + + train_dataset = copy.deepcopy(dataset) + cfg.train_dataloader.dataset = wrap_dataset(train_dataset, False) + + if cfg.val_dataloader is not None: + if 'pipeline' not in cfg.val_dataloader.dataset: + raise ValueError( + 'Cannot find `pipeline` in the validation dataset. ' + "If you are using dataset wrapper, please don't use this " + 'tool to act kfold cross validation. 
' + 'Please write config files manually.') + val_dataset = copy.deepcopy(dataset) + val_dataset['pipeline'] = cfg.val_dataloader.dataset.pipeline + cfg.val_dataloader.dataset = wrap_dataset(val_dataset, True) + if cfg.test_dataloader is not None: + if 'pipeline' not in cfg.test_dataloader.dataset: + raise ValueError( + 'Cannot find `pipeline` in the test dataset. ' + "If you are using dataset wrapper, please don't use this " + 'tool to act kfold cross validation. ' + 'Please write config files manually.') + test_dataset = copy.deepcopy(dataset) + test_dataset['pipeline'] = cfg.test_dataloader.dataset.pipeline + cfg.test_dataloader.dataset = wrap_dataset(test_dataset, True) + + # build the runner from config + runner = Runner.from_cfg(cfg) + runner.logger.info( + f'----------- Cross-validation: [{fold+1}/{num_splits}] ----------- ') + runner.logger.info(f'Train dataset: \n{runner.train_dataloader.dataset}') + + class SaveInfoHook(Hook): + + def after_train_epoch(self, runner): + last_ckpt = find_latest_checkpoint(cfg.work_dir) + exp_info = dict( + fold=fold, + last_ckpt=last_ckpt, + kfold_split_seed=cfg.kfold_split_seed, + ) + dump(exp_info, osp.join(root_dir, EXP_INFO_FILE)) + + runner.register_hook(SaveInfoHook(), 'LOWEST') + + # start training + runner.train() + + +def main(): + args = parse_args() + + # load config + cfg = Config.fromfile(args.config) + + # merge cli arguments to config + cfg = merge_args(cfg, args) + + # set the unify random seed + cfg.kfold_split_seed = args.seed or sync_random_seed() + + # resume from the previous experiment + if args.resume: + experiment_info = load(osp.join(cfg.work_dir, EXP_INFO_FILE)) + resume_fold = experiment_info['fold'] + cfg.kfold_split_seed = experiment_info['kfold_split_seed'] + resume_ckpt = experiment_info.get('last_ckpt', None) + else: + resume_fold = 0 + resume_ckpt = None + + if args.fold is not None: + folds = [args.fold] + else: + folds = range(resume_fold, args.num_splits) + + for fold in folds: + cfg_ = copy.deepcopy(cfg) + train_single_fold(cfg_, args.num_splits, fold, resume_ckpt) + resume_ckpt = None + + +if __name__ == '__main__': + main() diff --git a/tools/misc/print_config.py b/tools/misc/print_config.py new file mode 100644 index 0000000..bc78600 --- /dev/null +++ b/tools/misc/print_config.py @@ -0,0 +1,38 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +import rich.console +from mmengine import Config, DictAction + +console = rich.console.Console() + + +def parse_args(): + parser = argparse.ArgumentParser(description='Print the whole config') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + console.print(cfg.pretty_text, markup=False) + + +if __name__ == '__main__': + main() diff --git a/tools/misc/verify_dataset.py b/tools/misc/verify_dataset.py new file mode 100644 index 0000000..ccc5e89 --- /dev/null +++ b/tools/misc/verify_dataset.py @@ -0,0 +1,145 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import multiprocessing +import os +from pathlib import Path + +from mmengine import (Config, DictAction, track_parallel_progress, + track_progress) + +from mmpretrain.datasets import build_dataset +from mmpretrain.registry import TRANSFORMS + +file_lock = multiprocessing.Lock() + + +def parse_args(): + parser = argparse.ArgumentParser(description='Verify Dataset') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--out-path', + type=str, + default='brokenfiles.log', + help='output path of all the broken files. If the specified path ' + 'already exists, delete the previous file ') + parser.add_argument( + '--phase', + default='train', + type=str, + choices=['train', 'test', 'val'], + help='phase of dataset to visualize, accept "train" "test" and "val".') + parser.add_argument( + '--num-process', type=int, default=1, help='number of process to use') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + assert args.out_path is not None + assert args.num_process > 0 + return args + + +class DatasetValidator(): + """the dataset tool class to check if all file are broken.""" + + def __init__(self, dataset_cfg, log_file_path): + super(DatasetValidator, self).__init__() + # keep only LoadImageFromFile pipeline + from mmpretrain.datasets import get_transform_idx + + load_idx = get_transform_idx(dataset_cfg.pipeline, 'LoadImageFromFile') + assert load_idx >= 0, \ + 'This tool is only for datasets needs to load image from files.' 
+        self.pipeline = TRANSFORMS.build(dataset_cfg.pipeline[load_idx])
+        dataset_cfg.pipeline = []
+        dataset = build_dataset(dataset_cfg)
+
+        self.dataset = dataset
+        self.log_file_path = log_file_path
+
+    def valid_idx(self, idx):
+        item = self.dataset[idx]
+        try:
+            item = self.pipeline(item)
+        except Exception:
+            with open(self.log_file_path, 'a') as f:
+                # add file lock to prevent multi-process writing errors
+                filepath = str(Path(item['img_path']))
+                file_lock.acquire()
+                f.write(filepath + '\n')
+                file_lock.release()
+                print(f'{filepath} cannot be read correctly, please check it.')
+
+    def __len__(self):
+        return len(self.dataset)
+
+
+def print_info(log_file_path):
+    """Print some information and do extra actions."""
+    print()
+    with open(log_file_path, 'r') as f:
+        content = f.read().strip()
+        if content == '':
+            print('No broken files were found.')
+            os.remove(log_file_path)
+        else:
+            num_file = len(content.split('\n'))
+            print(f'{num_file} broken files found, the name list is saved in '
+                  f'file: {log_file_path}')
+    print()
+
+
+def main():
+    # parse cfg and args
+    args = parse_args()
+    cfg = Config.fromfile(args.config)
+    if args.cfg_options is not None:
+        cfg.merge_from_dict(args.cfg_options)
+
+    # touch output file to save broken files list.
+    output_path = Path(args.out_path)
+    if not output_path.parent.exists():
+        raise Exception("Path '--out-path' parent directory not found.")
+    if output_path.exists():
+        os.remove(output_path)
+    output_path.touch()
+
+    if args.phase == 'train':
+        dataset_cfg = cfg.train_dataloader.dataset
+    elif args.phase == 'val':
+        dataset_cfg = cfg.val_dataloader.dataset
+    elif args.phase == 'test':
+        dataset_cfg = cfg.test_dataloader.dataset
+    else:
+        raise ValueError("'--phase' only supports 'train', 'val' and 'test'.")
+
+    # do validate
+    validator = DatasetValidator(dataset_cfg, output_path)
+
+    if args.num_process > 1:
+        # The default chunksize calculation method of Pool.map
+        chunksize, extra = divmod(len(validator), args.num_process * 8)
+        if extra:
+            chunksize += 1
+
+        track_parallel_progress(
+            validator.valid_idx,
+            list(range(len(validator))),
+            args.num_process,
+            chunksize=chunksize,
+            keep_order=False)
+    else:
+        track_progress(validator.valid_idx, list(range(len(validator))))
+
+    print_info(output_path)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/model_converters/clip_to_mmpretrain.py b/tools/model_converters/clip_to_mmpretrain.py
new file mode 100644
index 0000000..5442628
--- /dev/null
+++ b/tools/model_converters/clip_to_mmpretrain.py
@@ -0,0 +1,72 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import argparse
+import os.path as osp
+from collections import OrderedDict
+
+import mmengine
+import torch
+from mmengine.runner import CheckpointLoader
+
+
+def convert_clip(ckpt):
+    new_ckpt = OrderedDict()
+
+    for k, v in list(ckpt.items()):
+        new_v = v
+        if k.startswith('head'):
+            new_k = k.replace('head.', 'head.layers.head.')
+            new_ckpt[new_k] = new_v
+            continue
+        elif k.startswith('patch_embed'):
+            if 'proj.'
in k: + new_k = k.replace('proj.', 'projection.') + else: + new_k = k + elif k.startswith('norm_pre'): + new_k = k.replace('norm_pre', 'pre_norm') + elif k.startswith('blocks'): + new_k = k.replace('blocks.', 'layers.') + if 'norm1' in k: + new_k = new_k.replace('norm1', 'ln1') + elif 'norm2' in k: + new_k = new_k.replace('norm2', 'ln2') + elif 'mlp.fc1' in k: + new_k = new_k.replace('mlp.fc1', 'ffn.layers.0.0') + elif 'mlp.fc2' in k: + new_k = new_k.replace('mlp.fc2', 'ffn.layers.1') + elif k.startswith('norm'): + new_k = k.replace('norm', 'ln1') + else: + new_k = k + + if not new_k.startswith('head'): + new_k = 'backbone.' + new_k + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained clip ' + 'models to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + weight = convert_clip(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/convnext_to_mmpretrain.py b/tools/model_converters/convnext_to_mmpretrain.py new file mode 100644 index 0000000..82f6236 --- /dev/null +++ b/tools/model_converters/convnext_to_mmpretrain.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_convnext(ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('head'): + new_k = k.replace('head.', 'head.fc.') + new_ckpt[new_k] = new_v + continue + elif k.startswith('stages'): + if 'dwconv' in k: + new_k = k.replace('dwconv', 'depthwise_conv') + elif 'pwconv' in k: + new_k = k.replace('pwconv', 'pointwise_conv') + else: + new_k = k + elif k.startswith('norm'): + new_k = k.replace('norm', 'norm3') + else: + new_k = k + + if not new_k.startswith('head'): + new_k = 'backbone.' + new_k + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained convnext ' + 'models to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + + weight = convert_convnext(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(dict(state_dict=weight), args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/davit_to_mmpretrain.py b/tools/model_converters/davit_to_mmpretrain.py new file mode 100644 index 0000000..c578026 --- /dev/null +++ b/tools/model_converters/davit_to_mmpretrain.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
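+# Example usage (the `src` and `dst` arguments are defined in main() below;
+# the placeholder names here are illustrative):
+#   python tools/model_converters/davit_to_mmpretrain.py ${SRC_PATH_OR_URL} ${DST_PATH}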
+import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_davit(ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('patch_embeds.0'): + new_k = k.replace('patch_embeds.0', 'patch_embed') + new_k = new_k.replace('proj', 'projection') + elif k.startswith('patch_embeds'): + if k.startswith('patch_embeds.1'): + new_k = k.replace('patch_embeds.1', 'stages.0.downsample') + elif k.startswith('patch_embeds.2'): + new_k = k.replace('patch_embeds.2', 'stages.1.downsample') + elif k.startswith('patch_embeds.3'): + new_k = k.replace('patch_embeds.3', 'stages.2.downsample') + new_k = new_k.replace('proj', 'projection') + elif k.startswith('main_blocks'): + new_k = k.replace('main_blocks', 'stages') + for num_stages in range(4): + for num_blocks in range(9): + if f'{num_stages}.{num_blocks}.0' in k: + new_k = new_k.replace( + f'{num_stages}.{num_blocks}.0', + f'{num_stages}.blocks.{num_blocks}.spatial_block') + elif f'{num_stages}.{num_blocks}.1' in k: + new_k = new_k.replace( + f'{num_stages}.{num_blocks}.1', + f'{num_stages}.blocks.{num_blocks}.channel_block') + if 'cpe.0' in k: + new_k = new_k.replace('cpe.0', 'cpe1') + elif 'cpe.1' in k: + new_k = new_k.replace('cpe.1', 'cpe2') + if 'mlp' in k: + new_k = new_k.replace('mlp.fc1', 'ffn.layers.0.0') + new_k = new_k.replace('mlp.fc2', 'ffn.layers.1') + if 'spatial_block.attn' in new_k: + new_k = new_k.replace('spatial_block.attn', + 'spatial_block.attn.w_msa') + elif k.startswith('norms'): + new_k = k.replace('norms', 'norm3') + elif k.startswith('head'): + new_k = k.replace('head', 'head.fc') + else: + new_k = k + + if not new_k.startswith('head'): + new_k = 'backbone.' + new_k + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained davit ' + 'models to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + weight = convert_davit(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/deit3_to_mmpretrain.py b/tools/model_converters/deit3_to_mmpretrain.py new file mode 100644 index 0000000..0ceed1f --- /dev/null +++ b/tools/model_converters/deit3_to_mmpretrain.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_deit3(ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('head'): + new_k = k.replace('head.', 'head.layers.head.') + new_ckpt[new_k] = new_v + continue + elif k.startswith('patch_embed'): + if 'proj.' 
in k: + new_k = k.replace('proj.', 'projection.') + else: + new_k = k + elif k.startswith('blocks'): + new_k = k.replace('blocks.', 'layers.') + if 'norm1' in k: + new_k = new_k.replace('norm1', 'ln1') + elif 'norm2' in k: + new_k = new_k.replace('norm2', 'ln2') + elif 'mlp.fc1' in k: + new_k = new_k.replace('mlp.fc1', 'ffn.layers.0.0') + elif 'mlp.fc2' in k: + new_k = new_k.replace('mlp.fc2', 'ffn.layers.1') + elif 'gamma_1' in k: + new_k = new_k.replace('gamma_1', 'attn.gamma1.weight') + elif 'gamma_2' in k: + new_k = new_k.replace('gamma_2', 'ffn.gamma2.weight') + elif k.startswith('norm'): + new_k = k.replace('norm', 'ln1') + else: + new_k = k + + if not new_k.startswith('head'): + new_k = 'backbone.' + new_k + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained deit3 ' + 'models to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + + weight = convert_deit3(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/edgenext_to_mmpretrain.py b/tools/model_converters/edgenext_to_mmpretrain.py new file mode 100644 index 0000000..64a5468 --- /dev/null +++ b/tools/model_converters/edgenext_to_mmpretrain.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from pathlib import Path + +import torch + + +def convert_weights(weight): + """Weight Converter. + + Converts the weights from timm to mmpretrain + Args: + weight (dict): weight dict from timm + Returns: + Converted weight dict for mmpretrain + """ + result = dict() + result['meta'] = dict() + temp = dict() + mapping = { + 'dwconv': 'depthwise_conv', + 'pwconv1': 'pointwise_conv1', + 'pwconv2': 'pointwise_conv2', + 'xca': 'csa', + 'convs': 'conv_modules', + 'token_projection': 'proj', + 'pos_embd': 'pos_embed', + 'temperature': 'scale', + } + strict_mapping = { + 'norm.weight': 'norm3.weight', + 'norm.bias': 'norm3.bias', + } + + try: + weight = weight['model_ema'] + except KeyError: + weight = weight['state_dict'] # for model learned with usi + else: + raise NotImplementedError + + for k, v in weight.items(): + # keyword mapping + for mk, mv in mapping.items(): + if mk in k: + k = k.replace(mk, mv) + # strict mapping + for mk, mv in strict_mapping.items(): + if mk == k: + k = mv + + if k.startswith('head.'): + temp['head.fc.' + k[5:]] = v + else: + temp['backbone.' 
+ k] = v + + result['state_dict'] = temp + return result + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + dst = Path(args.dst) + if dst.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit(1) + dst.parent.mkdir(parents=True, exist_ok=True) + + original_model = torch.load(args.src, map_location='cpu') + converted_model = convert_weights(original_model) + torch.save(converted_model, args.dst) diff --git a/tools/model_converters/efficientnet_to_mmpretrain.py b/tools/model_converters/efficientnet_to_mmpretrain.py new file mode 100644 index 0000000..f1541e3 --- /dev/null +++ b/tools/model_converters/efficientnet_to_mmpretrain.py @@ -0,0 +1,222 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os + +import numpy as np +import torch +from mmengine.model import Sequential +from tensorflow.python.training import py_checkpoint_reader + +from mmpretrain.models.backbones.efficientnet import EfficientNet + + +def tf2pth(v): + if v.ndim == 4: + return np.ascontiguousarray(v.transpose(3, 2, 0, 1)) + elif v.ndim == 2: + return np.ascontiguousarray(v.transpose()) + return v + + +def read_ckpt(ckpt): + reader = py_checkpoint_reader.NewCheckpointReader(ckpt) + weights = { + n: torch.as_tensor(tf2pth(reader.get_tensor(n))) + for (n, _) in reader.get_variable_to_shape_map().items() + } + return weights + + +def map_key(weight, l2_flag): + m = dict() + has_expand_conv = set() + is_MBConv = set() + max_idx = 0 + name = None + for k, v in weight.items(): + seg = k.split('/') + if len(seg) == 1: + continue + if 'edgetpu' in seg[0]: + name = 'e' + seg[0][21:].lower() + else: + name = seg[0][13:] + if seg[2] == 'tpu_batch_normalization_2': + has_expand_conv.add(seg[1]) + if seg[1].startswith('blocks_'): + idx = int(seg[1][7:]) + 1 + max_idx = max(max_idx, idx) + if 'depthwise' in k: + is_MBConv.add(seg[1]) + + model = EfficientNet(name) + idx2key = [] + for idx, module in enumerate(model.layers): + if isinstance(module, Sequential): + for j in range(len(module)): + idx2key.append('{}.{}'.format(idx, j)) + else: + idx2key.append('{}'.format(idx)) + + for k, v in weight.items(): + if l2_flag: + k = k.replace('/ExponentialMovingAverage', '') + + if 'Exponential' in k or 'RMS' in k: + continue + + seg = k.split('/') + if len(seg) == 1: + continue + if seg[2] == 'depthwise_conv2d': + v = v.transpose(1, 0) + + if seg[1] == 'stem': + prefix = 'backbone.layers.{}'.format(idx2key[0]) + mapping = { + 'conv2d/kernel': 'conv.weight', + 'tpu_batch_normalization/beta': 'bn.bias', + 'tpu_batch_normalization/gamma': 'bn.weight', + 'tpu_batch_normalization/moving_mean': 'bn.running_mean', + 'tpu_batch_normalization/moving_variance': 'bn.running_var', + } + suffix = mapping['/'.join(seg[2:])] + m[prefix + '.' 
+ suffix] = v + + elif seg[1].startswith('blocks_'): + idx = int(seg[1][7:]) + 1 + prefix = '.'.join(['backbone', 'layers', idx2key[idx]]) + if seg[1] not in is_MBConv: + mapping = { + 'conv2d/kernel': + 'conv1.conv.weight', + 'tpu_batch_normalization/gamma': + 'conv1.bn.weight', + 'tpu_batch_normalization/beta': + 'conv1.bn.bias', + 'tpu_batch_normalization/moving_mean': + 'conv1.bn.running_mean', + 'tpu_batch_normalization/moving_variance': + 'conv1.bn.running_var', + 'conv2d_1/kernel': + 'conv2.conv.weight', + 'tpu_batch_normalization_1/gamma': + 'conv2.bn.weight', + 'tpu_batch_normalization_1/beta': + 'conv2.bn.bias', + 'tpu_batch_normalization_1/moving_mean': + 'conv2.bn.running_mean', + 'tpu_batch_normalization_1/moving_variance': + 'conv2.bn.running_var', + } + else: + + base_mapping = { + 'depthwise_conv2d/depthwise_kernel': + 'depthwise_conv.conv.weight', + 'se/conv2d/kernel': 'se.conv1.conv.weight', + 'se/conv2d/bias': 'se.conv1.conv.bias', + 'se/conv2d_1/kernel': 'se.conv2.conv.weight', + 'se/conv2d_1/bias': 'se.conv2.conv.bias' + } + + if seg[1] not in has_expand_conv: + mapping = { + 'conv2d/kernel': + 'linear_conv.conv.weight', + 'tpu_batch_normalization/beta': + 'depthwise_conv.bn.bias', + 'tpu_batch_normalization/gamma': + 'depthwise_conv.bn.weight', + 'tpu_batch_normalization/moving_mean': + 'depthwise_conv.bn.running_mean', + 'tpu_batch_normalization/moving_variance': + 'depthwise_conv.bn.running_var', + 'tpu_batch_normalization_1/beta': + 'linear_conv.bn.bias', + 'tpu_batch_normalization_1/gamma': + 'linear_conv.bn.weight', + 'tpu_batch_normalization_1/moving_mean': + 'linear_conv.bn.running_mean', + 'tpu_batch_normalization_1/moving_variance': + 'linear_conv.bn.running_var', + } + else: + mapping = { + 'depthwise_conv2d/depthwise_kernel': + 'depthwise_conv.conv.weight', + 'conv2d/kernel': + 'expand_conv.conv.weight', + 'conv2d_1/kernel': + 'linear_conv.conv.weight', + 'tpu_batch_normalization/beta': + 'expand_conv.bn.bias', + 'tpu_batch_normalization/gamma': + 'expand_conv.bn.weight', + 'tpu_batch_normalization/moving_mean': + 'expand_conv.bn.running_mean', + 'tpu_batch_normalization/moving_variance': + 'expand_conv.bn.running_var', + 'tpu_batch_normalization_1/beta': + 'depthwise_conv.bn.bias', + 'tpu_batch_normalization_1/gamma': + 'depthwise_conv.bn.weight', + 'tpu_batch_normalization_1/moving_mean': + 'depthwise_conv.bn.running_mean', + 'tpu_batch_normalization_1/moving_variance': + 'depthwise_conv.bn.running_var', + 'tpu_batch_normalization_2/beta': + 'linear_conv.bn.bias', + 'tpu_batch_normalization_2/gamma': + 'linear_conv.bn.weight', + 'tpu_batch_normalization_2/moving_mean': + 'linear_conv.bn.running_mean', + 'tpu_batch_normalization_2/moving_variance': + 'linear_conv.bn.running_var', + } + mapping.update(base_mapping) + suffix = mapping['/'.join(seg[2:])] + m[prefix + '.' 
+ suffix] = v + elif seg[1] == 'head': + seq_key = idx2key[max_idx + 1] + mapping = { + 'conv2d/kernel': + 'backbone.layers.{}.conv.weight'.format(seq_key), + 'tpu_batch_normalization/beta': + 'backbone.layers.{}.bn.bias'.format(seq_key), + 'tpu_batch_normalization/gamma': + 'backbone.layers.{}.bn.weight'.format(seq_key), + 'tpu_batch_normalization/moving_mean': + 'backbone.layers.{}.bn.running_mean'.format(seq_key), + 'tpu_batch_normalization/moving_variance': + 'backbone.layers.{}.bn.running_var'.format(seq_key), + 'dense/kernel': + 'head.fc.weight', + 'dense/bias': + 'head.fc.bias' + } + key = mapping['/'.join(seg[2:])] + if name.startswith('e') and 'fc' in key: + v = v[1:] + m[key] = v + return m + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('infile', type=str, help='Path to the ckpt.') + parser.add_argument('outfile', type=str, help='Output file.') + parser.add_argument( + '--l2', + action='store_true', + help='If true convert ExponentialMovingAverage weights. ' + 'l2 arch should use it.') + args = parser.parse_args() + assert args.outfile + + outdir = os.path.dirname(os.path.abspath(args.outfile)) + if not os.path.exists(outdir): + os.makedirs(outdir) + weights = read_ckpt(args.infile) + weights = map_key(weights, args.l2) + torch.save(weights, args.outfile) diff --git a/tools/model_converters/efficientnetv2_to_mmpretrain.py b/tools/model_converters/efficientnetv2_to_mmpretrain.py new file mode 100644 index 0000000..5ada7ec --- /dev/null +++ b/tools/model_converters/efficientnetv2_to_mmpretrain.py @@ -0,0 +1,100 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""convert the weights of efficientnetv2 in +timm(https://github.com/rwightman/pytorch-image-models) to mmpretrain +format.""" +import argparse +import os.path as osp + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_from_efficientnetv2_timm(param): + # main change_key + param_lst = list(param.keys()) + op = str(int(param_lst[-9][7]) + 2) + new_key = dict() + for name in param_lst: + data = param[name] + if 'blocks' not in name: + if 'conv_stem' in name: + name = name.replace('conv_stem', 'backbone.layers.0.conv') + if 'bn1' in name: + name = name.replace('bn1', 'backbone.layers.0.bn') + if 'conv_head' in name: + # if efficientnet-v2_s/base/b1/b2/b3,op = 7, + # if for m/l/xl , op = 8 + name = name.replace('conv_head', f'backbone.layers.{op}.conv') + if 'bn2' in name: + name = name.replace('bn2', f'backbone.layers.{op}.bn') + if 'classifier' in name: + name = name.replace('classifier', 'head.fc') + else: + operator = int(name[7]) + if operator == 0: + name = name[:7] + str(operator + 1) + name[8:] + name = name.replace('blocks', 'backbone.layers') + if 'conv' in name: + name = name.replace('conv', 'conv') + if 'bn1' in name: + name = name.replace('bn1', 'bn') + elif operator < 3: + name = name[:7] + str(operator + 1) + name[8:] + name = name.replace('blocks', 'backbone.layers') + if 'conv_exp' in name: + name = name.replace('conv_exp', 'conv1.conv') + if 'conv_pwl' in name: + name = name.replace('conv_pwl', 'conv2.conv') + if 'bn1' in name: + name = name.replace('bn1', 'conv1.bn') + if 'bn2' in name: + name = name.replace('bn2', 'conv2.bn') + else: + name = name[:7] + str(operator + 1) + name[8:] + name = name.replace('blocks', 'backbone.layers') + if 'conv_pwl' in name: + name = name.replace('conv_pwl', 'linear_conv.conv') + if 'conv_pw' in name: + name = name.replace('conv_pw', 'expand_conv.conv') + if 'conv_dw' in name: + name = 
name.replace('conv_dw', 'depthwise_conv.conv') + if 'bn1' in name: + name = name.replace('bn1', 'expand_conv.bn') + if 'bn2' in name: + name = name.replace('bn2', 'depthwise_conv.bn') + if 'bn3' in name: + name = name.replace('bn3', 'linear_conv.bn') + if 'se.conv_reduce' in name: + name = name.replace('se.conv_reduce', 'se.conv1.conv') + if 'se.conv_expand' in name: + name = name.replace('se.conv_expand', 'se.conv2.conv') + new_key[name] = data + return new_key + + +def main(): + parser = argparse.ArgumentParser( + description='Convert pretrained efficientnetv2 ' + 'models in timm to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + weight = convert_from_efficientnetv2_timm(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/eva02_to_mmpretrain.py b/tools/model_converters/eva02_to_mmpretrain.py new file mode 100644 index 0000000..e5a8682 --- /dev/null +++ b/tools/model_converters/eva02_to_mmpretrain.py @@ -0,0 +1,153 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_eva02(ckpt): + + new_ckpt = OrderedDict() + qkv_proj = {} + qkv_bias = {} + w12_weight = {} + w12_bias = {} + + banned = { + 'mask_token', + 'lm_head.weight', + 'lm_head.bias', + 'norm.weight', + 'norm.bias', + } + + for k, v in list(ckpt.items()): + + if k in banned: + continue + + if k.startswith('head'): + new_k = k.replace('head.', 'head.fc.') + new_ckpt[new_k] = v + else: + if k.startswith('patch_embed'): + new_k = k.replace('proj.', 'projection.') + + elif k.startswith('fc_norm') or k.startswith('norm'): + new_k = k.replace('norm.', 'ln2.') + new_k = k.replace('fc_norm.', 'ln2.') + + elif k.startswith('blocks'): + new_k = k.replace('blocks.', 'layers.') + + if 'mlp' in new_k: + if 'w1.' in new_k or 'w2.' in new_k: + # For base and large version, mlp is implemented with + # 2 linears, where w1 and w2 are required to integrate + # into w12. + s = new_k.split('.') # e.g. layers.0.mlp.w1.weight + idx = s[1] + if 'weight' in new_k: + # w1.weight or w2.weight + if idx not in w12_weight: + w12_weight[idx] = {} + w12_weight[idx][s[-2]] = v + else: + # w1.bias or w2.bias + if idx not in w12_bias: + w12_bias[idx] = {} + w12_bias[idx][s[-2]] = v + continue + + if 'ffn_ln' in new_k: + new_k = new_k.replace('ffn_ln.', 'norm.') + + elif 'attn' in new_k: + if 'q_proj.weight' in new_k or \ + 'k_proj.weight' in new_k or \ + 'v_proj.weight' in new_k: + # For base and large version, qkv projection is + # implemented with three linear layers, + s = new_k.split('.') + idx = s[1] + if idx not in qkv_proj: + qkv_proj[idx] = {} + qkv_proj[idx][s[-2]] = v + continue + + if 'q_bias' in new_k or 'v_bias' in new_k: + # k_bias is 0 + s = new_k.split('.') + idx = s[1] + if idx not in qkv_bias: + qkv_bias[idx] = {} + qkv_bias[idx][s[-1]] = v + continue + + else: + new_k = k + + new_k = 'backbone.' 
+ new_k + new_ckpt[new_k] = v + + for idx in qkv_proj: + q_proj = qkv_proj[idx]['q_proj'] + k_proj = qkv_proj[idx]['k_proj'] + v_proj = qkv_proj[idx]['v_proj'] + weight = torch.cat((q_proj, k_proj, v_proj)) + new_k = f'backbone.layers.{idx}.attn.qkv.weight' + new_ckpt[new_k] = weight + + for idx in qkv_bias: + q_bias = qkv_bias[idx]['q_bias'] + k_bias = torch.zeros_like(q_bias) + v_bias = qkv_bias[idx]['v_bias'] + weight = torch.cat((q_bias, k_bias, v_bias)) + new_k = f'backbone.layers.{idx}.attn.qkv.bias' + new_ckpt[new_k] = weight + + for idx in w12_weight: + w1 = w12_weight[idx]['w1'] + w2 = w12_weight[idx]['w2'] + weight = torch.cat((w1, w2)) + new_k = f'backbone.layers.{idx}.mlp.w12.weight' + new_ckpt[new_k] = weight + + for idx in w12_bias: + w1 = w12_bias[idx]['w1'] + w2 = w12_bias[idx]['w2'] + weight = torch.cat((w1, w2)) + new_k = f'backbone.layers.{idx}.mlp.w12.bias' + new_ckpt[new_k] = weight + + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained eva02 ' + 'models to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'module' in checkpoint: + state_dict = checkpoint['module'] + else: + state_dict = checkpoint + + weight = convert_eva02(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/eva_to_mmpretrain.py b/tools/model_converters/eva_to_mmpretrain.py new file mode 100644 index 0000000..227e377 --- /dev/null +++ b/tools/model_converters/eva_to_mmpretrain.py @@ -0,0 +1,76 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_eva(ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + if 'decoder' in k or 'mask_token' in k: + continue + new_v = v + if k.startswith('head'): + new_k = k.replace('head.', 'head.fc.') + new_ckpt[new_k] = new_v + continue + elif k.startswith('patch_embed'): + if 'proj.' in k: + new_k = k.replace('proj.', 'projection.') + else: + new_k = k + elif k.startswith('blocks'): + new_k = k.replace('blocks.', 'layers.') + if 'norm1' in k: + new_k = new_k.replace('norm1', 'ln1') + elif 'norm2' in k: + new_k = new_k.replace('norm2', 'ln2') + elif 'mlp.fc1' in k: + new_k = new_k.replace('mlp.fc1', 'ffn.layers.0.0') + elif 'mlp.fc2' in k: + new_k = new_k.replace('mlp.fc2', 'ffn.layers.1') + elif 'fc_norm' in k: + new_k = k.replace('fc_norm', 'ln2') + elif k.startswith('norm'): + # for mim pretrain + new_k = k.replace('norm', 'ln2') + else: + new_k = k + + if not new_k.startswith('head'): + new_k = 'backbone.' + new_k + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained eva ' + 'models to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. 
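+    # Illustrative invocation (both paths are placeholders, not shipped files):
+    #   python tools/model_converters/eva_to_mmpretrain.py \
+    #       <official_eva_ckpt.pt> <work_dir>/eva_mmpretrain.pth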
+ parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + + weight = convert_eva(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/glip_to_mmpretrain.py b/tools/model_converters/glip_to_mmpretrain.py new file mode 100644 index 0000000..91f3962 --- /dev/null +++ b/tools/model_converters/glip_to_mmpretrain.py @@ -0,0 +1,76 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_glip(ckpt): + + def correct_unfold_reduction_order(x): + out_channel, in_channel = x.shape + x = x.reshape(out_channel, 4, in_channel // 4) + x = x[:, [0, 2, 1, 3], :].transpose(1, + 2).reshape(out_channel, in_channel) + return x + + def correct_unfold_norm_order(x): + in_channel = x.shape[0] + x = x.reshape(4, in_channel // 4) + x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) + return x + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + if 'language_backbone' in k or 'backbone' not in k or 'fpn' in k: + continue + new_v = v + new_k = k.replace('body.', '') + new_k = new_k.replace('module.', '') + if new_k.startswith('backbone.layers'): + new_k = new_k.replace('backbone.layers', 'backbone.stages') + if 'mlp' in new_k: + new_k = new_k.replace('mlp.fc1', 'ffn.layers.0.0') + new_k = new_k.replace('mlp.fc2', 'ffn.layers.1') + elif 'attn' in new_k: + new_k = new_k.replace('attn', 'attn.w_msa') + elif 'patch_embed' in k: + new_k = new_k.replace('proj', 'projection') + elif 'downsample' in new_k: + if 'reduction.' in k: + new_v = correct_unfold_reduction_order(new_v) + elif 'norm.' in k: + new_v = correct_unfold_norm_order(new_v) + + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained glip models to mmcls style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + + weight = convert_glip(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/hornet2mmpretrain.py b/tools/model_converters/hornet2mmpretrain.py new file mode 100644 index 0000000..667a94c --- /dev/null +++ b/tools/model_converters/hornet2mmpretrain.py @@ -0,0 +1,62 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
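+# Remaps official HorNet checkpoint keys to MMPretrain names (e.g.
+# 'gnconv.pws' -> 'gnconv.projs', 'gamma1'/'gamma2' -> 'gamma1.weight'/
+# 'gamma2.weight', 'head.' -> 'head.fc.') and prefixes backbone weights with
+# 'backbone.'. Illustrative usage (paths are placeholders):
+#   python tools/model_converters/hornet2mmpretrain.py <hornet_official.pth> <hornet_mmpretrain.pth>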
+import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_hornet(ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('head'): + new_k = k.replace('head.', 'head.fc.') + new_ckpt[new_k] = new_v + continue + elif k.startswith('norm'): + new_k = k.replace('norm.', 'norm3.') + elif 'gnconv.pws' in k: + new_k = k.replace('gnconv.pws', 'gnconv.projs') + elif 'gamma1' in k: + new_k = k.replace('gamma1', 'gamma1.weight') + elif 'gamma2' in k: + new_k = k.replace('gamma2', 'gamma2.weight') + else: + new_k = k + + if not new_k.startswith('head'): + new_k = 'backbone.' + new_k + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained hornet ' + 'models to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + + weight = convert_hornet(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/levit2mmpretrain.py b/tools/model_converters/levit2mmpretrain.py new file mode 100644 index 0000000..4e28e28 --- /dev/null +++ b/tools/model_converters/levit2mmpretrain.py @@ -0,0 +1,80 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch + + +def convert_levit(args, ckpt): + new_ckpt = OrderedDict() + stage = 0 + block = 0 + change = True + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('head_dist'): + new_k = k.replace('head_dist.', 'head.head_dist.') + new_k = new_k.replace('.l.', '.linear.') + new_ckpt[new_k] = new_v + continue + elif k.startswith('head'): + new_k = k.replace('head.', 'head.head.') + new_k = new_k.replace('.l.', '.linear.') + new_ckpt[new_k] = new_v + continue + elif k.startswith('patch_embed'): + new_k = k.replace('patch_embed.', + 'patch_embed.patch_embed.').replace( + '.c.', '.conv.') + elif k.startswith('blocks'): + strs = k.split('.') + # new_k = k.replace('.c.', '.').replace('.bn.', '.') + new_k = k + if '.m.' in k: + new_k = new_k.replace('.m.0', '.m.linear1') + new_k = new_k.replace('.m.2', '.m.linear2') + new_k = new_k.replace('.m.', '.block.') + change = True + elif change: + stage += 1 + block = int(strs[1]) + change = False + new_k = new_k.replace( + 'blocks.%s.' % (strs[1]), + 'stages.%d.%d.' % (stage, int(strs[1]) - block)) + new_k = new_k.replace('.c.', '.linear.') + else: + new_k = k + # print(new_k) + new_k = 'backbone.' + new_k + new_ckpt[new_k] = new_v + + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in timm pretrained vit models to ' + 'MMPretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. 
+ parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = torch.load(args.src, map_location='cpu') + checkpoint = checkpoint['model'] + if 'state_dict' in checkpoint: + # timm checkpoint + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + weight = convert_levit(args, state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/llava-delta2mmpre.py b/tools/model_converters/llava-delta2mmpre.py new file mode 100644 index 0000000..104ed07 --- /dev/null +++ b/tools/model_converters/llava-delta2mmpre.py @@ -0,0 +1,79 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict +from itertools import chain +from pathlib import Path + +import torch +from huggingface_hub import snapshot_download +from transformers.modeling_utils import load_state_dict + +prog_description = """\ +Convert Llava weights and original weights. +""" + + +def parse_args(): + parser = argparse.ArgumentParser(description=prog_description) + parser.add_argument('src', type=str, help='The original checkpoint dir') + parser.add_argument('dst', type=str, help='The saved checkpoint path') + parser.add_argument('--delta', type=str, help='The delta checkpoint dir') + args = parser.parse_args() + return args + + +def load_checkpoint(path: Path): + path = Path(path) + if path.is_file(): + return torch.load(path) + + state_dict = OrderedDict() + for ckpt in chain( + path.rglob('*.bin'), path.rglob('*.pth'), + path.rglob('*.safetensors')): + state_dict.update(load_state_dict(str(ckpt))) + + return state_dict + + +def main(): + args = parse_args() + + if Path(args.src).exists(): + src_path = args.src + else: + src_path = snapshot_download( + args.src, allow_patterns='pytorch_model*.bin') + src_state_dict = load_checkpoint(src_path) + + if args.delta is None: + delta_state_dict = {} + elif Path(args.delta).exists(): + delta_state_dict = load_checkpoint(args.delta) + else: + delta_path = snapshot_download( + args.delta, allow_patterns='pytorch_model*.bin') + delta_state_dict = load_checkpoint(delta_path) + + new_state_dict = OrderedDict() + for k, v in src_state_dict.items(): + if k in delta_state_dict: + delta_v = delta_state_dict.pop(k) + if k in ['model.embed_tokens.weight', 'lm_head.weight']: + h, w = v.shape[:2] + delta_v[:h, :w] += v + v = delta_v + else: + v += delta_v + if 'rotary_emb.inv_freq' not in k: + new_state_dict['model.lang_encoder.' + k] = v + + for k, v in delta_state_dict.items(): + new_state_dict['model.lang_encoder.' + k] = v + + torch.save(new_state_dict, args.dst) + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/merge_lora_weight.py b/tools/model_converters/merge_lora_weight.py new file mode 100644 index 0000000..fc51f9f --- /dev/null +++ b/tools/model_converters/merge_lora_weight.py @@ -0,0 +1,90 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from pathlib import Path + +import torch +from mmengine.config import Config + +from mmpretrain.registry import MODELS + + +@torch.no_grad() +def merge_lora_weight(cfg, lora_weight): + """Merge base weight and lora weight. + + Args: + cfg (dict): config for LoRAModel. + lora_weight (dict): weight dict from LoRAModel. + Returns: + Merged weight. 
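+
+    Note:
+        For every LoRA-adapted layer recorded in the checkpoint, the merged
+        weight is ``W + lora_up @ lora_down * scaling`` (see the loop below),
+        where ``scaling`` is read from the matching LoRALinear via
+        :func:`get_scaling`.
+
+    Example (hypothetical paths, for illustration only):
+        >>> cfg = Config.fromfile('configs/my_lora_config.py')
+        >>> lora_ckpt = torch.load('work_dirs/lora.pth', map_location='cpu')
+        >>> merged = merge_lora_weight(cfg, lora_ckpt)
+        >>> torch.save(merged, 'work_dirs/merged.pth')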
+ """ + temp = dict() + mapping = dict() + for name, param in lora_weight['state_dict'].items(): + # backbone.module.layers.11.attn.qkv.lora_down.weight + if '.lora_' in name: + lora_split = name.split('.') + prefix = '.'.join(lora_split[:-2]) + if prefix not in mapping: + mapping[prefix] = dict() + lora_type = lora_split[-2] + mapping[prefix][lora_type] = param + else: + temp[name] = param + + model = MODELS.build(cfg['model']) + for name, param in model.named_parameters(): + if name in temp or '.lora_' in name: + continue + else: + name_split = name.split('.') + prefix = prefix = '.'.join(name_split[:-2]) + if prefix in mapping: + name_split.pop(-2) + if name_split[-1] == 'weight': + scaling = get_scaling(model, prefix) + lora_down = mapping[prefix]['lora_down'] + lora_up = mapping[prefix]['lora_up'] + param += lora_up @ lora_down * scaling + name_split.pop(1) + name = '.'.join(name_split) + temp[name] = param + + result = dict() + result['state_dict'] = temp + result['meta'] = lora_weight['meta'] + return result + + +def get_scaling(model, prefix): + """Get the scaling of target layer. + + Args: + model (LoRAModel): the LoRAModel. + prefix (str): the prefix of the layer. + Returns: + the scale of the LoRALinear. + """ + prefix_split = prefix.split('.') + for i in prefix_split: + model = getattr(model, i) + return model.scaling + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Merge LoRA weight') + parser.add_argument('cfg', help='cfg path') + parser.add_argument('src', help='src lora model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + dst = Path(args.dst) + if dst.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit(1) + dst.parent.mkdir(parents=True, exist_ok=True) + + cfg = Config.fromfile(args.cfg) + lora_model = torch.load(args.src, map_location='cpu') + + merged_model = merge_lora_weight(cfg, lora_model) + torch.save(merged_model, args.dst) diff --git a/tools/model_converters/mixmim_to_mmpretrain.py b/tools/model_converters/mixmim_to_mmpretrain.py new file mode 100644 index 0000000..b31bb00 --- /dev/null +++ b/tools/model_converters/mixmim_to_mmpretrain.py @@ -0,0 +1,99 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
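+# Remaps MixMIM checkpoint keys to MMPretrain names ('proj' -> 'projection',
+# 'norm1'/'norm2' -> 'ln1'/'ln2', 'mlp.fc*' -> 'ffn.layers.*') and reorders the
+# PatchMerging 'reduction'/'norm' weights from the timm unfold order to the
+# MMCV order. Illustrative usage (paths are placeholders):
+#   python tools/model_converters/mixmim_to_mmpretrain.py <mixmim_official.pth> <mixmim_mmpretrain.pth>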
+import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def correct_unfold_reduction_order(x: torch.Tensor): + out_channel, in_channel = x.shape + x = x.reshape(out_channel, 4, in_channel // 4) + x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel) + return x + + +def correct_unfold_norm_order(x): + in_channel = x.shape[0] + x = x.reshape(4, in_channel // 4) + x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) + return x + + +def convert_mixmim(ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + + if k.startswith('patch_embed'): + new_k = k.replace('proj', 'projection') + + elif k.startswith('layers'): + if 'norm1' in k: + new_k = k.replace('norm1', 'ln1') + elif 'norm2' in k: + new_k = k.replace('norm2', 'ln2') + elif 'mlp.fc1' in k: + new_k = k.replace('mlp.fc1', 'ffn.layers.0.0') + elif 'mlp.fc2' in k: + new_k = k.replace('mlp.fc2', 'ffn.layers.1') + else: + new_k = k + + elif k.startswith('norm') or k.startswith('absolute_pos_embed'): + new_k = k + + elif k.startswith('head'): + new_k = k.replace('head.', 'head.fc.') + + else: + raise ValueError + + # print(new_k) + if not new_k.startswith('head'): + new_k = 'backbone.' + new_k + + if 'downsample' in new_k: + print('Covert {} in PatchMerging from timm to mmcv format!'.format( + new_k)) + + if 'reduction' in new_k: + new_v = correct_unfold_reduction_order(new_v) + elif 'norm' in new_k: + new_v = correct_unfold_norm_order(new_v) + + new_ckpt[new_k] = new_v + + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained mixmim ' + 'models to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + + weight = convert_mixmim(state_dict) + # weight = convert_official_mixmim(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/mlpmixer_to_mmpretrain.py b/tools/model_converters/mlpmixer_to_mmpretrain.py new file mode 100644 index 0000000..e101514 --- /dev/null +++ b/tools/model_converters/mlpmixer_to_mmpretrain.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from pathlib import Path + +import torch + + +def convert_weights(weight): + """Weight Converter. + + Converts the weights from timm to mmpretrain + + Args: + weight (dict): weight dict from timm + + Returns: converted weight dict for mmpretrain + """ + result = dict() + result['meta'] = dict() + temp = dict() + mapping = { + 'stem': 'patch_embed', + 'proj': 'projection', + 'mlp_tokens.fc1': 'token_mix.layers.0.0', + 'mlp_tokens.fc2': 'token_mix.layers.1', + 'mlp_channels.fc1': 'channel_mix.layers.0.0', + 'mlp_channels.fc2': 'channel_mix.layers.1', + 'norm1': 'ln1', + 'norm2': 'ln2', + 'norm.': 'ln1.', + 'blocks': 'layers' + } + for k, v in weight.items(): + for mk, mv in mapping.items(): + if mk in k: + k = k.replace(mk, mv) + if k.startswith('head.'): + temp['head.fc.' + k[5:]] = v + else: + temp['backbone.' 
+ k] = v + result['state_dict'] = temp + return result + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + dst = Path(args.dst) + if dst.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit(1) + dst.parent.mkdir(parents=True, exist_ok=True) + + original_model = torch.load(args.src, map_location='cpu') + converted_model = convert_weights(original_model) + torch.save(converted_model, args.dst) diff --git a/tools/model_converters/mobilenetv2_to_mmpretrain.py b/tools/model_converters/mobilenetv2_to_mmpretrain.py new file mode 100644 index 0000000..878f737 --- /dev/null +++ b/tools/model_converters/mobilenetv2_to_mmpretrain.py @@ -0,0 +1,135 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict + +import torch + + +def convert_conv1(model_key, model_weight, state_dict, converted_names): + if model_key.find('features.0.0') >= 0: + new_key = model_key.replace('features.0.0', 'backbone.conv1.conv') + else: + new_key = model_key.replace('features.0.1', 'backbone.conv1.bn') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_conv5(model_key, model_weight, state_dict, converted_names): + if model_key.find('features.18.0') >= 0: + new_key = model_key.replace('features.18.0', 'backbone.conv2.conv') + else: + new_key = model_key.replace('features.18.1', 'backbone.conv2.bn') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_head(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('classifier.1', 'head.fc') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_block(model_key, model_weight, state_dict, converted_names): + split_keys = model_key.split('.') + layer_id = int(split_keys[1]) + new_layer_id = 0 + sub_id = 0 + if layer_id == 1: + new_layer_id = 1 + sub_id = 0 + elif layer_id in range(2, 4): + new_layer_id = 2 + sub_id = layer_id - 2 + elif layer_id in range(4, 7): + new_layer_id = 3 + sub_id = layer_id - 4 + elif layer_id in range(7, 11): + new_layer_id = 4 + sub_id = layer_id - 7 + elif layer_id in range(11, 14): + new_layer_id = 5 + sub_id = layer_id - 11 + elif layer_id in range(14, 17): + new_layer_id = 6 + sub_id = layer_id - 14 + elif layer_id == 17: + new_layer_id = 7 + sub_id = 0 + + new_key = model_key.replace(f'features.{layer_id}', + f'backbone.layer{new_layer_id}.{sub_id}') + if new_layer_id == 1: + if new_key.find('conv.0.0') >= 0: + new_key = new_key.replace('conv.0.0', 'conv.0.conv') + elif new_key.find('conv.0.1') >= 0: + new_key = new_key.replace('conv.0.1', 'conv.0.bn') + elif new_key.find('conv.1') >= 0: + new_key = new_key.replace('conv.1', 'conv.1.conv') + elif new_key.find('conv.2') >= 0: + new_key = new_key.replace('conv.2', 'conv.1.bn') + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + else: + if new_key.find('conv.0.0') >= 0: + new_key = new_key.replace('conv.0.0', 'conv.0.conv') + elif new_key.find('conv.0.1') >= 0: + new_key = new_key.replace('conv.0.1', 'conv.0.bn') + elif new_key.find('conv.1.0') >= 0: + new_key = new_key.replace('conv.1.0', 'conv.1.conv') + elif new_key.find('conv.1.1') >= 0: + 
new_key = new_key.replace('conv.1.1', 'conv.1.bn') + elif new_key.find('conv.2') >= 0: + new_key = new_key.replace('conv.2', 'conv.2.conv') + elif new_key.find('conv.3') >= 0: + new_key = new_key.replace('conv.3', 'conv.2.bn') + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + print(f'Convert {model_key} to {new_key}') + state_dict[new_key] = model_weight + converted_names.add(model_key) + + +def convert(src, dst): + """Convert keys in torchvision pretrained MobileNetV2 models to mmpretrain + style.""" + + # load pytorch model + blobs = torch.load(src, map_location='cpu') + + # convert to pytorch style + state_dict = OrderedDict() + converted_names = set() + + for key, weight in blobs.items(): + if 'features.0' in key: + convert_conv1(key, weight, state_dict, converted_names) + elif 'classifier' in key: + convert_head(key, weight, state_dict, converted_names) + elif 'features.18' in key: + convert_conv5(key, weight, state_dict, converted_names) + else: + convert_block(key, weight, state_dict, converted_names) + + # check if all layers are converted + for key in blobs: + if key not in converted_names: + print(f'not converted: {key}') + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/ofa.py b/tools/model_converters/ofa.py new file mode 100644 index 0000000..142c7ac --- /dev/null +++ b/tools/model_converters/ofa.py @@ -0,0 +1,111 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import re +from collections import OrderedDict, namedtuple +from pathlib import Path + +import torch + +prog_description = """\ +Convert OFA official models to MMPretrain format. 
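+
+Example (illustrative placeholder paths):
+    python tools/model_converters/ofa.py <official_ofa_ckpt.pt> <ofa_mmpretrain.pth>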
+""" + +MapItem = namedtuple( + 'MapItem', 'pattern repl key_action value_action', defaults=[None] * 4) + + +def convert_by_mapdict(src_dict: dict, map_dict: Path): + dst_dict = OrderedDict() + convert_map_dict = dict() + + for k, v in src_dict.items(): + ori_k = k + for item in map_dict: + pattern = item.pattern + assert pattern is not None + match = next(re.finditer(pattern, k), None) + if match is None: + continue + match_group = match.groups() + repl = item.repl + + key_action = item.key_action + if key_action is not None: + assert callable(key_action) + match_group = key_action(*match_group) + if isinstance(match_group, str): + match_group = (match_group, ) + start, end = match.span(0) + if repl is not None: + k = k[:start] + repl.format(*match_group) + k[end:] + else: + for i, sub in enumerate(match_group): + start, end = match.span(i + 1) + k = k[:start] + str(sub) + k[end:] + + value_action = item.value_action + if value_action is not None: + assert callable(value_action) + v = value_action(v) + + if v is not None: + dst_dict[k] = v + convert_map_dict[k] = ori_k + return dst_dict, convert_map_dict + + +map_dict = [ + # Encoder modules + MapItem(r'\.type_embedding\.', '.embed_type.'), + MapItem(r'\.layernorm_embedding\.', '.embedding_ln.'), + MapItem(r'\.patch_layernorm_embedding\.', '.image_embedding_ln.'), + MapItem(r'encoder.layer_norm\.', 'encoder.final_ln.'), + # Encoder layers + MapItem(r'\.attn_ln\.', '.attn_mid_ln.'), + MapItem(r'\.ffn_layernorm\.', '.ffn_mid_ln.'), + MapItem(r'\.final_layer_norm', '.ffn_ln'), + MapItem(r'encoder.*(\.self_attn\.)', key_action=lambda _: '.attn.'), + MapItem( + r'encoder.*(\.self_attn_layer_norm\.)', + key_action=lambda _: '.attn_ln.'), + # Decoder modules + MapItem(r'\.code_layernorm_embedding\.', '.code_embedding_ln.'), + MapItem(r'decoder.layer_norm\.', 'decoder.final_ln.'), + # Decoder layers + MapItem(r'\.self_attn_ln', '.self_attn_mid_ln'), + MapItem(r'\.cross_attn_ln', '.cross_attn_mid_ln'), + MapItem(r'\.encoder_attn_layer_norm', '.cross_attn_ln'), + MapItem(r'\.encoder_attn', '.cross_attn'), + MapItem( + r'decoder.*(\.self_attn_layer_norm\.)', + key_action=lambda _: '.self_attn_ln.'), + # Remove version key + MapItem(r'version', '', value_action=lambda _: None), + # Add model prefix + MapItem(r'^', 'model.'), +] + + +def parse_args(): + parser = argparse.ArgumentParser(description=prog_description) + parser.add_argument('src', type=str, help='The official checkpoint path.') + parser.add_argument('dst', type=str, help='The save path.') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + src = torch.load(args.src) + if 'extra_state' in src and 'ema' in src['extra_state']: + print('Use EMA weights.') + src = src['extra_state']['ema'] + else: + src = src['model'] + dst, _ = convert_by_mapdict(src, map_dict) + torch.save(dst, args.dst) + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/openai-clip_to_mmpretrain-clip.py b/tools/model_converters/openai-clip_to_mmpretrain-clip.py new file mode 100644 index 0000000..7272550 --- /dev/null +++ b/tools/model_converters/openai-clip_to_mmpretrain-clip.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
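+# Renames the visual tower of an OpenAI CLIP checkpoint to MMPretrain's ViT
+# naming ('visual.transformer.resblocks' -> 'layers', 'ln_1'/'ln_2' ->
+# 'ln1'/'ln2', 'attn.in_proj_*' -> 'qkv.*'); keys outside 'visual.' are kept
+# unchanged. Illustrative usage (paths are placeholders):
+#   python tools/model_converters/openai-clip_to_mmpretrain-clip.py <clip_official.pt> <clip_mmpretrain.pth>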
+import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_clip(ckpt): + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('visual.conv1'): + new_k = k.replace('conv1', 'patch_embed.projection') + elif k.startswith('visual.positional_embedding'): + new_k = k.replace('positional_embedding', 'pos_embed') + new_v = v.unsqueeze(dim=0) + elif k.startswith('visual.class_embedding'): + new_k = k.replace('class_embedding', 'cls_token') + new_v = v.unsqueeze(dim=0).unsqueeze(dim=0) + elif k.startswith('visual.ln_pre'): + new_k = k.replace('ln_pre', 'pre_norm') + elif k.startswith('visual.transformer.resblocks'): + new_k = k.replace('transformer.resblocks', 'layers') + if 'ln_1' in k: + new_k = new_k.replace('ln_1', 'ln1') + elif 'ln_2' in k: + new_k = new_k.replace('ln_2', 'ln2') + elif 'mlp.c_fc' in k: + new_k = new_k.replace('mlp.c_fc', 'ffn.layers.0.0') + elif 'mlp.c_proj' in k: + new_k = new_k.replace('mlp.c_proj', 'ffn.layers.1') + elif 'attn.in_proj_weight' in k: + new_k = new_k.replace('in_proj_weight', 'qkv.weight') + elif 'attn.in_proj_bias' in k: + new_k = new_k.replace('in_proj_bias', 'qkv.bias') + elif 'attn.out_proj' in k: + new_k = new_k.replace('out_proj', 'proj') + elif k.startswith('visual.ln_post'): + new_k = k.replace('ln_post', 'ln1') + elif k.startswith('visual.proj'): + new_k = k.replace('visual.proj', 'visual_proj.proj') + else: + new_k = k + + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained clip ' + 'models to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + weight = convert_clip(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/otter2mmpre.py b/tools/model_converters/otter2mmpre.py new file mode 100644 index 0000000..5824518 --- /dev/null +++ b/tools/model_converters/otter2mmpre.py @@ -0,0 +1,66 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import re +from collections import OrderedDict +from itertools import chain +from pathlib import Path + +import torch + +prog_description = """\ +Convert Official Otter HF models to MMPreTrain format. 
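+
+Only a subset of weights is kept (perceiver, token embeddings, gated
+cross-attention layers and rotary embeddings); the converted file is written
+to the current directory as '<model_name>.pth'. Example with an illustrative
+placeholder:
+    python tools/model_converters/otter2mmpre.py <otter_hf_name_or_local_dir>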
+""" + + +def parse_args(): + parser = argparse.ArgumentParser(description=prog_description) + parser.add_argument( + 'name_or_dir', type=str, help='The Otter HF model name or directory.') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + if not Path(args.name_or_dir).is_dir(): + from huggingface_hub import snapshot_download + ckpt_dir = Path( + snapshot_download(args.name_or_dir, allow_patterns='*.bin')) + name = args.name_or_dir.replace('/', '_') + else: + ckpt_dir = Path(args.name_or_dir) + name = ckpt_dir.name + + state_dict = OrderedDict() + for k, v in chain.from_iterable( + torch.load(ckpt).items() for ckpt in ckpt_dir.glob('*.bin')): + adapter_patterns = [ + r'^perceiver', + r'lang_encoder.*embed_tokens', + r'lang_encoder.*gated_cross_attn_layer', + r'lang_encoder.*rotary_emb', + ] + if not any(re.match(pattern, k) for pattern in adapter_patterns): + # Drop encoder parameters to decrease the size. + continue + + # The keys are different between Open-Flamingo and Otter + if 'gated_cross_attn_layer.feed_forward' in k: + k = k.replace('feed_forward', 'ff') + if 'perceiver.layers' in k: + prefix_match = re.match(r'perceiver.layers.\d+.', k) + prefix = k[:prefix_match.end()] + suffix = k[prefix_match.end():] + if 'feed_forward' in k: + k = prefix + '1.' + suffix.replace('feed_forward.', '') + else: + k = prefix + '0.' + suffix + state_dict[k] = v + if len(state_dict) == 0: + raise RuntimeError('No checkpoint found in the specified directory.') + + torch.save(state_dict, name + '.pth') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/publish_model.py b/tools/model_converters/publish_model.py new file mode 100644 index 0000000..d5ead57 --- /dev/null +++ b/tools/model_converters/publish_model.py @@ -0,0 +1,108 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import datetime +import hashlib +import shutil +import warnings +from collections import OrderedDict +from pathlib import Path + +import torch + +import mmpretrain + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Process a checkpoint to be published') + parser.add_argument('in_file', help='input checkpoint filename') + parser.add_argument('out_file', help='output checkpoint filename') + parser.add_argument( + '--no-ema', + action='store_true', + help='Use keys in `ema_state_dict` (no-ema keys).') + parser.add_argument( + '--dataset-type', + type=str, + help='The type of the dataset. 
If the checkpoint is converted ' + 'from other repository, this option is used to fill the dataset ' + 'meta information to the published checkpoint, like "ImageNet", ' + '"CIFAR10" and others.') + args = parser.parse_args() + return args + + +def process_checkpoint(in_file, out_file, args): + checkpoint = torch.load(in_file, map_location='cpu') + # remove unnecessary fields for smaller file size + for key in ['optimizer', 'param_schedulers', 'hook_msgs', 'message_hub']: + checkpoint.pop(key, None) + + # For checkpoint converted from the official weight + if 'state_dict' not in checkpoint: + checkpoint = dict(state_dict=checkpoint) + + meta = checkpoint.get('meta', {}) + meta.setdefault('mmpretrain_version', mmpretrain.__version__) + + # handle dataset meta information + if args.dataset_type is not None: + from mmpretrain.registry import DATASETS + dataset_class = DATASETS.get(args.dataset_type) + dataset_meta = getattr(dataset_class, 'METAINFO', {}) + else: + dataset_meta = {} + + meta.setdefault('dataset_meta', dataset_meta) + + if len(meta['dataset_meta']) == 0: + warnings.warn('Missing dataset meta information.') + + checkpoint['meta'] = meta + + ema_state_dict = OrderedDict() + if 'ema_state_dict' in checkpoint: + for k, v in checkpoint['ema_state_dict'].items(): + # The ema static dict has some extra fields + if k.startswith('module.'): + origin_k = k[len('module.'):] + assert origin_k in checkpoint['state_dict'] + ema_state_dict[origin_k] = v + del checkpoint['ema_state_dict'] + print('The input checkpoint has EMA weights, ', end='') + if args.no_ema: + # The values stored in `ema_state_dict` is original values. + print('and drop the EMA weights.') + assert ema_state_dict.keys() <= checkpoint['state_dict'].keys() + checkpoint['state_dict'].update(ema_state_dict) + else: + print('and use the EMA weights.') + + temp_out_file = Path(out_file).with_name('temp_' + Path(out_file).name) + torch.save(checkpoint, temp_out_file) + + with open(temp_out_file, 'rb') as f: + sha = hashlib.sha256(f.read()).hexdigest()[:8] + if out_file.endswith('.pth'): + out_file_name = out_file[:-4] + else: + out_file_name = out_file + + current_date = datetime.datetime.now().strftime('%Y%m%d') + final_file = out_file_name + f'_{current_date}-{sha[:8]}.pth' + shutil.move(temp_out_file, final_file) + + print(f'Successfully generated the publish-ckpt as {final_file}.') + + +def main(): + args = parse_args() + out_dir = Path(args.out_file).parent + if not out_dir.exists(): + raise ValueError(f'Directory {out_dir} does not exist, ' + 'please generate it manually.') + process_checkpoint(args.in_file, args.out_file, args) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/ram2mmpretrain.py b/tools/model_converters/ram2mmpretrain.py new file mode 100644 index 0000000..5ee3b47 --- /dev/null +++ b/tools/model_converters/ram2mmpretrain.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
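+# Converts a RAM checkpoint by remapping the keys of its Swin-Transformer
+# visual encoder ('visual_encoder.*') to MMPretrain's Swin naming; remaining
+# keys are copied through unchanged and 'attn_mask' buffers are dropped.
+# Illustrative usage (paths are placeholders):
+#   python tools/model_converters/ram2mmpretrain.py <ram_official.pth> <ram_mmpretrain.pth>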
+import argparse +import os.path as osp +from collections import OrderedDict +from copy import deepcopy + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_swin(ckpt): + new_ckpt = OrderedDict() + convert_mapping = dict() + + def correct_unfold_reduction_order(x): + out_channel, in_channel = x.shape + x = x.reshape(out_channel, 4, in_channel // 4) + x = x[:, [0, 2, 1, 3], :].transpose(1, + 2).reshape(out_channel, in_channel) + return x + + def correct_unfold_norm_order(x): + in_channel = x.shape[0] + x = x.reshape(4, in_channel // 4) + x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel) + return x + + for k, v in ckpt.items(): + if 'attn_mask' in k: + continue + if k.startswith('head'): + continue + elif k.startswith('layers'): + new_v = v + if 'attn.' in k: + new_k = k.replace('attn.', 'attn.w_msa.') + elif 'mlp.' in k: + if 'mlp.fc1.' in k: + new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.') + elif 'mlp.fc2.' in k: + new_k = k.replace('mlp.fc2.', 'ffn.layers.1.') + else: + new_k = k.replace('mlp.', 'ffn.') + elif 'downsample' in k: + new_k = k + if 'reduction.' in k: + new_v = correct_unfold_reduction_order(v) + elif 'norm.' in k: + new_v = correct_unfold_norm_order(v) + else: + new_k = k + new_k = new_k.replace('layers', 'stages', 1) + elif k.startswith('patch_embed'): + new_v = v + if 'proj' in k: + new_k = k.replace('proj', 'projection') + else: + new_k = k + elif k.startswith('norm'): + new_v = v + new_k = k.replace('norm', 'norm3') + else: + new_v = v + new_k = k + + new_ckpt[new_k] = new_v + convert_mapping[k] = new_k + + return new_ckpt, convert_mapping + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in official pretrained RAM models to' + 'MMPretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + elif 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + + visual_ckpt = OrderedDict() + for key in state_dict: + if key.startswith('visual_encoder.'): + new_key = key.replace('visual_encoder.', '') + visual_ckpt[new_key] = state_dict[key] + + new_visual_ckpt, convert_mapping = convert_swin(visual_ckpt) + new_ckpt = deepcopy(state_dict) + for key in state_dict: + if key.startswith('visual_encoder.'): + if 'attn_mask' in key: + del new_ckpt[key] + continue + del new_ckpt[key] + old_key = key.replace('visual_encoder.', '') + new_ckpt[key.replace(old_key, + convert_mapping[old_key])] = deepcopy( + new_visual_ckpt[key.replace( + old_key, + convert_mapping[old_key]).replace( + 'visual_encoder.', '')]) + + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(new_ckpt, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/reparameterize_model.py b/tools/model_converters/reparameterize_model.py new file mode 100644 index 0000000..49a3913 --- /dev/null +++ b/tools/model_converters/reparameterize_model.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
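+# Re-parameterizes a trained model for deployment: the backbone must implement
+# 'switch_to_deploy()', which is called before the fused weights are written
+# back into the checkpoint. Illustrative usage (paths are placeholders):
+#   python tools/model_converters/reparameterize_model.py <config.py> <train_ckpt.pth> <deploy_ckpt.pth>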
+import argparse +from pathlib import Path + +import torch + +from mmpretrain.apis import init_model +from mmpretrain.models.classifiers import ImageClassifier + + +def convert_classifier_to_deploy(model, checkpoint, save_path): + print('Converting...') + assert hasattr(model, 'backbone') and \ + hasattr(model.backbone, 'switch_to_deploy'), \ + '`model.backbone` must has method of "switch_to_deploy".' \ + f' But {model.backbone.__class__} does not have.' + + model.backbone.switch_to_deploy() + checkpoint['state_dict'] = model.state_dict() + torch.save(checkpoint, save_path) + + print('Done! Save at path "{}"'.format(save_path)) + + +def main(): + parser = argparse.ArgumentParser( + description='Convert the parameters of the repvgg block ' + 'from training mode to deployment mode.') + parser.add_argument( + 'config_path', + help='The path to the configuration file of the network ' + 'containing the repvgg block.') + parser.add_argument( + 'checkpoint_path', + help='The path to the checkpoint file corresponding to the model.') + parser.add_argument( + 'save_path', + help='The path where the converted checkpoint file is stored.') + args = parser.parse_args() + + save_path = Path(args.save_path) + if save_path.suffix != '.pth' and save_path.suffix != '.tar': + print('The path should contain the name of the pth format file.') + exit() + save_path.parent.mkdir(parents=True, exist_ok=True) + + model = init_model( + args.config_path, checkpoint=args.checkpoint_path, device='cpu') + assert isinstance(model, ImageClassifier), \ + '`model` must be a `mmpretrain.classifiers.ImageClassifier` instance.' + + checkpoint = torch.load(args.checkpoint_path) + convert_classifier_to_deploy(model, checkpoint, args.save_path) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/replknet_to_mmpretrain.py b/tools/model_converters/replknet_to_mmpretrain.py new file mode 100644 index 0000000..584b440 --- /dev/null +++ b/tools/model_converters/replknet_to_mmpretrain.py @@ -0,0 +1,58 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
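+# Remaps official RepLKNet keys ('stem'/'stages'/'transitions'/'norm' to the
+# corresponding 'backbone.*' entries, 'head' to 'head.fc') into MMPretrain
+# style. Illustrative usage (paths are placeholders):
+#   python tools/model_converters/replknet_to_mmpretrain.py <replknet_official.pth> <replknet_mmpretrain.pth>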
+import argparse +from collections import OrderedDict +from pathlib import Path + +import torch + + +def convert(src, dst): + print('Converting...') + blobs = torch.load(src, map_location='cpu') + converted_state_dict = OrderedDict() + + for key in blobs: + splited_key = key.split('.') + print(splited_key) + splited_key = [ + 'backbone.stem' if i[:4] == 'stem' else i for i in splited_key + ] + splited_key = [ + 'backbone.stages' if i[:6] == 'stages' else i for i in splited_key + ] + splited_key = [ + 'backbone.transitions' if i[:11] == 'transitions' else i + for i in splited_key + ] + splited_key = [ + 'backbone.stages.3.norm' if i[:4] == 'norm' else i + for i in splited_key + ] + splited_key = [ + 'head.fc' if i[:4] == 'head' else i for i in splited_key + ] + + new_key = '.'.join(splited_key) + converted_state_dict[new_key] = blobs[key] + + torch.save(converted_state_dict, dst) + print('Done!') + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + dst = Path(args.dst) + if dst.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit(1) + dst.parent.mkdir(parents=True, exist_ok=True) + + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/repvgg_to_mmpretrain.py b/tools/model_converters/repvgg_to_mmpretrain.py new file mode 100644 index 0000000..b7a1f05 --- /dev/null +++ b/tools/model_converters/repvgg_to_mmpretrain.py @@ -0,0 +1,60 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict +from pathlib import Path + +import torch + + +def convert(src, dst): + print('Converting...') + blobs = torch.load(src, map_location='cpu') + converted_state_dict = OrderedDict() + + for key in blobs: + splited_key = key.split('.') + splited_key = ['norm' if i == 'bn' else i for i in splited_key] + splited_key = [ + 'branch_norm' if i == 'rbr_identity' else i for i in splited_key + ] + splited_key = [ + 'branch_1x1' if i == 'rbr_1x1' else i for i in splited_key + ] + splited_key = [ + 'branch_3x3' if i == 'rbr_dense' else i for i in splited_key + ] + splited_key = [ + 'backbone.stem' if i[:6] == 'stage0' else i for i in splited_key + ] + splited_key = [ + 'backbone.stage_' + i[5] if i[:5] == 'stage' else i + for i in splited_key + ] + splited_key = ['se_layer' if i == 'se' else i for i in splited_key] + splited_key = ['conv1.conv' if i == 'down' else i for i in splited_key] + splited_key = ['conv2.conv' if i == 'up' else i for i in splited_key] + splited_key = ['head.fc' if i == 'linear' else i for i in splited_key] + new_key = '.'.join(splited_key) + converted_state_dict[new_key] = blobs[key] + + torch.save(converted_state_dict, dst) + print('Done!') + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + dst = Path(args.dst) + if dst.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit(1) + dst.parent.mkdir(parents=True, exist_ok=True) + + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/revvit_to_mmpretrain.py b/tools/model_converters/revvit_to_mmpretrain.py new file mode 100644 index 0000000..ec9bc0b --- /dev/null +++ 
b/tools/model_converters/revvit_to_mmpretrain.py @@ -0,0 +1,99 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_revvit(ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('head.projection'): + new_k = k.replace('head.projection', 'head.fc') + new_ckpt[new_k] = new_v + continue + elif k.startswith('patch_embed'): + if 'proj.' in k: + new_k = k.replace('proj.', 'projection.') + else: + new_k = k + elif k.startswith('rev_backbone'): + new_k = k.replace('rev_backbone.', '') + if 'F.norm' in k: + new_k = new_k.replace('F.norm', 'ln1') + elif 'G.norm' in k: + new_k = new_k.replace('G.norm', 'ln2') + elif 'F.attn' in k: + new_k = new_k.replace('F.attn', 'attn') + elif 'G.mlp.fc1' in k: + new_k = new_k.replace('G.mlp.fc1', 'ffn.layers.0.0') + elif 'G.mlp.fc2' in k: + new_k = new_k.replace('G.mlp.fc2', 'ffn.layers.1') + elif k.startswith('norm'): + new_k = k.replace('norm', 'ln1') + else: + new_k = k + + if not new_k.startswith('head'): + new_k = 'backbone.' + new_k + new_ckpt[new_k] = new_v + + tmp_weight_dir = [] + tmp_bias_dir = [] + final_ckpt = OrderedDict() + for k, v in list(new_ckpt.items()): + if 'attn.q.weight' in k: + tmp_weight_dir.append(v) + elif 'attn.k.weight' in k: + tmp_weight_dir.append(v) + elif 'attn.v.weight' in k: + tmp_weight_dir.append(v) + new_k = k.replace('attn.v.weight', 'attn.qkv.weight') + final_ckpt[new_k] = torch.cat(tmp_weight_dir, dim=0) + tmp_weight_dir = [] + elif 'attn.q.bias' in k: + tmp_bias_dir.append(v) + elif 'attn.k.bias' in k: + tmp_bias_dir.append(v) + elif 'attn.v.bias' in k: + tmp_bias_dir.append(v) + new_k = k.replace('attn.v.bias', 'attn.qkv.bias') + final_ckpt[new_k] = torch.cat(tmp_bias_dir, dim=0) + tmp_bias_dir = [] + else: + final_ckpt[k] = v + + return final_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained revvit' + ' models to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'model_state' in checkpoint: + state_dict = checkpoint['model_state'] + else: + state_dict = checkpoint + + weight = convert_revvit(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/shufflenetv2_to_mmpretrain.py b/tools/model_converters/shufflenetv2_to_mmpretrain.py new file mode 100644 index 0000000..f909e41 --- /dev/null +++ b/tools/model_converters/shufflenetv2_to_mmpretrain.py @@ -0,0 +1,113 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
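+# Converts torchvision ShuffleNetV2 checkpoints: 'conv1' -> 'backbone.conv1',
+# 'stage2'..'stage4' -> 'backbone.layers.0'..'backbone.layers.2',
+# 'conv5' -> 'backbone.layers.3' and 'fc' -> 'head.fc'.
+# Illustrative usage (paths are placeholders):
+#   python tools/model_converters/shufflenetv2_to_mmpretrain.py <shufflenetv2_torchvision.pth> <shufflenetv2_mmpretrain.pth>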
+import argparse +from collections import OrderedDict + +import torch + + +def convert_conv1(model_key, model_weight, state_dict, converted_names): + if model_key.find('conv1.0') >= 0: + new_key = model_key.replace('conv1.0', 'backbone.conv1.conv') + else: + new_key = model_key.replace('conv1.1', 'backbone.conv1.bn') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_conv5(model_key, model_weight, state_dict, converted_names): + if model_key.find('conv5.0') >= 0: + new_key = model_key.replace('conv5.0', 'backbone.layers.3.conv') + else: + new_key = model_key.replace('conv5.1', 'backbone.layers.3.bn') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_head(model_key, model_weight, state_dict, converted_names): + new_key = model_key.replace('fc', 'head.fc') + state_dict[new_key] = model_weight + converted_names.add(model_key) + print(f'Convert {model_key} to {new_key}') + + +def convert_block(model_key, model_weight, state_dict, converted_names): + split_keys = model_key.split('.') + layer, block, branch = split_keys[:3] + layer_id = int(layer[-1]) - 2 + new_key = model_key.replace(layer, f'backbone.layers.{layer_id}') + + if branch == 'branch1': + if new_key.find('branch1.0') >= 0: + new_key = new_key.replace('branch1.0', 'branch1.0.conv') + elif new_key.find('branch1.1') >= 0: + new_key = new_key.replace('branch1.1', 'branch1.0.bn') + elif new_key.find('branch1.2') >= 0: + new_key = new_key.replace('branch1.2', 'branch1.1.conv') + elif new_key.find('branch1.3') >= 0: + new_key = new_key.replace('branch1.3', 'branch1.1.bn') + elif branch == 'branch2': + + if new_key.find('branch2.0') >= 0: + new_key = new_key.replace('branch2.0', 'branch2.0.conv') + elif new_key.find('branch2.1') >= 0: + new_key = new_key.replace('branch2.1', 'branch2.0.bn') + elif new_key.find('branch2.3') >= 0: + new_key = new_key.replace('branch2.3', 'branch2.1.conv') + elif new_key.find('branch2.4') >= 0: + new_key = new_key.replace('branch2.4', 'branch2.1.bn') + elif new_key.find('branch2.5') >= 0: + new_key = new_key.replace('branch2.5', 'branch2.2.conv') + elif new_key.find('branch2.6') >= 0: + new_key = new_key.replace('branch2.6', 'branch2.2.bn') + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + else: + raise ValueError(f'Unsupported conversion of key {model_key}') + print(f'Convert {model_key} to {new_key}') + state_dict[new_key] = model_weight + converted_names.add(model_key) + + +def convert(src, dst): + """Convert keys in torchvision pretrained ShuffleNetV2 models to mmpretrain + style.""" + + # load pytorch model + blobs = torch.load(src, map_location='cpu') + + # convert to pytorch style + state_dict = OrderedDict() + converted_names = set() + + for key, weight in blobs.items(): + if 'conv1' in key: + convert_conv1(key, weight, state_dict, converted_names) + elif 'fc' in key: + convert_head(key, weight, state_dict, converted_names) + elif key.startswith('s'): + convert_block(key, weight, state_dict, converted_names) + elif 'conv5' in key: + convert_conv5(key, weight, state_dict, converted_names) + + # check if all layers are converted + for key in blobs: + if key not in converted_names: + print(f'not converted: {key}') + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + 
parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + convert(args.src, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/tinyvit_to_mmpretrain.py b/tools/model_converters/tinyvit_to_mmpretrain.py new file mode 100644 index 0000000..0aad47c --- /dev/null +++ b/tools/model_converters/tinyvit_to_mmpretrain.py @@ -0,0 +1,61 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from pathlib import Path + +import torch + + +def convert_weights(weight): + """Weight Converter. + + Converts the weights from timm to mmpretrain + Args: + weight (dict): weight dict from timm + Returns: + Converted weight dict for mmpretrain + """ + result = dict() + result['meta'] = dict() + temp = dict() + mapping = { + 'c.weight': 'conv2d.weight', + 'bn.weight': 'bn2d.weight', + 'bn.bias': 'bn2d.bias', + 'bn.running_mean': 'bn2d.running_mean', + 'bn.running_var': 'bn2d.running_var', + 'bn.num_batches_tracked': 'bn2d.num_batches_tracked', + 'layers': 'stages', + 'norm_head': 'norm3', + } + + weight = weight['model'] + + for k, v in weight.items(): + # keyword mapping + for mk, mv in mapping.items(): + if mk in k: + k = k.replace(mk, mv) + + if k.startswith('head.'): + temp['head.fc.' + k[5:]] = v + else: + temp['backbone.' + k] = v + + result['state_dict'] = temp + return result + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + args = parser.parse_args() + dst = Path(args.dst) + if dst.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit(1) + dst.parent.mkdir(parents=True, exist_ok=True) + + original_model = torch.load(args.src, map_location='cpu') + converted_model = convert_weights(original_model) + torch.save(converted_model, args.dst) diff --git a/tools/model_converters/torchvision_to_mmpretrain.py b/tools/model_converters/torchvision_to_mmpretrain.py new file mode 100644 index 0000000..679b791 --- /dev/null +++ b/tools/model_converters/torchvision_to_mmpretrain.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +from collections import OrderedDict +from pathlib import Path + +import torch + + +def convert_resnet(src_dict, dst_dict): + """convert resnet checkpoints from torchvision.""" + for key, value in src_dict.items(): + if not key.startswith('fc'): + dst_dict['backbone.' + key] = value + else: + dst_dict['head.' 
+ key] = value + + +# model name to convert function +CONVERT_F_DICT = { + 'resnet': convert_resnet, +} + + +def convert(src: str, dst: str, convert_f: callable): + print('Converting...') + blobs = torch.load(src, map_location='cpu') + converted_state_dict = OrderedDict() + + # convert key in weight + convert_f(blobs, converted_state_dict) + + torch.save(converted_state_dict, dst) + print('Done!') + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src detectron model path') + parser.add_argument('dst', help='save path') + parser.add_argument( + 'model', type=str, help='The algorithm needs to change the keys.') + args = parser.parse_args() + + dst = Path(args.dst) + if dst.suffix != '.pth': + print('The path should contain the name of the pth format file.') + exit(1) + dst.parent.mkdir(parents=True, exist_ok=True) + + # this tool only support model in CONVERT_F_DICT + support_models = list(CONVERT_F_DICT.keys()) + if args.model not in CONVERT_F_DICT: + print(f'The "{args.model}" has not been supported to convert now.') + print(f'This tool only supports {", ".join(support_models)}.') + print('If you have done the converting job, PR is welcome!') + exit(1) + + convert_f = CONVERT_F_DICT[args.model] + convert(args.src, args.dst, convert_f) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/twins2mmpretrain.py b/tools/model_converters/twins2mmpretrain.py new file mode 100644 index 0000000..8489130 --- /dev/null +++ b/tools/model_converters/twins2mmpretrain.py @@ -0,0 +1,73 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmcv +import torch +from mmcv.runner import CheckpointLoader + + +def convert_twins(args, ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('head'): + new_k = k.replace('head.', 'head.fc.') + new_ckpt[new_k] = new_v + continue + elif k.startswith('patch_embeds'): + if 'proj.' in k: + new_k = k.replace('proj.', 'projection.') + else: + new_k = k + elif k.startswith('blocks'): + k = k.replace('blocks', 'stages') + # Union + if 'mlp.fc1' in k: + new_k = k.replace('mlp.fc1', 'ffn.layers.0.0') + elif 'mlp.fc2' in k: + new_k = k.replace('mlp.fc2', 'ffn.layers.1') + + else: + new_k = k + new_k = new_k.replace('blocks.', 'layers.') + elif k.startswith('pos_block'): + new_k = k.replace('pos_block', 'position_encodings') + if 'proj.0.' in new_k: + new_k = new_k.replace('proj.0.', 'proj.') + elif k.startswith('norm'): + new_k = k.replace('norm', 'norm_after_stage3') + else: + new_k = k + new_k = 'backbone.' + new_k + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in timm pretrained vit models to ' + 'MMPretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. 
+ parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'state_dict' in checkpoint: + # timm checkpoint + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + weight = convert_twins(args, state_dict) + mmcv.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/van2mmpretrain.py b/tools/model_converters/van2mmpretrain.py new file mode 100644 index 0000000..563f3d9 --- /dev/null +++ b/tools/model_converters/van2mmpretrain.py @@ -0,0 +1,66 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_van(ckpt): + + new_ckpt = OrderedDict() + + for k, v in list(ckpt.items()): + new_v = v + if k.startswith('head'): + new_k = k.replace('head.', 'head.fc.') + new_ckpt[new_k] = new_v + continue + elif k.startswith('patch_embed'): + if 'proj.' in k: + new_k = k.replace('proj.', 'projection.') + else: + new_k = k + elif k.startswith('block'): + new_k = k.replace('block', 'blocks') + if 'attn.spatial_gating_unit' in new_k: + new_k = new_k.replace('conv0', 'DW_conv') + new_k = new_k.replace('conv_spatial', 'DW_D_conv') + if 'dwconv.dwconv' in new_k: + new_k = new_k.replace('dwconv.dwconv', 'dwconv') + else: + new_k = k + + if not new_k.startswith('head'): + new_k = 'backbone.' + new_k + new_ckpt[new_k] = new_v + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained van ' + 'models to mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'state_dict' in checkpoint: + state_dict = checkpoint['state_dict'] + else: + state_dict = checkpoint + + weight = convert_van(state_dict) + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/vgg_to_mmpretrain.py b/tools/model_converters/vgg_to_mmpretrain.py new file mode 100644 index 0000000..46178db --- /dev/null +++ b/tools/model_converters/vgg_to_mmpretrain.py @@ -0,0 +1,118 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
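+# Converts torchvision VGG checkpoints (11/13/16/19 layers, with or without BN)
+# into MMPretrain format. Example invocation (illustrative paths):
+#   python tools/model_converters/vgg_to_mmpretrain.py vgg16.pth vgg16_mmpretrain.pth --layer-num 16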
+import argparse +import os +from collections import OrderedDict + +import torch + + +def get_layer_maps(layer_num, with_bn): + layer_maps = {'conv': {}, 'bn': {}} + if with_bn: + if layer_num == 11: + layer_idxs = [0, 4, 8, 11, 15, 18, 22, 25] + elif layer_num == 13: + layer_idxs = [0, 3, 7, 10, 14, 17, 21, 24, 28, 31] + elif layer_num == 16: + layer_idxs = [0, 3, 7, 10, 14, 17, 20, 24, 27, 30, 34, 37, 40] + elif layer_num == 19: + layer_idxs = [ + 0, 3, 7, 10, 14, 17, 20, 23, 27, 30, 33, 36, 40, 43, 46, 49 + ] + else: + raise ValueError(f'Invalid number of layers: {layer_num}') + for i, layer_idx in enumerate(layer_idxs): + if i == 0: + new_layer_idx = layer_idx + else: + new_layer_idx += int((layer_idx - layer_idxs[i - 1]) / 2) + layer_maps['conv'][layer_idx] = new_layer_idx + layer_maps['bn'][layer_idx + 1] = new_layer_idx + else: + if layer_num == 11: + layer_idxs = [0, 3, 6, 8, 11, 13, 16, 18] + new_layer_idxs = [0, 2, 4, 5, 7, 8, 10, 11] + elif layer_num == 13: + layer_idxs = [0, 2, 5, 7, 10, 12, 15, 17, 20, 22] + new_layer_idxs = [0, 1, 3, 4, 6, 7, 9, 10, 12, 13] + elif layer_num == 16: + layer_idxs = [0, 2, 5, 7, 10, 12, 14, 17, 19, 21, 24, 26, 28] + new_layer_idxs = [0, 1, 3, 4, 6, 7, 8, 10, 11, 12, 14, 15, 16] + elif layer_num == 19: + layer_idxs = [ + 0, 2, 5, 7, 10, 12, 14, 16, 19, 21, 23, 25, 28, 30, 32, 34 + ] + new_layer_idxs = [ + 0, 1, 3, 4, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19 + ] + else: + raise ValueError(f'Invalid number of layers: {layer_num}') + + layer_maps['conv'] = { + layer_idx: new_layer_idx + for layer_idx, new_layer_idx in zip(layer_idxs, new_layer_idxs) + } + + return layer_maps + + +def convert(src, dst, layer_num, with_bn=False): + """Convert keys in torchvision pretrained VGG models to mmpretrain + style.""" + + # load pytorch model + assert os.path.isfile(src), f'no checkpoint found at {src}' + blobs = torch.load(src, map_location='cpu') + + # convert to pytorch style + state_dict = OrderedDict() + + layer_maps = get_layer_maps(layer_num, with_bn) + + prefix = 'backbone' + delimiter = '.' 
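+    # The loop below renumbers 'features.<idx>.*' keys through layer_maps so that
+    # conv/bn weights land on the matching MMPretrain sub-modules, and prefixes
+    # the converted keys with 'backbone.'.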
+ for key, weight in blobs.items(): + if 'features' in key: + module, layer_idx, weight_type = key.split(delimiter) + new_key = delimiter.join([prefix, key]) + layer_idx = int(layer_idx) + for layer_key, maps in layer_maps.items(): + if layer_idx in maps: + new_layer_idx = maps[layer_idx] + new_key = delimiter.join([ + prefix, 'features', + str(new_layer_idx), layer_key, weight_type + ]) + state_dict[new_key] = weight + print(f'Convert {key} to {new_key}') + elif 'classifier' in key: + new_key = delimiter.join([prefix, key]) + state_dict[new_key] = weight + print(f'Convert {key} to {new_key}') + else: + state_dict[key] = weight + + # save checkpoint + checkpoint = dict() + checkpoint['state_dict'] = state_dict + torch.save(checkpoint, dst) + + +def main(): + parser = argparse.ArgumentParser(description='Convert model keys') + parser.add_argument('src', help='src torchvision model path') + parser.add_argument('dst', help='save path') + parser.add_argument( + '--bn', action='store_true', help='whether original vgg has BN') + parser.add_argument( + '--layer-num', + type=int, + choices=[11, 13, 16, 19], + default=11, + help='number of VGG layers') + args = parser.parse_args() + convert(args.src, args.dst, layer_num=args.layer_num, with_bn=args.bn) + + +if __name__ == '__main__': + main() diff --git a/tools/model_converters/vig_to_mmpretrain.py b/tools/model_converters/vig_to_mmpretrain.py new file mode 100644 index 0000000..2642c7d --- /dev/null +++ b/tools/model_converters/vig_to_mmpretrain.py @@ -0,0 +1,98 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +import re +from collections import OrderedDict + +import mmengine +import torch +from mmengine.runner import CheckpointLoader + + +def convert_vig(ckpt): + new_ckpt = OrderedDict() + + for k, v in ckpt.items(): + new_key = k + new_value = v + if 'pos_embed' in new_key: + new_key = new_key.replace('pos_embed', 'backbone.pos_embed') + elif 'stem' in new_key: + new_key = new_key.replace('stem.convs', 'backbone.stem') + elif 'backbone' in new_key: + new_key = new_key.replace('backbone', 'backbone.blocks') + elif 'prediction.0' in new_key: + new_key = new_key.replace('prediction.0', 'head.fc1') + new_value = v.squeeze(-1).squeeze(-1) + elif 'prediction.1' in new_key: + new_key = new_key.replace('prediction.1', 'head.bn') + elif 'prediction.4' in new_key: + new_key = new_key.replace('prediction.4', 'head.fc2') + new_value = v.squeeze(-1).squeeze(-1) + new_ckpt[new_key] = new_value + return new_ckpt + + +def convert_pvig(ckpt): + new_ckpt = OrderedDict() + + stage_idx = 0 + stage_blocks = 0 + for k, v in ckpt.items(): + new_key: str = k + new_value = v + if 'pos_embed' in new_key: + new_key = new_key.replace('pos_embed', 'backbone.pos_embed') + elif 'stem' in new_key: + new_key = new_key.replace('stem.convs', 'backbone.stem') + elif re.match(r'^backbone\.\d+\.conv', new_key) is not None: + if new_key.endswith('0.weight'): + stage_idx += 1 + stage_blocks = int(new_key.split('.')[1]) + other = new_key.split('.', maxsplit=3)[-1] + new_key = f'backbone.stages.{stage_idx}.0.' + other + elif 'backbone' in new_key: + block_idx = int(new_key.split('.')[1]) - stage_blocks + other = new_key.split('.', maxsplit=2)[-1] + new_key = f'backbone.stages.{stage_idx}.{block_idx}.' 
+ other + elif 'prediction.0' in new_key: + new_key = new_key.replace('prediction.0', 'head.fc1') + new_value = v.squeeze(-1).squeeze(-1) + elif 'prediction.1' in new_key: + new_key = new_key.replace('prediction.1', 'head.bn') + elif 'prediction.4' in new_key: + new_key = new_key.replace('prediction.4', 'head.fc2') + new_value = v.squeeze(-1).squeeze(-1) + new_ckpt[new_key] = new_value + return new_ckpt + + +def main(): + parser = argparse.ArgumentParser( + description='Convert keys in pretrained vig models to ' + 'mmpretrain style.') + parser.add_argument('src', help='src model path or url') + # The dst path must be a full path of the new checkpoint. + parser.add_argument('dst', help='save path') + args = parser.parse_args() + + checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu') + + if 'model' in checkpoint: + state_dict = checkpoint['model'] + else: + state_dict = checkpoint + + if 'backbone.2.conv.0.weight' in state_dict: + weight = convert_pvig(state_dict) + else: + weight = convert_vig(state_dict) + + mmengine.mkdir_or_exist(osp.dirname(args.dst)) + torch.save(weight, args.dst) + + print('Done!!') + + +if __name__ == '__main__': + main() diff --git a/tools/slurm_test.sh b/tools/slurm_test.sh new file mode 100644 index 0000000..6dd67e5 --- /dev/null +++ b/tools/slurm_test.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +CHECKPOINT=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +PY_ARGS=${@:5} +SRUN_ARGS=${SRUN_ARGS:-""} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS} diff --git a/tools/slurm_train.sh b/tools/slurm_train.sh new file mode 100644 index 0000000..b3feb3d --- /dev/null +++ b/tools/slurm_train.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +set -x + +PARTITION=$1 +JOB_NAME=$2 +CONFIG=$3 +WORK_DIR=$4 +GPUS=${GPUS:-8} +GPUS_PER_NODE=${GPUS_PER_NODE:-8} +CPUS_PER_TASK=${CPUS_PER_TASK:-5} +SRUN_ARGS=${SRUN_ARGS:-""} +PY_ARGS=${@:5} + +PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ +srun -p ${PARTITION} \ + --job-name=${JOB_NAME} \ + --gres=gpu:${GPUS_PER_NODE} \ + --ntasks=${GPUS} \ + --ntasks-per-node=${GPUS_PER_NODE} \ + --cpus-per-task=${CPUS_PER_TASK} \ + --kill-on-bad-exit=1 \ + ${SRUN_ARGS} \ + python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} diff --git a/tools/test.py b/tools/test.py new file mode 100644 index 0000000..d9d99d2 --- /dev/null +++ b/tools/test.py @@ -0,0 +1,193 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
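+# Single-GPU example (illustrative paths):
+#   python tools/test.py configs/resnet/resnet50_8xb32_in1k.py work_dirs/epoch_100.pth \
+#       --out results.pkl --out-item pred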
+import argparse +import os +import os.path as osp +from copy import deepcopy + +import mmengine +from mmengine.config import Config, ConfigDict, DictAction +from mmengine.evaluator import DumpResults +from mmengine.registry import RUNNERS +from mmengine.runner import Runner + + +def parse_args(): + parser = argparse.ArgumentParser( + description='MMPreTrain test (and eval) a model') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument( + '--work-dir', + help='the directory to save the file containing evaluation metrics') + parser.add_argument('--out', help='the file to output results.') + parser.add_argument( + '--out-item', + choices=['metrics', 'pred'], + help='To output whether metrics or predictions. ' + 'Defaults to output predictions.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--amp', + action='store_true', + help='enable automatic-mixed-precision test') + parser.add_argument( + '--show-dir', + help='directory where the visualization images will be saved.') + parser.add_argument( + '--show', + action='store_true', + help='whether to display the prediction results in a window.') + parser.add_argument( + '--interval', + type=int, + default=1, + help='visualize per interval samples.') + parser.add_argument( + '--wait-time', + type=float, + default=2, + help='display time of every window. (second)') + parser.add_argument( + '--no-pin-memory', + action='store_true', + help='whether to disable the pin_memory option in dataloaders.') + parser.add_argument( + '--tta', + action='store_true', + help='Whether to enable the Test-Time-Aug (TTA). If the config file ' + 'has `tta_pipeline` and `tta_model` fields, use them to determine the ' + 'TTA transforms and how to merge the TTA results. Otherwise, use flip ' + 'TTA by averaging classification score.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + # When using PyTorch version >= 2.0.0, the `torch.distributed.launch` + # will pass the `--local-rank` parameter to `tools/train.py` instead + # of `--local_rank`. 
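+    # Register both spellings so either launcher convention is accepted.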
+ parser.add_argument('--local_rank', '--local-rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + return args + + +def merge_args(cfg, args): + """Merge CLI arguments to config.""" + cfg.launcher = args.launcher + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + cfg.load_from = args.checkpoint + + # enable automatic-mixed-precision test + if args.amp: + cfg.test_cfg.fp16 = True + + # -------------------- visualization -------------------- + if args.show or (args.show_dir is not None): + assert 'visualization' in cfg.default_hooks, \ + 'VisualizationHook is not set in the `default_hooks` field of ' \ + 'config. Please set `visualization=dict(type="VisualizationHook")`' + + cfg.default_hooks.visualization.enable = True + cfg.default_hooks.visualization.show = args.show + cfg.default_hooks.visualization.wait_time = args.wait_time + cfg.default_hooks.visualization.out_dir = args.show_dir + cfg.default_hooks.visualization.interval = args.interval + + # -------------------- TTA related args -------------------- + if args.tta: + if 'tta_model' not in cfg: + cfg.tta_model = dict(type='mmpretrain.AverageClsScoreTTA') + if 'tta_pipeline' not in cfg: + test_pipeline = cfg.test_dataloader.dataset.pipeline + cfg.tta_pipeline = deepcopy(test_pipeline) + flip_tta = dict( + type='TestTimeAug', + transforms=[ + [ + dict(type='RandomFlip', prob=1.), + dict(type='RandomFlip', prob=0.) 
+ ], + [test_pipeline[-1]], + ]) + cfg.tta_pipeline[-1] = flip_tta + cfg.model = ConfigDict(**cfg.tta_model, module=cfg.model) + cfg.test_dataloader.dataset.pipeline = cfg.tta_pipeline + + # ----------------- Default dataloader args ----------------- + default_dataloader_cfg = ConfigDict( + pin_memory=True, + collate_fn=dict(type='default_collate'), + ) + + def set_default_dataloader_cfg(cfg, field): + if cfg.get(field, None) is None: + return + dataloader_cfg = deepcopy(default_dataloader_cfg) + dataloader_cfg.update(cfg[field]) + cfg[field] = dataloader_cfg + if args.no_pin_memory: + cfg[field]['pin_memory'] = False + + set_default_dataloader_cfg(cfg, 'test_dataloader') + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + return cfg + + +def main(): + args = parse_args() + + if args.out is None and args.out_item is not None: + raise ValueError('Please use `--out` argument to specify the ' + 'path of the output file before using `--out-item`.') + + # load config + cfg = Config.fromfile(args.config) + + # merge cli arguments to config + cfg = merge_args(cfg, args) + + # build the runner from config + if 'runner_type' not in cfg: + # build the default runner + runner = Runner.from_cfg(cfg) + else: + # build customized runner from the registry + # if 'runner_type' is set in the cfg + runner = RUNNERS.build(cfg) + + if args.out and args.out_item in ['pred', None]: + runner.test_evaluator.metrics.append( + DumpResults(out_file_path=args.out)) + + # start testing + metrics = runner.test() + + if args.out and args.out_item == 'metrics': + mmengine.dump(metrics, args.out) + + +if __name__ == '__main__': + main() diff --git a/tools/torchserve/mmpretrain2torchserve.py b/tools/torchserve/mmpretrain2torchserve.py new file mode 100644 index 0000000..8d53bf3 --- /dev/null +++ b/tools/torchserve/mmpretrain2torchserve.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser, Namespace +from pathlib import Path +from tempfile import TemporaryDirectory + +import mmengine + +try: + from model_archiver.model_packaging import package_model + from model_archiver.model_packaging_utils import ModelExportUtils +except ImportError: + raise ImportError( + 'Please run `pip install torchserve torch-model-archiver"` to ' + 'install required third-party libraries.') + + +def mmpretrain2torchserve( + config_file: str, + checkpoint_file: str, + output_folder: str, + model_name: str, + model_version: str = '1.0', + force: bool = False, +): + """Converts mmpretrain model (config + checkpoint) to TorchServe `.mar`. + + Args: + config_file: + In MMPretrain config format. + The contents vary for each task repository. + checkpoint_file: + In MMPretrain checkpoint format. + The contents vary for each task repository. + output_folder: + Folder where `{model_name}.mar` will be created. + The file created will be in TorchServe archive format. + model_name: + If not None, used for naming the `{model_name}.mar` file + that will be created under `output_folder`. + If None, `{Path(checkpoint_file).stem}` will be used. + model_version: + Model's version. + force: + If True, if there is an existing `{model_name}.mar` + file under `output_folder` it will be overwritten. 
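+
+    Example (illustrative paths):
+        >>> mmpretrain2torchserve('config.py', 'checkpoint.pth',
+        ...                       'deploy/', 'my_model')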
+ """ + mmengine.mkdir_or_exist(output_folder) + + config = mmengine.Config.fromfile(config_file) + + with TemporaryDirectory() as tmpdir: + config.dump(f'{tmpdir}/config.py') + + args = Namespace( + **{ + 'model_file': f'{tmpdir}/config.py', + 'serialized_file': checkpoint_file, + 'handler': f'{Path(__file__).parent}/mmpretrain_handler.py', + 'model_name': model_name or Path(checkpoint_file).stem, + 'version': model_version, + 'export_path': output_folder, + 'force': force, + 'requirements_file': None, + 'extra_files': None, + 'runtime': 'python', + 'archive_format': 'default' + }) + manifest = ModelExportUtils.generate_manifest_json(args) + package_model(args, manifest) + + +def parse_args(): + parser = ArgumentParser( + description='Convert mmpretrain models to TorchServe `.mar` format.') + parser.add_argument('config', type=str, help='config file path') + parser.add_argument('checkpoint', type=str, help='checkpoint file path') + parser.add_argument( + '--output-folder', + type=str, + required=True, + help='Folder where `{model_name}.mar` will be created.') + parser.add_argument( + '--model-name', + type=str, + default=None, + help='If not None, used for naming the `{model_name}.mar`' + 'file that will be created under `output_folder`.' + 'If None, `{Path(checkpoint_file).stem}` will be used.') + parser.add_argument( + '--model-version', + type=str, + default='1.0', + help='Number used for versioning.') + parser.add_argument( + '-f', + '--force', + action='store_true', + help='overwrite the existing `{model_name}.mar`') + args = parser.parse_args() + + return args + + +if __name__ == '__main__': + args = parse_args() + + if package_model is None: + raise ImportError('`torch-model-archiver` is required.' + 'Try: pip install torch-model-archiver') + + mmpretrain2torchserve(args.config, args.checkpoint, args.output_folder, + args.model_name, args.model_version, args.force) diff --git a/tools/torchserve/mmpretrain_handler.py b/tools/torchserve/mmpretrain_handler.py new file mode 100644 index 0000000..c924e08 --- /dev/null +++ b/tools/torchserve/mmpretrain_handler.py @@ -0,0 +1,68 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import base64 +import os + +import mmcv +import numpy as np +import torch +from ts.torch_handler.base_handler import BaseHandler + +import mmpretrain.models +from mmpretrain.apis import (ImageClassificationInferencer, + ImageRetrievalInferencer, get_model) + + +class MMPreHandler(BaseHandler): + + def initialize(self, context): + properties = context.system_properties + self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu' + self.device = torch.device(self.map_location + ':' + + str(properties.get('gpu_id')) if torch.cuda. 
+ is_available() else self.map_location) + self.manifest = context.manifest + + model_dir = properties.get('model_dir') + serialized_file = self.manifest['model']['serializedFile'] + checkpoint = os.path.join(model_dir, serialized_file) + self.config_file = os.path.join(model_dir, 'config.py') + + model = get_model(self.config_file, checkpoint, self.device) + if isinstance(model, mmpretrain.models.ImageClassifier): + self.inferencer = ImageClassificationInferencer(model) + elif isinstance(model, mmpretrain.models.ImageToImageRetriever): + self.inferencer = ImageRetrievalInferencer(model) + else: + raise NotImplementedError( + f'No available inferencer for {type(model)}') + self.initialized = True + + def preprocess(self, data): + images = [] + + for row in data: + image = row.get('data') or row.get('body') + if isinstance(image, str): + image = base64.b64decode(image) + image = mmcv.imfrombytes(image) + images.append(image) + + return images + + def inference(self, data, *args, **kwargs): + results = [] + for image in data: + results.append(self.inferencer(image)[0]) + return results + + def postprocess(self, data): + processed_data = [] + for result in data: + processed_result = {} + for k, v in result.items(): + if isinstance(v, (torch.Tensor, np.ndarray)): + processed_result[k] = v.tolist() + else: + processed_result[k] = v + processed_data.append(processed_result) + return processed_data diff --git a/tools/torchserve/test_torchserver.py b/tools/torchserve/test_torchserver.py new file mode 100644 index 0000000..00bfa29 --- /dev/null +++ b/tools/torchserve/test_torchserver.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from argparse import ArgumentParser + +import numpy as np +import requests + +from mmpretrain.apis import get_model, inference_model + + +def parse_args(): + parser = ArgumentParser() + parser.add_argument('img', help='Image file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument('model_name', help='The model name in the server') + parser.add_argument( + '--inference-addr', + default='127.0.0.1:8080', + help='Address and port of the inference server') + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + args = parser.parse_args() + return args + + +def main(args): + # Inference single image by native apis. + model = get_model(args.config, args.checkpoint, device=args.device) + model_result = inference_model(model, args.img) + + # Inference single image by torchserve engine. + url = 'http://' + args.inference_addr + '/predictions/' + args.model_name + with open(args.img, 'rb') as image: + response = requests.post(url, image) + server_result = response.json() + + assert np.allclose(model_result['pred_score'], server_result['pred_score']) + print('Test complete, the results of PyTorch and TorchServe are the same.') + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/tools/train.py b/tools/train.py new file mode 100644 index 0000000..89c8548 --- /dev/null +++ b/tools/train.py @@ -0,0 +1,162 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
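+# Single-GPU example (illustrative config path):
+#   python tools/train.py configs/resnet/resnet50_8xb32_in1k.py --work-dir work_dirs/resnet50 --amp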
+import argparse +import os +import os.path as osp +from copy import deepcopy + +from mmengine.config import Config, ConfigDict, DictAction +from mmengine.registry import RUNNERS +from mmengine.runner import Runner +from mmengine.utils import digit_version +from mmengine.utils.dl_utils import TORCH_VERSION + + +def parse_args(): + parser = argparse.ArgumentParser(description='Train a model') + parser.add_argument('config', help='train config file path') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--resume', + nargs='?', + type=str, + const='auto', + help='If specify checkpoint path, resume from it, while if not ' + 'specify, try to auto resume from the latest checkpoint ' + 'in the work directory.') + parser.add_argument( + '--amp', + action='store_true', + help='enable automatic-mixed-precision training') + parser.add_argument( + '--no-validate', + action='store_true', + help='whether not to evaluate the checkpoint during training') + parser.add_argument( + '--auto-scale-lr', + action='store_true', + help='whether to auto scale the learning rate according to the ' + 'actual batch size and the original batch size.') + parser.add_argument( + '--no-pin-memory', + action='store_true', + help='whether to disable the pin_memory option in dataloaders.') + parser.add_argument( + '--no-persistent-workers', + action='store_true', + help='whether to disable the persistent_workers option in dataloaders.' + ) + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument( + '--launcher', + choices=['none', 'pytorch', 'slurm', 'mpi'], + default='none', + help='job launcher') + # When using PyTorch version >= 2.0.0, the `torch.distributed.launch` + # will pass the `--local-rank` parameter to `tools/train.py` instead + # of `--local_rank`. 
+ parser.add_argument('--local_rank', '--local-rank', type=int, default=0) + args = parser.parse_args() + if 'LOCAL_RANK' not in os.environ: + os.environ['LOCAL_RANK'] = str(args.local_rank) + + return args + + +def merge_args(cfg, args): + """Merge CLI arguments to config.""" + if args.no_validate: + cfg.val_cfg = None + cfg.val_dataloader = None + cfg.val_evaluator = None + + cfg.launcher = args.launcher + + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + # enable automatic-mixed-precision training + if args.amp is True: + cfg.optim_wrapper.type = 'AmpOptimWrapper' + cfg.optim_wrapper.setdefault('loss_scale', 'dynamic') + + # resume training + if args.resume == 'auto': + cfg.resume = True + cfg.load_from = None + elif args.resume is not None: + cfg.resume = True + cfg.load_from = args.resume + + # enable auto scale learning rate + if args.auto_scale_lr: + cfg.auto_scale_lr.enable = True + + # set dataloader args + default_dataloader_cfg = ConfigDict( + pin_memory=True, + persistent_workers=True, + collate_fn=dict(type='default_collate'), + ) + if digit_version(TORCH_VERSION) < digit_version('1.8.0'): + default_dataloader_cfg.persistent_workers = False + + def set_default_dataloader_cfg(cfg, field): + if cfg.get(field, None) is None: + return + dataloader_cfg = deepcopy(default_dataloader_cfg) + dataloader_cfg.update(cfg[field]) + cfg[field] = dataloader_cfg + if args.no_pin_memory: + cfg[field]['pin_memory'] = False + if args.no_persistent_workers: + cfg[field]['persistent_workers'] = False + + set_default_dataloader_cfg(cfg, 'train_dataloader') + set_default_dataloader_cfg(cfg, 'val_dataloader') + set_default_dataloader_cfg(cfg, 'test_dataloader') + + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + return cfg + + +def main(): + args = parse_args() + + # load config + cfg = Config.fromfile(args.config) + + # merge cli arguments to config + cfg = merge_args(cfg, args) + + # build the runner from config + if 'runner_type' not in cfg: + # build the default runner + runner = Runner.from_cfg(cfg) + else: + # build customized runner from the registry + # if 'runner_type' is set in the cfg + runner = RUNNERS.build(cfg) + + # start training + runner.train() + + +if __name__ == '__main__': + main() diff --git a/tools/visualization/browse_dataset.py b/tools/visualization/browse_dataset.py new file mode 100644 index 0000000..18db4dc --- /dev/null +++ b/tools/visualization/browse_dataset.py @@ -0,0 +1,253 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
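+# Example (illustrative): visualize every intermediate transform of the training pipeline
+#   python tools/visualization/browse_dataset.py configs/resnet/resnet50_8xb32_in1k.py -m pipeline -o work_dirs/vis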
+import argparse +import os.path as osp +import sys +import textwrap + +from matplotlib import transforms +from mmengine.config import Config, DictAction +from mmengine.dataset import Compose +from mmengine.registry import init_default_scope +from mmengine.utils import ProgressBar +from mmengine.visualization.utils import img_from_canvas + +from mmpretrain.datasets.builder import build_dataset +from mmpretrain.structures import DataSample +from mmpretrain.visualization import UniversalVisualizer, create_figure + +try: + from matplotlib._tight_bbox import adjust_bbox +except ImportError: + # To be compatible with matplotlib 3.5 + from matplotlib.tight_bbox import adjust_bbox + + +def parse_args(): + parser = argparse.ArgumentParser(description='Browse a dataset') + parser.add_argument('config', help='train config file path') + parser.add_argument( + '--output-dir', + '-o', + default=None, + type=str, + help='If there is no display interface, you can save it.') + parser.add_argument('--not-show', default=False, action='store_true') + parser.add_argument( + '--phase', + '-p', + default='train', + type=str, + choices=['train', 'test', 'val'], + help='phase of dataset to visualize, accept "train" "test" and "val".' + ' Defaults to "train".') + parser.add_argument( + '--show-number', + '-n', + type=int, + default=sys.maxsize, + help='number of images selected to visualize, must bigger than 0. if ' + 'the number is bigger than length of dataset, show all the images in ' + 'dataset; default "sys.maxsize", show all images in dataset') + parser.add_argument( + '--show-interval', + '-i', + type=float, + default=2, + help='the interval of show (s)') + parser.add_argument( + '--mode', + '-m', + default='transformed', + type=str, + choices=['original', 'transformed', 'concat', 'pipeline'], + help='display mode; display original pictures or transformed pictures' + ' or comparison pictures. "original" means show images load from disk' + '; "transformed" means to show images after transformed; "concat" ' + 'means show images stitched by "original" and "output" images. ' + '"pipeline" means show all the intermediate images. ' + 'Defaults to "transformed".') + parser.add_argument( + '--rescale-factor', + '-r', + type=float, + help='(For `mode=original`) Image rescale factor, which is useful if' + 'the output is too large or too small.') + parser.add_argument( + '--channel-order', + '-c', + default='BGR', + choices=['BGR', 'RGB'], + help='The channel order of the showing images, could be "BGR" ' + 'or "RGB", Defaults to "BGR".') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def make_grid(imgs, names): + """Concat list of pictures into a single big picture, align height here.""" + # A large canvas to ensure all text clear. 
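+    # One column per entry in `imgs`; an entry that is itself a list of images
+    # (e.g. from a multi-view pipeline) is stacked down the rows of its column.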
+ figure = create_figure(dpi=150, figsize=(16, 9)) + + # deal with imgs + max_nrows = 1 + img_shapes = [] + for img in imgs: + if isinstance(img, list): + max_nrows = max(len(img), max_nrows) + img_shapes.append([i.shape[:2] for i in img]) + else: + img_shapes.append(img.shape[:2]) + gs = figure.add_gridspec(max_nrows, len(imgs)) + + for i, img in enumerate(imgs): + if isinstance(img, list): + for j in range(len(img)): + subplot = figure.add_subplot(gs[j, i]) + subplot.axis(False) + subplot.imshow(img[j]) + name = '\n'.join(textwrap.wrap(names[i] + str(j), width=20)) + subplot.set_title( + f'{name}\n{img_shapes[i][j]}', + fontsize=15, + family='monospace') + else: + subplot = figure.add_subplot(gs[:, i]) + subplot.axis(False) + subplot.imshow(img) + name = '\n'.join(textwrap.wrap(names[i], width=20)) + subplot.set_title( + f'{name}\n{img_shapes[i]}', fontsize=15, family='monospace') + + # Manage the gap of subplots + figure.tight_layout() + + # Remove the white boundary (reserve 0.5 inches at the top to show label) + points = figure.get_tightbbox( + figure.canvas.get_renderer()).get_points() + [[0, 0], [0, 0.5]] + adjust_bbox(figure, transforms.Bbox(points)) + + return img_from_canvas(figure.canvas) + + +class InspectCompose(Compose): + """Compose multiple transforms sequentially. + + And record "img" field of all results in one list. + """ + + def __init__(self, transforms, intermediate_imgs, visualizer): + super().__init__(transforms=transforms) + self.intermediate_imgs = intermediate_imgs + self.visualizer = visualizer + + def __call__(self, data): + if 'img' in data: + self.intermediate_imgs.append({ + 'name': 'Original', + 'img': data['img'].copy() + }) + + for t in self.transforms: + data = t(data) + if data is None: + return None + if 'img' in data: + img = data['img'].copy() + if 'mask' in data: + tmp_img = img[0] if isinstance(img, list) else img + tmp_img = self.visualizer.add_mask_to_image( + tmp_img, + DataSample().set_mask(data['mask']), + resize=tmp_img.shape[:2]) + img = [tmp_img] + img[1:] if isinstance(img, + list) else tmp_img + self.intermediate_imgs.append({ + 'name': t.__class__.__name__, + 'img': img + }) + return data + + +def main(): + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + init_default_scope('mmpretrain') # Use mmpretrain as default scope. + + dataset_cfg = cfg.get(args.phase + '_dataloader').get('dataset') + dataset = build_dataset(dataset_cfg) + + # init visualizer + cfg.visualizer.pop('type') + fig_cfg = dict(figsize=(16, 10)) + visualizer = UniversalVisualizer( + **cfg.visualizer, fig_show_cfg=fig_cfg, fig_save_cfg=fig_cfg) + visualizer.dataset_meta = dataset.metainfo + + # init inspection + intermediate_imgs = [] + dataset.pipeline = InspectCompose(dataset.pipeline.transforms, + intermediate_imgs, visualizer) + + # init visualization image number + display_number = min(args.show_number, len(dataset)) + progress_bar = ProgressBar(display_number) + + for i, item in zip(range(display_number), dataset): + + rescale_factor = None + if args.mode == 'original': + image = intermediate_imgs[0]['img'] + # Only original mode need rescale factor, `make_grid` will use + # matplotlib to manage the size of subplots. 
+ rescale_factor = args.rescale_factor + elif args.mode == 'transformed': + image = make_grid([intermediate_imgs[-1]['img']], ['transformed']) + elif args.mode == 'concat': + ori_image = intermediate_imgs[0]['img'] + trans_image = intermediate_imgs[-1]['img'] + image = make_grid([ori_image, trans_image], + ['original', 'transformed']) + else: + image = make_grid([result['img'] for result in intermediate_imgs], + [result['name'] for result in intermediate_imgs]) + + intermediate_imgs.clear() + + data_sample = item['data_samples'].numpy() + + # get filename from dataset or just use index as filename + if hasattr(item['data_samples'], 'img_path'): + filename = osp.basename(item['data_samples'].img_path) + else: + # some dataset have not image path + filename = f'{i}.jpg' + + out_file = osp.join(args.output_dir, + filename) if args.output_dir is not None else None + + visualizer.visualize_cls( + image if args.channel_order == 'RGB' else image[..., ::-1], + data_sample, + rescale_factor=rescale_factor, + show=not args.not_show, + wait_time=args.show_interval, + name=filename, + out_file=out_file) + progress_bar.update() + + +if __name__ == '__main__': + main() diff --git a/tools/visualization/vis_cam.py b/tools/visualization/vis_cam.py new file mode 100644 index 0000000..2122390 --- /dev/null +++ b/tools/visualization/vis_cam.py @@ -0,0 +1,274 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import copy +import math +import pkg_resources +from functools import partial +from pathlib import Path + +import mmcv +import numpy as np +import torch.nn as nn +from mmcv.transforms import Compose +from mmengine.config import Config, DictAction +from mmengine.dataset import default_collate +from mmengine.utils import to_2tuple +from mmengine.utils.dl_utils import is_norm + +from mmpretrain import digit_version +from mmpretrain.apis import get_model +from mmpretrain.registry import TRANSFORMS + +try: + import pytorch_grad_cam as cam + from pytorch_grad_cam.activations_and_gradients import \ + ActivationsAndGradients + from pytorch_grad_cam.utils.image import show_cam_on_image +except ImportError: + raise ImportError('Please run `pip install "grad-cam>=1.3.6"` to install ' + '3rd party package pytorch_grad_cam.') + +# Alias name +METHOD_MAP = { + 'gradcam++': cam.GradCAMPlusPlus, +} +METHOD_MAP.update({ + cam_class.__name__.lower(): cam_class + for cam_class in cam.base_cam.BaseCAM.__subclasses__() +}) + + +def parse_args(): + parser = argparse.ArgumentParser(description='Visualize CAM') + parser.add_argument('img', help='Image file') + parser.add_argument('config', help='Config file') + parser.add_argument('checkpoint', help='Checkpoint file') + parser.add_argument( + '--target-layers', + default=[], + nargs='+', + type=str, + help='The target layers to get CAM, if not set, the tool will ' + 'specify the norm layer in the last block. 
Backbones ' + 'implemented by users are recommended to manually specify' + ' target layers in commmad statement.') + parser.add_argument( + '--preview-model', + default=False, + action='store_true', + help='To preview all the model layers') + parser.add_argument( + '--method', + default='GradCAM', + help='Type of method to use, supports ' + f'{", ".join(list(METHOD_MAP.keys()))}.') + parser.add_argument( + '--target-category', + default=[], + nargs='+', + type=int, + help='The target category to get CAM, default to use result ' + 'get from given model.') + parser.add_argument( + '--eigen-smooth', + default=False, + action='store_true', + help='Reduce noise by taking the first principle componenet of ' + '``cam_weights*activations``') + parser.add_argument( + '--aug-smooth', + default=False, + action='store_true', + help='Wether to use test time augmentation, default not to use') + parser.add_argument( + '--save-path', + type=Path, + help='The path to save visualize cam image, default not to save.') + parser.add_argument('--device', default='cpu', help='Device to use cpu') + parser.add_argument( + '--vit-like', + action='store_true', + help='Whether the network is a ViT-like network.') + parser.add_argument( + '--num-extra-tokens', + type=int, + help='The number of extra tokens in ViT-like backbones. Defaults to' + ' use num_extra_tokens of the backbone.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + if args.method.lower() not in METHOD_MAP.keys(): + raise ValueError(f'invalid CAM type {args.method},' + f' supports {", ".join(list(METHOD_MAP.keys()))}.') + + return args + + +def reshape_transform(tensor, model, args): + """Build reshape_transform for `cam.activations_and_grads`, which is + necessary for ViT-like networks.""" + # ViT_based_Transformers have an additional clstoken in features + if tensor.ndim == 4: + # For (B, C, H, W) + return tensor + elif tensor.ndim == 3: + if not args.vit_like: + raise ValueError(f"The tensor shape is {tensor.shape}, if it's a " + 'vit-like backbone, please specify `--vit-like`.') + # For (B, L, C) + num_extra_tokens = args.num_extra_tokens or getattr( + model.backbone, 'num_extra_tokens', 1) + + tensor = tensor[:, num_extra_tokens:, :] + # get heat_map_height and heat_map_width, preset input is a square + heat_map_area = tensor.size()[1] + height, width = to_2tuple(int(math.sqrt(heat_map_area))) + assert height * height == heat_map_area, \ + (f"The input feature's length ({heat_map_area+num_extra_tokens}) " + f'minus num-extra-tokens ({num_extra_tokens}) is {heat_map_area},' + ' which is not a perfect square number. 
Please check if you used ' + 'a wrong num-extra-tokens.') + # (B, L, C) -> (B, H, W, C) + result = tensor.reshape(tensor.size(0), height, width, tensor.size(2)) + # (B, H, W, C) -> (B, C, H, W) + result = result.permute(0, 3, 1, 2) + return result + else: + raise ValueError(f'Unsupported tensor shape {tensor.shape}.') + + +def init_cam(method, model, target_layers, use_cuda, reshape_transform): + """Construct the CAM object once, In order to be compatible with + mmpretrain, here we modify the ActivationsAndGradients object.""" + GradCAM_Class = METHOD_MAP[method.lower()] + cam = GradCAM_Class( + model=model, target_layers=target_layers, use_cuda=use_cuda) + # Release the original hooks in ActivationsAndGradients to use + # ActivationsAndGradients. + cam.activations_and_grads.release() + cam.activations_and_grads = ActivationsAndGradients( + cam.model, cam.target_layers, reshape_transform) + + return cam + + +def get_layer(layer_str, model): + """get model layer from given str.""" + for name, layer in model.named_modules(): + if name == layer_str: + return layer + raise AttributeError( + f'Cannot get the layer "{layer_str}". Please choose from: \n' + + '\n'.join(name for name, _ in model.named_modules())) + + +def show_cam_grad(grayscale_cam, src_img, title, out_path=None): + """fuse src_img and grayscale_cam and show or save.""" + grayscale_cam = grayscale_cam[0, :] + src_img = np.float32(src_img) / 255 + visualization_img = show_cam_on_image( + src_img, grayscale_cam, use_rgb=False) + + if out_path: + mmcv.imwrite(visualization_img, str(out_path)) + else: + mmcv.imshow(visualization_img, win_name=title) + + +def get_default_target_layers(model, args): + """get default target layers from given model, here choose nrom type layer + as default target layer.""" + norm_layers = [ + (name, layer) + for name, layer in model.backbone.named_modules(prefix='backbone') + if is_norm(layer) + ] + if args.vit_like: + # For ViT models, the final classification is done on the class token. + # And the patch tokens and class tokens won't interact each other after + # the final attention layer. Therefore, we need to choose the norm + # layer before the last attention layer. + num_extra_tokens = args.num_extra_tokens or getattr( + model.backbone, 'num_extra_tokens', 1) + + # models like swin have no attr 'out_type', set out_type to avg_featmap + out_type = getattr(model.backbone, 'out_type', 'avg_featmap') + if out_type == 'cls_token' or num_extra_tokens > 0: + # Assume the backbone feature is class token. 
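+            # For a typical ViT-style backbone, the last two recorded norms come
+            # after the final attention (the pre-FFN norm and the final norm), so
+            # the third-from-last is the norm right before the last attention.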
+ name, layer = norm_layers[-3] + print('Automatically choose the last norm layer before the ' + f'final attention block "{name}" as the target layer.') + return [layer] + + # For CNN models, use the last norm layer as the target-layer + name, layer = norm_layers[-1] + print('Automatically choose the last norm layer ' + f'"{name}" as the target layer.') + return [layer] + + +def main(): + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # build the model from a config file and a checkpoint file + model: nn.Module = get_model(cfg, args.checkpoint, device=args.device) + if args.preview_model: + print(model) + print('\n Please remove `--preview-model` to get the CAM.') + return + + # apply transform and perpare data + transforms = Compose( + [TRANSFORMS.build(t) for t in cfg.test_dataloader.dataset.pipeline]) + data = transforms({'img_path': args.img}) + src_img = copy.deepcopy(data['inputs']).numpy().transpose(1, 2, 0) + data = model.data_preprocessor(default_collate([data]), False) + + # build target layers + if args.target_layers: + target_layers = [ + get_layer(layer, model) for layer in args.target_layers + ] + else: + target_layers = get_default_target_layers(model, args) + + # init a cam grad calculator + use_cuda = ('cuda' in args.device) + cam = init_cam(args.method, model, target_layers, use_cuda, + partial(reshape_transform, model=model, args=args)) + + # warp the target_category with ClassifierOutputTarget in grad_cam>=1.3.7, + # to fix the bug in #654. + targets = None + if args.target_category: + grad_cam_v = pkg_resources.get_distribution('grad_cam').version + if digit_version(grad_cam_v) >= digit_version('1.3.7'): + from pytorch_grad_cam.utils.model_targets import \ + ClassifierOutputTarget + targets = [ClassifierOutputTarget(c) for c in args.target_category] + else: + targets = args.target_category + + # calculate cam grads and show|save the visualization image + grayscale_cam = cam( + data['inputs'], + targets, + eigen_smooth=args.eigen_smooth, + aug_smooth=args.aug_smooth) + show_cam_grad( + grayscale_cam, src_img, title=args.method, out_path=args.save_path) + + +if __name__ == '__main__': + main() diff --git a/tools/visualization/vis_scheduler.py b/tools/visualization/vis_scheduler.py new file mode 100644 index 0000000..21207ae --- /dev/null +++ b/tools/visualization/vis_scheduler.py @@ -0,0 +1,280 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
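+# Example (illustrative): plot the LR schedule of an 8-GPU run without building the real dataset
+#   python tools/visualization/vis_scheduler.py configs/resnet/resnet50_8xb32_in1k.py -p lr -d 1281167 -n 8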
+import argparse +import json +import os.path as osp +import re +from pathlib import Path +from unittest.mock import MagicMock + +import matplotlib.pyplot as plt +import rich +import torch.nn as nn +from mmengine.config import Config, DictAction +from mmengine.hooks import Hook +from mmengine.model import BaseModel +from mmengine.runner import Runner +from mmengine.visualization import Visualizer +from rich.progress import BarColumn, MofNCompleteColumn, Progress, TextColumn + + +class SimpleModel(BaseModel): + """simple model that do nothing in train_step.""" + + def __init__(self): + super(SimpleModel, self).__init__() + self.data_preprocessor = nn.Identity() + self.conv = nn.Conv2d(1, 1, 1) + + def forward(self, inputs, data_samples, mode='tensor'): + pass + + def train_step(self, data, optim_wrapper): + pass + + +class ParamRecordHook(Hook): + + def __init__(self, by_epoch): + super().__init__() + self.by_epoch = by_epoch + self.lr_list = [] + self.momentum_list = [] + self.wd_list = [] + self.task_id = 0 + self.progress = Progress(BarColumn(), MofNCompleteColumn(), + TextColumn('{task.description}')) + + def before_train(self, runner): + if self.by_epoch: + total = runner.train_loop.max_epochs + self.task_id = self.progress.add_task( + 'epochs', start=True, total=total) + else: + total = runner.train_loop.max_iters + self.task_id = self.progress.add_task( + 'iters', start=True, total=total) + self.progress.start() + + def after_train_epoch(self, runner): + if self.by_epoch: + self.progress.update(self.task_id, advance=1) + + def after_train_iter(self, runner, batch_idx, data_batch, outputs): + if not self.by_epoch: + self.progress.update(self.task_id, advance=1) + self.lr_list.append(runner.optim_wrapper.get_lr()['lr'][0]) + self.momentum_list.append( + runner.optim_wrapper.get_momentum()['momentum'][0]) + self.wd_list.append( + runner.optim_wrapper.param_groups[0]['weight_decay']) + + def after_train(self, runner): + self.progress.stop() + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Visualize a Dataset Pipeline') + parser.add_argument('config', help='config file path') + parser.add_argument( + '-p', + '--parameter', + type=str, + default='lr', + choices=['lr', 'momentum', 'wd'], + help='The parameter to visualize its change curve, choose from' + '"lr", "wd" and "momentum". Defaults to "lr".') + parser.add_argument( + '-d', + '--dataset-size', + type=int, + help='The size of the dataset. If specify, `build_dataset` will ' + 'be skipped and use this size as the dataset size.') + parser.add_argument( + '-n', + '--ngpus', + type=int, + default=1, + help='The number of GPUs used in training.') + parser.add_argument( + '-s', + '--save-path', + type=Path, + help='The learning rate curve plot save path') + parser.add_argument( + '--log-level', + default='WARNING', + help='The log level of the handler and logger. Defaults to ' + 'WARNING.') + parser.add_argument('--title', type=str, help='title of figure') + parser.add_argument( + '--style', + type=str, + default='whitegrid', + help='style of the figure, need `seaborn` package.') + parser.add_argument('--not-show', default=False, action='store_true') + parser.add_argument( + '--window-size', + default='12*7', + help='Size of the window to display images, in format of "$W*$H".') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. 
If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + if args.window_size != '': + assert re.match(r'\d+\*\d+', args.window_size), \ + "'window-size' must be in format 'W*H'." + + return args + + +def plot_curve(lr_list, args, param_name, iters_per_epoch, by_epoch=True): + """Plot learning rate vs iter graph.""" + try: + import seaborn as sns + sns.set_style(args.style) + except ImportError: + pass + + wind_w, wind_h = args.window_size.split('*') + wind_w, wind_h = int(wind_w), int(wind_h) + plt.figure(figsize=(wind_w, wind_h)) + + ax: plt.Axes = plt.subplot() + ax.plot(lr_list, linewidth=1) + + if by_epoch: + ax.xaxis.tick_top() + ax.set_xlabel('Iters') + ax.xaxis.set_label_position('top') + sec_ax = ax.secondary_xaxis( + 'bottom', + functions=(lambda x: x / iters_per_epoch, + lambda y: y * iters_per_epoch)) + sec_ax.set_xlabel('Epochs') + else: + plt.xlabel('Iters') + plt.ylabel(param_name) + + if args.title is None: + plt.title(f'{osp.basename(args.config)} {param_name} curve') + else: + plt.title(args.title) + + +def simulate_train(data_loader, cfg, by_epoch): + model = SimpleModel() + param_record_hook = ParamRecordHook(by_epoch=by_epoch) + default_hooks = dict( + param_scheduler=cfg.default_hooks['param_scheduler'], + runtime_info=None, + timer=None, + logger=None, + checkpoint=None, + sampler_seed=None, + param_record=param_record_hook) + + runner = Runner( + model=model, + work_dir=cfg.work_dir, + train_dataloader=data_loader, + train_cfg=cfg.train_cfg, + log_level=cfg.log_level, + optim_wrapper=cfg.optim_wrapper, + param_scheduler=cfg.param_scheduler, + default_scope=cfg.default_scope, + default_hooks=default_hooks, + visualizer=MagicMock(spec=Visualizer), + custom_hooks=cfg.get('custom_hooks', None)) + + runner.train() + + param_dict = dict( + lr=param_record_hook.lr_list, + momentum=param_record_hook.momentum_list, + wd=param_record_hook.wd_list) + + return param_dict + + +def main(): + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + if cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + cfg.log_level = args.log_level + + # make sure save_root exists + if args.save_path and not args.save_path.parent.exists(): + raise FileNotFoundError( + f'The save path is {args.save_path}, and directory ' + f"'{args.save_path.parent}' do not exist.") + + # init logger + print('Param_scheduler :') + rich.print_json(json.dumps(cfg.param_scheduler)) + + # prepare data loader + batch_size = cfg.train_dataloader.batch_size * args.ngpus + + if 'by_epoch' in cfg.train_cfg: + by_epoch = cfg.train_cfg.get('by_epoch') + elif 'type' in cfg.train_cfg: + by_epoch = cfg.train_cfg.get('type') == 'EpochBasedTrainLoop' + else: + raise ValueError('please set `train_cfg`.') + + if args.dataset_size is None and by_epoch: + from mmpretrain.datasets import build_dataset + dataset_size = len(build_dataset(cfg.train_dataloader.dataset)) + else: + dataset_size = args.dataset_size or batch_size + + class FakeDataloader(list): + dataset = MagicMock(metainfo=None) + + data_loader = FakeDataloader(range(dataset_size // batch_size)) + dataset_info = ( + f'\nDataset infos:' + 
f'\n - Dataset size: {dataset_size}' + f'\n - Batch size per GPU: {cfg.train_dataloader.batch_size}' + f'\n - Number of GPUs: {args.ngpus}' + f'\n - Total batch size: {batch_size}') + if by_epoch: + dataset_info += f'\n - Iterations per epoch: {len(data_loader)}' + rich.print(dataset_info + '\n') + + # simulation training process + param_dict = simulate_train(data_loader, cfg, by_epoch) + param_list = param_dict[args.parameter] + + if args.parameter == 'lr': + param_name = 'Learning Rate' + elif args.parameter == 'momentum': + param_name = 'Momentum' + else: + param_name = 'Weight Decay' + plot_curve(param_list, args, param_name, len(data_loader), by_epoch) + + if args.save_path: + plt.savefig(args.save_path) + print(f'\nThe {param_name} graph is saved at {args.save_path}') + + if not args.not_show: + plt.show() + + +if __name__ == '__main__': + main() diff --git a/tools/visualization/vis_tsne.py b/tools/visualization/vis_tsne.py new file mode 100644 index 0000000..2158f30 --- /dev/null +++ b/tools/visualization/vis_tsne.py @@ -0,0 +1,267 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os.path as osp +import time +from collections import defaultdict + +import matplotlib.pyplot as plt +import numpy as np +import rich.progress as progress +import torch +import torch.nn.functional as F +from mmengine.config import Config, DictAction +from mmengine.device import get_device +from mmengine.logging import MMLogger +from mmengine.runner import Runner +from mmengine.utils import mkdir_or_exist + +from mmpretrain.apis import get_model +from mmpretrain.registry import DATASETS + +try: + from sklearn.manifold import TSNE +except ImportError as e: + raise ImportError('Please install `sklearn` to calculate ' + 'TSNE by `pip install scikit-learn`') from e + + +def parse_args(): + parser = argparse.ArgumentParser(description='t-SNE visualization') + parser.add_argument('config', help='tsne config file path') + parser.add_argument('--checkpoint', default=None, help='checkpoint file') + parser.add_argument('--work-dir', help='the dir to save logs and models') + parser.add_argument( + '--test-cfg', + help='tsne config file path to load config of test dataloader.') + parser.add_argument( + '--vis-stage', + choices=['backbone', 'neck', 'pre_logits'], + help='The visualization stage of the model') + parser.add_argument( + '--class-idx', + nargs='+', + type=int, + help='The categories used to calculate t-SNE.') + parser.add_argument( + '--max-num-class', + type=int, + default=20, + help='The first N categories to apply t-SNE algorithms. ' + 'Defaults to 20.') + parser.add_argument( + '--max-num-samples', + type=int, + default=100, + help='The maximum number of samples per category. ' + 'Higher number need longer time to calculate. Defaults to 100.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. 
key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + parser.add_argument('--device', help='Device used for inference') + parser.add_argument( + '--legend', + action='store_true', + help='Show the legend of all categories.') + parser.add_argument( + '--show', + action='store_true', + help='Display the result in a graphical window.') + + # t-SNE settings + parser.add_argument( + '--n-components', type=int, default=2, help='the dimension of results') + parser.add_argument( + '--perplexity', + type=float, + default=30.0, + help='The perplexity is related to the number of nearest neighbors' + 'that is used in other manifold learning algorithms.') + parser.add_argument( + '--early-exaggeration', + type=float, + default=12.0, + help='Controls how tight natural clusters in the original space are in' + 'the embedded space and how much space will be between them.') + parser.add_argument( + '--learning-rate', + type=float, + default=200.0, + help='The learning rate for t-SNE is usually in the range' + '[10.0, 1000.0]. If the learning rate is too high, the data may look' + 'like a ball with any point approximately equidistant from its nearest' + 'neighbours. If the learning rate is too low, most points may look' + 'compressed in a dense cloud with few outliers.') + parser.add_argument( + '--n-iter', + type=int, + default=1000, + help='Maximum number of iterations for the optimization. Should be at' + 'least 250.') + parser.add_argument( + '--n-iter-without-progress', + type=int, + default=300, + help='Maximum number of iterations without progress before we abort' + 'the optimization.') + parser.add_argument( + '--init', type=str, default='random', help='The init method') + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + # work_dir is determined in this priority: CLI > segment in file > filename + if args.work_dir is not None: + # update configs according to CLI args if args.work_dir is not None + cfg.work_dir = args.work_dir + elif cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + work_type = args.config.split('/')[1] + cfg.work_dir = osp.join('./work_dirs', work_type, + osp.splitext(osp.basename(args.config))[0]) + + # create work_dir + timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime()) + tsne_work_dir = osp.join(cfg.work_dir, f'tsne_{timestamp}/') + mkdir_or_exist(osp.abspath(tsne_work_dir)) + + # init the logger before other steps + log_file = osp.join(tsne_work_dir, 'tsne.log') + logger = MMLogger.get_instance( + 'mmpretrain', + logger_name='mmpretrain', + log_file=log_file, + log_level=cfg.log_level) + + # build the model from a config file and a checkpoint file + device = args.device or get_device() + model = get_model(cfg, args.checkpoint, device=device) + logger.info('Model loaded.') + + # build the dataset + if args.test_cfg is not None: + dataloader_cfg = Config.fromfile(args.test_cfg).get('test_dataloader') + elif 'test_dataloader' not in cfg: + raise ValueError('No `test_dataloader` in the config, you can ' + 'specify another config file that includes test ' + 'dataloader settings by the `--test-cfg` option.') + else: + dataloader_cfg = cfg.get('test_dataloader') + + dataset = DATASETS.build(dataloader_cfg.pop('dataset')) + classes = dataset.metainfo.get('classes') + + if args.class_idx is None: + num_classes = 
args.max_num_class if classes is None else len(classes) + args.class_idx = list(range(num_classes))[:args.max_num_class] + + if classes is not None: + classes = [classes[idx] for idx in args.class_idx] + else: + classes = args.class_idx + + # compress dataset, select that the label is less then max_num_class + subset_idx_list = [] + counter = defaultdict(int) + for i in range(len(dataset)): + gt_label = dataset.get_data_info(i)['gt_label'] + if (gt_label in args.class_idx + and counter[gt_label] < args.max_num_samples): + subset_idx_list.append(i) + counter[gt_label] += 1 + dataset.get_subset_(subset_idx_list) + logger.info(f'Apply t-SNE to visualize {len(subset_idx_list)} samples.') + + dataloader_cfg.dataset = dataset + dataloader_cfg.setdefault('collate_fn', dict(type='default_collate')) + dataloader = Runner.build_dataloader(dataloader_cfg) + + results = dict() + features = [] + labels = [] + for data in progress.track(dataloader, description='Calculating...'): + with torch.no_grad(): + # preprocess data + data = model.data_preprocessor(data) + batch_inputs, batch_data_samples = \ + data['inputs'], data['data_samples'] + batch_labels = torch.cat([i.gt_label for i in batch_data_samples]) + + # extract backbone features + extract_args = {} + if args.vis_stage: + extract_args['stage'] = args.vis_stage + batch_features = model.extract_feat(batch_inputs, **extract_args) + + # post process + if batch_features[0].ndim == 4: + # For (N, C, H, W) feature + batch_features = [ + F.adaptive_avg_pool2d(inputs, 1).squeeze() + for inputs in batch_features + ] + elif batch_features[0].ndim == 3: + # For (N, L, C) feature + batch_features = [inputs.mean(1) for inputs in batch_features] + + # save batch features + features.append(batch_features) + labels.extend(batch_labels.cpu().numpy()) + + for i in range(len(features[0])): + key = 'feat_' + str(model.backbone.out_indices[i]) + results[key] = np.concatenate( + [batch[i].cpu().numpy() for batch in features], axis=0) + + # save features + for key, val in results.items(): + output_file = f'{tsne_work_dir}{key}.npy' + np.save(output_file, val) + + # build t-SNE model + tsne_model = TSNE( + n_components=args.n_components, + perplexity=args.perplexity, + early_exaggeration=args.early_exaggeration, + learning_rate=args.learning_rate, + n_iter=args.n_iter, + n_iter_without_progress=args.n_iter_without_progress, + init=args.init) + + # run and get results + logger.info('Running t-SNE.') + for key, val in results.items(): + result = tsne_model.fit_transform(val) + res_min, res_max = result.min(0), result.max(0) + res_norm = (result - res_min) / (res_max - res_min) + _, ax = plt.subplots(figsize=(10, 10)) + scatter = ax.scatter( + res_norm[:, 0], + res_norm[:, 1], + alpha=1.0, + s=15, + c=labels, + cmap='tab20') + if args.legend: + legend = ax.legend(scatter.legend_elements()[0], classes) + ax.add_artist(legend) + plt.savefig(f'{tsne_work_dir}{key}.png') + if args.show: + plt.show() + logger.info(f'Save features and results to {tsne_work_dir}') + + +if __name__ == '__main__': + main() diff --git a/vgg16_8xb32_in1k.py b/vgg16_8xb32_in1k.py new file mode 100644 index 0000000..a085434 --- /dev/null +++ b/vgg16_8xb32_in1k.py @@ -0,0 +1,18 @@ +_base_ = [ + 'configs/_base_/models/vgg16.py', + 'configs/_base_/datasets/tiny_imagenet_bs32_pil_resize.py', + 'configs/_base_/schedules/imagenet_bs256.py', + 'configs/_base_/default_runtime.py', +] + +import torch +torch.backends.cuda.matmul.allow_tf32=True +torch.backends.cudnn.allow_tf32=True + +# schedule settings 
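+# Uncommenting `type='AmpOptimWrapper'` and `dtype='float16'` below switches
+# this wrapper to mixed-precision training (the same wrapper the ViT-Large
+# test config further down enables with bfloat16); the TF32 flags set above
+# only affect full-precision matmul/conv kernels on GPUs that support TF32.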
+optim_wrapper = dict( + #type='AmpOptimWrapper', + #dtype='float16', + optimizer=dict(lr=0.01) + ) + diff --git a/vit-base-p16_32xb128-mae_in200.py b/vit-base-p16_32xb128-mae_in200.py new file mode 100644 index 0000000..b1e903b --- /dev/null +++ b/vit-base-p16_32xb128-mae_in200.py @@ -0,0 +1,74 @@ +_base_ = [ + 'configs/_base_/datasets/tiny_imagenet_bs64_swin_224.py', + 'configs/_base_/schedules/imagenet_bs1024_adamw_swin.py', + 'configs/_base_/default_runtime.py' +] + +import os + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='base', + img_size=224, + patch_size=16, + drop_path_rate=0.1), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=200, + in_channels=768, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# dataset settings +train_dataloader = dict(batch_size=128) + +# schedule settings +optim_wrapper = dict( + #type='AmpOptimWrapper', + #dtype='float16', + optimizer=dict( + type='AdamW', + lr=1e-4 * 4096 / 256, + weight_decay=0.3, + eps=1e-8, + betas=(0.9, 0.95)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# runtime settings +custom_hooks = [dict(type='EMAHook', momentum=1e-4)] + +# Custom hooks: add a ProfilerHook, enabled only on rank 0 +#custom_hooks = [ +# dict(type='EMAHook', momentum=1e-4), +# dict(type='ProfilerHook', by_epoch=False, +# profile_times=12, +# on_trace_ready=dict(type="log_trace", sort_by="self_cuda_time_total"), +# json_trace_path=f"trace_vitb_bf16.json", +# activity_with_cuda=True, +# schedule=dict(wait=1, warmup=1, active=10, repeat=1)) # this schedule profiles 10 iterations +#] if os.environ['LOCAL_RANK'] == '0' else [dict(type='EMAHook', momentum=1e-4)] + + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size.
+# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) diff --git a/vit-large-p16-64xb64-test.py b/vit-large-p16-64xb64-test.py new file mode 100644 index 0000000..344c819 --- /dev/null +++ b/vit-large-p16-64xb64-test.py @@ -0,0 +1,18 @@ +_base_ = [ + 'configs/_base_/models/tiny-vit-large-p16.py', + 'configs/_base_/datasets/tiny_imagenet_bs64_pil_resize_autoaug.py', + 'configs/_base_/schedules/imagenet_bs4096_AdamW.py', + 'configs/_base_/default_runtime.py' +] + +# model setting +model = dict( + head=dict(hidden_dim=3072), + train_cfg=dict(augments=dict(type='Mixup', alpha=0.2)), +) + +# schedule setting +optim_wrapper = dict( + type='AmpOptimWrapper', + dtype='bfloat16', + clip_grad=dict(max_norm=1.0)) diff --git a/vit-large-p16_32xb128-mae_in200.py b/vit-large-p16_32xb128-mae_in200.py new file mode 100644 index 0000000..8c26eef --- /dev/null +++ b/vit-large-p16_32xb128-mae_in200.py @@ -0,0 +1,74 @@ +_base_ = [ + 'configs/_base_/datasets/tiny_imagenet_bs64_swin_224.py', + 'configs/_base_/schedules/imagenet_bs1024_adamw_swin.py', + 'configs/_base_/default_runtime.py' +] + +import os + +# model settings +model = dict( + type='ImageClassifier', + backbone=dict( + type='VisionTransformer', + arch='large', + img_size=224, + patch_size=16, + drop_path_rate=0.1), + neck=None, + head=dict( + type='VisionTransformerClsHead', + num_classes=200, + in_channels=1024, + loss=dict( + type='LabelSmoothLoss', label_smooth_val=0.1, mode='original'), + ), + init_cfg=[ + dict(type='TruncNormal', layer='Linear', std=.02), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.), + ], + train_cfg=dict(augments=[ + dict(type='Mixup', alpha=0.8), + dict(type='CutMix', alpha=1.0) + ])) + +# dataset settings +train_dataloader = dict(batch_size=128) + +# schedule settings +optim_wrapper = dict( + #type='AmpOptimWrapper', + #dtype='bfloat16', + optimizer=dict( + type='AdamW', + lr=1e-4 * 4096 / 256, + weight_decay=0.3, + eps=1e-8, + betas=(0.9, 0.95)), + paramwise_cfg=dict( + norm_decay_mult=0.0, + bias_decay_mult=0.0, + custom_keys={ + '.cls_token': dict(decay_mult=0.0), + '.pos_embed': dict(decay_mult=0.0) + })) + +# runtime settings +custom_hooks = [dict(type='EMAHook', momentum=1e-4)] + +# Custom hooks: add a ProfilerHook, enabled only on rank 0 +#custom_hooks = [ +# dict(type='EMAHook', momentum=1e-4), +# dict(type='ProfilerHook', by_epoch=False, +# profile_times=12, +# on_trace_ready=dict(type="log_trace", sort_by="self_cuda_time_total"), +# json_trace_path=f"trace_vitb_bf16.json", +# activity_with_cuda=True, +# schedule=dict(wait=1, warmup=1, active=10, repeat=1)) # this schedule profiles 10 iterations
#] if os.environ['LOCAL_RANK'] == '0' else [dict(type='EMAHook', momentum=1e-4)] + + +# NOTE: `auto_scale_lr` is for automatically scaling LR +# based on the actual training batch size. +# base_batch_size = (32 GPUs) x (128 samples per GPU) +auto_scale_lr = dict(base_batch_size=4096) -- GitLab
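The NOTE above is worth making concrete. Below is a minimal sketch (not part of the patch; the helper name is illustrative) of the linear scaling rule that `auto_scale_lr`, when enabled, applies whenever the real total batch size differs from `base_batch_size`:

def scaled_lr(base_lr: float, base_batch_size: int, num_gpus: int, samples_per_gpu: int) -> float:
    # Linear scaling rule: the LR moves proportionally to the actual total batch size.
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size

base_lr = 1e-4 * 4096 / 256                  # 1.6e-3, as written in the configs above
print(scaled_lr(base_lr, 4096, 32, 128))     # 32 x 128 = 4096 -> unchanged, 1.6e-3
print(scaled_lr(base_lr, 4096, 8, 128))      # 8 x 128 = 1024 -> 4e-4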

(git binary patch data omitted) diff --git a/mmpretrain/models/heads/beitv1_head.py b/mmpretrain/models/heads/beitv1_head.py new file mode 100644 index 0000000..df422ea --- /dev/null +++ b/mmpretrain/models/heads/beitv1_head.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import torch +import torch.nn as nn +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class BEiTV1Head(BaseModule): + """Head for BEiT v1 Pre-training. + + Compute the logits and the cross entropy loss. + + Args: + embed_dims (int): The dimension of embedding. + num_embed (int): The number of classification types. + loss (dict): The config of loss. + init_cfg (dict or List[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__( + self, + embed_dims: int, + num_embed: int, + loss: dict, + init_cfg: Optional[Union[dict, List[dict]]] = dict( + type='TruncNormal', layer='Linear', std=0.02, bias=0) + ) -> None: + super().__init__(init_cfg=init_cfg) + self.cls_head = nn.Linear(embed_dims, num_embed) + self.loss_module = MODELS.build(loss) + + def loss(self, feats: torch.Tensor, target: torch.Tensor, + mask: torch.Tensor) -> torch.Tensor: + """Generate loss. + + Args: + feats (torch.Tensor): Features from backbone. + target (torch.Tensor): Target generated by target_generator. + mask (torch.Tensor): Generated mask for pretraining. + """ + mask = mask.flatten(1).to(torch.bool) + target = torch.argmax(target, dim=1).flatten(1) + target = target[mask] + + # remove cls_token + feats = feats[:, 1:] + logits = self.cls_head(feats[mask]) + + loss = self.loss_module(logits, target) + return loss diff --git a/mmpretrain/models/heads/beitv2_head.py b/mmpretrain/models/heads/beitv2_head.py new file mode 100644 index 0000000..cf677a2 --- /dev/null +++ b/mmpretrain/models/heads/beitv2_head.py @@ -0,0 +1,57 @@ +# Copyright (c) OpenMMLab. All rights reserved.
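+# This head shares a single linear `cls_head` between the masked patch tokens
+# and the [CLS]-pretraining branch, and returns one cross-entropy loss per
+# branch (`loss_1` and `loss_2` in `loss()` below).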
+from typing import List, Optional, Union + +import torch +import torch.nn as nn +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class BEiTV2Head(BaseModule): + """Head for BEiT v2 Pre-training. + + Compute the logits and the cross entropy loss. + + Args: + embed_dims (int): The dimension of embedding. + num_embed (int): The number of classification types. + loss (dict): The config of loss. + init_cfg (dict or List[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__( + self, + embed_dims: int, + num_embed: int, + loss: dict, + init_cfg: Optional[Union[dict, List[dict]]] = dict( + type='TruncNormal', layer='Linear', std=0.02, bias=0) + ) -> None: + super().__init__(init_cfg=init_cfg) + self.cls_head = nn.Linear(embed_dims, num_embed) + self.loss_module = MODELS.build(loss) + + def loss(self, feats: torch.Tensor, feats_cls_pt: torch.Tensor, + target: torch.Tensor, mask: torch.Tensor) -> torch.Tensor: + """Generate loss. + + Args: + feats (torch.Tensor): Features from backbone. + feats_cls_pt (torch.Tensor) : Features from class late layers for + pretraining. + target (torch.Tensor): Target generated by target_generator. + mask (torch.Tensor): Generated mask for pretraing. + """ + mask = mask.flatten(1).to(torch.bool) + target = target[mask] + + # shared cls head + logits = self.cls_head(feats[mask]) + logits_cls_pt = self.cls_head(feats_cls_pt[mask]) + + loss_1 = self.loss_module(logits, target) + loss_2 = self.loss_module(logits_cls_pt, target) + return loss_1, loss_2 diff --git a/mmpretrain/models/heads/cae_head.py b/mmpretrain/models/heads/cae_head.py new file mode 100644 index 0000000..18a07f0 --- /dev/null +++ b/mmpretrain/models/heads/cae_head.py @@ -0,0 +1,69 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import torch +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class CAEHead(BaseModule): + """Head for CAE Pre-training. + + Compute the align loss and the main loss. In addition, this head also + generates the prediction target generated by dalle. + + Args: + loss (dict): The config of loss. + tokenizer_path (str): The path of the tokenizer. + init_cfg (dict or List[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + loss: dict, + init_cfg: Optional[Union[dict, List[dict]]] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.loss_module = MODELS.build(loss) + + @torch.no_grad() + def _generate_target(self, logits_target: torch.Tensor) -> torch.Tensor: + """Generate the reconstruction target. + + Args: + logits_target (torch.Tensor): The logits generated by DALL-E.s + + Returns: + torch.Tensor: The logits target. + """ + target = torch.argmax(logits_target, dim=1) + return target.flatten(1) + + def loss(self, logits: torch.Tensor, logits_target: torch.Tensor, + latent_pred: torch.Tensor, latent_target: torch.Tensor, + mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Generate loss. + + Args: + logits (torch.Tensor): Logits generated by decoder. + logits_target (img_target): Target generated by dalle for decoder + prediction. + latent_pred (torch.Tensor): Latent prediction by regressor. + latent_target (torch.Tensor): Target for latent prediction, + generated by teacher. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The tuple of loss. + - ``loss_main`` (torch.Tensor): Cross entropy loss. 
+ - ``loss_align`` (torch.Tensor): MSE loss. + """ + + target = self._generate_target(logits_target) # target features + target = target[mask].detach() + + # loss main for decoder, loss align for regressor + loss_main, loss_align = self.loss_module(logits, target, latent_pred, + latent_target) + + return (loss_main, loss_align) diff --git a/mmpretrain/models/heads/cls_head.py b/mmpretrain/models/heads/cls_head.py new file mode 100644 index 0000000..4ac4c51 --- /dev/null +++ b/mmpretrain/models/heads/cls_head.py @@ -0,0 +1,156 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine.model import BaseModule + +from mmpretrain.evaluation.metrics import Accuracy +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample + + +@MODELS.register_module() +class ClsHead(BaseModule): + """Classification head. + + Args: + loss (dict): Config of classification loss. Defaults to + ``dict(type='CrossEntropyLoss', loss_weight=1.0)``. + topk (int | Tuple[int]): Top-k accuracy. Defaults to ``(1, )``. + cal_acc (bool): Whether to calculate accuracy during training. + If you use batch augmentations like Mixup and CutMix during + training, it is pointless to calculate accuracy. + Defaults to False. + init_cfg (dict, optional): the config to control the initialization. + Defaults to None. + """ + + def __init__(self, + loss: dict = dict(type='CrossEntropyLoss', loss_weight=1.0), + topk: Union[int, Tuple[int]] = (1, ), + cal_acc: bool = False, + init_cfg: Optional[dict] = None): + super(ClsHead, self).__init__(init_cfg=init_cfg) + + self.topk = topk + if not isinstance(loss, nn.Module): + loss = MODELS.build(loss) + self.loss_module = loss + self.cal_acc = cal_acc + + def pre_logits(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The process before the final classification head. + + The input ``feats`` is a tuple of tensor, and each tensor is the + feature of a backbone stage. In ``ClsHead``, we just obtain the feature + of the last stage. + """ + # The ClsHead doesn't have other module, just return after unpacking. + return feats[-1] + + def forward(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The forward process.""" + pre_logits = self.pre_logits(feats) + # The ClsHead doesn't have the final classification head, + # just return the unpacked inputs. + return pre_logits + + def loss(self, feats: Tuple[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> dict: + """Calculate losses from the classification score. + + Args: + feats (tuple[Tensor]): The features extracted from the backbone. + Multiple stage inputs are acceptable but only the last stage + will be used to classify. The shape of every item should be + ``(num_samples, num_classes)``. + data_samples (List[DataSample]): The annotation data of + every samples. + **kwargs: Other keyword arguments to forward the loss module. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + # The part can be traced by torch.fx + cls_score = self(feats) + + # The part can not be traced by torch.fx + losses = self._get_loss(cls_score, data_samples, **kwargs) + return losses + + def _get_loss(self, cls_score: torch.Tensor, + data_samples: List[DataSample], **kwargs): + """Unpack data samples and compute loss.""" + # Unpack data samples and pack targets + if 'gt_score' in data_samples[0]: + # Batch augmentation may convert labels to one-hot format scores. 
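+# i.e. each `gt_score` is a (num_classes, ) soft label, so stacking gives an
+# (N, num_classes) target, while the `gt_label` branch below concatenates
+# (typically length-1) hard-label tensors into a (N, ) target.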
+ target = torch.stack([i.gt_score for i in data_samples]) + else: + target = torch.cat([i.gt_label for i in data_samples]) + + # compute loss + losses = dict() + loss = self.loss_module( + cls_score, target, avg_factor=cls_score.size(0), **kwargs) + losses['loss'] = loss + + # compute accuracy + if self.cal_acc: + assert target.ndim == 1, 'If you enable batch augmentation ' \ + 'like mixup during training, `cal_acc` is pointless.' + acc = Accuracy.calculate(cls_score, target, topk=self.topk) + losses.update( + {f'accuracy_top-{k}': a + for k, a in zip(self.topk, acc)}) + + return losses + + def predict( + self, + feats: Tuple[torch.Tensor], + data_samples: Optional[List[Optional[DataSample]]] = None + ) -> List[DataSample]: + """Inference without augmentation. + + Args: + feats (tuple[Tensor]): The features extracted from the backbone. + Multiple stage inputs are acceptable but only the last stage + will be used to classify. The shape of every item should be + ``(num_samples, num_classes)``. + data_samples (List[DataSample | None], optional): The annotation + data of every samples. If not None, set ``pred_label`` of + the input data samples. Defaults to None. + + Returns: + List[DataSample]: A list of data samples which contains the + predicted results. + """ + # The part can be traced by torch.fx + cls_score = self(feats) + + # The part can not be traced by torch.fx + predictions = self._get_predictions(cls_score, data_samples) + return predictions + + def _get_predictions(self, cls_score, data_samples): + """Post-process the output of head. + + Including softmax and set ``pred_label`` of data samples. + """ + pred_scores = F.softmax(cls_score, dim=1) + pred_labels = pred_scores.argmax(dim=1, keepdim=True).detach() + + out_data_samples = [] + if data_samples is None: + data_samples = [None for _ in range(pred_scores.size(0))] + + for data_sample, score, label in zip(data_samples, pred_scores, + pred_labels): + if data_sample is None: + data_sample = DataSample() + + data_sample.set_pred_score(score).set_pred_label(label) + out_data_samples.append(data_sample) + return out_data_samples diff --git a/mmpretrain/models/heads/conformer_head.py b/mmpretrain/models/heads/conformer_head.py new file mode 100644 index 0000000..eade90d --- /dev/null +++ b/mmpretrain/models/heads/conformer_head.py @@ -0,0 +1,122 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Sequence, Tuple + +import torch +import torch.nn as nn + +from mmpretrain.evaluation.metrics import Accuracy +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .cls_head import ClsHead + + +@MODELS.register_module() +class ConformerHead(ClsHead): + """Linear classifier head. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (Sequence[int]): Number of channels in the input + feature map. + init_cfg (dict | optional): The extra init config of layers. + Defaults to use ``dict(type='Normal', layer='Linear', std=0.01)``. 
+ """ + + def __init__( + self, + num_classes: int, + in_channels: Sequence[int], # [conv_dim, trans_dim] + init_cfg: dict = dict(type='TruncNormal', layer='Linear', std=.02), + **kwargs): + super(ConformerHead, self).__init__(init_cfg=init_cfg, **kwargs) + + self.in_channels = in_channels + self.num_classes = num_classes + self.init_cfg = init_cfg + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self.conv_cls_head = nn.Linear(self.in_channels[0], num_classes) + self.trans_cls_head = nn.Linear(self.in_channels[1], num_classes) + + def pre_logits(self, feats: Tuple[List[torch.Tensor]]) -> torch.Tensor: + """The process before the final classification head. + + The input ``feats`` is a tuple of tensor, and each tensor is the + feature of a backbone stage. In ``ConformerHead``, we just obtain the + feature of the last stage. + """ + # The ConformerHead doesn't have other module, + # just return after unpacking. + return feats[-1] + + def forward(self, feats: Tuple[List[torch.Tensor]]) -> Tuple[torch.Tensor]: + """The forward process.""" + x = self.pre_logits(feats) + # There are two outputs in the Conformer model + assert len(x) == 2 + + conv_cls_score = self.conv_cls_head(x[0]) + tran_cls_score = self.trans_cls_head(x[1]) + + return conv_cls_score, tran_cls_score + + def predict(self, + feats: Tuple[List[torch.Tensor]], + data_samples: List[DataSample] = None) -> List[DataSample]: + """Inference without augmentation. + + Args: + feats (tuple[Tensor]): The features extracted from the backbone. + Multiple stage inputs are acceptable but only the last stage + will be used to classify. The shape of every item should be + ``(num_samples, num_classes)``. + data_samples (List[DataSample], optional): The annotation + data of every samples. If not None, set ``pred_label`` of + the input data samples. Defaults to None. + + Returns: + List[DataSample]: A list of data samples which contains the + predicted results. + """ + # The part can be traced by torch.fx + conv_cls_score, tran_cls_score = self(feats) + cls_score = conv_cls_score + tran_cls_score + + # The part can not be traced by torch.fx + predictions = self._get_predictions(cls_score, data_samples) + return predictions + + def _get_loss(self, cls_score: Tuple[torch.Tensor], + data_samples: List[DataSample], **kwargs) -> dict: + """Unpack data samples and compute loss.""" + # Unpack data samples and pack targets + if 'gt_score' in data_samples[0]: + # Batch augmentation may convert labels to one-hot format scores. + target = torch.stack([i.gt_score for i in data_samples]) + else: + target = torch.cat([i.gt_label for i in data_samples]) + + # compute loss + losses = dict() + loss = sum([ + self.loss_module( + score, target, avg_factor=score.size(0), **kwargs) + for score in cls_score + ]) + losses['loss'] = loss + + # compute accuracy + if self.cal_acc: + assert target.ndim == 1, 'If you enable batch augmentation ' \ + 'like mixup during training, `cal_acc` is pointless.' + acc = Accuracy.calculate( + cls_score[0] + cls_score[1], target, topk=self.topk) + losses.update( + {f'accuracy_top-{k}': a + for k, a in zip(self.topk, acc)}) + + return losses diff --git a/mmpretrain/models/heads/contrastive_head.py b/mmpretrain/models/heads/contrastive_head.py new file mode 100644 index 0000000..6d1474a --- /dev/null +++ b/mmpretrain/models/heads/contrastive_head.py @@ -0,0 +1,50 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
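+# Shape sketch for the loss below: `pos` is (N, 1) and `neg` is (N, k), so the
+# concatenated logits are (N, 1 + k); the positive similarity always sits in
+# column 0, which is why the target labels are all zeros.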
+from typing import List, Optional, Union + +import torch +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class ContrastiveHead(BaseModule): + """Head for contrastive learning. + + The contrastive loss is implemented in this head and is used in SimCLR, + MoCo, DenseCL, etc. + + Args: + loss (dict): Config dict for module of loss functions. + temperature (float): The temperature hyper-parameter that + controls the concentration level of the distribution. + Defaults to 0.1. + init_cfg (dict or List[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + loss: dict, + temperature: float = 0.1, + init_cfg: Optional[Union[dict, List[dict]]] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.loss_module = MODELS.build(loss) + self.temperature = temperature + + def loss(self, pos: torch.Tensor, neg: torch.Tensor) -> torch.Tensor: + """Forward function to compute contrastive loss. + + Args: + pos (torch.Tensor): Nx1 positive similarity. + neg (torch.Tensor): Nxk negative similarity. + + Returns: + torch.Tensor: The contrastive loss. + """ + N = pos.size(0) + logits = torch.cat((pos, neg), dim=1) + logits /= self.temperature + labels = torch.zeros((N, ), dtype=torch.long).to(pos.device) + + loss = self.loss_module(logits, labels) + return loss diff --git a/mmpretrain/models/heads/deit_head.py b/mmpretrain/models/heads/deit_head.py new file mode 100644 index 0000000..a96f6e1 --- /dev/null +++ b/mmpretrain/models/heads/deit_head.py @@ -0,0 +1,72 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings +from typing import List, Tuple + +import torch +import torch.nn as nn + +from mmpretrain.registry import MODELS +from .vision_transformer_head import VisionTransformerClsHead + + +@MODELS.register_module() +class DeiTClsHead(VisionTransformerClsHead): + """Distilled Vision Transformer classifier head. + + Comparing with the :class:`VisionTransformerClsHead`, this head adds an + extra linear layer to handle the dist token. The final classification score + is the average of both linear transformation results of ``cls_token`` and + ``dist_token``. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + hidden_dim (int, optional): Number of the dimensions for hidden layer. + Defaults to None, which means no extra hidden layer. + act_cfg (dict): The activation config. Only available during + pre-training. Defaults to ``dict(type='Tanh')``. + init_cfg (dict): The extra initialization configs. Defaults to + ``dict(type='Constant', layer='Linear', val=0)``. + """ + + def _init_layers(self): + """"Init extra hidden linear layer to handle dist token if exists.""" + super(DeiTClsHead, self)._init_layers() + if self.hidden_dim is None: + head_dist = nn.Linear(self.in_channels, self.num_classes) + else: + head_dist = nn.Linear(self.hidden_dim, self.num_classes) + self.layers.add_module('head_dist', head_dist) + + def pre_logits(self, + feats: Tuple[List[torch.Tensor]]) -> Tuple[torch.Tensor]: + """The process before the final classification head. + + The input ``feats`` is a tuple of list of tensor, and each tensor is + the feature of a backbone stage. In ``DeiTClsHead``, we obtain the + feature of the last stage and forward in hidden layer if exists. + """ + feat = feats[-1] # Obtain feature of the last scale. 
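+# `feat` is either a 3-tuple (patch tokens, cls_token, dist_token) from newer
+# backbones or a 2-tuple (cls_token, dist_token) from the previous ViT output
+# format; both layouts are handled below.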
+ # For backward-compatibility with the previous ViT output + if len(feat) == 3: + _, cls_token, dist_token = feat + else: + cls_token, dist_token = feat + if self.hidden_dim is None: + return cls_token, dist_token + else: + cls_token = self.layers.act(self.layers.pre_logits(cls_token)) + dist_token = self.layers.act(self.layers.pre_logits(dist_token)) + return cls_token, dist_token + + def forward(self, feats: Tuple[List[torch.Tensor]]) -> torch.Tensor: + """The forward process.""" + if self.training: + warnings.warn('MMPretrain cannot train the ' + 'distilled version DeiT.') + cls_token, dist_token = self.pre_logits(feats) + # The final classification head. + cls_score = (self.layers.head(cls_token) + + self.layers.head_dist(dist_token)) / 2 + return cls_score diff --git a/mmpretrain/models/heads/efficientformer_head.py b/mmpretrain/models/heads/efficientformer_head.py new file mode 100644 index 0000000..09aa05b --- /dev/null +++ b/mmpretrain/models/heads/efficientformer_head.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + +import torch +import torch.nn as nn + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .cls_head import ClsHead + + +@MODELS.register_module() +class EfficientFormerClsHead(ClsHead): + """EfficientFormer classifier head. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + distillation (bool): Whether use a additional distilled head. + Defaults to True. + init_cfg (dict): The extra initialization configs. Defaults to + ``dict(type='Normal', layer='Linear', std=0.01)``. + """ + + def __init__(self, + num_classes, + in_channels, + distillation=True, + init_cfg=dict(type='Normal', layer='Linear', std=0.01), + *args, + **kwargs): + super(EfficientFormerClsHead, self).__init__( + init_cfg=init_cfg, *args, **kwargs) + self.in_channels = in_channels + self.num_classes = num_classes + self.dist = distillation + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self.head = nn.Linear(self.in_channels, self.num_classes) + if self.dist: + self.dist_head = nn.Linear(self.in_channels, self.num_classes) + + def forward(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The forward process.""" + pre_logits = self.pre_logits(feats) + # The final classification head. + cls_score = self.head(pre_logits) + + if self.dist: + cls_score = (cls_score + self.dist_head(pre_logits)) / 2 + return cls_score + + def pre_logits(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The process before the final classification head. + + The input ``feats`` is a tuple of tensor, and each tensor is the + feature of a backbone stage. In :obj`EfficientFormerClsHead`, we just + obtain the feature of the last stage. + """ + # The EfficientFormerClsHead doesn't have other module, just return + # after unpacking. + return feats[-1] + + def loss(self, feats: Tuple[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> dict: + """Calculate losses from the classification score. + + Args: + feats (tuple[Tensor]): The features extracted from the backbone. + Multiple stage inputs are acceptable but only the last stage + will be used to classify. The shape of every item should be + ``(num_samples, num_classes)``. + data_samples (List[DataSample]): The annotation data of + every samples. 
+ **kwargs: Other keyword arguments to forward the loss module. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + if self.dist: + raise NotImplementedError( + "MMPretrain doesn't support to train" + ' the distilled version EfficientFormer.') + else: + return super().loss(feats, data_samples, **kwargs) diff --git a/mmpretrain/models/heads/grounding_head.py b/mmpretrain/models/heads/grounding_head.py new file mode 100644 index 0000000..a47512e --- /dev/null +++ b/mmpretrain/models/heads/grounding_head.py @@ -0,0 +1,217 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +import torch.nn.functional as F +from mmengine.model import BaseModule + +from mmpretrain.models.utils.box_utils import (box_cxcywh_to_xyxy, + generalized_box_iou) +from mmpretrain.registry import MODELS, TOKENIZER + + +@MODELS.register_module() +class GroundingHead(BaseModule): + """bbox Coordination generation head for multi-modal pre-trained task, + adapted by BLIP. Normally used for visual grounding. + + Args: + loss: dict, + decoder: dict, + init_cfg (dict, optional): the config to control the initialization. + Defaults to None. + """ + + def __init__( + self, + decoder: dict = None, + tokenizer: dict = None, + box_l1_loss_coeff=4.0, + box_giou_loss_coeff=2.0, + init_cfg: Optional[dict] = None, + ) -> None: + super(GroundingHead, self).__init__(init_cfg=init_cfg) + ''' init the decoder from med_config''' + self.decoder = None + if decoder: + self.decoder = MODELS.build(decoder) + self.loss_fn = torch.nn.CrossEntropyLoss( + reduction='none', ignore_index=-100) + + self.box_l1_loss_coeff = box_l1_loss_coeff + self.box_giou_loss_coeff = box_giou_loss_coeff + + if isinstance(tokenizer, dict): + self.tokenizer = TOKENIZER.build(tokenizer) + else: + self.tokenizer = tokenizer + + self.image_res = 640 + prefix_ids = torch.tensor( + self.tokenizer.convert_tokens_to_ids(['[unused339]'])) + target_ids = torch.tensor( + self.tokenizer.convert_tokens_to_ids( + [f'[unused{340+_}]' for _ in range(self.image_res + 1)])) + self.register_buffer('prefix_ids', prefix_ids) + self.register_buffer('target_ids', target_ids) + + bbox_prob_mask = torch.zeros(len(self.tokenizer)) + bbox_prob_mask[self.target_ids[0]:self.target_ids[-1] + 1] = 1 + bbox_prob_mask = (1.0 - bbox_prob_mask) * -10000.0 + self.register_buffer('bbox_prob_mask', bbox_prob_mask) + self.bin_start_idx = self.target_ids[0] + + def forward(self, text_embedding, text_embedding_mask, + encoder_hidden_states, encoder_attention_mask): + + # localize prompt token, text embedding + + merged_encode_hs = torch.cat([encoder_hidden_states, text_embedding], + 1) + merge_att_mask = torch.cat( + [encoder_attention_mask, text_embedding_mask], 1) + + loc_prompt = self.prompt.weight.T + loc_prompt = torch.repeat_interleave(loc_prompt, + merge_att_mask.shape[0], + 0).unsqueeze(1) + + loc_prompt_mask = torch.ones(loc_prompt.shape[:-1]).long().to( + loc_prompt.device) + + decoder_out = self.decoder( + inputs_embeds=loc_prompt, + attention_mask=loc_prompt_mask, + encoder_hidden_states=merged_encode_hs, + encoder_attention_mask=merge_att_mask, + output_hidden_states=True, + labels=None, + ) + decoder_hs = decoder_out.hidden_states[-1][:, 0, :] + box_pred = self.box_head(decoder_hs) + return decoder_out, decoder_hs, box_pred + + def loss(self, + text_embedding, + text_embedding_mask, + encoder_hidden_states, + encoder_attention_mask, + decoder_targets, + return_scores=False): + """Calculate losses from the extracted features. 
+ + Args: + feats (dict): The features extracted from the backbone. + data_samples (List[BaseDataElement]): The annotation data of + every samples. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + + merged_encode_hs = torch.cat([encoder_hidden_states, text_embedding], + 1) + merge_att_mask = torch.cat( + [encoder_attention_mask, text_embedding_mask], 1) + + answer_targets = (decoder_targets * + self.image_res).long() + self.bin_start_idx + prefix_ids = torch.repeat_interleave(self.prefix_ids, + merge_att_mask.shape[0], + 0).unsqueeze(-1) + prefix_ids = torch.cat([prefix_ids, answer_targets], dim=1) + + answer_output = self.decoder( + prefix_ids, + encoder_hidden_states=merged_encode_hs, + encoder_attention_mask=merge_att_mask, + labels=None, + return_dict=True, + ) + prob_mask = self.bbox_prob_mask.view(1, 1, + self.bbox_prob_mask.shape[-1]) + prediction_scores = answer_output.logits + prob_mask + + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = prefix_ids[:, 1:].contiguous() + vocab_size = len(self.tokenizer) + loss_seq_init = self.loss_fn( + shifted_prediction_scores.view(-1, vocab_size), labels.view(-1)) + + with torch.no_grad(): + pred_box = (torch.argmax( + prediction_scores[:, :-1, :].contiguous(), dim=-1) - + self.bin_start_idx) / self.image_res + weight_bbox = F.l1_loss( + pred_box, decoder_targets, reduction='none').clamp( + 0, 5) * self.box_l1_loss_coeff + weight_giou = (1 - torch.diag( + generalized_box_iou( + box_cxcywh_to_xyxy(pred_box), + box_cxcywh_to_xyxy(decoder_targets))) + ) * self.box_giou_loss_coeff + bs = text_embedding.shape[0] + loss_seq = loss_seq_init[:].view(bs, -1, 4) + loss_seq = loss_seq * weight_bbox + loss_seq = loss_seq * weight_giou.unsqueeze(1) + + loss_seq = loss_seq.mean() + + losses = { + 'loss_seq': loss_seq, + 'loss_seq_init': loss_seq_init.mean(), + 'loss': loss_seq, + 'box_l1': weight_bbox.mean(-1).mean().detach(), + 'box_giou': weight_giou.mean().detach() + } + + return losses + + def predict( + self, + text_embedding, + text_embedding_mask, + encoder_hidden_states, + encoder_attention_mask, + ): + """Generates the bbox coordinates at inference time.""" + + merged_encode_hs = torch.cat([encoder_hidden_states, text_embedding], + 1) + merge_att_mask = torch.cat( + [encoder_attention_mask, text_embedding_mask], 1) + + prefix_ids = torch.repeat_interleave(self.prefix_ids, + merge_att_mask.shape[0], + 0).unsqueeze(-1) + + for _ in range(4): + decoder_output = self.decoder( + prefix_ids, + encoder_hidden_states=merged_encode_hs, + encoder_attention_mask=merge_att_mask, + labels=None, + return_dict=True, + ) + prob_mask = self.bbox_prob_mask.view(1, 1, + self.bbox_prob_mask.shape[-1]) + prediction_scores = decoder_output.logits + prob_mask + + prefix_ids = torch.cat([ + prefix_ids, + torch.argmax(prediction_scores[:, -1, :], dim=-1).unsqueeze(1) + ], + dim=1) + + pred_box = self.process_bbox(prefix_ids[:, 1:]) # xywh 0-1 to xyxy 0-1 + + return pred_box + + @torch.no_grad() + def process_bbox(self, bbox): + bbox = bbox - self.bin_start_idx + bbox = torch.true_divide(bbox, self.image_res) + bbox = box_cxcywh_to_xyxy(bbox) + bbox = torch.clip(bbox, 0, 1) + assert torch.all(bbox <= 1) + return bbox diff --git a/mmpretrain/models/heads/itc_head.py b/mmpretrain/models/heads/itc_head.py new file mode 100644 index 0000000..006d52c --- /dev/null +++ b/mmpretrain/models/heads/itc_head.py @@ -0,0 +1,157 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
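+# Shape sketch: with a per-GPU batch of N and a queue of size Q, the
+# similarity matrices `sim_i2t` / `sim_t2i` computed in `_get_loss` are
+# (N, N + Q); the soft targets blend the one-hot `sim_targets` with the
+# momentum-branch predictions using `alpha`.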
+from typing import Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine.dist import all_gather +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class ITCHead(BaseModule): + """Image-text matching head for multi-modal pre-trained task. Adapted by + BLIP, ALBEF. Normally used for retrieval task. + + Args: + embed_dim (int): Embed channel size for queue. + queue_size (int): Queue size for image and text. Defaults to 57600. + temperature (float): Temperature to calculate the similarity. + Defaults to 0.07. + use_distill (bool): Whether to use distill to calculate loss. + Defaults to True. + alpha (float): Weight for momentum similarity. Defaults to 0.4. + init_cfg (dict, optional): the config to control the initialization. + Defaults to None. + """ + + def __init__(self, + embed_dim: int, + queue_size: int = 57600, + temperature: float = 0.07, + use_distill: bool = True, + alpha: float = 0.4, + init_cfg: Optional[dict] = None): + super(ITCHead, self).__init__(init_cfg=init_cfg) + self.temp = nn.Parameter(temperature * torch.ones([])) + self.use_distill = use_distill + if self.use_distill: + # create the queue + self.register_buffer('image_queue', + torch.randn(embed_dim, queue_size)) + self.register_buffer('text_queue', + torch.randn(embed_dim, queue_size)) + self.register_buffer('idx_queue', torch.full((1, queue_size), + -100)) + self.register_buffer('queue_ptr', torch.zeros(1, dtype=torch.long)) + + self.image_queue = F.normalize(self.image_queue, dim=0) + self.text_queue = F.normalize(self.text_queue, dim=0) + + self.queue_size = queue_size + # This value will be warmup by `WarmupParamHook` + self.alpha = alpha + + def forward(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The forward process.""" + return feats[-1] + + def loss(self, feats: Tuple[torch.Tensor], data_samples, **kwargs) -> dict: + """Calculate losses from the classification score. + + Args: + feats (tuple[Tensor]): The features extracted from the backbone. + Multiple stage inputs are acceptable but only the last stage + will be used to classify. The shape of every item should be + ``(num_samples, num_classes)``. + data_samples (List[ClsDataSample]): The annotation data of + every samples. + **kwargs: Other keyword arguments to forward the loss module. 
+ + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + + # The part can be traced by torch.fx + img_feats, text_feats, img_feats_m, text_feats_m = self(feats) + + img_feats_all = torch.cat( + [img_feats_m.t(), + self.image_queue.clone().detach()], dim=1) + text_feats_all = torch.cat( + [text_feats_m.t(), + self.text_queue.clone().detach()], dim=1) + + # The part can not be traced by torch.fx + losses = self._get_loss(img_feats, text_feats, img_feats_m, + text_feats_m, img_feats_all, text_feats_all, + data_samples, **kwargs) + return losses + + def _get_loss(self, img_feats, text_feats, img_feats_m, text_feats_m, + img_feats_all, text_feats_all, data_samples, **kwargs): + """Unpack data samples and compute loss.""" + + idx = torch.tensor([ds.image_id + for ds in data_samples]).to(img_feats.device) + idx = idx.view(-1, 1) + idx_all = torch.cat([idx.t(), self.idx_queue.clone().detach()], dim=1) + pos_idx = torch.eq(idx, idx_all).float() + sim_targets = pos_idx / pos_idx.sum(1, keepdim=True) + + with torch.no_grad(): + if self.use_distill: + sim_i2t_m = img_feats_m @ text_feats_all / self.temp + sim_t2i_m = text_feats_m @ img_feats_all / self.temp + + sim_i2t_targets = ( + self.alpha * F.softmax(sim_i2t_m, dim=1) + + (1 - self.alpha) * sim_targets) + sim_t2i_targets = ( + self.alpha * F.softmax(sim_t2i_m, dim=1) + + (1 - self.alpha) * sim_targets) + + sim_i2t = img_feats @ text_feats_all / self.temp + sim_t2i = text_feats @ img_feats_all / self.temp + + if self.use_distill: + loss_i2t = -torch.sum( + F.log_softmax(sim_i2t, dim=1) * sim_i2t_targets, dim=1).mean() + loss_t2i = -torch.sum( + F.log_softmax(sim_t2i, dim=1) * sim_t2i_targets, dim=1).mean() + else: + loss_i2t = -torch.sum( + F.log_softmax(sim_i2t, dim=1) * sim_targets, dim=1).mean() + loss_t2i = -torch.sum( + F.log_softmax(sim_t2i, dim=1) * sim_targets, dim=1).mean() + + # compute loss + losses = dict() + + losses['itc_loss'] = (loss_i2t + loss_t2i) / 2 + self._dequeue_and_enqueue(img_feats_m, text_feats_m, idx) + return losses + + @torch.no_grad() + def _dequeue_and_enqueue(self, image_feat, text_feat, idxs=None): + # gather keys before updating queue + image_feats = torch.cat(all_gather(image_feat)) + text_feats = torch.cat(all_gather(text_feat)) + + batch_size = image_feats.shape[0] + + ptr = int(self.queue_ptr) + assert self.queue_size % batch_size == 0 # for simplicity + + # replace the keys at ptr (dequeue and enqueue) + self.image_queue[:, ptr:ptr + batch_size] = image_feats.T + self.text_queue[:, ptr:ptr + batch_size] = text_feats.T + + if idxs is not None: + idxs = torch.cat(all_gather(idxs)) + self.idx_queue[:, ptr:ptr + batch_size] = idxs.T + + ptr = (ptr + batch_size) % self.queue_size # move pointer + self.queue_ptr[0] = ptr diff --git a/mmpretrain/models/heads/itm_head.py b/mmpretrain/models/heads/itm_head.py new file mode 100644 index 0000000..c7b42f3 --- /dev/null +++ b/mmpretrain/models/heads/itm_head.py @@ -0,0 +1,117 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from typing import Optional, Tuple + +import torch +import torch.nn as nn +from mmengine.model import BaseModule + +from mmpretrain.evaluation import Accuracy +from mmpretrain.registry import MODELS + + +class Pooler(nn.Module): + + def __init__(self, hidden_size): + super().__init__() + self.dense = nn.Linear(hidden_size, hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +@MODELS.register_module() +class ITMHead(BaseModule): + """Image-text matching head for multi-modal pre-trained task. Adapted by + BLIP, FLAVA. + + Args: + hidden_size (int): Hidden channel size out input features. + with_pooler (bool): Whether a pooler is added. Defaults to True. + loss (dict): Config of global contrasive loss. Defaults to + ``dict(type='GlobalContrasiveLoss')``. + cal_acc (bool): Whether to calculate accuracy during training. + If you use batch augmentations like Mixup and CutMix during + training, it is pointless to calculate accuracy. + Defaults to False. + init_cfg (dict, optional): the config to control the initialization. + Defaults to None. + """ + + def __init__(self, + hidden_size: int, + with_pooler: bool = True, + loss: dict = dict(type='CrossEntropyLoss', loss_weight=1.0), + cal_acc: bool = False, + init_cfg: Optional[dict] = None): + super(ITMHead, self).__init__(init_cfg=init_cfg) + self.hidden_size = hidden_size + + if with_pooler: + self.pooler = Pooler(hidden_size=self.hidden_size) + else: + self.pooler = nn.Identity() + self.fc = nn.Linear(self.hidden_size, 2) + + self.loss_module = MODELS.build(loss) + self.cal_acc = cal_acc + + def forward(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The forward process.""" + pre_logits = self.pooler(feats[-1]) + itm_logits = self.fc(pre_logits) + return itm_logits + + def loss(self, feats: Tuple[torch.Tensor], data_samples, **kwargs) -> dict: + """Calculate losses from the classification score. + + Args: + feats (tuple[Tensor]): The features extracted from the backbone. + Multiple stage inputs are acceptable but only the last stage + will be used to classify. The shape of every item should be + ``(num_samples, num_classes)``. + data_samples (List[ClsDataSample]): The annotation data of + every samples. + **kwargs: Other keyword arguments to forward the loss module. 
+ + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + + # The part can be traced by torch.fx + itm_logits = self(feats) + + # deal with query + if itm_logits.ndim == 3: + itm_logits = itm_logits.mean(dim=1) + + # The part can not be traced by torch.fx + losses = self._get_loss(itm_logits, data_samples, **kwargs) + return losses + + def _get_loss(self, itm_logits: torch.Tensor, data_samples, **kwargs): + """Unpack data samples and compute loss.""" + # Unpack data samples and pack targets + # use `itm_label` in here temporarily + target = torch.tensor([i.is_matched + for i in data_samples]).to(itm_logits.device) + + # compute loss + losses = dict() + + loss = self.loss_module( + itm_logits, target.long(), avg_factor=itm_logits.size(0), **kwargs) + losses['itm_loss'] = loss + + # compute accuracy + if self.cal_acc: + # topk is meaningless for matching task + acc = Accuracy.calculate(itm_logits, target) + # acc is warpped with two lists of topk and thrs + # which are unnecessary here + losses.update({'itm_accuracy': acc[0][0]}) + + return losses diff --git a/mmpretrain/models/heads/itpn_clip_head.py b/mmpretrain/models/heads/itpn_clip_head.py new file mode 100644 index 0000000..52c49b8 --- /dev/null +++ b/mmpretrain/models/heads/itpn_clip_head.py @@ -0,0 +1,56 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import torch +import torch.nn as nn +from mmengine.device import get_device +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class iTPNClipHead(BaseModule): + """Head for iTPN Pre-training using Clip. + + Compute the logits and the cross entropy loss. + + Args: + embed_dims (int): The dimension of embedding. + num_embed (int): The number of classification types. + loss (dict): The config of loss. + init_cfg (dict or List[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__( + self, + embed_dims: int, + num_embed: int, + loss: dict, + init_cfg: Optional[Union[dict, List[dict]]] = dict( + type='TruncNormal', layer='Linear', std=0.02, bias=0) + ) -> None: + super().__init__(init_cfg=init_cfg) + self.cls_head = nn.Linear(embed_dims, num_embed) + self.loss_module = MODELS.build(loss) + + def loss(self, feats: torch.Tensor, target: torch.Tensor, + mask: torch.Tensor) -> torch.Tensor: + """Generate loss. + + Args: + feats (torch.Tensor): Features from backbone. + target (torch.Tensor): Target generated by target_generator. + mask (torch.Tensor): Generated mask for pretraing. + """ + mask = mask.to(get_device(), non_blocking=True) + mask = mask.flatten(1).to(torch.bool) + target = target[mask] + + # remove cls_token + # feats = feats[:, 1:] + logits = self.cls_head(feats[mask]) + + loss = self.loss_module(logits, target) + return loss diff --git a/mmpretrain/models/heads/latent_heads.py b/mmpretrain/models/heads/latent_heads.py new file mode 100644 index 0000000..a9662b5 --- /dev/null +++ b/mmpretrain/models/heads/latent_heads.py @@ -0,0 +1,94 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn +from mmengine.dist import all_reduce, get_world_size +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class LatentPredictHead(BaseModule): + """Head for latent feature prediction. + + This head builds a predictor, which can be any registered neck component. 
+ For example, BYOL and SimSiam call this head and build NonLinearNeck. + It also implements similarity loss between two forward features. + + Args: + loss (dict): Config dict for the loss. + predictor (dict): Config dict for the predictor. + init_cfg (dict or List[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + loss: dict, + predictor: dict, + init_cfg: Optional[Union[dict, List[dict]]] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.loss_module = MODELS.build(loss) + self.predictor = MODELS.build(predictor) + + def loss(self, input: torch.Tensor, + target: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Forward head. + + Args: + input (torch.Tensor): NxC input features. + target (torch.Tensor): NxC target features. + + Returns: + torch.Tensor: The latent predict loss. + """ + pred = self.predictor([input])[0] + target = target.detach() + + loss = self.loss_module(pred, target) + + return loss + + +@MODELS.register_module() +class LatentCrossCorrelationHead(BaseModule): + """Head for latent feature cross correlation. + + Part of the code is borrowed from `script + `_. + + Args: + in_channels (int): Number of input channels. + loss (dict): Config dict for module of loss functions. + init_cfg (dict or List[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + in_channels: int, + loss: dict, + init_cfg: Optional[Union[dict, List[dict]]] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.world_size = get_world_size() + self.bn = nn.BatchNorm1d(in_channels, affine=False) + self.loss_module = MODELS.build(loss) + + def loss(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + """Forward head. + + Args: + input (torch.Tensor): NxC input features. + target (torch.Tensor): NxC target features. + + Returns: + torch.Tensor: The cross correlation loss. + """ + # cross-correlation matrix + cross_correlation_matrix = self.bn(input).T @ self.bn(target) + cross_correlation_matrix.div_(input.size(0) * self.world_size) + + all_reduce(cross_correlation_matrix) + + loss = self.loss_module(cross_correlation_matrix) + return loss diff --git a/mmpretrain/models/heads/levit_head.py b/mmpretrain/models/heads/levit_head.py new file mode 100644 index 0000000..a74d7ec --- /dev/null +++ b/mmpretrain/models/heads/levit_head.py @@ -0,0 +1,81 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
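+# `BatchNormLinear.fuse()` below folds the preceding BN1d into the linear
+# layer for deployment:
+#   w_fused = W * (gamma / sqrt(running_var + eps))   (broadcast over inputs)
+#   b_fused = W @ (beta - running_mean * gamma / sqrt(running_var + eps)) + b
+# so inference runs a single nn.Linear.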
+import torch +import torch.nn as nn +from mmengine.model import BaseModule + +from mmpretrain.models.heads import ClsHead +from mmpretrain.registry import MODELS +from ..utils import build_norm_layer + + +class BatchNormLinear(BaseModule): + + def __init__(self, in_channels, out_channels, norm_cfg=dict(type='BN1d')): + super(BatchNormLinear, self).__init__() + self.bn = build_norm_layer(norm_cfg, in_channels) + self.linear = nn.Linear(in_channels, out_channels) + + @torch.no_grad() + def fuse(self): + w = self.bn.weight / (self.bn.running_var + self.bn.eps)**0.5 + b = self.bn.bias - self.bn.running_mean * \ + self.bn.weight / (self.bn.running_var + self.bn.eps) ** 0.5 + w = self.linear.weight * w[None, :] + b = (self.linear.weight @ b[:, None]).view(-1) + self.linear.bias + + self.linear.weight.data.copy_(w) + self.linear.bias.data.copy_(b) + return self.linear + + def forward(self, x): + x = self.bn(x) + x = self.linear(x) + return x + + +def fuse_parameters(module): + for child_name, child in module.named_children(): + if hasattr(child, 'fuse'): + setattr(module, child_name, child.fuse()) + else: + fuse_parameters(child) + + +@MODELS.register_module() +class LeViTClsHead(ClsHead): + + def __init__(self, + num_classes=1000, + distillation=True, + in_channels=None, + deploy=False, + **kwargs): + super(LeViTClsHead, self).__init__(**kwargs) + self.num_classes = num_classes + self.distillation = distillation + self.deploy = deploy + self.head = BatchNormLinear(in_channels, num_classes) + if distillation: + self.head_dist = BatchNormLinear(in_channels, num_classes) + + if self.deploy: + self.switch_to_deploy(self) + + def switch_to_deploy(self): + if self.deploy: + return + fuse_parameters(self) + self.deploy = True + + def forward(self, x): + x = self.pre_logits(x) + if self.distillation: + x = self.head(x), self.head_dist(x) # 2 16 384 -> 2 1000 + if not self.training: + x = (x[0] + x[1]) / 2 + else: + raise NotImplementedError("MMPretrain doesn't support " + 'training in distillation mode.') + else: + x = self.head(x) + return x diff --git a/mmpretrain/models/heads/linear_head.py b/mmpretrain/models/heads/linear_head.py new file mode 100644 index 0000000..90b4c2b --- /dev/null +++ b/mmpretrain/models/heads/linear_head.py @@ -0,0 +1,63 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Tuple + +import torch +import torch.nn as nn + +from mmpretrain.registry import MODELS +from .cls_head import ClsHead + + +@MODELS.register_module() +class LinearClsHead(ClsHead): + """Linear classifier head. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + loss (dict): Config of classification loss. Defaults to + ``dict(type='CrossEntropyLoss', loss_weight=1.0)``. + topk (int | Tuple[int]): Top-k accuracy. Defaults to ``(1, )``. + cal_acc (bool): Whether to calculate accuracy during training. + If you use batch augmentations like Mixup and CutMix during + training, it is pointless to calculate accuracy. + Defaults to False. + init_cfg (dict, optional): the config to control the initialization. + Defaults to ``dict(type='Normal', layer='Linear', std=0.01)``. 
+ """ + + def __init__(self, + num_classes: int, + in_channels: int, + init_cfg: Optional[dict] = dict( + type='Normal', layer='Linear', std=0.01), + **kwargs): + super(LinearClsHead, self).__init__(init_cfg=init_cfg, **kwargs) + + self.in_channels = in_channels + self.num_classes = num_classes + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self.fc = nn.Linear(self.in_channels, self.num_classes) + + def pre_logits(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The process before the final classification head. + + The input ``feats`` is a tuple of tensor, and each tensor is the + feature of a backbone stage. In ``LinearClsHead``, we just obtain the + feature of the last stage. + """ + # The LinearClsHead doesn't have other module, just return after + # unpacking. + return feats[-1] + + def forward(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The forward process.""" + pre_logits = self.pre_logits(feats) + # The final classification head. + cls_score = self.fc(pre_logits) + return cls_score diff --git a/mmpretrain/models/heads/mae_head.py b/mmpretrain/models/heads/mae_head.py new file mode 100644 index 0000000..b76eced --- /dev/null +++ b/mmpretrain/models/heads/mae_head.py @@ -0,0 +1,106 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class MAEPretrainHead(BaseModule): + """Head for MAE Pre-training. + + Args: + loss (dict): Config of loss. + norm_pix_loss (bool): Whether or not normalize target. + Defaults to False. + patch_size (int): Patch size. Defaults to 16. + in_channels (int): Number of input channels. Defaults to 3. + """ + + def __init__(self, + loss: dict, + norm_pix: bool = False, + patch_size: int = 16, + in_channels: int = 3) -> None: + super().__init__() + self.norm_pix = norm_pix + self.patch_size = patch_size + self.in_channels = in_channels + self.loss_module = MODELS.build(loss) + + def patchify(self, imgs: torch.Tensor) -> torch.Tensor: + r"""Split images into non-overlapped patches. + + Args: + imgs (torch.Tensor): A batch of images. The shape should + be :math:`(B, C, H, W)`. + + Returns: + torch.Tensor: Patchified images. The shape is + :math:`(B, L, \text{patch_size}^2 \times C)`. + """ + p = self.patch_size + assert imgs.shape[2] == imgs.shape[3] and imgs.shape[2] % p == 0 + + h = w = imgs.shape[2] // p + x = imgs.reshape(shape=(imgs.shape[0], self.in_channels, h, p, w, p)) + x = torch.einsum('nchpwq->nhwpqc', x) + x = x.reshape(shape=(imgs.shape[0], h * w, p**2 * self.in_channels)) + return x + + def unpatchify(self, x: torch.Tensor) -> torch.Tensor: + r"""Combine non-overlapped patches into images. + + Args: + x (torch.Tensor): The shape is + :math:`(B, L, \text{patch_size}^2 \times C)`. + + Returns: + torch.Tensor: The shape is :math:`(B, C, H, W)`. + """ + p = self.patch_size + h = w = int(x.shape[1]**.5) + assert h * w == x.shape[1] + + x = x.reshape(shape=(x.shape[0], h, w, p, p, self.in_channels)) + x = torch.einsum('nhwpqc->nchpwq', x) + imgs = x.reshape(shape=(x.shape[0], self.in_channels, h * p, h * p)) + return imgs + + def construct_target(self, target: torch.Tensor) -> torch.Tensor: + """Construct the reconstruction target. + + In addition to splitting images into tokens, this module will also + normalize the image according to ``norm_pix``. 
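The reshape/einsum pair in ``patchify`` and ``unpatchify`` above is easiest to check with concrete shapes. A standalone sketch (the image size and patch size are arbitrary choices) that mirrors the two methods and verifies they round-trip:

import torch

B, C, H, W, p = 2, 3, 224, 224, 16
imgs = torch.rand(B, C, H, W)

# patchify: (B, C, H, W) -> (B, L, p*p*C) with L = (H // p) * (W // p)
h = w = H // p
x = imgs.reshape(B, C, h, p, w, p)
x = torch.einsum('nchpwq->nhwpqc', x)
patches = x.reshape(B, h * w, p * p * C)
assert patches.shape == (2, 196, 768)

# unpatchify: the inverse permutation back to (B, C, H, W)
y = patches.reshape(B, h, w, p, p, C)
y = torch.einsum('nhwpqc->nchpwq', y)
recon = y.reshape(B, C, h * p, w * p)
assert torch.equal(recon, imgs)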
+ + Args: + target (torch.Tensor): Image with the shape of B x C x H x W + + Returns: + torch.Tensor: Tokenized images with the shape of B x L x C + """ + target = self.patchify(target) + if self.norm_pix: + # normalize the target image + mean = target.mean(dim=-1, keepdim=True) + var = target.var(dim=-1, keepdim=True) + target = (target - mean) / (var + 1.e-6)**.5 + + return target + + def loss(self, pred: torch.Tensor, target: torch.Tensor, + mask: torch.Tensor) -> torch.Tensor: + """Generate loss. + + Args: + pred (torch.Tensor): The reconstructed image. + target (torch.Tensor): The target image. + mask (torch.Tensor): The mask of the target image. + + Returns: + torch.Tensor: The reconstruction loss. + """ + target = self.construct_target(target) + loss = self.loss_module(pred, target, mask) + + return loss diff --git a/mmpretrain/models/heads/margin_head.py b/mmpretrain/models/heads/margin_head.py new file mode 100644 index 0000000..3a88bf8 --- /dev/null +++ b/mmpretrain/models/heads/margin_head.py @@ -0,0 +1,300 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from typing import List, Optional, Sequence, Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine.fileio import list_from_file +from mmengine.runner import autocast +from mmengine.utils import is_seq_of + +from mmpretrain.models.losses import convert_to_one_hot +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .cls_head import ClsHead + + +class NormProduct(nn.Linear): + """An enhanced linear layer with k clustering centers to calculate product + between normalized input and linear weight. + + Args: + in_features (int): size of each input sample. + out_features (int): size of each output sample + k (int): The number of clustering centers. Defaults to 1. + bias (bool): Whether there is bias. If set to ``False``, the + layer will not learn an additive bias. Defaults to ``True``. + feature_norm (bool): Whether to normalize the input feature. + Defaults to ``True``. + weight_norm (bool):Whether to normalize the weight. + Defaults to ``True``. + """ + + def __init__(self, + in_features: int, + out_features: int, + k=1, + bias: bool = False, + feature_norm: bool = True, + weight_norm: bool = True): + + super().__init__(in_features, out_features * k, bias=bias) + self.weight_norm = weight_norm + self.feature_norm = feature_norm + self.out_features = out_features + self.k = k + + def forward(self, input: torch.Tensor) -> torch.Tensor: + if self.feature_norm: + input = F.normalize(input) + if self.weight_norm: + weight = F.normalize(self.weight) + else: + weight = self.weight + cosine_all = F.linear(input, weight, self.bias) + + if self.k == 1: + return cosine_all + else: + cosine_all = cosine_all.view(-1, self.out_features, self.k) + cosine, _ = torch.max(cosine_all, dim=2) + return cosine + + +@MODELS.register_module() +class ArcFaceClsHead(ClsHead): + """ArcFace classifier head. + + A PyTorch implementation of paper `ArcFace: Additive Angular Margin Loss + for Deep Face Recognition `_ and + `Sub-center ArcFace: Boosting Face Recognition by Large-Scale Noisy Web + Faces `_ + + Example: + To use ArcFace in config files. + + 1. use vanilla ArcFace + + .. code:: python + + mode = dict( + backbone = xxx, + neck = xxxx, + head=dict( + type='ArcFaceClsHead', + num_classes=5000, + in_channels=1024, + loss = dict(type='CrossEntropyLoss', loss_weight=1.0), + init_cfg=None), + ) + + 2. use SubCenterArcFace with 3 sub-centers + + .. 
code:: python + + mode = dict( + backbone = xxx, + neck = xxxx, + head=dict( + type='ArcFaceClsHead', + num_classes=5000, + in_channels=1024, + num_subcenters=3, + loss = dict(type='CrossEntropyLoss', loss_weight=1.0), + init_cfg=None), + ) + + 3. use SubCenterArcFace With CountPowerAdaptiveMargins + + .. code:: python + + mode = dict( + backbone = xxx, + neck = xxxx, + head=dict( + type='ArcFaceClsHead', + num_classes=5000, + in_channels=1024, + num_subcenters=3, + loss = dict(type='CrossEntropyLoss', loss_weight=1.0), + init_cfg=None), + ) + + custom_hooks = [dict(type='SetAdaptiveMarginsHook')] + + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + num_subcenters (int): Number of subcenters. Defaults to 1. + scale (float): Scale factor of output logit. Defaults to 64.0. + margins (float): The penalty margin. Could be the fllowing formats: + + - float: The margin, would be same for all the categories. + - Sequence[float]: The category-based margins list. + - str: A '.txt' file path which contains a list. Each line + represents the margin of a category, and the number in the + i-th row indicates the margin of the i-th class. + + Defaults to 0.5. + easy_margin (bool): Avoid theta + m >= PI. Defaults to False. + loss (dict): Config of classification loss. Defaults to + ``dict(type='CrossEntropyLoss', loss_weight=1.0)``. + init_cfg (dict, optional): the config to control the initialization. + Defaults to None. + """ + + def __init__(self, + num_classes: int, + in_channels: int, + num_subcenters: int = 1, + scale: float = 64., + margins: Optional[Union[float, Sequence[float], str]] = 0.50, + easy_margin: bool = False, + loss: dict = dict(type='CrossEntropyLoss', loss_weight=1.0), + init_cfg: Optional[dict] = None): + + super(ArcFaceClsHead, self).__init__(init_cfg=init_cfg) + if not isinstance(loss, nn.Module): + loss = MODELS.build(loss) + self.loss_module = loss + + assert num_subcenters >= 1 and num_classes >= 0 + self.in_channels = in_channels + self.num_classes = num_classes + self.num_subcenters = num_subcenters + self.scale = scale + self.easy_margin = easy_margin + + self.norm_product = NormProduct(in_channels, num_classes, + num_subcenters) + + if isinstance(margins, float): + margins = [margins] * num_classes + elif isinstance(margins, str) and margins.endswith('.txt'): + margins = [float(item) for item in list_from_file(margins)] + else: + assert is_seq_of(list(margins), (float, int)), ( + 'the attribute `margins` in ``ArcFaceClsHead`` should be a ' + ' float, a Sequence of float, or a ".txt" file path.') + + assert len(margins) == num_classes, \ + 'The length of margins must be equal with num_classes.' + + self.register_buffer( + 'margins', torch.tensor(margins).float(), persistent=False) + # To make `phi` monotonic decreasing, refers to + # https://github.com/deepinsight/insightface/issues/108 + sinm_m = torch.sin(math.pi - self.margins) * self.margins + threshold = torch.cos(math.pi - self.margins) + self.register_buffer('sinm_m', sinm_m, persistent=False) + self.register_buffer('threshold', threshold, persistent=False) + + def set_margins(self, margins: Union[Sequence[float], float]) -> None: + """set margins of arcface head. + + Args: + margins (Union[Sequence[float], float]): The marigins. 
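A numeric sketch of the margin substitution performed with the buffers registered above (the margin and angles are arbitrary values): the target-class cosine is replaced by cos(theta + m), and the ``threshold`` / ``sinm_m`` branch keeps the result monotonically decreasing once theta + m would pass pi.

import math
import torch

m = 0.5                                   # margin
theta = torch.tensor([0.3, 2.9])          # one easy angle, one close to pi
cosine = torch.cos(theta)

phi = torch.cos(theta + m)                # margined logit cos(theta + m)

threshold = math.cos(math.pi - m)         # same formulas as the registered buffers
sinm_m = math.sin(math.pi - m) * m
phi = torch.where(cosine > threshold, phi, cosine - sinm_m)

print(cosine, phi)   # the second entry falls back to cosine - sinm_m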
+ """ + if isinstance(margins, float): + margins = [margins] * self.num_classes + assert is_seq_of( + list(margins), float) and (len(margins) == self.num_classes), ( + f'margins must be Sequence[Union(float, int)], get {margins}') + + self.margins = torch.tensor( + margins, device=self.margins.device, dtype=torch.float32) + self.sinm_m = torch.sin(self.margins) * self.margins + self.threshold = -torch.cos(self.margins) + + def pre_logits(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The process before the final classification head. + + The input ``feats`` is a tuple of tensor, and each tensor is the + feature of a backbone stage. In ``ArcFaceHead``, we just obtain the + feature of the last stage. + """ + # The ArcFaceHead doesn't have other module, just return after + # unpacking. + return feats[-1] + + def _get_logit_with_margin(self, pre_logits, target): + """add arc margin to the cosine in target index. + + The target must be in index format. + """ + assert target.dim() == 1 or ( + target.dim() == 2 and target.shape[1] == 1), \ + 'The target must be in index format.' + cosine = self.norm_product(pre_logits) + phi = torch.cos(torch.acos(cosine) + self.margins) + + if self.easy_margin: + # when cosine>0, choose phi + # when cosine<=0, choose cosine + phi = torch.where(cosine > 0, phi, cosine) + else: + # when cos>th, choose phi + # when cos<=th, choose cosine-mm + phi = torch.where(cosine > self.threshold, phi, + cosine - self.sinm_m) + + target = convert_to_one_hot(target, self.num_classes) + output = target * phi + (1 - target) * cosine + return output + + def forward(self, + feats: Tuple[torch.Tensor], + target: Optional[torch.Tensor] = None) -> torch.Tensor: + """The forward process.""" + # Disable AMP + with autocast(enabled=False): + pre_logits = self.pre_logits(feats) + + if target is None: + # when eval, logit is the cosine between W and pre_logits; + # cos(theta_yj) = (x/||x||) * (W/||W||) + logit = self.norm_product(pre_logits) + else: + # when training, add a margin to the pre_logits where target is + # True, then logit is the cosine between W and new pre_logits + logit = self._get_logit_with_margin(pre_logits, target) + + return self.scale * logit + + def loss(self, feats: Tuple[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> dict: + """Calculate losses from the classification score. + + Args: + feats (tuple[Tensor]): The features extracted from the backbone. + Multiple stage inputs are acceptable but only the last stage + will be used to classify. The shape of every item should be + ``(num_samples, num_classes)``. + data_samples (List[DataSample]): The annotation data of + every samples. + **kwargs: Other keyword arguments to forward the loss module. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + # Unpack data samples and pack targets + label_target = torch.cat([i.gt_label for i in data_samples]) + if 'gt_score' in data_samples[0]: + # Batch augmentation may convert labels to one-hot format scores. 
+ target = torch.stack([i.gt_score for i in data_samples]) + else: + target = label_target + + # the index format target would be used + cls_score = self(feats, label_target) + + # compute loss + losses = dict() + loss = self.loss_module( + cls_score, target, avg_factor=cls_score.size(0), **kwargs) + losses['loss'] = loss + + return losses diff --git a/mmpretrain/models/heads/mim_head.py b/mmpretrain/models/heads/mim_head.py new file mode 100644 index 0000000..bda90c8 --- /dev/null +++ b/mmpretrain/models/heads/mim_head.py @@ -0,0 +1,37 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class MIMHead(BaseModule): + """Pre-training head for Masked Image Modeling. + + Args: + loss (dict): Config dict for module of loss functions. + """ + + def __init__(self, loss: dict) -> None: + super().__init__() + self.loss_module = MODELS.build(loss) + + def loss(self, + pred: torch.Tensor, + target: torch.Tensor, + mask: Optional[torch.Tensor] = None) -> torch.Tensor: + """Forward head. + + Args: + pred (torch.Tensor): Predictions with shape B x L x C. + target (torch.Tensor): Targets with shape B x L x C. + mask (torch.Tensor): Mask with shape B x L. + + Returns: + torch.Tensor: The loss tensor. + """ + loss = self.loss_module(pred, target, mask) + return loss diff --git a/mmpretrain/models/heads/mixmim_head.py b/mmpretrain/models/heads/mixmim_head.py new file mode 100644 index 0000000..a709630 --- /dev/null +++ b/mmpretrain/models/heads/mixmim_head.py @@ -0,0 +1,49 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmpretrain.registry import MODELS +from .mae_head import MAEPretrainHead + + +@MODELS.register_module() +class MixMIMPretrainHead(MAEPretrainHead): + """Head for MixMIM Pre-training. + + Args: + loss (dict): Config of loss. + norm_pix_loss (bool): Whether or not normalize target. + Defaults to False. + patch_size (int): Patch size. Defaults to 16. + """ + + def __init__(self, + loss: dict, + norm_pix: bool = False, + patch_size: int = 16) -> None: + super().__init__(loss=loss, norm_pix=norm_pix, patch_size=patch_size) + + def loss(self, x_rec: torch.Tensor, target: torch.Tensor, + mask: torch.Tensor) -> torch.Tensor: + """Generate loss. + + Args: + pred (torch.Tensor): The reconstructed image. + target (torch.Tensor): The target image. + mask (torch.Tensor): The mask of the target image. + + Returns: + torch.Tensor: The reconstruction loss. + """ + target = self.construct_target(target) + + B, L, C = x_rec.shape + + # unmix tokens + x1_rec = x_rec[:B // 2] + x2_rec = x_rec[B // 2:] + + unmix_x_rec = x1_rec * mask + x2_rec.flip(0) * (1 - mask) + + loss_rec = self.loss_module(unmix_x_rec, target) + + return loss_rec diff --git a/mmpretrain/models/heads/mocov3_head.py b/mmpretrain/models/heads/mocov3_head.py new file mode 100644 index 0000000..c2bec2a --- /dev/null +++ b/mmpretrain/models/heads/mocov3_head.py @@ -0,0 +1,66 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +from mmengine.dist import all_gather, get_rank +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class MoCoV3Head(BaseModule): + """Head for MoCo v3 Pre-training. + + This head builds a predictor, which can be any registered neck component. + It also implements latent contrastive loss between two forward features. 
+ Part of the code is modified from: + ``_. + + Args: + predictor (dict): Config dict for module of predictor. + loss (dict): Config dict for module of loss functions. + temperature (float): The temperature hyper-parameter that + controls the concentration level of the distribution. + Defaults to 1.0. + """ + + def __init__(self, + predictor: dict, + loss: dict, + temperature: float = 1.0) -> None: + super().__init__() + self.predictor = MODELS.build(predictor) + self.loss_module = MODELS.build(loss) + self.temperature = temperature + + def loss(self, base_out: torch.Tensor, + momentum_out: torch.Tensor) -> torch.Tensor: + """Generate loss. + + Args: + base_out (torch.Tensor): NxC features from base_encoder. + momentum_out (torch.Tensor): NxC features from momentum_encoder. + + Returns: + torch.Tensor: The loss tensor. + """ + # predictor computation + pred = self.predictor([base_out])[0] + + # normalize + pred = nn.functional.normalize(pred, dim=1) + target = nn.functional.normalize(momentum_out, dim=1) + + # get negative samples + target = torch.cat(all_gather(target), dim=0) + + # Einstein sum is more intuitive + logits = torch.einsum('nc,mc->nm', [pred, target]) / self.temperature + + # generate labels + batch_size = logits.shape[0] + labels = (torch.arange(batch_size, dtype=torch.long) + + batch_size * get_rank()).to(logits.device) + + loss = self.loss_module(logits, labels) + return loss diff --git a/mmpretrain/models/heads/multi_label_cls_head.py b/mmpretrain/models/heads/multi_label_cls_head.py new file mode 100644 index 0000000..ca36bfe --- /dev/null +++ b/mmpretrain/models/heads/multi_label_cls_head.py @@ -0,0 +1,155 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple + +import torch +import torch.nn as nn +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample, label_to_onehot + + +@MODELS.register_module() +class MultiLabelClsHead(BaseModule): + """Classification head for multilabel task. + + Args: + loss (dict): Config of classification loss. Defaults to + dict(type='CrossEntropyLoss', use_sigmoid=True). + thr (float, optional): Predictions with scores under the thresholds + are considered as negative. Defaults to None. + topk (int, optional): Predictions with the k-th highest scores are + considered as positive. Defaults to None. + init_cfg (dict, optional): The extra init config of layers. + Defaults to None. + + Notes: + If both ``thr`` and ``topk`` are set, use ``thr` to determine + positive predictions. If neither is set, use ``thr=0.5`` as + default. + """ + + def __init__(self, + loss: Dict = dict(type='CrossEntropyLoss', use_sigmoid=True), + thr: Optional[float] = None, + topk: Optional[int] = None, + init_cfg: Optional[dict] = None): + super(MultiLabelClsHead, self).__init__(init_cfg=init_cfg) + + if not isinstance(loss, nn.Module): + loss = MODELS.build(loss) + self.loss_module = loss + + if thr is None and topk is None: + thr = 0.5 + + self.thr = thr + self.topk = topk + + def pre_logits(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The process before the final classification head. + + The input ``feats`` is a tuple of tensor, and each tensor is the + feature of a backbone stage. In ``MultiLabelClsHead``, we just obtain + the feature of the last stage. + """ + # The MultiLabelClsHead doesn't have other module, just return after + # unpacking. 
+ return feats[-1] + + def forward(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The forward process.""" + pre_logits = self.pre_logits(feats) + # The MultiLabelClsHead doesn't have the final classification head, + # just return the unpacked inputs. + return pre_logits + + def loss(self, feats: Tuple[torch.Tensor], data_samples: List[DataSample], + **kwargs) -> dict: + """Calculate losses from the classification score. + + Args: + feats (tuple[Tensor]): The features extracted from the backbone. + Multiple stage inputs are acceptable but only the last stage + will be used to classify. The shape of every item should be + ``(num_samples, num_classes)``. + data_samples (List[DataSample]): The annotation data of + every samples. + **kwargs: Other keyword arguments to forward the loss module. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + # The part can be traced by torch.fx + cls_score = self(feats) + + # The part can not be traced by torch.fx + losses = self._get_loss(cls_score, data_samples, **kwargs) + return losses + + def _get_loss(self, cls_score: torch.Tensor, + data_samples: List[DataSample], **kwargs): + """Unpack data samples and compute loss.""" + num_classes = cls_score.size()[-1] + # Unpack data samples and pack targets + if 'gt_score' in data_samples[0]: + target = torch.stack([i.gt_score.float() for i in data_samples]) + else: + target = torch.stack([ + label_to_onehot(i.gt_label, num_classes) for i in data_samples + ]).float() + + # compute loss + losses = dict() + loss = self.loss_module( + cls_score, target, avg_factor=cls_score.size(0), **kwargs) + losses['loss'] = loss + + return losses + + def predict(self, + feats: Tuple[torch.Tensor], + data_samples: List[DataSample] = None) -> List[DataSample]: + """Inference without augmentation. + + Args: + feats (tuple[Tensor]): The features extracted from the backbone. + Multiple stage inputs are acceptable but only the last stage + will be used to classify. The shape of every item should be + ``(num_samples, num_classes)``. + data_samples (List[DataSample], optional): The annotation + data of every samples. If not None, set ``pred_label`` of + the input data samples. Defaults to None. + + Returns: + List[DataSample]: A list of data samples which contains the + predicted results. + """ + # The part can be traced by torch.fx + cls_score = self(feats) + + # The part can not be traced by torch.fx + predictions = self._get_predictions(cls_score, data_samples) + return predictions + + def _get_predictions(self, cls_score: torch.Tensor, + data_samples: List[DataSample]): + """Post-process the output of head. + + Including softmax and set ``pred_label`` of data samples. + """ + pred_scores = torch.sigmoid(cls_score) + + if data_samples is None: + data_samples = [DataSample() for _ in range(cls_score.size(0))] + + for data_sample, score in zip(data_samples, pred_scores): + if self.thr is not None: + # a label is predicted positive if larger than thr + label = torch.where(score >= self.thr)[0] + else: + # top-k labels will be predicted positive for any example + _, label = score.topk(self.topk) + data_sample.set_pred_score(score).set_pred_label(label) + + return data_samples diff --git a/mmpretrain/models/heads/multi_label_csra_head.py b/mmpretrain/models/heads/multi_label_csra_head.py new file mode 100644 index 0000000..95a3a0e --- /dev/null +++ b/mmpretrain/models/heads/multi_label_csra_head.py @@ -0,0 +1,112 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
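A standalone sketch of the decision rule in ``MultiLabelClsHead._get_predictions`` above, with toy logits: scores come from a sigmoid, and positives are picked by ``thr`` when it is set (it takes precedence), otherwise by the ``topk`` highest scores.

import torch

cls_score = torch.tensor([[2.0, -1.0, 0.3, -3.0]])   # toy logits for 4 classes
score = torch.sigmoid(cls_score)[0]                   # per-class probabilities

thr, topk = 0.5, 2

label_by_thr = torch.where(score >= thr)[0]           # -> tensor([0, 2])
_, label_by_topk = score.topk(topk)                   # indices of the 2 best scores

print(score, label_by_thr, label_by_topk)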
+# Modified from https://github.com/Kevinz-code/CSRA +from typing import Tuple + +import torch +import torch.nn as nn +from mmengine.model import BaseModule, ModuleList + +from mmpretrain.registry import MODELS +from .multi_label_cls_head import MultiLabelClsHead + + +@MODELS.register_module() +class CSRAClsHead(MultiLabelClsHead): + """Class-specific residual attention classifier head. + + Please refer to the `Residual Attention: A Simple but Effective Method for + Multi-Label Recognition (ICCV 2021) `_ + for details. + + Args: + num_classes (int): Number of categories. + in_channels (int): Number of channels in the input feature map. + num_heads (int): Number of residual at tensor heads. + loss (dict): Config of classification loss. + lam (float): Lambda that combines global average and max pooling + scores. + init_cfg (dict, optional): The extra init config of layers. + Defaults to use ``dict(type='Normal', layer='Linear', std=0.01)``. + """ + temperature_settings = { # softmax temperature settings + 1: [1], + 2: [1, 99], + 4: [1, 2, 4, 99], + 6: [1, 2, 3, 4, 5, 99], + 8: [1, 2, 3, 4, 5, 6, 7, 99] + } + + def __init__(self, + num_classes: int, + in_channels: int, + num_heads: int, + lam: float, + init_cfg=dict(type='Normal', layer='Linear', std=0.01), + **kwargs): + assert num_heads in self.temperature_settings.keys( + ), 'The num of heads is not in temperature setting.' + assert lam > 0, 'Lambda should be between 0 and 1.' + super(CSRAClsHead, self).__init__(init_cfg=init_cfg, **kwargs) + self.temp_list = self.temperature_settings[num_heads] + self.csra_heads = ModuleList([ + CSRAModule(num_classes, in_channels, self.temp_list[i], lam) + for i in range(num_heads) + ]) + + def pre_logits(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The process before the final classification head. + + The input ``feats`` is a tuple of tensor, and each tensor is the + feature of a backbone stage. In ``CSRAClsHead``, we just obtain the + feature of the last stage. + """ + # The CSRAClsHead doesn't have other module, just return after + # unpacking. + return feats[-1] + + def forward(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The forward process.""" + pre_logits = self.pre_logits(feats) + logit = sum([head(pre_logits) for head in self.csra_heads]) + return logit + + +class CSRAModule(BaseModule): + """Basic module of CSRA with different temperature. + + Args: + num_classes (int): Number of categories. + in_channels (int): Number of channels in the input feature map. + T (int): Temperature setting. + lam (float): Lambda that combines global average and max pooling + scores. + init_cfg (dict | optional): The extra init config of layers. + Defaults to use dict(type='Normal', layer='Linear', std=0.01). 
+ """ + + def __init__(self, + num_classes: int, + in_channels: int, + T: int, + lam: float, + init_cfg=None): + + super(CSRAModule, self).__init__(init_cfg=init_cfg) + self.T = T # temperature + self.lam = lam # Lambda + self.head = nn.Conv2d(in_channels, num_classes, 1, bias=False) + self.softmax = nn.Softmax(dim=2) + + def forward(self, x): + score = self.head(x) / torch.norm( + self.head.weight, dim=1, keepdim=True).transpose(0, 1) + score = score.flatten(2) + base_logit = torch.mean(score, dim=2) + + if self.T == 99: # max-pooling + att_logit = torch.max(score, dim=2)[0] + else: + score_soft = self.softmax(score * self.T) + att_logit = torch.sum(score * score_soft, dim=2) + + return base_logit + self.lam * att_logit diff --git a/mmpretrain/models/heads/multi_label_linear_head.py b/mmpretrain/models/heads/multi_label_linear_head.py new file mode 100644 index 0000000..81217ec --- /dev/null +++ b/mmpretrain/models/heads/multi_label_linear_head.py @@ -0,0 +1,66 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, Optional, Tuple + +import torch +import torch.nn as nn + +from mmpretrain.registry import MODELS +from .multi_label_cls_head import MultiLabelClsHead + + +@MODELS.register_module() +class MultiLabelLinearClsHead(MultiLabelClsHead): + """Linear classification head for multilabel task. + + Args: + loss (dict): Config of classification loss. Defaults to + dict(type='CrossEntropyLoss', use_sigmoid=True). + thr (float, optional): Predictions with scores under the thresholds + are considered as negative. Defaults to None. + topk (int, optional): Predictions with the k-th highest scores are + considered as positive. Defaults to None. + init_cfg (dict, optional): The extra init config of layers. + Defaults to use dict(type='Normal', layer='Linear', std=0.01). + + Notes: + If both ``thr`` and ``topk`` are set, use ``thr` to determine + positive predictions. If neither is set, use ``thr=0.5`` as + default. + """ + + def __init__(self, + num_classes: int, + in_channels: int, + loss: Dict = dict(type='CrossEntropyLoss', use_sigmoid=True), + thr: Optional[float] = None, + topk: Optional[int] = None, + init_cfg: Optional[dict] = dict( + type='Normal', layer='Linear', std=0.01)): + super(MultiLabelLinearClsHead, self).__init__( + loss=loss, thr=thr, topk=topk, init_cfg=init_cfg) + + assert num_classes > 0, f'num_classes ({num_classes}) must be a ' \ + 'positive integer.' + + self.in_channels = in_channels + self.num_classes = num_classes + + self.fc = nn.Linear(self.in_channels, self.num_classes) + + def pre_logits(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The process before the final classification head. + + The input ``feats`` is a tuple of tensor, and each tensor is the + feature of a backbone stage. In ``MultiLabelLinearClsHead``, we just + obtain the feature of the last stage. + """ + # The obtain the MultiLabelLinearClsHead doesn't have other module, + # just return after unpacking. + return feats[-1] + + def forward(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The forward process.""" + pre_logits = self.pre_logits(feats) + # The final classification head. + cls_score = self.fc(pre_logits) + return cls_score diff --git a/mmpretrain/models/heads/multi_task_head.py b/mmpretrain/models/heads/multi_task_head.py new file mode 100644 index 0000000..3515a2b --- /dev/null +++ b/mmpretrain/models/heads/multi_task_head.py @@ -0,0 +1,153 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
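A standalone sketch of the pooling performed in ``CSRAModule.forward`` above (the shapes and the ``T`` / ``lam`` values are illustrative): each class gets an average-pooled base logit plus a ``lam``-weighted attention logit, where the attention is a temperature softmax over spatial positions, or a plain max when ``T == 99``.

import torch

B, num_classes, HW = 2, 5, 49            # batch, classes, flattened spatial size
T, lam = 1.0, 0.1

score = torch.randn(B, num_classes, HW)  # normalized per-class score maps

base_logit = score.mean(dim=2)           # global average pooling
if T == 99:                              # max-pooling branch
    att_logit = score.max(dim=2)[0]
else:                                    # temperature-softmax attention
    score_soft = torch.softmax(score * T, dim=2)
    att_logit = (score * score_soft).sum(dim=2)

logit = base_logit + lam * att_logit     # (B, num_classes)
print(logit.shape)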
+from typing import List, Sequence, Tuple + +import torch +import torch.nn as nn +from mmengine.model import BaseModule, ModuleDict + +from mmpretrain.registry import MODELS +from mmpretrain.structures import MultiTaskDataSample + + +def loss_convertor(loss_func, task_name): + + def wrapped(inputs, data_samples, **kwargs): + mask = torch.empty(len(data_samples), dtype=torch.bool) + task_data_samples = [] + for i, data_sample in enumerate(data_samples): + assert isinstance(data_sample, MultiTaskDataSample) + sample_mask = task_name in data_sample + mask[i] = sample_mask + if sample_mask: + task_data_samples.append(data_sample.get(task_name)) + + if len(task_data_samples) == 0: + # This makes it possible to perform loss.backward when a + # task does not have gt_labels within a batch. + loss = (inputs[0] * 0).sum() + return {'loss': loss, 'mask_size': torch.tensor(0.)} + + # Mask the inputs of the task + def mask_inputs(inputs, mask): + if isinstance(inputs, Sequence): + return type(inputs)( + [mask_inputs(input, mask) for input in inputs]) + elif isinstance(inputs, torch.Tensor): + return inputs[mask] + + masked_inputs = mask_inputs(inputs, mask) + loss_output = loss_func(masked_inputs, task_data_samples, **kwargs) + loss_output['mask_size'] = mask.sum().to(torch.float) + return loss_output + + return wrapped + + +@MODELS.register_module() +class MultiTaskHead(BaseModule): + """Multi task head. + + Args: + task_heads (dict): Sub heads to use, the key will be use to rename the + loss components. + common_cfg (dict): The common settings for all heads. Defaults to an + empty dict. + init_cfg (dict, optional): The extra initialization settings. + Defaults to None. + """ + + def __init__(self, task_heads, init_cfg=None, **kwargs): + super(MultiTaskHead, self).__init__(init_cfg=init_cfg) + + assert isinstance(task_heads, dict), 'The `task_heads` argument' \ + "should be a dict, which's keys are task names and values are" \ + 'configs of head for the task.' + + self.task_heads = ModuleDict() + + for task_name, sub_head in task_heads.items(): + if not isinstance(sub_head, nn.Module): + sub_head = MODELS.build(sub_head, default_args=kwargs) + sub_head.loss = loss_convertor(sub_head.loss, task_name) + self.task_heads[task_name] = sub_head + + def forward(self, feats): + """The forward process.""" + return { + task_name: head(feats) + for task_name, head in self.task_heads.items() + } + + def loss(self, feats: Tuple[torch.Tensor], + data_samples: List[MultiTaskDataSample], **kwargs) -> dict: + """Calculate losses from the classification score. + + Args: + feats (tuple[Tensor]): The features extracted from the backbone. + data_samples (List[MultiTaskDataSample]): The annotation data of + every samples. + **kwargs: Other keyword arguments to forward the loss module. + + Returns: + dict[str, Tensor]: a dictionary of loss components, each task loss + key will be prefixed by the task_name like "task1_loss" + """ + losses = dict() + for task_name, head in self.task_heads.items(): + head_loss = head.loss(feats, data_samples, **kwargs) + for k, v in head_loss.items(): + losses[f'{task_name}_{k}'] = v + return losses + + def predict( + self, + feats: Tuple[torch.Tensor], + data_samples: List[MultiTaskDataSample] = None + ) -> List[MultiTaskDataSample]: + """Inference without augmentation. + + Args: + feats (tuple[Tensor]): The features extracted from the backbone. + data_samples (List[MultiTaskDataSample], optional): The annotation + data of every samples. 
If not None, set ``pred_label`` of + the input data samples. Defaults to None. + + Returns: + List[MultiTaskDataSample]: A list of data samples which contains + the predicted results. + """ + predictions_dict = dict() + + for task_name, head in self.task_heads.items(): + task_samples = None + if data_samples is not None: + task_samples = [ + data_sample.get(task_name, None) if data_sample else None + for data_sample in data_samples + ] + + task_samples = head.predict(feats, task_samples) + batch_size = len(task_samples) + predictions_dict[task_name] = task_samples + + if data_samples is None: + data_samples = [MultiTaskDataSample() for _ in range(batch_size)] + else: + data_samples = [ + MultiTaskDataSample() if data_sample is None else data_sample + for data_sample in data_samples + ] + + for task_name, task_samples in predictions_dict.items(): + for data_sample, task_sample in zip(data_samples, task_samples): + task_sample.set_field( + task_name in data_sample.tasks, + 'eval_mask', + field_type='metainfo') + + if task_name in data_sample.tasks: + data_sample.get(task_name).update(task_sample) + else: + data_sample.set_field(task_sample, task_name) + + return data_samples diff --git a/mmpretrain/models/heads/seq_gen_head.py b/mmpretrain/models/heads/seq_gen_head.py new file mode 100644 index 0000000..b2e9b10 --- /dev/null +++ b/mmpretrain/models/heads/seq_gen_head.py @@ -0,0 +1,188 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class SeqGenerationHead(BaseModule): + """Generation head for multi-modal pre-trained task, adopted by BLIP. + Normally used for generation task. + + Args: + decoder (dict): Decoder for blip generation head. + init_cfg (dict, optional): the config to control the initialization. + Defaults to None. + """ + + def __init__( + self, + decoder: dict, + ignore_index=-100, + loss: dict = dict(type='LabelSmoothLoss', label_smooth_val=0.1), + init_cfg: Optional[dict] = None, + ) -> None: + super(SeqGenerationHead, self).__init__(init_cfg=init_cfg) + self.decoder = MODELS.build(decoder) + self.loss_fn = MODELS.build(loss) + self.ignore_index = ignore_index + + def forward(self, input_ids: torch.Tensor, + encoder_hidden_states: torch.Tensor, + encoder_attention_mask: torch.Tensor, labels: torch.Tensor): + """Forward to get decoder output. + + Args: + input_ids (torch.Tensor): The tokenized input text tensor. + encoder_hidden_states (torch.Tensor): Hidden states from image + embeddings. + encoder_attention_mask (torch.Tensor): Image embeddings hidden + states attention mask. + labels (torch.Tensor): Decoder target for calculate loss. + + Returns: + dict[str, Tensor]: a dictionary of decoder outputs. + """ + + decoder_out = self.decoder( + input_ids=input_ids, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + labels=labels, + return_dict=True, + ) + return decoder_out + + def loss(self, input_ids, encoder_hidden_states, encoder_attention_mask, + labels): + """Calculate losses from the extracted features. + + Args: + input_ids (torch.Tensor): The tokenized input text tensor. + encoder_hidden_states (torch.Tensor): Hidden states from image + embeddings. + encoder_attention_mask (torch.Tensor): Image embeddings hidden + states attention mask. + labels (torch.Tensor): Decoder target for calculate loss. + + Returns: + dict[str, Tensor]: a dictionary of loss components. 
+ """ + + decoder_out = self( + input_ids=input_ids, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + labels=labels, + ) + prediction_scores = decoder_out['logits'] + # we are doing next-token prediction; + # shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + + vocab_size = prediction_scores.shape[-1] + + # mask ignored index + if (labels == self.ignore_index).any(): + labels = labels.view(-1).clone() + ignore_mask = (labels == self.ignore_index) + labels.masked_fill_(ignore_mask, 0) + weight = torch.logical_not(ignore_mask) + avg_factor = max(weight.sum(), 1) + else: + weight = None + avg_factor = labels.size(0) + + lm_loss = self.loss_fn( + shifted_prediction_scores.view(-1, vocab_size), + labels, + weight=weight, + avg_factor=avg_factor, + ) + losses = { + 'seq_gen_lm_loss': lm_loss, + } + + return losses + + def predict(self, + input_ids, + encoder_hidden_states, + sep_token_id, + pad_token_id, + use_nucleus_sampling=False, + num_beams=3, + max_length=20, + min_length=2, + top_p=0.9, + repetition_penalty=1.0, + **kwargs): + """Decoder prediction method. + + Args: + input_ids (torch.Tensor): The tokenized input text tensor. + encoder_hidden_states (torch.Tensor): Hidden states from image + embeddings. + sep_token_id (int): Tokenid of separation token. + pad_token_id (int): Tokenid of pad token. + use_nucleus_sampling (bool): Whether to use nucleus sampling in + prediction. Defaults to False. + num_beams (int): Number of beams used in predition. + Defaults to 3. + max_length (int): Max length of generated text in predition. + Defaults to 20. + min_length (int): Min length of generated text in predition. + Defaults to 20. + top_p (float): + If < 1.0, only keep the top tokens with cumulative probability + >= top_p (nucleus filtering). Defaults to 0.9. + repetition_penalty (float): The parameter for repetition penalty. + Defaults to 1.0. + **kwarg: Other arguments that might used in generation. + + Returns: + dict[str, Tensor]: a dictionary of generation outputs. + """ + device = encoder_hidden_states.device + + # TODO: In old version of transformers + # Additional repeat interleave of hidden states should be add here. + image_atts = torch.ones( + encoder_hidden_states.size()[:-1], dtype=torch.long).to(device) + + model_kwargs = { + 'encoder_hidden_states': encoder_hidden_states, + 'encoder_attention_mask': image_atts, + } + model_kwargs.update(kwargs) + + if use_nucleus_sampling: + # nucleus sampling + outputs = self.decoder.generate( + input_ids=input_ids, + max_length=max_length, + min_length=min_length, + do_sample=True, + top_p=top_p, + num_return_sequences=1, + eos_token_id=sep_token_id, + pad_token_id=pad_token_id, + repetition_penalty=1.1, + **model_kwargs) + else: + # beam search + outputs = self.decoder.generate( + input_ids=input_ids, + max_length=max_length, + min_length=min_length, + num_beams=num_beams, + eos_token_id=sep_token_id, + pad_token_id=pad_token_id, + repetition_penalty=repetition_penalty, + **model_kwargs) + + return outputs diff --git a/mmpretrain/models/heads/simmim_head.py b/mmpretrain/models/heads/simmim_head.py new file mode 100644 index 0000000..b7af984 --- /dev/null +++ b/mmpretrain/models/heads/simmim_head.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
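A standalone sketch of the shift-by-one alignment and ``ignore_index`` masking in ``SeqGenerationHead.loss`` above: position t of the logits predicts token t + 1, and padded positions are excluded from the average. The toy tensors, the vocabulary size, and the plain ``cross_entropy`` stand-in for the configured label-smoothing loss are assumptions for illustration only.

import torch
import torch.nn.functional as F

ignore_index, vocab_size = -100, 10
logits = torch.randn(1, 5, vocab_size)                 # decoder scores, length 5
labels = torch.tensor([[3, 7, 2, ignore_index, ignore_index]])

# next-token prediction: drop the last logit and the first label
shifted_logits = logits[:, :-1, :].contiguous().view(-1, vocab_size)
shifted_labels = labels[:, 1:].contiguous().view(-1).clone()

ignore_mask = shifted_labels == ignore_index
shifted_labels.masked_fill_(ignore_mask, 0)            # dummy class on ignored slots
weight = (~ignore_mask).float()                        # zero weight on ignored slots
avg_factor = weight.sum().clamp(min=1)

ce = F.cross_entropy(shifted_logits, shifted_labels, reduction='none')
loss = (ce * weight).sum() / avg_factor
print(loss)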
+import torch +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class SimMIMHead(BaseModule): + """Head for SimMIM Pre-training. + + Args: + patch_size (int): Patch size of each token. + loss (dict): The config for loss. + """ + + def __init__(self, patch_size: int, loss: dict) -> None: + super().__init__() + self.patch_size = patch_size + self.loss_module = MODELS.build(loss) + + def loss(self, pred: torch.Tensor, target: torch.Tensor, + mask: torch.Tensor) -> torch.Tensor: + """Generate loss. + + This method will expand mask to the size of the original image. + + Args: + pred (torch.Tensor): The reconstructed image (B, C, H, W). + target (torch.Tensor): The target image (B, C, H, W). + mask (torch.Tensor): The mask of the target image. + + Returns: + torch.Tensor: The reconstruction loss. + """ + mask = mask.repeat_interleave(self.patch_size, 1).repeat_interleave( + self.patch_size, 2).unsqueeze(1).contiguous() + loss = self.loss_module(pred, target, mask) + + return loss diff --git a/mmpretrain/models/heads/spark_head.py b/mmpretrain/models/heads/spark_head.py new file mode 100644 index 0000000..a274876 --- /dev/null +++ b/mmpretrain/models/heads/spark_head.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class SparKPretrainHead(BaseModule): + """Pre-training head for SparK. + + Args: + loss (dict): Config of loss. + norm_pix (bool): Whether or not normalize target. Defaults to True. + patch_size (int): Patch size, equal to downsample ratio of backbone. + Defaults to 32. + """ + + def __init__(self, + loss: dict, + norm_pix: bool = True, + patch_size: int = 32) -> None: + super().__init__() + self.norm_pix = norm_pix + self.patch_size = patch_size + self.loss = MODELS.build(loss) + + def patchify(self, imgs): + """Split images into non-overlapped patches. + + Args: + imgs (torch.Tensor): A batch of images, of shape B x C x H x W. + Returns: + torch.Tensor: Patchified images. The shape is B x L x D. + """ + p = self.patch_size + assert len(imgs.shape + ) == 4 and imgs.shape[2] % p == 0 and imgs.shape[3] % p == 0 + + B, C, ori_h, ori_w = imgs.shape + h = ori_h // p + w = ori_w // p + x = imgs.reshape(shape=(B, C, h, p, w, p)) + x = torch.einsum('bchpwq->bhwpqc', x) + + # (B, f*f, downsample_raito*downsample_raito*3) + x = x.reshape(shape=(B, h * w, p**2 * C)) + return x + + def construct_target(self, target: torch.Tensor) -> torch.Tensor: + """Construct the reconstruction target. + + In addition to splitting images into tokens, this module will also + normalize the image according to ``norm_pix``. + Args: + target (torch.Tensor): Image with the shape of B x 3 x H x W + Returns: + torch.Tensor: Tokenized images with the shape of B x L x C + """ + target = self.patchify(target) + if self.norm_pix: + # normalize the target image + mean = target.mean(dim=-1, keepdim=True) + var = target.var(dim=-1, keepdim=True) + target = (target - mean) / (var + 1.e-6)**.5 + + return target + + def forward(self, pred: torch.Tensor, target: torch.Tensor, + active_mask: torch.Tensor) -> torch.Tensor: + """Forward function of MAE head. + + Args: + pred (torch.Tensor): The reconstructed image. + target (torch.Tensor): The target image. + active_mask (torch.Tensor): The mask of the target image. + Returns: + torch.Tensor: The reconstruction loss. 
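A standalone shape sketch for the two mask layouts above (the feature-map side and patch size are arbitrary): ``SimMIMHead`` expands its patch-level mask back to pixel resolution with ``repeat_interleave``, while ``SparKPretrainHead`` flattens the inverted ``active_mask`` to one entry per patch.

import torch

B, patch_size, f = 2, 32, 7     # f = feature-map side; H = W = f * patch_size

# SimMIM: (B, f, f) patch mask -> (B, 1, H, W) pixel mask
patch_mask = torch.randint(0, 2, (B, f, f))
pixel_mask = (patch_mask.repeat_interleave(patch_size, 1)
              .repeat_interleave(patch_size, 2)
              .unsqueeze(1).contiguous())
assert pixel_mask.shape == (B, 1, f * patch_size, f * patch_size)

# SparK: (B, 1, f, f) active mask -> (B, L) mask over the non-active patches
active_mask = torch.randint(0, 2, (B, 1, f, f)).bool()
non_active_mask = active_mask.logical_not().int().view(B, -1)
assert non_active_mask.shape == (B, f * f)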
+ """ + # (B, C, H, W) -> (B, L, C) and perform normalization + target = self.construct_target(target) + + # (B, C, H, W) -> (B, L, C) + pred = self.patchify(pred) + + # (B, 1, f, f) -> (B, L) + non_active_mask = active_mask.logical_not().int().view( + active_mask.shape[0], -1) + + # MSE loss on masked patches + loss = self.loss(pred, target, non_active_mask) + return loss diff --git a/mmpretrain/models/heads/stacked_head.py b/mmpretrain/models/heads/stacked_head.py new file mode 100644 index 0000000..6cd819d --- /dev/null +++ b/mmpretrain/models/heads/stacked_head.py @@ -0,0 +1,135 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, Optional, Sequence, Tuple + +import torch +import torch.nn as nn +from mmcv.cnn import build_activation_layer, build_norm_layer +from mmengine.model import BaseModule, ModuleList + +from mmpretrain.registry import MODELS +from .cls_head import ClsHead + + +class LinearBlock(BaseModule): + """Linear block for StackedLinearClsHead.""" + + def __init__(self, + in_channels, + out_channels, + dropout_rate=0., + norm_cfg=None, + act_cfg=None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.fc = nn.Linear(in_channels, out_channels) + + self.norm = None + self.act = None + self.dropout = None + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, out_channels)[1] + if act_cfg is not None: + self.act = build_activation_layer(act_cfg) + if dropout_rate > 0: + self.dropout = nn.Dropout(p=dropout_rate) + + def forward(self, x): + """The forward process.""" + x = self.fc(x) + if self.norm is not None: + x = self.norm(x) + if self.act is not None: + x = self.act(x) + if self.dropout is not None: + x = self.dropout(x) + return x + + +@MODELS.register_module() +class StackedLinearClsHead(ClsHead): + """Classifier head with several hidden fc layer and a output fc layer. + + Args: + num_classes (int): Number of categories. + in_channels (int): Number of channels in the input feature map. + mid_channels (Sequence[int]): Number of channels in the hidden fc + layers. + dropout_rate (float): Dropout rate after each hidden fc layer, + except the last layer. Defaults to 0. + norm_cfg (dict, optional): Config dict of normalization layer after + each hidden fc layer, except the last layer. Defaults to None. + act_cfg (dict, optional): Config dict of activation function after each + hidden layer, except the last layer. Defaults to use "ReLU". 
+ """ + + def __init__(self, + num_classes: int, + in_channels: int, + mid_channels: Sequence[int], + dropout_rate: float = 0., + norm_cfg: Optional[Dict] = None, + act_cfg: Optional[Dict] = dict(type='ReLU'), + **kwargs): + super(StackedLinearClsHead, self).__init__(**kwargs) + self.num_classes = num_classes + self.in_channels = in_channels + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + assert isinstance(mid_channels, Sequence), \ + f'`mid_channels` of StackedLinearClsHead should be a sequence, ' \ + f'instead of {type(mid_channels)}' + self.mid_channels = mid_channels + + self.dropout_rate = dropout_rate + self.norm_cfg = norm_cfg + self.act_cfg = act_cfg + + self._init_layers() + + def _init_layers(self): + """"Init layers.""" + self.layers = ModuleList() + in_channels = self.in_channels + for hidden_channels in self.mid_channels: + self.layers.append( + LinearBlock( + in_channels, + hidden_channels, + dropout_rate=self.dropout_rate, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg)) + in_channels = hidden_channels + + self.layers.append( + LinearBlock( + self.mid_channels[-1], + self.num_classes, + dropout_rate=0., + norm_cfg=None, + act_cfg=None)) + + def pre_logits(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The process before the final classification head. + + The input ``feats`` is a tuple of tensor, and each tensor is the + feature of a backbone stage. + """ + x = feats[-1] + for layer in self.layers[:-1]: + x = layer(x) + return x + + @property + def fc(self): + """Full connected layer.""" + return self.layers[-1] + + def forward(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The forward process.""" + pre_logits = self.pre_logits(feats) + # The final classification head. + cls_score = self.fc(pre_logits) + return cls_score diff --git a/mmpretrain/models/heads/swav_head.py b/mmpretrain/models/heads/swav_head.py new file mode 100644 index 0000000..8f3a302 --- /dev/null +++ b/mmpretrain/models/heads/swav_head.py @@ -0,0 +1,31 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class SwAVHead(BaseModule): + """Head for SwAV Pre-training. + + Args: + loss (dict): Config dict for module of loss functions. + """ + + def __init__(self, loss: dict) -> None: + super().__init__() + self.loss_module = MODELS.build(loss) + + def loss(self, pred: torch.Tensor) -> torch.Tensor: + """Generate loss. + + Args: + pred (torch.Tensor): NxC input features. + + Returns: + torch.Tensor: The SwAV loss. + """ + loss = self.loss_module(pred) + + return loss diff --git a/mmpretrain/models/heads/vig_head.py b/mmpretrain/models/heads/vig_head.py new file mode 100644 index 0000000..ecb984d --- /dev/null +++ b/mmpretrain/models/heads/vig_head.py @@ -0,0 +1,65 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple + +import torch +import torch.nn as nn +from mmcv.cnn import build_activation_layer + +from mmpretrain.registry import MODELS +from .cls_head import ClsHead + + +@MODELS.register_module() +class VigClsHead(ClsHead): + """The classification head for Vision GNN. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + hidden_dim (int): The number of middle channels. Defaults to 1024. + act_cfg (dict): The config of activation function. + Defaults to ``dict(type='GELU')``. 
+ dropout (float): The dropout rate. + loss (dict): Config of classification loss. Defaults to + ``dict(type='CrossEntropyLoss', loss_weight=1.0)``. + init_cfg (dict, optional): the config to control the initialization. + Defaults to None. + """ + + def __init__(self, + num_classes: int, + in_channels: int, + hidden_dim: int = 1024, + act_cfg: dict = dict(type='GELU'), + dropout: float = 0., + **kwargs): + super().__init__(**kwargs) + + self.fc1 = nn.Linear(in_channels, hidden_dim) + self.bn = nn.BatchNorm1d(hidden_dim) + self.act = build_activation_layer(act_cfg) + self.drop = nn.Dropout(dropout) + self.fc2 = nn.Linear(hidden_dim, num_classes) + + def pre_logits(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The process before the final classification head. + + The input ``feats`` is a tuple of tensor, and each tensor is the + feature of a stage_blocks stage. In ``VigClsHead``, we just obtain the + feature of the last stage. + """ + feats = feats[-1] + feats = self.fc1(feats) + feats = self.bn(feats) + feats = self.act(feats) + feats = self.drop(feats) + + return feats + + def forward(self, feats: Tuple[torch.Tensor]) -> torch.Tensor: + """The forward process.""" + pre_logits = self.pre_logits(feats) + # The final classification head. + cls_score = self.fc2(pre_logits) + return cls_score diff --git a/mmpretrain/models/heads/vision_transformer_head.py b/mmpretrain/models/heads/vision_transformer_head.py new file mode 100644 index 0000000..83e8fca --- /dev/null +++ b/mmpretrain/models/heads/vision_transformer_head.py @@ -0,0 +1,97 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +from collections import OrderedDict +from typing import List, Optional, Tuple + +import torch +import torch.nn as nn +from mmcv.cnn import build_activation_layer +from mmengine.model import Sequential +from mmengine.model.weight_init import trunc_normal_ + +from mmpretrain.registry import MODELS +from .cls_head import ClsHead + + +@MODELS.register_module() +class VisionTransformerClsHead(ClsHead): + """Vision Transformer classifier head. + + Args: + num_classes (int): Number of categories excluding the background + category. + in_channels (int): Number of channels in the input feature map. + hidden_dim (int, optional): Number of the dimensions for hidden layer. + Defaults to None, which means no extra hidden layer. + act_cfg (dict): The activation config. Only available during + pre-training. Defaults to ``dict(type='Tanh')``. + init_cfg (dict): The extra initialization configs. Defaults to + ``dict(type='Constant', layer='Linear', val=0)``. 
+ """ + + def __init__(self, + num_classes: int, + in_channels: int, + hidden_dim: Optional[int] = None, + act_cfg: dict = dict(type='Tanh'), + init_cfg: dict = dict(type='Constant', layer='Linear', val=0), + **kwargs): + super(VisionTransformerClsHead, self).__init__( + init_cfg=init_cfg, **kwargs) + self.in_channels = in_channels + self.num_classes = num_classes + self.hidden_dim = hidden_dim + self.act_cfg = act_cfg + + if self.num_classes <= 0: + raise ValueError( + f'num_classes={num_classes} must be a positive integer') + + self._init_layers() + + def _init_layers(self): + """"Init hidden layer if exists.""" + if self.hidden_dim is None: + layers = [('head', nn.Linear(self.in_channels, self.num_classes))] + else: + layers = [ + ('pre_logits', nn.Linear(self.in_channels, self.hidden_dim)), + ('act', build_activation_layer(self.act_cfg)), + ('head', nn.Linear(self.hidden_dim, self.num_classes)), + ] + self.layers = Sequential(OrderedDict(layers)) + + def init_weights(self): + """"Init weights of hidden layer if exists.""" + super(VisionTransformerClsHead, self).init_weights() + # Modified from ClassyVision + if hasattr(self.layers, 'pre_logits'): + # Lecun norm + trunc_normal_( + self.layers.pre_logits.weight, + std=math.sqrt(1 / self.layers.pre_logits.in_features)) + nn.init.zeros_(self.layers.pre_logits.bias) + + def pre_logits(self, feats: Tuple[List[torch.Tensor]]) -> torch.Tensor: + """The process before the final classification head. + + The input ``feats`` is a tuple of list of tensor, and each tensor is + the feature of a backbone stage. In ``VisionTransformerClsHead``, we + obtain the feature of the last stage and forward in hidden layer if + exists. + """ + feat = feats[-1] # Obtain feature of the last scale. + # For backward-compatibility with the previous ViT output + cls_token = feat[-1] if isinstance(feat, list) else feat + if self.hidden_dim is None: + return cls_token + else: + x = self.layers.pre_logits(cls_token) + return self.layers.act(x) + + def forward(self, feats: Tuple[List[torch.Tensor]]) -> torch.Tensor: + """The forward process.""" + pre_logits = self.pre_logits(feats) + # The final classification head. + cls_score = self.layers.head(pre_logits) + return cls_score diff --git a/mmpretrain/models/heads/vqa_head.py b/mmpretrain/models/heads/vqa_head.py new file mode 100644 index 0000000..c7b5fe5 --- /dev/null +++ b/mmpretrain/models/heads/vqa_head.py @@ -0,0 +1,246 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional, Union + +import mmengine +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class VQAGenerationHead(BaseModule): + """Generation head for multi-modal pre-trained task, adapted by BLIP. + Normally used for qa generation task (open-set) + + Args: + decoder (dict): Decoder for decoding answers. + inference_method (str): Inference method. One of 'rank', 'generate'. + - If 'rank', the model will return answers with the highest + probability from the answer list. + - If 'generate', the model will generate answers. + - Only for test, not for train / val. + num_beams (int): Number of beams for beam search. 1 means no beam + search. Only support when inference_method=='generate'. + Defaults to 3. + num_ans_candidates (int): Number of answer candidates, used to filter + out answers with low probability. Only support when + inference_method=='rank'. Defaults to 128. 
+        loss (dict or nn.Module): Config of loss or module of loss. Defaults to
+            ``nn.CrossEntropyLoss(reduction='none', ignore_index=-100)``.
+        init_cfg (dict, optional): The config to control the initialization.
+            Defaults to None.
+        answer_list_path (str, optional): Path to `answer_list.json`
+            (a JSON file containing the answer list). Required when
+            inference_method=='rank'.
+
+
+    TODO: `mmcls.LabelSmoothLoss` does not support the `ignore_index` param
+    yet. `nn.CrossEntropyLoss` is used here without label smoothing to keep
+    compatibility with torch < 1.10.0.
+    """
+
+    def __init__(
+        self,
+        decoder: dict,
+        inference_method: str = 'generate',
+        num_beams: int = 3,
+        num_ans_candidates: int = 128,
+        loss: Union[dict, nn.Module] = nn.CrossEntropyLoss(
+            reduction='none', ignore_index=-100),
+        init_cfg: Optional[dict] = None,
+        answer_list_path: Optional[str] = None,
+    ) -> None:
+
+        super(VQAGenerationHead, self).__init__(init_cfg=init_cfg)
+        self.decoder = MODELS.build(decoder)
+
+        if inference_method == 'generate':
+            assert isinstance(num_beams, int), \
+                'for VQA `generate` mode, `num_beams` must be an int.'
+            self.num_beams = num_beams
+            self.num_ans_candidates = None
+            self.answer_list = None
+
+        elif inference_method == 'rank':
+            assert isinstance(num_ans_candidates, int), \
+                'for VQA `rank` mode, `num_ans_candidates` must be an int.'
+            assert isinstance(answer_list_path, str), \
+                'for VQA `rank` mode, `answer_list_path` must be set as ' \
+                'the path to `answer_list.json`.'
+            self.num_beams = None
+            self.answer_list = mmengine.load(answer_list_path)
+            if isinstance(self.answer_list, dict):
+                self.answer_list = list(self.answer_list.keys())
+            assert isinstance(self.answer_list, list) and all(
+                isinstance(item, str) for item in self.answer_list), \
+                'for VQA `rank` mode, `answer_list.json` must be a list of str'
+            self.num_ans_candidates = min(num_ans_candidates,
+                                          len(self.answer_list))
+
+        else:
+            raise AssertionError(
+                'for VQA, `inference_method` must be "generate" or "rank", '
+                'got {}.'.format(inference_method))
+
+        self.inference_method = inference_method
+        if not isinstance(loss, nn.Module):
+            loss = MODELS.build(loss)
+        self.loss_module = loss
+
+    def forward(self, feats: dict):
+        prediction_logits = self.decoder(
+            feats['answer_input_ids'],
+            attention_mask=feats['answer_attention_mask'],
+            encoder_hidden_states=feats['question_states'],
+            encoder_attention_mask=feats['question_atts'],
+            labels=feats['answer_targets'],
+            return_dict=True,
+            return_logits=True,  # directly return logits, not computing loss
+            reduction='none',
+        )
+        return prediction_logits
+
+    def loss(self, feats: dict, data_samples=None):
+        """Calculate losses from the extracted features.
+
+        Args:
+            feats (dict): The features extracted from the backbone.
+            data_samples (List[BaseDataElement]): The annotation data of
+                every sample.
+ + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + shifted_prediction_scores = self(feats) + labels = feats['answer_targets'] + lm_loss = None + + # we are doing next-token prediction; + # shift prediction scores and input ids by one + labels = labels[:, 1:].contiguous() + lm_loss = self.loss_module( + shifted_prediction_scores.view(-1, + self.decoder.med_config.vocab_size), + labels.view(-1)) + lm_loss = lm_loss.view(shifted_prediction_scores.size(0), -1).sum(1) + # compute weighted loss + losses = dict() + loss = feats['answer_weight'] * lm_loss + loss = loss.sum() / feats['batch_size'] + losses['vqa_loss'] = loss + + return losses + + def predict_rank(self, feats: dict, data_samples=None): + """Predict rank in a close-set answer list.""" + question_states = feats['multimodal_embeds'] + question_atts = feats['question_atts'] + answer_candidates = feats['answer_candidates'] + assert answer_candidates is not None + + answer_ids = answer_candidates.input_ids + answer_atts = answer_candidates.attention_mask + num_ques = question_states.size(0) + start_ids = answer_ids[0, 0].repeat(num_ques, 1) # bos token + + start_output = self.decoder( + start_ids, + encoder_hidden_states=question_states, + encoder_attention_mask=question_atts, + return_dict=True, + reduction='none', + ) + logits = start_output.logits[:, 0, :] # first token's logit + + # topk_probs: top-k probability + # topk_ids: [num_question, k] + answer_first_token = answer_ids[:, 1] + prob_first_token = F.softmax( + logits, dim=1).index_select( + dim=1, index=answer_first_token) + topk_probs, topk_ids = prob_first_token.topk( + self.num_ans_candidates, dim=1) + + # answer input: [num_question*k, answer_len] + input_ids = [] + input_atts = [] + for b, topk_id in enumerate(topk_ids): + input_ids.append(answer_ids.index_select(dim=0, index=topk_id)) + input_atts.append(answer_atts.index_select(dim=0, index=topk_id)) + input_ids = torch.cat(input_ids, dim=0) + input_atts = torch.cat(input_atts, dim=0) + + targets_ids = input_ids.masked_fill(input_ids == feats['pad_token_id'], + -100) + + def tile(x, dim, n_tile): + init_dim = x.size(dim) + repeat_idx = [1] * x.dim() + repeat_idx[dim] = n_tile + x = x.repeat(*(repeat_idx)) + order_index = torch.LongTensor( + np.concatenate([ + init_dim * np.arange(n_tile) + i for i in range(init_dim) + ])) + return torch.index_select(x, dim, order_index.to(x.device)) + + # repeat encoder's output for top-k answers + question_states = tile(question_states, 0, self.num_ans_candidates) + question_atts = tile(question_atts, 0, self.num_ans_candidates) + + output = self.decoder( + input_ids, + attention_mask=input_atts, + encoder_hidden_states=question_states, + encoder_attention_mask=question_atts, + labels=targets_ids, + return_dict=True, + reduction='none', + ) + + log_probs_sum = -output.loss + log_probs_sum = log_probs_sum.view(num_ques, self.num_ans_candidates) + + max_topk_ids = log_probs_sum.argmax(dim=1) + max_ids = topk_ids[max_topk_ids >= 0, max_topk_ids] + + answers = [self.answer_list[max_id] for max_id in max_ids] + + return answers + + def predict_generate(self, feats: dict, data_samples=None): + """Predict answers in a generation manner.""" + device = feats['multimodal_embeds'].device + question_states = feats['multimodal_embeds'] + question_atts = torch.ones( + question_states.size()[:-1], dtype=torch.long).to(device) + model_kwargs = { + 'encoder_hidden_states': question_states, + 'encoder_attention_mask': question_atts + } + + bos_ids = 
torch.full((feats['multimodal_embeds'].shape[0], 1),
+                              fill_value=feats['bos_token_id'],
+                              device=device)
+
+        outputs = self.decoder.generate(
+            input_ids=bos_ids,
+            max_length=10,
+            min_length=1,
+            num_beams=self.num_beams,
+            eos_token_id=feats['sep_token_id'],
+            pad_token_id=feats['pad_token_id'],
+            **model_kwargs)
+
+        return outputs
+
+    def predict(self, feats: dict, data_samples=None):
+        """Predict results from the extracted features."""
+        if self.inference_method == 'generate':
+            return self.predict_generate(feats, data_samples)
+        elif self.inference_method == 'rank':
+            return self.predict_rank(feats, data_samples)
diff --git a/mmpretrain/models/losses/__init__.py b/mmpretrain/models/losses/__init__.py
new file mode 100644
index 0000000..b1b2ed7
--- /dev/null
+++ b/mmpretrain/models/losses/__init__.py
@@ -0,0 +1,35 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .asymmetric_loss import AsymmetricLoss, asymmetric_loss
+from .cae_loss import CAELoss
+from .cosine_similarity_loss import CosineSimilarityLoss
+from .cross_correlation_loss import CrossCorrelationLoss
+from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
+                                 cross_entropy)
+from .focal_loss import FocalLoss, sigmoid_focal_loss
+from .label_smooth_loss import LabelSmoothLoss
+from .reconstruction_loss import PixelReconstructionLoss
+from .seesaw_loss import SeesawLoss
+from .swav_loss import SwAVLoss
+from .utils import (convert_to_one_hot, reduce_loss, weight_reduce_loss,
+                    weighted_loss)
+
+__all__ = [
+    'asymmetric_loss',
+    'AsymmetricLoss',
+    'cross_entropy',
+    'binary_cross_entropy',
+    'CrossEntropyLoss',
+    'reduce_loss',
+    'weight_reduce_loss',
+    'LabelSmoothLoss',
+    'weighted_loss',
+    'FocalLoss',
+    'sigmoid_focal_loss',
+    'convert_to_one_hot',
+    'SeesawLoss',
+    'CAELoss',
+    'CosineSimilarityLoss',
+    'CrossCorrelationLoss',
+    'PixelReconstructionLoss',
+    'SwAVLoss',
+]
diff --git a/mmpretrain/models/losses/asymmetric_loss.py b/mmpretrain/models/losses/asymmetric_loss.py
new file mode 100644
index 0000000..dcc9707
--- /dev/null
+++ b/mmpretrain/models/losses/asymmetric_loss.py
@@ -0,0 +1,149 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+
+from mmpretrain.registry import MODELS
+from .utils import convert_to_one_hot, weight_reduce_loss
+
+
+def asymmetric_loss(pred,
+                    target,
+                    weight=None,
+                    gamma_pos=1.0,
+                    gamma_neg=4.0,
+                    clip=0.05,
+                    reduction='mean',
+                    avg_factor=None,
+                    use_sigmoid=True,
+                    eps=1e-8):
+    r"""Asymmetric loss.
+
+    Please refer to the `paper `__ for
+    details.
+
+    Args:
+        pred (torch.Tensor): The prediction with shape (N, \*).
+        target (torch.Tensor): The ground truth label of the prediction with
+            shape (N, \*).
+        weight (torch.Tensor, optional): Sample-wise loss weight with shape
+            (N, ). Defaults to None.
+        gamma_pos (float): Positive focusing parameter. Defaults to 1.0.
+        gamma_neg (float): Negative focusing parameter. We usually set
+            gamma_neg > gamma_pos. Defaults to 4.0.
+        clip (float, optional): Probability margin. Defaults to 0.05.
+        reduction (str): The method used to reduce the loss.
+            Options are "none", "mean" and "sum". If reduction is 'none', the
+            loss has the same shape as pred and label. Defaults to 'mean'.
+        avg_factor (int, optional): Average factor that is used to average
+            the loss. Defaults to None.
+        use_sigmoid (bool): Whether the prediction uses sigmoid instead
+            of softmax. Defaults to True.
+        eps (float): The minimum value of the argument of logarithm. Defaults
+            to 1e-8.
+
+    Returns:
+        torch.Tensor: Loss.
+    """
+    assert pred.shape == \
+        target.shape, 'pred and target should be in the same shape.'
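+    # What follows: turn `pred` into probabilities, build the focusing term
+    # `pt` (with the probability margin `clip` shifting the negative side),
+    # raise (1 - pt) to gamma_pos for positives and gamma_neg for negatives,
+    # and use it to weight the log term before reduction.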
+ + if use_sigmoid: + pred_sigmoid = pred.sigmoid() + else: + pred_sigmoid = nn.functional.softmax(pred, dim=-1) + + target = target.type_as(pred) + + if clip and clip > 0: + pt = (1 - pred_sigmoid + + clip).clamp(max=1) * (1 - target) + pred_sigmoid * target + else: + pt = (1 - pred_sigmoid) * (1 - target) + pred_sigmoid * target + asymmetric_weight = (1 - pt).pow(gamma_pos * target + gamma_neg * + (1 - target)) + loss = -torch.log(pt.clamp(min=eps)) * asymmetric_weight + if weight is not None: + assert weight.dim() == 1 + weight = weight.float() + if pred.dim() > 1: + weight = weight.reshape(-1, 1) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@MODELS.register_module() +class AsymmetricLoss(nn.Module): + """asymmetric loss. + + Args: + gamma_pos (float): positive focusing parameter. + Defaults to 0.0. + gamma_neg (float): Negative focusing parameter. We + usually set gamma_neg > gamma_pos. Defaults to 4.0. + clip (float, optional): Probability margin. Defaults to 0.05. + reduction (str): The method used to reduce the loss into + a scalar. + loss_weight (float): Weight of loss. Defaults to 1.0. + use_sigmoid (bool): Whether the prediction uses sigmoid instead + of softmax. Defaults to True. + eps (float): The minimum value of the argument of logarithm. Defaults + to 1e-8. + """ + + def __init__(self, + gamma_pos=0.0, + gamma_neg=4.0, + clip=0.05, + reduction='mean', + loss_weight=1.0, + use_sigmoid=True, + eps=1e-8): + super(AsymmetricLoss, self).__init__() + self.gamma_pos = gamma_pos + self.gamma_neg = gamma_neg + self.clip = clip + self.reduction = reduction + self.loss_weight = loss_weight + self.use_sigmoid = use_sigmoid + self.eps = eps + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + r"""asymmetric loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + target (torch.Tensor): The ground truth label of the prediction + with shape (N, \*), N or (N,1). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, \*). Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The method used to reduce the + loss into a scalar. Options are "none", "mean" and "sum". + Defaults to None. + + Returns: + torch.Tensor: Loss. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if target.dim() == 1 or (target.dim() == 2 and target.shape[1] == 1): + target = convert_to_one_hot(target.view(-1, 1), pred.shape[-1]) + loss_cls = self.loss_weight * asymmetric_loss( + pred, + target, + weight, + gamma_pos=self.gamma_pos, + gamma_neg=self.gamma_neg, + clip=self.clip, + reduction=reduction, + avg_factor=avg_factor, + use_sigmoid=self.use_sigmoid, + eps=self.eps) + return loss_cls diff --git a/mmpretrain/models/losses/cae_loss.py b/mmpretrain/models/losses/cae_loss.py new file mode 100644 index 0000000..1dc081b --- /dev/null +++ b/mmpretrain/models/losses/cae_loss.py @@ -0,0 +1,48 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple + +import torch +from mmengine.model import BaseModule +from torch import nn + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class CAELoss(BaseModule): + """Loss function for CAE. + + Compute the align loss and the main loss. + + Args: + lambd (float): The weight for the align loss. 
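+
+    A minimal call sketch (the shapes and the ``lambd`` value below are
+    illustrative assumptions, not the ones used by the CAE configs):
+
+    Examples:
+        >>> import torch
+        >>> loss_fn = CAELoss(lambd=2.0)
+        >>> logits = torch.randn(4, 8192)
+        >>> target = torch.randint(0, 8192, (4, ))
+        >>> latent_pred = torch.randn(4, 768)
+        >>> latent_target = torch.randn(4, 768)
+        >>> main, align = loss_fn(logits, target, latent_pred, latent_target)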
+ """ + + def __init__(self, lambd: float) -> None: + super().__init__() + self.lambd = lambd + self.loss_cross_entropy = nn.CrossEntropyLoss() + self.loss_mse = nn.MSELoss() + + def forward( + self, logits: torch.Tensor, target: torch.Tensor, + latent_pred: torch.Tensor, + latent_target: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Forward function of CAE Loss. + + Args: + logits (torch.Tensor): The outputs from the decoder. + target (torch.Tensor): The targets generated by dalle. + latent_pred (torch.Tensor): The latent prediction from the + regressor. + latent_target (torch.Tensor): The latent target from the teacher + network. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The main loss and align loss. + """ + loss_main = self.loss_cross_entropy(logits, target) + loss_align = self.loss_mse(latent_pred, + latent_target.detach()) * self.lambd + + return loss_main, loss_align diff --git a/mmpretrain/models/losses/cosine_similarity_loss.py b/mmpretrain/models/losses/cosine_similarity_loss.py new file mode 100644 index 0000000..f0a5931 --- /dev/null +++ b/mmpretrain/models/losses/cosine_similarity_loss.py @@ -0,0 +1,55 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +from typing import Optional + +import torch +from mmengine.model import BaseModule +from torch import nn + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class CosineSimilarityLoss(BaseModule): + """Cosine similarity loss function. + + Compute the similarity between two features and optimize that similarity as + loss. + + Args: + shift_factor (float): The shift factor of cosine similarity. + Default: 0.0. + scale_factor (float): The scale factor of cosine similarity. + Default: 1.0. + """ + + def __init__(self, + shift_factor: float = 0.0, + scale_factor: float = 1.0) -> None: + super().__init__() + self.shift_factor = shift_factor + self.scale_factor = scale_factor + + def forward(self, + pred: torch.Tensor, + target: torch.Tensor, + mask: Optional[torch.Tensor] = None) -> torch.Tensor: + """Forward function of cosine similarity loss. + + Args: + pred (torch.Tensor): The predicted features. + target (torch.Tensor): The target features. + + Returns: + torch.Tensor: The cosine similarity loss. + """ + pred_norm = nn.functional.normalize(pred, dim=-1) + target_norm = nn.functional.normalize(target, dim=-1) + loss = self.shift_factor - self.scale_factor * ( + pred_norm * target_norm).sum(dim=-1) + + if mask is None: + loss = loss.mean() + else: + loss = (loss * mask).sum() / mask.sum() + return loss diff --git a/mmpretrain/models/losses/cross_correlation_loss.py b/mmpretrain/models/losses/cross_correlation_loss.py new file mode 100644 index 0000000..d26ce3d --- /dev/null +++ b/mmpretrain/models/losses/cross_correlation_loss.py @@ -0,0 +1,44 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class CrossCorrelationLoss(BaseModule): + """Cross correlation loss function. + + Compute the on-diagnal and off-diagnal loss. + + Args: + lambd (float): The weight for the off-diag loss. + """ + + def __init__(self, lambd: float = 0.0051) -> None: + super().__init__() + self.lambd = lambd + + def forward(self, cross_correlation_matrix: torch.Tensor) -> torch.Tensor: + """Forward function of cross correlation loss. + + Args: + cross_correlation_matrix (torch.Tensor): The cross correlation + matrix. + + Returns: + torch.Tensor: cross correlation loss. 
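+
+        A minimal sketch (the 4x4 matrix is an arbitrary stand-in for a real
+        cross-correlation matrix):
+
+        Example:
+            >>> import torch
+            >>> loss_fn = CrossCorrelationLoss(lambd=0.0051)
+            >>> c = torch.eye(4) + 0.01 * torch.randn(4, 4)
+            >>> loss = loss_fn(c)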
+ """ + # loss + on_diag = torch.diagonal(cross_correlation_matrix).add_(-1).pow_( + 2).sum() + off_diag = self.off_diagonal(cross_correlation_matrix).pow_(2).sum() + loss = on_diag + self.lambd * off_diag + return loss + + def off_diagonal(self, x: torch.Tensor) -> torch.Tensor: + """Rreturn a flattened view of the off-diagonal elements of a square + matrix.""" + n, m = x.shape + assert n == m + return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten() diff --git a/mmpretrain/models/losses/cross_entropy_loss.py b/mmpretrain/models/losses/cross_entropy_loss.py new file mode 100644 index 0000000..5d418be --- /dev/null +++ b/mmpretrain/models/losses/cross_entropy_loss.py @@ -0,0 +1,209 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from mmpretrain.registry import MODELS +from .utils import weight_reduce_loss + + +def cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None): + """Calculate the CrossEntropy loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + label (torch.Tensor): The gt label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + reduction (str): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (torch.Tensor, optional): The weight for each class with + shape (C), C is the number of classes. Default None. + + Returns: + torch.Tensor: The calculated loss + """ + # element-wise losses + loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none') + + # apply weights and do the reduction + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def soft_cross_entropy(pred, + label, + weight=None, + reduction='mean', + class_weight=None, + avg_factor=None): + """Calculate the Soft CrossEntropy loss. The label can be float. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + label (torch.Tensor): The gt label of the prediction with shape (N, C). + When using "mixup", the label can be float. + weight (torch.Tensor, optional): Sample-wise loss weight. + reduction (str): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (torch.Tensor, optional): The weight for each class with + shape (C), C is the number of classes. Default None. + + Returns: + torch.Tensor: The calculated loss + """ + # element-wise losses + loss = -label * F.log_softmax(pred, dim=-1) + if class_weight is not None: + loss *= class_weight + loss = loss.sum(dim=-1) + + # apply weights and do the reduction + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def binary_cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None, + pos_weight=None): + r"""Calculate the binary CrossEntropy loss with logits. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + label (torch.Tensor): The gt label with shape (N, \*). + weight (torch.Tensor, optional): Element-wise weight of loss with shape + (N, ). Defaults to None. + reduction (str): The method used to reduce the loss. 
+            Options are "none", "mean" and "sum". If reduction is 'none', the
+            loss has the same shape as pred and label. Defaults to 'mean'.
+        avg_factor (int, optional): Average factor that is used to average
+            the loss. Defaults to None.
+        class_weight (torch.Tensor, optional): The weight for each class with
+            shape (C), C is the number of classes. Default None.
+        pos_weight (torch.Tensor, optional): The positive weight for each
+            class with shape (C), C is the number of classes. Default None.
+
+    Returns:
+        torch.Tensor: The calculated loss
+    """
+    # Ensure that the size of class_weight is consistent with pred and label
+    # to avoid automatic broadcasting.
+    assert pred.dim() == label.dim()
+
+    if class_weight is not None:
+        N = pred.size()[0]
+        class_weight = class_weight.repeat(N, 1)
+    loss = F.binary_cross_entropy_with_logits(
+        pred,
+        label.float(),  # only accepts float type tensor
+        weight=class_weight,
+        pos_weight=pos_weight,
+        reduction='none')
+
+    # apply weights and do the reduction
+    if weight is not None:
+        assert weight.dim() == 1
+        weight = weight.float()
+        if pred.dim() > 1:
+            weight = weight.reshape(-1, 1)
+    loss = weight_reduce_loss(
+        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
+    return loss
+
+
+@MODELS.register_module()
+class CrossEntropyLoss(nn.Module):
+    """Cross entropy loss.
+
+    Args:
+        use_sigmoid (bool): Whether the prediction uses sigmoid
+            instead of softmax. Defaults to False.
+        use_soft (bool): Whether to use the soft version of CrossEntropyLoss.
+            Defaults to False.
+        reduction (str): The method used to reduce the loss.
+            Options are "none", "mean" and "sum". Defaults to 'mean'.
+        loss_weight (float): Weight of the loss. Defaults to 1.0.
+        class_weight (List[float], optional): The weight for each class with
+            shape (C), C is the number of classes. Default None.
+        pos_weight (List[float], optional): The positive weight for each
+            class with shape (C), C is the number of classes. Only enabled in
+            BCE loss when ``use_sigmoid`` is True. Default None.
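+
+    A minimal usage sketch (tensor shapes are illustrative only):
+
+    Examples:
+        >>> import torch
+        >>> loss_fn = CrossEntropyLoss(loss_weight=1.0)
+        >>> cls_score = torch.randn(4, 10)
+        >>> label = torch.randint(0, 10, (4, ))
+        >>> loss = loss_fn(cls_score, label)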
+ """ + + def __init__(self, + use_sigmoid=False, + use_soft=False, + reduction='mean', + loss_weight=1.0, + class_weight=None, + pos_weight=None): + super(CrossEntropyLoss, self).__init__() + self.use_sigmoid = use_sigmoid + self.use_soft = use_soft + assert not ( + self.use_soft and self.use_sigmoid + ), 'use_sigmoid and use_soft could not be set simultaneously' + + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = class_weight + self.pos_weight = pos_weight + + if self.use_sigmoid: + self.cls_criterion = binary_cross_entropy + elif self.use_soft: + self.cls_criterion = soft_cross_entropy + else: + self.cls_criterion = cross_entropy + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + **kwargs): + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + + if self.class_weight is not None: + class_weight = cls_score.new_tensor(self.class_weight) + else: + class_weight = None + + # only BCE loss has pos_weight + if self.pos_weight is not None and self.use_sigmoid: + pos_weight = cls_score.new_tensor(self.pos_weight) + kwargs.update({'pos_weight': pos_weight}) + else: + pos_weight = None + + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + weight, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + **kwargs) + return loss_cls diff --git a/mmpretrain/models/losses/focal_loss.py b/mmpretrain/models/losses/focal_loss.py new file mode 100644 index 0000000..9d2cf50 --- /dev/null +++ b/mmpretrain/models/losses/focal_loss.py @@ -0,0 +1,116 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch.nn as nn +import torch.nn.functional as F + +from mmpretrain.registry import MODELS +from .utils import convert_to_one_hot, weight_reduce_loss + + +def sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + r"""Sigmoid focal loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + target (torch.Tensor): The ground truth label of the prediction with + shape (N, \*). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, ). Defaults to None. + gamma (float): The gamma for calculating the modulating factor. + Defaults to 2.0. + alpha (float): A balanced form for Focal Loss. Defaults to 0.25. + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". If reduction is 'none' , + loss is same shape as pred and label. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + + Returns: + torch.Tensor: Loss. + """ + assert pred.shape == \ + target.shape, 'pred and target should be in the same shape.' + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + if weight is not None: + assert weight.dim() == 1 + weight = weight.float() + if pred.dim() > 1: + weight = weight.reshape(-1, 1) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@MODELS.register_module() +class FocalLoss(nn.Module): + """Focal loss. + + Args: + gamma (float): Focusing parameter in focal loss. + Defaults to 2.0. 
+ alpha (float): The parameter in balanced form of focal + loss. Defaults to 0.25. + reduction (str): The method used to reduce the loss into + a scalar. Options are "none" and "mean". Defaults to 'mean'. + loss_weight (float): Weight of loss. Defaults to 1.0. + """ + + def __init__(self, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0): + + super(FocalLoss, self).__init__() + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + r"""Sigmoid focal loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, \*). + target (torch.Tensor): The ground truth label of the prediction + with shape (N, \*), N or (N,1). + weight (torch.Tensor, optional): Sample-wise loss weight with shape + (N, \*). Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The method used to reduce the + loss into a scalar. Options are "none", "mean" and "sum". + Defaults to None. + + Returns: + torch.Tensor: Loss. + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if target.dim() == 1 or (target.dim() == 2 and target.shape[1] == 1): + target = convert_to_one_hot(target.view(-1, 1), pred.shape[-1]) + loss_cls = self.loss_weight * sigmoid_focal_loss( + pred, + target, + weight, + gamma=self.gamma, + alpha=self.alpha, + reduction=reduction, + avg_factor=avg_factor) + return loss_cls diff --git a/mmpretrain/models/losses/label_smooth_loss.py b/mmpretrain/models/losses/label_smooth_loss.py new file mode 100644 index 0000000..f117df3 --- /dev/null +++ b/mmpretrain/models/losses/label_smooth_loss.py @@ -0,0 +1,177 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn + +from mmpretrain.registry import MODELS +from .cross_entropy_loss import CrossEntropyLoss +from .utils import convert_to_one_hot + + +@MODELS.register_module() +class LabelSmoothLoss(nn.Module): + r"""Initializer for the label smoothed cross entropy loss. + + Refers to `Rethinking the Inception Architecture for Computer Vision + `_ + + This decreases gap between output scores and encourages generalization. + Labels provided to forward can be one-hot like vectors (NxC) or class + indices (Nx1). + And this accepts linear combination of one-hot like labels from mixup or + cutmix except multi-label task. + + Args: + label_smooth_val (float): The degree of label smoothing. + num_classes (int, optional): Number of classes. Defaults to None. + mode (str): Refers to notes, Options are 'original', 'classy_vision', + 'multi_label'. Defaults to 'original'. + use_sigmoid (bool, optional): Whether the prediction uses sigmoid of + softmax. Defaults to None, which means to use sigmoid in + "multi_label" mode and not use in other modes. + reduction (str): The method used to reduce the loss. + Options are "none", "mean" and "sum". Defaults to 'mean'. + loss_weight (float): Weight of the loss. Defaults to 1.0. + + Notes: + - if the mode is **"original"**, this will use the same label smooth + method as the original paper as: + + .. math:: + (1-\epsilon)\delta_{k, y} + \frac{\epsilon}{K} + + where :math:`\epsilon` is the ``label_smooth_val``, :math:`K` is the + ``num_classes`` and :math:`\delta_{k, y}` is Dirac delta, which + equals 1 for :math:`k=y` and 0 otherwise. 
+
+    - if the mode is **"classy_vision"**, this will use the same label
+      smooth method as the facebookresearch/ClassyVision repo as:
+
+        .. math::
+            \frac{\delta_{k, y} + \epsilon/K}{1+\epsilon}
+
+    - if the mode is **"multi_label"**, this will accept labels from
+      multi-label tasks and smooth them as:
+
+        .. math::
+            (1-2\epsilon)\delta_{k, y} + \epsilon
+    """
+
+    def __init__(self,
+                 label_smooth_val,
+                 num_classes=None,
+                 use_sigmoid=None,
+                 mode='original',
+                 reduction='mean',
+                 loss_weight=1.0,
+                 class_weight=None,
+                 pos_weight=None):
+        super().__init__()
+        self.num_classes = num_classes
+        self.loss_weight = loss_weight
+
+        assert (isinstance(label_smooth_val, float)
+                and 0 <= label_smooth_val < 1), \
+            f'LabelSmoothLoss accepts a float label_smooth_val ' \
+            f'over [0, 1), but gets {label_smooth_val}'
+        self.label_smooth_val = label_smooth_val
+
+        accept_reduction = {'none', 'mean', 'sum'}
+        assert reduction in accept_reduction, \
+            f'LabelSmoothLoss supports reduction {accept_reduction}, ' \
+            f'but gets {reduction}.'
+        self.reduction = reduction
+
+        accept_mode = {'original', 'classy_vision', 'multi_label'}
+        assert mode in accept_mode, \
+            f'LabelSmoothLoss supports mode {accept_mode}, but gets {mode}.'
+        self.mode = mode
+
+        self._eps = label_smooth_val
+        if mode == 'classy_vision':
+            self._eps = label_smooth_val / (1 + label_smooth_val)
+
+        if mode == 'multi_label':
+            if not use_sigmoid:
+                from mmengine.logging import MMLogger
+                MMLogger.get_current_instance().warning(
+                    'For multi-label tasks, please set `use_sigmoid=True` '
+                    'to use binary cross entropy.')
+            self.smooth_label = self.multilabel_smooth_label
+            use_sigmoid = True if use_sigmoid is None else use_sigmoid
+        else:
+            self.smooth_label = self.original_smooth_label
+            use_sigmoid = False if use_sigmoid is None else use_sigmoid
+
+        self.ce = CrossEntropyLoss(
+            use_sigmoid=use_sigmoid,
+            use_soft=not use_sigmoid,
+            reduction=reduction,
+            class_weight=class_weight,
+            pos_weight=pos_weight)
+
+    def generate_one_hot_like_label(self, label):
+        """This function takes one-hot or index label vectors and computes
+        one-hot-like label vectors (float)."""
+        # check if targets are inputted as class integers
+        if label.dim() == 1 or (label.dim() == 2 and label.shape[1] == 1):
+            label = convert_to_one_hot(label.view(-1, 1), self.num_classes)
+        return label.float()
+
+    def original_smooth_label(self, one_hot_like_label):
+        assert self.num_classes > 0
+        smooth_label = one_hot_like_label * (1 - self._eps)
+        smooth_label += self._eps / self.num_classes
+        return smooth_label
+
+    def multilabel_smooth_label(self, one_hot_like_label):
+        assert self.num_classes > 0
+        smooth_label = torch.full_like(one_hot_like_label, self._eps)
+        smooth_label.masked_fill_(one_hot_like_label > 0, 1 - self._eps)
+        return smooth_label
+
+    def forward(self,
+                cls_score,
+                label,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None,
+                **kwargs):
+        r"""Label smooth loss.
+
+        Args:
+            cls_score (torch.Tensor): The prediction with shape (N, \*).
+            label (torch.Tensor): The ground truth label of the prediction
+                with shape (N, \*).
+            weight (torch.Tensor, optional): Sample-wise loss weight with shape
+                (N, \*). Defaults to None.
+            avg_factor (int, optional): Average factor that is used to average
+                the loss. Defaults to None.
+            reduction_override (str, optional): The method used to reduce the
+                loss into a scalar. Options are "none", "mean" and "sum".
+                Defaults to None.
+
+        Returns:
+            torch.Tensor: Loss.
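+
+        A minimal call sketch (illustrative values; ``num_classes`` must match
+        the score width):
+
+        Example:
+            >>> import torch
+            >>> loss_fn = LabelSmoothLoss(label_smooth_val=0.1, num_classes=5)
+            >>> cls_score = torch.randn(4, 5)
+            >>> label = torch.randint(0, 5, (4, ))
+            >>> loss = loss_fn(cls_score, label)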
+ """ + if self.num_classes is not None: + assert self.num_classes == cls_score.shape[1], \ + f'num_classes should equal to cls_score.shape[1], ' \ + f'but got num_classes: {self.num_classes} and ' \ + f'cls_score.shape[1]: {cls_score.shape[1]}' + else: + self.num_classes = cls_score.shape[1] + + one_hot_like_label = self.generate_one_hot_like_label(label=label) + assert one_hot_like_label.shape == cls_score.shape, \ + f'LabelSmoothLoss requires output and target ' \ + f'to be same shape, but got output.shape: {cls_score.shape} ' \ + f'and target.shape: {one_hot_like_label.shape}' + + smoothed_label = self.smooth_label(one_hot_like_label) + return self.loss_weight * self.ce.forward( + cls_score, + smoothed_label, + weight=weight, + avg_factor=avg_factor, + reduction_override=reduction_override, + **kwargs) diff --git a/mmpretrain/models/losses/reconstruction_loss.py b/mmpretrain/models/losses/reconstruction_loss.py new file mode 100644 index 0000000..40e6bfd --- /dev/null +++ b/mmpretrain/models/losses/reconstruction_loss.py @@ -0,0 +1,67 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class PixelReconstructionLoss(BaseModule): + """Loss for the reconstruction of pixel in Masked Image Modeling. + + This module measures the distance between the target image and the + reconstructed image and compute the loss to optimize the model. Currently, + This module only provides L1 and L2 loss to penalize the reconstructed + error. In addition, a mask can be passed in the ``forward`` function to + only apply loss on visible region, like that in MAE. + + Args: + criterion (str): The loss the penalize the reconstructed error. + Currently, only supports L1 and L2 loss + channel (int, optional): The number of channels to average the + reconstruction loss. If not None, the reconstruction loss + will be divided by the channel. Defaults to None. + """ + + def __init__(self, criterion: str, channel: Optional[int] = None) -> None: + super().__init__() + + if criterion == 'L1': + self.penalty = torch.nn.L1Loss(reduction='none') + elif criterion == 'L2': + self.penalty = torch.nn.MSELoss(reduction='none') + else: + raise NotImplementedError(f'Currently, PixelReconstructionLoss \ + only supports L1 and L2 loss, but get {criterion}') + + self.channel = channel if channel is not None else 1 + + def forward(self, + pred: torch.Tensor, + target: torch.Tensor, + mask: Optional[torch.Tensor] = None) -> torch.Tensor: + """Forward function to compute the reconstrction loss. + + Args: + pred (torch.Tensor): The reconstructed image. + target (torch.Tensor): The target image. + mask (torch.Tensor): The mask of the target image. + + Returns: + torch.Tensor: The reconstruction loss. + """ + loss = self.penalty(pred, target) + + # if the dim of the loss is 3, take the average of the loss + # along the last dim + if len(loss.shape) == 3: + loss = loss.mean(dim=-1) + + if mask is None: + loss = loss.mean() + else: + loss = (loss * mask).sum() / mask.sum() / self.channel + + return loss diff --git a/mmpretrain/models/losses/seesaw_loss.py b/mmpretrain/models/losses/seesaw_loss.py new file mode 100644 index 0000000..4aaaa45 --- /dev/null +++ b/mmpretrain/models/losses/seesaw_loss.py @@ -0,0 +1,173 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+# migrate from mmdetection with modifications +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmpretrain.registry import MODELS +from .utils import weight_reduce_loss + + +def seesaw_ce_loss(cls_score, + labels, + weight, + cum_samples, + num_classes, + p, + q, + eps, + reduction='mean', + avg_factor=None): + """Calculate the Seesaw CrossEntropy loss. + + Args: + cls_score (torch.Tensor): The prediction with shape (N, C), + C is the number of classes. + labels (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor): Sample-wise loss weight. + cum_samples (torch.Tensor): Cumulative samples for each category. + num_classes (int): The number of classes. + p (float): The ``p`` in the mitigation factor. + q (float): The ``q`` in the compenstation factor. + eps (float): The minimal value of divisor to smooth + the computation of compensation factor + reduction (str, optional): The method used to reduce the loss. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + + Returns: + torch.Tensor: The calculated loss + """ + assert cls_score.size(-1) == num_classes + assert len(cum_samples) == num_classes + + onehot_labels = F.one_hot(labels, num_classes) + seesaw_weights = cls_score.new_ones(onehot_labels.size()) + + # mitigation factor + if p > 0: + sample_ratio_matrix = cum_samples[None, :].clamp( + min=1) / cum_samples[:, None].clamp(min=1) + index = (sample_ratio_matrix < 1.0).float() + sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index + ) # M_{ij} + mitigation_factor = sample_weights[labels.long(), :] + seesaw_weights = seesaw_weights * mitigation_factor + + # compensation factor + if q > 0: + scores = F.softmax(cls_score.detach(), dim=1) + self_scores = scores[ + torch.arange(0, len(scores)).to(scores.device).long(), + labels.long()] + score_matrix = scores / self_scores[:, None].clamp(min=eps) + index = (score_matrix > 1.0).float() + compensation_factor = score_matrix.pow(q) * index + (1 - index) + seesaw_weights = seesaw_weights * compensation_factor + + cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels)) + + loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none') + + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + return loss + + +@MODELS.register_module() +class SeesawLoss(nn.Module): + """Implementation of seesaw loss. + + Refers to `Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021) + `_ + + Args: + use_sigmoid (bool): Whether the prediction uses sigmoid of softmax. + Only False is supported. Defaults to False. + p (float): The ``p`` in the mitigation factor. + Defaults to 0.8. + q (float): The ``q`` in the compenstation factor. + Defaults to 2.0. + num_classes (int): The number of classes. + Defaults to 1000 for the ImageNet dataset. + eps (float): The minimal value of divisor to smooth + the computation of compensation factor, default to 1e-2. + reduction (str): The method that reduces the loss to a scalar. + Options are "none", "mean" and "sum". Defaults to "mean". + loss_weight (float): The weight of the loss. 
Defaults to 1.0 + """ + + def __init__(self, + use_sigmoid=False, + p=0.8, + q=2.0, + num_classes=1000, + eps=1e-2, + reduction='mean', + loss_weight=1.0): + super(SeesawLoss, self).__init__() + assert not use_sigmoid, '`use_sigmoid` is not supported' + self.use_sigmoid = False + self.p = p + self.q = q + self.num_classes = num_classes + self.eps = eps + self.reduction = reduction + self.loss_weight = loss_weight + + self.cls_criterion = seesaw_ce_loss + + # cumulative samples for each category + self.register_buffer('cum_samples', + torch.zeros(self.num_classes, dtype=torch.float)) + + def forward(self, + cls_score, + labels, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + cls_score (torch.Tensor): The prediction with shape (N, C). + labels (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction (str, optional): The method used to reduce the loss. + Options are "none", "mean" and "sum". + Returns: + torch.Tensor: The calculated loss + """ + assert reduction_override in (None, 'none', 'mean', 'sum'), \ + f'The `reduction_override` should be one of (None, "none", ' \ + f'"mean", "sum"), but get "{reduction_override}".' + assert cls_score.size(0) == labels.view(-1).size(0), \ + f'Expected `labels` shape [{cls_score.size(0)}], ' \ + f'but got {list(labels.size())}' + reduction = ( + reduction_override if reduction_override else self.reduction) + assert cls_score.size(-1) == self.num_classes, \ + f'The channel number of output ({cls_score.size(-1)}) does ' \ + f'not match the `num_classes` of seesaw loss ({self.num_classes}).' + + # accumulate the samples for each category + unique_labels = labels.unique() + for u_l in unique_labels: + inds_ = labels == u_l.item() + self.cum_samples[u_l] += inds_.sum() + + if weight is not None: + weight = weight.float() + else: + weight = labels.new_ones(labels.size(), dtype=torch.float) + + # calculate loss_cls_classes + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, labels, weight, self.cum_samples, self.num_classes, + self.p, self.q, self.eps, reduction, avg_factor) + + return loss_cls diff --git a/mmpretrain/models/losses/swav_loss.py b/mmpretrain/models/losses/swav_loss.py new file mode 100644 index 0000000..c7dbb78 --- /dev/null +++ b/mmpretrain/models/losses/swav_loss.py @@ -0,0 +1,190 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Union + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +from mmengine.dist import all_reduce +from mmengine.model import BaseModule + +from mmpretrain.registry import MODELS + + +@torch.no_grad() +def distributed_sinkhorn(out: torch.Tensor, sinkhorn_iterations: int, + world_size: int, epsilon: float) -> torch.Tensor: + """Apply the distributed sinknorn optimization on the scores matrix to find + the assignments. + + This function is modified from + https://github.com/facebookresearch/swav/blob/main/main_swav.py + + Args: + out (torch.Tensor): The scores matrix + sinkhorn_iterations (int): Number of iterations in Sinkhorn-Knopp + algorithm. + world_size (int): The world size of the process group. + epsilon (float): regularization parameter for Sinkhorn-Knopp algorithm. + + Returns: + torch.Tensor: Output of sinkhorn algorithm. 
+ """ + eps_num_stab = 1e-12 + Q = torch.exp(out / epsilon).t( + ) # Q is K-by-B for consistency with notations from our paper + B = Q.shape[1] * world_size # number of samples to assign + K = Q.shape[0] # how many prototypes + + # make the matrix sums to 1 + sum_Q = torch.sum(Q) + all_reduce(sum_Q) + Q /= sum_Q + + for it in range(sinkhorn_iterations): + # normalize each row: total weight per prototype must be 1/K + u = torch.sum(Q, dim=1, keepdim=True) + if len(torch.nonzero(u == 0)) > 0: + Q += eps_num_stab + u = torch.sum(Q, dim=1, keepdim=True, dtype=Q.dtype) + all_reduce(u) + Q /= u + Q /= K + + # normalize each column: total weight per sample must be 1/B + Q /= torch.sum(Q, dim=0, keepdim=True) + Q /= B + + Q *= B # the columns must sum to 1 so that Q is an assignment + return Q.t() + + +class MultiPrototypes(BaseModule): + """Multi-prototypes for SwAV head. + + Args: + output_dim (int): The output dim from SwAV neck. + num_prototypes (List[int]): The number of prototypes needed. + init_cfg (dict or List[dict], optional): Initialization config dict. + Defaults to None. + """ + + def __init__(self, + output_dim: int, + num_prototypes: List[int], + init_cfg: Optional[Union[List[dict], dict]] = None) -> None: + super().__init__(init_cfg=init_cfg) + assert isinstance(num_prototypes, list) + self.num_heads = len(num_prototypes) + for i, k in enumerate(num_prototypes): + self.add_module('prototypes' + str(i), + nn.Linear(output_dim, k, bias=False)) + + def forward(self, x: torch.Tensor) -> List[torch.Tensor]: + """Run forward for every prototype.""" + out = [] + for i in range(self.num_heads): + out.append(getattr(self, 'prototypes' + str(i))(x)) + return out + + +@MODELS.register_module() +class SwAVLoss(BaseModule): + """The Loss for SwAV. + + This Loss contains clustering and sinkhorn algorithms to compute Q codes. + Part of the code is borrowed from `script + `_. + The queue is built in `engine/hooks/swav_hook.py`. + + Args: + feat_dim (int): feature dimension of the prototypes. + sinkhorn_iterations (int): number of iterations in Sinkhorn-Knopp + algorithm. Defaults to 3. + epsilon (float): regularization parameter for Sinkhorn-Knopp algorithm. + Defaults to 0.05. + temperature (float): temperature parameter in training loss. + Defaults to 0.1. + crops_for_assign (List[int]): list of crops id used for computing + assignments. Defaults to [0, 1]. + num_crops (List[int]): list of number of crops. Defaults to [2]. + num_prototypes (int): number of prototypes. Defaults to 3000. + init_cfg (dict or List[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + feat_dim: int, + sinkhorn_iterations: int = 3, + epsilon: float = 0.05, + temperature: float = 0.1, + crops_for_assign: List[int] = [0, 1], + num_crops: List[int] = [2], + num_prototypes: int = 3000, + init_cfg: Optional[Union[List[dict], dict]] = None): + super().__init__(init_cfg=init_cfg) + self.sinkhorn_iterations = sinkhorn_iterations + self.epsilon = epsilon + self.temperature = temperature + self.crops_for_assign = crops_for_assign + self.num_crops = num_crops + self.use_queue = False + self.queue = None + self.world_size = dist.get_world_size() if dist.is_initialized() else 1 + + # prototype layer + self.prototypes = None + if isinstance(num_prototypes, list): + self.prototypes = MultiPrototypes(feat_dim, num_prototypes) + elif num_prototypes > 0: + self.prototypes = nn.Linear(feat_dim, num_prototypes, bias=False) + assert self.prototypes is not None + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function of SwAV loss. + + Args: + x (torch.Tensor): NxC input features. + Returns: + torch.Tensor: The returned loss. + """ + # normalize the prototypes + with torch.no_grad(): + w = self.prototypes.weight.data.clone() + w = nn.functional.normalize(w, dim=1, p=2) + self.prototypes.weight.copy_(w) + + embedding, output = x, self.prototypes(x) + embedding = embedding.detach() + + bs = int(embedding.size(0) / sum(self.num_crops)) + loss = 0 + for i, crop_id in enumerate(self.crops_for_assign): + with torch.no_grad(): + out = output[bs * crop_id:bs * (crop_id + 1)].detach() + # time to use the queue + if self.queue is not None: + if self.use_queue or not torch.all(self.queue[i, + -1, :] == 0): + self.use_queue = True + out = torch.cat( + (torch.mm(self.queue[i], + self.prototypes.weight.t()), out)) + # fill the queue + self.queue[i, bs:] = self.queue[i, :-bs].clone() + self.queue[i, :bs] = embedding[crop_id * bs:(crop_id + 1) * + bs] + + # get assignments (batch_size * num_prototypes) + q = distributed_sinkhorn(out, self.sinkhorn_iterations, + self.world_size, self.epsilon)[-bs:] + + # cluster assignment prediction + subloss = 0 + for v in np.delete(np.arange(np.sum(self.num_crops)), crop_id): + x = output[bs * v:bs * (v + 1)] / self.temperature + subloss -= torch.mean( + torch.sum(q * nn.functional.log_softmax(x, dim=1), dim=1)) + loss += subloss / (np.sum(self.num_crops) - 1) + loss /= len(self.crops_for_assign) + return loss diff --git a/mmpretrain/models/losses/utils.py b/mmpretrain/models/losses/utils.py new file mode 100644 index 0000000..a65b68a --- /dev/null +++ b/mmpretrain/models/losses/utils.py @@ -0,0 +1,119 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools + +import torch +import torch.nn.functional as F + + +def reduce_loss(loss, reduction): + """Reduce loss as specified. + + Args: + loss (Tensor): Elementwise loss tensor. + reduction (str): Options are "none", "mean" and "sum". + + Return: + Tensor: Reduced loss tensor. + """ + reduction_enum = F._Reduction.get_enum(reduction) + # none: 0, elementwise_mean:1, sum: 2 + if reduction_enum == 0: + return loss + elif reduction_enum == 1: + return loss.mean() + elif reduction_enum == 2: + return loss.sum() + + +def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None): + """Apply element-wise weight and reduce loss. + + Args: + loss (Tensor): Element-wise loss. + weight (Tensor): Element-wise weights. + reduction (str): Same as built-in losses of PyTorch. + avg_factor (float): Average factor when computing the mean of losses. 
+ + Returns: + Tensor: Processed loss values. + """ + # if weight is specified, apply element-wise weight + if weight is not None: + loss = loss * weight + + # if avg_factor is not specified, just reduce the loss + if avg_factor is None: + loss = reduce_loss(loss, reduction) + else: + # if reduction is mean, then average the loss by avg_factor + if reduction == 'mean': + loss = loss.sum() / avg_factor + # if reduction is 'none', then do nothing, otherwise raise an error + elif reduction != 'none': + raise ValueError('avg_factor can not be used with reduction="sum"') + return loss + + +def weighted_loss(loss_func): + """Create a weighted version of a given loss function. + + To use this decorator, the loss function must have the signature like + ``loss_func(pred, target, **kwargs)``. The function only needs to compute + element-wise loss without any reduction. This decorator will add weight + and reduction arguments to the function. The decorated function will have + the signature like ``loss_func(pred, target, weight=None, reduction='mean', + avg_factor=None, **kwargs)``. + + :Example: + + >>> import torch + >>> @weighted_loss + >>> def l1_loss(pred, target): + >>> return (pred - target).abs() + + >>> pred = torch.Tensor([0, 2, 3]) + >>> target = torch.Tensor([1, 1, 1]) + >>> weight = torch.Tensor([1, 0, 1]) + + >>> l1_loss(pred, target) + tensor(1.3333) + >>> l1_loss(pred, target, weight) + tensor(1.) + >>> l1_loss(pred, target, reduction='none') + tensor([1., 1., 2.]) + >>> l1_loss(pred, target, weight, avg_factor=2) + tensor(1.5000) + """ + + @functools.wraps(loss_func) + def wrapper(pred, + target, + weight=None, + reduction='mean', + avg_factor=None, + **kwargs): + # get element-wise loss + loss = loss_func(pred, target, **kwargs) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + return wrapper + + +def convert_to_one_hot(targets: torch.Tensor, classes) -> torch.Tensor: + """This function converts target class indices to one-hot vectors, given + the number of classes. + + Args: + targets (Tensor): The ground truth label of the prediction + with shape (N, 1) + classes (int): the number of classes. + + Returns: + Tensor: Processed loss values. + """ + assert (torch.max(targets).item() < + classes), 'Class Index must be less than number of classes' + one_hot_targets = F.one_hot( + targets.long().squeeze(-1), num_classes=classes) + return one_hot_targets diff --git a/mmpretrain/models/multimodal/__init__.py b/mmpretrain/models/multimodal/__init__.py new file mode 100644 index 0000000..e68504c --- /dev/null +++ b/mmpretrain/models/multimodal/__init__.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
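`convert_to_one_hot` above is the helper that `LabelSmoothLoss` leans on when it receives index labels, so it is a convenient hook for checking the three smoothing modes documented earlier by hand. The snippet is an editor's sketch rather than repository code; epsilon and the class count are arbitrary.

```python
# Editor's sketch: what each LabelSmoothLoss mode does to a hard label.
import torch

from mmpretrain.models.losses.utils import convert_to_one_hot

eps, K = 0.1, 4
one_hot = convert_to_one_hot(torch.tensor([[2]]), K).float()  # [[0., 0., 1., 0.]]

# 'original': (1 - eps) * delta + eps / K
original = one_hot * (1 - eps) + eps / K
print(original)  # tensor([[0.0250, 0.0250, 0.9250, 0.0250]])

# 'classy_vision': LabelSmoothLoss rescales eps to eps / (1 + eps) and then
# reuses the 'original' formula.
cv_eps = eps / (1 + eps)
classy = one_hot * (1 - cv_eps) + cv_eps / K
print(classy)  # tensor([[0.0227, 0.0227, 0.9318, 0.0227]])

# 'multi_label': positives become 1 - eps, everything else becomes eps.
multi = torch.full_like(one_hot, eps).masked_fill(one_hot > 0, 1 - eps)
print(multi)  # tensor([[0.1000, 0.1000, 0.9000, 0.1000]])
```

Substituting eps / (1 + eps) into the 'original' formula reproduces (delta + eps/K) / (1 + eps), which is why the rescaling done in `LabelSmoothLoss.__init__` matches the ClassyVision expression in the class docstring.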
+from mmpretrain.utils.dependency import WITH_MULTIMODAL
+
+if WITH_MULTIMODAL:
+    from .blip import *  # noqa: F401,F403
+    from .blip2 import *  # noqa: F401,F403
+    from .chinese_clip import *  # noqa: F401, F403
+    from .clip import *  # noqa: F401, F403
+    from .flamingo import *  # noqa: F401, F403
+    from .llava import *  # noqa: F401, F403
+    from .minigpt4 import *  # noqa: F401, F403
+    from .ofa import *  # noqa: F401, F403
+    from .otter import *  # noqa: F401, F403
+    from .ram import *  # noqa: F401, F403
+else:
+    from mmpretrain.registry import MODELS
+    from mmpretrain.utils.dependency import register_multimodal_placeholder
+
+    register_multimodal_placeholder([
+        'Blip2Caption', 'Blip2Retrieval', 'Blip2VQA', 'BlipCaption',
+        'BlipNLVR', 'BlipRetrieval', 'BlipGrounding', 'BlipVQA', 'Flamingo',
+        'OFA', 'ChineseCLIP', 'MiniGPT4', 'Llava', 'Otter', 'CLIP',
+        'CLIPZeroShot', 'RAM', 'RAMNormal', 'RAMOpenset'
+    ], MODELS)
diff --git a/mmpretrain/models/multimodal/__pycache__/__init__.cpython-310.pyc b/mmpretrain/models/multimodal/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..01a24bbdb5e756bf78b075306afefd0cb4f65700
GIT binary patch
[literal 742: base85-encoded compiled bytecode omitted, not human-readable. The patch text between this blob and the middle of mmpretrain/models/multimodal/blip/blip_grounding.py, including that file's diff header, imports and the BlipGrounding class definition and docstring, is garbled in the source; the recoverable text resumes inside BlipGrounding.__init__ below.]
+        if data_preprocessor is None:
+            data_preprocessor = {}
+        if isinstance(data_preprocessor, dict):
+            data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor')
+            data_preprocessor = MODELS.build(data_preprocessor)
+
+        super(BlipGrounding, self).__init__(
+            init_cfg=init_cfg, data_preprocessor=data_preprocessor)
+
+        self.tokenizer = TOKENIZER.build(tokenizer)
+        self.prompt = 'localize instance: '
+        self.visual_encoder = MODELS.build(visual_encoder)
+        self.text_encoder = MODELS.build(text_encoder)
+        self.multimodal_encoder = MODELS.build(multimodal_encoder)
+        head.setdefault('tokenizer', self.tokenizer)
+        self.grounding_head = MODELS.build(head)
+
+    def forward(
+        self,
+        images: torch.Tensor,
+        data_samples: Optional[List[DataSample]] = None,
+        mode: str = 'loss',
+    ):
+        """The unified entry for a forward process in both training and test.
+        The method accepts two modes, "loss" and "predict":
+
+        - "loss": Forward and return a dict of losses according to the given
+          inputs and data samples.
+        - "predict": Forward and return the data samples updated with the
+          predicted boxes.
+
+        Note that this method doesn't handle back propagation or optimizer
+        updating, which are done in the :meth:`train_step`.
+
+        Args:
+            images (torch.Tensor): The input tensor with shape
+                (N, C, ...) in general.
+            data_samples (List[DataSample], optional): The annotation
+                data of every sample. It's required if ``mode="loss"``.
+                Defaults to None.
+            mode (str): Return what kind of value. Defaults to 'loss'.
+
+        Returns:
+            The return type depends on ``mode``.
+            - If ``mode="loss"``, return a dict of tensor.
+        """
+
+        if mode == 'loss':
+            return self.loss(images, data_samples)
+        elif mode == 'predict':
+            return self.predict(images, data_samples)
+        else:
+            raise RuntimeError(f'Invalid mode "{mode}".')
+
+    def extract_feat(self, images: torch.Tensor) -> torch.Tensor:
+        """Extract features from the input tensor with shape (N, C, ...).
+ + Args: + inputs (Tensor): A batch of inputs. The shape of it should be + ``(num_samples, num_channels, *img_shape)``. + Returns: + image_embeds (Tensor): The output features. + """ + image_embeds = self.visual_encoder(images)[0] + return image_embeds + + def loss( + self, + images: torch.Tensor, + data_samples=None, + ) -> Union[torch.Tensor, Tuple[torch.Tensor]]: + """generate train_loss from the input tensor and data_samples. + + Args: + inputs (Tensor): A batch of inputs. The shape of it should be + ``(num_samples, num_channels, *img_shape)``. + data_samples (List[VQADataSample], optional): The annotation + data of every samples.. + + Returns: + Dict[torch.Tensor]: The losses features. + """ + + # extract image feature + image_embeds = self.extract_feat(images) + image_atts = image_embeds.new_ones( + image_embeds.size()[:-1], dtype=torch.long) + + raw_text = [] + box_targets = [] + for ds in data_samples: + + raw_text.append(ds.text) + box_t = copy.deepcopy(ds.box) * 1.0 + box_t[1] /= ds.img_shape[0] + box_t[3] /= ds.img_shape[0] + box_t[0] /= ds.img_shape[1] + box_t[2] /= ds.img_shape[1] + + box_targets.append(box_t) + + box_targets = image_embeds.new_tensor(np.stack(box_targets)) + box_targets = box_xyxy_to_cxcywh(box_targets) # xywh 0-1 + + text = self.tokenizer( + raw_text, + padding='longest', + truncation=True, + max_length=128, + return_tensors='pt', + ).to(image_embeds.device) + + text_embeds = self.text_encoder( + text.input_ids, + attention_mask=text.attention_mask, + mode='text', + return_dict=True) # bz, seq_len, hid + + # multimodal fusion + multimodal_embeds = self.multimodal_encoder( + encoder_embeds=text_embeds.last_hidden_state, + attention_mask=text.attention_mask, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + + # put answer from data_samples into tensor form + losses = self.grounding_head.loss( + text_embedding=multimodal_embeds.last_hidden_state, + text_embedding_mask=text.attention_mask, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + decoder_targets=box_targets, + ) + + return losses + + def predict(self, images, data_samples=None): + """""" + + # extract image feature + image_embeds = self.extract_feat(images) + image_atts = image_embeds.new_ones( + image_embeds.size()[:-1], dtype=torch.long) + + raw_text = [] + for ds in data_samples: + raw_text.append(ds.text) + + text = self.tokenizer( + raw_text, + padding='longest', + truncation=True, + max_length=128, + return_tensors='pt', + ).to(image_embeds.device) + + text_embeds = self.text_encoder( + text.input_ids, + attention_mask=text.attention_mask, + mode='text', + return_dict=True) # bz, seq_len, hid + + # multimodal fusion + multimodal_embeds = self.multimodal_encoder( + encoder_embeds=text_embeds.last_hidden_state, + attention_mask=text.attention_mask, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + + # put answer from data_samples into tensor form + output_boxes = self.grounding_head.predict( + text_embedding=multimodal_embeds.last_hidden_state, + text_embedding_mask=text.attention_mask, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + ) # xyxy 0-1 + + out_data_samples = [] + for bbox, data_sample, img in zip(output_boxes, data_samples, images): + if data_sample is None: + data_sample = DataSample() + + img_size = img.shape[-2:] + scale_factor = data_sample.get('scale_factor', (1, 1)) + bbox[0::2] = bbox[0::2] * img_size[1] / scale_factor[0] + 
bbox[1::2] = bbox[1::2] * img_size[0] / scale_factor[1] + bbox = bbox[None, :] + data_sample.pred_bboxes = bbox + + if 'gt_bboxes' in data_sample: + gt_bboxes = torch.Tensor(data_sample.get('gt_bboxes')) + gt_bboxes[:, 0::2] /= scale_factor[0] + gt_bboxes[:, 1::2] /= scale_factor[1] + data_sample.gt_bboxes = gt_bboxes + + out_data_samples.append(data_sample) + + return out_data_samples diff --git a/mmpretrain/models/multimodal/blip/blip_nlvr.py b/mmpretrain/models/multimodal/blip/blip_nlvr.py new file mode 100644 index 0000000..f96e3cc --- /dev/null +++ b/mmpretrain/models/multimodal/blip/blip_nlvr.py @@ -0,0 +1,205 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine.model import BaseModel + +from mmpretrain.registry import MODELS, TOKENIZER + + +@MODELS.register_module() +class BlipNLVR(BaseModel): + """BLIP NLVR. + + Args: + vision_backbone (dict): Backbone for extracting image features. + text_backbone (dict): Backbone for extracting text features. + but we integrate the vqa text extractor into the tokenizer part in + datasets/transform/ so we don't need text_backbone + multimodal_backbone (Optional[dict]): Backbone for extracting + multi-modal features. We apply this part as VQA fusion module. + neck (Optional[dict]): The neck module to process features from + backbone. Defaults to None. + head (Optional[dict]): The head module to calculate + loss from processed features. See :mod:`mmmultimodal.models.heads`. + Notice that if the head is not set, `loss` method cannot be used. + Defaults to None. + tokenizer: (Optional[dict]): The config for tokenizer + data_preprocessor (Optional[dict]): The config for preprocessing input + data. If None or no specified type, it will use + "MutimodalDataPreprocessor" as type. + See :class:`MutimodalDataPreprocessor` for more details. + Defaults to None. + init_cfg (Optional[dict]): the config to control the initialization. + Defaults to None. + """ + + def __init__(self, + vision_backbone: dict, + multimodal_backbone: dict, + tokenizer: Optional[dict] = None, + max_txt_len: int = 35, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + if data_preprocessor is None: + data_preprocessor = {} + if isinstance(data_preprocessor, dict): + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super().__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + if tokenizer is not None: + self.tokenizer = TOKENIZER.build(tokenizer) + self.vision_backbone = MODELS.build(vision_backbone) + self.multimodal_backbone = MODELS.build(multimodal_backbone) + self.max_txt_len = max_txt_len + + # For simplity, directly use head definition here. + # If more complex head is designed, move this and loss to a new + # head module. 
+ hidden_size = self.multimodal_backbone.config.hidden_size + self.head = nn.Sequential( + nn.Linear(hidden_size, hidden_size), + nn.ReLU(), + nn.Linear(hidden_size, 2), + ) + + @property + def device(self): + return next(self.parameters()).device + + def preprocess_text(self, data_samples): + + sample_item = data_samples[0] + + if sample_item is not None and 'text' in sample_item: + texts = [sample.get('text') for sample in data_samples] + else: + return None + + # perform tokenize first if satisfied conditions + texts = self.tokenizer( + texts, + padding='longest', + truncation=True, + max_length=self.max_txt_len, + return_tensors='pt', + ).to(self.device) + + return texts + + def forward( + self, + images: dict, + data_samples: Optional[List] = None, + mode: str = 'tensor', + ): + """The unified entry for a forward process in both training and test. + The method should accept only one mode "loss": + + - "loss": Forward and return a dict of losses according to the given + images and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. + + Args: + images (dict of torch.Tensor): + img: pre_processed img tensor (N, C, ...). + text: tokenized text (N, L) + data_samples (List[CaptionDataSample], optional): + The annotation data of every samples. + 'image': raw image data + 'text' tokenized text + mode (str): Return what kind of value. Defaults to 'tensor'. + + Returns: + The return type depends on ``mode``. + - If ``mode="loss"``, return a dict of tensor. + """ + # B, T, C, H, W to T*B, C, H, W + images = images.permute(1, 0, 2, 3, 4).flatten(0, 1) + + if mode == 'loss': + return self.loss(images, data_samples) + elif mode == 'predict': + return self.predict(images, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def predict(self, images, data_samples=None): + """Predict caption.""" + # prepare inputs for decoder generation. + image_embeds = self.vision_backbone(images)[0] + texts = self.preprocess_text(data_samples) + image_atts = torch.ones( + image_embeds.size()[:-1], dtype=torch.long).to(self.device) + + image0_embeds, image1_embeds = torch.split(image_embeds, + texts.input_ids.size(0)) + + # multimodal fusion + multimodal_embeds = self.multimodal_backbone( + texts.input_ids, + attention_mask=texts.attention_mask, + encoder_hidden_states=[image0_embeds, image1_embeds], + encoder_attention_mask=[ + image_atts[:image0_embeds.size(0)], + image_atts[image0_embeds.size(0):], + ], + return_dict=True, + ) + + # get prediction + outputs = self.head(multimodal_embeds.last_hidden_state[:, 0, :]) + + pred_scores = F.softmax(outputs, dim=1) + + for pred_score, data_sample in zip(pred_scores, data_samples): + data_sample.set_pred_score(pred_score) + data_sample.set_pred_label(pred_score.argmax(dim=0)) + + return data_samples + + def loss(self, images, data_samples): + """Calculate losses from a batch of inputs and data samples. + + Args: + images (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[ImageTextDataSample]): The annotation data of + every samples. + + Returns: + dict[str, Tensor]: a dictionary of loss components. + """ + # prepare inputs for decoder generation. 
+ image_embeds = self.vision_backbone(images)[0] + texts = self.preprocess_text(data_samples) + image_atts = torch.ones( + image_embeds.size()[:-1], dtype=torch.long).to(self.device) + image0_embeds, image1_embeds = torch.split(image_embeds, + texts.input_ids.size(0)) + + # multimodal fusion + multimodal_embeds = self.multimodal_backbone( + texts.input_ids, + attention_mask=texts.attention_mask, + encoder_hidden_states=[image0_embeds, image1_embeds], + encoder_attention_mask=[ + image_atts[:image0_embeds.size(0)], + image_atts[image0_embeds.size(0):], + ], + return_dict=True, + ) + + # get prediction + outputs = self.head(multimodal_embeds.last_hidden_state[:, 0, :]) + + targets = torch.tensor([i.gt_label + for i in data_samples]).to(outputs.device) + loss = F.cross_entropy(outputs, targets) + return {'loss': loss} diff --git a/mmpretrain/models/multimodal/blip/blip_retrieval.py b/mmpretrain/models/multimodal/blip/blip_retrieval.py new file mode 100644 index 0000000..3ebc251 --- /dev/null +++ b/mmpretrain/models/multimodal/blip/blip_retrieval.py @@ -0,0 +1,716 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from collections import ChainMap +from copy import deepcopy +from typing import Dict, List, Optional, Tuple, Union + +import mmengine.dist as dist +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine.model import BaseModel +from torch import distributed as torch_dist + +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample +from mmpretrain.utils import track_on_main_process + + +def all_gather_concat(data: torch.Tensor) -> torch.Tensor: + """Gather tensors with different first-dimension size and concat to one + tenosr. + + Note: + Only the first dimension should be different. + + Args: + data (Tensor): Tensor to be gathered. + + Returns: + torch.Tensor: The concatenated tenosr. + """ + if dist.get_world_size() == 1: + return data + + data_size = torch.tensor(data.size(0), device=data.device) + sizes_list = dist.all_gather(data_size) + + max_length = max(sizes_list) + size_diff = max_length.item() - data_size.item() + if size_diff: + padding = torch.zeros( + size_diff, *data.size()[1:], device=data.device, dtype=data.dtype) + data = torch.cat((data, padding)) + + gather_list = dist.all_gather(data) + + all_data = [] + for tensor, size in zip(gather_list, sizes_list): + + all_data.append(tensor[:size]) + + return torch.concat(all_data) + + +@MODELS.register_module() +class BlipRetrieval(BaseModel): + """BLIP Retriever. + + Args: + vision_backbone (dict): Backbone for extracting image features. + text_backbone (dict): Backbone for extracting text features. + multimodal_backbone (Optional[dict]): Backbone for extracting + multi-modal features. + vision_neck (Optional[dict]): The neck module to process image features + from vision backbone. Defaults to None. + text_neck (Optional[dict]): The neck module to process text features + from text backbone. Defaults to None. + head (Optional[Union[List[dict], dict]]): The head module to calculate + loss from processed single modality features. + See :mod:`mmmultimodal.models.heads`. + Notice that if the head is not set, `loss` method cannot be used. + Defaults to None. + multimodal_head (Optional[Union[List[dict], dict]]): The multi-modal + head module to calculate loss from processed multimodal features. + See :mod:`mmmultimodal.models.heads`. + Notice that if the head is not set, `loss` method cannot be used. + Defaults to None. 
+ momentum (float): Momentum used for momentum contrast. + Defaults to .995. + negative_all_rank (bool): Whether to sample negative data from all + ranks for image text matching in training. Defaults to True. + temperature (float): Temperature parameter that controls the + concentration level of the distribution. Defaults to 0.07. + fast_match (bool): If False, select topk similarity as candidates and + compute the matching score. If True, return the similarity as the + matching score directly. Defaults to False. + topk (int): Select topk similarity as candidates for compute matching + scores. Notice that this is not the topk in evaluation. + Defaults to 256. + data_preprocessor (Optional[dict]): The config for preprocessing input + data. If None or no specified type, it will use + "MutimodalDataPreprocessor" as type. + See :class:`MutimodalDataPreprocessor` for more details. + Defaults to None. + init_cfg (Optional[dict]): the config to control the initialization. + Defaults to None. + """ + + def __init__(self, + vision_backbone: dict, + text_backbone: dict, + multimodal_backbone: Optional[dict] = None, + vision_neck: Optional[dict] = None, + text_neck: Optional[dict] = None, + head: Optional[Union[List[dict], dict]] = None, + multimodal_head: Optional[Union[List[dict], dict]] = None, + tokenizer: Optional[dict] = None, + momentum: float = .995, + negative_all_rank: bool = True, + temperature: float = 0.07, + fast_match: bool = False, + topk: int = 256, + max_txt_len: int = 20, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + if data_preprocessor is None: + data_preprocessor = {} + if isinstance(data_preprocessor, dict): + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super().__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + self.vision_backbone = MODELS.build(vision_backbone) + self.text_backbone = MODELS.build(text_backbone) + + if multimodal_backbone is not None: + self.multimodal_backbone = MODELS.build(multimodal_backbone) + + if vision_neck is not None: + self.vision_neck = MODELS.build(vision_neck) + + if text_neck is not None: + self.text_neck = MODELS.build(text_neck) + + if head is not None: + self.head = MODELS.build(head) + + if multimodal_head is not None: + self.multimodal_head = MODELS.build(multimodal_head) + + if tokenizer is not None: + self.tokenizer = TOKENIZER.build(tokenizer) + + self.momentum = momentum + self.negative_all_rank = negative_all_rank + self.temp = nn.Parameter(temperature * torch.ones([])) + # Shares the same para + self.head.temp = self.temp + + # create the momentum encoder + self.vision_backbone_m = deepcopy(self.vision_backbone) + self.text_backbone_m = deepcopy(self.text_backbone) + + self.vision_neck_m = deepcopy(self.vision_neck) + self.text_neck_m = deepcopy(self.text_neck) + + self.model_pairs = [ + [self.vision_backbone, self.vision_backbone_m], + [self.text_backbone, self.text_backbone_m], + [self.vision_neck, self.vision_neck_m], + [self.text_neck, self.text_neck_m], + ] + self.copy_params() + + # multimodal backbone shares weights with text backbone in BLIP + # No need to set up + + # Notice that this topk is used for select k candidate to compute + # image-text score, but not the final metric topk in evaluation. 
+ self.fast_match = fast_match + self.topk = topk + + self.max_txt_len = max_txt_len + + @property + def device(self): + return next(self.parameters()).device + + def preprocess_text(self, data_samples): + sample_item = data_samples[0] + + if sample_item is not None and 'text' in sample_item: + if isinstance(sample_item.get('text'), (list, tuple)): + texts = [] + for sample in data_samples: + texts.extend(sample.get('text')) + elif isinstance(sample_item.get('text'), str): + texts = [sample.get('text') for sample in data_samples] + else: + raise TypeError('text must be a string or a list of strings') + else: + return None + + # perform tokenize first if satisfied conditions + texts = self.tokenizer( + texts, + padding='max_length', + truncation=True, + max_length=self.max_txt_len, + return_tensors='pt', + ).to(self.device) + + return texts + + def forward(self, + images: torch.tensor = None, + data_samples: Optional[List[DataSample]] = None, + mode: str = 'tensor') -> Union[Tuple, dict]: + """The unified entry for a forward process in both training and test. + The method should accept two modes: "tensor", and "loss": + + - "tensor": Forward the whole network and return tensor without any + post-processing, same as a common nn.Module. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. + + For unified "predict" mode in other mm repos. It is noticed that + image-text retrieval cannot perform batch prediction since it will go + through all the samples. A standard process of retrieval evaluation is + to extract and collect all feats, and then predict all samples. + Therefore the `predict` mode here is remained as a trigger + to inform use to choose the right configurations. + + Args: + images (torch.Tensor): The input inputs tensor of shape + (N, C, ...) in general. + data_samples (List[DataSample], optional): The annotation + data of every samples. It's required if ``mode="loss"``. + Defaults to None. + mode (str): Return what kind of value. Defaults to 'tensor'. + + Returns: + The return type depends on ``mode``. + - If ``mode="tensor"``, return a tuple. + - If ``mode="loss"``, return a dict of tensor. + """ + if mode == 'tensor': + return self.extract_feat(images, data_samples) + elif mode == 'loss': + return self.loss(images, data_samples) + elif mode == 'predict': + return self.predict(images, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def extract_feat( + self, + images: torch.Tensor = None, + data_samples: List[DataSample] = None, + return_texts=True, + return_embeds=None, + ) -> Dict[str, torch.Tensor]: + """Extract features from the input dict. + + Args: + images (tensor, optional): The images to extract features. + Defaults to None. + data_samples (list, optional): The data samples containing texts + to extract features. Defaults to None. + return_texts (bool): Whether to return the tokenized text and the + corresponding attention masks. Defaults to True. + return_embeds (bool): Whether to return the text embedding and + image embedding. Defaults to None, which means to use + ``self.fast_match``. + + Returns: + Tuple[torch.Tensor]: The output features. + If multimodal_backbone is not exist, tuple of torch.Tensor + will be returned. 
+ """ + if data_samples is not None: + texts = self.preprocess_text(data_samples) + else: + texts = None + + assert images is not None or texts is not None, \ + 'At least single modality should be passed as inputs.' + + results = {} + if texts is not None and return_texts: + results.update({ + 'text_ids': texts.input_ids, + 'text_attn_mask': texts.attention_mask, + }) + + if return_embeds is None: + return_embeds = not self.fast_match + + # extract image features + if images is not None: + output = self._extract_feat(images, modality='images') + results['image_feat'] = output['image_feat'] + if return_embeds: + results['image_embeds'] = output['image_embeds'] + + # extract text features + if texts is not None: + output = self._extract_feat(texts, modality='texts') + results['text_feat'] = output['text_feat'] + if return_embeds: + results['text_embeds'] = output['text_embeds'] + + return results + + def _extract_feat(self, inputs: Union[torch.Tensor, dict], + modality: str) -> Tuple[torch.Tensor]: + """Extract features from the single modality. + + Args: + inputs (Union[torch.Tensor, dict]): A batch of inputs. + For image, a tensor of shape (N, C, ...) in general. + For text, a dict of tokenized text inputs. + modality (str): Modality feature to be extracted. Only two + options are supported. + + - ``images``: Only extract image features, mostly used for + inference. + - ``texts``: Only extract text features, mostly used for + inference. + + Returns: + Tuple[torch.Tensor]: The output features. + """ + + if modality == 'images': + # extract image features + image_embeds = self.vision_backbone(inputs)[0] + image_feat = F.normalize( + self.vision_neck(image_embeds[:, 0, :]), dim=-1) + return {'image_embeds': image_embeds, 'image_feat': image_feat} + elif modality == 'texts': + # extract text features + text_output = self.text_backbone( + inputs.input_ids, + attention_mask=inputs.attention_mask, + token_type_ids=None, + return_dict=True, + mode='text', + ) + text_embeds = text_output.last_hidden_state + text_feat = F.normalize( + self.text_neck(text_embeds[:, 0, :]), dim=-1) + return {'text_embeds': text_embeds, 'text_feat': text_feat} + else: + raise RuntimeError(f'Invalid modality "{modality}".') + + def loss( + self, + images: torch.Tensor, + data_samples: Optional[List[DataSample]] = None, + ) -> Dict[str, torch.tensor]: + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (dict): A batch of inputs. The input tensor with of + at least one modality. For image, the value is a tensor + of shape (N, C, ...) in general. + For text, the value is a dict of tokenized text inputs. + data_samples (Optional[List[DataSample]]): + The annotation data of every samples. Defaults to None. + + Returns: + Dict[str, torch.tensor]: a dictionary of loss components of + both head and multimodal head. 
+ """ + output = self.extract_feat(images, data_samples, return_embeds=True) + + text_ids = output['text_ids'] + text_attn_mask = output['text_attn_mask'] + image_embeds = output['image_embeds'] + image_feat = output['image_feat'] + text_feat = output['text_feat'] + + image_atts = torch.ones( + image_embeds.size()[:-1], dtype=torch.long).to(self.device) + + # get momentum features + with torch.no_grad(): + self._momentum_update() + image_embeds_m = self.vision_backbone_m(images)[0] + image_feat_m = F.normalize( + self.vision_neck_m(image_embeds_m[:, 0, :]), dim=-1) + + text_output_m = self.text_backbone_m( + text_ids, + attention_mask=text_attn_mask, + token_type_ids=None, + return_dict=True, + mode='text', + ) + text_embeds_m = text_output_m.last_hidden_state + text_feat_m = F.normalize( + self.text_neck_m(text_embeds_m[:, 0, :]), dim=-1) + + loss = self.head.loss( + ([image_feat, text_feat, image_feat_m, text_feat_m], ), + data_samples) + + # prepare for itm + encoder_input_ids = text_ids.clone() + encoder_input_ids[:, + 0] = self.tokenizer.additional_special_tokens_ids[0] + output_pos = self.text_backbone( + encoder_input_ids, + attention_mask=text_attn_mask, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + + idx = torch.tensor([i.image_id for i in data_samples]).view(-1, 1) + bs = idx.size(0) + idxs = torch.cat(dist.all_gather(idx)) + if self.negative_all_rank: + # compute sample similarity + with torch.no_grad(): + mask = torch.eq(idx, idxs.t()).to(self.device) + + image_feat_world = torch.cat(dist.all_gather(image_feat)) + text_feat_world = torch.cat(dist.all_gather(text_feat)) + + sim_i2t = image_feat @ text_feat_world.t() / self.temp + sim_t2i = text_feat @ image_feat_world.t() / self.temp + + weights_i2t = F.softmax(sim_i2t, dim=1) + weights_i2t.masked_fill_(mask, 0) + + weights_t2i = F.softmax(sim_t2i, dim=1) + weights_t2i.masked_fill_(mask, 0) + + world_size = dist.get_world_size() + if world_size == 1: + image_embeds_world = image_embeds + else: + image_embeds_world = torch.cat( + torch_dist.nn.all_gather(image_embeds)) + + # select a negative image (from all ranks) for each text + image_embeds_neg = [] + for b in range(bs): + neg_idx = torch.multinomial(weights_t2i[b], 1).item() + image_embeds_neg.append(image_embeds_world[neg_idx]) + image_embeds_neg = torch.stack(image_embeds_neg, dim=0) + + # select a negative text (from all ranks) for each image + input_ids_world = torch.cat(dist.all_gather(encoder_input_ids)) + att_mask_world = torch.cat(dist.all_gather(text_attn_mask)) + + text_ids_neg = [] + text_atts_neg = [] + for b in range(bs): + neg_idx = torch.multinomial(weights_i2t[b], 1).item() + text_ids_neg.append(input_ids_world[neg_idx]) + text_atts_neg.append(att_mask_world[neg_idx]) + + text_ids_neg = torch.stack(text_ids_neg, dim=0) + text_atts_neg = torch.stack(text_atts_neg, dim=0) + + text_ids_all = torch.cat([encoder_input_ids, text_ids_neg], dim=0) + text_atts_all = torch.cat([text_attn_mask, text_atts_neg], dim=0) + + image_embeds_all = torch.cat([image_embeds_neg, image_embeds], dim=0) + image_atts_all = torch.cat([image_atts, image_atts], dim=0) + + output_neg = self.text_backbone( + text_ids_all, + attention_mask=text_atts_all, + encoder_hidden_states=image_embeds_all, + encoder_attention_mask=image_atts_all, + return_dict=True, + ) + + vl_embeddings = torch.cat( + [ + output_pos.last_hidden_state[:, 0, :], + output_neg.last_hidden_state[:, 0, :], + ], + dim=0, + ) + + # create false data samples + 
data_samples.extend( + [DataSample(is_matched=False) for _ in range(2 * bs)]) + loss_multimodal = self.multimodal_head.loss((vl_embeddings, ), + data_samples) + + return dict(ChainMap(loss, loss_multimodal)) + + def predict(self, images, data_samples, cal_i2t=True, cal_t2i=True): + feats = self.extract_feat(images, data_samples) + + return self.predict_all( + feats, data_samples, cal_i2t=cal_i2t, cal_t2i=cal_t2i) + + def predict_all(self, + feats, + data_samples, + num_images=None, + num_texts=None, + cal_i2t=True, + cal_t2i=True): + text_ids = feats['text_ids'] + text_ids[:, 0] = self.tokenizer.additional_special_tokens_ids[0] + text_attn_mask = feats['text_attn_mask'] + image_embeds = feats.get('image_embeds', None) + image_feat = feats['image_feat'] + text_feat = feats['text_feat'] + + num_images = num_images or image_feat.size(0) + num_texts = num_texts or text_feat.size(0) + + if not self.fast_match: + image_embeds_all = all_gather_concat(image_embeds)[:num_images] + else: + image_embeds_all = None + image_feat_all = all_gather_concat(image_feat)[:num_images] + text_feat_all = all_gather_concat(text_feat)[:num_texts] + text_ids_all = all_gather_concat(text_ids)[:num_texts] + text_attn_mask_all = all_gather_concat(text_attn_mask)[:num_texts] + + results = [] + if cal_i2t: + result_i2t = self.compute_score_matrix_i2t( + image_feat, + image_embeds, + text_feat_all, + text_ids_all, + text_attn_mask_all, + ) + results.append( + self._get_predictions(result_i2t, data_samples, mode='i2t')) + if cal_t2i: + result_t2i = self.compute_score_matrix_t2i( + image_feat_all, + image_embeds_all, + text_feat, + text_ids, + text_attn_mask, + ) + results.append( + self._get_predictions(result_t2i, data_samples, mode='t2i')) + return tuple(results) + + def compute_score_matrix_i2t(self, img_feats, img_embeds, text_feats, + text_ids, text_atts): + """Compare the score matrix for image-to-text retrieval. Every image + should compare to all the text features. + + Args: + img_feats (torch.Tensor): The input img feats tensor with shape + (M, C). M stands for numbers of samples on a single GPU. + img_embeds (torch.Tensor): The input img embeds tensor with shape + (M, C). M stands for numbers of samples on a single GPU. + text_feats (torch.Tensor): The input text feats tensor with shape + (N, C). N stands for numbers of all samples on all GPUs. + text_ids (torch.Tensor): The input tensor with shape (N, C). + text_atts (torch.Tensor): The input tensor with shape (N, C). + + Returns: + torch.Tensor: Score matrix of image-to-text retrieval. 
+ """ + + # compute i2t sim matrix + sim_matrix_i2t = img_feats @ text_feats.t() + if self.fast_match: + return sim_matrix_i2t + + score_matrix_i2t = torch.full((img_feats.size(0), text_feats.size(0)), + -100.0).to(self.device) + for i in track_on_main_process( + range(img_feats.size(0)), 'Compute I2T scores...'): + sims = sim_matrix_i2t[i] + topk_sim, topk_idx = sims.topk(k=self.topk, dim=0) + + encoder_output = img_embeds[i].repeat(self.topk, 1, 1) + encoder_att = torch.ones( + encoder_output.size()[:-1], dtype=torch.long).to(self.device) + output = self.text_backbone( + text_ids[topk_idx], + attention_mask=text_atts[topk_idx], + encoder_hidden_states=encoder_output, + encoder_attention_mask=encoder_att, + return_dict=True, + ) + score = self.multimodal_head( + (output.last_hidden_state[:, 0, :], ))[:, 1] + score_matrix_i2t[i, topk_idx] = score + topk_sim + + return score_matrix_i2t + + def compute_score_matrix_t2i(self, img_feats, img_embeds, text_feats, + text_ids, text_atts): + """Compare the score matrix for text-to-image retrieval. Every text + should compare to all the image features. + + Args: + img_feats (torch.Tensor): The input img feats tensor with shape + (M, C). M stands for numbers of samples on a single GPU. + img_embeds (torch.Tensor): The input img embeds tensor with shape + (M, C). M stands for numbers of samples on a single GPU. + text_feats (torch.Tensor): The input text feats tensor with shape + (N, C). N stands for numbers of all samples on all GPUs. + text_ids (torch.Tensor): The input tensor with shape (M, C). + text_atts (torch.Tensor): The input tensor with shape (M, C). + + Returns: + torch.Tensor: Score matrix of text-to-image retrieval. + """ + + # compute t2i sim matrix + sim_matrix_t2i = text_feats @ img_feats.t() + if self.fast_match: + return sim_matrix_t2i + + score_matrix_t2i = torch.full((text_feats.size(0), img_feats.size(0)), + -100.0).to(self.device) + for i in track_on_main_process( + range(text_feats.size(0)), 'Compute T2I scores...'): + sims = sim_matrix_t2i[i] + topk_sim, topk_idx = sims.topk(k=self.topk, dim=0) + + encoder_output = img_embeds[topk_idx] + encoder_att = torch.ones( + encoder_output.size()[:-1], dtype=torch.long).to(self.device) + output = self.text_backbone( + text_ids[i].repeat(self.topk, 1), + attention_mask=text_atts[i].repeat(self.topk, 1), + encoder_hidden_states=encoder_output, + encoder_attention_mask=encoder_att, + return_dict=True, + ) + score = self.multimodal_head( + (output.last_hidden_state[:, 0, :], ))[:, 1] + score_matrix_t2i[i, topk_idx] = score + topk_sim + + return score_matrix_t2i + + def _get_predictions(self, + result: torch.Tensor, + data_samples: List[DataSample], + mode: str = 'i2t'): + """Post-process the output of retriever. + + Args: + result (torch.Tensor): Score matrix of single retrieve, + either from image or text. + data_samples (List[DataSample], optional): The annotation + data of every samples. + mode (str): Retrieve mode, either `i2t` for image to text, or `t2i` + text to image. Defaults to `i2t`. + + Returns: + List[DataSample]: the raw data_samples with + the predicted results. + """ + + # create data sample if not exists + if data_samples is None: + data_samples = [DataSample() for _ in range(result.size(0))] + elif mode == 't2i': + # Process data samples to align with the num of texts. 
+ new_data_samples = [] + for sample in data_samples: + if isinstance(sample.text, (list, tuple)): + texts = sample.text + else: + texts = [sample.text] + for i, text in enumerate(texts): + new_sample = DataSample(text=text) + if 'gt_image_id' in sample: + new_sample.gt_label = sample.gt_image_id[i] + new_data_samples.append(new_sample) + assert len(new_data_samples) == result.size(0) + data_samples = new_data_samples + elif mode == 'i2t': + for sample in data_samples: + if 'gt_text_id' in sample: + sample.gt_label = sample.gt_text_id + else: + raise ValueError(f'Type {mode} is not supported.') + + for data_sample, score in zip(data_samples, result): + idx = score.argmax(keepdim=True).detach() + + data_sample.set_pred_score(score) + data_sample.set_pred_label(idx) + return data_samples + + # TODO: add temperaily + @torch.no_grad() + def copy_params(self): + for model_pair in self.model_pairs: + for param, param_m in zip(model_pair[0].parameters(), + model_pair[1].parameters()): + param_m.data.copy_(param.data) # initialize + param_m.requires_grad = False # not update by gradient + + @torch.no_grad() + def _momentum_update(self): + for model_pair in self.model_pairs: + for (name, + param), (name_m, + param_m) in zip(model_pair[0].named_parameters(), + model_pair[1].named_parameters()): + # hack to behave the same + if any([i in name for i in ['8', '9', '10', '11'] + ]) and 'layers' in name and any( + [i in name for i in ['attn', 'ffn']]): + param_m.data = param.data + else: + param_m.data = param_m.data * self.momentum + \ + param.data * (1.0 - self.momentum) diff --git a/mmpretrain/models/multimodal/blip/blip_vqa.py b/mmpretrain/models/multimodal/blip/blip_vqa.py new file mode 100644 index 0000000..d0f4e58 --- /dev/null +++ b/mmpretrain/models/multimodal/blip/blip_vqa.py @@ -0,0 +1,265 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional, Tuple, Union + +import torch +from mmengine.model import BaseModel + +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample + + +@MODELS.register_module() +class BlipVQA(BaseModel): + """BLIP VQA. + + Args: + tokenizer: (dict): The config for tokenizer. + vision_backbone (dict): Encoder for extracting image features. + multimodal_backbone (dict): Backbone for extracting + multi-modal features. We apply this part as VQA fusion module. + head (dict): The head module to calculate + loss from processed features. + data_preprocessor (Optional[dict]): The config for preprocessing input + data. If None or no specified type, it will use + `MutimodalDataPreprocessor` as type. + See :class:`MutimodalDataPreprocessor` for more details. + Defaults to None. + init_cfg (Optional[dict]): the config to control the initialization. + Defaults to None. 
+ """ + + def __init__(self, + tokenizer: dict, + vision_backbone: dict, + multimodal_backbone: dict, + head: dict, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + + if data_preprocessor is None: + data_preprocessor = {} + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super(BlipVQA, self).__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + self.tokenizer = TOKENIZER.build(tokenizer) + self.vision_backbone = MODELS.build(vision_backbone) + self.multimodal_backbone = MODELS.build(multimodal_backbone) + self.vqa_head = MODELS.build(head) + + @property + def device(self): + return next(self.parameters()).device + + def forward( + self, + images: torch.Tensor, + data_samples: Optional[List[DataSample]] = None, + mode: str = 'loss', + ): + """The unified entry for a forward process in both training and test. + + - "loss": For training. Forward and return a dict of losses according + to the given inputs and data samples. Note that this method doesn't + handle neither back propagation nor optimizer updating, which are + done in the :meth:`train_step`. + - "predict": For testing. Forward and return a list of data_sample that + contains pred_answer for each question. + + Args: + images (Tensor): A batch of images. The shape of it should be + (B, C, H, W) for images and (B, T, C, H, W) for videos. + data_samples (List[DataSample], optional): The annotation data of + every samples. Required when ``mode="loss"``. Defaults to None. + mode (str): Return what kind of value. Defaults to 'loss'. + + Returns: + The return type depends on ``mode``. + - If ``mode="loss"``, return a dict of tensor. + - If ``mode="predict"``, return a list of `DataSample` + """ + + if mode == 'loss': + return self.loss(images, data_samples) + elif mode == 'predict': + return self.predict(images, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def extract_feat(self, images: torch.Tensor) -> torch.Tensor: + """Extract features from the input tensor with shape (N, C, ..). + + Args: + images (Tensor): A batch of images. The shape of it should be + (B, C, H, W) for images and (B, T, C, H, W) for videos. + + Returns: + visual_embeds (Tensor): The output features. + """ + # extract visual feature + if images.ndim == 4: + visual_embeds = self.vision_backbone(images)[0] + elif images.ndim == 5: + # [batch, T, C, H, W] -> [batch * T, C, H, W] + bs = images.size(0) + images = images.reshape(-1, *images.shape[2:]) + visual_embeds = self.vision_backbone(images)[0] + # [batch * num_segs, L, dim] -> [batch, num_segs * L, dim] + visual_embeds = visual_embeds.reshape(bs, -1, + *visual_embeds.shape[2:]) + else: + raise ValueError( + f'Images with {images.ndim} dims is not supported.') + return visual_embeds + + def loss( + self, + images: torch.Tensor, + data_samples: Optional[List[DataSample]] = None, + ) -> Union[torch.Tensor, Tuple[torch.Tensor]]: + """generate train_loss from the input tensor and data_samples. + + Args: + images (Tensor): A batch of images. The shape of it should be + (B, C, H, W) for images and (B, T, C, H, W) for videos. + data_samples (List[DataSample], optional): The annotation + data of every samples. + + Returns: + Dict[torch.Tensor]: The losses features. 
+ """ + visual_embeds = self.extract_feat(images) + image_atts = torch.ones( + visual_embeds.size()[:-1], dtype=torch.long).to(self.device) + + questions = [] + for sample in data_samples: + questions.append(sample.get('question')) + questions = self.tokenizer( + questions, padding='longest', return_tensors='pt').to(self.device) + + questions.input_ids[:, 0] = \ + self.tokenizer.additional_special_tokens_ids[0] + + # multimodal fusion + multimodal_embeds = self.multimodal_backbone( + questions.input_ids, + attention_mask=questions.attention_mask, + encoder_hidden_states=visual_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + + # put answer from data_samples into tensor form + answer_raw_text = [] + for sample in data_samples: + answer_raw_text.extend(sample.gt_answer) + answer = self.tokenizer( + answer_raw_text, padding='longest', + return_tensors='pt').to(self.device) + answer_targets = answer.input_ids.masked_fill( + answer.input_ids == self.tokenizer.pad_token_id, -100) + for sample in data_samples: + # follow BLIP setting, set answer_weight to 0.2 for VG dataset. + if not hasattr(sample, 'gt_answer_weight'): + sample.gt_answer_weight = torch.tensor([0.2]) + else: + sample.gt_answer_weight = torch.tensor(sample.gt_answer_weight) + answer_weight = torch.cat( + [sample.gt_answer_weight for sample in data_samples], + dim=0).to(self.device) + answer_count = torch.tensor( + [len(sample.gt_answer) for sample in data_samples]).to(self.device) + + question_states, question_atts = [], [] + for b, n in enumerate(answer_count): + question_states += [multimodal_embeds.last_hidden_state[b]] * n + question_atts += [questions.attention_mask[b]] * n + + question_states = torch.stack(question_states, dim=0).to(self.device) + question_atts = torch.stack(question_atts, dim=0).to(self.device) + + head_feats = dict( + answer_input_ids=answer.input_ids, + answer_attention_mask=answer.attention_mask, + answer_weight=answer_weight, + answer_targets=answer_targets, + question_states=question_states, + question_atts=question_atts, + batch_size=len(data_samples), + ) + + losses = self.vqa_head.loss(head_feats) + + return losses + + def predict( + self, + images: torch.Tensor, + data_samples: Optional[List[DataSample]] = None, + ): + """update data_samples that contain pred_answer for each question. + + Args: + images (Tensor): A batch of images. The shape of it should be + (B, C, H, W) for images and (B, T, C, H, W) for videos. + data_samples (List[DataSample], optional): The annotation + data of every samples. + + Returns: + Dict[torch.Tensor]: The losses features. 
+ """ + visual_embeds = self.extract_feat(images) + image_atts = torch.ones( + visual_embeds.size()[:-1], dtype=torch.long).to(self.device) + + questions = [] + for sample in data_samples: + questions.append(sample.get('question')) + questions = self.tokenizer( + questions, padding='longest', return_tensors='pt').to(self.device) + + questions.input_ids[:, 0] = \ + self.tokenizer.additional_special_tokens_ids[0] + + # multimodal fusion + multimodal_embeds = self.multimodal_backbone( + questions.input_ids, + attention_mask=questions.attention_mask, + encoder_hidden_states=visual_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + + if self.vqa_head.inference_method == 'rank': + answer_candidates = self.tokenizer( + self.vqa_head.answer_list, + padding='longest', + return_tensors='pt').to(self.device) + answer_candidates.input_ids[:, 0] = self.tokenizer.bos_token_id + elif self.vqa_head.inference_method == 'generate': + answer_candidates = None + + head_feats = dict( + multimodal_embeds=multimodal_embeds.last_hidden_state, + question_atts=questions.attention_mask, + answer_candidates=answer_candidates, + bos_token_id=self.tokenizer.bos_token_id, + sep_token_id=self.tokenizer.sep_token_id, + pad_token_id=self.tokenizer.pad_token_id, + ) + + if self.vqa_head.inference_method == 'rank': + answers = self.vqa_head.predict(head_feats) + for answer, data_sample in zip(answers, data_samples): + data_sample.pred_answer = answer + + elif self.vqa_head.inference_method == 'generate': + outputs = self.vqa_head.predict(head_feats) + for output, data_sample in zip(outputs, data_samples): + data_sample.pred_answer = self.tokenizer.decode( + output, skip_special_tokens=True) + + return data_samples diff --git a/mmpretrain/models/multimodal/blip/language_model.py b/mmpretrain/models/multimodal/blip/language_model.py new file mode 100644 index 0000000..48605a9 --- /dev/null +++ b/mmpretrain/models/multimodal/blip/language_model.py @@ -0,0 +1,1320 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
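+# BERT-style text encoder/decoder used by the BLIP models. The classes below
+# are adapted from the HuggingFace ``transformers`` BERT implementation; the
+# ``transformers`` imports are wrapped in a try/except so that this module can
+# still be imported when the package is not installed (the imported symbols
+# simply fall back to ``None``).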
+ +# flake8: noqa + +import math +from typing import Tuple + +import torch +import torch.nn as nn +from torch import Tensor, device + +try: + from transformers.activations import ACT2FN + from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions) + from transformers.modeling_utils import (PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer) + from transformers.models.bert.configuration_bert import BertConfig +except: + ACT2FN = None + BaseModelOutputWithPastAndCrossAttentions = None + BaseModelOutputWithPoolingAndCrossAttentions = None + CausalLMOutputWithCrossAttentions = None + PreTrainedModel = None + apply_chunking_to_forward = None + find_pruneable_heads_and_indices = None + prune_linear_layer = None + BertConfig = None + +from mmpretrain.registry import MODELS + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, + config.hidden_size, + padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, + config.hidden_size) + + if config.add_type_embeddings: + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, + config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + 'position_ids', + torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, + 'position_embedding_type', + 'absolute') + + self.config = config + + def forward( + self, + input_ids=None, + token_type_ids=None, + position_ids=None, + inputs_embeds=None, + past_key_values_length=0, + ): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length: + seq_length + + past_key_values_length] + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + if token_type_ids is not None: + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + else: + embeddings = inputs_embeds + + if self.position_embedding_type == 'absolute': + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertPooler(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. 
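+        # The first token is typically the [CLS] token; its hidden state is
+        # passed through a dense layer and a tanh activation.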
+ first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPreTrainedModel(PreTrainedModel): + """An abstract class to handle weights initialization and a simple + interface for downloading and loading pretrained models.""" + + config_class = BertConfig + base_model_prefix = 'bert' + _keys_to_ignore_on_load_missing = [r'position_ids'] + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_( + mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +class BertSelfAttention(nn.Module): + + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and not hasattr( + config, 'embedding_size'): + raise ValueError( + 'The hidden size (%d) is not a multiple of the number of attention ' + 'heads (%d)' % + (config.hidden_size, config.num_attention_heads)) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / + config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, + 'position_embedding_type', + 'absolute') + if (self.position_embedding_type == 'relative_key' + or self.position_embedding_type == 'relative_key_query'): + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding( + 2 * config.max_position_embeddings - 1, + self.attention_head_size) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + ( + self.num_attention_heads, + self.attention_head_size, + ) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
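+        # For cross-attention, the key/value projections were built with
+        # ``config.encoder_width`` input features (see ``__init__``), while the
+        # queries are always computed from ``hidden_states``.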
+ is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + key_layer = self.transpose_for_scores( + self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores( + self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, + key_layer.transpose(-1, -2)) + + if (self.position_embedding_type == 'relative_key' + or self.position_embedding_type == 'relative_key_query'): + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange( + seq_length, dtype=torch.long, + device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange( + seq_length, dtype=torch.long, + device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding( + distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to( + dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == 'relative_key': + relative_position_scores = torch.einsum( + 'bhld,lrd->bhlr', query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == 'relative_key_query': + relative_position_scores_query = torch.einsum( + 'bhld,lrd->bhlr', query_layer, positional_embedding) + relative_position_scores_key = torch.einsum( + 'bhrd,lrd->bhlr', key_layer, positional_embedding) + attention_scores = ( + attention_scores + relative_position_scores_query + + relative_position_scores_key) + + attention_scores = attention_scores / math.sqrt( + self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
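+        # Concretely, individual query-to-key attention weights are randomly
+        # zeroed during training, so some tokens are simply not attended to.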
+ attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + ( + self.all_head_size, ) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = ((context_layer, attention_probs) if output_attentions else + (context_layer, )) + + outputs = outputs + (past_key_value, ) + return outputs + + +class BertSelfOutput(nn.Module): + + def __init__(self, config, twin=False, merge=False): + super().__init__() + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + if twin: + self.dense0 = nn.Linear(config.hidden_size, config.hidden_size) + self.dense1 = nn.Linear(config.hidden_size, config.hidden_size) + else: + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if merge: + self.act = ACT2FN[config.hidden_act] + self.merge_layer = nn.Linear(config.hidden_size * 2, + config.hidden_size) + self.merge = True + else: + self.merge = False + + def forward(self, hidden_states, input_tensor): + if type(hidden_states) == list: + hidden_states0 = self.dense0(hidden_states[0]) + hidden_states1 = self.dense1(hidden_states[1]) + if self.merge: + hidden_states = self.merge_layer( + torch.cat([hidden_states0, hidden_states1], dim=-1)) + else: + hidden_states = (hidden_states0 + hidden_states1) / 2 + else: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + + def __init__(self, config, is_cross_attention=False, layer_num=-1): + super().__init__() + is_nlvr = is_cross_attention and getattr(config, 'nlvr', False) + if is_nlvr: + self.self0 = BertSelfAttention(config, is_nlvr) + self.self1 = BertSelfAttention(config, is_nlvr) + else: + self.self = BertSelfAttention(config, is_cross_attention) + self.output = BertSelfOutput( + config, + twin=is_nlvr, + merge=(is_nlvr and layer_num >= 6), + ) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, + self.self.num_attention_heads, + self.self.attention_head_size, + self.pruned_heads, + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len( + heads) + self.self.all_head_size = ( + self.self.attention_head_size * self.self.num_attention_heads) + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + if type(encoder_hidden_states) == list: + self_outputs0 = self.self0( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states[0], + encoder_attention_mask[0], + past_key_value, + output_attentions, + ) + self_outputs1 = self.self1( + 
hidden_states, + attention_mask, + head_mask, + encoder_hidden_states[1], + encoder_attention_mask[1], + past_key_value, + output_attentions, + ) + attention_output = self.output( + [self_outputs0[0], self_outputs1[0]], hidden_states) + + outputs = (attention_output, ) + self_outputs0[ + 1:] # add attentions if we output them + else: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output, + ) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.layer_num = layer_num + + # compatibility for ALBEF and BLIP + try: + # ALBEF & ALPRO + fusion_layer = self.config.fusion_layer + add_cross_attention = ( + fusion_layer <= layer_num and self.config.add_cross_attention) + + self.fusion_layer = fusion_layer + except AttributeError: + # BLIP + self.fusion_layer = self.config.num_hidden_layers + add_cross_attention = self.config.add_cross_attention + + # if self.config.add_cross_attention: + if self.config.add_cross_attention: + self.crossattention = BertAttention( + config, + is_cross_attention=self.config.add_cross_attention, + layer_num=layer_num, + ) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + mode=None, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = ( + past_key_value[:2] if past_key_value is not None else None) + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + + # TODO line 482 in albef/models/xbert.py + # compatibility for ALBEF and BLIP + if mode in ['multimodal', 'fusion'] and hasattr( + self, 'crossattention'): + assert ( + encoder_hidden_states is not None + ), 
'encoder_hidden_states must be given for cross-attention layers' + + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = (outputs + cross_attention_outputs[1:-1] + ) # add cross attentions if we output attention weights + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output, + ) + outputs = (layer_output, ) + outputs + + outputs = outputs + (present_key_value, ) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList( + [BertLayer(config, i) for i in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + mode='multimodal', + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = (() if output_attentions + and self.config.add_cross_attention else None) + + next_decoder_cache = () if use_cache else None + + try: + # ALBEF + fusion_layer = self.config.fusion_layer + except AttributeError: + # BLIP + fusion_layer = self.config.num_hidden_layers + + if mode == 'text': + start_layer = 0 + # output_layer = self.config.fusion_layer + output_layer = fusion_layer + + elif mode == 'fusion': + # start_layer = self.config.fusion_layer + start_layer = fusion_layer + output_layer = self.config.num_hidden_layers + + elif mode == 'multimodal': + start_layer = 0 + output_layer = self.config.num_hidden_layers + + # compatibility for ALBEF and BLIP + # for i in range(self.config.num_hidden_layers): + for i in range(start_layer, output_layer): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[ + i] if past_key_values is not None else None + + # TODO pay attention to this. + if self.gradient_checkpointing and self.training: + + if use_cache: + # TODO: logger here + # logger.warn( + # "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ # ) + use_cache = False + + def create_custom_forward(module): + + def custom_forward(*inputs): + return module(*inputs, past_key_value, + output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + mode=mode, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + mode=mode, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1], ) + if output_attentions: + all_self_attentions = all_self_attentions + ( + layer_outputs[1], ) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + if not return_dict: + return tuple(v for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] if v is not None) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPredictionHeadTransform(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + + def __init__(self, config): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear( + config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +@MODELS.register_module() +class BertModel(BertPreTrainedModel): + """The model can behave as an encoder (with only self-attention) as well as + a decoder, in which case a layer of cross-attention is added between the + self-attention layers, following the architecture described in `Attention + is all you need `__ by Ashish Vaswani, + Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. + + Gomez, Lukasz Kaiser and Illia Polosukhin. argument and + :obj:`add_cross_attention` set to :obj:`True`; an + :obj:`encoder_hidden_states` is then expected as an input to the forward + pass. 
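+
+    Cross-attention layers are created when the configuration enables
+    ``add_cross_attention``; they are used when :meth:`forward` is called with
+    ``mode='multimodal'`` or ``mode='fusion'`` and ``encoder_hidden_states`` is
+    provided. Causal (decoder-style) masking is controlled by the
+    ``is_decoder`` argument of :meth:`forward`.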
+ """ + + def __init__(self, config, add_pooling_layer=True): + if not isinstance(config, BertConfig): + config = BertConfig.from_dict(config) + + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """Prunes heads of the model. + + heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def get_extended_attention_mask( + self, + attention_mask: Tensor, + input_shape: Tuple[int], + device: device, + is_decoder: bool, + ) -> Tensor: + """Makes broadcastable attention and causal masks so that future and + masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + + seq_ids = torch.arange(seq_length, device=device) + causal_mask = ( + seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= + seq_ids[None, :, None]) + # in case past_key_values are used we need to add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[ + 1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones( + (batch_size, seq_length, prefix_seq_len), + device=device, + dtype=causal_mask.dtype, + ), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = ( + causal_mask[:, None, :, :] * + attention_mask[:, None, None, :]) + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + 'Wrong shape for input_ids (shape {}) or attention_mask (shape {})' + .format(input_shape, attention_mask.shape)) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. 
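+        # For example, a padding mask ``[[1, 1, 0]]`` becomes the additive mask
+        # ``[[[[0., 0., -10000.]]]]``, so the padded position contributes
+        # (almost) nothing after the softmax.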
+ extended_attention_mask = extended_attention_mask.to( + dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). 
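+        is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
+            Whether to build a causal (decoder-style) attention mask in addition to
+            the padding mask; key/value caching is only enabled in this case.
+        mode (:obj:`str`, `optional`, defaults to ``'multimodal'``):
+            Which encoder layers to run: ``'text'`` for the layers before the fusion
+            layer, ``'fusion'`` for the layers from the fusion layer onwards, or
+            ``'multimodal'`` for all layers. Cross-attention over
+            :obj:`encoder_hidden_states` is only performed for ``'fusion'`` and
+            ``'multimodal'``.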
+ """ + output_attentions = ( + output_attentions if output_attentions is not None else + self.config.output_attentions) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else + self.config.output_hidden_states) + return_dict = ( + return_dict + if return_dict is not None else self.config.use_return_dict) + + if is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError( + 'You cannot specify both input_ids and inputs_embeds at the same time' + ) + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + device = input_ids.device + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = inputs_embeds.device + elif encoder_embeds is not None: + input_shape = encoder_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = encoder_embeds.device + else: + raise ValueError( + 'You have to specify either input_ids or inputs_embeds or encoder_embeds' + ) + + # past_key_values_length + past_key_values_length = ( + past_key_values[0][0].shape[2] + if past_key_values is not None else 0) + + if attention_mask is None: + attention_mask = torch.ones( + ((batch_size, seq_length + past_key_values_length)), + device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + extended_attention_mask: torch.Tensor = self.get_extended_attention_mask( + attention_mask, input_shape, device, is_decoder) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[ + 0].size() + else: + ( + encoder_batch_size, + encoder_sequence_length, + _, + ) = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, + encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [ + self.invert_attention_mask(mask) + for mask in encoder_attention_mask + ] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones( + encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, + self.config.num_hidden_layers) + + if encoder_embeds is None: + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + else: + embedding_output = encoder_embeds + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + 
encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + mode=mode, + ) + sequence_output = encoder_outputs[0] + pooled_output = ( + self.pooler(sequence_output) if self.pooler is not None else None) + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +class BaseEncoder(nn.Module): + """Base class for primitive encoders, such as ViT, TimeSformer, etc.""" + + def __init__(self): + super().__init__() + + def forward_features(self, samples, **kwargs): + raise NotImplementedError + + @property + def device(self): + return list(self.parameters())[0].device + + +@MODELS.register_module() +class XBertEncoder(BertModel, BaseEncoder): + + def __init__(self, med_config, from_pretrained=False): + + med_config = BertConfig.from_dict(med_config) + super().__init__(config=med_config, add_pooling_layer=False) + + def forward_automask(self, tokenized_text, visual_embeds, **kwargs): + image_atts = torch.ones( + visual_embeds.size()[:-1], dtype=torch.long).to(self.device) + + text = tokenized_text + text_output = super().forward( + text.input_ids, + attention_mask=text.attention_mask, + encoder_hidden_states=visual_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + + return text_output + + def forward_text(self, tokenized_text, **kwargs): + text = tokenized_text + token_type_ids = kwargs.get('token_type_ids', None) + + text_output = super().forward( + text.input_ids, + attention_mask=text.attention_mask, + token_type_ids=token_type_ids, + return_dict=True, + mode='text', + ) + + return text_output + + +@MODELS.register_module() +class Linear(torch.nn.Linear): + """Wrapper for linear function.""" + + +@MODELS.register_module() +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r'pooler'] + _keys_to_ignore_on_load_missing = [ + r'position_ids', r'predictions.decoder.bias' + ] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + return_logits=False, + is_decoder=True, + reduction='mean', + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. 
+ encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are + ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + Returns: + Example:: + >>> from transformers import BertTokenizer, + BertLMHeadModel, BertConfig + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained( + 'bert-base-cased') + >>> config = BertConfig.from_pretrained( + "bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained( + 'bert-base-cased', config=config) + >>> inputs = tokenizer( + "Hello, my dog is cute", + return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = ( + return_dict + if return_dict is not None else self.config.use_return_dict) + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + mode=mode, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, : + -1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = torch.nn.CrossEntropyLoss( + reduction=reduction, label_smoothing=0.1) + lm_loss = loss_fct( + shifted_prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1)) + if reduction == 'none': + lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) + + if not return_dict: + output = (prediction_scores, ) + outputs[2:] + return ((lm_loss, ) + output) if lm_loss is not None else output + + return 
CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, + input_ids, + past=None, + attention_mask=None, + **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, + # the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + 'input_ids': + input_ids, + 'attention_mask': + attention_mask, + 'past_key_values': + past, + 'encoder_hidden_states': + model_kwargs.get('encoder_hidden_states', None), + 'encoder_attention_mask': + model_kwargs.get('encoder_attention_mask', None), + 'is_decoder': + True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple( + past_state.index_select(0, beam_idx) + for past_state in layer_past), ) + return reordered_past + + +@MODELS.register_module() +class XBertLMHeadDecoder(BertLMHeadModel): + """This class decouples the decoder forward logic from the VL model. + + In this way, different VL models can share this decoder as long as they + feed encoder_embeds as required. + """ + + def __init__(self, med_config): + self.med_config = BertConfig.from_dict(med_config) + super(XBertLMHeadDecoder, self).__init__(config=self.med_config) + + def generate_from_encoder(self, + tokenized_prompt, + visual_embeds, + sep_token_id, + pad_token_id, + use_nucleus_sampling=False, + num_beams=3, + max_length=30, + min_length=10, + top_p=0.9, + repetition_penalty=1.0, + **kwargs): + + if not use_nucleus_sampling: + num_beams = num_beams + visual_embeds = visual_embeds.repeat_interleave(num_beams, dim=0) + + image_atts = torch.ones( + visual_embeds.size()[:-1], dtype=torch.long).to(self.device) + + model_kwargs = { + 'encoder_hidden_states': visual_embeds, + 'encoder_attention_mask': image_atts, + } + + if use_nucleus_sampling: + # nucleus sampling + outputs = self.generate( + input_ids=tokenized_prompt.input_ids, + max_length=max_length, + min_length=min_length, + do_sample=True, + top_p=top_p, + num_return_sequences=1, + eos_token_id=sep_token_id, + pad_token_id=pad_token_id, + repetition_penalty=1.1, + **model_kwargs) + else: + # beam search + outputs = self.generate( + input_ids=tokenized_prompt.input_ids, + max_length=max_length, + min_length=min_length, + num_beams=num_beams, + eos_token_id=sep_token_id, + pad_token_id=pad_token_id, + repetition_penalty=repetition_penalty, + **model_kwargs) + + return outputs diff --git a/mmpretrain/models/multimodal/blip2/Qformer.py b/mmpretrain/models/multimodal/blip2/Qformer.py new file mode 100644 index 0000000..4b1c7d1 --- /dev/null +++ b/mmpretrain/models/multimodal/blip2/Qformer.py @@ -0,0 +1,773 @@ +# flake8: noqa +""" + * Copyright (c) 2023, salesforce.com, inc. 
+""" +from typing import Tuple + +import torch +import torch.utils.checkpoint +from torch import Tensor, device, nn +from torch.nn import CrossEntropyLoss +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions) +from transformers.modeling_utils import apply_chunking_to_forward +from transformers.models.bert.configuration_bert import BertConfig +from transformers.utils import logging + +from mmpretrain.registry import MODELS +from ..blip.language_model import (BertAttention, BertIntermediate, + BertOnlyMLMHead, BertOutput, BertPooler, + BertPreTrainedModel) + +logger = logging.get_logger(__name__) + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, + config.hidden_size, + padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, + config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + 'position_ids', + torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, + 'position_embedding_type', + 'absolute') + + self.config = config + + def forward( + self, + input_ids=None, + position_ids=None, + query_embeds=None, + past_key_values_length=0, + ): + if input_ids is not None: + seq_length = input_ids.size()[1] + else: + seq_length = 0 + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length: + seq_length + + past_key_values_length].clone() + + if input_ids is not None: + embeddings = self.word_embeddings(input_ids) + if self.position_embedding_type == 'absolute': + position_embeddings = self.position_embeddings(position_ids) + embeddings = embeddings + position_embeddings + + if query_embeds is not None: + embeddings = torch.cat((query_embeds, embeddings), dim=1) + else: + embeddings = query_embeds + + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertLayer(nn.Module): + + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.layer_num = layer_num + if (self.config.add_cross_attention + and layer_num % self.config.cross_attention_freq == 0): + self.crossattention = BertAttention( + config, is_cross_attention=self.config.add_cross_attention) + self.has_cross_attention = True + else: + self.has_cross_attention = False + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + self.intermediate_query = BertIntermediate(config) + self.output_query = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + query_length=0, + ): + # decoder uni-directional self-attention cached 
key/values tuple is at positions 1,2 + self_attn_past_key_value = ( + past_key_value[:2] if past_key_value is not None else None) + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:-1] + + present_key_value = self_attention_outputs[-1] + + if query_length > 0: + query_attention_output = attention_output[:, :query_length, :] + + if self.has_cross_attention: + assert ( + encoder_hidden_states is not None + ), 'encoder_hidden_states must be given for cross-attention layers' + cross_attention_outputs = self.crossattention( + query_attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + query_attention_output = cross_attention_outputs[0] + outputs = ( + outputs + cross_attention_outputs[1:-1] + ) # add cross attentions if we output attention weights + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk_query, + self.chunk_size_feed_forward, + self.seq_len_dim, + query_attention_output, + ) + if attention_output.shape[1] > query_length: + layer_output_text = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output[:, query_length:, :], + ) + layer_output = torch.cat([layer_output, layer_output_text], + dim=1) + else: + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output, + ) + outputs = (layer_output, ) + outputs + + outputs = outputs + (present_key_value, ) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + def feed_forward_chunk_query(self, attention_output): + intermediate_output = self.intermediate_query(attention_output) + layer_output = self.output_query(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList( + [BertLayer(config, i) for i in range(config.num_hidden_layers)]) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + query_length=0, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = (() if output_attentions + and self.config.add_cross_attention else None) + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[ + i] if past_key_values is not None else None + + if getattr(self.config, 'gradient_checkpointing', + False) and self.training: + + if use_cache: + logger.warn( + '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...' 
+ ) + use_cache = False + + def create_custom_forward(module): + + def custom_forward(*inputs): + return module(*inputs, past_key_value, + output_attentions, query_length) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + query_length, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1], ) + if output_attentions: + all_self_attentions = all_self_attentions + ( + layer_outputs[1], ) + all_cross_attentions = all_cross_attentions + ( + layer_outputs[2], ) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + if not return_dict: + return tuple(v for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] if v is not None) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertModel(BertPreTrainedModel): + """The model can behave as an encoder (with only self-attention) as well as + a decoder, in which case a layer of cross-attention is added between the + self-attention layers, following the architecture described in `Attention + is all you need `__ by Ashish Vaswani, + Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. + + Gomez, Lukasz Kaiser and Illia Polosukhin. argument and + :obj:`add_cross_attention` set to :obj:`True`; an + :obj:`encoder_hidden_states` is then expected as an input to the forward + pass. + """ + + def __init__(self, config, add_pooling_layer=False): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """Prunes heads of the model. + + heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def get_extended_attention_mask( + self, + attention_mask: Tensor, + input_shape: Tuple[int], + device: device, + is_decoder: bool, + has_query: bool = False, + ) -> Tensor: + """Makes broadcastable attention and causal masks so that future and + masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. 
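+
+            Compared to the BLIP implementation in ``language_model.py``, this
+            version additionally accepts ``has_query``: when query tokens are
+            prepended to the text, the causal mask is extended with a UniLM-style
+            prefix block (see the in-line comments below).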
+ """ + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + + seq_ids = torch.arange(seq_length, device=device) + causal_mask = ( + seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= + seq_ids[None, :, None]) + + # add a prefix ones mask to the causal mask + # causal and attention masks must have same type with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[ + 1] - causal_mask.shape[1] + if has_query: # UniLM style attention mask + causal_mask = torch.cat( + [ + torch.zeros( + (batch_size, prefix_seq_len, seq_length), + device=device, + dtype=causal_mask.dtype, + ), + causal_mask, + ], + axis=1, + ) + causal_mask = torch.cat( + [ + torch.ones( + (batch_size, causal_mask.shape[1], + prefix_seq_len), + device=device, + dtype=causal_mask.dtype, + ), + causal_mask, + ], + axis=-1, + ) + extended_attention_mask = ( + causal_mask[:, None, :, :] * + attention_mask[:, None, None, :]) + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + 'Wrong shape for input_ids (shape {}) or attention_mask (shape {})' + .format(input_shape, attention_mask.shape)) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to( + dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + query_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + """ + output_attentions = ( + output_attentions if output_attentions is not None else + self.config.output_attentions) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else + self.config.output_hidden_states) + return_dict = ( + return_dict + if return_dict is not None else self.config.use_return_dict) + + # use_cache = use_cache if use_cache is not None else self.config.use_cache + if input_ids is None: + assert ( + query_embeds is not None + ), 'You have to specify query_embeds when input_ids is None' + + # past_key_values_length + past_key_values_length = ( + past_key_values[0][0].shape[2] - + self.config.query_length if past_key_values is not None else 0) + + query_length = query_embeds.shape[1] if query_embeds is not None else 0 + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + query_embeds=query_embeds, + past_key_values_length=past_key_values_length, + ) + + input_shape = embedding_output.size()[:-1] + batch_size, seq_length = input_shape + device = embedding_output.device + + if attention_mask is None: + attention_mask = torch.ones( + ((batch_size, seq_length + past_key_values_length)), + device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
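+        # In the decoder branch below, `input_ids.shape` covers only the text
+        # tokens, so the causal mask is built over the text positions. When
+        # `query_embeds` are present, `get_extended_attention_mask` prepends
+        # them as a prefix: text tokens attend to the whole prefix plus a
+        # causal window over the text, while query positions attend only to
+        # the prefix (UniLM-style prefix masking).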
+ if is_decoder: + extended_attention_mask = self.get_extended_attention_mask( + attention_mask, + input_ids.shape, + device, + is_decoder, + has_query=(query_embeds is not None), + ) + else: + extended_attention_mask = self.get_extended_attention_mask( + attention_mask, input_shape, device, is_decoder) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[ + 0].size() + else: + ( + encoder_batch_size, + encoder_sequence_length, + _, + ) = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, + encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [ + self.invert_attention_mask(mask) + for mask in encoder_attention_mask + ] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones( + encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, + self.config.num_hidden_layers) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + query_length=query_length, + ) + sequence_output = encoder_outputs[0] + pooled_output = ( + self.pooler(sequence_output) if self.pooler is not None else None) + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r'pooler'] + _keys_to_ignore_on_load_missing = [ + r'position_ids', r'predictions.decoder.bias' + ] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + if self.cls is not None: + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + query_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=True, + output_attentions=None, + output_hidden_states=None, + 
return_dict=None, + return_logits=False, + is_decoder=True, + reduction='mean', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are + ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 + tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). 
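+        return_logits (:obj:`bool`, `optional`, defaults to :obj:`False`):
+            If set to :obj:`True`, return only the next-token prediction logits
+            (all positions except the last) instead of a model output object.
+        reduction (:obj:`str`, `optional`, defaults to :obj:`'mean'`):
+            Reduction applied to the language modeling cross-entropy loss. With
+            :obj:`'none'`, the per-token losses are summed for each sequence.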
+ Returns: + Example:: + >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = ( + return_dict + if return_dict is not None else self.config.use_return_dict) + if labels is not None: + use_cache = False + if past_key_values is not None: + query_embeds = None + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + query_embeds=query_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + ) + + sequence_output = outputs[0] + if query_embeds is not None: + sequence_output = outputs[0][:, query_embeds.shape[1]:, :] + prediction_scores = self.cls(sequence_output) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, : + -1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss( + reduction=reduction, label_smoothing=0.1) + lm_loss = loss_fct( + shifted_prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1), + ) + if reduction == 'none': + lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) + + if not return_dict: + output = (prediction_scores, ) + outputs[2:] + return ((lm_loss, ) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, + input_ids, + query_embeds, + past=None, + attention_mask=None, + **model_kwargs): + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_ids.shape) + query_mask = input_ids.new_ones(query_embeds.shape[:-1]) + attention_mask = torch.cat([query_mask, attention_mask], dim=-1) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + 'input_ids': + input_ids, + 'query_embeds': + query_embeds, + 'attention_mask': + attention_mask, + 'past_key_values': + past, + 'encoder_hidden_states': + model_kwargs.get('encoder_hidden_states', None), + 'encoder_attention_mask': + model_kwargs.get('encoder_attention_mask', None), + 'is_decoder': + True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple( + past_state.index_select(0, beam_idx) + for past_state in layer_past), ) + return reordered_past + + +@MODELS.register_module() +class Qformer(BertLMHeadModel): + + def __init__(self, model_style: str, vision_model_width: int, + add_cross_attention: bool, cross_attention_freq: int, + 
num_query_token: int) -> None: + + config = BertConfig.from_pretrained(model_style) + config.add_cross_attention = add_cross_attention + config.encoder_width = vision_model_width + config.cross_attention_freq = cross_attention_freq + config.query_length = num_query_token + super().__init__(config) diff --git a/mmpretrain/models/multimodal/blip2/__init__.py b/mmpretrain/models/multimodal/blip2/__init__.py new file mode 100644 index 0000000..b5695f2 --- /dev/null +++ b/mmpretrain/models/multimodal/blip2/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .blip2_caption import Blip2Caption +from .blip2_opt_vqa import Blip2VQA +from .blip2_retriever import Blip2Retrieval +from .modeling_opt import OPTForCausalLM +from .Qformer import Qformer + +__all__ = [ + 'Blip2Caption', 'Blip2Retrieval', 'Blip2VQA', 'OPTForCausalLM', 'Qformer' +] diff --git a/mmpretrain/models/multimodal/blip2/blip2_caption.py b/mmpretrain/models/multimodal/blip2/blip2_caption.py new file mode 100644 index 0000000..acf6948 --- /dev/null +++ b/mmpretrain/models/multimodal/blip2/blip2_caption.py @@ -0,0 +1,315 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional + +import torch +from mmengine.model import BaseModel +from torch import nn + +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample + + +@MODELS.register_module() +class Blip2Caption(BaseModel): + """BLIP2 Caption. + + Module for BLIP2 Caption task. + + Args: + vision_backbone (dict): The config dict for vision backbone. + text_backbone (dict): The config dict for text backbone. + multimodal_backbone (dict): The config dict for multimodal backbone. + vision_neck (dict): The config dict for vision neck. + tokenizer: (Optional[dict]): The config for tokenizer. + Defaults to None. + prompt (str): Prompt used for training and eval. + Defaults to ''. + max_txt_len (int): Max text length of input text. + num_captions (int): Number of captions to be generated for each image. + data_preprocessor (Optional[dict]): The config for preprocessing input + data. If None or no specified type, it will use + "MultiModalDataPreprocessor" as type. + See :class:`MultiModalDataPreprocessor` for more details. + Defaults to None. + init_cfg (Optional[dict]): the config to control the initialization. + Defaults to None. 
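+
+    Example (an illustrative sketch only; the concrete values below are
+    assumptions rather than a verified configuration)::
+
+        >>> multimodal_backbone = dict(
+        ...     type='Qformer',
+        ...     model_style='bert-base-uncased',
+        ...     vision_model_width=1408,
+        ...     add_cross_attention=True,
+        ...     cross_attention_freq=2,
+        ...     num_query_token=32)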
+ """ + _no_split_modules = ['BEiTViT', 'OPTDecoderLayer', 'BertLayer'] + + def __init__(self, + vision_backbone: dict, + text_backbone: dict, + multimodal_backbone: dict, + vision_neck: dict, + tokenizer: Optional[dict] = None, + prompt: str = '', + max_txt_len: int = 20, + num_captions: int = 1, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None) -> None: + if data_preprocessor is None: + data_preprocessor = {} + if isinstance(data_preprocessor, dict): + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super().__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + self.tokenizer = TOKENIZER.build(tokenizer) + self.eos_token_id = self.tokenizer( + '\n', add_special_tokens=False).input_ids[0] + + self.vision_backbone = MODELS.build(vision_backbone) + self.ln_vision_backbone = nn.LayerNorm(self.vision_backbone.embed_dims) + + self.vision_neck = MODELS.build(vision_neck) + + self.text_backbone = MODELS.build(text_backbone) + + self.multimodal_backbone = MODELS.build(multimodal_backbone) + self.multimodal_backbone.cls = None + self.multimodal_backbone.bert.embeddings.word_embeddings = None + self.multimodal_backbone.bert.embeddings.position_embeddings = None + for layer in self.multimodal_backbone.bert.encoder.layer: + layer.output = None + layer.intermediate = None + + self.prompt = prompt + self.max_txt_len = max_txt_len + self.num_captions = num_captions + prompt_tokens = self.tokenizer(prompt, return_tensors='pt') + self.prompt_length = prompt_tokens.attention_mask.sum(1) + + self.query_tokens = nn.Parameter( + torch.zeros(1, self.multimodal_backbone.bert.config.query_length, + self.multimodal_backbone.bert.config.hidden_size)) + self.query_tokens.data.normal_( + mean=0.0, + std=self.multimodal_backbone.bert.config.initializer_range) + + # freeze the text backbone + for _, param in self.text_backbone.named_parameters(): + param.requires_grad = False + + if hasattr(self, 'register_load_state_dict_post_hook'): + self.register_load_state_dict_post_hook( + self._ignore_loading_llm_keys_hook) + + if hasattr(self, '_register_state_dict_hook'): + self._register_state_dict_hook(self._igonre_saving_llm_keys_hook) + + def forward(self, + images: torch.Tensor, + data_samples: Optional[List] = None, + mode: str = 'loss'): + """The unified entry for a forward process in both training and test. + The method should accept two modes: "predict" and "loss": + + - "predict": Forward and return the predictions, which are fully + processed to a list of :obj:`DataSample`. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. + + Args: + images (torch.Tensor): pre_processed img tensor (N, C, ...). + data_samples (List[DataSample], optional): + mode (str): Return what kind of value. Defaults to 'loss'. + + Returns: + The return type depends on ``mode``. + - If ``mode="loss"``, return a dict of tensor. + - If ``mode="predict"``, return a list of + :obj:`mmpretrain.structures.DataSample`. 
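+
+        Example (illustrative; assumes an already built ``model`` together
+        with preprocessed ``images`` and ``data_samples``)::
+
+            >>> losses = model(images, data_samples, mode='loss')
+            >>> predictions = model(images, data_samples, mode='predict')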
+ """ + if mode == 'loss': + return self.loss(images, data_samples) + elif mode == 'predict': + return self.predict(images, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def loss(self, + images: torch.Tensor, + data_samples: Optional[list] = None, + **kwargs) -> Dict[str, torch.Tensor]: + """The forward function in training. + + Args: + images (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample], optional): The annotation + data of every samples. Defaults to None. + **kwargs: Other keyword arguments accepted by the ``loss`` + method of :attr:`head`. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + + # extract image features + image_embeds = self.ln_vision_backbone(self.vision_backbone(images)[0]) + image_atts = torch.ones( + image_embeds.size()[:-1], + dtype=torch.long, + ).to(images.device) + + # distill image features to query tokens + query_tokens = self.query_tokens.expand(image_embeds.size(0), -1, -1) + query_outputs = self.multimodal_backbone.bert( + query_embeds=query_tokens, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + inputs_opt = self.vision_neck([query_outputs.last_hidden_state]) + attns_opt = torch.ones( + inputs_opt.size()[:-1], dtype=torch.long).to(images.device) + + self.tokenizer.padding_side = 'right' + + prompt = [ + self.prompt + data_sample.gt_caption + '\n' + for data_sample in data_samples + ] + + opt_tokens = self.tokenizer( + prompt, + return_tensors='pt', + padding='longest', + truncation=True, + max_length=self.max_txt_len, + ).to(images.device) + + targets = opt_tokens.input_ids.masked_fill( + opt_tokens.input_ids == self.tokenizer.pad_token_id, -100) + if self.prompt: + targets[:, :self.prompt_length] = -100 + + empty_targets = ( + torch.ones(attns_opt.size(), + dtype=torch.long).to(images.device).fill_(-100)) + targets = torch.cat([empty_targets, targets], dim=1) + + inputs_embeds = ( + self.text_backbone.model.decoder.embed_tokens( + opt_tokens.input_ids)) + inputs_embeds = torch.cat([inputs_opt, inputs_embeds], dim=1) + attention_mask = torch.cat([attns_opt, opt_tokens.attention_mask], + dim=1) + + outputs = self.text_backbone( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + return_dict=True, + labels=targets, + ) + loss = outputs.loss + + return {'loss': loss} + + def predict(self, + images: torch.Tensor, + data_samples: Optional[list] = None, + **kwargs) -> List[DataSample]: + """Predict captions from a batch of inputs. + + Args: + images (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample], optional): The annotation + data of every samples. Defaults to None. + **kwargs: Other keyword arguments accepted by the ``predict`` + method of :attr:`head`. + + Returns: + List[DataSample]: Return list of data samples. 
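+
+        Example (illustrative; assumes a built ``model`` and a preprocessed
+        image batch ``images``)::
+
+            >>> samples = model.predict(images)
+            >>> samples[0].pred_caption  # the generated caption string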
+ """ + + # extract image features + image_embeds = self.ln_vision_backbone(self.vision_backbone(images)[0]) + image_atts = torch.ones( + image_embeds.size()[:-1], + dtype=torch.long, + ).to(images.device) + + # distill image features to query tokens + query_tokens = self.query_tokens.expand(image_embeds.size(0), -1, -1) + query_outputs = self.multimodal_backbone.bert( + query_embeds=query_tokens, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + inputs_opt = self.vision_neck([query_outputs.last_hidden_state]) + attns_opt = torch.ones( + inputs_opt.size()[:-1], dtype=torch.long).to(images.device) + + prompt = [self.prompt] * image_embeds.size(0) + + opt_tokens = self.tokenizer( + prompt, + return_tensors='pt', + padding='longest', + truncation=True, + max_length=self.max_txt_len, + ).to(images.device) + attention_mask = torch.cat([attns_opt, opt_tokens.attention_mask], + dim=1) + + inputs_embeds = ( + self.text_backbone.get_input_embeddings()(opt_tokens.input_ids)) + inputs_embeds = torch.cat([inputs_opt, inputs_embeds], dim=1) + + outputs = self.text_backbone.generate( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + do_sample=False, + top_p=0.9, + temperature=1., + num_beams=5, + max_new_tokens=self.max_txt_len, + min_length=1, + eos_token_id=self.eos_token_id, + repetition_penalty=1.0, + length_penalty=1.0, + num_return_sequences=self.num_captions, + ) + + output_text = self.tokenizer.batch_decode( + outputs, skip_special_tokens=True) + output_text = [text.strip() for text in output_text] + + out_data_samples = [] + if data_samples is None: + data_samples = [None for _ in range(len(output_text))] + + for data_sample, decode_token in zip(data_samples, output_text): + if data_sample is None: + data_sample = DataSample() + data_sample.pred_caption = decode_token + out_data_samples.append(data_sample) + + return out_data_samples + + @staticmethod + def _ignore_loading_llm_keys_hook(module, incompatible_keys): + """Avoid warning missing keys of the LLM model.""" + import re + llm_pattern = '^text_backbone' + for key in list(incompatible_keys.missing_keys): + if re.match(llm_pattern, key): + incompatible_keys.missing_keys.remove(key) + + @staticmethod + def _igonre_saving_llm_keys_hook(module, state_dict, prefix, metadata): + """Avoid saving llm state dict.""" + import re + llm_pattern = '^text_backbone' + keys = [k for k, _ in state_dict.items()] + for key in keys: + if re.match(llm_pattern, key): + state_dict.pop(key) diff --git a/mmpretrain/models/multimodal/blip2/blip2_opt_vqa.py b/mmpretrain/models/multimodal/blip2/blip2_opt_vqa.py new file mode 100644 index 0000000..20e439f --- /dev/null +++ b/mmpretrain/models/multimodal/blip2/blip2_opt_vqa.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional + +import torch + +from mmpretrain.registry import MODELS +from mmpretrain.structures import DataSample +from .blip2_caption import Blip2Caption + + +@MODELS.register_module() +class Blip2VQA(Blip2Caption): + """BLIP2 VQA. + + Module for BLIP2 VQA task. For more details about the initialization + params, please refer to :class:`Blip2Caption`. + """ + + def predict(self, + images: torch.Tensor, + data_samples: Optional[list] = None, + **kwargs) -> List[DataSample]: + """Predict captions from a batch of inputs. + + Args: + images (torch.Tensor): The input tensor with shape + (N, C, ...) in general. + data_samples (List[DataSample], optional): The annotation + data of every samples. 
Defaults to None. + **kwargs: Other keyword arguments accepted by the ``predict`` + method of :attr:`head`. + + Returns: + List[DataSample]: Return list of data samples. + """ + questions = [d.question for d in data_samples] + + # extract image features from + image_embeds = self.ln_vision_backbone(self.vision_backbone(images)[0]) + image_atts = torch.ones( + image_embeds.size()[:-1], + dtype=torch.long, + ).to(images.device) + + # distill image features to query tokens + query_tokens = self.query_tokens.expand(image_embeds.size(0), -1, -1) + query_outputs = self.multimodal_backbone.bert( + query_embeds=query_tokens, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + inputs_opt = self.vision_neck([query_outputs.last_hidden_state]) + attns_opt = torch.ones( + inputs_opt.size()[:-1], dtype=torch.long).to(images.device) + + prompt = [self.prompt.format(q) for q in questions] + + # use left padding + self.tokenizer.padding_side = 'left' + + opt_tokens = self.tokenizer( + prompt, return_tensors='pt', padding='longest').to(images.device) + input_ids = opt_tokens.input_ids + attention_mask = torch.cat([attns_opt, opt_tokens.attention_mask], + dim=1) + + inputs_embeds = self.text_backbone.model.decoder.embed_tokens( + input_ids) + inputs_embeds = torch.cat([inputs_opt, inputs_embeds], dim=1) + + outputs = self.text_backbone.generate( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + do_sample=False, + num_beams=5, + max_new_tokens=self.max_txt_len, + min_length=1, + eos_token_id=self.eos_token_id, + length_penalty=-1.0, + ) + + output_text = self.tokenizer.batch_decode( + outputs, skip_special_tokens=True) + output_text = [text.strip() for text in output_text] + + out_data_samples = [] + for data_sample, decode_token in zip(data_samples, output_text): + data_sample.pred_answer = decode_token + out_data_samples.append(data_sample) + + return out_data_samples diff --git a/mmpretrain/models/multimodal/blip2/blip2_retriever.py b/mmpretrain/models/multimodal/blip2/blip2_retriever.py new file mode 100644 index 0000000..e626404 --- /dev/null +++ b/mmpretrain/models/multimodal/blip2/blip2_retriever.py @@ -0,0 +1,505 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Tuple, Union + +import mmengine.dist as dist +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine.utils import track_iter_progress + +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample +from ..blip.blip_retrieval import BlipRetrieval, all_gather_concat + + +@MODELS.register_module() +class Blip2Retrieval(BlipRetrieval): + """BLIP2 Retriever. + + Args: + vision_backbone (dict): Backbone for extracting image features. + text_backbone (dict): Backbone for extracting text features. + multimodal_backbone (Optional[dict]): Backbone for extracting + multi-modal features. + vision_neck (Optional[dict]): The neck module to process image features + from vision backbone. Defaults to None. + text_neck (Optional[dict]): The neck module to process text features + from text backbone. Defaults to None. + head (Optional[Union[List[dict], dict]]): The head module to calculate + loss from processed single modality features. + See :mod:`mmmultimodal.models.heads`. + Notice that if the head is not set, `loss` method cannot be used. + Defaults to None. 
+ multimodal_head (Optional[Union[List[dict], dict]]): The multi-modal + head module to calculate loss from processed multimodal features. + See :mod:`mmmultimodal.models.heads`. + Notice that if the head is not set, `loss` method cannot be used. + Defaults to None. + tokenizer (Optional[dict]): The config for tokenizer. Defaults to None. + temperature (float): Temperature parameter that controls the + concentration level of the distribution. Defaults to 0.07. + fast_match (bool): If False, select topk similarity as candidates and + compute the matching score. If True, return the similarity as the + matching score directly. Defaults to False. + topk (int): Select topk similarity as candidates for compute matching + scores. Notice that this is not the topk in evaluation. + Defaults to 256. + data_preprocessor (Optional[dict]): The config for preprocessing input + data. If None or no specified type, it will use + "MultiModalDataPreprocessor" as type. + See :class:`MultiModalDataPreprocessor` for more details. + Defaults to None. + init_cfg (Optional[dict]): the config to control the initialization. + Defaults to None. + """ + + def __init__(self, + vision_backbone: dict, + text_backbone: Optional[dict] = None, + multimodal_backbone: Optional[dict] = None, + vision_neck: Optional[dict] = None, + text_neck: Optional[dict] = None, + head: Optional[Union[List[dict], dict]] = None, + multimodal_head: Optional[Union[List[dict], dict]] = None, + tokenizer: Optional[dict] = None, + temperature: float = 0.07, + fast_match: bool = False, + topk: int = 256, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None) -> None: + if data_preprocessor is None: + data_preprocessor = {} + if isinstance(data_preprocessor, dict): + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + # Skip BlipRetrieval init + super(BlipRetrieval, self).__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + self.vision_backbone = MODELS.build(vision_backbone) + self.ln_vision_backbone = nn.LayerNorm(self.vision_backbone.embed_dims) + self.tokenizer = TOKENIZER.build(tokenizer) + + if text_backbone is not None: + self.text_backbone = MODELS.build(text_backbone) + + if multimodal_backbone is not None: + self.multimodal_backbone = MODELS.build(multimodal_backbone) + self.multimodal_backbone.resize_token_embeddings( + len(self.tokenizer)) + self.query_tokens = nn.Parameter( + torch.zeros(1, self.multimodal_backbone.bert.config.query_length, + self.multimodal_backbone.bert.config.hidden_size)) + self.query_tokens.data.normal_( + mean=0.0, + std=self.multimodal_backbone.bert.config.initializer_range) + + if vision_neck is not None: + self.vision_neck = MODELS.build(vision_neck) + + if text_neck is not None: + self.text_neck = MODELS.build(text_neck) + + if head is not None: + self.head = MODELS.build(head) + + if multimodal_head is not None: + self.multimodal_head = MODELS.build(multimodal_head) + + self.temp = nn.Parameter(temperature * torch.ones([])) + + # Notice that this topk is used for select k candidate to compute + # image-text score, but not the final metric topk in evaluation. + self.fast_match = fast_match + self.topk = topk + + def _extract_feat(self, inputs: Union[torch.Tensor, dict], + modality: str) -> Tuple[torch.Tensor]: + """Extract features from the single modality. + Args: + inputs (Union[torch.Tensor, dict]): A batch of inputs. + For image, a tensor of shape (N, C, ...) in general. 
+ For text, a dict of tokenized text inputs. + modality (str): Modality feature to be extracted. Only two + options are supported. + + - ``images``: Only extract image features, mostly used for + inference. + - ``texts``: Only extract text features, mostly used for + inference. + Returns: + Tuple[torch.Tensor]: The output features. + """ + if modality == 'images': + # extract image features + # TODO: + # Add layernorm inside backbone and handle the concat outside + image_embeds = self.ln_vision_backbone( + self.vision_backbone(inputs)[0]) + image_atts = torch.ones( + image_embeds.size()[:-1], dtype=torch.long).to(self.device) + + query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, + -1) + query_output = self.multimodal_backbone.bert( + query_embeds=query_tokens, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + use_cache=True, + return_dict=True, + ) + image_feat = F.normalize( + self.vision_neck([query_output.last_hidden_state]), dim=-1) + return { + 'image_embeds': image_embeds, + 'image_feat': image_feat, + 'query_output': query_output + } + elif modality == 'texts': + # extract text features + text_output = self.multimodal_backbone.bert( + inputs.input_ids, + attention_mask=inputs.attention_mask, + return_dict=True, + ) + text_embeds = text_output.last_hidden_state + text_feat = F.normalize( + self.text_neck([text_embeds[:, 0, :]]), dim=-1) + return {'text_embeds': text_embeds, 'text_feat': text_feat} + else: + raise RuntimeError(f'Invalid modality "{modality}".') + + def loss( + self, + images: torch.Tensor, + data_samples: Optional[List[DataSample]] = None, + ) -> Dict[str, torch.tensor]: + """Calculate losses from a batch of inputs and data samples. + + Args: + inputs (dict): A batch of inputs. The input tensor with of + at least one modality. For image, the value is a tensor + of shape (N, C, ...) in general. + For text, the value is a dict of tokenized text inputs. + data_samples (Optional[List[DataSample]]): + The annotation data of every samples. Defaults to None. + + Returns: + Dict[str, torch.tensor]: a dictionary of loss components of + both head and multimodal head. 
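+
+        Example (illustrative; shows how the ITC targets below are laid out
+        for ``rank=1`` and a local batch size of 4)::
+
+            >>> import torch
+            >>> rank, bs = 1, 4
+            >>> torch.linspace(rank * bs, rank * bs + bs - 1, bs, dtype=int)
+            tensor([4, 5, 6, 7])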
+ """ + output = self.extract_feat(images, data_samples) + + text_ids = output['text_ids'] + text_attn_mask = output['text_attn_mask'] + image_embeds = output['image_embeds'] + image_feat = output['image_feat'] + text_feat = output['text_feat'] + query_output = output['query_output'] + + # ITC Loss + # B*world_size, num_query, D + image_feat_all = torch.cat(dist.all_gather(image_feat)) + # B*world_size, D + text_feat_all = torch.cat(dist.all_gather(text_feat)) + + # B, B*world_size, num_query + sim_q2t = torch.matmul( + image_feat.unsqueeze(1), text_feat_all.unsqueeze(-1)).squeeze() + + # image to text similarity + sim_i2t, _ = sim_q2t.max(-1) + sim_i2t = sim_i2t / self.temp + + # B, B*world_size, num_query + sim_t2q = torch.matmul( + text_feat.unsqueeze(1).unsqueeze(1), + image_feat_all.permute(0, 2, 1)).squeeze() + + # text-image similarity + sim_t2i, _ = sim_t2q.max(-1) + sim_t2i = sim_t2i / self.temp + + rank = dist.get_rank() + bs = images.size(0) + targets = torch.linspace( + rank * bs, rank * bs + bs - 1, bs, dtype=int).to(self.device) + + itc_loss = (F.cross_entropy(sim_i2t, targets, label_smoothing=0.1) + + F.cross_entropy(sim_t2i, targets, label_smoothing=0.1)) / 2 + + # prepare for itm + text_input_ids_world = torch.cat(dist.all_gather(text_ids)) + text_attention_mask_world = torch.cat(dist.all_gather(text_attn_mask)) + image_embeds_world = torch.cat(dist.all_gather(image_embeds)) + with torch.no_grad(): + weights_t2i = F.softmax(sim_t2i, dim=1) + 1e-4 + weights_t2i[:, rank * bs:rank * bs + bs].fill_diagonal_(0) + weights_i2t = F.softmax(sim_i2t, dim=1) + 1e-4 + weights_i2t[:, rank * bs:rank * bs + bs].fill_diagonal_(0) + + # select a negative image for each text + image_embeds_neg = [] + for b in range(bs): + neg_idx = torch.multinomial(weights_t2i[b], 1).item() + image_embeds_neg.append(image_embeds_world[neg_idx]) + image_embeds_neg = torch.stack(image_embeds_neg, dim=0) + + # select a negative text for each image + text_ids_neg = [] + text_atts_neg = [] + for b in range(bs): + neg_idx = torch.multinomial(weights_i2t[b], 1).item() + text_ids_neg.append(text_input_ids_world[neg_idx]) + text_atts_neg.append(text_attention_mask_world[neg_idx]) + + text_ids_neg = torch.stack(text_ids_neg, dim=0) + text_atts_neg = torch.stack(text_atts_neg, dim=0) + + text_ids_all = torch.cat([text_ids, text_ids, text_ids_neg], + dim=0) # pos, pos, neg + text_atts_all = torch.cat( + [text_attn_mask, text_attn_mask, text_atts_neg], + dim=0, + ) + + query_tokens_itm = self.query_tokens.expand(text_ids_all.shape[0], -1, + -1) + query_atts_itm = torch.ones( + query_tokens_itm.size()[:-1], dtype=torch.long).to(self.device) + attention_mask_all = torch.cat([query_atts_itm, text_atts_all], dim=1) + + image_embeds_all = torch.cat( + [image_embeds, image_embeds_neg, image_embeds], + dim=0) # pos, neg, pos + image_atts_all = torch.ones( + image_embeds_all.size()[:-1], dtype=torch.long).to(self.device) + + output_itm = self.multimodal_backbone.bert( + text_ids_all, + query_embeds=query_tokens_itm, + attention_mask=attention_mask_all, + encoder_hidden_states=image_embeds_all, + encoder_attention_mask=image_atts_all, + return_dict=True, + ) + + vl_embeddings = output_itm.last_hidden_state[:, :query_tokens_itm. 
+ size(1), :] + + # create false data samples + data_samples.extend( + [DataSample(is_matched=False) for _ in range(2 * bs)]) + loss_multimodal = self.multimodal_head.loss((vl_embeddings, ), + data_samples) + + # LM loss + decoder_input_ids = text_ids.clone() + decoder_input_ids[:, 0] = self.tokenizer.bos_token_id + labels = decoder_input_ids.masked_fill( + decoder_input_ids == self.tokenizer.pad_token_id, -100) + + query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) + query_atts = torch.ones( + query_tokens.size()[:-1], dtype=torch.long).to(self.device) + attention_mask = torch.cat([query_atts, text_attn_mask], dim=1) + lm_output = self.multimodal_backbone( + decoder_input_ids, + attention_mask=attention_mask, + past_key_values=query_output.past_key_values, + return_dict=True, + labels=labels, + ) + + return dict( + itc_loss=itc_loss, **loss_multimodal, lm_loss=lm_output.loss) + + def predict_all(self, + feats: Dict[str, torch.Tensor], + data_samples: List[DataSample], + num_images: int = None, + num_texts: int = None, + cal_i2t: bool = True, + cal_t2i: bool = True) -> Tuple[torch.Tensor, torch.Tensor]: + """Compute similarity matrix between images and texts across all ranks. + + Args: + feats (Dict[str, torch.Tensor]): Features from the current rank. + data_samples (List[DataSample]): Data samples from the current + rank. + num_images (int, optional): Number of images to use. + Defaults to None. + num_texts (int, optional): Number of texts to use. + Defaults to None. + cal_i2t (bool, optional): Whether to compute image-to-text + similarity. Defaults to True. + cal_t2i (bool, optional): Whether to compute text-to-image + similarity. Defaults to True. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: Image-to-text and text-to-image + similarity matrices. + """ + text_ids = feats['text_ids'] + text_attn_mask = feats['text_attn_mask'] + image_embeds = feats.get('image_embeds', None) + image_feat = feats['image_feat'] + text_feat = feats['text_feat'] + + num_images = num_images or image_feat.size(0) + num_texts = num_texts or text_feat.size(0) + + if not self.fast_match: + image_embeds_all = all_gather_concat(image_embeds)[:num_images] + else: + image_embeds_all = None + image_feat_all = all_gather_concat(image_feat)[:num_images] + text_feat_all = all_gather_concat(text_feat)[:num_texts] + text_ids_all = all_gather_concat(text_ids)[:num_texts] + text_attn_mask_all = all_gather_concat(text_attn_mask)[:num_texts] + + results = [] + if cal_i2t: + result_i2t = self.compute_score_matrix_i2t( + image_feat, + image_embeds, + text_feat_all, + text_ids_all, + text_attn_mask_all, + ) + results.append( + self._get_predictions(result_i2t, data_samples, mode='i2t')) + if cal_t2i: + result_t2i = self.compute_score_matrix_t2i( + image_feat_all, + image_embeds_all, + text_feat, + text_ids, + text_attn_mask, + ) + results.append( + self._get_predictions(result_t2i, data_samples, mode='t2i')) + return tuple(results) + + def compute_score_matrix_i2t(self, img_feats: torch.Tensor, + img_embeds: List[torch.Tensor], + text_feats: torch.Tensor, + text_ids: torch.Tensor, + text_atts: torch.Tensor) -> torch.Tensor: + """Compare the score matrix for image-to-text retrieval. Every image + should compare to all the text features. + + Args: + img_feats (torch.Tensor): The input tensor with shape (M, C). + M stands for numbers of samples on a single GPU. + img_embeds (List[torch.Tensor]): Image features from each layer of + the vision backbone. 
+ text_feats (torch.Tensor): The input tensor with shape (N, C). + N stands for numbers of all samples on all GPUs. + text_ids (torch.Tensor): The input tensor with shape (N, C). + text_atts (torch.Tensor): The input tensor with shape (N, C). + + Returns: + torch.Tensor: Score matrix of image-to-text retrieval. + """ + + # compute i2t sim matrix + # TODO: check correctness + sim_matrix_i2t, _ = (img_feats @ text_feats.t()).max(1) + if self.fast_match: + return sim_matrix_i2t + + score_matrix_i2t = torch.full((img_feats.size(0), text_feats.size(0)), + -100.0).to(self.device) + + for i in track_iter_progress(range(img_feats.size(0))): + sims = sim_matrix_i2t[i] + topk_sim, topk_idx = sims.topk(k=self.topk, dim=0) + # get repeated image embeddings + encoder_output = img_embeds[i].repeat(self.topk, 1, 1) + encoder_att = torch.ones( + encoder_output.size()[:-1], dtype=torch.long).to(self.device) + # query embeds and attention masks + query_tokens = self.query_tokens.expand(encoder_output.shape[0], + -1, -1) + query_atts = torch.ones( + query_tokens.size()[:-1], dtype=torch.long).to(self.device) + attention_mask = torch.cat([query_atts, text_atts[topk_idx]], + dim=1) + output = self.multimodal_backbone.bert( + text_ids[topk_idx], + query_embeds=query_tokens, + attention_mask=attention_mask, + encoder_hidden_states=encoder_output, + encoder_attention_mask=encoder_att, + return_dict=True, + ) + score = self.multimodal_head( + (output.last_hidden_state[:, :query_tokens.size(1), :], + ))[:, :, 1].mean(dim=1) + score_matrix_i2t[i, topk_idx] = score + topk_sim + + return score_matrix_i2t + + def compute_score_matrix_t2i(self, img_feats: torch.Tensor, + img_embeds: List[torch.Tensor], + text_feats: torch.Tensor, + text_ids: torch.Tensor, + text_atts: torch.Tensor) -> torch.Tensor: + """Compare the score matrix for text-to-image retrieval. + + Every text should compare to all the image features. + + Args: + img_feats (torch.Tensor): The input tensor with shape (N, C). + N stands for numbers of all samples on all GPUs. + img_embeds (List[torch.Tensor]): Image features from each layer of + the vision backbone. + text_feats (torch.Tensor): The input tensor with shape (M, C). + M stands for numbers of samples on a single GPU. + text_ids (torch.Tensor): The input tensor with shape (M, C). + text_atts (torch.Tensor): The input tensor with shape (M, C). + + Returns: + torch.Tensor: Score matrix of text-to-image retrieval. 
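+
+        Note that when ``fast_match`` is enabled, only the coarse query-token
+        to text similarity is returned and the Q-Former based re-ranking of
+        the top-``k`` candidates is skipped.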
+        """
+
+        # compute t2i sim matrix
+        # TODO: check correctness
+        sim_matrix_i2t, _ = (img_feats @ text_feats.t()).max(1)
+        sim_matrix_t2i = sim_matrix_i2t.t()
+        if self.fast_match:
+            return sim_matrix_t2i
+
+        score_matrix_t2i = torch.full((text_feats.size(0), img_feats.size(0)),
+                                      -100.0).to(self.device)
+
+        for i in track_iter_progress(range(text_feats.size(0))):
+            sims = sim_matrix_t2i[i]
+            topk_sim, topk_idx = sims.topk(k=self.topk, dim=0)
+            # get topk image embeddings
+            encoder_output = img_embeds[topk_idx]
+            encoder_att = torch.ones(
+                encoder_output.size()[:-1], dtype=torch.long).to(self.device)
+            # get query embeds and attention masks
+            query_tokens = self.query_tokens.expand(encoder_output.shape[0],
+                                                    -1, -1)
+            query_atts = torch.ones(
+                query_tokens.size()[:-1], dtype=torch.long).to(self.device)
+            attention_mask = torch.cat(
+                [query_atts, text_atts[i].repeat(self.topk, 1)], dim=1)
+            output = self.multimodal_backbone.bert(
+                text_ids[i].repeat(self.topk, 1),
+                query_embeds=query_tokens,
+                attention_mask=attention_mask,
+                encoder_hidden_states=encoder_output,
+                encoder_attention_mask=encoder_att,
+                return_dict=True,
+            )
+            score = self.multimodal_head(
+                (output.last_hidden_state[:, :query_tokens.size(1), :],
+                 ))[:, :, 1].mean(dim=1)
+            score_matrix_t2i[i, topk_idx] = score + topk_sim
+
+        return score_matrix_t2i
diff --git a/mmpretrain/models/multimodal/blip2/modeling_opt.py b/mmpretrain/models/multimodal/blip2/modeling_opt.py
new file mode 100644
index 0000000..7cde0d7
--- /dev/null
+++ b/mmpretrain/models/multimodal/blip2/modeling_opt.py
@@ -0,0 +1,1083 @@
+# flake8: noqa
+# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch OPT model.""" +import random +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss +from transformers.activations import ACT2FN +from transformers.modeling_outputs import (BaseModelOutputWithPast, + CausalLMOutputWithPast) +from transformers.modeling_utils import PreTrainedModel +from transformers.models.opt.configuration_opt import OPTConfig +from transformers.utils import (add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, logging, + replace_return_docstrings) + +from mmpretrain.models.utils import register_hf_model + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = 'facebook/opt-350m' +_CONFIG_FOR_DOC = 'OPTConfig' +_TOKENIZER_FOR_DOC = 'GPT2Tokenizer' + +# Base model docstring +_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024] + +OPT_PRETRAINED_MODEL_ARCHIVE_LIST = [ + 'facebook/opt-125m', + 'facebook/opt-350m', + 'facebook/opt-1.3b', + 'facebook/opt-2.7b', + 'facebook/opt-6.7b', + 'facebook/opt-13b', + 'facebook/opt-30b', + # See all OPT models at https://huggingface.co/models?filter=opt +] + + +def _make_causal_mask(input_ids_shape: torch.Size, + dtype: torch.dtype, + past_key_values_length: int = 0): + """Make causal mask used for bi-directional self-attention.""" + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min)) + mask_cond = torch.arange(mask.size(-1)) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat( + [torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], + dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, + tgt_len + past_key_values_length) + + +def _expand_mask(mask: torch.Tensor, + dtype: torch.dtype, + tgt_len: Optional[int] = None): + """Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, + src_seq_len]`.""" + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, + src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill( + inverted_mask.to(torch.bool), + torch.finfo(dtype).min) + + +class OPTLearnedPositionalEmbedding(nn.Embedding): + """This module learns positional embeddings up to a fixed maximum size.""" + + def __init__(self, num_embeddings: int, embedding_dim: int): + # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2 + # and adjust num_embeddings appropriately. 
Other models don't have this hack + self.offset = 2 + super().__init__(num_embeddings + self.offset, embedding_dim) + + def forward(self, + attention_mask: torch.LongTensor, + past_key_values_length: int = 0): + """`input_ids_shape` is expected to be [bsz x seqlen].""" + attention_mask = attention_mask.long() + + # create positions depending on attention_mask + positions = ( + torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * + attention_mask).long() - 1 + + # cut positions if `past_key_values_length` is > 0 + positions = positions[:, past_key_values_length:] + + return super().forward(positions + self.offset) + + +class OPTAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper.""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}' + f' and `num_heads`: {num_heads}).') + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return (tensor.view(bsz, seq_len, self.num_heads, + self.head_dim).transpose(1, 2).contiguous()) + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], + Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel.""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. 
+ # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, + bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f'Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is' + f' {attn_weights.size()}') + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f'Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}' + ) + attn_weights = ( + attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + + attention_mask) + attn_weights = torch.max( + attn_weights, + torch.tensor(torch.finfo(attn_weights.dtype).min)) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, + src_len) + + # upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437 + if attn_weights.dtype == torch.float16: + attn_weights = nn.functional.softmax( + attn_weights, dim=-1, dtype=torch.float32).to(torch.float16) + else: + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads, ): + raise ValueError( + f'Head mask for a single layer should be of size {(self.num_heads,)}, but is' + f' {layer_head_mask.size()}') + attn_weights = layer_head_mask.view( + 1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, + src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, + src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, + tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, + tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout( + attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, + self.head_dim): + raise ValueError( + f'`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is' + f' {attn_output.size()}') + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, + self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned aross GPUs when using tensor-parallelism. 
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +class OPTDecoderLayer(nn.Module): + + def __init__(self, config: OPTConfig): + super().__init__() + self.embed_dim = config.hidden_size + self.self_attn = OPTAttention( + embed_dim=self.embed_dim, + num_heads=config.num_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + self.do_layer_norm_before = config.do_layer_norm_before + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim) + self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, + torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). 
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + + residual = hidden_states + + # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention + if self.do_layer_norm_before: + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout( + hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + # 350m applies layer norm AFTER attention + if not self.do_layer_norm_before: + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Fully Connected + hidden_states_shape = hidden_states.shape + hidden_states = hidden_states.reshape(-1, hidden_states.size(-1)) + residual = hidden_states + + # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention + if self.do_layer_norm_before: + hidden_states = self.final_layer_norm(hidden_states) + + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout( + hidden_states, p=self.dropout, training=self.training) + + hidden_states = (residual + hidden_states).view(hidden_states_shape) + + # 350m applies layer norm AFTER attention + if not self.do_layer_norm_before: + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states, ) + + if output_attentions: + outputs += (self_attn_weights, ) + + if use_cache: + outputs += (present_key_value, ) + + return outputs + + +OPT_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`OPTConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
+""" + + +@add_start_docstrings( + 'The bare OPT Model outputting raw hidden-states without any specific head on top.', + OPT_START_DOCSTRING, +) +class OPTPreTrainedModel(PreTrainedModel): + + config_class = OPTConfig + base_model_prefix = 'model' + supports_gradient_checkpointing = True + _no_split_modules = ['OPTDecoderLayer'] + _keys_to_ignore_on_load_unexpected = [r'decoder\.version'] + + def _init_weights(self, module): + std = self.config.init_std + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (OPTDecoder)): + module.gradient_checkpointing = value + + +OPT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`GPT2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +class OPTDecoder(OPTPreTrainedModel): + """Transformer decoder consisting of *config.num_hidden_layers* layers. + Each layer is a [`OPTDecoderLayer`] + + Args: + config: OPTConfig + """ + + def __init__(self, config: OPTConfig): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.layerdrop + self.padding_idx = config.pad_token_id + self.max_target_positions = config.max_position_embeddings + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, + config.word_embed_proj_dim, + self.padding_idx) + self.embed_positions = OPTLearnedPositionalEmbedding( + config.max_position_embeddings, config.hidden_size) + + if config.word_embed_proj_dim != config.hidden_size: + self.project_out = nn.Linear( + config.hidden_size, config.word_embed_proj_dim, bias=False) + else: + self.project_out = None + + if config.word_embed_proj_dim != config.hidden_size: + self.project_in = nn.Linear( + config.word_embed_proj_dim, config.hidden_size, bias=False) + else: + self.project_in = None + + # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility + # with checkpoints that have been fine-tuned before transformers v4.20.1 + # see https://github.com/facebookresearch/metaseq/pull/164 + if config.do_layer_norm_before and not config._remove_final_layer_norm: + self.final_layer_norm = nn.LayerNorm(config.hidden_size) + else: + self.final_layer_norm = None + + self.layers = nn.ModuleList( + [OPTDecoderLayer(config) for _ in range(config.num_hidden_layers)]) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, + inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + past_key_values_length=past_key_values_length, + ).to(inputs_embeds.device) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask( + attention_mask, inputs_embeds.dtype, + tgt_len=input_shape[-1]).to(inputs_embeds.device) + 
combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else + expanded_attn_mask + combined_attention_mask) + + return combined_attention_mask + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + query_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the + cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
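+ query_embeds (`torch.FloatTensor`, *optional*):
+ Extra query embeddings that, when given, are concatenated in front of `inputs_embeds`
+ along the sequence dimension before decoding. This argument is specific to this
+ modified OPT decoder and is not part of the upstream `transformers` docstring.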
+ """ + output_attentions = ( + output_attentions if output_attentions is not None else + self.config.output_attentions) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else + self.config.output_hidden_states) + use_cache = use_cache if use_cache is not None else self.config.use_cache + + return_dict = ( + return_dict + if return_dict is not None else self.config.use_return_dict) + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError( + 'You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time' + ) + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError( + 'You have to specify either decoder_input_ids or decoder_inputs_embeds' + ) + + seq_length_with_past = seq_length + past_key_values_length = 0 + + if past_key_values is not None: + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + if query_embeds is not None: + inputs_embeds = torch.cat([query_embeds, inputs_embeds], dim=1) + input_shape = inputs_embeds.size()[:-1] + else: + input_shape = (batch_size, seq_length) + + # embed positions + if attention_mask is None: + attention_mask = torch.ones( + inputs_embeds.shape[:2], + dtype=torch.bool, + device=inputs_embeds.device) + pos_embeds = self.embed_positions(attention_mask, + past_key_values_length) + + # embed positions + if attention_mask is None: + attention_mask = torch.ones((batch_size, seq_length_with_past), + dtype=torch.bool, + device=inputs_embeds.device) + + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length) + + if self.project_in is not None: + inputs_embeds = self.project_in(inputs_embeds) + + hidden_states = inputs_embeds + pos_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = () if use_cache else None + + # check if head_mask has a correct number of layers specified if desired + for attn_mask, mask_name in zip([head_mask], ['head_mask']): + if attn_mask is not None: + if attn_mask.size()[0] != (len(self.layers)): + raise ValueError( + f'The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for' + f' {head_mask.size()[0]}.') + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states, ) + + dropout_probability = random.uniform(0, 1) + if self.training and (dropout_probability < self.layerdrop): + continue + + past_key_value = ( + past_key_values[idx] if past_key_values is not None else None) + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warning( + '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...' 
+ ) + use_cache = False + + def create_custom_forward(module): + + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, output_attentions, None) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + head_mask[idx] if head_mask is not None else None, + None, + ) + else: + + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + layer_head_mask=(head_mask[idx] + if head_mask is not None else None), + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += ( + layer_outputs[2 if output_attentions else 1], ) + + if output_attentions: + all_self_attns += (layer_outputs[1], ) + + if self.final_layer_norm is not None: + hidden_states = self.final_layer_norm(hidden_states) + + if self.project_out is not None: + hidden_states = self.project_out(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states, ) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple( + v for v in + [hidden_states, next_cache, all_hidden_states, all_self_attns] + if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +@add_start_docstrings( + 'The bare OPT Model outputting raw hidden-states without any specific head on top.', + OPT_START_DOCSTRING, +) +class OPTModel(OPTPreTrainedModel): + + def __init__(self, config: OPTConfig): + super().__init__(config) + self.decoder = OPTDecoder(config) + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.decoder.embed_tokens = value + + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPast, + config_class=_CONFIG_FOR_DOC, + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + query_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + + output_attentions = ( + output_attentions if output_attentions is not None else + self.config.output_attentions) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else + self.config.output_hidden_states) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = ( + return_dict + if return_dict is not None else self.config.use_return_dict) + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + input_ids=input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + past_key_values=past_key_values, + 
inputs_embeds=inputs_embeds, + query_embeds=query_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + + return BaseModelOutputWithPast( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + hidden_states=decoder_outputs.hidden_states, + attentions=decoder_outputs.attentions, + ) + + +@register_hf_model() +class OPTForCausalLM(OPTPreTrainedModel): + _keys_to_ignore_on_load_missing = [r'lm_head.weight'] + + def __init__(self, config): + super().__init__(config) + self.model = OPTModel(config) + + # the lm_head weight is automatically tied to the embed tokens weight + self.lm_head = nn.Linear( + config.word_embed_proj_dim, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.model.decoder.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model.decoder = decoder + + def get_decoder(self): + return self.model.decoder + + @replace_return_docstrings( + output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + query_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + reduction: Optional[str] = 'mean', + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`OPTTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of + shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. 
The two additional + tensors are only required when the model is used as a decoder in a Sequence to Sequence model. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the + cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + + Returns: + + Example: + + ```python + >>> from transformers import GPT2Tokenizer, OPTForCausalLM + + >>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m") + >>> tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m") + + >>> prompt = "Hey, are you consciours? Can you talk to me?" + >>> inputs = tokenizer(prompt, return_tensors="pt") + + >>> # Generate + >>> generate_ids = model.generate(inputs.input_ids, max_length=30) + >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you." 
+ ```""" + + output_attentions = ( + output_attentions if output_attentions is not None else + self.config.output_attentions) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else + self.config.output_hidden_states) + return_dict = ( + return_dict + if return_dict is not None else self.config.use_return_dict) + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model.decoder( + input_ids=input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + query_embeds=query_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + logits = self.lm_head(outputs[0]).contiguous() + + loss = None + if labels is not None: + logits = logits[:, -labels.size(1):, :] + + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss(reduction=reduction) + loss = loss_fct( + shift_logits.view(-1, self.config.vocab_size), + shift_labels.view(-1)) + if reduction == 'none': + loss = loss.view(shift_logits.size(0), -1).sum(1) + + if not return_dict: + output = (logits, ) + outputs[1:] + return (loss, ) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, + input_ids=None, + inputs_embeds=None, + query_embeds=None, + past_key_values=None, + attention_mask=None, + use_cache=None, + **kwargs, + ): + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + if input_ids is not None: + attention_mask = input_ids.new_ones(input_ids.shape) + if past_key_values: + input_ids = input_ids[:, -1:] + query_embeds = None + # first step, decoder_cached_states are empty + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {'inputs_embeds': inputs_embeds} + else: + model_inputs = {'input_ids': input_ids} + + model_inputs.update({ + 'query_embeds': query_embeds, + 'attention_mask': attention_mask, + 'past_key_values': past_key_values, + 'use_cache': use_cache, + }) + return model_inputs + + @staticmethod + def _reorder_cache(past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple( + past_state.index_select(0, beam_idx) + for past_state in layer_past), ) + return reordered_past diff --git a/mmpretrain/models/multimodal/chinese_clip/__init__.py b/mmpretrain/models/multimodal/chinese_clip/__init__.py new file mode 100644 index 0000000..460e9e6 --- /dev/null +++ b/mmpretrain/models/multimodal/chinese_clip/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .bert import BertModelCN +from .chinese_clip import ChineseCLIP, ModifiedResNet + +__all__ = ['ChineseCLIP', 'ModifiedResNet', 'BertModelCN'] diff --git a/mmpretrain/models/multimodal/chinese_clip/bert.py b/mmpretrain/models/multimodal/chinese_clip/bert.py new file mode 100644 index 0000000..4e8dc73 --- /dev/null +++ b/mmpretrain/models/multimodal/chinese_clip/bert.py @@ -0,0 +1,263 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + +# flake8: noqa +import math + +import torch +from torch import nn +from torch.utils.checkpoint import checkpoint + +try: + from transformers.models.bert.configuration_bert import BertConfig +except: + BertConfig = None + +from mmpretrain.registry import MODELS +from ..blip.language_model import BertAttention, BertIntermediate, BertOutput + + +def gelu(x): + """Original Implementation of the gelu activation function in Google Bert + repo when initially created. + + For information: OpenAI GPT's gelu is slightly different (and gives + slightly different results): + 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) + Also see https://arxiv.org/abs/1606.08415 + """ # noqa + return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) + + +def gelu_new(x): + """Implementation of the gelu activation function currently in Google Bert + repo (identical to OpenAI GPT) https://arxiv.org/abs/1606.08415.""" + return 0.5 * x * (1 + torch.tanh( + math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) + + +def swish(x): + return x * torch.sigmoid(x) + + +ACT2FN = { + 'gelu': gelu, + 'relu': torch.nn.functional.relu, + 'swish': swish, + 'gelu_new': gelu_new +} + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type + embeddings.""" + + def __init__(self, config): + super(BertEmbeddings, self).__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, config.hidden_size, padding_idx=0) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, + config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, + config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model + # variable name and be able to load any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, input_ids, token_type_ids=None, position_ids=None): + seq_length = input_ids.size(1) + if position_ids is None: + position_ids = torch.arange( + seq_length, dtype=torch.long, device=input_ids.device) + position_ids = position_ids.unsqueeze(0).expand_as(input_ids) + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + words_embeddings = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = words_embeddings + position_embeddings \ + + token_type_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertLayer(nn.Module): + + def __init__(self, config): + super(BertLayer, self).__init__() + self.attention = BertAttention(config) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward(self, hidden_states, attention_mask=None, head_mask=None): + attention_outputs = self.attention(hidden_states, attention_mask, + head_mask) + attention_output = attention_outputs[0] + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + outputs = (layer_output, ) + attention_outputs[ + 1:] # add attentions if we output them + if len(outputs) == 1: + return outputs[0] + return outputs + + +class 
BertEncoder(nn.Module): + + def __init__(self, config): + super(BertEncoder, self).__init__() + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.grad_checkpointing = False + self.layer = nn.ModuleList( + [BertLayer(config) for _ in range(config.num_hidden_layers)]) + + def forward(self, hidden_states, attention_mask=None, head_mask=None): + all_hidden_states = () + all_attentions = () + for i, layer_module in enumerate(self.layer): + if self.output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + if self.grad_checkpointing and not torch.jit.is_scripting(): + layer_outputs = checkpoint(layer_module, hidden_states, + attention_mask, head_mask[i]) + else: + layer_outputs = layer_module(hidden_states, attention_mask, + head_mask[i]) + if not isinstance(layer_outputs, tuple): + layer_outputs = (layer_outputs, ) + hidden_states = layer_outputs[0] + + if self.output_attentions: + all_attentions = all_attentions + (layer_outputs[1], ) + + # Add last layer + if self.output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + outputs = (hidden_states, ) + if self.output_hidden_states: + outputs = outputs + (all_hidden_states, ) + if self.output_attentions: + outputs = outputs + (all_attentions, ) + # last-layer hidden state, (all hidden states), (all attentions) + return outputs + + +class BertPreTrainedModel(nn.Module): + base_model_prefix = 'bert' + + def __init__(self, config): + super(BertPreTrainedModel, self).__init__() + self.config = config + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version + # which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_( + mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +@MODELS.register_module() +class BertModelCN(BertPreTrainedModel): + """The BERT model implementation for Chinese CLIP.""" + + def __init__(self, config): + config = BertConfig.from_dict(config) + super(BertModelCN, self).__init__(config) + + self.embeddings = BertEmbeddings(config) + self.encoder = BertEncoder(config) + + self.apply(self._init_weights) + + @torch.jit.ignore + def set_grad_checkpointing(self, enable=True): + if enable: + assert not self.config.output_attentions, \ + 'Grad checkpointing is currently conflict with ' \ + 'output_attentions for BertEncoder, ' \ + 'please set it to False in BertConfig' + + self.encoder.grad_checkpointing = enable + + def forward(self, + input_ids, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None): + if attention_mask is None: + attention_mask = torch.ones_like(input_ids) + if token_type_ids is None: + token_type_ids = torch.zeros_like(input_ids) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
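+ # For example, a (batch_size, seq_len) mask becomes (batch_size, 1, 1, seq_len) below and
+ # broadcasts against attention scores of shape (batch_size, num_heads, seq_len, seq_len).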
+ extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = extended_attention_mask.to( + dtype=next(self.parameters()).dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + if head_mask is not None: + if head_mask.dim() == 1: + head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze( + -1).unsqueeze(-1) + head_mask = head_mask.expand(self.config.num_hidden_layers, -1, + -1, -1, -1) + elif head_mask.dim() == 2: + head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze( + -1) # We can specify head_mask for each layer + head_mask = head_mask.to(dtype=next(self.parameters( + )).dtype) # switch to fload if need + fp16 compatibility + else: + head_mask = [None] * self.config.num_hidden_layers + + embedding_output = self.embeddings( + input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids) + encoder_outputs = self.encoder( + embedding_output, extended_attention_mask, head_mask=head_mask) + sequence_output = encoder_outputs[0] + # pooled_output = self.pooler(sequence_output) + pooled_output = None + + # add hidden_states and attentions if they are here + outputs = ( + sequence_output, + pooled_output, + ) + encoder_outputs[1:] + + # sequence_output, pooled_output, (hidden_states), (attentions) + return outputs diff --git a/mmpretrain/models/multimodal/chinese_clip/chinese_clip.py b/mmpretrain/models/multimodal/chinese_clip/chinese_clip.py new file mode 100644 index 0000000..40af564 --- /dev/null +++ b/mmpretrain/models/multimodal/chinese_clip/chinese_clip.py @@ -0,0 +1,446 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
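+# A rough sketch of how the `ChineseCLIP` model defined in this file is expected to be
+# configured. The concrete backbone/tokenizer settings are placeholders and depend on the
+# checkpoint being used:
+#
+#   model = dict(
+#       type='ChineseCLIP',
+#       vision_backbone=dict(type='ModifiedResNet', depth=50),
+#       text_backbone=dict(type='BertModelCN', config=dict(...)),  # hidden_size is read from this config
+#       tokenizer=dict(...),           # any registered TOKENIZER config
+#       proj_dim=1024,                 # placeholder projection dimension
+#       text_prototype='cifar100')     # a key of PROTOTYPE_MAP or a list of class names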
+from collections import OrderedDict +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from mmengine.model import BaseModel, BaseModule +from torch import nn + +from mmpretrain.datasets.categories import CIFAR100_CATEGORIES_CN +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample +from mmpretrain.utils import track_on_main_process +from .utils import OPENAI_PROMPT + +PROTOTYPE_MAP = {'cifar100': CIFAR100_CATEGORIES_CN} +PROMPT_MAP = {'openai': OPENAI_PROMPT} + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1): + super().__init__() + + self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + + self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + + self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity() + + self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * self.expansion) + + self.relu = nn.ReLU(inplace=True) + self.downsample = None + self.stride = stride + + if stride > 1 or inplanes != planes * Bottleneck.expansion: + self.downsample = nn.Sequential( + OrderedDict([('-1', nn.AvgPool2d(stride)), + ('0', + nn.Conv2d( + inplanes, + planes * self.expansion, + 1, + stride=1, + bias=False)), + ('1', nn.BatchNorm2d(planes * self.expansion))])) + + def forward(self, x: torch.Tensor): + identity = x + + out = self.relu(self.bn1(self.conv1(x))) + out = self.relu(self.bn2(self.conv2(out))) + out = self.avgpool(out) + out = self.bn3(self.conv3(out)) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + return out + + +class AttentionPool2d(nn.Module): + + def __init__(self, + spacial_dim: int, + embed_dim: int, + num_heads: int, + output_dim: int = None): + super().__init__() + self.positional_embedding = nn.Parameter( + torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5) + self.k_proj = nn.Linear(embed_dim, embed_dim) + self.q_proj = nn.Linear(embed_dim, embed_dim) + self.v_proj = nn.Linear(embed_dim, embed_dim) + self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) + self.num_heads = num_heads + + def forward(self, x): + x = x.reshape(x.shape[0], x.shape[1], + x.shape[2] * x.shape[3]).permute(2, 0, + 1) # NCHW -> (HW)NC + x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC + x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC + x, _ = F.multi_head_attention_forward( + query=x, + key=x, + value=x, + embed_dim_to_check=x.shape[-1], + num_heads=self.num_heads, + q_proj_weight=self.q_proj.weight, + k_proj_weight=self.k_proj.weight, + v_proj_weight=self.v_proj.weight, + in_proj_weight=None, + in_proj_bias=torch.cat( + [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), + bias_k=None, + bias_v=None, + add_zero_attn=False, + dropout_p=0, + out_proj_weight=self.c_proj.weight, + out_proj_bias=self.c_proj.bias, + use_separate_proj_weight=True, + training=self.training, + need_weights=False) + + return x[0] + + +@MODELS.register_module() +class ModifiedResNet(BaseModule): + """A modified ResNet contains the following changes: + + - Apply deep stem with an average pool instead of a max pool. 
+ - Performs anti-aliasing strided convolutions, where an avgpool is + prepended to convolutions with stride > 1 + - The final pooling layer is a QKV attention instead of an average pool + """ # noqa + + arch_settings = { + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth: int = 50, + base_channels: int = 64, + input_size: int = 224, + num_attn_heads: int = 32, + output_dim: int = 1024, + init_cfg: Optional[dict] = None): + super().__init__(init_cfg=init_cfg) + self.input_size = input_size + self.block, stage_blocks = self.arch_settings[depth] + + # the 3-layer stem + self.conv1 = nn.Conv2d( + 3, + base_channels // 2, + kernel_size=3, + stride=2, + padding=1, + bias=False) + self.bn1 = nn.BatchNorm2d(base_channels // 2) + self.conv2 = nn.Conv2d( + base_channels // 2, + base_channels // 2, + kernel_size=3, + padding=1, + bias=False) + self.bn2 = nn.BatchNorm2d(base_channels // 2) + self.conv3 = nn.Conv2d( + base_channels // 2, + base_channels, + kernel_size=3, + padding=1, + bias=False) + self.bn3 = nn.BatchNorm2d(base_channels) + self.avgpool = nn.AvgPool2d(2) + self.relu = nn.ReLU(inplace=True) + + # residual layers + # this is a *mutable* variable used during construction + self._inplanes = base_channels + self.layer1 = self._make_layer(base_channels, stage_blocks[0]) + self.layer2 = self._make_layer( + base_channels * 2, stage_blocks[1], stride=2) + self.layer3 = self._make_layer( + base_channels * 4, stage_blocks[2], stride=2) + self.layer4 = self._make_layer( + base_channels * 8, stage_blocks[3], stride=2) + + embed_dim = base_channels * 32 + self.attnpool = AttentionPool2d(input_size // 32, embed_dim, + num_attn_heads, output_dim) + + def _make_layer(self, planes, blocks, stride=1): + layers = [Bottleneck(self._inplanes, planes, stride)] + + self._inplanes = planes * Bottleneck.expansion + for _ in range(1, blocks): + layers.append(Bottleneck(self._inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + + def stem(x): + for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), + (self.conv3, self.bn3)]: + x = self.relu(bn(conv(x))) + x = self.avgpool(x) + return x + + x = x.type(self.conv1.weight.dtype) + x = stem(x) + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + x = self.attnpool(x) + + return x + + +@MODELS.register_module() +class ChineseCLIP(BaseModel): + """The implementation of `ChineseCLIP `_. + + Args: + vision_backbone (dict): Config dict for vision backbone. + text_backbone (dict): Config dict for text backbone. + tokenizer (dict): Config dict for text tokenizer. + proj_dim (int): Projection dimension for similarity computation. + text_prototype (str): Text prototype, which can be a key in + `PROTOTYPE_MAP` or list of text. + text_prompt (str): The prompt for text prototype. Defaults to 'openai'. + context_length (int): The context length to use. Defaults to 52. + data_preprocessor (Union[dict, nn.Module], optional): The config for + preprocessing input data. If None or no specified type, it will use + "MultiModalDataPreprocessor" as type. + See :class:`MultiModalDataPreprocessor` for more details. + Defaults to None. + init_cfg (dict, optional): The config to control the initialization. + Defaults to None. 
+ """ + + def __init__(self, + vision_backbone: dict, + text_backbone: dict, + tokenizer: dict, + proj_dim: int, + text_prototype: Union[str, List[str]], + text_prompt: str = 'openai', + context_length: int = 52, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + if data_preprocessor is None: + data_preprocessor = {} + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + + self.vision_backbone = MODELS.build(vision_backbone) + self.text_backbone = MODELS.build(text_backbone) + + if not isinstance(self.vision_backbone, ModifiedResNet): + self.vision_projection = nn.Parameter( + torch.empty(self.vision_backbone.embed_dims, proj_dim)) + text_hidden_size = text_backbone['config']['hidden_size'] + self.text_projection = nn.Parameter( + torch.empty(text_hidden_size, proj_dim)) + + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.tokenizer = TOKENIZER.build(tokenizer) + self.context_length = context_length + + # for zero-shot classification + if isinstance(text_prototype, + str) and text_prototype in PROTOTYPE_MAP.keys(): + self.prototype = PROTOTYPE_MAP[text_prototype] + else: + self.prototype = text_prototype + self.text_prototype_embeds = None + + self.prompt = PROMPT_MAP[text_prompt] + + def forward( + self, + images: torch.Tensor, + data_samples: Optional[list] = None, + mode: str = 'predict', + **kwargs, + ): + """The unified entry for a forward process in both training and test. + The method accepts the following modes: + + - "predict": Forward and return a list of data samples contain the + predict results. + + Args: + images (torch.Tensor): the preprocessed image tensor of shape + ``(N, C, H, W)``. + data_samples (List[DataSample], optional): The annotation data + of every samples. Defaults to None. + mode (str): Return what kind of value. Defaults to 'predict'. + """ + if mode == 'predict': + return self.predict(images, data_samples, **kwargs) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def extract_image_feat(self, images: torch.Tensor) -> torch.Tensor: + """The function to extract image latent features.""" + if isinstance(self.vision_backbone, ModifiedResNet): + return self.vision_backbone(images) + return self.vision_backbone(images)[-1] @ self.vision_projection + + def extract_text_feat(self, texts: torch.Tensor) -> torch.Tensor: + """The function to extract text latent features.""" + pad_index = self.tokenizer.vocab['[PAD]'] + attn_mask = texts.ne(pad_index) + # [batch_size, seq_length, hidden_size] + x = self.text_backbone(texts, attention_mask=attn_mask)[0] + return x[:, 0, :] @ self.text_projection + + def extract_feat( + self, images: torch.Tensor, + texts: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor]]: + """The function to extract image and text latent features, the input + image or text can not both be None.""" + + assert images is not None or texts is not None, \ + 'text and image cannot both be None!' 
+ if images is None: + return self.extract_text_feat(texts) + elif texts is None: + return self.extract_image_feat(images) + + image_features = self.extract_image_feat(images) + text_features = self.extract_text_feat(texts) + + image_features = image_features / image_features.norm( + dim=-1, keepdim=True) + text_features = text_features / text_features.norm( + dim=-1, keepdim=True) + + return image_features, text_features + + def compute_similarity(self, images, texts): + """Extract images and texts features and compute cosine similarity.""" + image_features, text_features = self.extract_feat( + images=images, texts=texts) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_image = logit_scale * image_features @ text_features.t() + logits_per_text = logits_per_image.t() + + # shape (N, N) + return logits_per_image, logits_per_text + + def predict(self, + images: torch.Tensor, + data_samples: DataSample = None) -> DataSample: + """Predict the classes of the input images. + + The prediction is for zero-shot classification and the text prototypes + will be prepared in thisfunction. + + Args: + images (torch.Tensor): The input images. + data_samples (DataSample): The data samples with information from + dataset. + + Returns: + DataSample: The results of prediction. + """ + + if self.text_prototype_embeds is None: + self.prepare_text_prototype(device=images.device) + + image_features = self.extract_image_feat(images=images) + image_features /= image_features.norm(dim=-1, keepdim=True) + + # cosine similarity as logits + logits_per_image = image_features @ self.text_prototype_embeds.to( + image_features.device) * self.logit_scale.exp() + + pred_scores = F.softmax(logits_per_image, dim=1) + pred_labels = pred_scores.argmax(dim=1, keepdim=True).detach() + + out_data_samples = [] + if data_samples is None: + data_samples = [None for _ in range(pred_scores.size(0))] + + for data_sample, score, label in zip(data_samples, pred_scores, + pred_labels): + if data_sample is None: + data_sample = DataSample() + + data_sample.set_pred_score(score).set_pred_label(label) + out_data_samples.append(data_sample) + return out_data_samples + + def prepare_text_prototype(self, device) -> None: + """The function to prepare text prototypes with prompt.""" + class_embeddings = [] + for classname in track_on_main_process(self.prototype, + 'Prepare text prototype...'): + # format with class + texts = [prompt(classname) for prompt in self.prompt] + tokenized_texts = self.tokenize(texts) + class_features = self.extract_text_feat(tokenized_texts.to(device)) + class_features /= class_features.norm(dim=-1, keepdim=True) + class_feature = class_features.mean(dim=0) + class_feature /= class_feature.norm() + class_embeddings.append(class_feature) + self.text_prototype_embeds = torch.stack( + class_embeddings, dim=1).to(device) + + def tokenize(self, texts: Union[str, List[str]]) -> torch.LongTensor: + """Returns the tokenized representation of given input string(s) + + Args: + texts (Union[str, List[str]]): An input string or a list of input + strings to tokenize + context_length (int): The context length to use. Defaults to 52. + + Returns: + torch.Tensor: Resulting tokens. 
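+
+ For example, with the default ``context_length`` of 52, ``tokenize('一只猫的照片')`` is
+ expected to return a ``LongTensor`` of shape ``(1, 52)``, zero-padded after the
+ ``[SEP]`` token (shape shown here for illustration only).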
+ """ + if isinstance(texts, str): + texts = [texts] + + all_tokens = [] + for text in texts: + # adapt the text to Chinese BERT vocab + text = text.lower().replace('“', "\"").replace('”', "\"") + + # add special tokens + all_tokens.append( + [self.tokenizer.vocab['[CLS]']] + + self.tokenizer.convert_tokens_to_ids( + self.tokenizer.tokenize(text))[:self.context_length - 2] + + [self.tokenizer.vocab['[SEP]']]) + + result = torch.zeros( + len(all_tokens), self.context_length, dtype=torch.long) + + for i, tokens in enumerate(all_tokens): + assert len(tokens) <= self.context_length + result[i, :len(tokens)] = torch.tensor(tokens) + + return result diff --git a/mmpretrain/models/multimodal/chinese_clip/utils.py b/mmpretrain/models/multimodal/chinese_clip/utils.py new file mode 100644 index 0000000..6964722 --- /dev/null +++ b/mmpretrain/models/multimodal/chinese_clip/utils.py @@ -0,0 +1,186 @@ +# Copyright (c) OpenMMLab. All rights reserved. +OPENAI_PROMPT = [ + lambda c: f'{c}的照片', + lambda c: f'质量差的{c}的照片', + lambda c: f'许多{c}的照片', + lambda c: f'{c}的雕塑', + lambda c: f'难以看到{c}的照片', + lambda c: f'{c}的低分辨率照片', + lambda c: f'{c}的渲染', + lambda c: f'涂鸦{c}', + lambda c: f'{c}的糟糕照片', + lambda c: f'{c}的裁剪照片', + lambda c: f'{c}的纹身', + lambda c: f'{c}的刺绣照片', + lambda c: f'很难看到{c}的照片', + lambda c: f'{c}的明亮照片', + lambda c: f'一张干净的{c}的照片', + lambda c: f'一张包含{c}的照片', + lambda c: f'{c}的深色照片', + lambda c: f'{c}的手绘画', + lambda c: f'我的{c}的照片', + lambda c: f'不自然的{c}的照片', + lambda c: f'一张酷的{c}的照片', + lambda c: f'{c}的特写照片', + lambda c: f'{c}的黑白照片', + lambda c: f'一幅{c}的画', + lambda c: f'一幅{c}的绘画', + lambda c: f'一张{c}的像素照片', + lambda c: f'{c}的雕像', + lambda c: f'一张{c}的明亮照片', + lambda c: f'{c}的裁剪照片', + lambda c: f'人造的{c}的照片', + lambda c: f'一张关于{c}的照片', + lambda c: f'损坏的{c}的jpeg照片', + lambda c: f'{c}的模糊照片', + lambda c: f'{c}的相片', + lambda c: f'一张{c}的好照片', + lambda c: f'{c}的渲染照', + lambda c: f'视频游戏中的{c}', + lambda c: f'一张{c}的照片', + lambda c: f'{c}的涂鸦', + lambda c: f'{c}的近距离照片', + lambda c: f'{c}的折纸', + lambda c: f'{c}在视频游戏中', + lambda c: f'{c}的草图', + lambda c: f'{c}的涂鸦照', + lambda c: f'{c}的折纸形状', + lambda c: f'低分辨率的{c}的照片', + lambda c: f'玩具{c}', + lambda c: f'{c}的副本', + lambda c: f'{c}的干净的照片', + lambda c: f'一张大{c}的照片', + lambda c: f'{c}的重现', + lambda c: f'一张漂亮的{c}的照片', + lambda c: f'一张奇怪的{c}的照片', + lambda c: f'模糊的{c}的照片', + lambda c: f'卡通{c}', + lambda c: f'{c}的艺术作品', + lambda c: f'{c}的素描', + lambda c: f'刺绣{c}', + lambda c: f'{c}的像素照', + lambda c: f'{c}的拍照', + lambda c: f'{c}的损坏的照片', + lambda c: f'高质量的{c}的照片', + lambda c: f'毛绒玩具{c}', + lambda c: f'漂亮的{c}的照片', + lambda c: f'小{c}的照片', + lambda c: f'照片是奇怪的{c}', + lambda c: f'漫画{c}', + lambda c: f'{c}的艺术照', + lambda c: f'{c}的图形', + lambda c: f'大{c}的照片', + lambda c: f'黑白的{c}的照片', + lambda c: f'{c}毛绒玩具', + lambda c: f'一张{c}的深色照片', + lambda c: f'{c}的摄影图', + lambda c: f'{c}的涂鸦照', + lambda c: f'玩具形状的{c}', + lambda c: f'拍了{c}的照片', + lambda c: f'酷酷的{c}的照片', + lambda c: f'照片里的小{c}', + lambda c: f'{c}的刺青', + lambda c: f'{c}的可爱的照片', + lambda c: f'一张{c}可爱的照片', + lambda c: f'{c}可爱图片', + lambda c: f'{c}酷炫图片', + lambda c: f'一张{c}的酷炫的照片', + lambda c: f'一张{c}的酷炫图片', + lambda c: f'这是{c}', + lambda c: f'{c}的好看照片', + lambda c: f'一张{c}的好看的图片', + lambda c: f'{c}的好看图片', + lambda c: f'{c}的照片。', + lambda c: f'质量差的{c}的照片。', + lambda c: f'许多{c}的照片。', + lambda c: f'{c}的雕塑。', + lambda c: f'难以看到{c}的照片。', + lambda c: f'{c}的低分辨率照片。', + lambda c: f'{c}的渲染。', + lambda c: f'涂鸦{c}。', + lambda c: f'{c}的糟糕照片。', + lambda c: f'{c}的裁剪照片。', + lambda c: f'{c}的纹身。', + lambda c: f'{c}的刺绣照片。', + lambda c: 
f'很难看到{c}的照片。', + lambda c: f'{c}的明亮照片。', + lambda c: f'一张干净的{c}的照片。', + lambda c: f'一张包含{c}的照片。', + lambda c: f'{c}的深色照片。', + lambda c: f'{c}的手绘画。', + lambda c: f'我的{c}的照片。', + lambda c: f'不自然的{c}的照片。', + lambda c: f'一张酷的{c}的照片。', + lambda c: f'{c}的特写照片。', + lambda c: f'{c}的黑白照片。', + lambda c: f'一幅{c}的画。', + lambda c: f'一幅{c}的绘画。', + lambda c: f'一张{c}的像素照片。', + lambda c: f'{c}的雕像。', + lambda c: f'一张{c}的明亮照片。', + lambda c: f'{c}的裁剪照片。', + lambda c: f'人造的{c}的照片。', + lambda c: f'一张关于{c}的照片。', + lambda c: f'损坏的{c}的jpeg照片。', + lambda c: f'{c}的模糊照片。', + lambda c: f'{c}的相片。', + lambda c: f'一张{c}的好照片。', + lambda c: f'{c}的渲染照。', + lambda c: f'视频游戏中的{c}。', + lambda c: f'一张{c}的照片。', + lambda c: f'{c}的涂鸦。', + lambda c: f'{c}的近距离照片。', + lambda c: f'{c}的折纸。', + lambda c: f'{c}在视频游戏中。', + lambda c: f'{c}的草图。', + lambda c: f'{c}的涂鸦照。', + lambda c: f'{c}的折纸形状。', + lambda c: f'低分辨率的{c}的照片。', + lambda c: f'玩具{c}。', + lambda c: f'{c}的副本。', + lambda c: f'{c}的干净的照片。', + lambda c: f'一张大{c}的照片。', + lambda c: f'{c}的重现。', + lambda c: f'一张漂亮的{c}的照片。', + lambda c: f'一张奇怪的{c}的照片。', + lambda c: f'模糊的{c}的照片。', + lambda c: f'卡通{c}。', + lambda c: f'{c}的艺术作品。', + lambda c: f'{c}的素描。', + lambda c: f'刺绣{c}。', + lambda c: f'{c}的像素照。', + lambda c: f'{c}的拍照。', + lambda c: f'{c}的损坏的照片。', + lambda c: f'高质量的{c}的照片。', + lambda c: f'毛绒玩具{c}。', + lambda c: f'漂亮的{c}的照片。', + lambda c: f'小{c}的照片。', + lambda c: f'照片是奇怪的{c}。', + lambda c: f'漫画{c}。', + lambda c: f'{c}的艺术照。', + lambda c: f'{c}的图形。', + lambda c: f'大{c}的照片。', + lambda c: f'黑白的{c}的照片。', + lambda c: f'{c}毛绒玩具。', + lambda c: f'一张{c}的深色照片。', + lambda c: f'{c}的摄影图。', + lambda c: f'{c}的涂鸦照。', + lambda c: f'玩具形状的{c}。', + lambda c: f'拍了{c}的照片。', + lambda c: f'酷酷的{c}的照片。', + lambda c: f'照片里的小{c}。', + lambda c: f'{c}的刺青。', + lambda c: f'{c}的可爱的照片。', + lambda c: f'一张{c}可爱的照片。', + lambda c: f'{c}可爱图片。', + lambda c: f'{c}酷炫图片。', + lambda c: f'一张{c}的酷炫的照片。', + lambda c: f'一张{c}的酷炫图片。', + lambda c: f'这是{c}。', + lambda c: f'{c}的好看照片。', + lambda c: f'一张{c}的好看的图片。', + lambda c: f'{c}的好看图片。', + lambda c: f'一种叫{c}的花的照片', + lambda c: f'一种叫{c}的食物的照片', + lambda c: f'{c}的卫星照片', +] diff --git a/mmpretrain/models/multimodal/clip/__init__.py b/mmpretrain/models/multimodal/clip/__init__.py new file mode 100644 index 0000000..f7a117e --- /dev/null +++ b/mmpretrain/models/multimodal/clip/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from ..clip.clip import CLIP, CLIPZeroShot +from ..clip.clip_transformer import CLIPProjection, CLIPTransformer + +__all__ = ['CLIP', 'CLIPZeroShot', 'CLIPTransformer', 'CLIPProjection'] diff --git a/mmpretrain/models/multimodal/clip/clip.py b/mmpretrain/models/multimodal/clip/clip.py new file mode 100644 index 0000000..b509a63 --- /dev/null +++ b/mmpretrain/models/multimodal/clip/clip.py @@ -0,0 +1,364 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
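+# A rough zero-shot usage sketch (placeholder config values; `text_prototype` and
+# `text_prompt` are the keys documented in the class docstring below):
+#
+#   model = MODELS.build(dict(type='CLIPZeroShot', ...,
+#                             text_prototype='imagenet', text_prompt='vanilla'))
+#   predictions = model(images, mode='predict')  # a list of data samples with the predict results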
+from abc import abstractmethod +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from mmengine.model import BaseModel +from torch import nn + +from mmpretrain.datasets.categories import (CIFAR100_CATEGORIES, + IMAGENET_SIMPLE_CATEGORIES) +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample +from mmpretrain.utils import track_on_main_process +from .utils import (OPENAI_CIFAR100_PROMPT, OPENAI_IMAGENET_PROMPT, + OPENAI_IMAGENET_PROMPT_SUB) + +CIFAR100_CATEGORIES = [' '.join(c.split('_')) for c in CIFAR100_CATEGORIES] +PROTOTYPE_MAP = { + 'imagenet': IMAGENET_SIMPLE_CATEGORIES, + 'cifar100': CIFAR100_CATEGORIES, +} +PROMPT_MAP = { + 'openai_imagenet': OPENAI_IMAGENET_PROMPT, + 'openai_cifar100': OPENAI_CIFAR100_PROMPT, + 'vanilla': [lambda c: f'a photo of a {c}'], + 'openai_imagenet_sub': OPENAI_IMAGENET_PROMPT_SUB +} + + +class LayerNorm(nn.LayerNorm): + """Subclass torch's LayerNorm to handle fp16.""" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Forward function.""" + orig_type = x.dtype + ret = super().forward(x.type(torch.float32)) + return ret.type(orig_type) + + +class CLIP(BaseModel): + """The implementation of `CLIP `_. + + Args: + vision_backbone (dict): Config dict for vision backbone. + text_backbone (dict): Config dict for text backbone. + tokenizer (dict): Config dict for text tokenizer. + proj_dim (int): Projection dimension for similarity computation. + text_prototype (str): Text prototype, which can be a key in + `PROTOTYPE_MAP` or list of text. + text_prompt (str): The prompt for text prototype. + Defaults to 'vanilla',which refers to "a photo of {cls}". + context_length (int): The context length to use. Defaults to 77. + data_preprocessor (Union[dict, nn.Module], optional): The config for + preprocessing input data. If None or no specified type, it will use + "MultiModalDataPreprocessor" as type. + See :class:`MultiModalDataPreprocessor` for more details. + Defaults to None. + init_cfg (dict, optional): The config to control the initialization. + Defaults to None. 
+ """ + + def __init__(self, + vision_backbone: dict, + projection: dict, + text_backbone: dict, + tokenizer: dict, + vocab_size: int, + transformer_width: int, + proj_dim: int, + context_length: int = 77, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + if data_preprocessor is None: + data_preprocessor = {} + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + + self.context_length = context_length + + # build the vision transformer + self.visual = MODELS.build(vision_backbone) + + # build the visual projection + self.visual_proj = MODELS.build(projection) + + # build attn_mask for casual-attn + text_backbone['attn_mask'] = self.build_attention_mask() + + # build the text transformer + self.transformer = MODELS.build(text_backbone) + + self.vocab_size = vocab_size + self.token_embedding = nn.Embedding(vocab_size, transformer_width) + self.positional_embedding = nn.Parameter( + torch.empty(self.context_length, transformer_width)) + self.ln_final = LayerNorm(transformer_width) + + self.text_projection = nn.Parameter( + torch.empty(transformer_width, proj_dim)) + self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) + + self.initialize_parameters() + + self.tokenizer = TOKENIZER.build(tokenizer) + + self.tokenizer.vocab = self.tokenizer.get_vocab( + ) # CLIPTokenizer has no attribute named 'vocab', so manually + + def initialize_parameters(self) -> None: + """Initialize the parameters. + + The pretrained weight will override the initialized parameters by this + function. + """ + nn.init.normal_(self.token_embedding.weight, std=0.02) + nn.init.normal_(self.positional_embedding, std=0.01) + + proj_std = (self.transformer.width**-0.5) * ( + (2 * self.transformer.layers)**-0.5) + attn_std = self.transformer.width**-0.5 + fc_std = (2 * self.transformer.width)**-0.5 + for block in self.transformer.resblocks: + nn.init.normal_(block.attn.in_proj_weight, std=attn_std) + nn.init.normal_(block.attn.out_proj.weight, std=proj_std) + nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) + nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) + + if self.text_projection is not None: + nn.init.normal_( + self.text_projection, std=self.transformer.width**-0.5) + + def build_attention_mask(self): + # lazily create causal attention mask, + # with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(self.context_length, self.context_length) + mask.fill_(float('-inf')) + mask.triu_(1) # zero out the lower diagonal + return mask + + def forward( + self, + images: torch.Tensor, + data_samples: Optional[list] = None, + mode: str = 'predict', + **kwargs, + ): + """The unified entry for a forward process in both training and test. + The method accepts the following modes: + + - "predict": Forward and return a list of data samples contain the + predict results. + + Args: + images (torch.Tensor): the preprocessed image tensor of shape + ``(N, C, H, W)``. + data_samples (List[DataSample], optional): The annotation data + of every samples. Defaults to None. + mode (str): Return what kind of value. Defaults to 'predict'. 
+ """ + if mode == 'predict': + return self.predict(images, data_samples, **kwargs) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def extract_image_feat(self, images: torch.Tensor) -> torch.Tensor: + """The function to extract image latent features.""" + return self.visual_proj(self.visual(images))[0] + + def extract_text_feat(self, texts: torch.Tensor) -> torch.Tensor: + """The function to extract text latent features.""" + x = self.token_embedding(texts) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding + x = x.permute(1, 0, 2) # NLD -> LND + x = self.transformer(x)[0] + + x = x.permute(1, 0, 2) # LND -> NLD + x = self.ln_final(x) + + # x.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding + # (eot_token is the highest number in each sequence) + x = x[torch.arange(x.shape[0]), + texts.argmax(dim=-1)] @ self.text_projection + + return x + + def extract_feat( + self, images: torch.Tensor, + texts: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor]]: + """The function to extract image and text latent features, the input + image or text can not both be None.""" + + assert images is not None or texts is not None, \ + 'text and image cannot both be None!' + if images is None: + return self.extract_text_feat(texts) + elif texts is None: + return self.extract_image_feat(images) + + image_features = self.extract_image_feat(images) + text_features = self.extract_text_feat(texts) + + image_features = image_features / image_features.norm( + dim=-1, keepdim=True) + text_features = text_features / text_features.norm( + dim=-1, keepdim=True) + + return image_features, text_features + + def compute_similarity(self, images, texts): + """Extract images and texts features and compute cosine similarity.""" + image_features, text_features = self.extract_feat( + images=images, texts=texts) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_image = logit_scale * image_features @ text_features.t() + logits_per_text = logits_per_image.t() + + # shape (N, N) + return logits_per_image, logits_per_text + + @abstractmethod + def predict(self, + images: torch.Tensor, + data_samples: DataSample = None) -> DataSample: + raise NotImplementedError + + def tokenize(self, texts: Union[str, List[str]]) -> torch.LongTensor: + """Returns the tokenized representation of given input string(s) + + Args: + texts (Union[str, List[str]]): An input string or a list of input + strings to tokenize + context_length (int): The context length to use. Defaults to 52. + + Returns: + torch.Tensor: Resulting tokens. 
+ """ + if isinstance(texts, str): + texts = [texts] + + all_tokens = [] + for text in texts: + # adapt the text to Chinese BERT vocab + # text = text.lower().replace('“', "\"").replace('”', "\"") + + # add special tokens + all_tokens.append( + [self.tokenizer.vocab['<|startoftext|>'] + ] + # <|startoftext|>代表[CLS] token + self.tokenizer.convert_tokens_to_ids( + self.tokenizer.tokenize(text))[:self.context_length - 2] + + [self.tokenizer.vocab['<|endoftext|>']]) + + result = torch.zeros( + len(all_tokens), self.context_length, dtype=torch.long) + + for i, tokens in enumerate(all_tokens): + assert len(tokens) <= self.context_length + result[i, :len(tokens)] = torch.tensor(tokens) + + return result + + +@MODELS.register_module() +class CLIPZeroShot(CLIP): + + def __init__( + self, + vision_backbone: dict, + projection: dict, + text_backbone: dict, + tokenizer: dict, + vocab_size: int, + transformer_width: int, + proj_dim: int, + context_length: int = 77, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None, + text_prototype: Union[str, List[str]] = 'imagenet', + text_prompt: str = 'vanilla', + ): + super(CLIPZeroShot, + self).__init__(vision_backbone, projection, text_backbone, + tokenizer, vocab_size, transformer_width, + proj_dim, context_length, data_preprocessor, + init_cfg) + + # for zero-shot classification + if isinstance(text_prototype, + str) and text_prototype in PROTOTYPE_MAP.keys(): + self.prototype = PROTOTYPE_MAP[text_prototype] + else: + self.prototype = text_prototype + self.text_prototype_embeds = None + + self.prompt = PROMPT_MAP[text_prompt] + + def predict(self, + images: torch.Tensor, + data_samples: DataSample = None) -> DataSample: + """Predict the classes of the input images. + + The prediction is for zero-shot classification and the text prototypes + will be prepared in thisfunction. + + Args: + images (torch.Tensor): The input images. + data_samples (DataSample): The data samples with information from + dataset. + + Returns: + DataSample: The results of prediction. 
+ """ + + if self.text_prototype_embeds is None: + self.prepare_text_prototype(device=images.device) + + image_features = self.extract_image_feat(images=images) + image_features /= image_features.norm(dim=-1, keepdim=True) + + # cosine similarity as logits + logits_per_image = image_features @ self.text_prototype_embeds.to( + image_features.device) * self.logit_scale.exp() + + pred_scores = F.softmax(logits_per_image, dim=1) + pred_labels = pred_scores.argmax(dim=1, keepdim=True).detach() + + out_data_samples = [] + if data_samples is None: + data_samples = [None for _ in range(pred_scores.size(0))] + + for data_sample, score, label in zip(data_samples, pred_scores, + pred_labels): + if data_sample is None: + data_sample = DataSample() + + data_sample.set_pred_score(score).set_pred_label(label) + out_data_samples.append(data_sample) + return out_data_samples + + def prepare_text_prototype(self, device) -> None: + """The function to prepare text prototypes with prompt.""" + class_embeddings = [] + for classname in track_on_main_process(self.prototype, + 'Prepare text prototype...'): + # format with class + texts = [prompt(classname) for prompt in self.prompt] + tokenized_texts = self.tokenize(texts) + class_features = self.extract_text_feat(tokenized_texts.to(device)) + class_features /= class_features.norm(dim=-1, keepdim=True) + class_feature = class_features.mean(dim=0) + class_feature /= class_feature.norm() + class_embeddings.append(class_feature) + self.text_prototype_embeds = torch.stack( + class_embeddings, dim=1).to(device) diff --git a/mmpretrain/models/multimodal/clip/clip_transformer.py b/mmpretrain/models/multimodal/clip/clip_transformer.py new file mode 100644 index 0000000..4b5f766 --- /dev/null +++ b/mmpretrain/models/multimodal/clip/clip_transformer.py @@ -0,0 +1,99 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modified from https://github.com/zejiangh/MILAN +from typing import Optional, Tuple + +import torch +from mmengine.model import BaseModule +from torch import nn + +from mmpretrain.models.utils.clip_generator_helper import \ + ResidualAttentionBlock +from mmpretrain.registry import MODELS + + +@MODELS.register_module() +class CLIPTransformer(nn.Module): + """Transformer. + + Both visual and text branches use this transformer. + + Args: + width (int): The feature dimension. + layers (int): The number of layers. + heads (int): The number of attention heads. + attn_mask (torch.Tensor, optional): The attention mask. + """ + + def __init__(self, + width: int, + layers: int, + heads: int, + attn_mask: Optional[torch.Tensor] = None) -> None: + super().__init__() + self.width = width + self.layers = layers + self.resblocks = nn.ModuleList() + for _ in range(layers - 1): + self.resblocks.append( + ResidualAttentionBlock(width, heads, attn_mask)) + self.resblocks.append( + ResidualAttentionBlock( + width, heads, attn_mask, return_attention=True)) + + def forward( + self, x: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Forward function.""" + z = [] + for idx, blk in enumerate(self.resblocks): + if idx < self.layers - 1: + x = blk(x) + z.append(x.permute(1, 0, 2)) + else: + x, attention = blk(x) + z.append(x.permute(1, 0, 2)) + return x, attention, z + + +@MODELS.register_module() +class CLIPProjection(BaseModule): + """Neck with CLIP Projection. + + Args: + in_channels (int): Number of channels in the input. + out_channels (int): Number of channels in the output. + init_cfg (dict | list[dict], optional): Initialization config dict. 
+ Defaults to None. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + init_cfg: Optional[dict] = None): + super(CLIPProjection, self).__init__(init_cfg=init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + scale = in_channels**-0.5 + self.proj = nn.Parameter(scale * + torch.randn(in_channels, out_channels)) + + def forward(self, inputs: Tuple) -> Tuple[torch.Tensor]: + """forward function. + + Args: + inputs (Tuple): The features extracted from + the backbone. Multiple stage inputs are acceptable but only + the last stage will be used. + Returns: + Tuple(torch.Tensor)): A tuple of reducted features. + """ + if isinstance(inputs, tuple): + inputs = inputs[-1] + out = inputs @ self.proj + elif isinstance(inputs, torch.Tensor): + out = inputs @ self.proj + else: + raise TypeError( + '`CLIPProjection` neck inputs should be tuple or torch.tensor') + return (out, ) diff --git a/mmpretrain/models/multimodal/clip/utils.py b/mmpretrain/models/multimodal/clip/utils.py new file mode 100644 index 0000000..65239bc --- /dev/null +++ b/mmpretrain/models/multimodal/clip/utils.py @@ -0,0 +1,115 @@ +# Copyright (c) OpenMMLab. All rights reserved. + +OPENAI_CIFAR100_PROMPT = [ + lambda c: f'a photo of a {c}.', + lambda c: f'a blurry photo of a {c}.', + lambda c: f'a black and white photo of a {c}.', + lambda c: f'a low contrast photo of a {c}.', + lambda c: f'a high contrast photo of a {c}.', + lambda c: f'a bad photo of a {c}.', + lambda c: f'a good photo of a {c}.', + lambda c: f'a photo of a small {c}.', + lambda c: f'a photo of a big {c}.', + lambda c: f'a photo of the {c}.', + lambda c: f'a blurry photo of the {c}.', + lambda c: f'a black and white photo of the {c}.', + lambda c: f'a low contrast photo of the {c}.', + lambda c: f'a high contrast photo of the {c}.', + lambda c: f'a bad photo of the {c}.', + lambda c: f'a good photo of the {c}.', + lambda c: f'a photo of the small {c}.', + lambda c: f'a photo of the big {c}.', +] + +OPENAI_IMAGENET_PROMPT_SUB = [ + lambda c: f'itap of a {c}.', + lambda c: f'a bad photo of the {c}.', + lambda c: f'a origami {c}.', + lambda c: f'a photo of the large {c}.', + lambda c: f'a {c} in a video game.', + lambda c: f'art of the {c}.', + lambda c: f'a photo of the small {c}.', +] + +OPENAI_IMAGENET_PROMPT = [ + lambda c: f'a bad photo of a {c}.', + lambda c: f'a photo of many {c}.', + lambda c: f'a sculpture of a {c}.', + lambda c: f'a photo of the hard to see {c}.', + lambda c: f'a low resolution photo of the {c}.', + lambda c: f'a rendering of a {c}.', + lambda c: f'graffiti of a {c}.', + lambda c: f'a bad photo of the {c}.', + lambda c: f'a cropped photo of the {c}.', + lambda c: f'a tattoo of a {c}.', + lambda c: f'the embroidered {c}.', + lambda c: f'a photo of a hard to see {c}.', + lambda c: f'a bright photo of a {c}.', + lambda c: f'a photo of a clean {c}.', + lambda c: f'a photo of a dirty {c}.', + lambda c: f'a dark photo of the {c}.', + lambda c: f'a drawing of a {c}.', + lambda c: f'a photo of my {c}.', + lambda c: f'the plastic {c}.', + lambda c: f'a photo of the cool {c}.', + lambda c: f'a close-up photo of a {c}.', + lambda c: f'a black and white photo of the {c}.', + lambda c: f'a painting of the {c}.', + lambda c: f'a painting of a {c}.', + lambda c: f'a pixelated photo of the {c}.', + lambda c: f'a sculpture of the {c}.', + lambda c: f'a bright photo of the {c}.', + lambda c: f'a cropped photo of a {c}.', + lambda c: f'a plastic {c}.', + lambda c: f'a photo of the dirty {c}.', + lambda c: f'a jpeg 
corrupted photo of a {c}.', + lambda c: f'a blurry photo of the {c}.', + lambda c: f'a photo of the {c}.', + lambda c: f'a good photo of the {c}.', + lambda c: f'a rendering of the {c}.', + lambda c: f'a {c} in a video game.', + lambda c: f'a photo of one {c}.', + lambda c: f'a doodle of a {c}.', + lambda c: f'a close-up photo of the {c}.', + lambda c: f'a photo of a {c}.', + lambda c: f'the origami {c}.', + lambda c: f'the {c} in a video game.', + lambda c: f'a sketch of a {c}.', + lambda c: f'a doodle of the {c}.', + lambda c: f'a origami {c}.', + lambda c: f'a low resolution photo of a {c}.', + lambda c: f'the toy {c}.', + lambda c: f'a rendition of the {c}.', + lambda c: f'a photo of the clean {c}.', + lambda c: f'a photo of a large {c}.', + lambda c: f'a rendition of a {c}.', + lambda c: f'a photo of a nice {c}.', + lambda c: f'a photo of a weird {c}.', + lambda c: f'a blurry photo of a {c}.', + lambda c: f'a cartoon {c}.', + lambda c: f'art of a {c}.', + lambda c: f'a sketch of the {c}.', + lambda c: f'a embroidered {c}.', + lambda c: f'a pixelated photo of a {c}.', + lambda c: f'itap of the {c}.', + lambda c: f'a jpeg corrupted photo of the {c}.', + lambda c: f'a good photo of a {c}.', + lambda c: f'a plushie {c}.', + lambda c: f'a photo of the nice {c}.', + lambda c: f'a photo of the small {c}.', + lambda c: f'a photo of the weird {c}.', + lambda c: f'the cartoon {c}.', + lambda c: f'art of the {c}.', + lambda c: f'a drawing of the {c}.', + lambda c: f'a photo of the large {c}.', + lambda c: f'a black and white photo of a {c}.', + lambda c: f'the plushie {c}.', + lambda c: f'a dark photo of a {c}.', + lambda c: f'itap of a {c}.', + lambda c: f'graffiti of the {c}.', + lambda c: f'a toy {c}.', + lambda c: f'itap of my {c}.', + lambda c: f'a photo of a cool {c}.', + lambda c: f'a photo of a small {c}.', + lambda c: f'a tattoo of the {c}.', +] diff --git a/mmpretrain/models/multimodal/flamingo/__init__.py b/mmpretrain/models/multimodal/flamingo/__init__.py new file mode 100644 index 0000000..e0bfd63 --- /dev/null +++ b/mmpretrain/models/multimodal/flamingo/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .adapter import FlamingoLMAdapter +from .flamingo import Flamingo + +__all__ = ['Flamingo', 'FlamingoLMAdapter'] diff --git a/mmpretrain/models/multimodal/flamingo/adapter.py b/mmpretrain/models/multimodal/flamingo/adapter.py new file mode 100644 index 0000000..bef0e2f --- /dev/null +++ b/mmpretrain/models/multimodal/flamingo/adapter.py @@ -0,0 +1,96 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random + +import torch.nn as nn + +from mmpretrain.registry import MODELS +from .modules import FlamingoLayer, GatedCrossAttentionBlock +from .utils import getattr_recursive, setattr_recursive + + +@MODELS.register_module() +class FlamingoLMAdapter: + """Mixin to add cross-attention layers to a language model.""" + + @classmethod + def extend_init( + cls, + base: object, + vis_hidden_size: int, + cross_attn_every_n_layers: int, + use_media_placement_augmentation: bool, + only_attend_previous: bool = False, + ): + """Initialize Flamingo by adding a new gated cross attn to the decoder. + + Store the media token id for computing the media locations. + + Args: + base (object): Base module could be any object that represent + a instance of language model. + vis_hidden_size: (int): Hidden size of vision embeddings. + cross_attn_every_n_layers: (int): Additional cross attn for + every n layers. 
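`extend_init` only inserts a `GatedCrossAttentionBlock` at every `cross_attn_every_n_layers`-th decoder layer; the remaining layers keep a `None` cross-attention slot. A quick sketch of which layer indices are selected, assuming a 32-layer decoder and a spacing of 4 (both numbers are illustrative, not taken from a config):

    num_decoder_layers = 32
    cross_attn_every_n_layers = 4

    selected = [
        layer_idx for layer_idx in range(num_decoder_layers)
        if (layer_idx + 1) % cross_attn_every_n_layers == 0
    ]
    print(selected)
    # [3, 7, 11, 15, 19, 23, 27, 31] -> these FlamingoLayers get a cross-attn block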
+ use_media_placement_augmentation: (bool): Whether to use media + placement augmentation. + """ + base.set_decoder_layers_attr_name('model.layers') + gated_cross_attn_layers = nn.ModuleList([ + GatedCrossAttentionBlock( + dim=base.config.hidden_size, dim_visual=vis_hidden_size) if + (layer_idx + 1) % cross_attn_every_n_layers == 0 else None + for layer_idx, _ in enumerate(base._get_decoder_layers()) + ]) + base._set_decoder_layers( + nn.ModuleList([ + FlamingoLayer(gated_cross_attn_layer, decoder_layer) + for gated_cross_attn_layer, decoder_layer in zip( + gated_cross_attn_layers, base._get_decoder_layers()) + ])) + base.use_media_placement_augmentation = use_media_placement_augmentation # noqa + base.initialized_flamingo = True + base.only_attend_previous = only_attend_previous + return base + + def set_decoder_layers_attr_name(self, decoder_layers_attr_name): + """Set decoder layers attribute name.""" + self.decoder_layers_attr_name = decoder_layers_attr_name + + def _get_decoder_layers(self): + """Get decoder layers according to attribute name.""" + return getattr_recursive(self, self.decoder_layers_attr_name) + + def _set_decoder_layers(self, value): + """Set decoder layers according to attribute name.""" + setattr_recursive(self, self.decoder_layers_attr_name, value) + + def forward(self, *input, **kwargs): + """Condition the Flamingo layers on the media locations before forward + function.""" + input_ids = kwargs['input_ids'] if 'input_ids' in kwargs else input[0] + media_locations = input_ids == self.media_token_id + if self.only_attend_previous: + attend_previous = True + elif self.use_media_placement_augmentation: + attend_previous = (random.random() < 0.5) + else: + attend_previous = False + + for layer in self.get_decoder().layers: + layer.condition_media_locations(media_locations) + layer.condition_attend_previous(attend_previous) + + return super().forward( + *input, **kwargs) # Call the other parent's forward method + + def is_conditioned(self) -> bool: + """Check whether all decoder layers are already conditioned.""" + return all(layer.is_conditioned() + for layer in self._get_decoder_layers()) + + def clear_conditioned_layers(self): + """Clear all conditional layers.""" + for layer in self._get_decoder_layers(): + layer.condition_vis_x(None) + layer.condition_media_locations(None) + layer.condition_attend_previous(None) diff --git a/mmpretrain/models/multimodal/flamingo/flamingo.py b/mmpretrain/models/multimodal/flamingo/flamingo.py new file mode 100644 index 0000000..729d6c7 --- /dev/null +++ b/mmpretrain/models/multimodal/flamingo/flamingo.py @@ -0,0 +1,323 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import re +from typing import List, Optional + +import torch +from mmengine.model import BaseModel + +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample +from .modules import PerceiverResampler +from .utils import ExtendModule + + +@MODELS.register_module() +class Flamingo(BaseModel): + """The Open Flamingo model for multiple tasks. + + Args: + vision_encoder (dict): The config of the vision encoder. + lang_encoder (dict): The config of the language encoder. + tokenizer (dict): The tokenizer to encode the text. + task (int): The task to perform prediction. + zeroshot_prompt (str): Prompt used for zero-shot inference. + Defaults to 'Output:'. + shot_prompt_tmpl (str): Prompt used for few-shot inference. + Defaults to ``Output:{caption}<|endofchunk|>``. + final_prompt_tmpl (str): Final part of prompt used for inference. 
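The adapter's `forward` derives `media_locations` by comparing the input ids against the stored media token id, and every `FlamingoLayer` is conditioned on that boolean mask before the base decoder runs. A toy sketch (the token ids are made up):

    import torch

    media_token_id = 32001  # hypothetical id of the image placeholder token
    input_ids = torch.tensor([[32001, 10, 11, 32001, 12, 13, 14]])

    media_locations = input_ids == media_token_id
    print(media_locations)
    # tensor([[ True, False, False,  True, False, False, False]])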
+ Defaults to 'Output:'. + generation_cfg (dict): The extra generation config, accept the keyword + arguments of [~`transformers.GenerationConfig`]. + Defaults to an empty dict. + data_preprocessor (Optional[dict]): The config for preprocessing input + data. If None or no specified type, it will use + "MutimodalDataPreprocessor" as type. + See :class:`MutimodalDataPreprocessor` for more details. + Defaults to None. + init_cfg (dict, optional): The initialization config. Defaults to None. + """ + + support_tasks = {'caption', 'vqa'} + _no_split_modules = [ + 'TransformerEncoderLayer', 'PerceiverAttention', + 'GatedCrossAttentionBlock', 'FlamingoLayer' + ] + + def __init__( + self, + vision_encoder: dict, + lang_encoder: dict, + tokenizer: dict, + task: str = 'caption', + zeroshot_prompt: str = 'Output:', + shot_prompt_tmpl: str = 'Output:{caption}<|endofchunk|>', + final_prompt_tmpl: str = 'Output:', + generation_cfg: dict = dict(), + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + if data_preprocessor is None: + data_preprocessor = {} + if isinstance(data_preprocessor, dict): + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super().__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + if task not in self.support_tasks: + raise ValueError(f'Unsupported task {task}, please select ' + f'the task from {self.support_tasks}.') + self.task = task + + # init tokenizer + self.tokenizer = TOKENIZER.build(tokenizer) + # add Flamingo special tokens to the tokenizer + self.tokenizer.add_special_tokens( + {'additional_special_tokens': ['<|endofchunk|>', '']}) + self.tokenizer.bos_token_id = 1 + if self.tokenizer.pad_token is None: + # Issue: GPT models don't have a pad token, which we use to + # modify labels for the loss. 
+ self.tokenizer.add_special_tokens({'pad_token': ''}) + + # Template to format the prompt input + self.zeroshot_prompt = zeroshot_prompt + self.shot_prompt_tmpl = shot_prompt_tmpl + self.final_prompt_tmpl = final_prompt_tmpl + + # init vision encoder related modules + vision_encoder_weight = vision_encoder.pop('pretrained', None) + self.vision_encoder = MODELS.build(vision_encoder) + if vision_encoder_weight is not None: + from mmengine.runner.checkpoint import load_checkpoint + load_checkpoint( + self.vision_encoder, + vision_encoder_weight, + map_location='cpu', + revise_keys=[(r'^backbone\.', '')], + ) + self.vision_encoder.is_init = True + + self.perceiver = PerceiverResampler(dim=self.vision_encoder.embed_dims) + + # init language encoder related modules + self.lang_encoder = ExtendModule(**lang_encoder) + self.lang_encoder.resize_token_embeddings(len(self.tokenizer)) + self.lang_encoder.media_token_id = self.tokenizer.encode('')[-1] + + # other necessary parameters + self.eoc_token_id = self.tokenizer.encode('<|endofchunk|>')[-1] + self.generation_cfg = { + 'num_beams': 1, + 'max_new_tokens': None, + 'temperature': 1.0, + 'top_k': 0, + 'top_p': 1.0, + 'no_repeat_ngram_size': 0, + 'prefix_allowed_tokens_fn': None, + 'length_penalty': 1.0, + 'num_return_sequences': 1, + 'do_sample': False, + 'early_stopping': False, + **generation_cfg, + } + + if hasattr(self, 'register_load_state_dict_post_hook'): + self.register_load_state_dict_post_hook(self._load_adapter_hook) + + def forward( + self, + images: torch.Tensor, + data_samples: Optional[List[DataSample]] = None, + mode: str = 'loss', + ): + """The unified entry for a forward process in both training and test. + The method should accept only one mode "loss": + + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. + + Args: + images (torch.Tensor): The input image tensor with different ndim + according to the inputs. + data_samples (List[DataSample], optional): The annotation + data of every samples. It's required if ``mode="loss"``. + Defaults to None. + mode (str): Return what kind of value. Defaults to 'loss'. + + Returns: + The return type depends on ``mode``. + - If ``mode="loss"``, return a dict of tensor. + """ + + if mode == 'loss': + return self.loss(images, data_samples) + elif mode == 'predict': + return self.predict(images, data_samples) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def extract_vision_feats(self, images: torch.Tensor) -> torch.Tensor: + """Extract vision features. + + Args: + images (torch.Tensor): For zero-shot, the input images tensor is + with shape (B, C, H, W), for few-shot, which is + (B, T_img, C, H, W) in general. Images in the same chunk + are collated along T_img. Video data is not supported yet. + + Returns: + torch.Tensor: Return extracted features. + """ + if images.ndim == 4: + # (B, C, H, W) -> (B, 1, C, H, W) for zero-shot. 
+ images = images.unsqueeze(1) + b, T = images.shape[:2] + # b T c h w -> (b T) c h w + images = images.view(b * T, *images.shape[-3:]) + + with torch.no_grad(): + vision_feats = self.vision_encoder(images)[-1][:, 1:] + + # (b T F) v d -> b T F v d Only support F=1 here + vision_feats = vision_feats.view(b, T, 1, *vision_feats.shape[-2:]) + + vision_feats = self.perceiver(vision_feats) # reshapes to (b, T, n, d) + return vision_feats + + def predict(self, + images: torch.Tensor, + data_samples: Optional[List[DataSample]] = None, + **generation_cfg): + """Predict generation results from a batch of inputs. + + Args: + images (torch.Tensor): For zero-shot, the input images tensor is + with shape (B, C, H, W), for few-shot, which is + (B, T_img, C, H, W) in general. Images in the same chunk + are collated along T_img. Video data is not supported yet. + data_samples (List[DataSample], optional): The annotation + data of every samples. Defaults to None. + **generation_cfg: Other keyword arguments accepted by the + ``generate`` method of :attr:`lang_encoder`. + + Returns: + List[DataSample]: Return list of data samples. + """ + # generation_cfg in prediction should be dominant + generation_cfg = {**self.generation_cfg, **generation_cfg} + num_beams = generation_cfg['num_beams'] + + if num_beams > 1: + images = images.repeat_interleave(num_beams, dim=0) + + # extra vision feats and set as language condition feats + vision_x = self.extract_vision_feats(images) + for layer in self.lang_encoder._get_decoder_layers(): + layer.condition_vis_x(vision_x) + + input_text = self.preprocess_text(data_samples, device=images.device) + + outputs = self.lang_encoder.generate( + input_text.input_ids, + attention_mask=input_text.attention_mask, + eos_token_id=self.eoc_token_id, + **generation_cfg) + + # clear conditioned layers for language models + self.lang_encoder.clear_conditioned_layers() + + # remove prefix + outputs = outputs[:, len(input_text.input_ids[0]):] + + return self.post_process(outputs, data_samples) + + def preprocess_text(self, data_samples: List[DataSample], + device: torch.device) -> List[DataSample]: + """Preprocess text in advance before fed into language model. + + Args: + data_samples (List[DataSample]): The annotation + data of every samples. Defaults to None. + device (torch.device): Device for text to put on. + + Returns: + List[DataSample]: Return list of data samples. + """ + prompts = [] + for sample in data_samples: + if 'shots' in sample: + # few-shot + shot_prompt = ''.join([ + self.shot_prompt_tmpl.format(**shot) + for shot in sample.get('shots') + ]) + else: + # zero-shot + shot_prompt = self.zeroshot_prompt + + # add final prompt + final_prompt = self.final_prompt_tmpl.format(**sample.to_dict()) + prompts.append(shot_prompt + final_prompt) + + self.tokenizer.padding_side = 'left' + input_text = self.tokenizer( + prompts, + padding='longest', + truncation=True, + return_tensors='pt', + max_length=2000, + ).to(device) + return input_text + + def post_process( + self, outputs: torch.Tensor, + data_samples: Optional[List[DataSample]]) -> List[DataSample]: + """Perform post process for outputs for different task. + + Args: + outputs (torch.Tensor): The generated outputs. + data_samples (List[DataSample], optional): The annotation + data of every samples. + + Returns: + List[DataSample]: Return list of data samples. 
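`preprocess_text` builds the language prompt by concatenating one formatted shot per support example and then appending the final prompt. A sketch using the default templates shown above and two made-up support captions:

    shot_prompt_tmpl = 'Output:{caption}<|endofchunk|>'  # default template above
    final_prompt_tmpl = 'Output:'

    shots = [{'caption': 'a dog on the grass'},          # hypothetical shots
             {'caption': 'two cats on a sofa'}]
    shot_prompt = ''.join(shot_prompt_tmpl.format(**shot) for shot in shots)
    prompt = shot_prompt + final_prompt_tmpl
    print(prompt)
    # Output:a dog on the grass<|endofchunk|>Output:two cats on a sofa<|endofchunk|>Output:

The tokenizer is then applied with `padding_side = 'left'`, which keeps the prompt adjacent to the generated continuation, as decoder-only generation requires.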
+ """ + outputs = self.tokenizer.batch_decode( + outputs, skip_special_tokens=True) + + if data_samples is None: + data_samples = [DataSample() for _ in range(len(outputs))] + + for output, data_sample in zip(outputs, data_samples): + # remove text pattern + if self.task == 'caption': + data_sample.pred_caption = re.split('Output', output, + 1)[0].replace('"', '') + elif self.task == 'vqa': + data_sample.pred_answer = re.split('Question|Answer', output, + 1)[0] + + return data_samples + + @staticmethod + def _load_adapter_hook(module, incompatible_keys): + """Avoid warning missing keys except adapter keys.""" + adapter_patterns = [ + '^perceiver', + 'lang_encoder.*embed_tokens', + 'lang_encoder.*gated_cross_attn_layers', + 'lang_encoder.*rotary_emb', + ] + for key in list(incompatible_keys.missing_keys): + if not any(re.match(pattern, key) for pattern in adapter_patterns): + incompatible_keys.missing_keys.remove(key) + + for key in list(incompatible_keys.unexpected_keys): + if 'position_ids' in key: + incompatible_keys.unexpected_keys.remove(key) + if 'lang_encoder.gated_cross_attn_layers' in key: + incompatible_keys.unexpected_keys.remove(key) diff --git a/mmpretrain/models/multimodal/flamingo/modules.py b/mmpretrain/models/multimodal/flamingo/modules.py new file mode 100644 index 0000000..730c61b --- /dev/null +++ b/mmpretrain/models/multimodal/flamingo/modules.py @@ -0,0 +1,398 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""Taken from https://github.com/lucidrains/flamingo-pytorch.""" + +from typing import Optional + +import torch +from einops import rearrange, repeat +from torch import einsum, nn + + +def FeedForward(dim, mult: int = 4): + """Feedforward layers. + + Args: + mult (int): Layer expansion muliplier. Defaults to 4. + """ + inner_dim = int(dim * mult) + return nn.Sequential( + nn.LayerNorm(dim), + nn.Linear(dim, inner_dim, bias=False), + nn.GELU(), + nn.Linear(inner_dim, dim, bias=False), + ) + + +class PerceiverAttention(nn.Module): + """Perceiver attetion layers. + + Args: + dim (int): Input dimensions. + dim_head (int): Number of dimension heads. Defaults to 64. + heads (int): Number of heads. Defaults to 8. + """ + + def __init__(self, *, dim: int, dim_head: int = 64, heads: int = 8): + super().__init__() + self.scale = dim_head**-0.5 + self.heads = heads + inner_dim = dim_head * heads + + self.norm_media = nn.LayerNorm(dim) + self.norm_latents = nn.LayerNorm(dim) + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False) + self.to_out = nn.Linear(inner_dim, dim, bias=False) + + def forward(self, x: torch.Tensor, latents: torch.Tensor): + """Forward function. + + Args: + x (torch.Tensor): image features of shape (b, T, n1, D). + latent (torch.Tensor): latent features of shape (b, T, n2, D). + """ + x = self.norm_media(x) + latents = self.norm_latents(latents) + + h = self.heads + + q = self.to_q(latents) + kv_input = torch.cat((x, latents), dim=-2) + k, v = self.to_kv(kv_input).chunk(2, dim=-1) + q = rearrange(q, 'b t n (h d) -> b h t n d', h=h) + k = rearrange(k, 'b t n (h d) -> b h t n d', h=h) + v = rearrange(v, 'b t n (h d) -> b h t n d', h=h) + q = q * self.scale + + # attention + sim = einsum('... i d, ... j d -> ... i j', q, k) + sim = sim - sim.amax(dim=-1, keepdim=True).detach() + attn = sim.softmax(dim=-1) + + out = einsum('... i j, ... j d -> ... 
i d', attn, v) + out = rearrange(out, 'b h t n d -> b t n (h d)', h=h) + return self.to_out(out) + + +class PerceiverResampler(nn.Module): + """Perceiver resampler layers. + + Args: + dim (int): Input dimensions. + depth (int): Depth of resampler. Defaults to 6. + dim_head (int): Number of dimension heads. Defaults to 64. + heads (int): Number of heads. Defaults to 8. + num_latents (int): Number of latents. Defaults to 64. + max_num_media (int, optional): Max number of media. + Defaults to None. + max_num_frames (int, optional): Max number of frames. + Defaults to None. + ff_mult (int): Feed forward multiplier. Defaults to 4. + """ + + def __init__( + self, + *, + dim: int, + depth: int = 6, + dim_head: int = 64, + heads: int = 8, + num_latents: int = 64, + max_num_media: Optional[int] = None, + max_num_frames: Optional[int] = None, + ff_mult: int = 4, + ): + super().__init__() + self.latents = nn.Parameter(torch.randn(num_latents, dim)) + self.frame_embs = ( + nn.Parameter(torch.randn(max_num_frames, dim)) + if max_num_frames is not None else None) + self.media_time_embs = ( + nn.Parameter(torch.randn(max_num_media, 1, dim)) + if max_num_media is not None else None) + + self.layers = nn.ModuleList([]) + for _ in range(depth): + self.layers.append( + nn.ModuleList([ + PerceiverAttention( + dim=dim, dim_head=dim_head, heads=heads), + FeedForward(dim=dim, mult=ff_mult), + ])) + + self.norm = nn.LayerNorm(dim) + + def forward(self, x: torch.Tensor): + """Forward function for perceiver sampler. + + Args: + x (torch.Tensor): image features of shape (b, T, F, v, D) + + Returns: + torch.Tensor: shape (b, T, n, D) where n is self.num_latents + """ + b, T, F, v = x.shape[:4] + + # frame and media time embeddings + if self.frame_embs is not None: + frame_embs = repeat( + self.frame_embs[:F], 'F d -> b T F v d', b=b, T=T, v=v) + x = x + frame_embs + x = rearrange(x, 'b T F v d -> b T (F v) d' + ) # flatten the frame and spatial dimensions + if self.media_time_embs is not None: + x = x + self.media_time_embs[:T] + + # blocks + latents = repeat(self.latents, 'n d -> b T n d', b=b, T=T) + for attn, ff in self.layers: + latents = attn(x, latents) + latents + latents = ff(latents) + latents + return self.norm(latents) + + +class MaskedCrossAttention(nn.Module): + """Masked cross attention layers. + + Args: + dim (int): Input text feature dimensions. + dim_visual (int): Input visual feature dimensions. + dim_head (int): Number of dimension heads. Defaults to 64. + heads (int): Number of heads. Defaults to 8. + only_attend_immediate_media (bool): Whether attend immediate media. + Defaults to True. + """ + + def __init__( + self, + *, + dim: int, + dim_visual: int, + dim_head: int = 64, + heads: int = 8, + only_attend_immediate_media: bool = True, + ): + super().__init__() + self.scale = dim_head**-0.5 + self.heads = heads + inner_dim = dim_head * heads + + self.norm = nn.LayerNorm(dim) + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_kv = nn.Linear(dim_visual, inner_dim * 2, bias=False) + self.to_out = nn.Linear(inner_dim, dim, bias=False) + + # whether for text to only attend to immediate preceding image + # or all previous images + self.only_attend_immediate_media = only_attend_immediate_media + + def forward(self, + x: torch.Tensor, + media: torch.Tensor, + media_locations: Optional[torch.Tensor] = None, + attend_previous: bool = True): + """Forward function for perceiver sampler. + + Args: + x (torch.Tensor): text features of shape (B, T_txt, D_txt). 
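As a shape check, the `PerceiverResampler` above compresses a variable number of visual tokens per media chunk into a fixed set of latents. A toy run, assuming the module added by this patch is importable (all sizes are arbitrary):

    import torch

    from mmpretrain.models.multimodal.flamingo.modules import PerceiverResampler

    resampler = PerceiverResampler(dim=64, depth=2, num_latents=16)

    # (b, T, F, v, d): 2 samples, 3 media chunks, 1 frame, 49 visual tokens, 64-d
    x = torch.randn(2, 3, 1, 49, 64)
    latents = resampler(x)
    print(latents.shape)  # torch.Size([2, 3, 16, 64]); always 16 tokens per chunk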
+ media (torch.Tensor): image features of shape + (B, T_img, n, D_img) where n is the dim of the latents. + media_locations (torch.Tensor, optional): boolean mask identifying + the media tokens in x of shape (B, T_txt). Defaults to None. + attend_previous (bool): If false, ignores immediately preceding + image and starts attending when following image. + Defaults to True. + """ + _, T_img, n = media.shape[:3] + h = self.heads + + x = self.norm(x) + + q = self.to_q(x) + media = rearrange(media, 'b t n d -> b (t n) d') + + k, v = self.to_kv(media).chunk(2, dim=-1) + q = rearrange(q, 'b n (h d) -> b h n d', h=h) + k = rearrange(k, 'b n (h d) -> b h n d', h=h) + v = rearrange(v, 'b n (h d) -> b h n d', h=h) + + q = q * self.scale + + sim = einsum('... i d, ... j d -> ... i j', q, k) + + if media_locations is not None: + # at each boolean of True, increment the time counter + # (relative to media time) + text_time = media_locations.cumsum(dim=-1) + media_time = torch.arange(T_img, device=x.device) + 1 + + if not attend_previous: + text_time[~media_locations] += 1 + # make sure max is still the number of images in the sequence + text_time[text_time > repeat( + torch.count_nonzero(media_locations, dim=1), + 'b -> b i', + i=text_time.shape[1], + )] = 0 + + # text time must equal media time if only attending to most + # immediate image otherwise, as long as text time is greater than + # media time (if attending to all previous images / media) + mask_op = torch.eq if self.only_attend_immediate_media else torch.ge # noqa + + text_to_media_mask = mask_op( + rearrange(text_time, 'b i -> b 1 i 1'), + repeat(media_time, 'j -> 1 1 1 (j n)', n=n), + ) + sim = sim.masked_fill(~text_to_media_mask, + -torch.finfo(sim.dtype).max) + + sim = sim - sim.amax(dim=-1, keepdim=True).detach() + attn = sim.softmax(dim=-1) + + if media_locations is not None and self.only_attend_immediate_media: + # any text without a preceding media needs to have + # attention zeroed out + text_without_media_mask = text_time == 0 + text_without_media_mask = rearrange(text_without_media_mask, + 'b i -> b 1 i 1') + attn = attn.masked_fill(text_without_media_mask, 0.0) + + out = einsum('... i j, ... j d -> ... i d', attn, v) + out = rearrange(out, 'b h n d -> b n (h d)') + return self.to_out(out) + + +class GatedCrossAttentionBlock(nn.Module): + """Gated cross attention layers. + + Args: + dim (int): Input text feature dimensions. + dim_visual (int): Input visual feature dimensions. + dim_head (int): Number of dimension heads. Defaults to 64. + heads (int): Number of heads. Defaults to 8. + ff_mult (int): Feed forward multiplier. Defaults to 4. + only_attend_immediate_media (bool): Whether attend immediate media. + Defaults to True. + """ + + def __init__( + self, + *, + dim: int, + dim_visual: int, + dim_head: int = 64, + heads: int = 8, + ff_mult: int = 4, + only_attend_immediate_media: bool = True, + ): + super().__init__() + self.attn = MaskedCrossAttention( + dim=dim, + dim_visual=dim_visual, + dim_head=dim_head, + heads=heads, + only_attend_immediate_media=only_attend_immediate_media, + ) + self.attn_gate = nn.Parameter(torch.tensor([0.0])) + + self.ff = FeedForward(dim, mult=ff_mult) + self.ff_gate = nn.Parameter(torch.tensor([0.0])) + + def forward(self, + x: torch.Tensor, + media: torch.Tensor, + media_locations: Optional[torch.Tensor] = None, + attend_previous: bool = True): + """Forward function for perceiver sampler. + + Args: + x (torch.Tensor): text features of shape (B, T_txt, D_txt). 
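The masking rule above can be checked in isolation: `cumsum` over `media_locations` assigns each text position the index of the latest preceding image, which is then compared against the image indices (`torch.eq` for immediate-only attention, `torch.ge` to allow all previous images). A worked example for one interleaved sequence, omitting the per-latent repeat and the `-inf` fill:

    import torch

    # layout: <img> A A <img> B B
    media_locations = torch.tensor([[True, False, False, True, False, False]])
    T_img = 2

    text_time = media_locations.cumsum(dim=-1)   # tensor([[1, 1, 1, 2, 2, 2]])
    media_time = torch.arange(T_img) + 1         # tensor([1, 2])

    mask = torch.eq(text_time.unsqueeze(-1), media_time)
    print(mask[0].int())
    # tensor([[1, 0],
    #         [1, 0],
    #         [1, 0],
    #         [0, 1],
    #         [0, 1],
    #         [0, 1]], dtype=torch.int32)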
+ media (torch.Tensor): image features of shape + (B, T_img, n, D_img) where n is the dim of the latents. + media_locations (torch.Tensor, optional): boolean mask identifying + the media tokens in x of shape (B, T_txt). Defaults to None. + attend_previous (bool): If false, ignores immediately preceding + image and starts attending when following image. + Defaults to True. + """ + x = ( + self.attn( + x, + media, + media_locations=media_locations, + attend_previous=attend_previous, + ) * self.attn_gate.tanh() + x) + x = self.ff(x) * self.ff_gate.tanh() + x + + return x + + +class FlamingoLayer(nn.Module): + """Faminogo layers. + + Args: + gated_cross_attn_layer (nn.Module): Gated cross attention layer. + decoder_layer (nn.Module): Decoder layer. + """ + + def __init__(self, gated_cross_attn_layer: nn.Module, + decoder_layer: nn.Module): + super().__init__() + self.gated_cross_attn_layer = gated_cross_attn_layer + self.decoder_layer = decoder_layer + self.vis_x = None + self.media_locations = None + + def is_conditioned(self) -> bool: + """Check whether the layer is conditioned.""" + return self.vis_x is not None + + def condition_vis_x(self, vis_x): + """Set condition vision features.""" + self.vis_x = vis_x + + def condition_media_locations(self, media_locations): + """Set condition media locations.""" + self.media_locations = media_locations + + def condition_attend_previous(self, attend_previous): + """Set attend previous.""" + self.attend_previous = attend_previous + + def forward( + self, + lang_x: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + **decoder_layer_kwargs, + ): + """Forward function. + + Args: + lang_x (torch.Tensor): language inputs. + attention_mask (torch.Tensor, optional): text attention mask. + Defaults to None. + **decoder_layer_kwargs: Other decoder layer keyword arguments. + """ + if self.gated_cross_attn_layer is None: + return self.decoder_layer( + lang_x, attention_mask=attention_mask, **decoder_layer_kwargs) + + if self.vis_x is None: + raise ValueError('vis_x must be conditioned before forward pass') + + if self.media_locations is None: + raise ValueError( + 'media_locations must be conditioned before forward pass') + + lang_x = self.gated_cross_attn_layer( + lang_x, + self.vis_x, + media_locations=self.media_locations, + attend_previous=self.attend_previous, + ) + lang_x = self.decoder_layer( + lang_x, attention_mask=attention_mask, **decoder_layer_kwargs) + return lang_x diff --git a/mmpretrain/models/multimodal/flamingo/utils.py b/mmpretrain/models/multimodal/flamingo/utils.py new file mode 100644 index 0000000..1077e14 --- /dev/null +++ b/mmpretrain/models/multimodal/flamingo/utils.py @@ -0,0 +1,64 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Any, Type + +from mmpretrain.registry import MODELS + + +class ExtendModule: + """Combine the base language model with adapter. This module will create a + instance from base with extended functions in adapter. + + Args: + base (object): Base module could be any object that represent + a instance of language model or a dict that can build the + base module. + adapter: (dict): Dict to build the adapter. 
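Both gates in `GatedCrossAttentionBlock` are initialised to zero, so `tanh(gate)` starts at 0 and the block behaves as an identity around the frozen language model until the gates are learned. A minimal check of that property:

    import torch

    attn_gate = torch.nn.Parameter(torch.tensor([0.0]))   # same init as above
    x = torch.randn(2, 7, 16)                              # toy text activations
    cross_attn_out = torch.randn_like(x)                   # stand-in for the attn

    y = cross_attn_out * attn_gate.tanh() + x
    print(torch.allclose(y, x))  # True: a zero gate leaves the input unchanged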
+ """ + + def __new__(cls, base: object, adapter: dict): + + if isinstance(base, dict): + base = MODELS.build(base) + + adapter_module = MODELS.get(adapter.pop('type')) + cls.extend_instance(base, adapter_module) + return adapter_module.extend_init(base, **adapter) + + @classmethod + def extend_instance(cls, base: object, mixin: Type[Any]): + """Apply mixins to a class instance after creation. + + Args: + base (object): Base module instance. + mixin: (Type[Any]): Adapter class type to mixin. + """ + base_cls = base.__class__ + base_cls_name = base.__class__.__name__ + base.__class__ = type( + base_cls_name, (mixin, base_cls), + {}) # mixin needs to go first for our forward() logic to work + + +def getattr_recursive(obj, att): + """ + Return nested attribute of obj + Example: getattr_recursive(obj, 'a.b.c') is equivalent to obj.a.b.c + """ + if att == '': + return obj + i = att.find('.') + if i < 0: + return getattr(obj, att) + else: + return getattr_recursive(getattr(obj, att[:i]), att[i + 1:]) + + +def setattr_recursive(obj, att, val): + """ + Set nested attribute of obj + Example: setattr_recursive(obj, 'a.b.c', val) + is equivalent to obj.a.b.c = val + """ + if '.' in att: + obj = getattr_recursive(obj, '.'.join(att.split('.')[:-1])) + setattr(obj, att.split('.')[-1], val) diff --git a/mmpretrain/models/multimodal/llava/__init__.py b/mmpretrain/models/multimodal/llava/__init__.py new file mode 100644 index 0000000..aef10d3 --- /dev/null +++ b/mmpretrain/models/multimodal/llava/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .llava import Llava +from .modules import LlavaLlamaForCausalLM + +__all__ = ['Llava', 'LlavaLlamaForCausalLM'] diff --git a/mmpretrain/models/multimodal/llava/llava.py b/mmpretrain/models/multimodal/llava/llava.py new file mode 100644 index 0000000..f829b09 --- /dev/null +++ b/mmpretrain/models/multimodal/llava/llava.py @@ -0,0 +1,267 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import re +from typing import List, Optional + +import torch +from mmengine.model import BaseModel + +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample +from ...utils import no_load_hf_pretrained_model +from .modules import LlavaLlamaForCausalLM + + +@MODELS.register_module() +class Llava(BaseModel): + """The LLaVA model for multiple tasks. + + Args: + vision_encoder (dict): The config of the vision encoder. + lang_encoder (dict): The config of the language encoder. + tokenizer (dict): The tokenizer to encode the text. + prompt_tmpl (str): Prompt template for inference. + task (int): The task to perform prediction. + use_im_start_end (bool): Whether to use the im_start and im_end tokens + mm_vision_select_layer (int): The index from vision encoder output. + Defaults to -1. + mm_proj_depth (int): The number of linear layers for multi-modal + projection. Defaults to 1. + load_lang_pretrained (bool): Whether to load the pretrained model of + language encoder. Defaults to False. + generation_cfg (dict): The extra generation config, accept the keyword + arguments of [~`transformers.GenerationConfig`]. + Defaults to an empty dict. + data_preprocessor (Optional[dict]): The config for preprocessing input + data. If None or no specified type, it will use + "MutimodalDataPreprocessor" as type. + See :class:`MutimodalDataPreprocessor` for more details. + Defaults to None. + init_cfg (dict, optional): The initialization config. Defaults to None. 
+ """ + + support_tasks = {'caption', 'vqa'} + im_patch_token = '' + im_start_token = '' + im_end_token = '' + + def __init__(self, + vision_encoder: dict, + lang_encoder: dict, + tokenizer: dict, + mm_hidden_size: int, + prompt_tmpl: str, + task: str = 'caption', + use_im_patch: bool = True, + use_im_start_end: bool = False, + mm_vision_select_layer: int = -1, + mm_proj_depth: int = 1, + generation_cfg: dict = dict(), + load_lang_pretrained: bool = False, + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + if data_preprocessor is None: + data_preprocessor = {} + if isinstance(data_preprocessor, dict): + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super().__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + if task not in self.support_tasks: + raise ValueError(f'Unsupported task {task}, please select ' + f'the task from {self.support_tasks}.') + self.task = task + + # init tokenizer + self.tokenizer = TOKENIZER.build(tokenizer) + # add Llava special tokens to the tokenizer + if use_im_patch: + self.tokenizer.add_tokens([self.im_patch_token], + special_tokens=True) + if use_im_start_end: + self.tokenizer.add_tokens([self.im_start_token, self.im_end_token], + special_tokens=True) + + # Template to format the prompt input + self.prompt_tmpl = prompt_tmpl + + # init vision encoder related modules + vision_encoder_weight = vision_encoder.pop('pretrained', None) + vision_encoder = MODELS.build(vision_encoder) + if vision_encoder_weight is not None: + from mmengine.runner.checkpoint import load_checkpoint + load_checkpoint( + vision_encoder, + vision_encoder_weight, + map_location='cpu', + revise_keys=[(r'^backbone\.', '')], + ) + vision_encoder.is_init = True + + # init language encoder related modules + if load_lang_pretrained: + lang_encoder = MODELS.build(lang_encoder) + else: + with no_load_hf_pretrained_model(): + lang_encoder = MODELS.build(lang_encoder) + lang_encoder.resize_token_embeddings(len(self.tokenizer)) + + self.model = LlavaLlamaForCausalLM( + vision_encoder=vision_encoder, + lang_encoder=lang_encoder, + mm_hidden_size=mm_hidden_size, + mm_proj_depth=mm_proj_depth, + use_im_start_end=use_im_start_end, + im_start_token=self.tokenizer.convert_tokens_to_ids( + self.im_start_token), + im_end_token=self.tokenizer.convert_tokens_to_ids( + self.im_end_token), + mm_vision_select_layer=mm_vision_select_layer) + + self.generation_cfg = generation_cfg + + if hasattr(self, 'register_load_state_dict_post_hook'): + self.register_load_state_dict_post_hook(self._load_ckpt_hook) + + def forward( + self, + images: torch.Tensor, + data_samples: Optional[List[DataSample]] = None, + mode: str = 'loss', + ): + """The unified entry for a forward process in both training and test. + + - "predict": Forward and return the predictions, which are fully + processed to a list of :obj:`DataSample`. + - "loss": Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. + + Args: + images (torch.Tensor): The input image tensor with different ndim + according to the inputs. + data_samples (List[DataSample], optional): The annotation + data of every samples. It's required if ``mode="loss"``. + Defaults to None. + mode (str): Return what kind of value. Defaults to 'loss'. + + Returns: + The return type depends on ``mode``. 
+ - If ``mode="loss"``, return a dict of tensor. + """ + + if mode == 'predict': + return self.predict(images, data_samples) + elif mode == 'loss': + raise NotImplementedError + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def predict(self, + images: torch.Tensor, + data_samples: Optional[List[DataSample]] = None, + **generation_cfg): + """Predict generation results from a batch of inputs. + + Args: + images (torch.Tensor): For zero-shot, the input images tensor is + with shape (B, C, H, W), for few-shot, which is + (B, T_img, C, H, W) in general. Images in the same chunk + are collated along T_img. Video data is not supported yet. + data_samples (List[DataSample], optional): The annotation + data of every samples. Defaults to None. + **generation_cfg: Other keyword arguments accepted by the + ``generate`` method of :attr:`lang_encoder`. + + Returns: + List[DataSample]: Return list of data samples. + """ + # generation_cfg in prediction should be dominant + generation_cfg = {**self.generation_cfg, **generation_cfg} + + input_text = self.preprocess_text(data_samples, device=images.device) + + outputs = self.model.generate( + input_text.input_ids, + attention_mask=input_text.attention_mask, + eos_token_id=self.tokenizer.eos_token_id, + images=images, + **generation_cfg) + + # remove prefix + outputs = outputs[:, len(input_text.input_ids[0]):] + + return self.post_process(outputs, data_samples) + + def preprocess_text(self, data_samples: List[DataSample], + device: torch.device) -> List[DataSample]: + """Preprocess text in advance before fed into language model. + + Args: + data_samples (List[DataSample]): The annotation + data of every samples. Defaults to None. + device (torch.device): Device for text to put on. + + Returns: + List[DataSample]: Return list of data samples. + """ + tokens = [] + for sample in data_samples: + prompt = self.prompt_tmpl.format(**sample.to_dict()) + input_ids = [] + while '' in prompt: + prefix, _, prompt = prompt.partition('') + input_ids.extend( + self.tokenizer(prefix, add_special_tokens=False).input_ids) + input_ids.append(-200) + if prompt: + input_ids.extend( + self.tokenizer(prompt, add_special_tokens=False).input_ids) + tokens.append(dict(input_ids=input_ids)) + + self.tokenizer.padding_side = 'left' + input_text = self.tokenizer.pad( + tokens, + padding='longest', + return_tensors='pt', + max_length=2000, + ).to(device) + return input_text + + def post_process( + self, outputs: torch.Tensor, + data_samples: Optional[List[DataSample]]) -> List[DataSample]: + """Perform post process for outputs for different task. + + Args: + outputs (torch.Tensor): The generated outputs. + data_samples (List[DataSample], optional): The annotation + data of every samples. + + Returns: + List[DataSample]: Return list of data samples. 
+ """ + outputs = self.tokenizer.batch_decode( + outputs, skip_special_tokens=True) + + if data_samples is None: + data_samples = [DataSample() for _ in range(len(outputs))] + + for output, data_sample in zip(outputs, data_samples): + # remove text pattern + if self.task == 'caption': + data_sample.pred_caption = output + elif self.task == 'vqa': + data_sample.pred_answer = output + + return data_samples + + @staticmethod + def _load_ckpt_hook(module, incompatible_keys): + """Avoid warning missing keys except lang_encoder keys.""" + for key in list(incompatible_keys.missing_keys): + if re.match('model.vision_tower', key): + incompatible_keys.missing_keys.remove(key) diff --git a/mmpretrain/models/multimodal/llava/modules.py b/mmpretrain/models/multimodal/llava/modules.py new file mode 100644 index 0000000..fa3c6bb --- /dev/null +++ b/mmpretrain/models/multimodal/llava/modules.py @@ -0,0 +1,234 @@ +# Copyright 2023 Haotian Liu +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Union + +import torch +import torch.nn as nn +from transformers import PreTrainedModel + +DEFAULT_IMAGE_TOKEN = '' +DEFAULT_IMAGE_PATCH_TOKEN = '' +DEFAULT_IM_START_TOKEN = '' +DEFAULT_IM_END_TOKEN = '' + + +class LlavaLlamaForCausalLM(PreTrainedModel): + + def __init__(self, + vision_encoder, + lang_encoder, + mm_hidden_size, + use_im_start_end=True, + mm_proj_depth=1, + im_start_token: Optional[int] = None, + im_end_token: Optional[int] = None, + im_token_index: int = -200, + mm_vision_select_layer: int = -1): + super().__init__(lang_encoder.config) + self.vision_tower = vision_encoder + self.lang_encoder = lang_encoder + + self.use_im_start_end = use_im_start_end + self.im_start_token = im_start_token + self.im_end_token = im_end_token + self.mm_hidden_size = mm_hidden_size + self.mm_vision_select_layer = mm_vision_select_layer + self.im_token_index = im_token_index + self.lang_hidden_size = lang_encoder.config.hidden_size + + if mm_proj_depth == 1: + # Llava V1 + mm_projector = nn.Linear(self.mm_hidden_size, + self.lang_hidden_size) + self.lang_encoder.model.add_module('mm_projector', mm_projector) + elif mm_proj_depth > 1: + # Llava V1.5 + modules = [nn.Linear(self.mm_hidden_size, self.lang_hidden_size)] + for _ in range(1, mm_proj_depth): + modules.append(nn.GELU()) + modules.append( + nn.Linear(self.lang_hidden_size, self.lang_hidden_size)) + mm_projector = nn.Sequential(*modules) + self.lang_encoder.model.add_module('mm_projector', mm_projector) + elif mm_proj_depth == 0: + self.lang_encoder.model.add_module('mm_projector', nn.Identity()) + + self.post_init() + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + images: 
Optional[torch.FloatTensor] = None, + return_dict: Optional[bool] = None, + ): + output_attentions = ( + output_attentions if output_attentions is not None else + self.config.output_attentions) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else + self.config.output_hidden_states) + return_dict = ( + return_dict + if return_dict is not None else self.config.use_return_dict) + + (input_ids, attention_mask, past_key_values, inputs_embeds, + labels) = self.forward_vision_tower(input_ids, attention_mask, + past_key_values, labels, images) + + return self.lang_encoder( + input_ids=input_ids, + attention_mask=attention_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + labels=labels, + ) + + def prepare_inputs_for_generation(self, + input_ids, + past_key_values=None, + attention_mask=None, + inputs_embeds=None, + **kwargs): + if past_key_values: + input_ids = input_ids[:, -1:] + + # if `inputs_embeds` are passed, we only want to use + # them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {'inputs_embeds': inputs_embeds} + else: + model_inputs = {'input_ids': input_ids} + + model_inputs.update({ + 'past_key_values': past_key_values, + 'use_cache': kwargs.get('use_cache'), + 'attention_mask': attention_mask, + 'images': kwargs.get('images', None), + }) + return model_inputs + + def forward_vision_tower( + self, + input_ids: torch.LongTensor, + attention_mask: torch.LongTensor, + past_key_values: torch.FloatTensor, + labels: torch.LongTensor, + images: Union[torch.FloatTensor, None] = None, + ): + if self.vision_tower is None or images is None or input_ids.shape[ + 1] == 1: + if (past_key_values is not None and self.vision_tower is not None + and images is not None and input_ids.shape[1] == 1): + attention_mask = torch.ones( + (attention_mask.shape[0], + past_key_values[-1][-1].shape[-2] + 1), + dtype=attention_mask.dtype, + device=attention_mask.device) + return input_ids, attention_mask, past_key_values, None, labels + + with torch.no_grad(): + # TODO: support variable number of images (single now) + feats = self.vision_tower(images) + image_features = feats[-1][:, 1:] + + image_features = self.lang_encoder.model.mm_projector(image_features) + + new_input_embeds = [] + new_labels = [] if labels is not None else None + new_attn_mask = [] if attention_mask is not None else None + for batch_idx, cur_input_ids in enumerate(input_ids): + cur_img = image_features[batch_idx] + + if (cur_input_ids != self.im_token_index).all(): + # multimodal LLM, but the current sample is not multimodal + new_input_embeds.append(self.embed_tokens(cur_input_ids)) + if labels is not None: + new_labels.append(labels[batch_idx]) + if attention_mask is not None: + new_attn_mask.append(attention_mask[batch_idx]) + continue + + img_idx = torch.where(cur_input_ids == self.im_token_index)[0][0] + if self.use_im_start_end: + cur_new_input_embeds = torch.cat( + [ + self.embed_tokens(cur_input_ids[:img_idx - 1]), + self.embed_tokens(cur_input_ids[img_idx - 1:img_idx]), + cur_img, + self.embed_tokens( + cur_input_ids[img_idx + 1:img_idx + 2]), + self.embed_tokens(cur_input_ids[img_idx + 2:]), + ], + dim=0, + ) + else: + cur_new_input_embeds = torch.cat( + [ + self.embed_tokens(cur_input_ids[:img_idx]), + cur_img, + self.embed_tokens(cur_input_ids[img_idx + 1:]), + ], + dim=0, + ) + 
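+            # At this point `cur_new_input_embeds` interleaves the text token
+            # embeddings with the projected image features, replacing the
+            # image placeholder token (`im_token_index`) in the sequence.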
new_input_embeds.append(cur_new_input_embeds) + + if labels is not None: + cur_new_labels = torch.cat([ + labels[batch_idx, :img_idx], + labels.new_full((cur_img.size(0), ), -100), + labels[batch_idx, img_idx + 1:], + ], + dim=0) + new_labels.append(cur_new_labels) + + if attention_mask is not None: + cur_attn_mask = torch.cat([ + attention_mask[batch_idx, :img_idx], + attention_mask.new_full((cur_img.size(0), ), True), + attention_mask[batch_idx, img_idx + 1:], + ], + dim=0) + new_attn_mask.append(cur_attn_mask) + + inputs_embeds = torch.stack(new_input_embeds, dim=0) + if labels is not None: + labels = torch.stack(new_labels, dim=0) + if attention_mask is not None: + attention_mask = torch.stack(new_attn_mask, dim=0) + + return None, attention_mask, past_key_values, inputs_embeds, labels + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += (tuple( + past_state.index_select(0, beam_idx) + for past_state in layer_past), ) + return reordered_past + + def embed_tokens(self, input_ids): + return self.lang_encoder.model.embed_tokens(input_ids) diff --git a/mmpretrain/models/multimodal/minigpt4/__init__.py b/mmpretrain/models/multimodal/minigpt4/__init__.py new file mode 100644 index 0000000..5358bb1 --- /dev/null +++ b/mmpretrain/models/multimodal/minigpt4/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .minigpt4 import MiniGPT4 + +__all__ = ['MiniGPT4'] diff --git a/mmpretrain/models/multimodal/minigpt4/minigpt4.py b/mmpretrain/models/multimodal/minigpt4/minigpt4.py new file mode 100644 index 0000000..d25d0b6 --- /dev/null +++ b/mmpretrain/models/multimodal/minigpt4/minigpt4.py @@ -0,0 +1,410 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import random +import re +from typing import List, Optional, Tuple + +import torch +import torch.nn as nn +from mmengine.logging import MMLogger +from mmengine.model import BaseModel + +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample + + +@MODELS.register_module() +class MiniGPT4(BaseModel): + """The multi-modality model of MiniGPT-4. + + The implementation of `MiniGPT-4 `_. + Modified from https://github.com/Vision-CAIR/MiniGPT-4/blob/main/minigpt4/models/mini_gpt4.py + + Args: + vision_encoder (dict): The config for vision encoder. + q_former_model (dict): The config for Qformer. + lang_encoder (dict): The config for language model. + tokenizer (dict): The config for tokenizer. + task (str): To define the task, which control the processing of text. + Defaults to 'caption'. + freeze_vit (bool): Freeze the training of ViT. Defaults to True. + freeze_q_former (bool): Freeze the training of Qformer. Defaults to + True. + num_query_token (int): Number of query tokens of Qformer. Defaults to + 32. + prompt_template (dict): Multi-language prompt template of the model. Defaults to dict([ ('en', '###Ask: {} ###Answer: '), + ('zh', '###问:{} ###答:')]) + raw_prompts (dict): Prompts for training. Defaults to dict(). + max_txt_len (int): Max token length while doing tokenization. Defaults + to 32. + end_sym (str): Ended symbol of the sequence. Defaults to '###'. + generation_cfg (dict): The config of text generation. Defaults to + dict(). + data_preprocessor (:obj:`BaseDataPreprocessor`): Used for + pre-processing data sampled by dataloader to the format accepted by + :meth:`forward`. Defaults to None. + init_cfg (dict): Initialization config dict. Defaults to None. 
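+    Note that data samples used in :meth:`loss` are expected to carry a
+    ``chat_content`` field and a ``lang`` field ('en' or 'zh'), while
+    :meth:`predict` falls back to the English prompt list when ``lang``
+    is missing.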
+ """ # noqa + + def __init__(self, + vision_encoder: dict, + q_former_model: dict, + lang_encoder: dict, + tokenizer: dict, + task: str = 'caption', + freeze_vit: bool = True, + freeze_q_former: bool = True, + num_query_token: int = 32, + prompt_template: dict = dict([('en', + '###Ask: {} ###Answer: '), + ('zh', '###问:{} ###答:')]), + raw_prompts: dict = dict(), + max_txt_len: int = 32, + end_sym: str = '###', + generation_cfg: dict = dict(), + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + if data_preprocessor is None: + data_preprocessor = {} + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super().__init__( + data_preprocessor=data_preprocessor, init_cfg=init_cfg) + self.task = task + logger = MMLogger.get_current_instance() + + # build vision model + vision_encoder_weight = vision_encoder.pop('pretrained', None) + self.vision_encoder = MODELS.build(vision_encoder) + self.ln_vision = nn.LayerNorm(self.vision_encoder.embed_dims) + + if vision_encoder_weight is not None: + from mmengine.runner.checkpoint import load_checkpoint + load_checkpoint(self.vision_encoder, vision_encoder_weight) + self.vision_encoder.is_init = True + if freeze_vit: + for name, param in self.ln_vision.named_parameters(): + param.requires_grad = False + self.ln_vision = self.ln_vision.eval() + else: + logger.warning('Please check `frozen_stages` in the dict of' + '`vision_encoder`. Also set it to be -1 if do not' + 'freeze ViT.') + + # build Qformer + q_former_model_weight = q_former_model.pop('pretrained', None) + self.q_former = MODELS.build(q_former_model) + self.q_former.cls = None + self.q_former.bert.embeddings.word_embeddings = None + self.q_former.bert.embeddings.position_embeddings = None + for layer in self.q_former.bert.encoder.layer: + layer.output = None + layer.intermediate = None + + self.query_tokens = nn.Parameter( + torch.zeros(1, num_query_token, self.q_former.config.hidden_size)) + self.query_tokens.data.normal_( + mean=0.0, std=self.q_former.config.initializer_range) + + if q_former_model_weight is not None: + from mmengine.runner.checkpoint import CheckpointLoader + state_dict = CheckpointLoader.load_checkpoint( + q_former_model_weight)['state_dict'] + self.load_state_dict(state_dict, strict=False) + # The ln_vision weights are also in the q-former checkpoint. 
+ setattr(self.ln_vision, 'is_init', True) + setattr(self.q_former, 'is_init', True) + + if freeze_q_former: + for name, param in self.q_former.named_parameters(): + param.requires_grad = False + self.q_former.eval() + self.query_tokens.requires_grad = False + + # build language model + self.llama_tokenizer = TOKENIZER.build(tokenizer) + self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token + + self.llama_model = MODELS.build(lang_encoder) + for name, param in self.llama_model.named_parameters(): + param.requires_grad = False + + # build linear projection layer + self.llama_proj = nn.Linear(self.q_former.config.hidden_size, + self.llama_model.config.hidden_size) + self.max_txt_len = max_txt_len + self.end_sym = end_sym + self.end_token_id = self.llama_tokenizer.encode(end_sym)[-1] + + # set prompts + self.en_prompt_list, self.zh_prompt_list = [], [] + if raw_prompts.get('en') is not None: + en_filted_prompts = [ + raw_prompt for raw_prompt in raw_prompts['en'] + if '' in raw_prompt + ] + self.en_prompt_list = [ + prompt_template['en'].format(p) for p in en_filted_prompts + ] + if raw_prompts.get('zh') is not None: + zh_filted_prompts = [ + raw_prompt for raw_prompt in raw_prompts['zh'] + if '' in raw_prompt + ] + self.zh_prompt_list = [ + prompt_template['zh'].format(p) for p in zh_filted_prompts + ] + + # update generation configs + self.generation_cfg = dict( + max_new_tokens=300, + num_beams=1, + do_sample=True, + min_length=1, + top_p=0.9, + repetition_penalty=1.1, + length_penalty=1.0, + temperature=1.0) + self.generation_cfg.update(**generation_cfg) + + if hasattr(self, 'register_load_state_dict_post_hook'): + self.register_load_state_dict_post_hook(self._load_llama_proj_hook) + + def half(self): + self.llama_model = self.llama_model.half() + return self + + def encode_img(self, + images: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """The function to encode the images.""" + device = images.device + x = self.vision_encoder(images)[0] + image_embeds = self.ln_vision(x).to(device) + image_atts = torch.ones( + image_embeds.size()[:-1], dtype=torch.long).to(device) + + query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) + query_output = self.q_former.bert( + query_embeds=query_tokens, + encoder_hidden_states=image_embeds, + encoder_attention_mask=image_atts, + return_dict=True, + ) + + inputs_llama = self.llama_proj(query_output.last_hidden_state) + atts_llama = torch.ones( + inputs_llama.size()[:-1], dtype=torch.long).to(images.device) + return inputs_llama, atts_llama + + def prompt_wrap(self, img_embeds: torch.Tensor, atts_img: torch.Tensor, + prompt: List[str]) -> Tuple[torch.Tensor, torch.Tensor]: + """The function to wrap the image and prompt. + + Make sure that len(prompt) == img_embeds.shape[0]. + + Args: + img_embeds (torch.Tensor): The embedding of the input images. + atts_img (torch.Tensor): Attention map of the image embeddings. + prompt (List[str]): The prompt of the batch data. + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The embedding and attention map. 
+ """ + if len(prompt) > 0: + p_before_list, p_after_list = [], [] + for pro in prompt: + p_before, p_after = pro.split('') + p_before_list.append(p_before) + p_after_list.append(p_after) + p_before_tokens = self.llama_tokenizer( + p_before_list, + return_tensors='pt', + padding='longest', + add_special_tokens=False).to(img_embeds.device) + p_after_tokens = self.llama_tokenizer( + p_after_list, + return_tensors='pt', + padding='longest', + add_special_tokens=False).to(img_embeds.device) + p_before_embeds = self.llama_model.model.embed_tokens( + p_before_tokens.input_ids) + p_after_embeds = self.llama_model.model.embed_tokens( + p_after_tokens.input_ids) + wrapped_img_embeds = torch.cat( + [p_before_embeds, img_embeds, p_after_embeds], dim=1) + wrapped_atts_img = atts_img[:, :1].expand( + -1, wrapped_img_embeds.shape[1]) + return wrapped_img_embeds, wrapped_atts_img + else: + return img_embeds, atts_img + + def loss(self, + images: torch.Tensor, + data_samples: Optional[List[DataSample]] = None) -> dict: + """The forward function in training. + + Args: + inputs (List[torch.Tensor]): The input images. + data_samples (List[DataSample]): All elements required + during the forward function. + + Returns: + Dict[str, torch.Tensor]: A dictionary of loss components. + """ + img_embeds, atts_img = self.encode_img(images) + + self.llama_tokenizer.padding_side = 'right' + + prompts, texts = [], [] + for t in data_samples: + chat_content = t.chat_content + split_mark = '###Answer: ' if t.lang == 'en' else '###答:' + prompt, text = chat_content.split(split_mark) + prompt += split_mark + text += self.end_sym + prompts.append(prompt) + texts.append(text) + + img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, prompts) + + to_regress_tokens = self.llama_tokenizer( + texts, + return_tensors='pt', + padding='longest', + truncation=True, + max_length=self.max_txt_len, + add_special_tokens=False).to(images.device) + + targets = to_regress_tokens.input_ids.masked_fill( + to_regress_tokens.input_ids == self.llama_tokenizer.pad_token_id, + -100) + + empty_targets = ( + torch.ones([atts_img.shape[0], atts_img.shape[1] + 1], + dtype=torch.long).to(images.device).fill_( + -100) # plus one for bos + ) + targets = torch.cat([empty_targets, targets], dim=1) + + batch_size = img_embeds.shape[0] + bos = torch.ones([batch_size, 1], + dtype=to_regress_tokens.input_ids.dtype, + device=to_regress_tokens.input_ids.device + ) * self.llama_tokenizer.bos_token_id + bos_embeds = self.llama_model.model.embed_tokens(bos) + atts_bos = atts_img[:, :1] + + to_regress_embeds = self.llama_model.model.embed_tokens( + to_regress_tokens.input_ids) + inputs_embeds = torch.cat([bos_embeds, img_embeds, to_regress_embeds], + dim=1) + attention_mask = torch.cat( + [atts_bos, atts_img, to_regress_tokens.attention_mask], dim=1) + + outputs = self.llama_model( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + return_dict=True, + labels=targets, + ) + loss = outputs.loss + return dict(loss=loss) + + def predict( + self, + images: torch.Tensor, + data_samples: Optional[List[DataSample]] = None + ) -> List[DataSample]: + + with torch.no_grad(): + img_embeds, atts_img = self.encode_img(images) + + prompts = [ + random.choice(self.zh_prompt_list) if hasattr(t, 'lang') + and t.lang == 'zh' else random.choice(self.en_prompt_list) + for t in data_samples + ] + img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, prompts) + + batch_size = img_embeds.shape[0] + bos = torch.ones( + [batch_size, 1], dtype=torch.long, + 
device=img_embeds.device) * self.llama_tokenizer.bos_token_id + bos_embeds = self.llama_model.model.embed_tokens(bos) + inputs_embeds = torch.cat([bos_embeds, img_embeds], dim=1) + + outputs = self.llama_model.generate( + inputs_embeds=inputs_embeds, + eos_token_id=self.end_token_id, + **self.generation_cfg) + + return self.post_process(outputs, data_samples) + + def post_process( + self, outputs: torch.Tensor, + data_samples: Optional[List[DataSample]]) -> List[DataSample]: + """Perform post process for outputs for different task. + + Args: + outputs (torch.Tensor): The generated outputs. + data_samples (List[DataSample], optional): The annotation + data of every samples. + + Returns: + List[DataSample]: Return list of data samples. + """ + outputs = self.llama_tokenizer.batch_decode( + outputs, skip_special_tokens=True) + + if data_samples is None: + data_samples = [DataSample() for _ in range(len(outputs))] + + for output, data_sample in zip(outputs, data_samples): + if self.task == 'caption': + output = output.split('###')[0] + data_sample.pred_caption = output + else: + # raw output + data_sample.pred_output = output + return data_samples + + def forward( + self, + images: torch.Tensor, + data_samples: Optional[list] = None, + mode: str = 'predict', + **kwargs, + ): + """The unified entry for a forward process in both training and test. + The method accepts the following modes: + + - "predict": Forward and return a list of data samples contain the + predict results. + + Args: + images (torch.Tensor): the preprocessed image tensor of shape + ``(N, C, H, W)``. + data_samples (List[DataSample], optional): The annotation data + of every samples. Defaults to None. + mode (str): Return what kind of value. Defaults to 'predict'. + """ + if mode == 'loss': + return self.loss(images, data_samples) + elif mode == 'predict': + return self.predict(images, data_samples, **kwargs) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + @staticmethod + def _load_llama_proj_hook(module, incompatible_keys): + """Avoid warning missing keys except LLaMA projection keys.""" + proj_patterns = [ + 'vision_encoder.*', + 'ln_vision.*', + 'q_former.*', + 'query_tokens', + 'llama_model.*', + ] + for key in list(incompatible_keys.missing_keys): + if any(re.match(pattern, key) for pattern in proj_patterns): + incompatible_keys.missing_keys.remove(key) diff --git a/mmpretrain/models/multimodal/ofa/__init__.py b/mmpretrain/models/multimodal/ofa/__init__.py new file mode 100644 index 0000000..bcb3f45 --- /dev/null +++ b/mmpretrain/models/multimodal/ofa/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .ofa import OFA +from .ofa_modules import OFADecoder, OFAEncoder, OFAEncoderDecoder + +__all__ = ['OFAEncoderDecoder', 'OFA', 'OFAEncoder', 'OFADecoder'] diff --git a/mmpretrain/models/multimodal/ofa/ofa.py b/mmpretrain/models/multimodal/ofa/ofa.py new file mode 100644 index 0000000..e15787a --- /dev/null +++ b/mmpretrain/models/multimodal/ofa/ofa.py @@ -0,0 +1,320 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
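+#
+# This module wraps :class:`OFAEncoderDecoder` for the caption, vqa and
+# refcoco tasks. For closed-vocabulary VQA (when ``ans2label`` is given), the
+# candidate answers are tokenized and inserted into a prefix ``Trie``, and
+# ``apply_constraint`` masks the logits at every decoding step so that only
+# token ids reachable from the generated answer prefix remain valid, falling
+# back to the EOS token once the prefix leaves the trie.
+#
+# Illustrative sketch of the trie behaviour (token ids are made up)::
+#
+#     trie = Trie(eos=2)
+#     trie.insert([0, 11, 12])         # a tokenized candidate answer
+#     trie.get_next_layer([0, 11])     # -> [12]
+#     trie.get_next_layer([0, 99])     # unknown prefix -> [2] (EOS)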
+import string +from collections import defaultdict +from functools import partial +from typing import Optional, Union + +import mmengine +import torch +from mmengine.model import BaseModel + +from mmpretrain.datasets import CleanCaption +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample +from .ofa_modules import OFAEncoderDecoder + + +class TreeNode(): + + def __init__(self): + self.child = defaultdict(TreeNode) + + +class Trie: + + def __init__(self, eos): + self.root = TreeNode() + self.eos = eos + + def insert(self, word): + cur = self.root + for c in word: + cur = cur.child[c] + + def get_next_layer(self, word): + cur = self.root + for c in word: + cur = cur.child.get(c) + if cur is None: + return [self.eos] + return list(cur.child.keys()) + + +def apply_constraint( + input_ids: torch.Tensor, + logits: torch.Tensor, + decoder_prompts: Optional[list], + num_beams: int, + constraint_trie: Trie = None, +): + if decoder_prompts is None and constraint_trie is None: + return logits + + mask = logits.new_zeros(logits[:, -1, :].size(), dtype=torch.bool) + input_ids = input_ids.view(-1, num_beams, input_ids.shape[-1]) + for batch_id, beam_sent in enumerate(input_ids): + for beam_id, sent in enumerate(beam_sent): + if decoder_prompts is None: + prompt_len = 0 + else: + prompt_len = len(decoder_prompts[batch_id]) + + if sent.size(0) - 1 < prompt_len: + allowed_tokens = [decoder_prompts[batch_id][sent.size(0) - 1]] + mask[batch_id * num_beams + beam_id, allowed_tokens] = True + elif constraint_trie is not None: + answer_tokens = [0] + sent[prompt_len + 1:].tolist() + allowed_tokens = constraint_trie.get_next_layer(answer_tokens) + mask[batch_id * num_beams + beam_id, allowed_tokens] = True + else: + mask[batch_id * num_beams + beam_id, :] = True + logits[:, -1, :].masked_fill_(~mask, float('-inf')) + return logits + + +@MODELS.register_module() +class OFA(BaseModel): + """The OFA model for multiple tasks. + + Args: + encoder_cfg (dict): The config of the encoder, accept the keyword + arguments of :class:`OFAEncoder`. + decoder_cfg (dict): The config of the decoder, accept the keyword + arguments of :class:`OFADecoder`. + vocab_size (int): The size of the vocabulary. + embedding_dim (int): The embedding dimensions of both the encoder + and the decoder. + tokenizer (dict | PreTrainedTokenizer): The tokenizer to encode + the text. + task (str): The task name, supported tasks are "caption", "vqa" and + "refcoco". + prompt (str, optional): The prompt template for the following tasks, + If None, use default prompt: + + - **caption**: ' what does the image describe?' + - **refcoco**: ' which region does the text " {} " describe?' + + Defaults to None + ans2label (str | Sequence | None): The answer to label mapping for + the vqa task. If a string, it should be a pickle or json file. + The sequence constrains the output answers. Defaults to None, + which means no constraint. + generation_cfg (dict): The extra generation config, accept the keyword + arguments of :class:`~transformers.GenerationConfig`. + Defaults to an empty dict. + data_preprocessor (dict, optional): The config for preprocessing input + data. If None or no specified type, it will use + "MultiModalDataPreprocessor" as type. See :class: + `MultiModalDataPreprocessor` for more details. Defaults to None. + init_cfg (dict, optional): The initialization config. Defaults to None. 
+ """ + support_tasks = {'caption', 'vqa', 'refcoco'} + + def __init__( + self, + encoder_cfg, + decoder_cfg, + vocab_size, + embedding_dim, + tokenizer, + task, + prompt=None, + ans2label: Union[dict, str, None] = None, + generation_cfg=dict(), + data_preprocessor: Optional[dict] = None, + init_cfg=None, + ): + if data_preprocessor is None: + data_preprocessor = {} + if isinstance(data_preprocessor, dict): + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super().__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + if isinstance(tokenizer, dict): + self.tokenizer = TOKENIZER.build(tokenizer) + else: + self.tokenizer = tokenizer + + if task not in self.support_tasks: + raise ValueError(f'Unsupported task {task}, please select ' + f'the task from {self.support_tasks}.') + + self.prompt = prompt + self.task = task + + if isinstance(ans2label, str): + self.ans2label = mmengine.load(ans2label) + else: + self.ans2label = ans2label + + if self.task == 'vqa' and self.ans2label is not None: + self.constraint_trie = Trie(eos=self.tokenizer.eos_token_id) + answers = [f' {answer}' for answer in self.ans2label] + answer_tokens = self.tokenizer(answers, padding=False) + for answer_token in answer_tokens['input_ids']: + self.constraint_trie.insert(answer_token) + else: + self.constraint_trie = None + + generation_cfg = { + 'num_beams': 5, + 'max_new_tokens': 20, + 'no_repeat_ngram_size': 3, + **generation_cfg, + } + self.model = OFAEncoderDecoder( + encoder_cfg=encoder_cfg, + decoder_cfg=decoder_cfg, + padding_idx=self.tokenizer.pad_token_id, + vocab_size=vocab_size, + embedding_dim=embedding_dim, + generation_cfg=generation_cfg, + ) + + def forward( + self, + images: torch.Tensor, + data_samples: Optional[list] = None, + mode: str = 'predict', + **kwargs, + ): + """The unified entry for a forward process in both training and test. + The method accepts the following modes: + + - "predict": Forward and return a list of data samples contain the + predict results. + + Args: + images (torch.Tensor): the preprocessed image tensor of shape + ``(N, C, H, W)``. + data_samples (List[DataSample], optional): The annotation data + of every samples. Defaults to None. + mode (str): Return what kind of value. Defaults to 'predict'. + """ + if mode == 'predict': + return self.predict(images, data_samples, **kwargs) + else: + raise RuntimeError(f'Invalid mode "{mode}".') + + def predict( + self, + images, + data_samples=None, + post_process=True, + **generation_config, + ): + text_tokens = self.preprocess_text(data_samples, images.size(0), + images.device) + + if 'images_mask' in data_samples[0]: + images_mask = torch.tensor([ + sample.get('images_mask') for sample in data_samples + ]).bool().to(images.device) + else: + images_mask = None + + num_beams = generation_config.get( + 'num_beams', getattr(self.model.generation_config, 'num_beams')) + decoder_prompts = self.get_decoder_prompts(data_samples) + constrain_fn = partial( + apply_constraint, + constraint_trie=self.constraint_trie, + decoder_prompts=decoder_prompts, + num_beams=num_beams, + ) + + outputs = self.model.generate( + input_ids=text_tokens, + images=images, + images_mask=images_mask, + constrain_fn=constrain_fn, + **generation_config, + ) + + if decoder_prompts is not None: + # Remove the prefix decoder prompt. 
+ for prompt_ids, token in zip(decoder_prompts, outputs): + token[1:len(prompt_ids) + 1] = self.tokenizer.pad_token_id + + if post_process: + return self.post_process(outputs, data_samples) + else: + return outputs + + def get_decoder_prompts(self, data_samples): + decoder_prompts = [] + if 'decoder_prompt' not in data_samples[0]: + return None + for sample in data_samples: + prompt = ' ' + sample.get('decoder_prompt') + prompt_ids = self.tokenizer(prompt, add_special_tokens=False) + prompt_ids = prompt_ids['input_ids'] + decoder_prompts.append(prompt_ids) + return decoder_prompts + + def preprocess_text(self, data_samples, batch_size, device): + if self.task == 'caption': + prompt = self.prompt or ' what does the image describe?' + prompts = [prompt] * batch_size + prompts = self.tokenizer(prompts, return_tensors='pt') + return prompts.input_ids.to(device) + elif self.task == 'vqa': + prompts = [] + for sample in data_samples: + assert 'question' in sample + prompt = ' ' + sample.get('question') + prompts.append(prompt) + prompts = self.tokenizer( + prompts, return_tensors='pt', padding=True) + return prompts.input_ids.to(device) + elif self.task == 'refcoco': + prompt_template = self.prompt or \ + ' which region does the text " {} " describe?' + prompts = [] + for sample in data_samples: + assert 'text' in sample + prompt = prompt_template.format(sample.get('text')) + prompts.append(prompt) + prompts = self.tokenizer( + prompts, return_tensors='pt', padding=True) + return prompts.input_ids.to(device) + + def post_process(self, outputs, data_samples): + + out_data_samples = [] + if data_samples is None: + data_samples = [None] * outputs.size(0) + + for data_sample, token in zip(data_samples, outputs): + if data_sample is None: + data_sample = DataSample() + + if self.task == 'caption': + text = self.tokenizer.decode(token, skip_special_tokens=True) + text = CleanCaption( + lowercase=False, + remove_chars=string.punctuation).clean(text) + data_sample.pred_caption = text + elif self.task == 'vqa': + text = self.tokenizer.decode(token, skip_special_tokens=True) + data_sample.pred_answer = text.strip() + elif self.task == 'refcoco': + bbox = token[1:5] - self.tokenizer.bin_offset + # During training, the bbox is normalized by 512. It's related + # to the `max_image_size` config in the official repo. + bbox = bbox / self.tokenizer.num_bins * 512 + scale_factor = data_sample.get('scale_factor', (1, 1)) + bbox[0::2] /= scale_factor[0] + bbox[1::2] /= scale_factor[1] + data_sample.pred_bboxes = bbox.unsqueeze(0) + if 'gt_bboxes' in data_sample: + gt_bboxes = bbox.new_tensor(data_sample.gt_bboxes) + gt_bboxes[:, 0::2] /= scale_factor[0] + gt_bboxes[:, 1::2] /= scale_factor[1] + data_sample.gt_bboxes = gt_bboxes + out_data_samples.append(data_sample) + + return out_data_samples diff --git a/mmpretrain/models/multimodal/ofa/ofa_modules.py b/mmpretrain/models/multimodal/ofa/ofa_modules.py new file mode 100644 index 0000000..ef5c853 --- /dev/null +++ b/mmpretrain/models/multimodal/ofa/ofa_modules.py @@ -0,0 +1,1613 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
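+#
+# Transformer building blocks for OFA. Besides the encoder/decoder layers,
+# this file defines the relative position "bucket" helpers:
+# ``make_token_bucket_position`` keeps the exact offset for token distances
+# smaller than ``bucket_size // 2`` and maps larger distances to log-spaced
+# buckets, while ``make_image_bucket_position`` builds a 2-D relative
+# position index over image patches (with extra entries reserved for the
+# leading token).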
+import math +from dataclasses import dataclass +from functools import partial +from typing import List, Optional, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn.bricks import DropPath +from mmengine.model import BaseModule +from mmengine.utils import digit_version +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, ModelOutput, Seq2SeqLMOutput) +from transformers.modeling_utils import (GenerationConfig, GenerationMixin, + PretrainedConfig) + +from mmpretrain.registry import MODELS +from ...backbones.resnet import Bottleneck, ResNet + +if digit_version(torch.__version__) >= digit_version('1.10.0'): + torch_meshgrid = partial(torch.meshgrid, indexing='ij') +else: + torch_meshgrid = torch.meshgrid + + +def make_token_bucket_position(bucket_size, max_position=1024): + context_pos = torch.arange(max_position, dtype=torch.long)[:, None] + memory_pos = torch.arange(max_position, dtype=torch.long)[None, :] + relative_pos = context_pos - memory_pos + sign = torch.sign(relative_pos) + mid = bucket_size // 2 + abs_pos = torch.where((relative_pos < mid) & (relative_pos > -mid), + mid - 1, torch.abs(relative_pos)) + log_pos = torch.ceil( + torch.log(abs_pos / mid) / math.log( + (max_position - 1) / mid) * (mid - 1)) + mid + log_pos = log_pos.int() + bucket_pos = torch.where(abs_pos.le(mid), relative_pos, + log_pos * sign).long() + return bucket_pos + bucket_size - 1 + + +def make_image_bucket_position(bucket_size, num_relative_distance): + coords_h = torch.arange(bucket_size) + coords_w = torch.arange(bucket_size) + # (2, h, w) + coords = torch.stack(torch_meshgrid([coords_h, coords_w])) + # (2, h*w) + coords_flatten = torch.flatten(coords, 1) + # (2, h*w, h*w) + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + # (h*w, h*w, 2) + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + relative_coords[:, :, 0] += bucket_size - 1 # shift to start from 0 + relative_coords[:, :, 1] += bucket_size - 1 + relative_coords[:, :, 0] *= 2 * bucket_size - 1 + relative_position_index = torch.zeros( + size=(bucket_size * bucket_size + 1, ) * 2, + dtype=relative_coords.dtype) + # (h*w, h*w) + relative_position_index[1:, 1:] = relative_coords.sum(-1) + relative_position_index[0, 0:] = num_relative_distance - 3 + relative_position_index[0:, 0] = num_relative_distance - 2 + relative_position_index[0, 0] = num_relative_distance - 1 + return relative_position_index + + +def _make_causal_mask(input_ids_shape: torch.Size, + dtype: torch.dtype, + past_key_values_length: int = 0): + """Make causal mask used for uni-directional self-attention.""" + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), float('-inf')) + mask_cond = torch.arange(mask.size(-1)) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat( + [torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], + dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, + tgt_len + past_key_values_length) + + +def _expand_mask(mask: torch.Tensor, + dtype: torch.dtype, + tgt_len: Optional[int] = None): + """Expands attention_mask from ``[B, L_s]`` to ``[B, 1, L_t, L_s]``. + + Where ``B`` is batch_size, `L_s`` is the source sequence length, and + ``L_t`` is the target sequence length. 
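+    Note that in this codebase the input mask marks *padding* positions
+    (``True`` / ``1``), so the expanded mask is filled with the minimum
+    value of ``dtype`` at the padded positions and ``0`` elsewhere.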
+ """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, + src_len).to(dtype) + return expanded_mask.masked_fill(expanded_mask.bool(), + torch.finfo(dtype).min) + + +class MultiheadAttention(BaseModule): + """Multi-head Attention Module for OFA. + + Args: + embedding_dim (int): The embedding dimension of query. + num_heads (int): Parallel attention heads. + kdim (int, optional): The embedding dimension of key. + Defaults to None, which means the same as the `embedding_dim`. + vdim (int, optional): The embedding dimension of value. + Defaults to None, which means the same as the `embedding_dim`. + attn_drop (float): Dropout rate of the dropout layer after the + attention calculation of query and key. Defaults to 0. + qkv_bias (bool): If True, add a learnable bias to q, k, v. + Defaults to True. + scale_factor (float): The scale of qk will be + ``(head_dim * scale_factor) ** -0.5``. Defaults to 1. + proj_bias (bool) If True, add a learnable bias to output projection. + Defaults to True. + init_cfg (dict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + embedding_dim, + num_heads, + kdim=None, + vdim=None, + attn_drop=0., + scale_factor=1., + qkv_bias=True, + proj_bias=True, + scale_heads=False, + init_cfg=None): + super(MultiheadAttention, self).__init__(init_cfg=init_cfg) + + self.embedding_dim = embedding_dim + self.num_heads = num_heads + self.kdim = kdim or embedding_dim + self.vdim = vdim or embedding_dim + + self.head_dim = embedding_dim // num_heads + self.scale = (self.head_dim * scale_factor)**-0.5 + + self.q_proj = nn.Linear(embedding_dim, embedding_dim, bias=qkv_bias) + self.k_proj = nn.Linear(self.kdim, embedding_dim, bias=qkv_bias) + self.v_proj = nn.Linear(self.vdim, embedding_dim, bias=qkv_bias) + self.out_proj = nn.Linear(embedding_dim, embedding_dim, bias=proj_bias) + + self.attn_drop = nn.Dropout(p=attn_drop) + + if scale_heads: + self.c_attn = nn.Parameter(torch.ones(num_heads)) + else: + self.c_attn = None + + def forward( + self, + query, + key_value=None, + attn_mask=None, + attn_bias=None, + past_key_value=None, + output_attentions=False, + ): + B, _, C = query.shape + assert C == self.head_dim * self.num_heads + + is_cross_attention = key_value is not None + if key_value is None: + key_value = query + + # (B, L, C) -> (B, num_heads, L, head_dims) + q = self.q_proj(query).reshape(B, -1, self.num_heads, + self.head_dim).transpose(1, 2) + + if is_cross_attention and past_key_value is not None: + # Reuse key and value in cross_attentions + k, v = past_key_value + else: + k = self.k_proj(key_value).reshape(B, -1, self.num_heads, + self.head_dim).transpose(1, 2) + v = self.v_proj(key_value).reshape(B, -1, self.num_heads, + self.head_dim).transpose(1, 2) + if past_key_value is not None: + past_key, past_value = past_key_value + k = torch.cat([past_key, k], dim=2) + v = torch.cat([past_value, v], dim=2) + + past_key_value = (k, v) + + attn_weights = q @ k.transpose(-2, -1) * self.scale + + if attn_bias is not None: + src_len = k.size(2) + attn_weights[:, :, -src_len:] += attn_bias[:, :, -src_len:] + + if attn_mask is not None: + attn_weights += attn_mask + attn_weights = torch.softmax(attn_weights, dim=-1) + attn = self.attn_drop(attn_weights) @ v + + if self.c_attn is not None: + attn = torch.einsum('bhlc,h->bhlc', attn, self.c_attn) + + # (B, num_heads, L, head_dims) -> (B, L, C) + attn = attn.transpose(1, 2).reshape(B, -1, 
self.embedding_dim) + attn = self.out_proj(attn) + + if output_attentions: + return attn, attn_weights, past_key_value + else: + return attn, None, past_key_value + + +@MODELS.register_module(force=True) +class OFAResNet(ResNet): + """ResNet module for OFA. + + The ResNet in OFA has only three stages. + """ + arch_settings = { + 50: (Bottleneck, (3, 4, 6)), + 101: (Bottleneck, (3, 4, 23)), + 152: (Bottleneck, (3, 8, 36)), + } + + def __init__(self, depth, *args, **kwargs): + super().__init__( + depth=depth, + *args, + num_stages=3, + out_indices=(2, ), + dilations=(1, 1, 1), + strides=(1, 2, 2), + **kwargs) + + +@dataclass +class OFAEncoderOutput(ModelOutput): + """OFA encoder outputs. + + Args: + last_hidden_state (torch.tensor): The hidden-states of the output at + the last layer of the model. The shape is (B, L, C). + hidden_states (Tuple[torch.tensor]): The initial embedding and the + output of each layer. The shape of every item is (B, L, C). + attentions (Tuple[torch.tensor]): The attention weights after the + attention softmax, used to compute the weighted average in the + self-attention heads. The shape of every item is + (B, num_heads, L, L). + position_embedding (torch.tensor): The positional embeddings of the + inputs. The shape is (B, L, C). + """ + + last_hidden_state: torch.FloatTensor = None + padding_mask: torch.Tensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + position_embedding: Optional[torch.FloatTensor] = None + + +class OFAEncoderLayer(nn.Module): + """OFAEncoder layer block.""" + + def __init__(self, + embedding_dim, + num_heads, + dropout_rate=0., + drop_path_rate=0., + attn_drop=0., + act_drop=0., + scale_factor=2., + mlp_ratio=4., + scale_heads=True, + normformer=True, + pre_norm=True, + act_cfg=dict(type='GELU')): + super().__init__() + self.embedding_dim = embedding_dim + self.pre_norm = pre_norm + + self.attn = MultiheadAttention( + embedding_dim=embedding_dim, + num_heads=num_heads, + attn_drop=attn_drop, + scale_factor=scale_factor, + scale_heads=scale_heads, + ) + + mid_channels = int(embedding_dim * mlp_ratio) + self.fc1 = nn.Linear(embedding_dim, mid_channels) + self.fc2 = nn.Linear(mid_channels, embedding_dim) + self.act = MODELS.build(act_cfg) + self.act_drop = nn.Dropout( + act_drop) if act_drop > 0. else nn.Identity() + + # LayerNorm between attention block and ffn block. + self.attn_ln = nn.LayerNorm(embedding_dim) + self.ffn_ln = nn.LayerNorm(embedding_dim) + + # Extra LayerNorm + self.normformer = normformer + if self.normformer: + self.attn_mid_ln = nn.LayerNorm(embedding_dim) + self.ffn_mid_ln = nn.LayerNorm(mid_channels) + + self.dropout = nn.Dropout(dropout_rate) + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() + + def forward(self, + x, + attention_mask=None, + attn_bias=None, + output_attentions=False): + """Forward the encoder layer. + + Args: + x (torch.tensor): The input to the layer of shape ``(B, L, C)``. + attention_mask (torch.Tensor, optional): The attention mask of size + ``(B, 1, L, L)``, where padding elements are indicated by very + large negative values. Defaults to None. + attn_bias (torch.tensor, optional): The bias for positional + information. Defaults to None. + output_attentions (bool): Whether to return the attentions tensors + of the attention layer. + + Returns: + List[torch.tensor]: The first element is the encoded output of + shape ``(B, L, C)``. 
And the second element is the output + attentions if ``output_attentions=True``. + """ + residual = x + + # Attention block + if self.pre_norm: + x = self.attn_ln(x) + x, attn_weights, _ = self.attn( + query=x, + attn_mask=attention_mask, + attn_bias=attn_bias, + output_attentions=output_attentions) + if self.normformer: + x = self.attn_mid_ln(x) + x = self.dropout(x) + x = residual + self.drop_path(x) + if not self.pre_norm: + x = self.attn_ln(x) + + residual = x + + # FFN block + if self.pre_norm: + x = self.ffn_ln(x) + x = self.act(self.fc1(x)) + x = self.act_drop(x) + if self.normformer: + x = self.ffn_mid_ln(x) + x = self.fc2(x) + x = self.dropout(x) + x = residual + self.drop_path(x) + if not self.pre_norm: + x = self.ffn_ln(x) + + if output_attentions: + return [x, attn_weights] + else: + return [x] + + +class OFADecoderLayer(nn.Module): + """OFADecoder layer block.""" + + def __init__(self, + embedding_dim, + num_heads, + dropout_rate=0., + drop_path_rate=0., + attn_drop=0., + act_drop=0., + scale_factor=2., + mlp_ratio=4., + encoder_embed_dim=None, + scale_heads=True, + normformer=True, + pre_norm=True, + act_cfg=dict(type='GELU')): + super().__init__() + self.embedding_dim = embedding_dim + self.pre_norm = pre_norm + + self.self_attn = MultiheadAttention( + embedding_dim=embedding_dim, + num_heads=num_heads, + attn_drop=attn_drop, + scale_factor=scale_factor, + scale_heads=scale_heads, + ) + + self.cross_attn = MultiheadAttention( + embedding_dim=embedding_dim, + kdim=encoder_embed_dim, + vdim=encoder_embed_dim, + num_heads=num_heads, + attn_drop=attn_drop, + scale_factor=scale_factor, + scale_heads=scale_heads, + ) + + mid_channels = int(embedding_dim * mlp_ratio) + self.fc1 = nn.Linear(embedding_dim, mid_channels) + self.fc2 = nn.Linear(mid_channels, embedding_dim) + self.act = MODELS.build(act_cfg) + self.act_drop = nn.Dropout( + act_drop) if act_drop > 0. else nn.Identity() + + # LayerNorm between attention block and ffn block. + self.self_attn_ln = nn.LayerNorm(embedding_dim) + self.cross_attn_ln = nn.LayerNorm(embedding_dim) + self.ffn_ln = nn.LayerNorm(embedding_dim) + + # Extra LayerNorm + self.normformer = normformer + if self.normformer: + self.self_attn_mid_ln = nn.LayerNorm(embedding_dim) + self.cross_attn_mid_ln = nn.LayerNorm(embedding_dim) + self.ffn_mid_ln = nn.LayerNorm(mid_channels) + + self.dropout = nn.Dropout(dropout_rate) + self.drop_path = DropPath( + drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() + + def forward( + self, + x, + attention_mask=None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[List[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + self_attn_bias: Optional[torch.Tensor] = None, + cross_attn_bias: Optional[torch.Tensor] = None, + ): + """Forward the decoder layer. + + Args: + x (torch.tensor): The input to the layer of shape ``(B, L, C)``. + attention_mask (torch.Tensor, optional): The attention mask of size + ``(B, 1, L, L)``, where padding elements are indicated by very + large negative values. Defaults to None. + encoder_hidden_states (torch.Tensor, optional): The cross attention + input to the layer of size ``(B, L, C)``. Defaults to None. + encoder_attention_mask (torch.Tensor, optional): The cross + attention mask where padding elements are indicated by very + large negative values. Defaults to None. 
+ past_key_value (Tuple[torch.tensor], optional): The cached past key + and value projection states. Defaults to none. + output_attentions (bool): whether to return the attentions tensors + of all attention layers. Defaults to False. + use_cache (bool, optional): Whether to use cache. + Defaults to False. + self_attn_bias (torch.Tensor, optional): The self attention bias + for positional information. Defaults to None. + cross_attn_bias (torch.Tensor, optional): The cross attention bias + for positional information. Defaults to None. + + Returns: + List[torch.tensor]: The first element is the encoded output of + shape ``(B, L, C)``. The following two elements can be the output + self-attentions and cross-attentions if ``output_attentions=True``. + The following one element can be the cached past key and value + projection states. + """ + residual = x + + if past_key_value is not None: + self_past_key_value = past_key_value[:2] + cross_past_key_value = past_key_value[2:] + else: + self_past_key_value, cross_past_key_value = None, None + + # Self-Attention block + if self.pre_norm: + x = self.self_attn_ln(x) + x, self_attn_weights, present_key_value = self.self_attn( + query=x, + past_key_value=self_past_key_value, + attn_mask=attention_mask, + output_attentions=output_attentions, + attn_bias=self_attn_bias, + ) + if self.normformer: + x = self.self_attn_mid_ln(x) + x = self.dropout(x) + x = residual + self.drop_path(x) + if not self.pre_norm: + x = self.self_attn_ln(x) + + # Cross-Attention block + if encoder_hidden_states is not None: + residual = x + if self.pre_norm: + x = self.cross_attn_ln(x) + x, cross_attn_weights, cross_key_value = self.cross_attn.forward( + query=x, + key_value=encoder_hidden_states, + attn_mask=encoder_attention_mask, + past_key_value=cross_past_key_value, + output_attentions=output_attentions, + attn_bias=cross_attn_bias) + if self.normformer: + x = self.cross_attn_mid_ln(x) + x = self.dropout(x) + x = residual + self.drop_path(x) + if not self.pre_norm: + x = self.cross_attn_ln(x) + + present_key_value = present_key_value + cross_key_value + + residual = x + + # FFN block + if self.pre_norm: + x = self.ffn_ln(x) + x = self.act(self.fc1(x)) + x = self.act_drop(x) + if self.normformer: + x = self.ffn_mid_ln(x) + x = self.fc2(x) + x = self.dropout(x) + x = residual + self.drop_path(x) + if not self.pre_norm: + x = self.ffn_ln(x) + + outputs = [x] + + if output_attentions: + outputs.extend([self_attn_weights, cross_attn_weights]) + + if use_cache: + outputs.append(present_key_value) + + return outputs + + +class OFAEncoder(BaseModule): + """The encoder module of OFA. + + Args: + embed_tokens (nn.Embedding): The embedding module to embed the + input tokens. + embed_images (dict | nn.Module): The module to embed the input + images into features. The output number of channels should + be 1024. + num_layers (int): The number of encoder layers. Defaults to 6. + num_heads (int): The number of heads of attention. Defaults to 12. + dropout_rate (float): The prob of dropout for embedding and + transformer layers. Defaults to 0. + drop_path_rate (float): The prob of droppath for transformer layers. + Defaults to 0. + max_source_positions (int): The maximum length of the input tokens. + Defaults to 1024. + token_bucket_size (int): The token bucket size, it's used as the + maximum relative position index in relative position embedding + of input tokens. Defaults to 256. 
+ image_bucket_size (int): The image bucket size, it's used to generate + the image relative position embedding table. It should be larger + than the shape of image feature map. Defaults to 42. + attn_scale_factor (float): The scale factor to calculate qk scale in + attentions. Defaults to 2. + scale_embedding (bool): Whether to scale the embeddings by the square + root of the dimension. Defaults to False. + add_embedding_ln (bool): Whether to add an extra layer norm for token + embeddings. Defaults to True. + add_image_embedding_ln (bool): Whether to add an extra layer norm for + image embeddings. Defaults to True. + pre_norm (bool): Whether to do layer norm before attention and ffn + blocks in transformer layers. Defaults to True. + entangle_position_embedding (bool): Whether to add the position + embedding on the embeddings directly. Defaults to False. + init_cfg (dict, optional): The initialization config. Defaults to None. + """ + + def __init__( + self, + embed_tokens, + embed_images: dict, + num_layers=6, + num_heads=12, + dropout_rate=0., + drop_path_rate=0., + max_source_positions=1024, + token_bucket_size=256, + image_bucket_size=42, + attn_scale_factor=2., + scale_embedding=False, + add_embedding_ln=True, + add_type_embed=True, + add_image_embedding_ln=True, + pre_norm=True, + entangle_position_embedding=False, + init_cfg=None, + ): + super().__init__(init_cfg=init_cfg) + + self.num_layers = num_layers + embedding_dim = embed_tokens.embedding_dim + self.embedding_dim = embedding_dim + self.padding_idx = embed_tokens.padding_idx + self.max_source_positions = max_source_positions + self.num_heads = num_heads + + # Build embedding process components + self.embed_tokens = embed_tokens + self.embedding_scale = math.sqrt( + embedding_dim) if scale_embedding else 1.0 + + if not isinstance(embed_images, nn.Module): + self.embed_images = MODELS.build(embed_images) + else: + self.embed_images = embed_images + self.image_proj = nn.Linear(1024, embedding_dim) + + if add_embedding_ln: + self.embedding_ln = nn.LayerNorm(embedding_dim) + else: + self.embedding_ln = None + + if add_type_embed: + self.embed_type = nn.Embedding(2, embedding_dim) + else: + self.embed_type = None + + if add_image_embedding_ln: + self.image_embedding_ln = nn.LayerNorm(embedding_dim) + else: + self.image_embedding_ln = None + + self.entangle_position_embedding = entangle_position_embedding + + # Build position embedding + self.embed_positions = nn.Embedding(self.max_source_positions + 2, + embedding_dim) + self.pos_ln = nn.LayerNorm(embedding_dim) + self.embed_image_positions = nn.Embedding(image_bucket_size**2 + 1, + embedding_dim) + self.image_pos_ln = nn.LayerNorm(embedding_dim) + + self.pos_scaling = float(embedding_dim / num_heads * + attn_scale_factor)**-0.5 + self.pos_q_linear = nn.Linear(embedding_dim, embedding_dim) + self.pos_k_linear = nn.Linear(embedding_dim, embedding_dim) + + self.dropout = nn.Dropout( + dropout_rate) if dropout_rate > 0. 
else nn.Identity() + + # Register token relative position embedding table + self.token_bucket_size = token_bucket_size + token_num_rel_dis = 2 * token_bucket_size - 1 + token_rp_bucket = make_token_bucket_position(token_bucket_size, + self.max_source_positions) + self.register_buffer('token_rp_bucket', token_rp_bucket) + self.token_rel_pos_table_list = nn.ModuleList() + + # Register image relative position embedding table + self.image_bucket_size = image_bucket_size + image_num_rel_dis = (2 * image_bucket_size - + 1) * (2 * image_bucket_size - 1) + 3 + image_rp_bucket = make_image_bucket_position(image_bucket_size, + image_num_rel_dis) + self.register_buffer('image_rp_bucket', image_rp_bucket) + self.image_rel_pos_table_list = nn.ModuleList() + + # Build encoder layers + self.layers = nn.ModuleList() + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_layers)] + for index in range(self.num_layers): + layer = OFAEncoderLayer( + embedding_dim=embedding_dim, + num_heads=num_heads, + dropout_rate=dropout_rate, + drop_path_rate=dpr[index], + scale_factor=attn_scale_factor, + pre_norm=pre_norm, + ) + self.layers.append(layer) + token_pos_table = nn.Embedding(token_num_rel_dis, self.num_heads) + image_pos_table = nn.Embedding(image_num_rel_dis, self.num_heads) + nn.init.constant_(token_pos_table.weight, 0.) + nn.init.constant_(image_pos_table.weight, 0.) + self.token_rel_pos_table_list.append(token_pos_table) + self.image_rel_pos_table_list.append(image_pos_table) + + if pre_norm: + self.final_ln = nn.LayerNorm(embedding_dim) + else: + self.final_ln = None + + main_input_name = 'input_ids' + + def forward(self, + input_ids, + images, + images_mask, + output_attentions=False, + output_hidden_states=False, + sample_patch_num=None): + padding_mask = input_ids.eq(self.padding_idx) + has_pads = padding_mask.any() + token_embedding = self.embed_tokens(input_ids) + token_embedding = self.embedding_scale * token_embedding + + # Embed the token position + src_pos_idx = torch.arange(input_ids.size(-1), device=input_ids.device) + src_pos_idx = src_pos_idx.expand(*input_ids.shape).contiguous() + pos_embedding = self.embed_positions(src_pos_idx) + + # Embed the input tokens + x = self.process_embedding( + embedding=token_embedding, + type_tokens=input_ids.new_zeros(token_embedding.shape[:2]), + pos_embedding=pos_embedding, + embedding_ln=self.embedding_ln, + ) + pos_embedding = self.pos_ln(pos_embedding) + + # Embed the input images + if images is not None: + (image_tokens, image_padding_mask, image_position_ids, + image_pos_embedding) = self.get_image_tokens( + images, + sample_patch_num, + images_mask, + ) + image_embedding = self.image_proj(image_tokens) + + image_x = self.process_embedding( + embedding=image_embedding, + type_tokens=input_ids.new_ones(image_embedding.shape[:2]), + pos_embedding=image_pos_embedding, + embedding_ln=self.image_embedding_ln, + ) + image_pos_embedding = self.image_pos_ln(image_pos_embedding) + + x = torch.cat([image_x, x], dim=1) + padding_mask = torch.cat([image_padding_mask, padding_mask], dim=1) + pos_embedding = torch.cat([image_pos_embedding, pos_embedding], + dim=1) + + # account for padding while computing the representation + if has_pads: + x = x * (1 - padding_mask.unsqueeze(-1).type_as(x)) + + # Decoupled position embedding + B, L = pos_embedding.shape[:2] + pos_q = self.pos_q_linear(pos_embedding).view( + B, L, self.num_heads, -1).transpose(1, 2) * self.pos_scaling + pos_k = self.pos_k_linear(pos_embedding).view(B, L, self.num_heads, + -1).transpose(1, 
2) + abs_pos_bias = torch.matmul(pos_q, pos_k.transpose(2, 3)) + + all_hidden_states = [] if output_hidden_states else None + all_attentions = [] if output_attentions else None + + for idx, layer in enumerate(self.layers): + if output_hidden_states: + all_hidden_states.append(x) + + self_attn_bias = abs_pos_bias.clone() + # Add decoupled position embedding for input tokens. + token_len = input_ids.size(1) + rel_pos_bias = self.get_rel_pos_bias(input_ids, idx) + self_attn_bias[:, :, -token_len:, -token_len:] += rel_pos_bias + + # Add decoupled position embedding for images + if images is not None: + token_len = image_tokens.size(1) + rel_pos_bias = self.get_image_rel_pos_bias( + image_position_ids, idx) + self_attn_bias[:, :, :token_len, :token_len] += rel_pos_bias + + if has_pads: + attention_mask = _expand_mask(padding_mask, dtype=x.dtype) + else: + attention_mask = None + + out = layer( + x, + attention_mask=attention_mask, + attn_bias=self_attn_bias, + output_attentions=output_attentions) + x = out[0] + + if output_attentions: + all_attentions.append(out[1]) + + if output_hidden_states: + all_hidden_states.append(x) + + if self.final_ln is not None: + x = self.final_ln(x) + + return OFAEncoderOutput( + last_hidden_state=x, # (B, L, C) + padding_mask=padding_mask, # (B, L) + position_embedding=pos_embedding, # (B, L, C) + hidden_states=all_hidden_states, # list of (B, L, C) + attentions=all_attentions, # list of (B, num_heads, L, head_dims) + ) + + def get_image_tokens(self, images, sample_patch_num, images_mask): + image_embedding = self.embed_images(images)[-1] + B, C, H, W = image_embedding.shape + num_patches = H * W + + padding_mask = images.new_zeros((B, num_patches)).bool() + position_col = torch.arange(W).unsqueeze(0) + position_row = torch.arange(H).unsqueeze(1) * self.image_bucket_size + position_idx = (position_col + position_row + 1).view(-1) + position_idx = position_idx.to(images.device).expand(B, num_patches) + + # (B, C, H, W) -> (B, C, H*W) -> (B, H*W, C) + image_embedding = image_embedding.flatten(2).transpose(1, 2) + if sample_patch_num is not None: + patch_orders = torch.stack([ + torch.randperm(num_patches)[:sample_patch_num] + for _ in range(B) + ]) + num_patches = sample_patch_num + image_embedding = image_embedding.gather( + dim=1, index=patch_orders.unsqueeze(2).expand(-1, -1, C)) + padding_mask = padding_mask.gather(1, patch_orders) + position_idx = position_idx.gather(1, patch_orders) + + pos_embedding = self.embed_image_positions(position_idx) + padding_mask[~images_mask] = True + return image_embedding, padding_mask, position_idx, pos_embedding + + def process_embedding(self, + embedding, + pos_embedding=None, + type_tokens=None, + embedding_ln=None): + if self.entangle_position_embedding and pos_embedding is not None: + embedding += pos_embedding + if self.embed_type is not None: + embedding += self.embed_type(type_tokens) + if embedding_ln is not None: + embedding = embedding_ln(embedding) + embedding = self.dropout(embedding) + + return embedding + + def get_rel_pos_bias(self, x, idx): + seq_len = x.size(1) + rp_bucket = self.token_rp_bucket[:seq_len, :seq_len] + values = F.embedding(rp_bucket, + self.token_rel_pos_table_list[idx].weight) + values = values.unsqueeze(0).expand(x.size(0), -1, -1, -1) + values = values.permute([0, 3, 1, 2]) + return values.contiguous() + + def get_image_rel_pos_bias(self, image_position_ids, idx): + bsz, seq_len = image_position_ids.shape + rp_bucket_size = self.image_rp_bucket.size(1) + + rp_bucket = 
self.image_rp_bucket.unsqueeze(0).expand( + bsz, rp_bucket_size, rp_bucket_size).gather( + 1, image_position_ids[:, :, None].expand( + bsz, seq_len, rp_bucket_size)).gather( + 2, image_position_ids[:, None, :].expand( + bsz, seq_len, seq_len)) + values = F.embedding(rp_bucket, + self.image_rel_pos_table_list[idx].weight) + values = values.permute(0, 3, 1, 2) + return values + + +class OFADecoder(BaseModule): + """The decoder module of OFA. + + Args: + embed_tokens (nn.Embedding): The embedding module to embed the + input tokens. + num_layers (int): The number of decoder layers. Defaults to 6. + num_heads (int): The number of heads of attention. Defaults to 12. + dropout_rate (float): The prob of dropout for embedding and + transformer layers. Defaults to 0. + drop_path_rate (float): The prob of droppath for transformer layers. + Defaults to 0. + max_target_positions (int): The maximum length of the input tokens. + Defaults to 1024. + code_image_size (int): The resolution of the generated image in the + image infilling task. Defaults to 128. + token_bucket_size (int): The token bucket size, it's used as the + maximum relative position index in relative position embedding + of input tokens. Defaults to 256. + image_bucket_size (int): The image bucket size, it's used to generate + the image relative position embedding table. It should be larger + than the shape of image feature map. Defaults to 42. + attn_scale_factor (float): The scale factor to calculate qk scale in + attentions. Defaults to 2. + scale_embedding (bool): Whether to scale the embeddings by the square + root of the dimension. Defaults to False. + add_embedding_ln (bool): Whether to add an extra layer norm for token + embeddings. Defaults to True. + add_code_embedding_ln (bool): Whether to add an extra layer norm for + code embeddings. Defaults to True. + pre_norm (bool): Whether to do layer norm before attention and ffn + blocks in transformer layers. Defaults to True. + entangle_position_embedding (bool): Whether to add the position + embedding on the embeddings directly. Defaults to False. + share_input_output_embed (bool): Share the weights of the input token + embedding module and the output projection module. + Defaults to True. + init_cfg (dict, optional): The initialization config. Defaults to None. 
+ """ + + def __init__( + self, + embed_tokens, + num_layers=6, + num_heads=12, + dropout_rate=0., + drop_layer_rate=0., + drop_path_rate=0., + max_target_positions=1024, + code_image_size=128, + token_bucket_size=256, + image_bucket_size=42, + attn_scale_factor=2., + scale_embedding=False, + add_embedding_ln=True, + add_code_embedding_ln=True, + pre_norm=True, + entangle_position_embedding=False, + share_input_output_embed=True, + init_cfg=None, + ): + super().__init__(init_cfg=init_cfg) + self._future_mask = torch.empty(0) + + self.num_layers = num_layers + embedding_dim = embed_tokens.embedding_dim + self.embedding_dim = embedding_dim + self.padding_idx = embed_tokens.padding_idx + self.max_target_positions = max_target_positions + self.num_heads = num_heads + + # Build embedding process components + self.embed_tokens = embed_tokens + self.embedding_scale = math.sqrt( + embedding_dim) if scale_embedding else 1.0 + + if add_embedding_ln: + self.embedding_ln = nn.LayerNorm(embedding_dim) + else: + self.embedding_ln = None + + if add_code_embedding_ln: + self.code_embedding_ln = nn.LayerNorm(embedding_dim) + else: + self.code_embedding_ln = None + + # Build position embedding + self.embed_positions = nn.Embedding(self.max_target_positions + 2, + embedding_dim) + self.pos_ln = nn.LayerNorm(embedding_dim) + self.embed_image_positions = nn.Embedding(image_bucket_size**2 + 1, + embedding_dim) + self.image_pos_ln = nn.LayerNorm(embedding_dim) + + self.pos_scaling = float(embedding_dim / num_heads * + attn_scale_factor)**-0.5 + self.self_pos_q_linear = nn.Linear(embedding_dim, embedding_dim) + self.self_pos_k_linear = nn.Linear(embedding_dim, embedding_dim) + self.cross_pos_q_linear = nn.Linear(embedding_dim, embedding_dim) + self.cross_pos_k_linear = nn.Linear(embedding_dim, embedding_dim) + + self.entangle_position_embedding = entangle_position_embedding + + self.dropout = nn.Dropout( + dropout_rate) if dropout_rate > 0. 
else nn.Identity() + if drop_layer_rate > 0.: + raise NotImplementedError + + # Register token relative position embedding table + self.token_bucket_size = token_bucket_size + token_num_rel_dis = 2 * token_bucket_size - 1 + token_rp_bucket = make_token_bucket_position(token_bucket_size) + self.register_buffer('token_rp_bucket', token_rp_bucket) + self.token_rel_pos_table_list = nn.ModuleList() + + # Register image relative position embedding table + self.image_bucket_size = image_bucket_size + image_num_rel_dis = (2 * image_bucket_size - + 1) * (2 * image_bucket_size - 1) + 3 + image_rp_bucket = make_image_bucket_position(image_bucket_size, + image_num_rel_dis) + self.register_buffer('image_rp_bucket', image_rp_bucket) + self.image_rel_pos_table_list = nn.ModuleList() + + self.window_size = code_image_size // 8 + + position_col = torch.arange(self.window_size).unsqueeze(0) + position_row = torch.arange( + self.window_size).unsqueeze(1) * self.image_bucket_size + image_position_idx = (position_col + position_row + 1) + image_position_idx = torch.cat( + [torch.tensor([0]), image_position_idx.view(-1)]) + image_position_idx = torch.cat( + [image_position_idx, + torch.tensor([1024] * 768)]) + self.register_buffer('image_position_idx', image_position_idx) + + # Build decoder layers + self.layers = nn.ModuleList() + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_layers)] + for index in range(self.num_layers): + layer = OFADecoderLayer( + embedding_dim=embedding_dim, + num_heads=num_heads, + dropout_rate=dropout_rate, + drop_path_rate=dpr[index], + scale_factor=attn_scale_factor, + pre_norm=pre_norm, + ) + self.layers.append(layer) + token_pos_table = nn.Embedding(token_num_rel_dis, self.num_heads) + image_pos_table = nn.Embedding(image_num_rel_dis, self.num_heads) + nn.init.constant_(token_pos_table.weight, 0.) + nn.init.constant_(image_pos_table.weight, 0.) 
+ self.token_rel_pos_table_list.append(token_pos_table) + self.image_rel_pos_table_list.append(image_pos_table) + + if pre_norm: + self.final_ln = nn.LayerNorm(embedding_dim) + else: + self.final_ln = None + + # Build output projection + if share_input_output_embed: + self.output_projection = nn.Linear( + self.embed_tokens.weight.shape[1], + self.embed_tokens.weight.shape[0], + bias=False, + ) + self.output_projection.weight = self.embed_tokens.weight + else: + vocab_size = self.embed_tokens.num_embeddings + self.output_projection = nn.Linear( + embedding_dim, vocab_size, bias=False) + nn.init.normal_( + self.output_projection.weight, + mean=0, + std=embedding_dim**-0.5, + ) + + main_input_name = 'input_ids' + + def forward( + self, + input_ids: torch.Tensor = None, + attention_mask: torch.Tensor = None, + encoder_hidden_states: torch.Tensor = None, + encoder_attention_mask: torch.Tensor = None, + code_masks: Optional[torch.Tensor] = None, + encoder_pos_embedding: Optional[torch.Tensor] = None, + past_key_values: Optional[torch.Tensor] = None, + use_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + ): + + if past_key_values is not None and len(past_key_values) > 0: + B, _, L_past, _ = past_key_values[0][0].shape + L = L_past + 1 + else: + B, L = input_ids.shape + L_past = 0 + + # Embed the token position + target_pos_idx = torch.arange( + L, device=input_ids.device).expand([B, L]).contiguous() + pos_embedding = self.embed_positions(target_pos_idx) + + # Embed the code positions + if code_masks is not None and torch.any(code_masks): + image_position_idx = self.image_position_idx[:input_ids.size(1)] + image_position_idx = image_position_idx.unsqueeze(0).expand(B, L) + pos_embedding[code_masks] = self.embed_image_positions( + image_position_idx)[code_masks] + + # Self-attention position bias (B, num_heads, L_t, L_t) + self_abs_pos_bias = self.get_pos_info(self.pos_ln(pos_embedding)) + if code_masks is not None and torch.any(code_masks): + self_image_abs_pos_bias = self.get_pos_info( + self.image_pos_ln(pos_embedding)) + self_abs_pos_bias[code_masks] = self_image_abs_pos_bias[code_masks] + + # Cross-attention position bias (B, num_heads, L_t, L_s) + cross_abs_pos_bias = self.get_pos_info( + self.pos_ln(pos_embedding), encoder_pos_embedding) + if code_masks is not None and torch.any(code_masks): + cross_image_abs_pos_bias = self.get_pos_info( + self.image_pos_ln(pos_embedding), encoder_pos_embedding) + cross_abs_pos_bias[code_masks] = cross_image_abs_pos_bias[ + code_masks] + + all_prev_output_tokens = input_ids.clone() + if past_key_values is not None and len(past_key_values) > 0: + input_ids = input_ids[:, -1:] + cross_abs_pos_bias = cross_abs_pos_bias[:, :, -1:, :] + pos_embedding = pos_embedding[:, -1:, :] + + # Embed the input tokens + x = self.embed_tokens(input_ids) * self.embedding_scale + + if self.entangle_position_embedding: + x += pos_embedding + + if self.embedding_ln is not None: + if (code_masks is None or not code_masks.any() + or self.code_embedding_ln is None): + x = self.embedding_ln(x) + elif code_masks is not None and code_masks.all(): + x = self.code_embedding_ln(x) + else: + x[~code_masks] = self.embedding_ln(x[~code_masks]) + x[code_masks] = self.code_embedding_ln(x[code_masks]) + + x = self.dropout(x) + + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, input_ids.shape, x.dtype, L_past) + attention_mask = attention_mask.to(x.device) + + # decoder layers + all_hidden_states = [] if output_hidden_states 
else None + all_self_attns = [] if output_attentions else None + all_cross_attentions = [] if ( + output_attentions and encoder_hidden_states is not None) else None + next_decoder_cache = [] if use_cache else None + + for idx, layer in enumerate(self.layers): + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states.append(x) + + if past_key_values is not None and len(past_key_values) > 0: + past_key_value = past_key_values[idx] + else: + past_key_value = None + + self_attn_bias = self_abs_pos_bias.clone() + if code_masks is None or not code_masks.any(): + self_attn_bias += self.get_rel_pos_bias( + all_prev_output_tokens, idx) + elif code_masks is not None and code_masks.all(): + self_attn_bias += self.get_image_rel_pos_bias( + all_prev_output_tokens, idx) + else: + self_attn_bias[~code_masks] += self.get_rel_pos_bias( + all_prev_output_tokens, idx) + self_attn_bias[code_masks] += self.get_image_rel_pos_bias( + all_prev_output_tokens, idx) + + if past_key_value is not None: + self_attn_bias = self_attn_bias[:, :, -1:, :] + + out = layer( + x, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + self_attn_bias=self_attn_bias, + cross_attn_bias=cross_abs_pos_bias, + ) + x = out.pop(0) + + if output_attentions: + all_self_attns.append(out.pop(0)) + if encoder_hidden_states is not None: + all_cross_attentions.append(out.pop(0)) + + if use_cache: + next_decoder_cache.append(out.pop(0)) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (x, ) + + if self.final_ln is not None: + x = self.final_ln(x) + + x = self.output_projection(x) + + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=x, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + def _prepare_decoder_attention_mask( + self, + attention_mask, + input_shape, + dtype, + past_key_values_length, + ): + r""" + Create causal mask for unidirectional decoding. 
+ [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + """ + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + dtype, + past_key_values_length=past_key_values_length).to( + attention_mask.device) + + if attention_mask is not None: + # (B, L_s) -> (B, 1, L_t, L_s) + expanded_attention_mask = _expand_mask( + attention_mask, dtype, tgt_len=input_shape[-1]) + combined_attention_mask = ( + expanded_attention_mask if combined_attention_mask is None else + expanded_attention_mask + combined_attention_mask) + + return combined_attention_mask + + def get_pos_info(self, pos_embedding, src_pos_embedding=None): + B, tgt_len = pos_embedding.shape[:2] + if src_pos_embedding is not None: + src_len = src_pos_embedding.size(1) + pos_q = self.cross_pos_q_linear(pos_embedding).view( + B, tgt_len, self.num_heads, -1).transpose(1, 2) + pos_q = pos_q * self.pos_scaling + pos_k = self.cross_pos_k_linear(src_pos_embedding).view( + B, src_len, self.num_heads, -1).transpose(1, 2) + else: + pos_q = self.self_pos_q_linear(pos_embedding).view( + B, tgt_len, self.num_heads, -1).transpose(1, 2) + pos_q = pos_q * self.pos_scaling + pos_k = self.self_pos_k_linear(pos_embedding).view( + B, tgt_len, self.num_heads, -1).transpose(1, 2) + + abs_pos_bias = torch.matmul(pos_q, pos_k.transpose(2, 3)) + + return abs_pos_bias + + def get_rel_pos_bias(self, x, idx): + seq_len = x.size(1) + rp_bucket = self.token_rp_bucket[:seq_len, :seq_len] + values = F.embedding(rp_bucket, + self.token_rel_pos_table_list[idx].weight) + values = values.unsqueeze(0).expand(x.size(0), -1, -1, -1) + values = values.permute([0, 3, 1, 2]) + return values.contiguous() + + def get_image_rel_pos_bias(self, image_position_ids, idx): + bsz, seq_len = image_position_ids.shape + rp_bucket_size = self.image_rp_bucket.size(1) + + rp_bucket = self.image_rp_bucket.unsqueeze(0).expand( + bsz, rp_bucket_size, rp_bucket_size).gather( + 1, image_position_ids[:, :, None].expand( + bsz, seq_len, rp_bucket_size)).gather( + 2, image_position_ids[:, None, :].expand( + bsz, seq_len, seq_len)) + values = F.embedding(rp_bucket, + self.image_rel_pos_table_list[idx].weight) + values = values.permute(0, 3, 1, 2) + return values + + +class OFAEncoderDecoder(BaseModule, GenerationMixin): + """The OFA main architecture with an encoder and a decoder. + + Args: + encoder_cfg (dict): The config of the encoder, accept the keyword + arguments of :class:`OFAEncoder`. + decoder_cfg (dict): The config of the decoder, accept the keyword + arguments of :class:`OFADecoder`. + padding_idx (int): The index of the padding token. + vocab_size (int): The size of the vocabulary. + embedding_dim (int): The embedding dimensions of both the encoder + and the decoder. + generation_cfg (dict): The extra generation config, accept the keyword + arguments of :class:`~transformers.GenerationConfig`. + Defaults to an empty dict. + init_cfg (dict, optional): The initialization config. Defaults to None. 
+ """ + base_model_prefix = '' + + def __init__( + self, + encoder_cfg, + decoder_cfg, + padding_idx, + vocab_size, + embedding_dim, + generation_cfg=dict(), + init_cfg=None, + ): + super().__init__(init_cfg=init_cfg) + + self.padding_idx = padding_idx + self.vocab_size = vocab_size + self.embedding_dim = embedding_dim + embed_tokens = nn.Embedding(vocab_size, embedding_dim, padding_idx) + + self.encoder = OFAEncoder(embed_tokens, **encoder_cfg) + self.decoder = OFADecoder(embed_tokens, **decoder_cfg) + + self.config = PretrainedConfig( + vocab_size=vocab_size, + embedding_dim=embedding_dim, + padding_idx=padding_idx, + bos_token_id=0, + decoder_start_token_id=0, + pad_token_id=1, + eos_token_id=2, + forced_eos_token_id=2, + use_cache=False, + is_encoder_decoder=True, + ) + self.config.update(generation_cfg) + + self.generation_config = GenerationConfig.from_model_config( + self.config) + + @property + def device(self): + return next(self.parameters()).device + + def can_generate(self): + return True + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + def max_decoder_positions(self): + """Maximum length supported by the decoder.""" + return self.decoder.max_positions() + + def get_normalized_probs(self, net_output, log_probs: bool, sample=None): + """Get normalized probabilities (or log probs) from a net's output.""" + return self.get_normalized_probs_scriptable(net_output, log_probs, + sample) + + def get_normalized_probs_scriptable( + self, + net_output, + log_probs: bool, + sample=None, + ): + """Scriptable helper function for get_normalized_probs in. + + ~BaseFairseqModel. + """ + if hasattr(self, 'decoder'): + return self.decoder.get_normalized_probs(net_output, log_probs, + sample) + elif torch.is_tensor(net_output): + # syntactic sugar for simple models which don't have a decoder + # (e.g., the classification tutorial) + logits = net_output.float() + if log_probs: + return F.log_softmax(logits, dim=-1) + else: + return F.softmax(logits, dim=-1) + raise NotImplementedError + + main_input_name = 'input_ids' + + def forward(self, + input_ids=None, + images=None, + images_mask=None, + sample_patch_num=None, + decoder_input_ids=None, + code_masks=None, + attention_mask=None, + encoder_outputs=None, + past_key_values=None, + use_cache=False, + output_attentions=False, + output_hidden_states=False, + constrain_fn=None, + return_dict=False): + """Forword the module. + + Args: + input_ids (torch.Tensor): The indices of the input tokens in the + vocabulary, and padding will be ignored by default. The indices + can be obtained using :class:`OFATokenizer`. + The shape is (B, L). + images (torch.Tensor): The input images. The shape is (B, 3, H, W). + images_mask (torch.Tensor): The mask of all available images. The + shape is (B, ). + sample_patch_num (int): The number of patches to sample for the + images. Defaults to None, which means to use all patches. + decoder_input_ids (torch.Tensor): The indices of the input tokens + for the decoder. + code_masks (torch.Tensor): The mask of all samples for image + generation. The shape is (B, ). + attention_mask (torch.Tensor): The attention mask for decoding. + The shape is (B, L). + encoder_outputs (OFAEncoderOutput): The encoder outputs with hidden + states, positional embeddings, and padding masks. + past_key_values (Tuple[Tuple[torch.Tensor]]): If use cache, the + parameter is a tuple of length ``num_layers``. 
Every item is + also a tuple with four tensors, two for the key and value of + self-attention, two for the key and value of cross-attention. + use_cache (bool): Whether to use cache for faster inference. + Defaults to False. + output_attentions (bool): Whether to output attention weights. + Defaults to False. + output_hidden_states (bool): Whether to output hidden states. + Defaults to False. + constrain_fn (Callable, optional): The function to constrain the + output logits. Defaults to None. + return_dict (bool): Not used, it's only for compat with the + interface of the ``generate`` of ``transformers``. + + Returns: + Seq2SeqLMOutput: + + - logits (``torch.Tensor``): The last decoder hidden states. + The shape is (B, L, C). + - past_key_values (``Tuple[Tuple[torch.Tensor]]``): The past keys + and values for faster inference. + - decoder_hidden_states (``Tuple[torch.Tensor]``): the decoder + hidden states of all layers. + - decoder_attentions (``Tuple[torch.Tensor]``): The self-attention + weights of all layers in the decoder. + - cross_attentions (``Tuple[torch.Tensor]``): The cross-attention + weights of all layers in the decoder. + - encoder_last_hidden_state (``torch.Tensor``): The last encoder + hidden states. + - encoder_hidden_states (``Tuple[torch.Tensor]``): The encoder + hidden states of all layers, including the embeddings. + - encoder_attentions (``Tuple[torch.Tensor]``): The self-attention + weights of all layers in the encoder. + """ + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + images=images, + images_mask=images_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + sample_patch_num=sample_patch_num, + ) + + if decoder_input_ids.eq(self.padding_idx).any(): + attention_mask = decoder_input_ids.eq(self.padding_idx) + + encoder_hidden_states = encoder_outputs.last_hidden_state + encoder_attention_mask = _expand_mask(encoder_outputs.padding_mask, + encoder_hidden_states.dtype, + decoder_input_ids.shape[-1]) + src_pos_embed = encoder_outputs.position_embedding + + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + code_masks=code_masks, + encoder_pos_embedding=src_pos_embed, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + # The constrain operation for fine-tuned model in OFA is applied + # before log_softmax, therefore we cannot use + # `prefix_allowed_tokens_fn` to implement it. 
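+ # In other words, `constrain_fn` is expected to take the decoder input ids
+ # together with the projected decoder logits and return the constrained
+ # logits; the result is used directly, with no extra normalization here.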
+ if constrain_fn is not None: + logits = constrain_fn(decoder_input_ids, + decoder_outputs.last_hidden_state) + else: + logits = decoder_outputs.last_hidden_state + + return Seq2SeqLMOutput( + logits=logits, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + def prepare_inputs_for_generation(self, + decoder_input_ids=None, + past=None, + attention_mask=None, + code_masks=None, + use_cache=False, + encoder_outputs=None, + constrain_fn=None, + **kwargs): + # if attention_mask is None: + attention_mask = decoder_input_ids.new_zeros(decoder_input_ids.shape) + + # cut decoder_input_ids if past is used + if past is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + return { + 'input_ids': None, + 'images': None, + 'images_mask': None, + 'sample_patch_num': None, + 'attention_mask': attention_mask, + 'encoder_outputs': encoder_outputs, + 'past_key_values': past, + 'decoder_input_ids': decoder_input_ids, + 'code_masks': code_masks, + 'use_cache': use_cache, + 'constrain_fn': constrain_fn, + } + + def _prepare_encoder_decoder_kwargs_for_generation( + self, + inputs_tensor: torch.Tensor, + model_kwargs, + model_input_name: Optional[str] = None): + # 1. get encoder + encoder = self.get_encoder() + + # 2. prepare encoder args and encoder kwargs from model kwargs + irrelevant_prefix = [ + 'decoder_', 'cross_attn', 'use_cache', 'attention_mask', + 'constrain_fn' + ] + encoder_kwargs = { + argument: value + for argument, value in model_kwargs.items() + if not any(argument.startswith(p) for p in irrelevant_prefix) + } + + if encoder_kwargs.get('images_mask') is None: + encoder_kwargs['images_mask'] = torch.tensor([True] * + inputs_tensor.size(0)) + + # 3. 
make sure that encoder returns `ModelOutput` + model_input_name = model_input_name or self.main_input_name + encoder_kwargs[model_input_name] = inputs_tensor + model_kwargs['encoder_outputs']: ModelOutput = encoder( + **encoder_kwargs) + model_kwargs['attention_mask'] = None + + return model_kwargs + + @staticmethod + def _reorder_cache(past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple( + past_state.index_select(0, beam_idx) + for past_state in layer_past), ) + return reordered_past + + @staticmethod + def _expand_inputs_for_generation( + input_ids: torch.LongTensor, + expand_size: int = 1, + is_encoder_decoder: bool = False, + attention_mask: Optional[torch.LongTensor] = None, + encoder_outputs: Optional[ModelOutput] = None, + **model_kwargs, + ): + expanded_return_idx = ( + torch.arange(input_ids.shape[0]).view(-1, 1).repeat( + 1, expand_size).view(-1).to(input_ids.device)) + input_ids = input_ids.index_select(0, expanded_return_idx) + + if attention_mask is not None: + model_kwargs['attention_mask'] = attention_mask.index_select( + 0, expanded_return_idx) + + if is_encoder_decoder: + if encoder_outputs is None: + raise ValueError('If `is_encoder_decoder` is True, make ' + 'sure that `encoder_outputs` is defined.') + encoder_outputs['last_hidden_state'] = encoder_outputs.\ + last_hidden_state.index_select(0, expanded_return_idx) + encoder_outputs['position_embedding'] = encoder_outputs.\ + position_embedding.index_select(0, expanded_return_idx) + encoder_outputs['padding_mask'] = encoder_outputs.\ + padding_mask.index_select(0, expanded_return_idx) + model_kwargs['encoder_outputs'] = encoder_outputs + return input_ids, model_kwargs diff --git a/mmpretrain/models/multimodal/otter/__init__.py b/mmpretrain/models/multimodal/otter/__init__.py new file mode 100644 index 0000000..38a45a3 --- /dev/null +++ b/mmpretrain/models/multimodal/otter/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .otter import Otter + +__all__ = ['Otter'] diff --git a/mmpretrain/models/multimodal/otter/otter.py b/mmpretrain/models/multimodal/otter/otter.py new file mode 100644 index 0000000..7d30b50 --- /dev/null +++ b/mmpretrain/models/multimodal/otter/otter.py @@ -0,0 +1,143 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional + +import torch + +from mmpretrain.registry import MODELS, TOKENIZER +from mmpretrain.structures import DataSample +from ..flamingo.flamingo import ExtendModule, Flamingo, PerceiverResampler + + +@MODELS.register_module() +class Otter(Flamingo): + """The Otter model for multiple tasks. + + Args: + vision_encoder (dict): The config of the vision encoder. + lang_encoder (dict): The config of the language encoder. + tokenizer (dict): The tokenizer to encode the text. + task (int): The task to perform prediction. + zeroshot_prompt (str): Prompt used for zero-shot inference. + Defaults to an. + shot_prompt_tmpl (str): Prompt used for few-shot inference. + Defaults to ``User:Please describe the image. + GPT:{caption}<|endofchunk|>``. + final_prompt_tmpl (str): Final part of prompt used for inference. + Defaults to 'User:Please describe the image. GPT:'. + generation_cfg (dict): The extra generation config, accept the keyword + arguments of [~`transformers.GenerationConfig`]. + Defaults to an empty dict. + data_preprocessor (Optional[dict]): The config for preprocessing input + data. If None or no specified type, it will use + "MutimodalDataPreprocessor" as type. 
+ See :class:`MutimodalDataPreprocessor` for more details. + Defaults to None. + init_cfg (dict, optional): The initialization config. Defaults to None. + """ + + support_tasks = {'caption', 'vqa'} + _no_split_modules = [ + 'TransformerEncoderLayer', 'PerceiverAttention', + 'GatedCrossAttentionBlock', 'FlamingoLayer' + ] + + def __init__( + self, + vision_encoder: dict, + lang_encoder: dict, + tokenizer: dict, + task: str = 'caption', + zeroshot_prompt: str = '', + shot_prompt_tmpl: str = ('User:Please describe the image. ' + 'GPT:{caption}<|endofchunk|>'), + final_prompt_tmpl: str = ('User:Please describe the image. ' + 'GPT:'), + generation_cfg: dict = dict(), + data_preprocessor: Optional[dict] = None, + init_cfg: Optional[dict] = None): + if data_preprocessor is None: + data_preprocessor = {} + if isinstance(data_preprocessor, dict): + data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor') + data_preprocessor = MODELS.build(data_preprocessor) + + super(Flamingo, self).__init__( + init_cfg=init_cfg, data_preprocessor=data_preprocessor) + + if task not in self.support_tasks: + raise ValueError(f'Unsupported task {task}, please select ' + f'the task from {self.support_tasks}.') + self.task = task + + # init tokenizer + self.tokenizer = TOKENIZER.build(tokenizer) + # add Otter special tokens to the tokenizer + self.tokenizer.add_special_tokens({ + 'additional_special_tokens': + ['<|endofchunk|>', '', ''] + }) + self.tokenizer.bos_token_id = 1 + if self.tokenizer.pad_token is None: + # Issue: GPT models don't have a pad token, which we use to + # modify labels for the loss. + self.tokenizer.add_special_tokens({'pad_token': ''}) + + # Template to format the prompt input + self.zeroshot_prompt = zeroshot_prompt + self.shot_prompt_tmpl = shot_prompt_tmpl + self.final_prompt_tmpl = final_prompt_tmpl + + # init vision encoder related modules + vision_encoder_weight = vision_encoder.pop('pretrained', None) + self.vision_encoder = MODELS.build(vision_encoder) + if vision_encoder_weight is not None: + from mmengine.runner.checkpoint import load_checkpoint + load_checkpoint( + self.vision_encoder, + vision_encoder_weight, + map_location='cpu', + revise_keys=[(r'^backbone\.', '')], + ) + self.vision_encoder.is_init = True + + self.perceiver = PerceiverResampler(dim=self.vision_encoder.embed_dims) + + # init language encoder related modules + self.lang_encoder = ExtendModule(**lang_encoder) + self.lang_encoder.resize_token_embeddings(len(self.tokenizer)) + self.lang_encoder.media_token_id = self.tokenizer.encode('')[-1] + + # other necessary parameters + self.eoc_token_id = self.tokenizer.encode('<|endofchunk|>')[-1] + self.generation_cfg = generation_cfg + + if hasattr(self, 'register_load_state_dict_post_hook'): + self.register_load_state_dict_post_hook(self._load_adapter_hook) + + def post_process( + self, outputs: torch.Tensor, + data_samples: Optional[List[DataSample]]) -> List[DataSample]: + """Perform post process for outputs for different task. + + Args: + outputs (torch.Tensor): The generated outputs. + data_samples (List[DataSample], optional): The annotation + data of every samples. + + Returns: + List[DataSample]: Return list of data samples. 
+ """ + outputs = self.tokenizer.batch_decode( + outputs, skip_special_tokens=True) + + if data_samples is None: + data_samples = [DataSample() for _ in range(len(outputs))] + + for output, data_sample in zip(outputs, data_samples): + # remove text pattern + if self.task == 'caption': + data_sample.pred_caption = output + elif self.task == 'vqa': + data_sample.pred_answer = output + + return data_samples diff --git a/mmpretrain/models/multimodal/ram/__init__.py b/mmpretrain/models/multimodal/ram/__init__.py new file mode 100644 index 0000000..35619d8 --- /dev/null +++ b/mmpretrain/models/multimodal/ram/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .ram import RAM, RAMNormal, RAMOpenset + +__all__ = ['RAM', 'RAMNormal', 'RAMOpenset'] diff --git a/mmpretrain/models/multimodal/ram/bert.py b/mmpretrain/models/multimodal/ram/bert.py new file mode 100644 index 0000000..f54b2ce --- /dev/null +++ b/mmpretrain/models/multimodal/ram/bert.py @@ -0,0 +1,1197 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Modify from: +# https://github.com/xinyu1205/recognize-anything/blob/main/ram/models/bert.py + +import math +from typing import Tuple + +import torch +import torch.utils.checkpoint +from torch import Tensor, device, nn +from torch.nn import CrossEntropyLoss +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions) +from transformers.modeling_utils import (PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer) +from transformers.models.bert.configuration_bert import BertConfig +from transformers.utils import logging + +logger = logging.get_logger(__name__) + + +class BertEmbeddings_nopos(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, + config.hidden_size, + padding_idx=config.pad_token_id) + # self.position_embeddings = nn.Embedding( + # config.max_position_embeddings, config.hidden_size) + '''self.LayerNorm is not snake-cased to stick with + TensorFlow model variable name and be able to load''' + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous + # in memory and exported when serialized + # self.register_buffer("position_ids", + # torch.arange(config.max_position_embeddings).expand((1, -1))) + # self.position_embedding_type = \ + # getattr(config, "position_embedding_type", "absolute") + + self.config = config + + def forward(self, + input_ids=None, + position_ids=None, + inputs_embeds=None, + past_key_values_length=0): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] # noqa: F841 + + # if position_ids is None: + # position_ids = self.position_ids[:, \ + # past_key_values_length : seq_length + \ + # past_key_values_length] + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + embeddings = inputs_embeds + + # if self.position_embedding_type == "absolute": + # position_embeddings = self.position_embeddings(position_ids) + # # print('add position_embeddings!!!!') + # embeddings += position_embeddings + embeddings = 
self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertEmbeddings(nn.Module): + """Construct the embeddings from word and position embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding( + config.vocab_size, + config.hidden_size, + padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, + config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with + # TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + # position_ids (1, len position emb) is contiguous + # in memory and exported when serialized + self.register_buffer( + 'position_ids', + torch.arange(config.max_position_embeddings).expand((1, -1))) + self.position_embedding_type = getattr(config, + 'position_embedding_type', + 'absolute') + + self.config = config + + def forward(self, + input_ids=None, + position_ids=None, + inputs_embeds=None, + past_key_values_length=0): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length: + seq_length + + past_key_values_length] + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + embeddings = inputs_embeds + + if self.position_embedding_type == 'absolute': + position_embeddings = self.position_embeddings(position_ids) + # print('add position_embeddings!!!!') + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class BertSelfAttention(nn.Module): + + def __init__(self, config, is_cross_attention): + super().__init__() + self.config = config + if config.hidden_size % config.num_attention_heads != 0 and \ + not hasattr(config, 'embedding_size'): + raise ValueError('''The hidden size (%d) is not a multiple of + the number of attention heads (%d)''' % + (config.hidden_size, config.num_attention_heads)) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / + config.num_attention_heads) + self.all_head_size = self.num_attention_heads * \ + self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + if is_cross_attention: + self.key = nn.Linear(config.encoder_width, self.all_head_size) + self.value = nn.Linear(config.encoder_width, self.all_head_size) + else: + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = getattr(config, + 'position_embedding_type', + 'absolute') + if (self.position_embedding_type == 'relative_key' + or self.position_embedding_type == 'relative_key_query'): + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding( + 2 * config.max_position_embeddings - 1, + self.attention_head_size) + self.save_attention = False + + def save_attn_gradients(self, attn_gradients): + self.attn_gradients = attn_gradients + + def get_attn_gradients(self): + return self.attn_gradients + + def save_attention_map(self, attention_map): + self.attention_map = attention_map + + 
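+ # The save/get helpers here cache the cross-attention map and its gradient
+ # when `self.save_attention` is enabled (see the forward pass below);
+ # presumably this exists for attention visualisation and has no effect
+ # otherwise.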
def get_attention_map(self): + return self.attention_map + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, + self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention: + # print(self.key.weight.shape) + key_layer = self.transpose_for_scores( + self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores( + self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + past_key_value = (key_layer, value_layer) + + # compatible with higher versions of transformers + if key_layer.shape[0] > query_layer.shape[0]: + key_layer = key_layer[:query_layer.shape[0], :, :, :] + attention_mask = attention_mask[:query_layer.shape[0], :, :] + value_layer = value_layer[:query_layer.shape[0], :, :, :] + + # Take the dot product between "query" and "key" + # to get the raw attention scores. + attention_scores = torch.matmul(query_layer, + key_layer.transpose(-1, -2)) + + if (self.position_embedding_type == 'relative_key' + or self.position_embedding_type == 'relative_key_query'): + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange( + seq_length, dtype=torch.long, + device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange( + seq_length, dtype=torch.long, + device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding( + distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to( + dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == 'relative_key': + relative_position_scores = torch.einsum( + 'bhld,lrd->bhlr', query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == 'relative_key_query': + relative_position_scores_query = torch.einsum( + 'bhld,lrd->bhlr', query_layer, positional_embedding) + relative_position_scores_key = torch.einsum( + 'bhrd,lrd->bhlr', key_layer, positional_embedding) + attention_scores = attention_scores + \ + relative_position_scores_query + \ + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt( + self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for + # all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. 
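+ # Shape note: attention_scores is (batch, num_heads, query_len, key_len),
+ # so the softmax below runs over the key dimension and positions masked by
+ # the large-negative additive attention_mask receive near-zero probability.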
+ attention_probs = nn.Softmax(dim=-1)(attention_scores) + + if is_cross_attention and self.save_attention: + self.save_attention_map(attention_probs) + attention_probs.register_hook(self.save_attn_gradients) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs_dropped = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs_dropped = attention_probs_dropped * head_mask + + context_layer = torch.matmul(attention_probs_dropped, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + ( + self.all_head_size, ) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, + attention_probs) if output_attentions else (context_layer, ) + + outputs = outputs + (past_key_value, ) + return outputs + + +class BertSelfOutput(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + + def __init__(self, config, is_cross_attention=False): + super().__init__() + self.self = BertSelfAttention(config, is_cross_attention) + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, + self.self.attention_head_size, self.pruned_heads) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len( + heads) + self.self.all_head_size = self.self.attention_head_size * \ + self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output, + ) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class 
BertOutput(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + + def __init__(self, config, layer_num): + super().__init__() + self.config = config + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.layer_num = layer_num + if self.config.add_cross_attention: + self.crossattention = BertAttention( + config, is_cross_attention=self.config.add_cross_attention) + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + mode=None, + ): + + if mode == 'tagging': + + assert encoder_hidden_states is not None, \ + '''encoder_hidden_states must be given + for cross-attention layers''' + + cross_attention_outputs = self.crossattention( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = cross_attention_outputs[ + 1:-1] # add cross attentions if we output attention weights + + present_key_value = cross_attention_outputs[-1] + + else: + # decoder uni-directional self-attention + # cached key/values tuple is at positions 1,2 + self_attn_past_key_value = \ + (past_key_value[:2] + if past_key_value is not None else None) + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + + if mode == 'multimodal': + assert encoder_hidden_states is not None, \ + '''encoder_hidden_states must be + given for cross-attention layers''' + + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions=output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[ + 1: + -1] # add cross attentions if we output attention weights + layer_output = apply_chunking_to_forward(self.feed_forward_chunk, + self.chunk_size_feed_forward, + self.seq_len_dim, + attention_output) + outputs = (layer_output, ) + outputs + + outputs = outputs + (present_key_value, ) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class BertEncoder(nn.Module): + + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList( + [BertLayer(config, i) for i in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, 
+ encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + mode='multimodal', + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = ( + ) if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + + for i in range(self.config.num_hidden_layers): + layer_module = self.layer[i] + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[ + i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warn('''`use_cache=True` is incompatible with + gradient checkpointing. Setting `use_cache=False`...''' + ) + use_cache = False + + def create_custom_forward(module): + + def custom_forward(*inputs): + return module(*inputs, past_key_value, + output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + mode=mode, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + mode=mode, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1], ) + if output_attentions: + all_self_attentions = all_self_attentions + ( + layer_outputs[1], ) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + if not return_dict: + return tuple(v for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] if v is not None) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class BertPooler(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. 
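+ # That is, hidden_states has shape (batch, seq_len, hidden_size) and the
+ # pooled output is a dense + tanh projection of the position-0 ([CLS])
+ # hidden state, giving a (batch, hidden_size) tensor.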
+ first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class BertPredictionHeadTransform(nn.Module): + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm( + config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +class BertLMPredictionHead(nn.Module): + + def __init__(self, config): + super().__init__() + self.transform = BertPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear( + config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that + # the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +class BertOnlyMLMHead(nn.Module): + + def __init__(self, config): + super().__init__() + self.predictions = BertLMPredictionHead(config) + + def forward(self, sequence_output): + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +class BertPreTrainedModel(PreTrainedModel): + """An abstract class to handle weights initialization and a simple + interface for downloading and loading pretrained models.""" + + config_class = BertConfig + base_model_prefix = 'bert' + _keys_to_ignore_on_load_missing = [r'position_ids'] + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (nn.Linear, nn.Embedding)): + # Slightly different from the TF version + # which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_( + mean=0.0, std=self.config.initializer_range) + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear) and module.bias is not None: + module.bias.data.zero_() + + +class BertModel(BertPreTrainedModel): + """The model can behave as an encoder (with only self-attention) as well as + a decoder, in which case a layer of cross-attention is added between the + self-attention layers, following the architecture described in `Attention + is all you need `__ by Ashish Vaswani, + Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. + + Gomez, Lukasz Kaiser and Illia Polosukhin. argument and + :obj:`add_cross_attention` set to :obj:`True`; an + :obj:`encoder_hidden_states` is then expected as an input to the forward + pass. 
+ """ + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = BertEmbeddings(config) + + self.encoder = BertEncoder(config) + + self.pooler = BertPooler(config) if add_pooling_layer else None + + self.init_weights() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """Prunes heads of the model. + + heads_to_prune: + dict of {layer_num: list of heads to prune in this layer} + See base class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def get_extended_attention_mask(self, attention_mask: Tensor, + input_shape: Tuple[int], device: device, + is_decoder: bool) -> Tensor: + """Makes broadcastable attention and causal masks so that future and + masked tokens are ignored. + + Arguments: + attention_mask (:obj:`torch.Tensor`): + Mask with ones indicating tokens to attend to, + zeros for tokens to ignore. + input_shape (:obj:`Tuple[int]`): + The shape of the input to the model. + device: (:obj:`torch.device`): + The device of the input to the model. + + Returns: + :obj:`torch.Tensor` The extended attention mask, + with a the same dtype as :obj:`attention_mask.dtype`. + """ + # We can provide a self-attention mask of dimensions + # [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it + # broadcastable to all heads. + if attention_mask.dim() == 3: + extended_attention_mask = attention_mask[:, None, :, :] + elif attention_mask.dim() == 2: + # Provided a padding mask of dimensions [batch_size, seq_length] + # - if the model is a decoder, apply a causal mask + # in addition to the padding mask + # - if the model is an encoder, make the mask + # broadcastable to [batch_size, num_heads, seq_length, seq_length] + if is_decoder: + batch_size, seq_length = input_shape + + seq_ids = torch.arange(seq_length, device=device) + causal_mask = seq_ids[None, None, :].repeat( + batch_size, seq_length, 1) <= seq_ids[None, :, None] + # in case past_key_values are used we need to + # add a prefix ones mask to the causal mask + # causal and attention masks must have same type + # with pytorch version < 1.3 + causal_mask = causal_mask.to(attention_mask.dtype) + + if causal_mask.shape[1] < attention_mask.shape[1]: + prefix_seq_len = attention_mask.shape[ + 1] - causal_mask.shape[1] + causal_mask = torch.cat( + [ + torch.ones( + (batch_size, seq_length, prefix_seq_len), + device=device, + dtype=causal_mask.dtype), + causal_mask, + ], + axis=-1, + ) + + extended_attention_mask = ( + causal_mask[:None, :, :] * + attention_mask[:, None, None, :]) + else: + extended_attention_mask = attention_mask[:, None, None, :] + else: + raise ValueError( + '''Wrong shape for input_ids (shape {}) or attention_mask + (shape {})'''.format(input_shape, attention_mask.shape)) + + # Since attention_mask is 1.0 + # for positions we want to attend and 0.0 + # for masked positions, this operation will + # create a tensor which is 0.0 for positions + # we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores + # before the softmax, this is effectively + # the same as removing these entirely. 
+ extended_attention_mask = extended_attention_mask.to( + dtype=self.dtype) # fp16 compatibility + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + return extended_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + is_decoder=False, + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj: + `(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer + of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj: + `(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token + indices of the encoder input. This mask is used in + the cross-attention if the model is configured as + a decoder. Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length : + obj:`config.n_layers` with each tuple having 4 tensors of shape : + obj:`(batch_size, num_heads, sequence_length - 1, + embed_size_per_head)`): + Contains precomputed key and value hidden states of the + attention blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally + input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to + this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj: + `(batch_size, sequence_length)`. + use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value + states are returned and can be used to speed up + decoding (see :obj:`past_key_values`). 
+ """ + output_attentions = ( + output_attentions if output_attentions is not None else + self.config.output_attentions) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else + self.config.output_hidden_states) + return_dict = ( + return_dict + if return_dict is not None else self.config.use_return_dict) + + if is_decoder: + use_cache = ( + use_cache if use_cache is not None else self.config.use_cache) + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError('''You cannot specify both + input_ids and inputs_embeds at the same time''') + elif input_ids is not None: + input_shape = input_ids.size() + batch_size, seq_length = input_shape + device = input_ids.device + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = inputs_embeds.device + elif encoder_embeds is not None: + input_shape = encoder_embeds.size()[:-1] + batch_size, seq_length = input_shape + device = encoder_embeds.device + else: + raise ValueError('''You have to specify either + input_ids or inputs_embeds or encoder_embeds''') + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[ + 2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones( + ((batch_size, seq_length + past_key_values_length)), + device=device) + + # We can provide a self-attention mask of dimensions + # [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to + # make it broadcastable to all heads. + extended_attention_mask: torch.Tensor = \ + (self.get_extended_attention_mask( + attention_mask, input_shape, device, is_decoder)) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to + # [batch_size, num_heads, seq_length, seq_length] + if encoder_hidden_states is not None: + if type(encoder_hidden_states) == list: + encoder_batch_size, encoder_sequence_length, _ = \ + (encoder_hidden_states[0].size()) + else: + encoder_batch_size, encoder_sequence_length, _ = \ + (encoder_hidden_states.size()) + encoder_hidden_shape = (encoder_batch_size, + encoder_sequence_length) + + if type(encoder_attention_mask) == list: + encoder_extended_attention_mask = [ + self.invert_attention_mask(mask) + for mask in encoder_attention_mask + ] + elif encoder_attention_mask is None: + encoder_attention_mask = torch.ones( + encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask) + else: + encoder_extended_attention_mask = self.invert_attention_mask( + encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape + # [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape + # [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, + self.config.num_hidden_layers) + + if encoder_embeds is None: + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + else: + embedding_output = encoder_embeds + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + 
encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + mode=mode, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler( + sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +class BertLMHeadModel(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r'pooler'] + _keys_to_ignore_on_load_missing = [ + r'position_ids', r'predictions.decoder.bias' + ] + + def __init__(self, config): + super().__init__(config) + + self.bert = BertModel(config, add_pooling_layer=False) + self.cls = BertOnlyMLMHead(config) + + self.init_weights() + + def get_output_embeddings(self): + return self.cls.predictions.decoder + + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + past_key_values=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + return_logits=False, + is_decoder=True, + reduction='mean', + mode='multimodal', + ): + r""" + encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj: + `(batch_size, sequence_length, hidden_size)`, `optional`): + Sequence of hidden-states at the output of the last layer + of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj: + `(batch_size, sequence_length)`, `optional`): + Mask to avoid performing attention on the padding token + indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. + Mask values selected in ``[0, 1]``: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (:obj:`torch.LongTensor` of shape :obj: + `(batch_size, sequence_length)`, `optional`): + Labels for computing the left-to-right + language modeling loss (next word prediction). + Indices should be in + ``[-100, 0, ..., config.vocab_size]`` + (see ``input_ids`` docstring) Tokens with indices set to + ``-100`` are ignored (masked), the loss is only computed + for the tokens with labels n ``[0, ..., config.vocab_size]`` + past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length + :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj: + `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention + blocks. Can be used to speed up decoding. + If :obj:`past_key_values` are used, the user can optionally + input only the last :obj:`decoder_input_ids` + (those that don't have their past key value states given to + this model) of shape :obj:`(batch_size, 1)` + instead of all :obj:`decoder_input_ids` of shape :obj: + `(batch_size, sequence_length)`. 
+ use_cache (:obj:`bool`, `optional`): + If set to :obj:`True`, :obj:`past_key_values` key value states + are returned and can be used to speed up + decoding (see :obj:`past_key_values`). + Returns: + Example:: + >>> from transformers import (BertTokenizer, + BertLMHeadModel, BertConfig) + >>> import torch + >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') + >>> config = BertConfig.from_pretrained("bert-base-cased") + >>> model = BertLMHeadModel.from_pretrained( + 'bert-base-cased', config=config) + >>> inputs = tokenizer("Hello, my dog is cute", + return_tensors="pt") + >>> outputs = model(**inputs) + >>> prediction_logits = outputs.logits + """ + return_dict = ( + return_dict + if return_dict is not None else self.config.use_return_dict) + if labels is not None: + use_cache = False + + outputs = self.bert( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + is_decoder=is_decoder, + mode=mode, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + # sequence_output.shape torch.Size([85, 30, 768]) + # prediction_scores.shape torch.Size([85, 30, 30524]) + # labels.shape torch.Size([85, 30]) + + if return_logits: + return prediction_scores[:, :-1, :].contiguous() + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift + # prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, : + -1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss( + reduction=reduction, label_smoothing=0.1) + lm_loss = loss_fct( + shifted_prediction_scores.view(-1, self.config.vocab_size), + labels.view(-1)) + if reduction == 'none': + lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) + + if not return_dict: + output = (prediction_scores, ) + outputs[2:] + return ((lm_loss, ) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, + input_ids, + past=None, + attention_mask=None, + **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, + # the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return { + 'input_ids': + input_ids, + 'attention_mask': + attention_mask, + 'past_key_values': + past, + 'encoder_hidden_states': + model_kwargs.get('encoder_hidden_states', None), + 'encoder_attention_mask': + model_kwargs.get('encoder_attention_mask', None), + 'is_decoder': + True, + } + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple( + past_state.index_select(0, beam_idx) + for past_state in layer_past), ) + return reordered_past diff --git a/mmpretrain/models/multimodal/ram/config/__init__.py b/mmpretrain/models/multimodal/ram/config/__init__.py new file 
mode 100644
index 0000000..ef101fe
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/config/__init__.py
@@ -0,0 +1 @@
+# Copyright (c) OpenMMLab. All rights reserved.
diff --git a/mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py b/mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py
new file mode 100644
index 0000000..e4b8865
--- /dev/null
+++ b/mmpretrain/models/multimodal/ram/config/ram_swin_large_14m.py
@@ -0,0 +1,93 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# data settings
+test_transforms_cfg = [
+    dict(type='Resize', scale=(384, 384), interpolation='bicubic'),
+    dict(
+        type='mmpretrain.PackInputs',
+        algorithm_keys=['text'],
+        meta_keys=['image_id', 'scale_factor'],
+    ),
+]
+
+
+def get_ram_cfg(mode='normal'):
+    assert mode in ['normal', 'openset'], 'mode must be "normal" or "openset"'
+    model_type = 'RAMNormal' if mode == 'normal' else 'RAMOpenset'
+    model_cfg = dict(
+        type=model_type,
+        tokenizer=dict(
+            type='BertTokenizer',
+            name_or_path='/public/DATA/qbw/ckpt/bert-base-uncased',
+            use_fast=False),
+        vision_backbone=dict(
+            type='SwinTransformer',
+            arch='large',
+            img_size=384,
+            window_size=12,
+        ),
+        tag_encoder={
+            'architectures': ['BertModel'],
+            'attention_probs_dropout_prob': 0.1,
+            'hidden_act': 'gelu',
+            'hidden_dropout_prob': 0.1,
+            'hidden_size': 768,
+            'initializer_range': 0.02,
+            'intermediate_size': 3072,
+            'layer_norm_eps': 1e-12,
+            'max_position_embeddings': 512,
+            'model_type': 'bert',
+            'num_attention_heads': 12,
+            'num_hidden_layers': 12,
+            'pad_token_id': 0,
+            'type_vocab_size': 2,
+            'vocab_size': 30524,
+            'encoder_width': 512,
+            'add_cross_attention': True
+        },
+        text_decoder={
+            'architectures': ['BertModel'],
+            'attention_probs_dropout_prob': 0.1,
+            'hidden_act': 'gelu',
+            'hidden_dropout_prob': 0.1,
+            'hidden_size': 768,
+            'initializer_range': 0.02,
+            'intermediate_size': 3072,
+            'layer_norm_eps': 1e-12,
+            'max_position_embeddings': 512,
+            'model_type': 'bert',
+            'num_attention_heads': 12,
+            'num_hidden_layers': 12,
+            'pad_token_id': 0,
+            'type_vocab_size': 2,
+            'vocab_size': 30524,
+            'encoder_width': 768,
+            'add_cross_attention': True
+        },
+        tagging_head={
+            'architectures': ['BertModel'],
+            'attention_probs_dropout_prob': 0.1,
+            'hidden_act': 'gelu',
+            'hidden_dropout_prob': 0.1,
+            'hidden_size': 768,
+            'initializer_range': 0.02,
+            'intermediate_size': 3072,
+            'layer_norm_eps': 1e-12,
+            'max_position_embeddings': 512,
+            'model_type': 'bert',
+            'num_attention_heads': 4,
+            'num_hidden_layers': 2,
+            'pad_token_id': 0,
+            'type_vocab_size': 2,
+            'vocab_size': 30522,
+            'encoder_width': 512,
+            'add_cross_attention': True,
+            'add_tag_cross_attention': False
+        },
+        data_preprocessor=dict(
+            type='MultiModalDataPreprocessor',
+            mean=[0.48145466 * 255, 0.4578275 * 255, 0.40821073 * 255],
+            std=[0.26862954 * 255, 0.26130258 * 255, 0.27577711 * 255],
+            to_rgb=False,
+        ),
+    )
+    return model_cfg
diff --git a/mmpretrain/models/multimodal/ram/data/ram_tag_list.pickle b/mmpretrain/models/multimodal/ram/data/ram_tag_list.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..0519d1ee759eacdad99df2811ff59432369e1599
GIT binary patch
literal 51099
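A minimal usage sketch for the config above: get_ram_cfg only returns a plain Python dict, so nothing is instantiated until that dict is handed to a builder. The sketch assumes the RAMNormal class added elsewhere in this patch is registered with mmpretrain's MODELS registry and that the hard-coded tokenizer path is replaced by a locally reachable bert-base-uncased checkpoint; the override and variable names below are illustrative only, not part of the patch.

# Sketch only: build a RAM model from the config dict returned by get_ram_cfg.
# Assumes RAMNormal/RAMOpenset are registered in mmpretrain.registry.MODELS and
# that a bert-base-uncased checkpoint is reachable at the substituted path.
from mmpretrain.registry import MODELS
from mmpretrain.models.multimodal.ram.config.ram_swin_large_14m import get_ram_cfg

cfg = get_ram_cfg(mode='normal')                        # or mode='openset'
cfg['tokenizer']['name_or_path'] = 'bert-base-uncased'  # illustrative override
model = MODELS.build(cfg)                               # instantiate from the dict
model.eval()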
zPf^JHcCwhj9muIqJJ+{fv<|kTMZ?J_`$q%@CN;p_$-wc*>BdVOglvorMm7vMjhit| z=}QZb%T}O>4NpT50C3Mxs+#SJTQc4YfhfAmcoL&#gH!im9Ao zYqJNfsMC&lCS;PlOc0}4LAp;WFb4ph1_vH9^Vb$y63vX(q_V=$qz(5eR(CBJ0}e?8 zBaT4xoPI|=aLdQ3FP7z~$YtA5dQU9%(;lhamSe{(0Lyd0+CM!@=CmXLI)Yle_on4Y zYTBtFh0r^N+|B@O2-||VAa3B~e%b0I-sMUFH3hE(HR@cRJv|==IG1dJ+~i}yUTj$eZkf_=k~ecWJ?)Cgm#qQ~`F ztw2cveaPy`_t3jQ>upk2Xx*eFi3jRnN{~=zgU=bq!Rea&oXiQSED`B#H{469G$~+i zh!Q;~WeOMv8DiP+elwi&$73`s#UuDc9#68Gb}L`)&H@=+;7fo=UOa)n`ijz+hAh%Ye_G#If3#pbM z?*xR96z2nxxBv!lGoGDW?+{llPpdp|$!a6E%Dh`u!N72%4URY&K2Mx+k%Q1VYq&mB z&1{H$TC9^BE3uzxZ%fs1Ku|+qfy*4=a6WN@A7huO_onpp?4S~$E=gAXBQ20Y{^h_R(3#wdweASDR<*NC?!6f;Jd0sBDG~rkWp(zC zX~18`2T$_F?khPXeb1nV+qIoaYt^1lOVAU?Ut$1Uur7(Xj41#q$R&O}4j6v%)s1yN zX(pC$UCiHZ#hf#hK_26i&Oij5V<4V*>wzPw%UEG%5)v7r{& z)T4${)!>5TsC8LPhF6UGtOnDbKnQdGA+(NqA%TUlSJX_+nV!6B6G07_4lyGFHYq>j z1Prhtu)Jp{o;lD|P`N*JC&_9xihES-%LIz@zpNb?pL>=iK*8Eb8&4VF=N#E}`(gz= zw*^~kIvR5J(@f zC4@q3gye<*u-%Qp{PWZ?sjj443)wQcS4^i;S+xrFUhMHkLP>3bU`GcHjE2q!2f;pi zapBRpe^3v5khN`bFPpU{l8{oY+v7rHJZ=b70J+EmK5?F=JW@|1H4KV6jUkHs#kBj7 zLrS!c6+;(iAN|aEc5IxVIXrYifGMMg9-_@!^iX>Abo9K|Dy|$tzOE0>)!Z`VXOc!d z{BxRfXA1uJA;2-bt)s>A! zBX(pOwbCMr_KgKSUr}dOLivS}0_3Le0DwMDInPL>5{IUX2BkevCOL}UD-fE1$jHp z-`k+K$^~zhqMq_vxjebCK4Cr8n+H#hPu&4oPda zFBh*2rLzM1?yLYU>c@<3Rw1yXAY-0SQZ9B6Qq{()HWO;%ymB=wcB2dJAyU5f$MqS; zGC|;fbB~Uf8KJ~ZY-&srotwz@FKWxIXfw#n(Z_j~Ng@Og0aeIck`6)fj~xVVEwdfm z72>maE=x30y$cdFQB5RPUEI8K4cvv!PBIP;S{#pF&^>T1knt%#9OnK(W5w zK6exT6a9_{N{L#4=-7t+k+pDFj(5y!{s!CXHaXg($pmnJf8VOIeWx~GVgY7&<2K=f zTUV?qY~6wjIbf?EWHm>!_N0Zg3cfcK zBm!GLaBv2H4O06YBhG0D%4m5oMKF-*c~rm+>c z5?E|t7LgD1!vIJjut7U{P;dg|`RPJ~vnb8?liRl%&1OcbmMcbp0+0uoDNuckq$;ue zlgR0bsU|nnlTCtnl}#%%t%t9%r4nTNVX|7L@ zcCS7G!({QtS3tl~>T-7%$r^pcG6%gqzeLs&Z~Y1w9hkujoPr4Q03@EWKXSt$5(@^U zOFw-)H~kn{i~AXvE;4cF7}^HW`RAZhh=`;y+*36~YpG_1xS*G6xt1#kCuo?v6bM%< zhWC59&p$m&lQ->}6mi=|w(3QP8g#ZGwpHgvY1kI_k1`T@pnf!9IUIRh*bO)}=aL+8XAe7NM)a^wuz0B-7`zmMZ@Mw8}Yb zAPu+>LT)U>k+gLogLWq-V^$2a%F@PE$5L75dt%Co1cA_a+JC|bP)-jGk-#IT;6y6G z?8a6#8x^I#H2P9sXrYam&bwAw04?dpk^8U|5OTwIFfqyKyO1tR8iG$F7fi33dTg^x zcThWxsqELw`sbX5BVYlxsK!d*fyg}}Jci|eZpTo|7p13iO8SMlTI`$P3C=2W;m<7d!u-yDy9HR?`M2-j4}Amc8;t_?n!ag z!Ynn3B&!v9Wz=lENRJp+9?l8co(U?hK;sz6Bgaht08J4SN)5m@JFt4!9<-FLY89bb zMR{Rq=9K03d}ZRI3n&kXU>kS#o-v@{hR))Pq=;?x2AqNX(ElvHD~Wd#NLK zJ;B$H0A!GQh{;VT`0Q#yEZTmkFWst^;E|PPd7**F^g$&YZ2tfND#rkTbK{<@a|*kW zTx4j*8h6`Ttt8c2eLvoI!m^1ZjPiFCe>o%M(P3$!Rf?a0uXhgV0QrbrFRC zdlMC2!pCk|C8=t85(Fvs=DI0hNjp8`1LO}RpBX(9kXaCsbSkE`wG9%}T$$(7u1o7M za{J;cc);0?1~5nD5Po_`BIk0gK_k}VMw9L#tzNPdzCw144tD~6Jo)+S(a)SHqde?5 ziT$t%yS_Wp){S&%?&uZ|q+O&6TjOzDfq{|7lb*a^)HUC!>Yo%;O{@0-x@>6>v?k1! zP^1+Jkgx+GODW@kSp0v-q<6&DIh9&2_)ut6NWtDxI~oPtX8 zpP$cMF^25Qe8Qj@+fMr`x@0yiYdyNPH#Ae&mO&!R8XRtof!Z596Y>4WM<@%_RVAB3 zF|DYr%{s&vE3TVWmSF^uSl-fr*xV*3jOTy`agaK&A5EBFMuY~uFjbBjEL1ivn1fcW z6^GgYAE}wYzv}VSvSA1b1{GpN2w$FER%;r*rKnWZ86z-LiL)Ac0D~Ai@JkMUIXzDr zwepl^2qPO7tvQBIwuYI1>7&WJbRoD5Nl>64A19*5`tAi=JKWN38N{-Z!h7o&j>n;6 z)R->ZlvtDixC9W#_UiO1%!X5{20o>xEn8M>fm#h6-52h2wtFY!f^tiEQ}Ofts<^@2 zq%wqjD_GzD8}>NeXQxv7Z^h!T$YaTz7H9kS3h72s#i2R$zv)CxVxQsoNTP5F+iLcXHPs-~bb2=Z_uC9}McGwnF|<8Mv! 
zLD`g}T8W^mp-rf1x3&Eu-1>sZ)#@4QZ*@|m1mhbQAd(oL&m48Ya65#l^kwef-`de5 zj=rX}YEDu!EDE5HfOE-q@y2`u)f)Xmpac1ptCh6fCd_qJsl_yIGkY^#- z{PT{aaFM%=GZ7@yAg3Oy47Fg?t=I)sXSKK>EA7IDVlvq|W%JL^Ll`7k?iO-(VKCUC zAFZLE|Qv{8-aczCfmF@#!Y)#w+=#lEZ8g-Xq zkh@H*-xfqXOmP&@DFYI!k)9QL6+}z*aT^Q3DLLX(XJvY@d!f z>KK%(QIuy5xofItu93$Oj#Y(3CTBYoNHL5832bLM2OsCww0eOZ70aa55M-0qK+s=K!e{g!BT8IhvC0!;$ z+hm2>NZ)jFyqi`MRAGwaa|6#OBy+&ePJ+eE`CNvmTh)b`V`_GzhtZZX>N5o=k9G$b z@wEB#(R39V9FZ5zigl}r1ac%w-mJMNBmscGI5{Wx=p*rmCNOq;l2|^qR(QyPq-4x{ zIRp=%7yy5tJ#0CcixVXQxh)wU!?R9-0DdwL`E-m3HTrU^v=!HniYd2zi9!A z(OCpU4{V!`7>pr7;NTDV@y9`EpQ;ip>P0k#=aQt#vV|yiB5}B$Fn&Dz@$>Q2@;~3G zcsHn~#+Xt%(?wvVS)F8^1ERc~VM`LFh!|cJgXcYC2tm}=txHA&sZXg{rCOrt^SsLn zmQT2jGb#o=w+A4p4ecHWJf5598mKYiA9zpP#a212SBgn>DJ0k$w%KG-6;!G=NX`aH z{{RRl9y(kY+O7vPDKm*6*&$fVss!`_A<&#QK+ieJ&z$wPVj!Ieb6PB{*DG19VwD|A zI4vnr!5BEt&%oekreXl&9HE-Ei!()9@}kENEc^Eo9|3^}1CzLaY#;5?5`93WM{rF= z{X}T)S>0U9k<2VQY-brOw;+|zA0wa7j)Sd;>&KV#mFcTb9m!WtjZK+j+aoP-BMfrb zQ~>QGEX3gBKN-l(*vQzG4to1ZC)C?V)1tGfNoIXP$!MTSOMTR!vowL2`$y`|Fc&9| zMqiKu+BY(H>MoLPM_pQXblVlH#$$@>!*eYml;q?RHtlGb`Cc+|I2}tPaul(w)x%6W zgca>PTCB0$fG?|2I1!e96t3nuD}@>5usn0qfIyK_3D}AIf^9oW)wFoKlTn_=qk2Y( zC$$)?9zEbHB18dqR*3RPf}C;(Qfnrq@x8AhsipmPzOi#qsNKcfdL&ew&m`*`p|87m z{-k>t%0S$}a56`pq|Zj7OgvB0He?m)S+bGTpzEksv0@PNo20JAeC|OQM&)C{1-Q>3 zh1zR@kMoI^#?Nu&b#5|TT|Ui~wJe^j^ClK)w(iE?D!gqOz|PMYJ!l{>h*+=YSFIyg zsd~Ll2UwCxY^2RAGC03vf&FVGeESr$F5I^Rj-bJSqcmexJGljQoiv%wrEF^2jIa=p zM2#BG6~jri9004iS%71L3W4#|Q8@xy2HD~|o5>}cvdKo)n$~7;WmqD_W)P|w8*-JA zTP27gj?zX32TUDE<54S)$(!lgtC*Ib-)Ut-%o08B(wXu4c*lS_zy}7{&;bwETRNLZrdV-W3U)N~ zTFU&Pt{7mI{aDXbD{zlwZ}%jMhXBvPTVkF+@9X zBN$!841NLUnQ(7XPCvA?F?njrLKd$eL0rb`h(i|Kz$y1_Eyn;H9~@^^G#-S{l$)a~ zmuo{5<@C)q%=M%ms9_fzp99$3008oQbx+cm6F9ESqeH5F{{W}=20O5%N|Eg$(tNMD zvM~0Jf92L(iW{k@5W4p<7Mj(Z*riYIbdwO@G!CU>V0j`l=R5!hu<1y+T();fv;ezLECKR-^!W2`%q*aa zmH{Pq(cEg{52i~9 zMV8Yjo~VC@1Z@fiNFR@mvb~nMrLzt^Pjb*T2h!oEU$!)ga<`~qm9v3kAd*$b>TKb^ z9y+jd)XRT)w9!+LMX1-)wGCESWfMa3SXU7?gt?_ae0xTA+~hXrK&U0`=t_8O6+5Z7evn_ z9tPw1z~J>4uw&z&2sA-fw0ACFyO|i~ySnX@$PP#Z`*1io;~excfmUTA{Z>6r$E{yc zzK^DfmdJ@<+F^xqMhiS}Uny9W6{l(OU{LyF0y4-<)I* z$?HbUMmxExy**i~&qmG3;XtBhHJUAoa!Es+jN>@+2lwd|65E83JC+$XK${jMB5)@6 zuOp5Fesi92`*pA&B4uWyW-A(8c5_uWB;^&PF+>mOTmggdayb70UZJ1I1Zq)Uy~W@8 ze^t3N#WXa8OCd!mn1`pddq@ECM$ky{p0Z=Y+_y9~Vd`1rpp~f6l7v>~d69i@R!#B8 zjPf(j#@rw8*0)L)A_ll7*EcRnr$uK@Skg~NYO57Y0AD8~;fKyTnkx}za68FV{m|WwtBiY4=O4Kr zx9!r939}y>kZx$Yg?d`Pr&CL>R#+iJP?vvrW>8be#z7tiGmP{pLDaLI*^adN?gV=7 z^HH(N74*!C30?^}0I|k-{r>>JRZv^00>bW3_Q-;?vMOGjsg7w213cgRS+?y|BTYi}9 zDTY5taWrI;un+M3^Yg*##|t*3%LVr*62%)rW{DVvD-M0aGy9(mK^}U!LZXa_qqtpx zc4+?Ac`r@1e&uD~?kE5lqbYQn+tp&REsLf{drY*t0`f9{`gJfX1 z_yfnDruGGiTyMymjVZx1yBp>xwKdqo=LGSEIl%-Ddag3n5Rq3?aUPn}R#RZShCq%Y zkSGiedB;Db9Dkop$a|3pNxxBjn@?FP(2H)^(yHH=OIuGpnH5Q=Bw)1c zK_C_bIKty3`09C1wq-HnH*Er`S*liL`X64`8{76k8u>yxg4p;l6dQS8-Xt1yPc$dk>mV1 z@Oo|r2tx|hB(Efbq;DdBPl7Tscp1(Ma52&XM^liCvm!G*5KF#KxU8y{V-YB3ng0N0 zQ~v<8fDV2+>y=D##F~lbxVltv-@iQicr!X}Swv!K00n^~1<3t}&&V0-Ze;-UBH6TW zw6Ri?NvKCs7XF|v!0vX=WB>&#$jCSY1LXAwTaJf_Fw44?JvpY%q8e!dQx#1q zin$eflh>{Mf_Sx9rqp#ASJ8wqCd9Cg3V>J_@(4J|YywXh=wrfG7swn<>uI#EKCyFA zyd;uD`*~vwP*;gxbcnkS;@D+7zCisz^4p zo_H7mv+#evRv1-!nZ9cfMDpj=&*H!7m(u?LsK@DS{k!#N>UVs>rdHOqx_zPB>#3a* zoUa}vnlHGC?95Wdd5kct6iYRC6?-+|lBdVLiEJpuTNvZ&57y-D*uIZm&_0#@PWsUq^36b6KU^>veUwNNrm5b2U0Jv~25Q#H(7?&$abpuqoJhrIBKh#J@=Q zXJR;JJ?b&+zpbB5JI6=w-sJi_-QAP*Yq#{S-mK>zQIvfNMsm?ERgWdi({()AHU(__Z-RIotp?y5zQFOyC`p z{8ard@B6*EH`Co)>O38-tky`h&i$9RgnBIZKl0l5Dbgf~C4EV5N2AJZJaN-#jIha8 zDWsi!Hmdj}Zucq~ga!Wqj1Q)8{Xgvv{eI8-rFTTtG%ZWKH9JGJqN{VVyLz6ux;?#X 
zU8Oy^q=FHD)nB)bY+dxyc?=3FOa;aUsxl!5BHrh&H8_o4;lH9wS*N#JPa@dTqd8Tl>A^T&>|k;u@Nbz$6eB}TH*M=jcdIZO)^ zuGl1Cz7+=~f9~TxI?J4r^m>k|OJlhfnD(bh>)2Rj+bnp5`$FMJU5ow^!8rZ;&QOM_ zQIK^S3o=Ehid6Iq8r?b7rCaG5G!YT-0VMDFg{ za-Gqct)uF#UaevF7u(Yd9M)uTVqK?&#us-xiSfr6=?E{gC|xWqMs!Gf9uwg4;>njX9u`$n@|aB)w`+Jk|>B3j=+Hz9@EEwd||wi(&7b$ z_ZT1@>JaoMh1z&iPIyBBA#zexecYEm06g+Kb{sb$jg1v1FS9K1QkKfvI%b?FZ&114 z>iG9Fk}z|R+XJp!BIUUns*Nkudhy8(eMt<0LdVq}){v+hMn@wg5J&wwfeeF;WdLkQ zX02{LF2W=hE?AZ0+R>;2Cdv0N_88Cq0KckZQpRz~_l+yW))6AHO0}6`*s?OQ46^dX z&Q0f+FX_n+@#8Q`UR44-&Ad%0WGCcJ%LhQ#OOIV9%vFjKa ztx08ZxDrPnoIWsdt)~Ok2X@hug4rkI!8rNpm20>l^3a*;86%Dfgc!BLy&_6V(`PVyUMmeQ?%py134fMpE*4bl}B+IiG~eDH^*i-m7P@JJirg7P$88Xn5HOcB!u*xF^l&o@roE!k7Xg`8U{j=2@LZJi`aeQ@>KebzT zj8+yiB*kFnW4CI6!{{V~J zagP8FxiOQYBr-99O{YazPX?cAl?7G+HrD-F1d<2pBxD2mB$LvzaT1G?#Y3%sErLe2 z*EJa`uG=#Dppw!t_JR&bQJjE0bPhWg03;Ix7W;ZA9@5#XsCx-ml(R^(md0}2V4iSK z7~`qBiWMo3At!PQu}fZ@*Qrv7@GWZ1AWhPjEr`Lw?+1c%KO?N*(fVZ( zL5$7Fs>f69ZZuF}$uvHhc64rit&RyGpV)Ep(}f=7+Q#G(SkxZf2_i~$;Fib$I=85x z1MOTKvi|^sqT4$I_P88ulz{&kzzm7VxxjjJG>~V$DYVRy@#XqE7H@X%70Af$H?tXGe z{{XH$o`rgVDvrz!CZ$a+g|QWO5(^R-_M)+4fJQI@B!B?%j;TNji<2E5OVmV>*EXH3 z-PNYFgkf1dNEOJ*0PVt`{loHl&yj3K$CLB8p2clO{5pP;i5!$Hv6fU>8aYH;i)|r+ z0Zstq?m70J4_TWX=t~9zW*1R3swn!VoW|~=(E!T7)MQn`+<}j@U?-FQBgZ{LC=O-KMaxaLEpfz=EPV{pM+B_Gab34OkNsB`B4Z9vd z0Xql);X%gHfO+emBFZBEWY;gW6KXKWCZJ5SA4x-rhp1OMRb>Hka^Hi;j~!KVYG%lk zderh))gR$&wSJ)eUe$iA=>Gsx{;z!=`hjyvtK0h9c2=VmQ>sM+*JzBgI=9=*sZxx2 zKD00PwPZEDSVXTu<$?6gC`cQfis#ZE#hx#sbu{LZu8-I~!=mfmwWrjS`f4<)uLV6e zMvyzTK@|9Q&#YZpy->yT6w1K;%U39hxX6Woj0)T zcF%bC6(sH7r`@;o>rJuhY+|Hh!b%vCXT-^52oxnN2TIV2WhYt4E{u>cL%h zn3qGK6|lWcywhDE0hiKLn|3bDkLK2D)_YuCBYJG=Nr z`g7b}rKI-tU#dS;^e)qG-qyQMxoc^sPIm8J-PJpR_35;`a?4zr*|{yKp|@9MfnL>w z<=rKWr;~6tQV!>;dy`kxF4~V)o>_&Pa>-&~v3ep&-HRSLWZFw(pK6?Zj-c0bIRM)M z5kvg8)RyKE>edgcL36c~oDb03xnKEmK00m(?+|`W;JvqEYA`LR1Um#}H>)}@n7%@Q z4mJ~mj{~Jd)ZE~<{Z55TS)H|wG-}nOio@2Bp|0dOIKjd25AWx!Uo5w9W6D2B43)ljfzw)DLkBmC1<)HNA30OB|yS8)=@jspO2!;b`feDxQ>R~H|Y z=GW9iLQC{?tCl98OiJ#pGP4OJ$tU#=0bVdM@PBTmufR%>7;ZPxAhoZUCBBuO#5s8* zQS6TcAoKdkcali~45Ev9V(9fqH#WrC6hoSSQj~9!em_2`YL1-BCKJNct!=>T*d=wTY}u5vR+ul~!VM z0Brkz1LGZN`9KgRT#Vkjg}k+45+swTf#YU~dH|%P0l2vHpMZWkJOoheHw@DBy*dpc z?P>2MRgf{o++l$oomDde|1_p8f;A0?u{PaTJp#XIo z+I5ZT(0XdMBavcnOpMM5&Tx1c=Yz*n%$kjj&vQX`?xK)t4Pq@TPkqPIbu!Dl0Du^9 zFb9tx9YcX8<&K2<3NUIgQngNNT5UNM$ekM`DCcnpBr*MmT6mB(GcG_*`--dEs&!kk zwx@Pzh9a*Urc@E&kMGGj1LLUjH#9Qpz0F}+q>@|quS+Saf;kxhUs0G5!si(Fx1N(G z_F!Fp>6+H-$zGj^^hA;*jH!&s6iMR_JmI?TD#Q6gZuRb1C~2oq5Tx?K3yEE3u8{ko z!utZrpZ9UdJyRwgN-7{~G-NetNvu6{_p*tWLQIT+m2CML&wqk6m53BwohCBg)F@iFGZmhVkxt~5zqTz~>#U=C!6!9rQ7&|LQSbXu%`E=}rcLL-F zX)ux*g(^}i4@-DMoCA^p{{V+S@akhN{{Tr)E*P4PT}ok2Hv6W9!m|YQ#AE_6zd1iA z$48haQJ6A|feEYW(_Mn@nM`~3L=ckfbMoH<+&?`Sp@R*>;j?8Ug|4g+7TqGOD}_vF zIX+JS{{Vikfwx9WE-(d%zDpHr%+td(cPr$UV634R13uH|rX<}NFbOe~cB;c&XGKct zv33&CiS9Nw<{Q!?a3J+5O&~X@=3_)!8bdJ9Y$J*aXnUeBcfQO zZ)olyIvy~f1GJuelk?JK$wyL{(2;ZMSC;h{?uChUG8b`naw~@Je;A~VmH@P0h?LO~i403POa+L0BVqNM~gvWy(zGmH`x zeEW}{IXz3|o49$IKc!-}ptK>DtH@zVE1-lTqyyo+?rf?2XFnZHlkF~Ifl?KtLX7qhbueHaRDZgPeb_ zKo%`CvWnzn)}mT7QjS^dS5k4&FkNr#ZHm)NAK0K;fl_0aS_r= zm7@j6(wxSl{{T-CDNto%PV5u8xCD%FKRr+6{Zh@I)X1iOwam*(GWYdGz(HesLM{gL09yuvhF*=_)-A}kV9wS^#}3hoLNcK zh0O74)M-IxS{76&-ce7i>=~1OGD?I00JvmhBjc%@exg2J2IBhEUcR#=w!~L88E22w znkWJ@C=7+9CH>5Iu1MqBxKV@DT}Uy=W*6#d>djFtD1sJgnb6EOr>sa)8OpBWzr;8s0D@;;Q2mV8#Tr(GHi7<)GI|sp0#5_w-gpPN@bQ>#gK;f@T}~7swnU= z@ID#Tu!;%;xnPDFs$H*sTT<1dG9OmC_bHY3v1k2QQM46Q-~)_#$iy10rgqC_gb9wJ zsnNS}>(#kUCrIJ-+i+mJhV8iau{*K&$T{Px1AoePa#yKE?w;|{J1UJ>t|hfZuQj%k 
zsNB+fOPp@n0T?I8oxhHkD{<&e7`#K3NhYFeTI~~iPVS0lT4l0gSFFCC_IVUClFmpR z6(ubcX0{kf@`R=wkF9BSdc*!)QU&=)FW238MGjBch8S zQ9~rTe*fE^=f1l4?(>}U=IrM@`?J?M>+H4Hcl}%Zw+^7uQrA=m0D%Ai@b&=wTLGv5 z$Vo^@Nr=fwNl7Wl$tkEHG*ox)P_Z&F(n7e|d0<@ZT%7j=q#oSk7w6~X5>^rsN65;{ z%kv1SYN|X$O3BH~{^ub;ataEnJ5*2_8mKHE7oY6^bNkl?pd|(R1J6J}E&u^75JU_7 z*9&039VZd+zZ&3wG#~+pkcgOsl#HC>wnGyQfB*;r5fFlihzJR9dk5d%2N2Q{(Q%3> z5!372kZ^f{MZ@p~q}?pvg1w+TDfeu^A^7;4>r4zVO%_+$#-Y-s{Whd3Ce;+@*s*euK%#2|r zQa?20CJYVW%_4IxmuboRluHf%ZQ63i`a_35(jzIGG9?>hoRI8luhGUL_HA(FEGX9Pw)^#aLll`=2lcIk)_XB7@}(#zhWq!K)pX(=R3 zV+Qmx&rOBF`&_G{@SLhu^7n~&UG=y*hD_yfT_sfRQ;}iXNXQ^B)5X1#SzcfrWSuRd z(;)s{+EUMpkIzqgsC~>#>wgxGNR#?g9y&4EOcbM6Ex2?W?>cr_b@;c*XM7j=_1g#n zz53O{qqTnZ8S)R%DvLTU({SwVG1tyl5*m=U)_uaqoQcEsM^>m~&> z;NlZLyGP*T3`REruN-C2N8ryn%Ci)LDoSI#trMIwuiJKcg%bT%Ah8ZyUbV6?mLZt`) zh#4$Gt{c_ec<_`CgN}~Y0S~x8UVU>3^Xz7UZCmUNZFGFOdzIcyM&pUa5kx9{5BE9$ zo3^`Mw;i6W33IHG1u{MkrV%N8h2}5X>d2ySNd%G+|)keN69NFw$w+O zb^}Vw2`hyFI{)(tEkdedy`-y&lcLC&*VID~$qoDq-613i`Df+69Zqc>z`I`1jZm3j z<`sJvI0q%R6#==aEXySGe>5S8D~LV0NFT8I0vS^(-49<|lWKjwWUIIjkzS+Lqb@7` z@?=%zIWqzhXvyJ7-AFrK=#}UoqH!^j$lvg>JzO_a%O9{?R%P)=%jL1;H`CR(dFv`s zL+f6~+jutVt(39nui>Abvm`s(v`2;!5Nn1muj!ArJ z;QRq;)nf*>kpgsz2B8Fgx9E`yQh6gv5hxCu7T}V()d{Ei;3Gn>8PbA0N3+Tk*1C@L6U#(+PN7X>6!;|QZVn2mre*CQAxpnJS#5@ ze;%MgIH<297B(NBm%$3wsSV3!Zg|xZKVXKGot=y9w3(gff0I?w1`-$bLQVezgbio~ zepG&cUZKp@=AFq@RT(Gx*sQIMN^{I{gty@BHcW&2)F@|vTK@T%Nk9dXg&32Nz^x$O zbFLac0AiqT5|B_IdqmtebSWX&Mp{AQp;epRdOf#e#6jhQomAn9qNxNWFRdGgTLXwt z%__;i#)-_R>n*-?3Xrm{)=bS5Q@#PLW1PeV0Qq&}M}dcrG*FI|Ag{1JK7jPHMftJ* z@ydjdtay3B|@*hdn9g*58Rr$ z1rs@Kzg-PuuPd-q@YliTHazADS7Me%`23<`pp(_0a9wuLAcBND;OTS*qF3MIo~e7 zsO^+rgPHeJCbO1rdb<$6-=sS%bDf7yXJpxKtQt3s)KBMax&6%t#eFs$!h+-KPe3C< zO}gUygf6hs-R>pGw&IO!)=1WorlYeckXPXWm_vxk;UwOU1j6_J<@Ns0zQ{F`ZSM3C z5tRH4w{-AkKs9o~M~ggNdVss7n$hk7*{7i>o4p4DugdnuoO+KcMMCxD|AUesd{m zUcB(`(jp2lejIC-kxf_787;Gbc#C{hI`nZ^=u5y0`@COddjcU#Tg&eUaL8l)J| zaaZ3xVs_k9N4yj8ls0E6jT~npr+I`i$%|bKRqA?37i40xsXgB@YXA~X<$$>jlBJ9T z&jHLkgA?S^P%3!gTh;`ouOV3)r{i<^`CwN-CQtGZIE{$<8L zK$PTn?mJT!o{C2A@-j9FD(xc%_u#X=uqyM%hhgD?H}INy$8p)sEL&pRC0O=?_SZLL z&`g^3+R?EYz)#E7XA@ktiaG)vlXTJ?Cf2w>7V_MT^(+W;B6Bwd8Ox}&0F%kO_4z=t z@>f{9nk-6`fKk|TmsR8&dbI3PptJsjk7?ag08!*POU&vS1DqHXmh+k}PGG1#=ZRhp zgdrn@2d})sWr)sd7`6k8ouTPYt|{g++rK*6_}K{2I)H~p76M+}(IH@O=VBYti<8x_O}XjwX5iL(t^RX4~TTR%2ElEOwT*vi+F z%Mlje#MaoX`S#_XQ;hE)76ul!1PFZG z2~f6>>l&!0XpQ$C;6fTp26y5lcfWv*o3=+UPOmgsWGX_LHn-yWc7j9W?sctXdFDBH zODMJ(lRPwdpL41|?B=87?sbEz_8BlArrSEzIUN_#>bq;-wtD-L4CS8Frlve{Iz%Pa z2(%sRbr`(+ssGNXK!hnlxQUzD#sfp-rEN%-pJ(pEZ0idC`!l2|cz7L3ha8Zrl+Ydj zXg-5{#o#j|eW02An1CkKsn*i4*wVAe+Rq&1#*vFSUW@+A@jhi0bxt7Z3AVP!MHljY zO;^cg(}d3a5-say5-$zy>xfurE8#S1y%Wx|Aif0D4-I*0r(dufxO%o+Qhrs`vMLV8UW{Ir!DfiX<&CH z!+y-E;qBzCG{}TH13QZ>lZ?phwhMZa!Jx$XHxcV9V4SS)A9Qq{ADC|+ zW|P@!pz*mnLevswXoVcvmj4pCoHN!pyuro$9gGD zG=!+hGmQ^c{#<&JJTGo{D(0%J!Agyg7>XG}49r&uC|SCQkc5+btmYlxg{8}hDe>~w zDCF>&0VquXXE3AKcg$di$N4{#A6wE1x^{SzpY}O%YaKSVvR=f$m_fKjc>j{3bRrpEh7iMhkc<0NczmGVTUB z5A8AthyX3Dut zLmdAA4FFb`l8!byC68y5hv{Gk*E+zp&td$|LotdmPGwY0Kc`hJAZ!75-aN(`hvM4G zUxuEu{?N4>(+0iz$@A`0Stiq53p|@@iK%Kbb|65$wQ{*(aN+3WPJYe`S^p#kn7ddp zM0H`rhicdYh(4xpQ%qi_nSr}X!`6K{7W8fG>@EbFKKiMv$^u_dPjuQXcG0RP(o6OO zPePuSpxvwrTN+;b)C3Trl#v`Kp2209z0mCz6J=BgSW{mP`}Wb?>(Na7B5S#vzc_ld8GDvQ$Z3M!GjN$djI#(fWJ(Upd{d$7Sh zv2ZDYSbtD_ zEip|wVrVx$Dg07zna_j8nXcf3-L z=i@O%qp#(hvED_7rZWLPYVnjw&Qi>JVqGPzD-+dReW$`?_c9tPbMHvWmB)uc@Q+M* z9Lq8ro{b4-5mRDLAB_pH@kh?l@T2XRm7U4*6-#%YR-^m1AMh62bB@N#9T;rS24>UZfGzcP?Wq5UV*ZwHR$c$v_%ew^p$bwHC zTggF=C6)@F#KQ7Ta-FZ9Jc@5!MFQw`zS?G!eyBG?38u=!xPeSv5$bijZv>8MX*%Td;FX5Tk~rH(r6Iu 
zGT2Z=MMGfVrr=s z6OI9?FZaA>ecM)~pne3@F$#7Cy%Nu0>R<1nQc1iE7bX!PiBPtw7GuLepZwn<~3 zDQlO3Nd!>*>)SXbpwHK*P#0aS-DX5?6+;ey0v94IYm)uP^>V7i?{2MX9>yP3*VuC( zk{0)H3Z4*9NU?mJ*wuVhKJd-6(kjjftiiF3IsWrD?$np0{dQ0$Y;-kN-Pt`l3H(qsEc8upW9oFI4zvrJ}jM;9$Xv{=RtA$iyz1d z0@Q>5HW3=P@T8HEn>8m76fih$j=x4G%O;BC6MkPN8U4HS>Kk`j3gxKeA!0&8KE-{( zWQg(e61Rl8GR;lV5qHX-i}t4(O{$_|T6=Fr(oY<_%Z;`&LxqgG8Gtyv(v|LN*KYao zfi}acyn)zALpn?OeA>?61B=*5bVZ@C}M z#{{rWuOdsX9DSoL;Tv8*TN8Qdcm6s(-FK*F>@o`}-H+D8ztTEGnb#VwW#@NdE zwypPoD+&whT1iqM?-cg85wsuX*`$)ckd0!?J%0lsRx_P*aDuGN5Y9(+R(}_XPDmB6 z(yIxv(Oe{oqp0YwAMVN-ELx|LKQ~oJ={CK8HfWk3S$_p@yp%FNg?m_bkymnTevx>I z%o-WO*Vu_Z8rA*0iu?ya6z0c2#!%GbmdI1^9Xs4B;;00$d*8Hap*pOBaa$duud66x zRbJ5G%v2s4>fG?^LO;%tw7=@Opquw}* z0vLASZ}tI6gDOJLD+R=Qwl{;Tyvf+B^;c;d3hxZ8qo2dB;%SGN*v$bge&yBM5z70~ zgrZ5B2oUQ5g{Lhz8{lPPkGS8+-~j1K9L*}((;Wrlm6c)2ZXC47|+bH~2i(HKhZ9pZQtlg>RHn7*83y3MaZ4JU-U0PoqfE z0H=q-|7-{Y(kBXAK434(7>fHJ~}X#Kp`--4Z==F@(*pa}e|63smm zVK&VvVjSy?yjd!YT=w>4W;i&%|H_JVg7W6-Yb=+gaR%T>)o#8c5~%ibpu z6KkR2QaS>^8RK8hkK({BF%3$T?K2|>Yo?<{ZPmFwo~+$2!yvi$8U-2EYz?>d@~ONn zYOA%Gh1ZtCMseSN!Rl<9kOLt%MxW)4*YF5qY_ush!hgbeS$4)lRMb9UR%#T)Z>_M* zZm5+gv;>2H8iAYn#2^mBNox4z0zet_-_w`>OfhB36gId{)H3Mw__(};RxH9-X)q?* z<%<&`mE3gR)US7>rMq|K(}pDV!;YvKiu{zF3RJ}Y0i4>Bs*+Ph-5zv!9z@`B6wKG% ziyp{AQ&MtWs(+kK@FfoVnPo66eG?fP+Ngi{*ny#@V8?=>ZkWtS4s{7m$37YelvjMC zcv+;9Hh{s-q@)-VwWb8hg))svBUZwJpinvf*pMfn5OABVCg5}pud@11lH}1APzM4F zko#3qAw4jDH#f7i?1g<9H)}8-t@noZQyv4bY>a`1Rw9p`r8JaA5u%VFDs44*rF1yd z51$O1twh$R>ArsEZXRV%RL#$A6yTTWxowsmPF;7bC6&{oB9-?ZpxJ}Ho(uAxYHtiQ zQ@T#fXOOj0NP`81nL$MSfA>zmYkeh?S~z41qq?U^N)eh@Y!IuZX}=1le#08mY1Z6~ z3yRX@^+@$wO3Bmd6j0lK0GrAW=i!fb+6HY%AC|@O$`eR%Q9x|(}05>z|Istm(%U2a>RVb zDRRfV&(veqaevBRG=$`S98aQa>fXf3b?vT2c}DDZIquf#_T)Q}hUkf4omkP&8h=;5 zrldKP-nw|(<+7}3DcH~kFZ52<9m9QE%b!(!C#%6MTbbPFlUMTCv@YHPxpdE3+bf*- z?*6_>`ZZskm-PU4re$N{<<8yzR%IOEEHOMdEmKb$D(aOKl6h9_mEE-3 z@Y!8&kyH3+i#pYpT+X>3;gYe1&k(e?E&;OTC0JzI350E{H`G34gln7({q?h|-a@VA zi$+muWgiY5Yb!F`r&y~JlYaG*r|6Ptr;VC!sXD#>Azx*qFh}WU1k*-`D(>4-vPt4F zUS})7KpQ{x@{eECrhoMIVNHmar(V@|zm)KR(&3nNr&8t`)GzrZEM=#jJo}Qj)A`qj zyGX9MZMxW26a5q17Uei1<#ei^+QUy$7Ex+8v`?EOIwe-mQiU*+SI;N$0>^ zR5JV0y$YSO<*^@!+SeUp=d5I8Gd~xGg%d+Oip|>NclZs+GUzH#+|J5fEC8Dy98Wgz z5d!XNi&A0%yG;TXmTo4dcP~q=?+@L{4T+Hm{3@C5*$*bMCGRMiF8w$*9VIwG_)hA6 z=Nq&DCSonwmM`{bBVDO8^3~<=?zuz(>+Az@3Z4qmLXwwELSC~o)2c|a`@c*5PC>$7 zIFA$2$vfO^_nM!-rAv4Va~lPoUskM=8vI4+RT5eRxkESN>)!FS(oBfQ?)T4;cTrb^ zofoMdG&@I5XE{qa09++3TU_z+`MMJBQw^TZB86`m*&K6r!?LMdvU2=8DDAS2T{)@0 z?#ii8yM%Yx?0!J(6m!7qcd{Nc?Ig{SIv1k^yMW2TaHQK-W{-MNef1kFx%;nuZ1NUn z_ECjC2u;;;H+ycY6Y4j0XK%M&B%D=FQ&L`5m_Q5J(WM3aUkt*7zZ$pG!5g1G4@P%y zCCkhrr@0?%W)moFQ6?(sM*|xIS@$(L(k6z3(ONJ<*+6Wsy?=>zO27B5%@SCWvws^@ z(rIP;AW7=&DZ&Au5}zzhdg$rks+f+k1B+)VtJrd30Eu+JuE&pj$d;#G{?j)I;%IzI zse8m)!JX&}=w>z1Cw{?I4CoQXvypqOILHq)LW%^~dYp}-`OpvDVrzQ#b&>d?{{UJN zINv4&a@tdOu36cFdn-Y=HnY5d&?sAt8uWYIxYp5ac9Pt<^@69_E%ZdUNgE#8F?rLA z5+-0F+{PFsVec|QPZ~wIAPb`~BBQ;R4*$J%EKbEsA;*>&c|(ac7x_fshkz6rDh zF#!!{{a^v5Lw0N%H58x))^e)dj6}pXsfyEddo{Is7GmCU2*z*z`6ZQ#h?)Lf&ok(P zMu{1ha)q9|QP)KIy_kUfBa>CTCSoHw1sP9d4Hq?Jb8;@heOz_sl#NV3S=Ub9ye0~2868=`Evv9_Hlb`g=nrYK5P`HXQl*as@>B{^wo)d{o(+u$&3mG3D|ntw@C-qzHjACd z0)4LomI{-G2+AIu*g5Jxx^^sIS+NvojKvc0J;1RzZ(^qx$_8E?bSk}Vaka7Fwmm(s zap}Xh+y$tsRM~v+|2^)Lo%g~-^HvzJORy9nz~K0TCiDa#|DJi{yfU%Tk_;rY>Ziux zbn`5JK}ihCeU$+sG+HW6`-*ZV)1M;SH_GF7Apo&`X#6$4#TUqnKd%OadrMjSg7@e- z{D#iTwkaLqI=YYM=X0LX36F{nq^Q`%cW{Q!tNYopvz^yEbZ7v#E9>AIE#`i@2Krj-Iw_j>8=$-HP<8SeVA8ppK1pkJxh1Jsczw z#uQA|Uw;)DkF<;fVCz3*4f{ihYL_;Xh<20XkvCrE2VMJyZ=t&%(f$lL(_h+o=_-lE z3=0p)fe)upjPk+vGR9OSL~zc@B-Iv8)}{B#FUyTCszHyw!G4mjJX5fb{ClDUxhCIc 
zy{s4Cv7IX~dhYlSU~D~A5YKkf+I1uM3~RAykY)Z3%5R4?zR3A%(UD5OS{}}pFSX{8 ze`uBd`m4(wK7q*7684j+e$U_-^kkdvYSyy&^y*HU%N|bT`^Gy0X+6I4BQ!5VK}hS% z{&%a^hW3`#d^_cq!Sif$7@ah&5Tg%i*$Ho*20oFe#SxyDr5vMbs3B)%j%Dtr(U5WH zy?+4SM_Dex)2(|lm-*X@R~@L;0<$z!=tq$)Wmo{ZBhu8Pqo!uyIo0XKJ3UJCsE&FXk=+0CBvDO%KFHchc(dw!_zUf4DYL)Z8d{(hy4AiOm zQIeQyP5t)sZjypZBHHRp;F*m%7ZX7toMfc~m$Tnba3Tdwmc*Y`3UfDnt&@!~AXW0j z0(i4?Q-PGG$R8u=h^rV(3;K)vIEhM@Er-d;7KpQZ%UWG{*?nc?V# zTB3JVNA^@J@d_k?`q3Wl&ubXXAhH_oMX_avzZO(BSg?8g;CmJT?jzohV+RXoHSYfR z9&e?~7unusWkjW9S76~@xYRR|BDYr7-WN^mgu4X=aROcIY)#gh4#2Dfm?xqpTAUov z=d|zX`YQ1E*qCz(u>eBM^gh|$2wjj|`KtpR!jyjW9YOPE&`C9mSCU1sQ&%U6)`u7p zz>KnE>VqhPYJtPFnm5!4ARts}4(xZjujk&nzAEP= zA+8b(4HZM-Pp6ADK!^hr(d3Q3o4rdYV_D@E7y=fNM)edxlTrt#WDRnxhJrK$y}@pwfB?}6#@^t+%euByWL zVujBV7@f#D{&wbZ^+_?qW8X@H8XE&Sc*T!jmz5PAg?bubR8 z1s0Q1&}bGbee|@|+-rIx7#C{R~+j4 zditv=K_L5h65rRhhiBj|A|^{J61`_Y7V`WWb(skLb~>?f8Pum8TSX>uQDNiVMRGvt zj%fMgPsHhBb(ECpB}f%0j`|UKr%mbmu7??>aT9G9;9i;Gv7;~C+eQi^w==w~Z6D{o zMfe23I#F!e(tKh|j*kT8_alo^-IW!Go^}i=7uhw$p6d z_jfS84xMPti3}#I`cK&cHK!e}6)gV!?uUN4?WN{axCo5RdK{&Hv%w!KRS;K z?%Kb4X!;MkbV_#ToeA z2nmN(O#*1B&nNnU$vGc-+hML62zAm;mB{5|9Xu?Sy@qPJ+)aTp^TPVEE=BY95ygu| z@zn6HJ)nW&s`NFkza(slYu$;L6AY6tY*TlJ@l{NT01pr`Yl&;vHjNcoO?iwN}P z=gu1*=-64@!(toVH_mrbzR0;eaFI>*c&{U>tM$(N@8g5-9Crqeva!Kv2J7D4O0Q&5 z&1y;DTVWj_;M(x*NPCyCdLZ|7mR=c4EPzf7ckZFBd?KE32F~qML!*1cXaI0UIcG52 z*^QUkJKq>`j9+(I*jafY$X^HJTO`Yv6veL9LAD9AE$5Z{X{?V^1W(gH;0|r8q#YE7 z$BT@^M>X(?{pEB|HNI(Rr6R_C`V375*oRBhSA%{Y7)L&d8w6z4ckmvd@Ym#NEIaxs zDX64Z6*s{%YHIJNbb1$e&O8WA&EO-LkU8Q1={3r6;t58y%4HCsI-nfoHKD~2Sksui zc(ToV-}H@_c61CqUcJG1}k+Ceaf3#9>oA}^Y+~5 zRZ;-oopx3S&nlN(%(z9CYAxS7?p)bDx^@_jy>V>7zo(qK{0CreW4gMVRT6T=cJ41K z`2afaSft}GOMG6!OWn~VzpZ}q*n}u4r)VSPzGxR9`wZNJSpSQ9#j&>v(hV?zjB8$$ zLn=yJhx!nv{J#rg5q+vyjiX@ptpD_mR;(Jz@uj--upWaQ4PapVpFMp zRcZL%FcaAbYm;DCp_2NRmULxC7zfd2t!36DA#Y~)$bKcB-k^>O`ml3}X*Eag1XHe> zkbMxOa%YM|wmxOaYb{;1YjrajbI!Z3&o7J|Z&g)Y%6naT>qMA`Kj0Zil^WT5nmx$- zMou(1H56TPKjL%hFl?cy(sE4($s?O%tABt9@kF?YAN&yDzGcE*XeM`f_N)R)NN+XX zR+hu^IDVQ(-aaCmymRr3{o^uAT_0JQQ)XZN<+{>zRl%i!w(1}^EkxDEYDB=Jtp2%e zgY_C>>Rr_$G7A;?9jDe8pxl=--(Ea;E7m)~O(ebTwK6%+mg2_K*G5enRQFLIj?$<9qfoYz;a^`R|dJ+{kpkU65Gu`AaxO4OL52k|0_7%j)lIk!e9{;cz zTXGV>wyW^Bg7|5pI%#c-|JR((uF9m)SDjtU)CwcThRr!P3I3I$xdkyQ-<^%WX2z-V zc%2epg64Sg6BvB^J-s5X{L6|?fd=^%%}jq{z<5~#hVkd*Vl&IRTM)=;#FWpmTIf4B zZdt-{G3-W-$OFj!#MP}gA8#JC{Udef!>5~_TJ^59@#iE8VHK+HUjKAUdZU{Gv&rCx zBPS%foJselH6u>2w%ENJ8!09TFXWHgt2;rk8YjA zr)_lIXEcHOOcr3rFY=1Ht(l2o)`fe^-6g|)<6Zhr{bJ<^tb+c>NAsaK zienxhLYllew1kKsqilZn$Q>ws`QH33E@8_dv9!t2RfXd-bM86dQTo}T!2n0HEMeUve6D&9^KN+7eP|#;Whup=^M#12sVn}PJeu9~4b$qu^zc*SUF0DcBQLLVO z7eUhGjO5S>8N?OKeD;|9iZ_g<4VoM{7FgHq-5YRT^ZJu{gkS=Y8?*zR9dQYl)s9*2 z8yY-}FO}$Z?U?;h+NtE_)BX^eh$-HsZ&G4cF_ZFI?)fs2VXoXu6%oJd;Gb-n?4qle z$7zT#`tw-7F|4N62<+#chV+tsO5(f$`8%nuBjq$*_>+EwE4X?#ZFLwN6i}0hq+MJp z;#A)d)~vYx)_tLF>2`&L`5_LCUw3$7I}frGqBj2l9LX$0`^H!Ebl_60&WmnL=ywh! 
zO{1wxqaW!B*o0J#w7u^KhAAXK`~5rG#OVf)Nb#voZ#n+j8%H`(GV?kk*)Gajy%LR{P<=Y{4=`tV6=iEx0hFr~ zyS}(KMr)0u0uR1f47UN;!kVWDtt88~B31=G5Z1NvrWPR!xuB5#80T4OFBM zn}LHTCM93ws;^~zmE|*QRU1B<(Q3M9&zQ#*K3493nD0&c*U|{jui-FI68z_7nx6j( zyyZvg(1eG%-KywSK4snhv2m>3R_ciS+IvRO-t7)==8FyLFSKdnEb?h0Zya}FN3auF zDi?OkS?-qUTuo06Pv;T=XdZTS^7E^*FSc5Mt|y^(H-9Attt{VX3XVR?a9kBhF_Exx zKQ_qB(%=MM(fG4g@-NcQS_gddQPH|o$G4lOt@?2nTgzg2e_w8)hL6B)I;6& zv)(aNo=8`!HW=xqqGB`L`g8MzyGtuPb>Kr_4uJ!M{(PUjVy=6{A_f&s*@nQEK(&W< ztbF;m8_jZj2O83s46DyKCXt&D4czl{`9ljD5a0z4sz&;Lek}454Ct38E_mT%fM7cZ zeLc3m*5fEGH{2&AR}TIS#3tNh_3xoOHT?6W@d_j|3CJ$gScxJ<-a zB0SS#camsmbhFROl(EZOUM|G5#&lQuN%&jcUzfYTSLaRICZkuh>|T=;lvG?zXXl^ZCCB_pVIp*qkZ1jbA-x}FaD^G6=jJdW(eFV z@2u4F?=MPLLtBowKgqanC!MTxO1o%8G)DXr>}4sxO@zAnC?)aa;A!;5Lm~%IO`yaG zavP_MsFbwhxYgR4(Y$#5at9DrmZ_RDDigGyG9h!d6=sK^Y(HT1NlpdIWS0}pF zLR6n7pUFTkONVNn`lisIG4*=gK2gWHfop)t31By0!V8;);bt`#g>+rJzjG8TI{RFh-Mlj$dGt+=eembkQ*Ix&?!KrJ=)q||{0GQ8 z9dlYyza+1+>)$*n_(lTFJH0j@op46u3kNH}FBaXOADDnu9$1KZ4~o<)=MRK@{CY(m zycEL%G>#Eq^?_1XEFLMQIr`&ayQdviR@@oG9%-??%iuzO(Pn|h_liM`s{Q9VQ zacJG=SZ~wqRoh;Km^mlOuUmPS#MGAUw?$Ao;-Zh)cwwf%`04sMN%=njSBxdML!GP< zOl?@|e*Ko^y1*77Soa^GqV<^Zv!<70dS#M>*I$Wco!G)7@4K=c*)MMeQAX2hqu)w% zqDXX4m$QxABhs_joGO;zutXV23x-9jQ*)Kcrg208~>^w@!Fq3<^$w-4lQd@aSf|qKC6~!;+4$ zMb@-_pJBaK634)g_-1N`)D{`rMuzhEzyqy7@b))ibuj}oaX!=2e)zT8j6#%2s8jqT z?qO)}*5o}h7B_AW1_y3gFRbAvmJYt<1)MLZQv$0(*_xcW5bKU5INDXCvO=To0R~%p z+8$l^G#d_>Z=V^mg|^QW>4{(}$>f2x*mdnx4Y>BHKbLT`={aLNmr=QmNOJlO7)x3C zs)ak$Mbu@UJ(}i{|`v;Fm7|TBYRFOlqc81fu7(xLT zc#Api1#sZA!d9~52RnW`X1bTsT~#mX`Q5k>Z}!s-{|e{0`={_P16?`8?7VoJ03J50B!!Hebh{4XsY~bbJJ(%yiKgBP|H|2P7a;BlO ze`Xt|tAn!F4aBe3Yq!MA+9X{dFhkrDayOt&4nAyQ0bcA4A&z;2B~&of55!XGAj{jV zFOs5V1KD#Pq!C~VxxzSHZ;FDwv=VcL#SW2nslZFc8?@pDs(mlAy0M-hc zOwnV%)v?fESrl_*hqaMXheCqq^xJ4<&`cX)K`!RhdS0BNVccGK;CO84vC$u;}?*fZqka(oJD;X@$n^-Y^y`P_ei;pPNlpXFxn=JKRu^cc;)CV#6LIbGp=ds%GQhC@G<*jlB9IVqR1 zK-3vSeW8U0=7T-HVSt@59Tc!{+7e)=EB3Cg_2oPDf`%2vi56E>i>TyXZqUbt`l`x6 zy0umHnC;tALMruO^f#-<-oigyn{0*iH85mSI^C}OSM1ED-pXshodLn1IVt?&I+Xr6 zK3O1i>l`j~QCWZY;marSNQsrL44W)R**E1(($bezpYWgj*qYXU{R4bD`s$ZK4(Vq! 
z>W8FE{$3y*G~{gS-j#FVEocu=b?YY`_vy#XDBcmK)ZH~8r1A!kKzB0f+;dF+R2eW$ zcPaSiQ_b12-&Z0bhua1VF#2&QnEP7a1^0`q|JKz>jTb*`?fUCid|QH9RcXuWll>2% zHP>txO16wT%F(mO>||6S^zhgaBXO7RuAN9rO4|~OJy}DV&#zWb)gF<6 z4H?E=)WdpZ6Fnv2qWV z73YLLH8jN}##gz%;zIJ+%<1j4c4|DGmEd|YWM=X^u8ZA#TroamU(hAj=r^nIr*nKq z@TNdV=!i!%2ZnZtpl&y7W4mEm92>}!z@43K7f@Hkvhcd+8fdEJo&J3f%gvfJ8R^;Q zb2Vdy?(S)5*+i~r{0y(S8;;d&JtX7uH@25@{ND10N&gZSku&|Cs~c_s8snpw={#m}o* zEj5^+Cn1Csz*dqen^fe);4r1wZT-=BOusES6GFoJtE>%KwEc>8NkK^~{$*6_MUn0J z;x4GOG^R85zmJyOmICJHke-*hQ4Ks>^hPi^nyf`O zGrb#AJVRg&bH(6ThVe|59i|Ch_WNo-8z+&HFgb`K35UGi|XFG z3c^KCZRUvx1a18^Nw_VQ+Hx}fMxt~(m}9%Nh9k-Im2usx%gUkWAFQu>!X_QF&F(Wy zfBHVsEa?4N$|*igvUlXQk)FJ}NM3(w+w-|snM}dT*!Tj$HD|^B2C+%TgCj4KF&oo_ z>a1U4jV_I#P-zYfer4Z?IEEy1nCcg#n}h0O<{tqA*xFBEQqgv*bx6d%@jYJjdIan} z-$B$-@W<=hj}H-Lm-(@oABrzRD=c$=^t!1qrL}E|zW+rOdq@%LbNB~+I7+Im{@q?q zM-Fpa{N?!NGYz+FFLu|18>q-Oa8@9vlp>kWs4FB+;sH3n#UdhmFTW*!tI%Ynlz- zlX%S}VIy`vTL#YXJB;&Qmv1)es*Mp9N|j+hmTdK<@9ZANgosTZX{gkV(T zkyt&>fjB49kp^3#r6ZmxV^O~?)6jRIvh|ig+l~qETXvDN08a~9b`dt=#(h1jcG(WV zF^_s_64`PDBLEt`8W30c8iokY7ZrLG=nYVbBLLNy&~@!mq7B63HD)#2)~H0Nn?N7V ztj3xD0PEC9IR~{_Opf%>n*fA1N9R&6%A-AK1E(DK6!l)2A4+7l9MMvN$4V$B=g#+c zw^vdZxSgb8bMrM-+_}l==m!)HjrX!zm}DEUFfq_nw%+2hTMjLrMnLow_}gmd0R2Tq zNWl4tK7i7x&)&{G4FCoAImSN$NwJ;A(%pGIX|CiQssI%=xK?5Fl|J-rBFfu=2;LMi zPq3 zwnsy}d5(G7DP8t3^5g-t$X3QbDz4J9xN{<*>C+#rEqVbX#-#!c!~KSCpRH7QcQ(`o zPvcrpZ@4>vR|C@~qy|KpLMK!FDT-;d20V-9j6P4X$*6;)ebpaRTemi{NgGKrggNL= zKgO{iYDz;CTpojxX{H3QAl#+o$o}ez){aRS4ZI%R$ozl8vn}6qe(ok?>fB&@bcM(rgOzh6gY6A4AroNO$C{17=QzecrEK5DE2h`-&=28BLw7X`F{ApruT#`~X4gQcQyYuON(rQrf_isR7h5=o=vZeJdiaSB;fisQCy#QC&8b za>g(j)j;k?2C#3fL{fa&Mi}LmnH%|JF_ClV386lsdYxVU%ZP{!r_g{ou5L@|5y&TS z&rF(EyOGmtNJ#qCRw~VtMvNk?T#r;>%aW=A$N_O$w%VtdnIj&DcPI2U&g@oTBP9DC zwQA7G1i)nz(6u0F>XD>%%a;K4QcoxF=~nH6v;~ALTx93@RS;T=-Ib$cEX^lUy+6Z_f6q&PK!<#axMTrSaOgdM z&%dorVk2i$xW)%0k=xTgzfbz79PFZraTlAr?kwJy>89=mm4uc549<7jj&JRTLh5`Qo}; zGA&tyTdGVx>+b-8(xB!l7(;~&Mw4%Q~PZ{;C3mcWj(%_2bFC}$2+qWmFIO3KY z-J4XntlJt_D@khUoDBXIRnp)csS7dx0A{t5%x231qu!)> z6$fu~PU#iOW<#e)>$+A{_8ipXO+p9G!``eT?jB=*z(@6SD{!*b-uhEoL;`An3P^`z$pXpO-ULI((C8H2OxKHw|30>l1 zk}DP+RMK$+S?~9azFRo{={$a2s<90-;Nb`N6aN6MRMu%6XYQ!}5I?O?1f@^OA3fVR z{b;p-zZ7x?RE{!(&~^MzL0U&BfGlW9{xaK#`TQ#$8DDl3t1yMnP&$5<8@QAGa}-hi)CxZgRC4Mr{{XuTLXXIP6v(E-26qGw@5%fI zDOj^sCy63a1-W)^1Cz-7k2MsQ8%F5YE`Ija059YJ0PClwqYzDq08`1`)ct$@wIS5e zkgNRMVQ_YV^x~uo77*LmVdN_$$G2tt`~80^=%FF)yvBWoPU;8Deb4z7&01Vpf;S?& zXDX^bQTPGPYuj9`vZ0qHkK9c38R!SCG)pdhg%qmDqvs@q^d6_&kLOuhhK!TW%`9LL z8IA`dJwF=WhW-eoX{TjSy%lgt_Nr-n9QO?*q;1?z)94Qa^Uvi-4j)v|q=7@jlFGyo z2Tw}pZjp#SYT=Wx;}z-h>K4xwr{!#%9FDo_e@fzfLFBvU5I19-Rl-(wV@Y394JPo* z7`Kc8*mgwiO`rU2dOw4x?@|0FiJAAI_Yj?{qfO9d~lm z;f!H;#s+a&&dv$rk8xc*T2jpbKkV)p>x!!SQFU#lcp1U(Qm!kb1J88MDFwp0AS6h@qOQS#gA-0dl*0fPfIhm_%mzjA3pYz3a)7aZjrLUL*GJhE4{zIjC z%IXCivZAM;1-Sic^}mQO4%BBXtHuo+kD#2dxz<@Y-%SBw-JcJ$kTc***V{vTSvL%!guMzNxvc{#_aH0@QUH$kP4l5j$i zw0$rttTFF)FCax@fq}^S4Amu|jcW-bO~E8!xytN7#FBae-nXpo3$sZ*s_jF8y+1=( zcCn--Pnt@nA=Dl{_^y(|MW1QhN{$)8AhG;^pv^Q(DLj{QxcfYDtV5h0IR0eSJBw?` zXKSQfs2u_7Yo}{??U$AjD=#YNoO|#qKUKd`8Ca070|z}n3ZO9W@4nLjk?dG@U$OIjOAepSI-=RIqu(KU3sfvsm@h8PUpGAoRZ zJ>(LH(za}E&OrkKdVOn4CWzK7>R(EVMgIU+4tg5TpTqdcMIifEN2Igd*sD1iPnV}! 
zxAuL+-fQow3zj90ax#TewGF4|=_)Su^Fa^5?B}7gh^CM&HB=sc&p!QiV#k zNgnl^jg3@=w}b^y2R*A&)eZq+k~>r|L~y&84UUz11v|FwJa?f9lfjPVu~?E0zPFt^PVZuTzM_ENnF!ZTWt&psE9coYNfoffIeaPRLyi3(-la~kDXV` zAmX`=R?0h!h8S+lI*Qwx?2vgFtc$5Pu{omQW0km#?IqgapF`Nwqjz)2IP~pW*D$Qe zQN}^~qB%h}DKoiShxvrtk2id6fal%YJoHCLjaA_okN{w`7y=(x%#h zGdn8fcqh52!aRW{DX`h|g9kEbLsmv8) z5)eI&PMy#XtKdBmj`ap4qA9h$u+Tn&8NC z&6D-2%*38DMQsf}G0Mh2DjLO)8$idUH6&NFW-tzW5mGOhql(pA24DzIDnq6-5C9(j z)cvKlAF}Rd+uhwmASmZO05#IvY6V~dSBx>`!OLUnzx{g3{>yBVIOCI3N2kqdaL7uy z$;dq_T(0g88d%nQrN4!i(j9~YvRNMo-1f)Nezgs?^|iDu=1S7aS8FS%EIsp{2f3*K z0JC0jSrcN6VkBde=nX?7rLo#A;u41BmjGkeKj-nQ+``Q<_mLqnu?vX--G$gk;?#+x zHM6ABNQGs>gBchZ{Axe6#J4K1B9|j9VlDFff0J58mT0h#u}92zsO|WEl!RDIC$cFd zMO_Ku0)L)r{hp^HUaPb&szz9q;D3c-L3eiu985!-# zri9Dax2ZlSCO4CRUP|M>PC2SqaV)97=3M0MAJVEtC)ttZc?_=S{`t?nPZi{HM5@>f zdgHI>KpcLf6D8162oZuY^{B0&E`Cl2b6q|Cta8WroT ztrntg#S3ej8+Q@N;J13*wv{eqhAU(rJ8cA4G@2V++yu7`A>I5cYtno(;aM!oPk5yn z2gserx%8*aVCB0y3mXKtv`CQTFH`D$YJVVa=Zdh15w?PH+chVbh4ri()a**i#TSGE za0Yr*M$AZ<=tm>#T7Fl{$ju>5qmfbud{71eHw-G;Kmv|BRI%IVEsTF!lpr8_&@QBL zt_c{a(R13AM}-3)T9p$78fdG6rVUQ0G3h{10QI8;o=sU7xbN#v3yfo>AYN(eGuD|c ziaJq>I^vXYXdnklDR6ygpbwb#m0XT}b4+$5WP|jn8c-JtQM;A|X0t|w4#1EIIQmp> zr1Yn`!Rv}{bI|8CLJ~6U>Wh{n;8#g)5Ve6&b?V2|)_$F4(Knwe;j!`q*Xvp(^HR!J zwxjc&gPM^XuhyZ@%Sx@sZ_bc1*V-Sk;*i`(-^6OZ?kVfnnq=5{cVqXBHI$xdM^EKX zRLIU~3l-fnRcr%&LjKjaRE3u?trjHmLyB7AT(@TUV z4($H5yq3yX=KyD*sanl4N(_7AmB(WP`z{3ltUHgn{*=_Sj34zY?g*}l?DC_MeeqBW zP$cLN)|JXpGDWd06;LSl6y~%lPE?-R#cr*Vh5M%`zG<@CkOE@_(gci8voYfbG@oK- zI4XHKt;Mq-f(LFXQqyae+s=B?Vv;dNo+Eyl1F@s*1&6t<_O@Q>)KbN42L*Z0T2?B? zB3NMk%$jrAAY7H?*5}#4&N`YN)fq;5(O|KQ9-amZWRf$$sYafmxyEzWwneu9MCbjoCOJ-K)};*#}e@$E8PcVGO7lqX&y7aTLfe3~*2N)bwf!UkoQmc)m4)k** zQl{CDVWHY-%uT#%7$3gfe+ttG3EWLe$Ot$Y1Eo1rr?o?qX38o%0wE_Yig8Ti8S7F3 zmEcr}yiyC0I%hQ2PhP#M&di!mFOPblxe6&fkx$q+bmF84LCrl-7 zQQXvuy|L1rwwSIlLEG=zq*cfUfIX?;Vwo+5+;d6=8TF!=gBYMv_MlK8qKYU3<+CU` zKAowaZxwjMZ;}EJerd^VY#4s*6H;l)QI6VMll!s9_m2X!EwsT4hmpe-$W z0)^mWoF*)|Cz_kGlu6yraX@?;hj(L&aN?0fhC}#M)c2@C6!mP6YC^FpgY~D1j5q*h zoUNYYlTP9$Q@GXpYklN@Q&laOEO^PSi&*joKb0XDY^{PWMJLEouS{oD;#tA+^{QQAstREr|dgl=Y8{AOqfz>{3f( zvjhNpQ-lLJ>OJb}L;?A~ok)=b$8V(|GbKW*Mh-h;y+ITnY>l{m-a2tx4Qu9%mEd&r zr$-E6;XvuzwJQ`+G5yk`lj=HCL>q}NGIPMK0~YxXM*x0ioRKJAI)m5Vm=uH@bjP(d zAwt9v-met>=Hdsn5x`Wl9_EHLRA>M_DLkBRHFsyuPEW9-${6X^ft17{@Opn*77|GW z55}&D@TUNAN#&i~4ox7rS~xSybR8+_7tC@n3F%gZU@#t(tb~oHy#gZgLbmR>qhK-| z3b3dMAPz+@mxc-lQOyD(R0K8$AIhAOE;n&gc}0Fuc&QZuP`JX=0tJi}z!)7eY3py5 z$@JoF|OLNB2{hLn$#;du9{BLPQFv@((J-;VSO;%&$sF-oclBiFS! 
[... base85-encoded binary image data omitted ...]

literal 0
HcmV?d00001

diff --git a/demo/image_demo.py b/demo/image_demo.py
new file mode 100644
index 0000000..0158735
--- /dev/null
+++ b/demo/image_demo.py
@@ -0,0 +1,44 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from argparse import ArgumentParser
+
+from mmengine.fileio import dump
+from rich import print_json
+
+from mmpretrain.apis import ImageClassificationInferencer
+
+
+def main():
+    parser = ArgumentParser()
+    parser.add_argument('img', help='Image file')
+    parser.add_argument('model', help='Model name or config file path')
+    parser.add_argument('--checkpoint', help='Checkpoint file path.')
+    parser.add_argument(
+        '--show',
+        action='store_true',
+        help='Whether to show the prediction result in a window.')
+    parser.add_argument(
+        '--show-dir',
+        type=str,
+        help='The directory to save the visualization image.')
+    parser.add_argument('--device', help='Device used for inference')
+    args = parser.parse_args()
+
+    # build the model from a config file and a checkpoint file
+    try:
+        pretrained = args.checkpoint or True
+        inferencer = ImageClassificationInferencer(
+            args.model, pretrained=pretrained, device=args.device)
+    except ValueError:
+        raise ValueError(
+            f'Unavailable model "{args.model}". Specify a model name or a '
+            'config file path, or pick a model name from '
+            'https://mmpretrain.readthedocs.io/en/latest/modelzoo_statistics.html#all-checkpoints'  # noqa: E501
+        )
+    result = inferencer(args.img, show=args.show, show_dir=args.show_dir)[0]
+    # show the results
+    result.pop('pred_scores')  # pred_scores is too verbose for a demo.
+    print_json(dump(result, file_format='json', indent=4))
+
+
+if __name__ == '__main__':
+    main()
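The demo script above is a thin command-line wrapper around ImageClassificationInferencer from mmpretrain.apis. As a rough illustration of the API it relies on, the sketch below calls the inferencer directly; the model name and image path are placeholders rather than files added by this patch, and the exact result keys may vary by mmpretrain version.

# Illustrative sketch only, not part of this patch. The model name and
# image path below are placeholders; any model-zoo name or config file
# path should work the same way.
from mmpretrain.apis import ImageClassificationInferencer

inferencer = ImageClassificationInferencer(
    'resnet18_8xb32_in1k',  # model name from the model zoo, or a config file path
    pretrained=True,        # or a path to a local checkpoint
    device='cpu')           # same role as the --device flag in the script above

# Calling the inferencer returns one result dict per input image, with
# fields such as 'pred_label', 'pred_score' and 'pred_class'.
result = inferencer('path/to/image.jpg')[0]
print(result.get('pred_class'), result.get('pred_score'))

The script itself would then be invoked along the lines of `python demo/image_demo.py path/to/image.jpg resnet18_8xb32_in1k --show-dir outputs/` (arguments are placeholders).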
diff --git a/demo/ipu_train_example.sh b/demo/ipu_train_example.sh
new file mode 100644
index 0000000..94c8456
--- /dev/null
+++ b/demo/ipu_train_example.sh
@@ -0,0 +1,9 @@
+
+
+# gets the SOTA accuracy of 81.2 for 224-input ViT fine-tuning; reference below:
+# https://github.com/google-research/vision_transformer#available-vit-models
+# cfg: vit-base-p16_ft-4xb544_in1k-224_ipu trains the model in fp16 precision
+# 8 epochs, batch size 2176, 16 IPUs, 4 replicas, model throughput = 5600 images, training time roughly 0.6 hours
+cfg_name=vit-base-p16_ft-4xb544_in1k-224_ipu
+python3 tools/train.py configs/vision_transformer/${cfg_name}.py --ipu-replicas 4 --no-validate &&
+python3 tools/test.py configs/vision_transformer/${cfg_name}.py work_dirs/${cfg_name}/latest.pth --metrics accuracy --device ipu
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 0000000..5f7df52
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,26 @@
+ARG PYTORCH="1.12.1"
+ARG CUDA="11.3"
+ARG CUDNN="8"
+
+FROM pytorch/pytorch:${PYTORCH}-cuda${CUDA}-cudnn${CUDNN}-devel
+
+# fetch the keys; refer to https://forums.developer.nvidia.com/t/18-04-cuda-docker-image-is-broken/212892/9
+RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
+RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
+
+ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX"
+ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
+ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../"
+
+RUN apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 \
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install MIM
+RUN pip install openmim
+
+# Install MMPretrain
+RUN conda clean --all
+RUN git clone https://github.com/open-mmlab/mmpretrain.git
+WORKDIR ./mmpretrain
+RUN mim install --no-cache-dir -e .
diff --git a/docker/serve/Dockerfile b/docker/serve/Dockerfile
new file mode 100644
index 0000000..c50c4e8
--- /dev/null
+++ b/docker/serve/Dockerfile
@@ -0,0 +1,37 @@
+ARG PYTORCH="2.0.1"
+ARG CUDA="11.7"
+ARG CUDNN="8"
+FROM pytorch/torchserve:latest-gpu
+
+ARG MMPRE="1.2.0"
+
+ENV PYTHONUNBUFFERED TRUE
+
+ENV HOME="/home/model-server"
+ENV PATH="/opt/conda/bin:$HOME/.local/bin:$PATH"
+RUN export FORCE_CUDA=1
+
+# TORCHSERVE
+RUN pip install torchserve torch-model-archiver
+RUN pip install nvgpu
+
+# OPEN-MMLAB
+ARG PYTORCH
+ARG CUDA
+RUN pip install openmim
+RUN mim install mmpretrain==${MMPRE}
+RUN mkdir -p $HOME/tmp
+
+COPY --chown=model-server entrypoint.sh $HOME/.local/bin/entrypoint.sh
+
+RUN chmod +x $HOME/.local/bin/entrypoint.sh
+
+COPY --chown=model-server config.properties $HOME/config.properties
+
+EXPOSE 8080 8081 8082
+
+USER model-server
+WORKDIR $HOME
+ENV TEMP=$HOME/tmp
+ENTRYPOINT ["/home/model-server/.local/bin/entrypoint.sh"]
+CMD ["serve"]
diff --git a/docker/serve/config.properties b/docker/serve/config.properties
new file mode 100644
index 0000000..efb9c47
--- /dev/null
+++ b/docker/serve/config.properties
@@ -0,0 +1,5 @@
+inference_address=http://0.0.0.0:8080
+management_address=http://0.0.0.0:8081
+metrics_address=http://0.0.0.0:8082
+model_store=/home/model-server/model-store
+load_models=all
diff --git a/docker/serve/entrypoint.sh b/docker/serve/entrypoint.sh
new file mode 100644
index 0000000..41ba00b
--- /dev/null
+++ b/docker/serve/entrypoint.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+
+if [[ "$1" = "serve" ]]; then
+    shift 1
+    torchserve --start --ts-config /home/model-server/config.properties
+else
+    eval "$@"
+fi
+
+# prevent docker exit
+tail -f /dev/null
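The serving files above follow the usual TorchServe layout: config.properties binds the inference, management and metrics APIs to ports 8080, 8081 and 8082, and entrypoint.sh starts torchserve against that config. Assuming a model archive has already been built with torch-model-archiver and placed in /home/model-server/model-store, a client could talk to the running container roughly as in the sketch below; the model name mmpretrain_model and the image path are placeholders, not names defined by this patch.

# Illustrative sketch only, not part of this patch. Assumes a container
# built from docker/serve/Dockerfile is running with ports 8080/8081
# published and a model archive named 'mmpretrain_model' registered.
import requests

# Management API (port 8081): list the registered models.
print(requests.get('http://localhost:8081/models').json())

# Inference API (port 8080): send raw image bytes for prediction.
with open('demo.jpg', 'rb') as f:
    response = requests.post(
        'http://localhost:8080/predictions/mmpretrain_model', data=f)
print(response.json())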
diff --git a/docs/en/Makefile b/docs/en/Makefile
new file mode 100644
index 0000000..d4bb2cb
--- /dev/null
+++ b/docs/en/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS    ?=
+SPHINXBUILD   ?= sphinx-build
+SOURCEDIR     = .
+BUILDDIR      = _build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/en/_static/css/readthedocs.css b/docs/en/_static/css/readthedocs.css
new file mode 100644
index 0000000..4c7fa98
--- /dev/null
+++ b/docs/en/_static/css/readthedocs.css
@@ -0,0 +1,62 @@
+.header-logo {
+  background-image: url("../image/mmpt-logo.png");
+  background-size: 183px 50px;
+  height: 50px;
+  width: 183px;
+}
+
+@media screen and (min-width: 1100px) {
+  .header-logo {
+    top: -12px;
+  }
+}
+
+pre {
+  white-space: pre;
+}
+
+@media screen and (min-width: 2000px) {
+  .pytorch-content-left {
+    width: 1200px;
+    margin-left: 30px;
+  }
+  article.pytorch-article {
+    max-width: 1200px;
+  }
+  .pytorch-breadcrumbs-wrapper {
+    width: 1200px;
+  }
+  .pytorch-right-menu.scrolling-fixed {
+    position: fixed;
+    top: 45px;
+    left: 1580px;
+  }
+}
+
+
+article.pytorch-article section code {
+  padding: .2em .4em;
+  background-color: #f3f4f7;
+  border-radius: 5px;
+}
+
+/* Disable the change in tables */
+article.pytorch-article section table code {
+  padding: unset;
+  background-color: unset;
+  border-radius: unset;
+}
+
+table.autosummary td {
+  width: 50%
+}
+
+img.align-center {
+  display: block;
+  margin-left: auto;
+  margin-right: auto;
+}
+
+article.pytorch-article p.rubric {
+  font-weight: bold;
+}
diff --git a/docs/en/_static/image/confusion-matrix.png b/docs/en/_static/image/confusion-matrix.png
new file mode 100644
index 0000000000000000000000000000000000000000..a1dc7ba6a73700ff55f81e40d00bc16f4da26b31
GIT binary patch
literal 51804
[... base85-encoded binary data for docs/en/_static/image/confusion-matrix.png omitted ...]
zgzXFFsAHbPXoytA`Mlipyk|(F=^7#@v@9&nsUYz(d=0=N z5;8L}ug{OJ4^r?70s^PpB5-`1mW4g`Lk0m6oLei#3U2{< z-sDJ|Mg>M!Sy`odC5nGeo1j#v+LtcLvGwTZABPL^i2yU1wwwFU_^uro>isugao_l0 zcu35lJbuGOAV@Rz=Edl|u<9Q}`JmwBpO~7Qat}|(MkX`pZH4_0m^3de&MQ*bL<~wz zk5U0pfewp8U*5PL)>jT^Z~~yOI=FBuX=_WxS&FIIqFs3%hK;^`9f`TS6SdaXBnS?+ zNUN^b_ea-vEs?Bl>*@b)et@egKYtHaVl??J=YE;9#mV&|je=ZbcV!w^8=gB*9=+wi z725pL?`l#6Y0O|_JB5vg?^WfaFax(S!oG$2VVyr~Dv-01JMzK4yD!bQ%2FIuH4a=e z7Gl=fCse)i(x+VKhELUny@}M5Mkrr#i|5QTT@2-tQkfP%f^&yQA+VAN9j=)jPFLby zdIsj_$Bs6RVPecv+IioOJUyp;!BC5O68xD5_XD8ave{DG&=&LzX#F(a#zO`G$RIeI zYqX4FfWA#E(nH_#$_utfR8pqX!=(S#Sij=@!1n{vRg?SMll;A#g>vzDSt`IM0r`}E z=e1e3-IZ>bzeGz%8yn`~VMXGItDV8)t$!QR5-tirk_-W*$ij+jH|BoDfK<;i@CtdP zm(byi_xs%@@*2tJJLr}L`pFa`T6r*C9Js@tUWtXt8bot1aE?>qsqv6IGyO($4B?#2 zpzuiG+2fL~d#!o2}lQlb>UD6{^WA(9c3se=Lh`t0?f(Pu?n_VwU3 z+z2qC&D@pJ4miR=3YGBef-YDqcQ7Ll9Vg!#9BTr(oZ0Hk3_+89wf5&QN;pIpa(jU9 zsiU^nOLHY|lzXvVh>_XJf(b2(O$M&H`;`MK_yFC^`;ZuL*wrT5ss}*eInWk0oJKwI z!{hgrb>nzc09+$tTO2r0UVQ8t!#Wu-Vfge}s#s9rFAKPI-Q{`4$+~8#WfbIWW<v}zMJWk$Ce>?mhPj!mJF$uCin`q+~QfxHP z6CgV-`E&xN}>ZT7eWPR^t#>+;}`uFB*sSxiOM z$N}i4q`(XcdxJmpKIdfTR>07W2t+TgB)#JSpID zRexM?IzA4tzoskYS6!|Nt-_46P%o`y!ax+g*Ck3YNLv|2B|C~d|HrQ{1d?Gknd4Pg7H+vC>&+PIN!SrR8Uk{JXm_Ej@aIX zs15kuIMRxHD#v%B&q^xDNXf~}g%gV)a$pl}BSLiz`Z77^_*(UqsX#Kj4Z z7b~ax{liyhc&?H}(1#@;*}v%>J0WbqNSxcv>ocE|lN2+o|FU&MvoVNkJM%_&(AlmU z{8v}{oG#1QAYWM5ccHf^Pbr0;ROi=}>*&BWy3jBG z;>6P_86_}5D2ua=4N$Gw+x)Mlv5A$9ZdhIt@s1e(rL&4j{1S4o((?x8v^ZmQ{?|fn zfw_2uY*1-}D*8Y8*iA^c%OKa1KbiEbsZU4KvPxAI0kBh$YuT3aK}G(BZn<=@!m~C^ z9f0=xH3|BIQQ{i*5+M&@$9eW&3!RM7&v=I1k|_~up8g{%G)#J@8AKQii9UC zxiV!9(f@jFAYmo2x3oxy)#1T((B0u&N&aV0d}DN`tk)Lsch+yNc0)f+3!?wJ4;i|L zR-LiRp%1dX=KPb{TmJBc6d2pF&^e;qk-z*u?O;Ly@zhGT&;%?zan991-(tIcJP_J{ z0%D;8u=`YPPe{uNZT-AXm05dv#r`Q|C!;m6ck=P2$^kGjF*9W;WkX<}_pzjMmX!jH zBNJ3A|MH=MxHw_yztl%?P1F+z?HUShw!-i=?BN{s=5gbXiUZ!czwt_L04+CR zmQ}rLMm+*r8gnwlj%sMJB-Pe$NZ=&vbBtHvgv8^a%n&9QQ(^lWDd2*DhJ;}FqZ$Q8 zAI?~;m`2?+=f6RuC^q0}!_;U4%rKj=bE2&`S}oJd*7f36xX-^9mmE)Y#1Pl%S^4&t z*W)Ps_A#Fc+yh;yjaNSiwVSJSIMdv>^#a-v9$VM(lCZ|Xny+sD+SWmkK*z|Yx@o%q zN^lf9&a9bNMH8J)W-@q^P_*1 zZ)++h#(&dW0h%^qOzxRfeGl6!!C2|PS+z=juFn)hMCrZDSWM$=^JTjJ-x!Z-mOSg; zs!+=?Ex1n9(0OJ%rZ``m`EP1|gc==$Y(rU59LzTW%o$dbNb4mfi9UMLf6I+@82fSZ{pQ&O}6H&k1T)nHNYVX z^(GjBJ?W+9tp4=`iDP;zC6rL|x1ppk|8KB<|IfHZ*$_7*t>)BNBPTd4?@;FHy2#bI z{!8p3{8y_SKMud#IuH&hx?J%mg#0JQgJ*{?INNU8m5Ozy>@MLyO>s-Fb#DzlKE*3i z{ZnZ>HFQ4_tr&%DCe`X@t?K3RIv;k>lgPF}@!#;>hc4Y|VAtn7htn(5rfCj!gM#wu z4*=!(%O{gu_itsj;r}%r7uxu33lK;b^jPx6Q#)bZ(iDi29srP99GpCO&HMGRWKQtk zn8K%mKAX@O&6LQG!#r%Mb4tBeZH4+@locj6OZ%F8Y~rAvs@b%vTz>e!aFV+4E(4f6 z1krcomX26b8tnf6mC242ihB}#s-hD5I-ezITukt9=tQ|modvG2{q55n-7^{*OIkxV z!+3<=NKCJy?{S3xBt|nrt6Rd6GN;q8J``TfQe(bn>e~DFpSqBsike(H`Eh+zG3|)t z@DTV-eLy|x>J~JcA{$gyzY7YX9Zc@I$?q-cAG$kJi_3QN|6G86@4nPz%~~d_{VnXj z54%0tbWYSF8*KlZ#^P#max`(12>Y%?yhW9~Zu8f#1w=ydKwwxT^br3hL%T7%} zesV$$>fcKb!Nj&JuJs(<+Wkurq`t1qPRMAkexIU*cfOsI>6CUAp=PnqeLdj3m!f!{ z^>yWbC#G$jznCMnV=M5SM|>4tuvkY>_S-k>MPQR%)$C2}(PbI7^*2py9Kb>fH=g8f zg;rE*O1#>Fyr`IzinU>8>BP48sqJ&zYVLG)nf(Q2BTxHW|JAvfvek(Pr4lCj>e zlsN>Up=9$Ju3HkE;wczw{5TZMy>45=(r zb+sZip?Jl(J($_3!f^#yoH=FA-d>HzLuv}r^q41KpE(d)5iUsfGJQ6kD8eo3JnaOb z6XIl?61GG2>rOmw78gyq~`=gc|A8?C+$uQC(1(`)=Xbvsq64O}oy#sk?v_GvqC z8p+q4UE?PWA89NMF)#p_yDs{y1oTqd8bE)z+E)0om%m+_n>jpzn4q#3YbLsei@%s& zwsQRes7s+iTFPQeQ-!}=?S{e;wM;d_D>^aGlN=JQeZd9?J{vr@(w>%wTaz=CQ`3Fd zB_yoK$gn;5En71714g~fwbDvlNrOX7_Af1}qCyzhk>E%c1c!wD0F-L`xsJrL!;i_D8(%Wt(XE@Q5kADYdqsJvYWAKWbWmWg- zeL(T?X0^jrsJCDBY_p6b6~?_6^$wys?`39zlYRXyGVcw8aNFpW{(29O#y+k?8jq`D zVyj}L317=8dK3)6%3bzIE&sXF^Iw~JHk$7`$2oR%lneAX*rpE`s+6W4-a 
zqJCIC=*?x$EzWp-9|5gh4-96gEd2C+^dWjdP}Xu6kEdqG-{7to;raM{ZsDcAVmQ#6 zSqkdVsLOKInto_d;JrArX~djzc7ksR+D#7V^`$q(8ED43O>R}oCWf-^#D1HfHznd+ z2X_D9wRTs?a}X*PaKDPoaf`wUBs zL1Lm)2rfwJiT%!6VSTIHEoI0y^=(G{rqBWm`#NEEQT?_fYlTh9H-3(U{p!i~s0u+F z0!n_%CoW;&U}0hsyeR!bcHy!(Sk$DK%V0uh=6aYIs*J_5fLxfLjqX&ksWiSGhC_d*Rbuup%mBnFkP%vSmh6I$g4UFA083Uxzfbt)lyw?n9E|9>~^9#p_@Z#Ez>=eR}n2jH*;tR1uxMcRo;Qc`D#lJ#hf|?X7j`U+???0tshKZ>KV^ zd+XYd0FyKChUXdx+g>{ZF4>=U;c*^`&R^>EuXWFH28GBX;%-XQ_R1GH2a7xht#TNp z%~}@Z9-=ogpOas;TUS3ujxV1Kaiu^^LP=UA-&;Mom-eprLXPXsPxFEeif7`u1aJGm zrf1N{nkH*o$K9cX8(1LNZsOS3?vhyMZ`AAS^2(H{*q8oLB$QUC%e9|@6~Nqgk@$*C zXKUw)j|>c_wYi?=u0@%=;~DC9XRc`*lsL+%;YsYb4Z-Qsy>N+-wsQ9;+->tf#RD6) z-p1!F9jO3RYZ6CTn(tZ5co%N~twC~hEDx1m4@bN8(9*cEl2XWT3Il%L?r<4mqnr_+ zVdiok7Zjw9U2XtAbx?a#dw+lRgNz<|7xy%lZPS_%GQMW^Zqlaazg5I0`kXXY+Iq3k zOYowcJaHR=fB(kkwK*1X&hZ1rSbOt=b&5MP;Mlby@{Tb&NjvJY=eet|| zFeyBp$@A2#%2cNK%hMS1a~mTZIbP!?KM(fF{d+@Q@A#i$5tg4jXY3_AYJN-wXaUqQ z#&mQiRC+SD#vv!wTku0Ack)qpTYm-#D0#t(A+JJz=0JQaj*|xavZFK*1dFA}jOn|x z*9HK`#dtSkvrVO9Nslh)yPsl_gxuJl6Et6_<*jDSNAefTBSsy2M5W;mZ7Q11mp!h{ zL0pD}EjFmS`-S6l_S>0{&*{zfo4RI}!7|G4-R?6xx@P}KY8}WMBZvJu#bf1X*B3ztDbN#N0#h?U zOFNYdHz6!o$}Fq6oZC$P7=_?48yT>HZD+`*=EOq8x$n~f7@k26T?^9UaMbE$&Y*XLF#eQ?|3s$)Mv5goJJ5TAtJ!Y5Z*=(lq9vb_ zR5qo1;>>2lVf}y2ePvf$(H3ne1=?c8i@O!~;-#dxyHmWlyS1gb6?ZA_65NYB!9756 z2oO9tFTMA^5APqmjQ!z^eX`Hk=j@T4v(}tz&QXHDsi|Fmw43-Y6J?j$9hV~-c4NO*h33gn)JAw zsEea9!akU29ns7|p}h}o*cbnCd;1Gf2Z*z|1{!=4h(`k4K@@(PnHqgM&4cY;8^D;! z5aDOR=j`Q{?&Yf*s~3>@atV>Ol(BDk2_X;E2t0Nh^5#aTK6CULPbyUrJe;x znTSr@K)`YKRGj_QU>~5F*n|k{d82tFR(LoG9H_!eUhDi)JU~2PajSCHcf`-#qI7)B zLik0GoK#awPEK~ZaNtoCi^)MAzkHytU*Bb#xDlHG17Y&!MK3TVF7T-3hPUyO!CN*! zTL3esb9;Spq!TNzEH1U)^$WK`^Y`rU%of>}1>zmJL#$V|%&Kq7;t@r!^u`jMUr8d- zplP+F8ttZ%!Snn@@;p7yQ4a(6QQzUAyB|aYiP1miU)*5U87vLQE%X752V zX2Za4B__B__#!sImggEDu=wN$`yoH|6Jl5S=8f0Y;@dk1E}}5;J?=05oB#z;qSm4k zN=XS#Y2!Yv(i4`uTd^>dL_LhmE<0S# zBAZJ@DQaOqZ8_jM`uLp9&w02nGH{`_BW&jdq7JMV)lDVZ-%V7f*20~@W9ZFNqhFyu zH=u?TZXn{EUe^%E05Z$DKN0nRP!fqH4fNi1F^tXUL0A)`mM=vc2~E28vQfNszX5+k z3iTzGX}6asSXfpKm23v&E?Y0=(IeZhSl0<2;9xtDuee-kV!6O5v+LCM{0{t!R&`tM z7Fj3&$+GJx`U96X#p4qSel9bQ``Oa91_%qS>l1@TL(DfLdJmD(N0GMPfZX}jfWudN zot5L`^-CK?pF7W<+@DK}y#XBNbJcr7C)tz9-sDl4G1({C_b6VT^`rAFfXTbiawr<1 z`yFWRy-#Smr!!xF5fkA1bB6-Sl6K$d`EA-Bo8_>{*Y(X&RSV-}ABCrCb{|m;`&qiM zaAHh?$~Kaw@F;)n1jRD6_?)hnljdXJBc%lU zTx7WDj*)#xZSD&hYjhP@TlwJiP{oAxw$1kG`&4WI$l@*$QNI*&2;fC=HLvAuh{7mj zvMjH#FTtQ{Vv1AMoSB>~`4QdYPYJLqcPC=6ICA)7w-`8iGwfj2%6hd0;JU|NO<(UV z+}?W#y2rjqOKl!*&+u$oLhnJQ?PV6e$0Y6SM96IFI8}r1Aif(_Pavjd%~)diJDW;V zaetHH1GcD3jLH~HWLp%m ze7NuRm1L8HNJX^>>=7|A%o}?-?P2M6fAHvtnMv^^n}P!uycbOWw4Gmc8Hw%+Ywpfh zyh;O88g6r3@6WZ~oMA6qUznLTF17kY0}nMa+o!r5pZVg}))j@lgo!pn*jF!Vo@25< zi&Gbh*~Tp%O8EfJ8(up8OmA(B<8_X+To7|z^t^WYUcVx+*6dNo`Y0a)-yK%nEs|rg z+S`QlEWr0CIusP_ggLw%16>1<>!ULV4GtYWP5(}NcZ<+9gWD!)y~vX6Dv7%}J^_-4 zQ^`{pJg3xOayrU491AmUtX~Zxo?%}*sSm1uF&T6YwvL&g-LXM^yQrLvB@mjSbx!4T zEzj80?*1V9r|qh%4k?RhN%q67=|ePuZ`uafX`r!7K<9PUhLF$JE+WXdx1A(`d!0Hw zde0VL9Cm)X_dB`TLlhnGQk)eJ+L1D3^G*^sf06Xepj99ArK8!qe)?tI`x9Ieo+QtB z{cZXH%?DMglHIe>K*(Z;9`U=n+YNnff4hM4nU77B4^a00!JS}^&;7C>TvCxdMAv@# z{>8Ug@C?`=Ex84s4&{#1zn^tMj64$RrEzUh3f>kA|G~GaBYRVpnUes$*w{+1b7Ua* z7d_a|_?%)2S>LmG1vyroRH0;E5~$9!J@2I#7x+C%_)V!gSf4Dde;c8~D_`||GP@i! 
z=C2x=15r*~eG8E9T5ojix=8qG=1JaWe7NRRX$BYDfOcrT#X|<@>{dYox6uF#L2{40 zo1#y9DO3oQ#aP)9!~wA=GV40u!1e+B5%fRT!hN+qjEP=p0DM|;<}don22mvyusdOK z84dj2mfNkgQW5<*8aeoYU)XF?l3RhgZ2XN@yNYR6;vMssctc2A|min!Y z300NQ+Ou@PpAOftuJjWk%MgbpkI-Qg6_TlPZPk)2yz~2*CIxv$yJv61;_27pBbJZK zFtU+Vq`x-??W~tviRTrzQKoIb%~G|jDEw6rWv!gs-S$sOra#@=@<%c@No35|1*~-=nnpiUip6^1LK{Uwo zcnt5ad0ANQ{TRXTd*yt4Ii-?vYXLXZ=cpIP{jbFH7vQmM>%xd~yt(~r$d}#TgeNL) zt-MD0fj%2f@q6(_c?6+zWac^b!b%X+2BBh9hR__T9`@I%fPC7|8dH`v3gkcBUpa~u zR7mezn4OI(htm)lC=jM6$afc@xL++D-#F@5gFNL)Jv~mwSW((p&IAXAa}pd=3#Xui zo^M6@huvnC@zMaHuJe0o3OrF)d)ZIEKBL^@=I%$LnO!$@Wx!Q1dTV<|OY(=O4~-oq z7c=`}v%!LGP;8@x@XG@sY8Ga8R=w@6Kfl1Z$KU5lh@U%wK1wm>XneDO*BxFp6A^0v zT}E+aZ{~tu+9wO9h50yY`d%LNwl`^i$}4|ZJKr?+QO;;hIv_?_F@@D{=Jlt6k-^+) z;|x`iM?fOuxq9-Nba5of%iz6!jyyLj!ytSgjCT_+Pli)+ekMl9zoBt3urxYuME4=^ zybJK!?~f9j&DrMtxQ)_W+K+Dw_^3Bj7n`R=HTQAd{kedBnV^~xA;CW?1^_^&@{jm{ zl~aoYg`xl|TB`c@NWaU*Ghz3fEq9K}UQx9d+HG|wHc0r!A>yu;uZ07U@NJDn(B;m%{c_WZR-gDJ2 z;9v(C@CDoBP;>S`|2C!2C;>r?&#Bb%i^vRZ!rS^>l^o}FXm*y8Rg^v=AbwuuI;uq@ zseh52Cq;=I?)dO2tx!U;HBwP*wuY29UUyR@Cag>ky}YQfA$&*+G{)K%zPfRY?pS0C zo^MI09pd|AGan-U?xw%^-Bh_>5HCiQen!~CLUm~B3u@EP#d6EkQxiTU7sS`=Ggb}o zgjttC>(8BVQ<^EP#zxZ-LeTU+n+0#ei(9!im%qBqN4ClDLx zI4o0)YiLiY#$C8xLiSHZ$ST=S1UX+`L^Dz1w`g&b=2@Hi6=Dtk0pSfH{YVb%@Zg)+ z!8Rr(OL@X)W$iTk*CM3{jl)T?e-(ZpCF=3MXXMaPUspgdc;Dhupv~snCC%PASyGRf zJ-5c@`KnLnbXpPiY^fvF>DlO#3)o+*kx!4gq~xT%yA~6BFSMx|m0L~6BVw?eU$#_X zuyD2+Vx{{r3f`T)#<&6}Z*W!;XLL2h`5eZh)|(pq;Tdut4}30RvFRP0#sAov7SobO znfKYR+gc|^ti%22quF07PSN*KU6!FzdOJtcLnX{_(N25a0(=nz4%1>5RH zdoO(5uCC#2rnxg}@C+6D@AZ_l*FRvhp(ztpcnNf8MyFnGdO|9AhUx1fTD>HP@8y+o zSLeO_E`}UUf->8xyF34iD07h~O0t7OS7F3R$EA}--9 zPuzQ{`WD*9P>B2^=TfS21-Tl@XtF#Oy_KJ%q5si#OwAULYxs0E>Sru$b5rbRM+Bk4 z33rdpp^?o01c|;aWFwvRDnv+y5Lu{vjdpr_f?xIUZ<|AuD7SDJ_s?!1G?0hQJE31@ zK&$UDTBFC)YMQgdAE=ldk{&j;C(yXFw8cNtSPI(AV3(iTe1I? zam{4#54|$!>IC=S8$Wv%K@VS(zw|#w8OClpY8yb2!#3R#&-$0OFplwhX)$ZsljY&5 zjkel~zR@2{RJ%nYU_!x;5XT~)9+hHZs6c3({d;JcYQOYn%X3F;%EK5cK2We@E#vKNAqjytUJCz?tm@v9pD+hP2`nNbW-StUdvfJu< zEPyKdTfs@c9tG#gT@vG&1Y7gtZCVpk(@yWV?z7TuiqZ?r+OuPyZul=AC*T)Z>9F>$ z9N7KEQ>KjnXW;7C{*JK}?A<#r4~zA~pw<$fhxc;AgSwALHu@6yqq4>2<2tI??E;Qr zB%BQy+tQEUQ?D~EK+jNfnzh>*co9N5?Uxg3aCK-?>cmHaarWhg*slv7_sckj{gqm6 zF+N%NwuX|flY1U8V>Hznt8;}&ecXu?>f)kx*@pSpEYKqz*x^%`rw19d+Rt8_`D~LP zJNsN&AQb7*JyEXCfg)DMOzP9n3Tx(;iu=M2v2U*pv}@*3MS*ubQ2xC95+mB@?PnaEB@WCM5#ral(Gky7 z=5I;yMwe~_n;gp;T29-$(^6KY?V{f{IUk#^=(j|_@gOydKl6INW|N-~-NUso(i5MV zx*2h5ydNqle?4WEhdNLlT~-OA3Z-Qij+L=P)3(5Fib)S^UA_8nh+Pc@i2LioflRS^ zG(n#n%9=Fi%;hmz^z(FRhU*-1UpK&VrST!Z*ieR1fm@wedh>J;R1j*w=;KqGQ(jof zS9tem&J(}JYWm%BGSC?aEXsiqjbUhu@6-+kENjXEUCv$nD7y9~`p(djz#|83Y)iL= zDFF?NF@D1XeJ+-DozKonp8kJJGjcL>#>Zsf)J*y8GX&M0DP~`!)eyr|K9AN%r_zV&sr!y&m6vZYVmyj(<$ry zEjRsKGj&t%XfQX^_I+$klJe}hwDPX! 
zyxtY?&wwopJ%2#pLt6gWFIB#03sNF@1ukBolN?s>(hDc&!A+11qDZEvT>Gk93^PDq zPoNY#a{V#m{Lp=;I>&8#LGQ$|WoF%v^x~p)XI--4Xl3*jgmR-;#9_9Ti4p~&P|RN+ zw&X2wWX|-O$fNG@R8rJeE3pyY$=qg|x_Y&o=4RFwR=tLxb@Kwv4)#g*9TkAUhpl_%bt0k>&ju&6nDQH& zC?N70h{BkjCeJ7Y^^Hx)V~4n56Zii4xl zAKxtXn*4wr4T;pG1%<}t&ECRDIAi$!{w5>2L33g_)6QeDuLdReY=7$bQk3kFos4Nx zRrZ%3%c--MsCLF^f`d3m3Y4({>x5c5gUeLmBV$OaqQQmmDx(y)`avjBVfAM*Rjq+* zwDP9%qpHLPGSReKA0N=JjNQc=hDrl$kWhCkcmhJmQN~qhveI~Lq1LS+6^lEvmvB@9 z&825b{yde2DMsz#MBX>su!UwA;1xZdt75oet;@5`e+WHR1Oiv_sJoR5{l0yI7A|D5 z89ZOYpl5R|YOW6KkCww{rU;Qj;V36sgcn1wKO-o;qOTI6M0v0h*hS^QmO)LM7I2!V z_zX$TwW_b=#xtc@Y&HcyGI|H!{-@D5ME_LLz%{tD+FMvvm)*SOJbPp(zgo17EU34_ z^hdW)%wz;P4waHR|G;0JvW4~;C6WHg0}2BOSa8Q zB3qt0jJZF5Z5G(QpID;2YCmU5H%@o?Yqj3_eW3*id{bphaf_@nd_om`)* zB|Ilr=yCX<*>)*=!_m(nZ!}kSQbb?h{S{t_KEsV^8WlZ<6t~WJpg=fEa2b+aC+zM+ zvv^E@R!$BH5)V%0U?w1(*vRx}U<*UrpmY=BF3!j9Y)FdZg6pR^R9ma(V;cTFYM!r2 zupG}|(hDIDXE63UCc3Fj%h#6yP$NP4%sy$uQ1&0qA58a_IIqR;6lIkj?=!-~S}#31 zBZmY3FBiaUckpjEge!75R|QUH&TMlUW4R8q;lhb?Hc8Yh^>P48xtQ<_paXF>cdkxg%U;clJRerEA0= z-+8-wUyALUzF-?#4&(JGaDFXCv5D!%@%MUiO}#c>zLJsiE~K~nxj*@}sO9csPzS=2 zeD6G>AJ6v|8@eN2kPXG@{R<yRr;m;#8M`HmSw}a0Z<6i`JoI#Ov;c*s~qG)y9C)Zc{)wnou8T#0&L0BYr zjr9)GV%ssQ1~t1M!0T<1p~)3D$N}0~1|XQZWkLPe?DV!qR;N(&X&HR^1H0;O_-z&A zZrx|n5%in_?aUVhQM(kF=~Sje>6sm1jax%sZ(uvrH8V;p?JG;yn9>;gY<@G@ ze$}_4%3&DFL-K6fcsPoa#SVW;oPeX)zYYuCSw82hya)4Vwz z`3u1{)z74+P?6#N$z6>+JFJAA)xi3!)bJ>ho5p$@N@*Ph;OaCoBi{3If!LZ6q%5dC zy@&DKPWrZOVfFmQL^(9pWuo|EO8LQPmYgU%mh-{qyeX6-85hU-Vg@&Okds-Mb+yHH z7~ns(qppcsh0s@+9;!?O0t>;5alUU{uGrNq1l`w@>mGMkH?)=-+7*z?48nh(w4UFw zT{tqb3nh=xz8{Yft1BNUu|uI{#KUPo%H=P^>Vjn_3%J7jxoOrSeYL@maxb}I2go(b z{ImZdxyEzK3<22Z6vk|V3((m%oiRg=g*&E#bjkj;zRRo--CVJES z^0dJeKnGtHfAZiets>K6Cp!_4;XuVhW#m$;voNKT7Z6DA4gM*O#Ny_@R3r2SWN}}{ z_1#Q?nzU& z(`^!18Dfiib==9xJt_+Ed1SZ``tv_l|1r!nQ$*s+-~c=D8a$1*;w|~*f%d{p&~VGE zX{+sR=|n0&dk_3gstZu2r4cN)&RA&m)S{v>GFh&B!Hz>9F6OoU=eF1a*isy52mzZz zz3gdd8rCkJvvfar_-@P77&&j=KEL;znRqIvMUR$b7+Ya$jXqv|F|Qn%)|4R$yySjn z=1B@^ulNan#LRre!SnrtK4FoIlRPHM%vC%$*YYq5&b3{+Fd%iq$vHyo#9fZ$HqmJyYi18G24VRth4=PJ6&E2GUQeM#l< z{2Pk0)hbZb8ax{KSVm|qAQV@mk_+8_r=^if&VO;FdrmUjbEJVj5Qvob6~(l?wt6y! zSUuki*mfPF*0gMy#rD|N4@{2FHc3iw2|dZL3ch0yj}h~{T5BmEt58kvfcRitd6Zd< zG-2%`!=Iq73nikJAa~%%rCCFqvxQZ^p;!`Xe@8V}=*3xrg8J4%|KK zc@{kD_(42}6~AY_h*~Mp_~ZxIq&Tt@;(s@!?ic6(j+9@>V^wRME9)rW#EGkrTG!Nn z&T_fm{60$LXqkxXe)3oBOH^d2BoV!Cp4x6aAqs6G3p|FeClizh&4`UHw=Mliz!tb$}MSA))n^KgHCm{g8S$r}nT-BR|=kzw}NjD0IR9-Vl zE}YvVq4S{7RQw+iLVf4?{EVnFBE)ZHhI9i@eLH{D!wE-ilP)p2|<0@E18@G80w?v}0c}6+bTfMqZ4LhP|IDHD8t)R*^-F`YMJR zG;n2n0p+7my$+^lCrT9ZY&cmNREP{KVdY~#_t4Wq(zjEzU5>b=;46#&6iIh%;>^4! z5vP|D5uUY^WN^G}&;mC)m_=Ocv=({<;#Tn9%bY_y>7+sIY-y_0gL!3@GfH51X!TyS zfYkWJ{HNt!(@j`gs-uuW?>9D!*@3=!Cj&j3n$&PF#5Dkil3Hzpd(Wtf!xPPvgaj8< zg1eTe!XC1N?OpkrO#u+{iqA?&m~0Eur+e=w9uM3yC{aB*6CEXibPMwf;MvCJ_T-kC z8}Cfbzx103g(bvWEsZX{z2x9oNX$3{>nr{=1>vZN0e@+N?pzt1``pDRy+P@^V7AlO zYxog+ne*QizRDJK;s(cSJz5D@P8LNk+OYiVrHV#mV^IGcm7rN;N7glecBEem(^m7? 
zL#{{ooC5%Qtkwu|Kk#FKr-Q2i!T=u8BV-L@``?hC|5g+Lcoq3?tpIQTYcBpv1FL1G zkw?cozl333YWH|$^GDwLGh8IvU&>v493z0Jyv{cXqTY^60UJvZy$&`B2=Lmk#~ zu2H2@T34MMKbl_uB(1Z=&h+SXyEdW5lmoNk78bS?I8w%|e_v;dsi#+UJA$m$(98yo zMmHO=9Ig!~Auo4EOYk6-zeNPCvI;VVCnmgKc zkk>pD_WAdg595K_7`Y0d(9v5Qys)3cXG!*Ej_r%hWMR48QVE&F0NaOP*1aq551B0+ zi2!2mTcLi7mF|ANrU28?crV;m>CNS^2KF=Wc9y#P5`QMZq)&T~eJ1A$bh{16n>Erb zOjd93$obfHY{Gbx2^ow*5E>-v0>;t4?wn7MFXbo#UBq$(ZR%pr*mt$6hNyB zY1cbV)3meMCCu+n46r;awDfn0h;^3`@7~d?&9N9=bX_$^n9TJHYU*E48UehW^+267rF~TeGWG!|48c(fpo9#@h z5)i22;W(bzvhrbU%SGt|i|llsfRY}fFYJ86FBZ4jNBcZ+Y`DW5B$0n#aq~z>K>4Oq zV0i6!RqfN{4;gLTV7rWwL)#rkV;K-LOt7nDlgL{)NXm9b`aL2eQP}dmq2>B#U^cI( zK7a(0G2}Y986Kt~4s&GJeq15~Pa{$S1~+9RNxmj){!?6BIA?-swfX@@mVaAZTz`a( z`nR0;cg-`>M6CO>f%*$|U+{LGr`B}DhKJ1-ebCbeRymH4{*u;s+O7Oj`(X>Kc+)wc zuc)wX#umZ8-r@1x>MHd%D{+Oz(T0cJbQe2{rL-$f`W2Knz0->l@bjoZQto?=oSWoJ z+_txYPDAtfHvnRzKjri16{X>nGBdn}ANNrKoFjnQNbsO`EpofPG$U?Qd*-1_BKkZ&NkJ><-Q$b}bc$ zELPHX9ciC{vugo)#hUjv;P-mM5sD&mw~t>IYha>60=dC*9=+|`F~%F}m)&*B9WO3P zPH`t$3s_j$1GUo>Svk}n#dU9%JYi+Ulnp7}0xvr2jL%V^n(&b1p~{$qIGy6@9p#|f;t z86iM{to1|x7crc;IBiGt--C0kA^ujXX2&1INC}}{gU1GxfHg<6d=c&r=`p~Aq zD_zT(Z>(V+SpFjWJ%V|O$Wy~eJ`C|)FL==plLV@-*AWIa4_?c=D=6LC z)5)1*t;Scfy;z_mLEmccxoi}`Tc$Q==BThRjlt`Z1<#{<=$8|s%SZ$XG{=v1TOyiI zp>m)M4`(Xvl$0Z*NEI-?Ww9OY3IrDLkKOhD%qXKJx11N{mHM?}rt8zCqfkzkW%+=YQ6HS&T5dRHC;l>;^)oOd*9Yvp7O3WtNA`)31^PI0Gi29EaZ{FD z9z$tKG^`ZIO#wWwKkc@J6-O`d-qaMZva(0BRE?Qy&d+BWb|vPOB)XV}F1#V??pIN?W~_)|ABicbZOxrWg+&0Q?qWEYYpejVUKqg91rC9Q@`XD$>oLl5FYO>zZQGmvjpj=5faWM(f#g* zQFgjn_@{5%w$rfur(eg&VcJOzmCviP1sphcmUeJz2?x*x(AXy=njb z7_=ll5sfMzjR@YO}kG1C*_{A zDK^KsheviGwft;3kyI7=*F!#6`47^0M%rSGn@ti$$y?gl?Z)Z!z&NTulGX*qjJlTKKeRG!V}25zkAe7hf>Q zHTK0F(-*=Z_q#;mbFI`BIt?2#y5%X300V7lI4OeNHmydJTHb>-A3_Nt`CSnV)}BcP zb{~u#)9>h0B7irH3hg3>@L$faJ@Q=&V7)=Kk*ku&aY0G`Zdmd(%-Y! 
z2L$4gnZe6y@2(TzB^}lVH1PtLhM=wG@t>DBKYjN+Do(jI5OMSJ_3&C(zecQj&ZA>C zB4cM+?kr9j*`Dx=g3{d{irtB7cW7}R*JiqfH@%hMfk~Kd`TO(^S9yKMbCI^`+_1*N zc&Lqvd?vUNlnDj6Oovldv95HX~gyW@Mon-(pYg_-WPi{TKrA*R(dk;e{i+po#L zr=X*w?s!+A;ca;*X*n#>L`_damuoJ5aq*;Zd3y7w$~9iuGPlBF%+kA$fnLCJbfY(} zN)8_gv}iLOwYzU99}52!m(m0?tM5RA4PSr?erUsUI@zgj9R?zN4v$%dhRa7AOF>uq zYsV#rkUTZ|of;{_{h+i0*=17bX$ZC;--(RZ$UtK6{>N}R_gE3vGkTJ#KrDwLe` z-tpR>t@!{ z7Ul_DaX+WBWOf&3HPnBu(o=9sg>rJ<8)@9$+x z_}kBlhQD^tSg~`4=Oy0sn&x@N5zR=t&}B(&LLxKU=ZK7DT(!AAhn|=tsP5l%1D5mV zf6df`1k{PH6vj2($Cq9%JrKpmO38FJXALv$-}EbZ#tHMfk%Rk6GhN}q4&nsnJbOS) z-+XqyMlS9zzLk#jTH%5ehAUtx{9d9!8+s6JYScq#+Q=&V7I_Ok3q!At)phshybSq* z9X@NaI{d)X!+aZMElU*z(oLpTq)8IXf^gS?BlNoUer^G=%G%&+>VP zT{&|FWsGrAf;`VPOhNhGs}2#zP`ukN|Aee-zy2gsYo+%LdBxbYi{Wh2EsK1nUz1Y} zU3L7(jp#z8IpurrmLshr;=YTj(&2n&w8GP@R?<~tFl~lJayQc7rT4U?v|u~b41bmM z_l?B7(TN1_BT=EU_6BCQC45|<5 z2VS0-m@lvTjyz>&W$|rmllgj9VbI!X|5-n=|`CF?Nk2F zgr@Jf9L>a2tgZ_uPPQ!%?RQynYLo<}JQr@=_j;%#Kr&psFub_TD1~vfV52|oe&2f5 z><#h?M71gXEHt^;oIaxl54eE`+Nr)LI~*!!%*%<4Ldva_jpqC`5DDy%Iiv)(l`YCw zxw69~H*D~GtDbNgOOM!EQU*O4=cCKoglloOrprGE+sJe?y4npnO2S)8E^lQ}Mbu?o8IN9tJXh(9lxH8~V7pWC7uQ7$SQZ zYGEHn3#CVi?3~}Eo6+uf>{uD;-1UnaJk^K2-+C zeb*{a7NgrdYV~p~mE;hV6YfeG&aFv5n5vAa{9UN7hJT5>vyC5sP(<#|E(Qr+7A#?j zj#4GzC%t6cLsWY!z9ZzR^ZhxtVyK`&)_!tVVzzd*^qw*9QK77KlXgi+#)AIMTS@g4 z#r~avf(5IP>y<$P?xX-d%g2kZIpngwKA5cJ=r=>3bjGP)w^ob+x=EjbTDI&3jE3Tk z7H2=dGZa}e6k&DF1R4Y$K5Y$kSHrIspDpH)mM-&ybqtvS;C06cwb-As7lzY!b zYg5`q6(41AOkH1=R)<%I?{nqgKf(S&wpIaLuI@S0n7&Jagx;9>CN=^ywzU%uhcUo; zR0-2s$}LWYD7-FDRBF@4UjJTMdNCY?cI6ezliIRC#_$IG4Ar3%zZ#%MNG;A_aBx{P zUNHvwvRm|4$T(2t>!~2^wbqwYGGR zi!)=7ih=Sp^z_-Zju7s`ZRd^xX!osZ& z(hNa8W_E#>1TrYZcr4erP!AWQ$LhUwfH+Gu8|ujyyB{XBRE6f4ic3l(kKJ*fpn1bOy2Q{ztAgCCJdod`nYhT~cyYq3uSk z21T47k-G<3+-+asRw|W{>xzsz>4Nimo);_n3gQX!~`!^usTtmah3VXQuev;l|Co zav`TIq*0H1>oEJip``jW`s!r#wWL(!oIR^ardnYR&<*@9rgFt zAgt~UcTq;Zp~Sjhn>S87nNhD0!cH@nnG5@sc`TWEB_1B* zEl)p$ll$4sFX$X;Py;Frwec8fEe{4B(Z*{66?48-A8M=ZTO87Q{DJ&($lJ3GO-3_0 zla_{9Tpeto`@XTsOR{8SV5DcT;_=3Nzl#U|=|IdbL#+bW!@I6SORyXrj}>mQLZ~XY zDXr}}g=?$^wzzvkood@u&x{zty29l^ONLBCZ*>H2i>!Ay>n>X@+1qBnK2YwxQ|#F96xKv-9!$Swmy)2xsvf{`i; zfw}x-YhbQBSN*%;hXnkA0jv312fX(_WTP1GeqA({3_kb*DmAr9jV*-+H&1i}w;c)>8 zt7skqn&o-}5)hAp+G5Eqj!XES-pp}r2p z;<(aRGL}NviTlI*gr)vw@Zbk+TY2pT3zOI1hvDF?d)WL{#sWnb6-{0hmSo$Co2PRP z)WKv{)$5&M&$abMuNQFp#!6=h!>pS}PtOjPy7J9ACp%}&|ACr9b@pj8ky)x|XAysF z@R^v)Pxj$Tey^9NR~a(=1->&oI5#?4sBh(r;>KIyhn9{nLa!NGbtT$VdYyBXzw^Bx zf&1YzM4yC83{6YU_d%pjQmDkHw(4RdUbN0v+~(RG+t_L)N{_UP9vUAg8wMiCbhk^h zYm+6^rNU(9TJn|F zA2Zhj*`k+k&JPXL=RJ-ZkwK&ZU71>+My?a2E^%A5?m80sfk5*e=ufg%t$YO12je?9 z+s?v<8ogfY?FLtbF;!P_Sx|+AEmR(QghYJ|;F5XN(BLC(QKfTrOo^ zVPrL_v(0g&z=`W28swS3zUNF>u`X+%r*XJE!#+QxLKXLk9XOgXpgA|pZ0q?esy;>9 zA9>2wdLh{Ye1oUu^rDh0#S~B7>76VPA*pYB65>ZTvh`H2hdV}_ht$=Nc3tWJsbn|a z%_U;*8lT1`)pI+wNhu2|Y34Lf`H)@xjqc4?#gT}m(zK)j}j=nfb ziwK>!U{iK8^Hw=a-rt6G8?nfON^}l~(ql6QwvT_>w;4^2MN{XcUUd9~)Tzr#B)UFPBn-+xwx5$Va zeZQRVxh5#O{+;}fR(h6>YP%`;;A^8-Flg>3x{19y+0C?bK;7jD7Lg`ld23n{>^6N= z=TwG@qk1FWM#&3x^W0JX0q=i{<4)2t+3_U#0MKiQsd63sii7jnQ8aV$@+#6+6W9CH z3qebQ-PFsE}@w1h0ifmNFSh{D)2<+}zr{7?Jj(b=cg( z5<$m*>+WuR6>Ka2E(V42heEaUcb3hkJH`x+*0Bsl!QZh4oUkbHmLLrgA4 zf?$qmv-@4T%24bOwo}>2YX59WW6rvaN~}OeHsES8v<;HOXCr^F`jJf~{6#MPNBTG4 zk^xCf|DZzv2~v;ge-8d%@bZ-j*MEAc{|kpw{qO8ggzy*Q^X-4w=G*^`{iK8OulwWw zkk9{b2Zt@U`@*YQR{l*EFgZ4<|DS1p`|nKw{(rsYuqSNJh)mL5_gwdX^OpUr1g!e> H<;VX4@!Fbi literal 0 HcmV?d00001 diff --git a/docs/en/_static/image/mmpt-logo.png b/docs/en/_static/image/mmpt-logo.png new file mode 100644 index 
0000000000000000000000000000000000000000..f4e060716520ece5db7e85df3c3ad8fd9e0eda57 GIT binary patch literal 28982 zcmcG#1yq!6yFW^&G)M~!Ejh%{J#>c%3?f4#-61XALrEwhsep7UFob}Df;3V?w+zxH zXVmw7zi;og_kZv8Kj*L(%rnni_Z7b@@0(}h^mJ5-2sCA* z)L&^13p&&bzPp;K7aAHr>#tvQ|57<$G&F(?MiOozxhg{u7BMY05Jb1@ph5{DEt!0Y@)5ltmNit%Ph(d z;kKa$2-_6rr04O1W;vpm;B*cfJ;PVP_^@jTMxq7kw%>iudW$o$c z?(OL2%KVEX)XL4rTLysA^p6QH?*FiL_4*4Y6kr1WPhXS^u`i%iF;AUwHm6mZQjjFLswy^0b9|yLlS9 zxjFysWxc<#VOBB(F>^h1bhUBw^Wy#`>@Rv-Fx1;t1|TFTB*-Ty#wRFh1Qd}Jl8_V; zv8>lz*A7WHs2=fVv8v(^7g(W0~LH~i+&BoC#;D03j$1##hZq9C= z+HN)|J%j3p{~}p>R=fF%2s|yM;l37JE*9bAQZ?aC}b3P~3 zc|sjs{~g)iAOGU@ZyyVw_z3(;#6K(r{u`nHy8X8`3MibtP=WVfsU-WinRY@@Yg-|p z6`!z`tsS3;xCDp~DvAP5MAXj4%F0$yRKyDSN4&G+f2+uUB@rb6tBUN24MXwNo-M``E%Rxzlf9n(IbDY^>eUAG5kMd&L0t8Zg$>&P)}P0 zdsICAPl@@<3BP=Ra^`>3+Y9RZf7V)5R0t>v6c-W}Q&jmCM+%~%$|}NQKoD3^QBYJE zl~{im{wJ+LMnIsXh_Ixn@ZTx?f1&kXtgRiOuJ*R5UL^qdzqiBM#>Pq)z{YZ-@?fq z>WRuZZ&Z590PH;7T$rKm?#_ zR}$41{)bLsWo;uNE(8_jgW3TF`9Q)VLVV&vKoFmeAXLa&NJL!NTEzBOcK+h#_W!0+ z{8PdIZM6EkmZIjhUzh(3il{gLjGDHtsGRjg4U*>z?jdMsc53Qi1tb4&`!D^n&FuqD zL+y9&o;>+z+{F8_2U}HXph!G2l1}YHt%fZejU_TNIeEK`LW>I@VWdco!JSH~rg;B@ zHT*$VLg0Z<>B7q+*(IifoR_{k!#R^#o1JeTetVfSzVy2fQ_aknt1kcUg)$ z3mHj13tnRK>%BUNK3l?MYQ>fH7YIViO37Z!TL*3ZL>zWMUA9hv7uaRC|k2zmGt zS*N;u8naw1g>Ik6{cNm4@QC?U6>9q%4k#1Jz|dT*%-d2-oN zSYQRw_+z`XBE=EZ;gIkV3?|Hw-ca4mXvHEuF3R{$dG-E1czQSqx*KLb5ij;B+AX0u z#uTP5x&^^|>~lP%BZ3G{7?u=PT(19X0YMbGo-)H*%n(BN@W&^i0_d#p1zDEhVqNhr zym=B{+}kM0rs#<;1U{VSPf<<#`}fa zJxKT&dMHT{Nh_K&MhLlohpx)fCdK&ki{0QmwI;f4zjY;GFKnRDto=GPjd6e#g3&63 z@JbHdClcIA|B4$ffPR1*GQxC#_8sjF+`56zDyWD|pfy1_YXi8Q*_DaQOTLy7j#2;g@S7eAjK zV94S*AGO@_I=gIR6edok-bW#U(h5V8RS)egP6*DN^zl1*dRTYjlpM|;PUkAc$lUe* z^}XhhV?+2Y+Gdzj7$oem>RNT|e7yE{7luDj4?XL`Mx-Xv=r=m-h4I^d3t78isNq(=#OZn&~&@A2u?gJHU9HHT*p zKUVz4NJE~=kcL$q{AZ$N=?huAkEPe-fEv{`%-Cy8z#N%Ga<+2p z`%x8@3r*+gn*+JHdkaj48Kh~V-EiIy&xhrvSh_sCn zcH5Ut*Iy;SrjmHg0C21AQ%6rm zKeJLs-qP?mcM&bOC@jVS3kI^^0qaS1@5KzJu)M$9J4Y5kPn-L~KK#sTuBY+hUinVAVh#C7Hk7v74xjgZ2ofn|Vx@94ZPA(UD zCP~Ay^Z2f{vd1yq7x%^ad!CXW+ylc0`I;Ra`&FW;{>0~-Ey8&C{i?eg@9NsKbul(} zUOPVPRRGIALwbPkDVIcMZ!&LqFe^jMzQrty`~JrGS5#wuxxheh!z=4VQv}emc3$I$ zmjm>Ml~{C@a5VlxWpq16!IBS2UX{6Cd9y?X zb!VVqjQXl6C<}_%yb%WYL2dX|@)HLQzzhW?c2a-RhztGY7ZC#blWQ`BFG2=xeREVJ zvB(??k2>SHB#)||X3f{1?3EtQ=06WS3g^Hw$2WxZpmNi@#m2vDceCl>~Z@*$q zNMebH4O(>3ZqtG9+U5od+|3O8ljm68-D}?+Jg|ZCC1Wy`wck1Xa!nC5wz;h+%OzO+X%WxQ> zPyMV!qC_0*0NU3(s7+%Ur`EYSSPsjkZS|O6)B*ojO$L<#KJex^TJpP!*+Uq z1CCz-z;X(owle&#ao1p6aNrXoqa>QW4g`?P?-D|0{as}Z3$S}55+u>FUUtaDsgX{F z{Rrz?+&F)?7cLdVm6&(Qgkx#QNmggO!mdmFF*3pjphKb|BqrE<7+=t*Eq$yW zlLXCkmZ3@=vA<9)@oveV#zHqif1JwN#|;lkJbkR29JS>yuh$d0&#qi;v({RzAexJ&q zTQ42CG*g}@Zk}&fL3HSi>21?}B)0SAjgKtL4I?5Y%c>gOkGQv(7^Kq$FIlK~jXEL6AY2Q32mZY&Q`aF1*G+#=dXc5uzZW(SN9wew^ zBm21mI_z5j=R=!K9K5#qYS^_a^So{A+PdyDn13qUQgiNVAY8R}`;_@bPuc42N6zB6 zx`K}hgRst3Ev1^(w{gQC!$H6fLjQNVTA}EzZE?qc6$KGP@nohCxs7%u>#BUZPGGQcSWOPU@<^DvHmr|ja)F+M0WrDN8-xbg?+bP z-)1@PHLsZP4(_R^q~|!u0aVp(Ds;nG#0zBi*r(GLRSj79GIwNvJU+Z{NU7=F_ z7X;`{9-10EK{ax$F|~BiVBEGB=UQkN&KJR1@_nCp870uJ&br~2y&hceSYls*ZKzfH z1?zFFvpB|XhWRhsNatSvR6!Wdct4$4%EaBn`Ep#_@d!>GOnxaGdyNA)kY6JPzlq&b zUIPFibxB$U=(0)s6%U`9{BEG=pRrE7zh5+@>))D!ho7XLP$V|wc>4-YF z|GatAUu@2KG3*OT0eIeneFzb|wi(`4s0tbz3ZJXfR!FeQlDM?sqwB^7`NQ@y-V)pAZw$vz{aw(&-* zmE-4OZrJMXR2|pM2Zt8j=8ct{-NRu|>Bly*&fcNPM>A?G0^vb-U`yQu1JB8M-_g@| z57V*2fv9Ot&YJ$YMatj^izhvRgr)uttl;d-PrmE&&kQ6&h-yM!S*cj1JqQ67&cnVLk{~F6i(=g=Qo5ui{ z2H2UFoCmQSh^d#N22ZQhIr#g#*7?!d%G~^4I%Ua5eP3YR!}&?Q=9_vJHcwPfv-ZIR z6Z^N(8DVxRidXo(LWxyd%vojDs%9}TcMivhSnj03B3Nyb7RlO?9`9`6=p`5Ba-7&) 
zSxMbjOY(fzz1~43nl?OVc=meH*D>?A^xLJ$ELCUxUUQ`z~6fzmoV7H`lBE=gDl`x$O-rF0nZ_;)%z;dZn1aD_D_iDIGv!havEt&| zZL`*8j$QhtbO`xVQXOW4{y{qFkE<8jirw{^N2q$fg!>Z@Rshcyve07ljqnC(N`F1} z8jui=t46Q3m0SMs<>au=n}q)Npv;B7kBR?zD$9$uXA+MPqd5ID*;l)oA6dlPv0RRX zK*4P_M;j$WyzMQ{GjGrBn#ri6T01^zUmkOs?M~u}>|(r_>*(Ui*fu~UX;#=Id`SMg z{Mx#|oyD{3T*Q}~IPXpoWyMl$2f11nM^1O?r{B4o6QMy>`g6HH8D_r+XZAvFwa@1j zq1Im_J@_!%aBQH3yZSVhPMl@okw&lZ9T~VYOG+|cXNI`N0QHU})q~nSh+(+E6{rmb zAP=ol`5PQ9`6wN)KxOSD)_KeOi3VwusLO3j*Ys8^6Sgm!z3|H=YioCpK61Smoj9-m znuBxs!5LI_X3>KWmKSyuN(95QqsjU!bWVK@=q42kax?6t&EhO16O+2%Ja9i6ujV!M zOH#8J+z0^^hga2kYJJ`u3%EOYMHWB-5VIb}NZe$Dv|yDla{}`7Z@pqbTQu%+?vfNM zIYZrB>=h@M?thO-KUoM<{9#nFyaXwMD*W`wI^QuG70NkNhpRxPEd2Run*Grs+M6mS z-6FJ7fO+evi|YFrpp@VSTL18e?8=)aH|#~U_q5CQzFnm-Y@Nt5$qnFAqHKmIhyJ|b0NOCbD_x%8>yb!QV=DUb)ivo68$p1 z?vK2W2CCe;EE{C0`c-c?A;66Qe>>WI?nZ)7XIq_=V6{W~SG;5=f*bmQ@gbLF*WX%0 zgT9myQvIIYfa&;K;(M?O}Yl*kjq&BRrDr$SzC(g9{Xzv!%j7Us`&$K2baxgtdK9NEW zqem)$jiFkvqufCW<9c_Y+C$E(@of`ws7X*%H_J%*L2+m>$EZ>kEoyaIyKyWI-pi74=?c)9HjdNg^ER zw;I6e->DY#O8-cCJ$P0RXl`6>{JX!}k)rcqreKCFkn4s}uSJ*T+0d>%dHjq|+4vgMB&8kARFCnZL=88r<-)x*BqdkK$$ZKxE)qc&y2HXmt}JMN?} zm@j6r-61{;kNiDW0^i{|pgRShOC$K;xMZ%xqei@d`hKPb)vZ9!DVPHIE!c+Hghn(_0pT?_yJJvUP(yaeI2Vb(Lx(X5SU7e8Dc5HB+LviQH?)NpzFW3E7sGZ}l zDvTup%zAcog%=u+Y`|0EE8!MT+S6p#{|+>t-fyKdL8pW~q*{&`*Wr*-WBvGkmYpM5 z7mZt6G4`W;&p>Qa&|S?NPT}YL(EKZNo101ZLxbEK=~$R*{ef@I5Wli0J8gINDosP) zw^mh`7i-qTHLO08aj)$Ve~G5bPZ;DRJG$Rzo|J9il*Gtlqy3{3@A^L2bDLF01n}?? zjHrzc_%LEk@c`iwIl&f}Hm0t{ivN1agGoc9S1tGs@TK(KbWPE-9~Z87uusvY7)B6{ ze()GF19d zTwQy41J!xP!`nWx78Oq!F-wpwkVs>lzrNVuC@DL-cV0`nitbi2iT=?$s4RMr4BX5r zlz;wWs((K}#_`R@k@lLQA@a|m5gm9@evLNvns_ih%wW*|)!1}^CDmXPbCl6n30%4Q zi6;LujK~Lg>v}(O2Hjq+E$qPbKcRW%#ij~P4yqDErjX8-P@x|I_yH@cu+Mo_x)Z7#?kfa8N%xJ6+wu-gw8(4}0LK5)?T<~_%^5lt0`-IzlP2ZC`{n1Bw zD>QOcOE2!9!A%@uMPb$VM;{aQ>{Lh?>+iX7K&@v7|?(JcP_*7zs)vFnm_;eXWs3V1HPOm=vdI zxv!ycf3i#Sz9GExaXy|Iu?fC;iYOp9dKEsC&0etmL?H!Z$>qgnB;#R`k|gWvF$FL- z>j3F&X2soc8Xlmi^q$jH{|obxXW`*xYOkaJ%ACfWqxWdpOoKy`fS9^nR7V$$r8FQL z3FNFRas4*W`De`wsnY)VYc)}wU)GFGliM9EG{Mj4EABL_FWE;qGjUr^%td|)C$y5 zwsfE&z!q$3=FApR`dv z+rB!OXXq4B#mS=?fo}J(RFyj>u#U-_zUe$^T8Mih=Ok+Kp20I}tz_}M=eR9`DNY%) z@K03lq`#FYA{*kWpO9d%L`TwqqYcyr14O09N?jN&b525^Th$YvV=-<6Sm6)DZ;H|k ziilKGd7scK3Z*|u(IHMCvz))ZrLzopRmyJxc5TNG_(lRU4`0#8^u>%pTV-ofbrG0z zXgNx}_ndTg)vsIAIYahw?PpI%JvSA)Tx<0za=kzM*p~=eooClr9~ZLm6g7Czsf2M+ z`y9RYU}W$o83FC;$M`V5*kM9ySCXylE=jBgs>AX3$RP%I@}n@E4?^Zm{h{(20g9ZTw2W|npOIQ*Tn&udkRH*RGl7cv0jhn#{NmNwsl-{X`DRhds# zYdl-1S#A?_gZke9lR?AoO zQic*(cTO^b2YTVA;CnsUy+veWhaU88=5627zViMYoxoTBQ-bE)650GP0At4WiP^iP z!gBhN14p^9z^`jDU`?^yeW1%og7Fqob%iyqT1+r6I(xqKz$Xqej>Q7_mCUo4guPm{ z)?7B9Ld)X9p)3~NE(u4m_m+97r=qY$4ueJBrnPi)tuRj*eq1W3i(^x8k(I_vC?hO6 z2|by;=quQryO#SfAg9coLG{-^968+P1E(8aKn^Y`HmBa5inlC^Zp;Th-$dPILEZTI zrS0Txfro2Hr6Av2>O+rN0uqcsaEL07dF{DsNwj-a3thW z668h_Spye9tE}@3lRyw4l;MxjmIkU9D>$7cDfAqD*Bx0Od=~qBmr;f8WkG6lW>YJWCs^Sd#!6{C1e$P0&)60rh>8)tmESnw^bZAy08Q>{+99{t;_S)=T zDfJlBl#sh0)E0lz+cM^XZY1X5S$ZTFOg%~Rr^8!99~I5g4?z>>%sr)8j5;$$U3(V0@j zIGXkq`^z2rAoASr=$vf=K}5T;-Wi1=@0o7ABN$-s!7mRG0oF7Dl&s}F^r=D10VAv& zwWwo6_m&LsTT??_ya|+x-B6Ba_rPqS6IF4qEcdA7bgz8pQR%m0+!X=25pg|PXq|k% zYFrQpdGzM-x^R~GUUClW^QY2}1jaH7l#aq{(0B}ZMX`eLTLP2v;oDmB&&EFYkrowR z_V<7GB6NSyexoLx#q3=7tr)7Uh<`Sn?D06z@m-e?rv!SY4hG$X?Gl~P=jc2w*-+Ck z?OqZ#j)D&WUeh%18ytT;aRxel=eVG;lAD}SDT_71`~b8OsgzBM=|Wf)x`QYPTW%sn zT{h{*=`$0lC1p5WA;vWqk_WyUo`Qah!*CM!sbIzUaRh{Ms>U};QNO$p-p-$uM;K(T zyX4JQ$Va9qxhtprPSzIu(#mOJ%MNT@HioGUVn}_!%mdh0ESCH%6*m?DzK6qq$vdDv zk;Kc=2blPh0aE9n(3V_TC?{{{w~e2Kf3rCprsK6p4EUN-y1msI>7z`C-({g4*10}x(t=={!p>*VAmRgK=T-Hs_E 
zmc_STI@j*%#X?TcZ)Br1-QIv$ZQK^8fKbeyPqOn4)GYO~-(JQw&Pjv~-ZXy~eYN_? zDsn~Xxn{eYzJcz-r*j(}D`U%D`E)AZ_;bef*GST44zY|83C4&ca->%nM}>W|_3%A% zV82P#5&nhA8#!UBS2fneNC=#G{!IdeZ|7l$B9Q0MyKp*j(Cb7*M+}2*cZK3E^v<~D z3uZeng(E1U!9zXPxb!DFm zlS!oHAl_ioMWb8NBVFcq%x>Bra@jFX+1DoT9*gKa#Gq4Cqj(ZVhhP83yGp0r9Ogmr zQv>{VyYU;_3oaZL4akn1AB&x;=c~o{uH)PIOez|yUtA0fa^F<3c#$`t(W5i&tp|Iz z6BK1-3|*!)jj{TH&-IloyF|b3%SqLtxb>Oy7Yg>ByXVHRvuK zI^)^%l%>fBGvEEx0NreK=-RRD@UB&3(cB`v;d=~2mW=i&AXG8Fc|NEN}1@b**cP$9Dc@oszO@!HX=v{au(MDGIXk~$pqmw z^&_*i)>|6Pb)g@__A|-nHYK0Xo`yBld9{4t>@-bN|F)Ks*fG^F9ltq=Dk1OLF7qU+ z$a?CHj2`>Qr(4M)ZRdB$Z+1}5!TyfYiDbv|g)5ebkc*akuf9V@+3IFx?V}#k(hO== zy9Y`wl&_RR6+Bh>B9F-dBqooMY~RYqW?dMUpi9Q(W}KyD383As4XAz2f3-ppcv6$WyXPtkG%qDA#l!DACu2kJ~T%9 zQzP-F@`WQpzIkmlzVp_WW*gOFIAie?86)?d^~{zU05Q`FINxDjv`frU8oL~+khIk9 zj#&;sRnx8ME=6GPvavLRB+#VycG>v*1ck%`o`cCw%Vgy|8nt{s8O+4A$T*^z95zY& z@);4HljJ^6T$Uuk^IXX&WbG0@(>Mc_qR6VjS<$O={kgocH45gGrUG-=lh_^KnohjhDo5gZ4TNqMOAcxr#XLnUQo{ikha5>N54H@?Zzy)%$WBMn^=UzP_auD+; zacwFoS18Ip^p|sgNIjH<)>TyQ)=sX6as3q%w@s6+sz7}8XqCk#zOZ7;6AXGL4g;%O z?Jna8)}zBJ&3r{hqqs_l8`F!v50nL3t3k2qGERfl)!BN~94gdqGFXbd!nN66V^&*# z1#nTOkyj?nY}7V<+^Bm@NVjOz@+`L{<douNWzaAzc5k6xzC`A;7Z?=XFNf; zZ=5JxV9QaD%T9d$6Bkm^d4q>rCHyu9(x$76Xk;toNan)tp;7UL`cL>KwUCt%kIgD# zJY;t6R+IRuEg5v@lJUfA)ollz4U$_$U%Gh1PZGwUJ1Z+2cr1{kZ1U^9TfhOuImNIecHdu*4u9jp@MLU zRMiSiMyyXvqUmFCf&Cxg9FgJYu~uu0fpxmRcoV}Sz?Zv>SUIeCXUrS!wPwg1=U9dnQ{!!Xm�Ei-is;{x!X zRxe^R&4x9ivF35&RRFm~oJFyh*j@GVWTLF5Qb#FTGnp{+ae@f^GgJiNK*SKd24i}~ zxU-bg*^FXTFZ|S3{{onbODaL7Q5d0avhXfg;kpxC_HsIcSDkO()hzc_?%4?qK&s;1 zKAR^C;M|;Dy<3wYX>8a$^-{x$=iUpJC{$eA<9A;W0Hdbo+%%O%(1QdPDbgx*2MGcYNA7rL*`15ZKk!VMx5f7yL% zQP>jB{WEn`f@wvi#boBgIa3FCJUR9!wd=Z=I}g1(52ieEOZ}aeAJX%m?A>{+1JlZ+ z(32?5TF$%*!Zt|Tm|zrNt+o{wx9jn2f}M#Ryf1?>0h;6>YIbJ-Jymgehc;USMH;`o zK8nP?D`{mlt>Qb-dsZOJytI{Nanu2od6KQLPANwr#rj1r$Hy3RlUX=&AXF$EF;1bH25AQVaFp0+RI zl!=()<&u0YzH)i_-D=_(pnWA##$A-7DxVHsaSwaHPqS(Bs^ZaZ;3lhif*|D=1=cet zKOm38S?1ZG7q5b|`gj3$0O^{Mr@@PejA-&7kppvNjvd43qjz7;+bBHILI+MWn?sq5 ztWIr%7y|moQ`d)9f~IznwVqm>hWj;#kJ9L11N-o}nE(sju9(m=R{<$|gTYNL_3n3> z`tzUm;pt<_4G^S%2hp#=TMJM~F&x2Vkb}vM7DGmiVNtcF&vRjh=B!|&;N31=@?FO! 
z|0L92P|r^!u^L-a1ij}T_`A}YvlW2WKO|OX($BS}x=bXw&wed(;vHsYDBpXP&&BLV z%6I4whgGBvV_lH6XQ@q2Q2*>WoP2)u_d(J^r+LBvY69LZ*t~MDw4eK_bGB@J zi`jA+GQV@=Uiqxyd02d;cVmmwqqKo+@>+VB67bQ{{_C|DC+_y9)K9mV>v3#cEhhv; zJn2cfUBLIwe8H1!9P6$TQw0D6I+~YJHVbDm#N)ey)vveXxGFs0yx3ku*#=e$r`}EI zn{^S#SMPF{`V+_N&HSVSI*{ z_mMcEPxEdrQk_;vkLtz(1aMrjf6|NZ$W<9xHT$T3^0HOEw7v|Xo|}E?NiR1wS=TyQ zu3F`V?s}5cI>`t>3p=~myZ|;zDr8+t9C{=C!b7o$SB>SOAPa?9B4+*_XwnhxwFzeZ z3XvyZP!$$=Rsi!uCA9@cGRIYTG*eq_rjPjZ_^^JT8C>O%@u?Jz7hpqbrJcaSU_PG<_5_v!w+vb=W%(!`!TL{bgS(3shWJ}D zkNCyr?G=ACz;9-Vf3hKiR$%M0<}r{Pp3u%NCZ~Y8%7{G%M0r@(iJR|1Up4;&97*lX z1c(nRc$VF+9tXV|-+%TY=oY%JGr!Y?FfOQ|22D26^i_8<&Jew)n3?%twt+5yeXp-N zguJEQ{SD*Bnh47W(M5SO*aFevDQV3yB5xaQP?VYQ*&~=DD|pBSll+KWT`tP^mG0hj za3|M=!xQ`xBcDwDk_6;(gQ;V6$h@egJ&PuE~m~ zAW_tlFb)>iB)nBjngAMBi(V@3o%d{ewYr*WaTPM3jR+@>W&1=f-iX~ekB<%G4*pi!D#I>i^OZ2MsmEs?jEYfVI%RK>ArhfiXy4Hy4CYC z%T`dvvCr?6*NJvurhAt>M7s@#-0*d5@l9LFAQ=(1>F^8Lcu4RK>U;_;_6`dM$Rn1S zs7u4|oZg#U9P_WggV*AEXR&KiWI80r?v-6caiT))!nVDFZ$#T@tQC8#3C<^N1*^l) z8|%CG4q&pUI?OE{K|)$CY@HGql3!W)cggM^J7y(Y1&1v+{$q@{z^ z-*5RPbs_R0ArB)Lb3HLEvy+40*wV;J{#|R)qS}wLB;UOuf46*G>9!%;+cz;@wdjxm zzZ=STk|St8A@mt#gF$s`M+IRJ{5!iZkrW~FV%i7oiA_~Lm5X)YzBXJ z;=)wPXbGL);8Q%Ne!VnU^TeAxMum~_w&5YG5NUvQGJ873)e&D;h-e4NJ2fql#(>9T zA|fYTM)x3oxeAvr%e)&zZd~8R?>Jtdp4p*Vhl1%KBFf7$z=>ZAKf!JMw^ zkVpB?GG~nTjHo8N?w$K3MOtj2`UazO`nZv!N>2In+xnu&%){I`tVV7+O4NfLH%O0E z8D{GupA@VGwNkvQfhUtxQpE-P7+dvsA!`tVrWUppm4(^m+u&5zk9mGQPh{~qN z=J*}{%YzSq`i=YWIKWflS`6SroebSZ(xDQ%!eLj8C!)ul_|7|?L`(bJ>o56ar`IL2 zS5+I|=B_;4l|(Q-D7mix^1S8Ag#Opaa@2v}39E2Qn8qE)9cEpMq8d!KN1BEhvh<{n znuIWS!+jddml^T!fN#-FRrJnOG}qiCYRShk*zi=HZ|oISU-~YpsLD9iBBj;U9Tl5l z>J&jZipLad4W=(Tl`CXpVA{-@R(_wlL4e68>V`;5!yVjkRq~)IOZu$uy~a+_FCdu3 z#ptfOe8q{`+RQ^_ADiNTl(QcRXNQ|{W?}y{Nr6ZfhCfa^(2ougaSE7~qOOne8mzIvIC z?n_e$W_#u5!!-|FF#}aW=Z4Db#PhFUTa;VsJWEM)qR-O7N1IVjw56E>O#n81!^t00mHh^<&nT? 
zYa2(;f=eT|U9PDcR$}Xr#$$g68vURjrU@C3F-GoKu04tTV0>g`=Hy2q=H^IYK+;Oz z;yJ?Rdce@kbN$45yqlt~J%ux@YY%YQCeiD#(qWR^?()j+MRj+UKrL>%w{{Ke+sLv9c7yU-Hv`k1)$4hdzqlElH2W(y$|!p05L z-68K}HO@K%Esky?sO}o)4+*Z!wA2G=s!u0{0p!@+0Gyo}b{h6k3;O`KdBu1!0Wup0 zk%)p^fLzy!Og<0mde?m+`TEjZmr=CpdgP=v6SZsXN`YKJ=0A0`X0D?4Y^|Bat(mwU zJg8O^z~s6v0VhCc|6I~;F$u3$H9CIUY2p*PH0Zw6jM;KgV%u2 zU5wDYtg5vUVSAH1+^`VF0GCWM*UaxAFL_;@42$i)XY^UB8+v)6GLR_w@L#L^l)nwAkmOEfrVzC z0S}&o9#HUU+Ej3x0K9tZASghpS@+e$AVz-zS6f2YbrgP~ebswNCL^6y@14OnZ+$B?_E+DK$8XBC<|hgJzT^FfA5_tD@{SeooXp# zE?od?q?=}xIe>TS@L$VNEnN${^iB9byxhUY-&zy*oFrH)+jX%=J|&jq5I!1v+lS(!yFX^NH~db&?7i#-Lf zEfj^Q;(RIVn%>8Ud`9gxa~G>kM$qLFAOUw>*KZAmNuEJ20L{i%lgGUt&URJ|=Z^EA zG(C6*C|+y|Sm#{@vJ^d(ME!Sdn;jUO1tnlr?rWv|=mAdlNp0hLWe235i4-IqLqZfy zRApSKjE-n$xE?U+cu{;c1$V|}o7vlFykhP3I4J~!HKZbso;@*MeIh2TRd5EDw{Fyc ze0+j#4Pve5)jtjf5Eblbp7QtX&h^(h*5O)}zo(|pe31f#2-)=a-;h2#i0L0rAUoI{;FziDM`omqni98EXp*oW;GeQ zCeATs?$L5aTqqIWN;BWyAwg%#8_z2b~`kg|ZRzWOGMc8m{Z9eG#vS!D#Z3JN~JMp~Oo9Fhs zs^a*oJJ)Y?hGd7STXKuVDnBd1$31vR>_k+*_kkK-jtw!y`##EZvma@(MQGfL$`~X0k@~+p?|->qB|+Z*7&@B@?~AN^YtZ zC%knt{3C2^tcBrN5?1s&Tu$PAV-X!$_`S8X@>vFZKf)V9+okeSLFPIx_NmgTi(;ryr~ z#KUJr*15w~;M|>L=u07G(7VpSr)&Il_Te$4#D1T=n(=e|>RY2`i0cCQY-Xkw7Vvnp zkqfTGgw1(ZA~h|nm!BLXuj)RmyoA;EgPwhuhGcFj;YoK#@+N0AQ@m@3=#&B0i20J0 zWm3Yn%8ME(%-^qDf^g@mo68SmXc*+*n!`i@d^2>pG20?kf-_s2i^$#e?iUiZL2*f* zDhErMG!5@rs3%oE$;34vor6ub%dVlP%a!m3cOH8VkGy+Rc`};PBu+)o?+Of5Cu;Qo zyE2a*rv@B^4IXa{9X)MNb}ju@j+^O*b}%`#5Z+>gbia0So&}3>4KrZSW$GlpZy|^^ z2e0X;TEB#SF_9<=fcwPqI$!NE*J49Emw-#O`kAD?$mu#2GWDeIqPmje2yAm_XK(7HU{S4+*!S97RyFZ*b+(bWFumMb&N?7j@A zs%A1n?Gt0~!*EK8Vl%AP;{bU+f~fCitVw&igK} z{YFx-#?Gsf(%$xG5A;u%fTDqpocn&^s4v()M)M^vb>eJn2VxUrs=UQ&z}6~i%9`?4 z8K7u$zmJ{-KMOBMHP}eVA%E{_%h~_a(^*Ef(Qa)Rf(B1;cc(bT-Jw|MON$iOB1MZ7 z30fQiv{2lk#ogVZxLa}82G@S+d(QcOWF>2om3ijbGka$CzVBS;EZ?2EvN0lCEWGscNPY{`Aw6cNQA@^?#mzI4K3#RloXjPhDMbfa|C?CPE&GLt zD*IMZg`!hRv8)Dvf^cZeLqMwy0CV41O!p#2^fiZEl__I(-S72{S(#R=XB0>d2<_T{ zvRUQ(NyNvlh7G6nqAIyxYpu}_Rh-LdA3e|}Vyu1%8y;&sNIGCms5ZsLy}?U3@LdGp zxL2*PuU{nIX6H`5A~6|qe)p+pMApQb=N1~TnYUp%9#EML3pJagFc+JA*9QPJcC?l>(53v+ADL+ZYZ2F>}=vrG(^KdjQ zDY_{wXcO8qz0=7PN6c!b(?k*^cBo8o!>FP@%qL~84_L&kUg>D!k=Zs-P^)?%QC3mB zUbvs(jup=o!H9~tQBniiK@Pim+V*zye3~Wqp9*I@EL!mTjU06sUEHTS*O7L^Mq_p> zZyTSOOP6)rUd+jEI$$uHxy21~L^tF!yB?@^x}SZ9+yVn8MR-R}bsu5j8u zpTrK#My!TviU#=b2-R-~b$Q>kR2V`Bo4(^xCGA1{IU<-40;mk z>8l78yI1$^do1}}y62-H>4V3nX-0oO^Li)|_{4$R@nt-<#$@=#Lhj>M)r`<{`U^SB z7?17uiMhFp#sZG0tKU?avyYrPHK$X84fVqf^`i`%Kn?KME5lLFoE7l4p^gwkQC`J3 zX8XyLI)O)}*3`uqu08bz)LOBj0pDb}7R`{CpH1ONpq4SJ;6M2UC3SZ8GB=?K6*Ay3 z9u*!+zG@tha|(Vnh(39Nn$3$=8RWgtfOS&s!V2WmFfAijSJPE6Yaz}ZkA|j(WbV!k z?K3F&BaIXNesuTSK6k*iPGy}^qSzADOJ!#$+qkttsY?lC(+{pJpGA%w(e*4z!LiH; zGAT!Wy{9h6weQ-b%zBsNc{q>Cir9Vn4qVq0(}M7Qq+^Goj(=0fipgtCg1R_!XppVY zx`$0SiNWnyEa!_Rs#Fn_ z7}xZ!^kR-=DqI*)&cHh|Jqxzlm3`)e_WDvcQoRaJ-wwj8uXGG_8xPY}pJXWte;2jM zpVb~=7TbN%JhU>U(3Bay%26_ic;Dmw9SXDOwf2ToGe_;RmZ6}Ppe#`#<0hN@Ah#Tn zv+OdmePEM|z2i9WZ7SV!wVaCcf&@RIJYGZ7uY_tsJ7aT9k+Qw>5?`Mo}L??-9RbK5e!pRhdBkF}R4dzbc|1f!T z!SR;_DotsAQU|4yDh5UAeGA;sOXj2qX>Xg6c^5-e69vUPU~VtHxo*v@9|>}3k*jgE zO)W&NmH*7&%g2@~;uTH>ph0~fA$~~7|fLYbqqnMty}wKG4M#ctPk9l}nw=9lfA!J&tXMC)&R_OnLUv-JCy z#@}%ValJW8QR(`mFxO8}gT+^3xKo&aVMLaQTs2ShNF+N~_tpk6cdqX3mSIl$Ni2Vm zJIlByDu8}!hdjt%UdKqCpk9Wrd7m=3SG}zN^nyY9PawoCVo&JlADjG3O_aVZ&Ww6);l7Y6dCGXH8S!3v{$nn2nHYu!n{RZVEI0NUpI zylo4ty)o5Hb6r#FgnY%|;eE1>z?|`P7x z)X(RMd}M0Y&s4G`WvC0$OTVITx4Fe#Fwiz{IRL^``OOIy(Odw4Tr6SV{E~&AUmTe$At^(WEL?g@&KWTAy z<#D7NdMf;i9yjnXt)fE3(v?o$n9b{?BvjDst7Cyr)bt0}*DU!;q~44tAJ)=>ZOU2f zy8PO7CCTxS)=+8aI5KubhEl5#3j4u?h`UwS$6BRr*7q`rKY$3EeC}YgwVNmDNGs^A 
zh(`a22O?g78xf0Niw&>rb9qFBTYzlfU#~$W%8RmXnsn6xL9;izmPvwsYYwD7s0f>} z8&^AH6ZX?jHaQAaH4CF8M4MiaT#P{x#BIlz6XWiQxUn(+Ii=u{eSm5EK!rnBOH=L2 z&hLrYKahvH>1+Cep435kM?t38a;HNF4_kEj=Q7@HQ?otBY zcCZMdA-rXaD+hX3N#7vJ%5q-GcqV0`Wzmm+>c7}r9;=WCM zZ_+$P&Kzbj0~Nk|)z24=v+0@5`kxF_)GbIR6sE`KL2}()GR-mf|Hyj zaps$F;({9qL-r~MSd2j`>uDsUElDroEbaxKY&J3rr`xyn~HkOT9V@H<3A8g>8$D=^6xlfi~yF?O)vcFfsFYoNjZ8t0tA z1s@M_^NIIl2k2?N?s&M5spjafbzm(ceF z52ojwhO9^ibEO|uj6{i#@pt3Sl>Kf+dN&oJRNp+yjtiy{m570kNj=(=jyy6sU5QD0 z)Zm$%xHk25w%oUVIJ4R{9O2-mL@od=*v-(VvNUAQh^VxDD9x$Mk!f81I8; z8G4fSh5EU;@6fw`fkQF+W6VhYWT`go1JnZZzF2jUclL&2f`$J6+L}?dl?H<^?a4Y{ zF4wAmz^oDnrQlLWdq8#<*AS38!W`kt)0J`+b)`fZ5_=7PCW7H+;CnRSn=C$L$){T{|Y@eR8H&Zd|4 z@O$>_X62%fSKYSQ@Mk6wWDJs%gMLCbB_D8l4*v{QDJOSi|sbseVchjwofJY4Dtr_(2T+5{p?$?G0{ur_O&^|!A( z%R_~=38NpMCiPxSDbl48kZYF8iaEu1@$(17mSJd+J>_ddmm0I4V?9h#qr40W&c7^! z?&M0Rq(vhRG)Jzn^oQg;Q7MryxMp0H32Xeo9Tlno3Dv1W&J9q+D1Rq+!nl*42v-hE z_*t@|0s{cXM;!?m{-8$uhWq)(*|f`qcXLgy)t`6v?wBrbTuSb^_mq(v?$OunpYMqx zhnTW~Ev5;^Rh-}FeD(ig)uV9Wd3BTzQ7@m zMVGN5Tp1=i&!lO;kVZ&~C7e9$;(DilGyYf(q{a3A`JlSq(m0DK5dL%F0^b`}Tz`@* zFF;yBUkNqzGut#UZ%7>|YbU$quK&(nL8`zfiYmx4HRaSo{+4ndt;|ijds5Stgor#^ z&8;3*8=LkA;j7HC)QYoHwU+ywE5fig>CD`<_p%d zTkoRz`iFfTq1s*cl|2{ibt7xI$9`LNA&rtiutY^kZBl%`0!~+=jkBW~`R2$@9w<&%BnZ{Tx?V?!#-Bex zXGhXyUy5^~mBR+-5}pZcQ9GT9wQ=W|66hNGGl_kyGgu_laqda#<;kC;PGTQ8adHF{ zJU#o1Sf5bgV_oC3*AyG}OBxb{JMpKbz$abm_%vszDxnboEU3;P&;T zRV{|yH8`pQIUv3(6^_^t@pCH&wKh!-O|vgD?RdTfUi7@_T`#j_oR^Fzvzp+^oofdX z+CTXL92?sOlvQppWf?rrm*6^OzFy2P0MXFS-V~?>q zH4j_-REu_ae^s0e#dM||(sWe)f$IKs%fo-hu6cuIUC8H-g}4JqH8-7|a);`kD=8k5 zo?3nuBu)@p`w=#2q3Uwr75jVt~iuk?fIoBZKg>&DkN`IvM5PS3`jCz2zWkwUPEvwbo(lw z{9@c;CH}CNYcMxKY3WnxhW%yzj=RYle|NJt8^Df2(e`qgWj)vS8?xXc4>l;QB zxW~XG_PZ>RAy5EU9Ad}(^##6*%a{Qo3ywteNv$_^F4|Q#*aYsa>s{}Mws5)ah<8@( zBZ7(tSW=(XaIQULU_clH@nbmp4Xndv;{e^v3O_mz6og#|4mNjb>+zXxs>XA3zRwR@ z>FK3Ycc|NkjMpln9f2ld$J3~~tp^gn#DbcsAsh_B%%8->A&oXZDYktxFVkI6-b~Y*zZ=}VZRd)b z>h*+W>Y?SqEoNEkU^G&h9X4;2x3< zch8(OYHJ9Sl@o&Jv8pU3FIJHlpL?EmY8)b2Mt#NPtjy5bUt6`}zphhy0l|M@18x`P z&dq*gSbq8?PaPM2>(HM^`|kY3cfX1kqsE@(SlaZw>tw+etzd>U(4yD3VQh2+IHlVF zM^imAgDy%V%hzYB*zJm`(LzdHT(j>L3(0_=`ELW$ZT{0Ck@J`9)+@vQZM?d!<-Ww6 z5gzkQj=urt;FV!}XU==YeAL=Fm0Yx?jLrvtg=@va?ilx+tRZ%VQ{*vb>3(C#7&4Q< z51J$*k~|fqheaOvq5@aj_1Jau3OJ)KXX+f%5K{9TpuX+Os5B>*nbR`q_f%|=9}Sx& zYsz)&2v#Fq!ptwyREc~Q9*SsEAUU8v@A4may@94Vb1oyUl-uZZeaerKGBH=MqOV=l z|D28kHCj0DsgZ>=D=~c`31qNM5^_aL{LzgEg}QHG#|up5z%!xSF5>G2eQE1th-*TP zj(f@;iJYnD+H70n3@<9p_q4joBuE=vJB*=r0$T4IiAfhQedjv9>Tl?*?*-C!mr0be z8u9^AlX4U?)^+%$&GLtkLwdrg@V179OtEdC|0p^`0S@gk@*f! 
zF82qrroJZTZ}+9O3st&GytfOhD<|yLEmSbhoc0QLC1KTD&zyJXk;j<*PmNzp0D4F~ z7kN7R@sloyZe>Yei8H5ktl^!so55*_E1 zwfQOREx`Q;*5pWinO3r+VMb%1p&e~Y(fZ9cB?5F-YjH-zM8@jOnbldg5_A<5kQ(3{ z0C*&=@4CRrSe-0-RGA9^c$yxmX92l7%Op@5;GmV^*Dj0t3p*2cQxu{B;!akQl|Zk8 zfYpu&A+YUBP~-a9Lk9{inm_n$tf4oe(e1{^OvaOyZ|~toy4*;=^Rj(qr|MCQwtPL6 z`+93j;4`%QQ=l~3^YjM_tOONsZ^{IwzU57mC_23I%#ciGH|Vqubo$B}U?!fB%Q^1S zEG_=sOO}isSCl3v(aY0);l{Ryou!3hA~r1de$3h_Nkwc}C1V0{^*?s=o8r;vMl2=%x;V%f>FM?f{= zcdzOlso$qkx*`5X2(kDD;#Gq9-0{WHFpkyeW-#BV#L;lvqbj3@y@Bu7jz`7SMry`^ zxwMiE82Jg4p+aW+M&=hboRn@2eGx7C*2tPHWqH;g^D~G>&XNoAhEVQ3mw0Znxa_G0 z@$9iuNP8duG;03V!R&xkqqx+FyF@}isiZ--8b$kAET0JE0~_3c!7o-&Nv;&N{_m7X zm-5*Ex1Z1K5V|%fm<0(9& z!ybHx@~Q?6&2Kd>CA#x4NiABAnPIP{^ExF757ry`zF%dB_mAo0#e7PDCKPJUDGJ<3s-MMqejsQKzI-_T*+|FWXy z$k47W81u3)+M@?Uxj!V?fEKAaPHC+B7FCC1O|g$zF;uzanS1#K!=b52>QhYL?_a%_ zZJ{02P>NMGTK;$S;^2)y|S9YHvW&t_95az;cilC z4LZ&s5yU%1r&u_OE(m>`bWd!^@+12>_8KmGXLK{Ah*;C|P<}5F{M+?-SlNW-3|f{f zZt;OQxj|42l!VochG!tse>|o&JFq!R10S6bLDLk}Wo7RCm$}&YlOel}H(sE#2a^=6 zOeCHL>koWzl-D%?%=PWlQ31K+^J1eAIc~xRXdLiHD7`I4Rc4VjRDEjvY9_Qwy-KJk z*v0;12(V=|V*kSkN@Q{_NYyzdVej8-&~5->K?uSHaW(W|TCjr|U&)XwQ5pu9aswzprRzD% zcDn+}#_mYWT*bBxPcJq1o6;2UCu@f_9`2`!g#V|1eL%FI!UoS6;C2Z1oDt>@@d>J) z7Xb`KQkAv9afQAcsLI3yAOQ#BZ~$>%NMlaYoao2dsaZ5eKw13MWUzu&ytyIiiIzBO zE@S*d_`oJtTr9@*8G~^*!8MP3(0yqU!W%`;oK+*L5cxcg^8fU$whi#y@L_Q0a^-U! zg1HFzgF)p4x^?RF4mq>fmN`UEedTSKmZzgVYJ*{C_rW&O_I+gaeyha08N9@KQQhvs zbyC(}h0r7nSdQpW|9Ltyzlg*e^f1kosdJDnm6SPF4=b~YRIqLS z(xMc2Ct;(ZM`DnZEsS=a%cAd6#lu&EA!Jfqp*e{OoEd%WGed};#V}moJFQu!M&X}* zm1kaOl3V4{qKl);60QFp3c)p@?6&_4_bXJT8=`6{8tJ2g&$iyAQR~c(x^|715-T3ZA zSEEyh@UsYQY>n7rA1r4ss}x(H#Q7$vzGJK_xF9wns^NkfqNoSXPnr*V%^uzkyds(l1&tOmrlqLzH5p6aP0h~~{B0HXO zeD{Rt5N-x>*cfmJ1nXvFFgCAxwj8uz%pQ5n+ETUpju~u4#?}|-s>=2yR+syv=(U*2 zclW`BiA}_uq9jFrX4}Dsz=v#`oa0@8BNcSXCxzju+l}$DkBQRXbwzC^;UaUpT1j(RH=}M%Z#DyoBR8!eH;_*KYXZ{pp=u|5{e5DTKrNJ_pxaKZ z&mu{UHUF&?b6?;zB21ij8*T}dVyt89XgMMrB6VSc(3}y$wqQp&19^z#ggNqXACm>t z^*4BDlYUq+%ajBWZ82Nr-$*-C^uq;=N!SByq?!=!46|(aKv!=CbPS%s&U1El{Ile# zbZSWYr#K??4vwbH2Ie;mi)@Dt-~Mxc(IgzSTC|LMXA6J`$#MjX#pT9w6s!Ll+AZCo z&eGm(A>T@Zv$E~PY7f)qy%v%E?v(&JASwvdg-}T_-{t5g&`K~ibzR6E0Em=b;6lAH zy^`79)c-U#=k>)TY^9@va911ydk(F?D73FR?n@ z)MnYOae;8n;T;UN9UPi4%e;{7;rp2X z&b9!4yN|Y>#+n&u*mPtf#O~61YN`EMoEs+>b08*ByL zAho-^BsgNK;&iz)@&&yTHYb3HfU@`HByQqlfQo!&UHjDQ(^f!C#pU!w!btWJ>OD~i zNj*+dPJA5Y77?74o#a}V`^+l4HaT#UCg8ZxXi4$=CGXCZUy{0~4#yKu-{w3U$#bCj z^99jG7Jb%(NbJRc$XM2vh)JhQJ7BD#Q*=eURE_^~{9mY=nv4xyH)J_q-6S#THJB$u z0Sl1&1-+OLF@Af1m}ZsNFnD5a(6mm z0<9Et6P(;7k`&FxD1<=gC&>yz_3YF8IZwND@i*Prof|Lj{)ITJ1Hu8Yl|JC23FD#4 zc`GJURhtZPVJ+DF7}<9Q8~4K)xGR)ykJ(y@=vfsR>k**>#G=?hFFF;Q*>QFLrc?y+ z9j7P6z4^d}KvPECSlIF49@CghW29H;?2@8ONJeVS5tq#$cVAUqFiM$?I1a)=RU?#x zmgTh@2N|O-NS9U;4M;NI5ZQTj8Q!rV!iZJ3+iy$?i(B5!5M^bO_M#|qr@`8iV#hH( zZgf8ca}pwEEkJWx#ilhrpgxbtXhO@BfV8R`-lN=OLO5|4#& z&@?F2I!8ke$O^h$0^Wk(>s}79&ztbj78yyYdGE9BY~(?xq_{&j2ooqJUNlX4r$4fZ z7%xYxLQlft`X>mE*YH`j77WH`60LmhzWkSI>%I%t=7nHgCB=cGw7B?rp5~E-`oR+r zU?ifB5?P>2q^9f#_f9uI5b1l=c?dwfZMsA+kjJ zUlN!*e9*>v+mQ&C5l*GlIALy2{sWmpSq;dVJ1X1w%0s5_ENEQ5szfI*URs_$AYE>w zF(&L@niQ^)Hz61~T9J{RA}w1raVwc6uFB_`YJSR9@aR2^Vv{f83yE~YN903fcDyr2 zX?H*oo5-D_NRwT4TdAHkR?#;Xjmi1PC41g<>J>TutE_2Zk`N#=+Fdev7rv z$qUI$E2(MlvY()7YO#C8K{jx9f;4JKqXrz4YMgTq{tyyHUAe5BrAn(-IZq`!z^&3E zi^>XZH*g9z_0>YBuP^T8lU!Tab~==+ZvSD&T`?PW`$gZkA*}Adew08<275%B%!|hP z8R!e_AMm*TUu>2(kN?8M^`uJnQwX=T2EIj!-tg14=4=FigX zyB#w0e}Gs`o{+(|!1N2BPQ_cyZkxagvy(T8LP*3>&g;BmDB`8!34c@YUuNnw0XkXN z@TmljFY&GXdR-?E9b9vrHjt6p?o0$?pll9V>T!0}!>$$g5*btEG)$6K%JE7v{~c~} z<~>umbuqwBbQp5kI17AnoTt4Yn{)mSEA&sH_AM#=jwfN{3Abs*1d)AuU7o*Dts(t* 
zJ2LBs@D!ugWBY|THIFMkMEtv>AVTCexUXo6f9|-h-g(m50$o$wrVjT-hG7&R$tf9p zsuvQhBP=deYDdDj?s{I;X1jJh7BWquA{`X98QRlaD7HNazk1DcTJ&~7wd$PUZslp) z^(Nl)lx1&4gZsuWedQ+b!H=N>gYunJaM?sp+oe9N=H9>lDIqL&8u$*K18D{{P1KleO) ziw4D5XI`rA7I&m8m)?H=-`qyBk*$MYI{MO{E3V6TQU)Sy3TV$=0cV&DTQP!?xTE(# zjr-j(FV-|kE|M~d$*#8gasly~<;qqT*MrITw*Ld;7w3*QhQvHeLd)=<z`Y0jnM zd7vwMgmaVVU+l|i9rL`}VvX>|GD8UmvR}_}=KqcCzxi^2Z{1ko84b1^=xN?^xQ6yf ze%=vIg+}f;6c?q-S|A!NLKvg5ilbFZz4sP3mbT8$`(L`%-~FNnkhk#qF&T=_P>l=o zq=)+}JX66wAhkfFG=y@!%@-RMY&i)_8t)0kc3!&? h!u`+IGk~6@vkP(maXdrC`1@o-RYgsOayiqW{{s|x8hQW# literal 0 HcmV?d00001 diff --git a/docs/en/_static/image/tools/analysis/analyze_log.jpg b/docs/en/_static/image/tools/analysis/analyze_log.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8eb1a27d6464d255b84b23a7460a5f622f51712f GIT binary patch literal 68146 zcmeFa2|Sd2zdwG7C@r!@#8i|mv{4(ldbF*aO~>e*o-0EiKsTonVi_yFI|q0XpWLEJsgY zpl8)GXE^4-cE&68?JmBH*;VY?9XS58w;UhuW<1Ejd5G)y2?0SNVF^ho>2or&mo6(^ zQNF68s&hkEPv5}M$l|u;9V=@a+xt$=4<5R>x_LkG@%4M^9}pJ)A|f*CWpqq@LSj;K z%DdF}Ik|cH1%*Y$CDk>xb@dG&8=E@2x_f&2KKBn`$Hu=-OioSD%;J|Z#i}_9^{w69>;$h+TTX@#|HNJKQyxcH?V(D<&C=8QBxdSd#Tw#~cF z)0+CkI~RxMR-4ze38G^m1JdjEcyuLo3I781d6ps zGaI{nZiT!l)eyk1tnVi9;8WI>dNH?yAvL|c+#kwt>26ni(hX)D%C_V`9P@ZKb4u&Y z+epFNv~H27$M3aT(|#gJ67@WXF}z|VOFUxToS=hSx`BguQ;v)f?$lJ547GeR+6d6s zC?y_W>*h$lotI4>)A-tOQX9d%Z}F8c51e>|G7LK`uY?zZRWz1P)Ap`;a zTB3(l1LY8bhiGm)yp!OHSF2^6GdqK;3-Pr;YZsQ4+%VqglDs6V5HfJl-mc@i*3j^o zCpa2WzlBo#tvM{y0&IMfbULoa11F;2n}HO#yPnHdY^PcU>lH?T ze!sy=1t<9f%8n@rowwm#v4eEHOOeaQ?Li7rkpLf)_IJh{nBnqiu2WcB=qay@DAOFO zm&&P5mU^J5r93pbq*+jLk_uFvuHbV?9v|CyLSdrxMValziPsNmBdxH-^4z>ht~^~f zp*Ee(a1YU@JcpLO%)7P5Lt7!uVaKYXIz4xL){Wp&6cugIhf!Edlr22D7J?Dv&Dw^LFye2!tClTBH;Ky)P85(4qM zC<_#TJXeexsfMyh`|)>?Y>tn1BTdt)YJCv&oTuQ+P zm7?=@$7@?p6q%Zy$O*e@s8x7bS6>FIMUq4CLTkwTC_+@=4yyW^le@*((fC??f8df3 zk_sHX{m?S-(@@^o>^(u*@_h{`PRt!sxpf~Z@Hhy`+b)6fF_X*2QGv<~E){v$xTss} zjV^f=wp;ac${zw|wXdg~)bBXnRrJw6BS~NG>-fR|AQ&I;Kjvz|-zk2GU4RfT*4NAV z$IR~H3os0_H8RvbrMB}HARg$B=@R*Zpck zw%DCuB;IB!@Ua_xlK{4SNh|`K@Iopuo`<YhZ3B;12e%ZXC~ zK3zN3+F@wlDqP*Mt=dSn#`SJ^Z4Nw)p0SD3>fl!^hTilFH2y#;Lv0C+kK%=(0u8ok zUlMe@g|DVpRu}D`*TtTn=%9qT<~XQ({*WTVgb^quzs2)Wd`vm007DKHSeAqgKv@cy2VJ#bmyRGy*O?7eCa%^%!+ld+Pc5n*t~`{PIR zs@DD1JD3)j{2{Bt(-?w66cq@4w|x-{1>^QY|99TA{}zOmK(VtQzo4<(M$tJxPv>KU zakz~kmy#w^|H|w7{_&zT)c4b?1evNXftqNe1HjC@RC=ptJ6dAhYGs6e_W4()@=m_9=8e!vd`ZQG#?LYV2ez zOBsHDf^)jffViK$T*{|VwfD188xDp8#=H49kLktS9^$+A^w0-~$28MU`b~W(oEGe< zEG*b~{caEHPT+0Ah*-ImMGApyq+!pA2nVbCmdGnJ( zM96#{omSUeYAe0Jmb zjj%Y{_usFAOD^S7EH9|Y(oi$-a6UDel@`LvwxC}19V7=) zf$4>cLsS6!5OhB9v#Vq8M_ey#1;`gsfvFgZZWqJjuO|2i(rY_HU2Y_tr*-rA9;Xt} zN#4!x>Q64fSja;N%`jgS!E}ZSkld1SQ49BpLN4*d>$pN+j7^r`*0U53{s-$u&LKhR zma$`Zj_OGSTb^800R-v79L%0|G0 zJuLU2d{LxO1>_+R8|XmgEmfikLOzqPa4WUE+&vlcD!7emHx3_p(TtgmR69G5)Hyew z52Z2Ii|bjD?adPD&+TIb;|jSCA;&k~xaco{L#3k-REXD*nt@{@f%0}^WXO$>3wd3; zaKlMEOTw+(Hr6?8muQ>WTQa&sP9j+2Q#9!eZVjIqYDKVqH(WkWwYj&Qe7;t z75R9vhWg=sAG-2i8H>y*9HauOc)GqweCztc{gdlm7bJaJ*H)7D2Kcyoq)nUcz+)DK z@RX6Ul=)~p?U;Sk=m@Wih<0auF@B>a<4~kX`3cI-B11d7rKH#WLu_g*c0#&ZN9FO9Y6N;0t#O6v;mwY;0X&o4xG zf%NpP@44{+iOT_k2};15(&(3uT-!)2+5%um^!Pqf__1;0;vtDcx+x_u8&4*hUYC~R zH4M?#;17cjk_5hq;{hu0Sr@e$EBuihTR~6IScenN#*y~n#1h^O%_#EZ!`(%KjjX>G zJKOMHG)=gy?KlJ>h%$5c_V6Uab2K>fF;|I#=cTVz$RVuJ4#Fa(% zx{F%18?ygnio(EC6ny4}FJ@kX3T!FgH;&lm^3fFkL4C7BBg(UJIC2?RNShHm&nJ|G>rRh9bQP)~8e!K)0k83q_bfKQt`#Jvh4u8>qmQq239A zO-tz~m#%$&IoHQf_q2g(WtbFJ9}=DV0=v^y&TaaJg;1#ytUX`pzS~H>#uGDcFd*|D z3-L?$pDvXBw5#Fsj(&j|Lo}|P3hc2OSMHWHy@4(z2V&ER?ITxw55gMGWim$B>!>2b 
zYcD8%!6O#0YvANkCuA}VRxUmnSZe04SdYiB7RDE$38q&NF*R6$;7qnv#Qf3ysNHXN zkM^UkM1hEep$p*?N-fN0?~qz!g<5< z#e2`}3*>Nr7~4#iQ63dgIa`np{kK?w0S5PiLhSuSOz4+gA9Ex_f5~Olno_ zQ}jecYE@%*N{a1s#Mj7-P2NByHvL4`Do}p@OJw5*`El7;7#~g3;GukS8;)|u&D9Q{ zfU94+)-Adq&x$PA8hQ~q;8QmD&Sw52Hgn;DNAkG>v}7Ff&84b!EGXZtuRb4EzCI0%`+a=diS!eOn1-%8!N(i2og^Z%YfL<9@ zrIQq$MKRDVr&JEJ;3Lskji)enq|IoHMoku8d@M6<%;W6E-i08J-bg0){5RP}PgeHk zDBTI?d^M?YP6ViWv=JakizY-Pct(6#diCz*%nrDnes(d4ru50ZrVBcY`<1Nb6j?e~ zLzt~u2Zn6L+EQgh@27pacKWNT3yTbZ@4jz@26fI?S_ZOj-V*twZ^%K^$NM!S>+A@! zK;$FIiYkVUNh*L@qDXi=W?E{Y0;mxZoTIKJs!mOWFpd-OcG`EPyLxY~?J$3AlTx_A z#BQF~RZ*OBg|uN;&NVhshRE@l=|}>Iz3gemP;~py`e%}^BzUK24Zcs2s!+FX-7<7m z?e6YTwI%ahOn;ap$67ZM92QbxwL4EiswYst7eT4+0*-9KV9alMqNbdw6yW#FtR z98oAzS_tPif_V+}V71$YEtEv$yXW zIc%y+9`=6POc);pLj8B%NRNm3E|u*YL;32XEIabvs7UzxkgH9`rDC81Ms)B%^)#Nr zNc{_w=KwM-L2f+c08$fIHhdIsRVB_z*fA+GRjAHUFJ0n3r0T`|WY%eLuBa@>;N2&$ z6>dvlz;HnNNnSau)`+yBZxnFG_G+B5?Z$y1S7zSJ_B8Tq@_iv7vUm<@l}V^MPY5W1 z)MmKXkL+`z0uafq+Af6si~Cb-8v{Y=IfPNODoebXQL;W#kER3bX% zB>^u+INw~J(Oaw*eXzjB$`#9gn@0Hqz&QW)w?MXS3$lG(;f?`7&5vuFux(xcmJ#@? zsXsPX!pVhM(`H;BJoIbbdXmv_$VhQ{u|WbX?KE(s^d)NI{Xqqq zn1(d8`AI86yR+mK#4<9KPK4hHRmW0+{ll=!6Ss@`{muOo8a|yG{NP$O+S^7FZDScB zRqznl$L8YQRGcCoID}-O?`3TsRqUBW*-dDSzE7gCg9*`@17Rdjt0Zz@jQF$g5W;J7 zDv;DFj@#t^0~YRRAJkgGIFQM2_)sV43O1W+)Sfl@2wiv~HH+Ci=ruYLJb-S?K-%VI zw~=yj8$RP92omG0S`hi3;tQ0mDypicCyHDWSFsfqLJ5QPhx*|i*v6>+bsibSF zAuojLJkQ=oJ!Z5=sF!P*By{qq&{#(8IR2J3^Vz>eoko)NPzw8*c_+-}s}fal!v8{Z}5DG1H8 zt}$}4wb`(BBpp|TRmB=pfxX1>(hvfP98c1KWUae0Q_SEhdh;kN&SO=*o>It5-5Pk; zVL@xq5vsf$$UeQQMGlU_1gpZvQne`i<6)051*hjm9YP4^>Yl={Q2tXu>wP%J+SNV8j+~SAsrIE0G zDIV$4C{21EDNk$P#E!x*LO6@S^AP~U%`}Ql{`}gnpmh9VRk;keYeYv1_*q82RDsMszkgsHpsxEKG zOl{l%efYWtD!`!zs=f_y%FsjlU?!deLNW#Hr7x|%b6O(c{3ylZ%jvF)5NmVJz#;t< zGQAj-Q1S^U?Jd!wUuAS9#r>Uauw?x!x||O^DTnR^vD|~uhM(sNX7~G{QP3B4f^L#~ zP&fw%FfMGTSFR87s$eC|*kIG32WMr2O9;mGZV3Opl@xWZLt^umE4Ngn6DE&+ImK=8 zET|lwn+_%Dmq19ASq$ME2-%KJgBI5PE$BThf?dR7vV+VCY$>|Av z-4_mibFtjtqS_Wg*1+HH$y*KPR&T%Si%v3n8o$>dxHi(F@z!-U)>+rMJvV*|GyMWR znS-G3vbH3QxZ^X3BM+8*NmqbURIh-KzG)00b3ZhG@mnz7ckk;#0#RQTuvx@?P)g{R z-U)L;A?4CAC@vqmK-0j}@=pQ5?^+5&r6D%dZ;?R%GVCGfM1JNSqykE!c0YsY{%ux) zJ$y`@*9B6|e28Q?t{|eObhsknUEMB7;D_|t-h_+$oo~lS>2<=~Z9GrBNY%}fWOQ&u z#H(Y9Pk@%xC}50fVK;7)xZ%kZu7l@KSrqV-@LvqYEaYT`B@Df0W)azme;KcZ_nk;t*Y{cd^qD9-FTldk%)B~8BR zdUprH0!fh46I8*-4w41#4Unt^-=Zv=pggu(RG`S8!WVsQ@l@^bvz(|Q=>C5iUNcJq5 zR%V!!N21@Fd_`>jX`#)Ftl`9okHI6XBo%`5Xej!S%Z5@XDBu@FwFRN8QtVN7#Pdk{ zvO{@v7jc^X%PEqweG!9bwB()k(1;~UL(O(WG(Q)NRk?SA5nJ{rH7 zY}_1MaaVZclh)m}IXAVY9@sdT6rMSX47IV%YF+Ka+Ms5Xxjdy({6j)<-c^MZK>?rc zc5?VN7%E@&v@o$UhYY$K{g}M#Szu&tCW4E6>bG<}GSmTeWJ`s=J!{MYs=81K+}umk z&M0X3;6y>u>&t@+4D;gzGel1-&90MK6ZxqU?f=e@+R@)^0 z2$4*LX$(So1fo$Ndf$M)+jDReZ?iaGeq{#J@c%r37zigDqe|c$*ep_bIRm9~ePm0> zn$|u*FmA_x;@bNi^b3Og-x~$TM~s5;k61L;>?kwous_vm)quL|YoaY?eP=cm=qPk9 zHuxUW|6?@abm&2lo#tKxTjxTN`HC}|eu$s{KCeHiz^Lt|oIpzBx|5lhN#{ExHaV7# zFJ|n$?76o(C&SE__i)dh_|nG2iuF#Cx{YSm(QIrE&!ES!Cn1(&>CTmb&uB-KrHcA|P^=srxKe|Y*~ozHLJAPvN8yD%@=)wM-?zeJ9!3iTlgs9m zq1vP*)Z4q7bCYX%DOV&&Q(b7GyYGFgRJ7{>iktn)Ge^#8iuIDb7OVt!6 zf>pwxi^au)9OE~#yEB8Bfv%>GR2zQ#%-Xd({GD)l^x|L&WGV9wMKdCT^yQ1b)rYRe zqf1HJurJ6|